1 /*
2 * Copyright (c) 2004-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "kpi_interface.h"
30
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #ifdef INET6
72 #include <netinet6/mld6_var.h>
73 #endif
74 #include <netkey/key.h>
75 #include <stdbool.h>
76
77 #include "net/net_str_id.h"
78
79 #if CONFIG_MACF
80 #include <sys/kauth.h>
81 #include <security/mac_framework.h>
82 #endif
83
84
85 #undef ifnet_allocate
86 errno_t ifnet_allocate(const struct ifnet_init_params *init,
87 ifnet_t *ifp);
88
89 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
90 ifnet_t *ifp, bool is_internal);
91
92
93 #define TOUCHLASTCHANGE(__if_lastchange) { \
94 (__if_lastchange)->tv_sec = net_uptime(); \
95 (__if_lastchange)->tv_usec = 0; \
96 }
97
98 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int,
99 struct ifnet_llreach_info *);
100 static void ifnet_kpi_free(ifnet_t);
101 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
102 u_int32_t *);
103 static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
104 u_char, int);
105 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
106
107 /*
108 * Temporary work around until we have real reference counting
109 *
110 * We keep the bits about calling dlil_if_release (which should be
111 * called recycle) transparent by calling it from our if_free function
112 * pointer. We have to keep the client's original detach function
113 * somewhere so we can call it.
114 */
115 static void
116 ifnet_kpi_free(ifnet_t ifp)
117 {
118 ifnet_detached_func detach_func = ifp->if_kpi_storage;
119
120 if (detach_func != NULL)
121 detach_func(ifp);
122
123 if (ifp->if_broadcast.length > sizeof (ifp->if_broadcast.u.buffer)) {
124 FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
125 ifp->if_broadcast.u.ptr = NULL;
126 }
127
128 dlil_if_release(ifp);
129 }
130
131 errno_t
132 ifnet_allocate_common(const struct ifnet_init_params *init,
133 ifnet_t *ifp, bool is_internal)
134 {
135 struct ifnet_init_eparams einit;
136
137 bzero(&einit, sizeof (einit));
138
139 einit.ver = IFNET_INIT_CURRENT_VERSION;
140 einit.len = sizeof (einit);
141 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
142 if (!is_internal) {
143 einit.flags |= IFNET_INIT_ALLOC_KPI;
144 }
145 einit.uniqueid = init->uniqueid;
146 einit.uniqueid_len = init->uniqueid_len;
147 einit.name = init->name;
148 einit.unit = init->unit;
149 einit.family = init->family;
150 einit.type = init->type;
151 einit.output = init->output;
152 einit.demux = init->demux;
153 einit.add_proto = init->add_proto;
154 einit.del_proto = init->del_proto;
155 einit.check_multi = init->check_multi;
156 einit.framer = init->framer;
157 einit.softc = init->softc;
158 einit.ioctl = init->ioctl;
159 einit.set_bpf_tap = init->set_bpf_tap;
160 einit.detach = init->detach;
161 einit.event = init->event;
162 einit.broadcast_addr = init->broadcast_addr;
163 einit.broadcast_len = init->broadcast_len;
164
165 return (ifnet_allocate_extended(&einit, ifp));
166 }
167
168 errno_t
169 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
170 {
171 return (ifnet_allocate_common(init, ifp, true));
172 }
173
174 errno_t
175 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
176 {
177 return (ifnet_allocate_common(init, ifp, false));
178 }
179
180 errno_t
181 ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
182 ifnet_t *interface)
183 {
184 struct ifnet_init_eparams einit;
185 struct ifnet *ifp = NULL;
186 char if_xname[IFXNAMSIZ] = {0};
187 int error;
188
189 einit = *einit0;
190
191 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
192 einit.len < sizeof (einit))
193 return (EINVAL);
194
195 if (einit.family == 0 || einit.name == NULL ||
196 strlen(einit.name) >= IFNAMSIZ ||
197 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0)
198 return (EINVAL);
199
200
201 if (einit.flags & IFNET_INIT_LEGACY) {
202 if (einit.output == NULL ||
203 (einit.flags & IFNET_INIT_INPUT_POLL))
204 return (EINVAL);
205
206 einit.pre_enqueue = NULL;
207 einit.start = NULL;
208 einit.output_ctl = NULL;
209 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
210 einit.input_poll = NULL;
211 einit.input_ctl = NULL;
212 } else {
213 if (einit.start == NULL)
214 return (EINVAL);
215
216 einit.output = NULL;
217 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX)
218 return (EINVAL);
219
220 if (einit.flags & IFNET_INIT_INPUT_POLL) {
221 if (einit.input_poll == NULL || einit.input_ctl == NULL)
222 return (EINVAL);
223 } else {
224 einit.input_poll = NULL;
225 einit.input_ctl = NULL;
226 }
227 }
228
229
230 /* Initialize external name (name + unit) */
231 (void) snprintf(if_xname, sizeof (if_xname), "%s%d",
232 einit.name, einit.unit);
233
234 if (einit.uniqueid == NULL) {
235 einit.uniqueid = if_xname;
236 einit.uniqueid_len = strlen(if_xname);
237 }
238
239 error = dlil_if_acquire(einit.family, einit.uniqueid,
240 einit.uniqueid_len, if_xname, &ifp);
241
242 if (error == 0) {
243 u_int64_t br;
244
245 /*
246 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
247 * to point to storage of at least IFNAMSIZ bytes. It is safe
248 * to write to this.
249 */
250 strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
251 ifp->if_type = einit.type;
252 ifp->if_family = einit.family;
253 ifp->if_subfamily = einit.subfamily;
254 ifp->if_unit = einit.unit;
255 ifp->if_output = einit.output;
256 ifp->if_pre_enqueue = einit.pre_enqueue;
257 ifp->if_start = einit.start;
258 ifp->if_output_ctl = einit.output_ctl;
259 ifp->if_output_sched_model = einit.output_sched_model;
260 ifp->if_output_bw.eff_bw = einit.output_bw;
261 ifp->if_output_bw.max_bw = einit.output_bw_max;
262 ifp->if_output_lt.eff_lt = einit.output_lt;
263 ifp->if_output_lt.max_lt = einit.output_lt_max;
264 ifp->if_input_poll = einit.input_poll;
265 ifp->if_input_ctl = einit.input_ctl;
266 ifp->if_input_bw.eff_bw = einit.input_bw;
267 ifp->if_input_bw.max_bw = einit.input_bw_max;
268 ifp->if_input_lt.eff_lt = einit.input_lt;
269 ifp->if_input_lt.max_lt = einit.input_lt_max;
270 ifp->if_demux = einit.demux;
271 ifp->if_add_proto = einit.add_proto;
272 ifp->if_del_proto = einit.del_proto;
273 ifp->if_check_multi = einit.check_multi;
274 ifp->if_framer_legacy = einit.framer;
275 ifp->if_framer = einit.framer_extended;
276 ifp->if_softc = einit.softc;
277 ifp->if_ioctl = einit.ioctl;
278 ifp->if_set_bpf_tap = einit.set_bpf_tap;
279 ifp->if_free = ifnet_kpi_free;
280 ifp->if_event = einit.event;
281 ifp->if_kpi_storage = einit.detach;
282
283 /* Initialize external name (name + unit) */
284 snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
285 "%s", if_xname);
286
287 /*
288 * On embedded, framer() is already in the extended form;
289 * we simply use it as is, unless the caller specifies
290 * framer_extended() which will then override it.
291 *
292 * On non-embedded, framer() has long been exposed as part
293 * of the public KPI, and therefore its signature must
294 * remain the same (without the pre- and postpend length
295 * parameters.) We special case ether_frameout, such that
296 * it gets mapped to its extended variant. All other cases
297 * utilize the stub routine which will simply return zeroes
298 * for those new parameters.
299 *
300 * Internally, DLIL will only use the extended callback
301 * variant which is represented by if_framer.
302 */
303 #if CONFIG_EMBEDDED
304 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL)
305 ifp->if_framer = ifp->if_framer_legacy;
306 #else /* !CONFIG_EMBEDDED */
307 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
308 if (ifp->if_framer_legacy == ether_frameout)
309 ifp->if_framer = ether_frameout_extended;
310 else
311 ifp->if_framer = ifnet_framer_stub;
312 }
313 #endif /* !CONFIG_EMBEDDED */
314
315 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
316 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
317 else if (ifp->if_output_bw.eff_bw == 0)
318 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
319
320 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
321 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
322 else if (ifp->if_input_bw.eff_bw == 0)
323 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
324
325 if (ifp->if_output_bw.max_bw == 0)
326 ifp->if_output_bw = ifp->if_input_bw;
327 else if (ifp->if_input_bw.max_bw == 0)
328 ifp->if_input_bw = ifp->if_output_bw;
329
330 /* Pin if_baudrate to 32 bits */
331 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
332 if (br != 0)
333 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
334
335 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
336 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
337 else if (ifp->if_output_lt.eff_lt == 0)
338 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
339
340 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
341 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
342 else if (ifp->if_input_lt.eff_lt == 0)
343 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
344
345 if (ifp->if_output_lt.max_lt == 0)
346 ifp->if_output_lt = ifp->if_input_lt;
347 else if (ifp->if_input_lt.max_lt == 0)
348 ifp->if_input_lt = ifp->if_output_lt;
349
350 if (ifp->if_ioctl == NULL)
351 ifp->if_ioctl = ifp_if_ioctl;
352
353 ifp->if_eflags = 0;
354 if (ifp->if_start != NULL) {
355 ifp->if_eflags |= IFEF_TXSTART;
356 if (ifp->if_pre_enqueue == NULL)
357 ifp->if_pre_enqueue = ifnet_enqueue;
358 ifp->if_output = ifp->if_pre_enqueue;
359 } else {
360 ifp->if_eflags &= ~IFEF_TXSTART;
361 }
362
363 if (ifp->if_input_poll != NULL)
364 ifp->if_eflags |= IFEF_RXPOLL;
365 else
366 ifp->if_eflags &= ~IFEF_RXPOLL;
367
368 ifp->if_output_dlil = dlil_output_handler;
369 ifp->if_input_dlil = dlil_input_handler;
370
371 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
372 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
373 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
374 ifp->if_input_ctl == NULL));
375 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
376 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
377
378 if (einit.broadcast_len && einit.broadcast_addr) {
379 if (einit.broadcast_len >
380 sizeof (ifp->if_broadcast.u.buffer)) {
381 MALLOC(ifp->if_broadcast.u.ptr, u_char *,
382 einit.broadcast_len, M_IFADDR, M_NOWAIT);
383 if (ifp->if_broadcast.u.ptr == NULL) {
384 error = ENOMEM;
385 } else {
386 bcopy(einit.broadcast_addr,
387 ifp->if_broadcast.u.ptr,
388 einit.broadcast_len);
389 }
390 } else {
391 bcopy(einit.broadcast_addr,
392 ifp->if_broadcast.u.buffer,
393 einit.broadcast_len);
394 }
395 ifp->if_broadcast.length = einit.broadcast_len;
396 } else {
397 bzero(&ifp->if_broadcast, sizeof (ifp->if_broadcast));
398 }
399
400 ifp->if_xflags = 0;
401
402 /*
403 * The output target queue delay is specified in milliseconds;
404 * convert it to nanoseconds.
405 */
406 IFCQ_TARGET_QDELAY(&ifp->if_snd) =
407 einit.output_target_qdelay * 1000 * 1000;
408 IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;
409
410 ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
411 einit.start_delay_timeout);
412
413 IFCQ_PKT_DROP_LIMIT(&ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;
414
415 /*
416 * Set embryonic flag; this will be cleared
417 * later when it is fully attached.
418 */
419 ifp->if_refflags = IFRF_EMBRYONIC;
420
421 /*
422 * Count the newly allocated ifnet
423 */
424 OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
425 INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
426 if (einit.flags & IFNET_INIT_ALLOC_KPI) {
427 ifp->if_xflags |= IFXF_ALLOC_KPI;
428 } else {
429 OSIncrementAtomic64(
430 &net_api_stats.nas_ifnet_alloc_os_count);
431 INC_ATOMIC_INT64_LIM(
432 net_api_stats.nas_ifnet_alloc_os_total);
433 }
434
435 if (error == 0) {
436 *interface = ifp;
437 // temporary - this should be done in dlil_if_acquire
438 ifnet_reference(ifp);
439 } else {
440 dlil_if_release(ifp);
441 *interface = NULL;
442 }
443 }
444 return (error);
445 }
446
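/*
 * Example (illustrative sketch, not part of the original file): how a
 * hypothetical driver might fill in ifnet_init_params and use the legacy
 * ifnet_allocate() KPI above.  The "exmpl" name, the callback parameters
 * and the follow-up call to ifnet_attach() (declared in net/kpi_interface.h)
 * are assumptions for illustration only.
 */
static errno_t
exmpl_create_interface(void *softc, ifnet_output_func output,
    ifnet_demux_func demux, ifnet_add_proto_func add_proto,
    ifnet_del_proto_func del_proto, ifnet_t *ifpp)
{
	struct ifnet_init_params init;
	errno_t err;

	bzero(&init, sizeof (init));
	init.name = "exmpl";		/* hypothetical interface name */
	init.unit = 0;
	init.family = IFNET_FAMILY_ETHERNET;
	init.type = IFT_ETHER;
	init.output = output;		/* legacy model requires output() */
	init.demux = demux;
	init.add_proto = add_proto;
	init.del_proto = del_proto;
	init.softc = softc;

	/* ifnet_allocate() takes an initial reference on success */
	err = ifnet_allocate(&init, ifpp);
	if (err == 0)
		err = ifnet_attach(*ifpp, NULL);
	return (err);
}
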
447 errno_t
448 ifnet_reference(ifnet_t ifp)
449 {
450 return (dlil_if_ref(ifp));
451 }
452
453 errno_t
454 ifnet_release(ifnet_t ifp)
455 {
456 return (dlil_if_free(ifp));
457 }
458
459 errno_t
460 ifnet_interface_family_find(const char *module_string,
461 ifnet_family_t *family_id)
462 {
463 if (module_string == NULL || family_id == NULL)
464 return (EINVAL);
465
466 return (net_str_id_find_internal(module_string, family_id,
467 NSI_IF_FAM_ID, 1));
468 }
469
470 void *
471 ifnet_softc(ifnet_t interface)
472 {
473 return ((interface == NULL) ? NULL : interface->if_softc);
474 }
475
476 const char *
477 ifnet_name(ifnet_t interface)
478 {
479 return ((interface == NULL) ? NULL : interface->if_name);
480 }
481
482 ifnet_family_t
483 ifnet_family(ifnet_t interface)
484 {
485 return ((interface == NULL) ? 0 : interface->if_family);
486 }
487
488 ifnet_subfamily_t
489 ifnet_subfamily(ifnet_t interface)
490 {
491 return ((interface == NULL) ? 0 : interface->if_subfamily);
492 }
493
494 u_int32_t
495 ifnet_unit(ifnet_t interface)
496 {
497 return ((interface == NULL) ? (u_int32_t)0xffffffff :
498 (u_int32_t)interface->if_unit);
499 }
500
501 u_int32_t
502 ifnet_index(ifnet_t interface)
503 {
504 return ((interface == NULL) ? (u_int32_t)0xffffffff :
505 interface->if_index);
506 }
507
508 errno_t
509 ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
510 {
511 uint16_t old_flags;
512
513 if (interface == NULL)
514 return (EINVAL);
515
516 ifnet_lock_exclusive(interface);
517
518 /* If we are modifying the up/down state, call if_updown */
519 if ((mask & IFF_UP) != 0) {
520 if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
521 }
522
523 old_flags = interface->if_flags;
524 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
525 /* If we are modifying the multicast flag, set/unset the silent flag */
526 if ((old_flags & IFF_MULTICAST) !=
527 (interface->if_flags & IFF_MULTICAST)) {
528 #if INET
529 if (IGMP_IFINFO(interface) != NULL)
530 igmp_initsilent(interface, IGMP_IFINFO(interface));
531 #endif /* INET */
532 #if INET6
533 if (MLD_IFINFO(interface) != NULL)
534 mld6_initsilent(interface, MLD_IFINFO(interface));
535 #endif /* INET6 */
536 }
537
538 ifnet_lock_done(interface);
539
540 return (0);
541 }
542
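/*
 * Example (illustrative sketch): marking a hypothetical interface up and
 * broadcast-capable without disturbing any other flag bits.  Only the bits
 * named in the mask are modified, and the IFF_UP transition is routed
 * through if_updown() by ifnet_set_flags() above.
 */
static errno_t
exmpl_mark_up(ifnet_t ifp)
{
	return (ifnet_set_flags(ifp, IFF_UP | IFF_BROADCAST,
	    IFF_UP | IFF_BROADCAST));
}
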
543 u_int16_t
544 ifnet_flags(ifnet_t interface)
545 {
546 return ((interface == NULL) ? 0 : interface->if_flags);
547 }
548
549 /*
550 * This routine ensures the following:
551 *
552 * If IFEF_AWDL is set by the caller, also set the rest of flags as
553 * defined in IFEF_AWDL_MASK.
554 *
555 * If IFEF_AWDL has been set on the interface and the caller attempts
556 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
557 * return failure.
558 *
559 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
560 * on the interface.
561 *
562 * All other flags not associated with AWDL are not affected.
563 *
564 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
565 */
566 static errno_t
567 ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
568 {
569 u_int32_t eflags;
570
571 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
572
573 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
574
575 if (ifp->if_eflags & IFEF_AWDL) {
576 if (eflags & IFEF_AWDL) {
577 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK)
578 return (EINVAL);
579 } else {
580 *new_eflags &= ~IFEF_AWDL_MASK;
581 *mask |= IFEF_AWDL_MASK;
582 }
583 } else if (eflags & IFEF_AWDL) {
584 *new_eflags |= IFEF_AWDL_MASK;
585 *mask |= IFEF_AWDL_MASK;
586 } else if (eflags & IFEF_AWDL_RESTRICTED &&
587 !(ifp->if_eflags & IFEF_AWDL))
588 return (EINVAL);
589
590 return (0);
591 }
592
593 errno_t
594 ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
595 {
596 uint32_t oeflags;
597 struct kev_msg ev_msg;
598 struct net_event_data ev_data;
599
600 if (interface == NULL)
601 return (EINVAL);
602
603 bzero(&ev_msg, sizeof(ev_msg));
604 ifnet_lock_exclusive(interface);
605 /*
606 * Sanity checks for IFEF_AWDL and its related flags.
607 */
608 if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
609 ifnet_lock_done(interface);
610 return (EINVAL);
611 }
612 oeflags = interface->if_eflags;
613 interface->if_eflags =
614 (new_flags & mask) | (interface->if_eflags & ~mask);
615 ifnet_lock_done(interface);
616 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
617 !(oeflags & IFEF_AWDL_RESTRICTED)) {
618 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
619 /*
620 * The interface is now restricted to applications that have
621 * the entitlement.
622 * The check for the entitlement will be done in the data
623 * path, so we don't have to do anything here.
624 */
625 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
626 !(interface->if_eflags & IFEF_AWDL_RESTRICTED))
627 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
628 /*
629 * Notify configd so that it has a chance to perform better
630 * reachability detection.
631 */
632 if (ev_msg.event_code) {
633 bzero(&ev_data, sizeof(ev_data));
634 ev_msg.vendor_code = KEV_VENDOR_APPLE;
635 ev_msg.kev_class = KEV_NETWORK_CLASS;
636 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
637 strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
638 ev_data.if_family = interface->if_family;
639 ev_data.if_unit = interface->if_unit;
640 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
641 ev_msg.dv[0].data_ptr = &ev_data;
642 ev_msg.dv[1].data_length = 0;
643 dlil_post_complete_msg(interface, &ev_msg);
644 }
645
646 return (0);
647 }
648
649 u_int32_t
650 ifnet_eflags(ifnet_t interface)
651 {
652 return ((interface == NULL) ? 0 : interface->if_eflags);
653 }
654
655 errno_t
656 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
657 {
658 int before, after;
659
660 if (ifp == NULL)
661 return (EINVAL);
662
663 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
664 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
665
666 /*
667 * If this is called prior to ifnet attach, the actual work will
668 * be done at attach time. Otherwise, if it is called after
669 * ifnet detach, then it is a no-op.
670 */
671 if (!ifnet_is_attached(ifp, 0)) {
672 ifp->if_idle_new_flags = new_flags;
673 ifp->if_idle_new_flags_mask = mask;
674 return (0);
675 } else {
676 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
677 }
678
679 before = ifp->if_idle_flags;
680 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
681 after = ifp->if_idle_flags;
682
683 if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
684 ifp->if_want_aggressive_drain != 0) {
685 ifp->if_want_aggressive_drain = 0;
686 } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
687 ifp->if_want_aggressive_drain++;
688 }
689
690 return (0);
691 }
692
693 errno_t
694 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
695 {
696 errno_t err;
697
698 lck_mtx_lock(rnh_lock);
699 ifnet_lock_exclusive(ifp);
700 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
701 ifnet_lock_done(ifp);
702 lck_mtx_unlock(rnh_lock);
703
704 return (err);
705 }
706
707 u_int32_t
708 ifnet_idle_flags(ifnet_t ifp)
709 {
710 return ((ifp == NULL) ? 0 : ifp->if_idle_flags);
711 }
712
713 errno_t
714 ifnet_set_link_quality(ifnet_t ifp, int quality)
715 {
716 errno_t err = 0;
717
718 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
719 err = EINVAL;
720 goto done;
721 }
722
723 if (!ifnet_is_attached(ifp, 0)) {
724 err = ENXIO;
725 goto done;
726 }
727
728 if_lqm_update(ifp, quality, 0);
729
730 done:
731 return (err);
732 }
733
734 int
735 ifnet_link_quality(ifnet_t ifp)
736 {
737 int lqm;
738
739 if (ifp == NULL)
740 return (IFNET_LQM_THRESH_OFF);
741
742 ifnet_lock_shared(ifp);
743 lqm = ifp->if_interface_state.lqm_state;
744 ifnet_lock_done(ifp);
745
746 return (lqm);
747 }
748
749 errno_t
750 ifnet_set_interface_state(ifnet_t ifp,
751 struct if_interface_state *if_interface_state)
752 {
753 errno_t err = 0;
754
755 if (ifp == NULL || if_interface_state == NULL) {
756 err = EINVAL;
757 goto done;
758 }
759
760 if (!ifnet_is_attached(ifp, 0)) {
761 err = ENXIO;
762 goto done;
763 }
764
765 if_state_update(ifp, if_interface_state);
766
767 done:
768 return (err);
769 }
770
771 errno_t
772 ifnet_get_interface_state(ifnet_t ifp,
773 struct if_interface_state *if_interface_state)
774 {
775 errno_t err = 0;
776
777 if (ifp == NULL || if_interface_state == NULL) {
778 err = EINVAL;
779 goto done;
780 }
781
782 if (!ifnet_is_attached(ifp, 0)) {
783 err = ENXIO;
784 goto done;
785 }
786
787 if_get_state(ifp, if_interface_state);
788
789 done:
790 return (err);
791 }
792
793
794 static errno_t
795 ifnet_defrouter_llreachinfo(ifnet_t ifp, int af,
796 struct ifnet_llreach_info *iflri)
797 {
798 if (ifp == NULL || iflri == NULL)
799 return (EINVAL);
800
801 VERIFY(af == AF_INET || af == AF_INET6);
802
803 return (ifnet_llreach_get_defrouter(ifp, af, iflri));
804 }
805
806 errno_t
807 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
808 {
809 return (ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri));
810 }
811
812 errno_t
813 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
814 {
815 return (ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri));
816 }
817
818 errno_t
819 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
820 u_int32_t mask)
821 {
822 errno_t error = 0;
823 int tmp;
824
825 if (ifp == NULL)
826 return (EINVAL);
827
828 ifnet_lock_exclusive(ifp);
829 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
830 if ((tmp & ~IFCAP_VALID))
831 error = EINVAL;
832 else
833 ifp->if_capabilities = tmp;
834 ifnet_lock_done(ifp);
835
836 return (error);
837 }
838
839 u_int32_t
840 ifnet_capabilities_supported(ifnet_t ifp)
841 {
842 return ((ifp == NULL) ? 0 : ifp->if_capabilities);
843 }
844
845
846 errno_t
847 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
848 u_int32_t mask)
849 {
850 errno_t error = 0;
851 int tmp;
852 struct kev_msg ev_msg;
853 struct net_event_data ev_data;
854
855 if (ifp == NULL)
856 return (EINVAL);
857
858 ifnet_lock_exclusive(ifp);
859 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
860 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities))
861 error = EINVAL;
862 else
863 ifp->if_capenable = tmp;
864 ifnet_lock_done(ifp);
865
866 /* Notify application of the change */
867 bzero(&ev_data, sizeof (struct net_event_data));
868 bzero(&ev_msg, sizeof (struct kev_msg));
869 ev_msg.vendor_code = KEV_VENDOR_APPLE;
870 ev_msg.kev_class = KEV_NETWORK_CLASS;
871 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
872
873 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
874 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
875 ev_data.if_family = ifp->if_family;
876 ev_data.if_unit = (u_int32_t)ifp->if_unit;
877 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
878 ev_msg.dv[0].data_ptr = &ev_data;
879 ev_msg.dv[1].data_length = 0;
880 dlil_post_complete_msg(ifp, &ev_msg);
881
882 return (error);
883 }
884
885 u_int32_t
886 ifnet_capabilities_enabled(ifnet_t ifp)
887 {
888 return ((ifp == NULL) ? 0 : ifp->if_capenable);
889 }
890
891 static const ifnet_offload_t offload_mask =
892 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
893 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
894 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
895 IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
896 IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
897 IFNET_SW_TIMESTAMP);
898
899 static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
900
901 errno_t
902 ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
903 {
904 u_int32_t ifcaps = 0;
905
906 if (interface == NULL)
907 return (EINVAL);
908
909 ifnet_lock_exclusive(interface);
910 interface->if_hwassist = (offload & offload_mask);
911
912 /*
913 * Hardware capable of partial checksum offload is
914 * flexible enough to handle any transports utilizing
915 * Internet Checksumming. Include those transports
916 * here, and leave the final decision to IP.
917 */
918 if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
919 interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
920 IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
921 }
922 if (dlil_verbose) {
923 log(LOG_DEBUG, "%s: set offload flags=%b\n",
924 if_name(interface),
925 interface->if_hwassist, IFNET_OFFLOADF_BITS);
926 }
927 ifnet_lock_done(interface);
928
929 if ((offload & any_offload_csum))
930 ifcaps |= IFCAP_HWCSUM;
931 if ((offload & IFNET_TSO_IPV4))
932 ifcaps |= IFCAP_TSO4;
933 if ((offload & IFNET_TSO_IPV6))
934 ifcaps |= IFCAP_TSO6;
935 if ((offload & IFNET_VLAN_MTU))
936 ifcaps |= IFCAP_VLAN_MTU;
937 if ((offload & IFNET_VLAN_TAGGING))
938 ifcaps |= IFCAP_VLAN_HWTAGGING;
939 if ((offload & IFNET_TX_STATUS))
940 ifcaps |= IFCAP_TXSTATUS;
941 if ((offload & IFNET_HW_TIMESTAMP))
942 ifcaps |= IFCAP_HW_TIMESTAMP;
943 if ((offload & IFNET_SW_TIMESTAMP))
944 ifcaps |= IFCAP_SW_TIMESTAMP;
945 if ((offload & IFNET_CSUM_PARTIAL))
946 ifcaps |= IFCAP_CSUM_PARTIAL;
947 if ((offload & IFNET_CSUM_ZERO_INVERT))
948 ifcaps |= IFCAP_CSUM_ZERO_INVERT;
949 if (ifcaps != 0) {
950 (void) ifnet_set_capabilities_supported(interface, ifcaps,
951 IFCAP_VALID);
952 (void) ifnet_set_capabilities_enabled(interface, ifcaps,
953 IFCAP_VALID);
954 }
955
956 return (0);
957 }
958
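/*
 * Example (illustrative sketch): a hypothetical Ethernet driver advertising
 * IPv4/IPv6 TCP and UDP checksum offload plus TSO.  As shown above,
 * ifnet_set_offload() also derives and publishes the matching IFCAP_* bits.
 */
static errno_t
exmpl_enable_offload(ifnet_t ifp)
{
	ifnet_offload_t offload = IFNET_CSUM_IP | IFNET_CSUM_TCP |
	    IFNET_CSUM_UDP | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
	    IFNET_TSO_IPV4 | IFNET_TSO_IPV6;

	return (ifnet_set_offload(ifp, offload));
}
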
959 ifnet_offload_t
960 ifnet_offload(ifnet_t interface)
961 {
962 return ((interface == NULL) ?
963 0 : (interface->if_hwassist & offload_mask));
964 }
965
966 errno_t
967 ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
968 {
969 errno_t error = 0;
970
971 if (interface == NULL || mtuLen < interface->if_mtu)
972 return (EINVAL);
973
974 switch (family) {
975 case AF_INET:
976 if (interface->if_hwassist & IFNET_TSO_IPV4)
977 interface->if_tso_v4_mtu = mtuLen;
978 else
979 error = EINVAL;
980 break;
981
982 case AF_INET6:
983 if (interface->if_hwassist & IFNET_TSO_IPV6)
984 interface->if_tso_v6_mtu = mtuLen;
985 else
986 error = EINVAL;
987 break;
988
989 default:
990 error = EPROTONOSUPPORT;
991 break;
992 }
993
994 return (error);
995 }
996
997 errno_t
998 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
999 {
1000 errno_t error = 0;
1001
1002 if (interface == NULL || mtuLen == NULL)
1003 return (EINVAL);
1004
1005 switch (family) {
1006 case AF_INET:
1007 if (interface->if_hwassist & IFNET_TSO_IPV4)
1008 *mtuLen = interface->if_tso_v4_mtu;
1009 else
1010 error = EINVAL;
1011 break;
1012
1013 case AF_INET6:
1014 if (interface->if_hwassist & IFNET_TSO_IPV6)
1015 *mtuLen = interface->if_tso_v6_mtu;
1016 else
1017 error = EINVAL;
1018 break;
1019
1020 default:
1021 error = EPROTONOSUPPORT;
1022 break;
1023 }
1024
1025 return (error);
1026 }
1027
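/*
 * Example (illustrative sketch): a hypothetical driver that has already set
 * IFNET_TSO_IPV4 and IFNET_TSO_IPV6 advertising the largest segment its
 * hardware can handle.  The 64 KB figure is an assumption, not a requirement.
 */
static void
exmpl_set_tso_limits(ifnet_t ifp)
{
	(void) ifnet_set_tso_mtu(ifp, AF_INET, 64 * 1024);
	(void) ifnet_set_tso_mtu(ifp, AF_INET6, 64 * 1024);
}
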
1028 errno_t
1029 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1030 {
1031 struct kev_msg ev_msg;
1032 struct net_event_data ev_data;
1033
1034 bzero(&ev_data, sizeof (struct net_event_data));
1035 bzero(&ev_msg, sizeof (struct kev_msg));
1036
1037 if (interface == NULL)
1038 return (EINVAL);
1039
1040 /* Do not accept wacky values */
1041 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS)
1042 return (EINVAL);
1043
1044 ifnet_lock_exclusive(interface);
1045
1046 if (mask & IF_WAKE_ON_MAGIC_PACKET) {
1047 if (properties & IF_WAKE_ON_MAGIC_PACKET)
1048 interface->if_xflags |= IFXF_WAKE_ON_MAGIC_PACKET;
1049 else
1050 interface->if_xflags &= ~IFXF_WAKE_ON_MAGIC_PACKET;
1051 }
1052
1053 ifnet_lock_done(interface);
1054
1055 (void) ifnet_touch_lastchange(interface);
1056
1057 /* Notify application of the change */
1058 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1059 ev_msg.kev_class = KEV_NETWORK_CLASS;
1060 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1061
1062 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
1063 strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1064 ev_data.if_family = interface->if_family;
1065 ev_data.if_unit = (u_int32_t)interface->if_unit;
1066 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
1067 ev_msg.dv[0].data_ptr = &ev_data;
1068 ev_msg.dv[1].data_length = 0;
1069 dlil_post_complete_msg(interface, &ev_msg);
1070
1071 return (0);
1072 }
1073
1074 u_int32_t
1075 ifnet_get_wake_flags(ifnet_t interface)
1076 {
1077 u_int32_t flags = 0;
1078
1079 if (interface == NULL)
1080 return (0);
1081
1082 if (interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET)
1083 flags |= IF_WAKE_ON_MAGIC_PACKET;
1084
1085 return (flags);
1086 }
1087
1088 /*
1089 * Should MIB data store a copy?
1090 */
1091 errno_t
1092 ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen)
1093 {
1094 if (interface == NULL)
1095 return (EINVAL);
1096
1097 ifnet_lock_exclusive(interface);
1098 interface->if_linkmib = (void*)mibData;
1099 interface->if_linkmiblen = mibLen;
1100 ifnet_lock_done(interface);
1101 return (0);
1102 }
1103
1104 errno_t
1105 ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen)
1106 {
1107 errno_t result = 0;
1108
1109 if (interface == NULL)
1110 return (EINVAL);
1111
1112 ifnet_lock_shared(interface);
1113 if (*mibLen < interface->if_linkmiblen)
1114 result = EMSGSIZE;
1115 if (result == 0 && interface->if_linkmib == NULL)
1116 result = ENOTSUP;
1117
1118 if (result == 0) {
1119 *mibLen = interface->if_linkmiblen;
1120 bcopy(interface->if_linkmib, mibData, *mibLen);
1121 }
1122 ifnet_lock_done(interface);
1123
1124 return (result);
1125 }
1126
1127 u_int32_t
1128 ifnet_get_link_mib_data_length(ifnet_t interface)
1129 {
1130 return ((interface == NULL) ? 0 : interface->if_linkmiblen);
1131 }
1132
1133 errno_t
1134 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1135 mbuf_t m, void *route, const struct sockaddr *dest)
1136 {
1137 if (interface == NULL || protocol_family == 0 || m == NULL) {
1138 if (m != NULL)
1139 mbuf_freem_list(m);
1140 return (EINVAL);
1141 }
1142 return (dlil_output(interface, protocol_family, m, route, dest, 0, NULL));
1143 }
1144
1145 errno_t
1146 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1147 {
1148 if (interface == NULL || m == NULL) {
1149 if (m != NULL)
1150 mbuf_freem_list(m);
1151 return (EINVAL);
1152 }
1153 return (dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL));
1154 }
1155
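/*
 * Example (illustrative sketch): handing a fully framed packet (an mbuf
 * chain that already carries its link-layer header) to an interface.  As
 * with ifnet_output() above, ownership of the mbuf passes to the stack,
 * even on failure.  PF_INET is an assumption; pass whatever protocol
 * family built the frame.
 */
static errno_t
exmpl_send_frame(ifnet_t ifp, mbuf_t m)
{
	return (ifnet_output_raw(ifp, PF_INET, m));
}
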
1156 errno_t
1157 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1158 {
1159 if (interface == NULL)
1160 return (EINVAL);
1161
1162 interface->if_mtu = mtu;
1163 return (0);
1164 }
1165
1166 u_int32_t
1167 ifnet_mtu(ifnet_t interface)
1168 {
1169 return ((interface == NULL) ? 0 : interface->if_mtu);
1170 }
1171
1172 u_char
1173 ifnet_type(ifnet_t interface)
1174 {
1175 return ((interface == NULL) ? 0 : interface->if_data.ifi_type);
1176 }
1177
1178 errno_t
1179 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1180 {
1181 if (interface == NULL)
1182 return (EINVAL);
1183
1184 interface->if_data.ifi_addrlen = addrlen;
1185 return (0);
1186 }
1187
1188 u_char
1189 ifnet_addrlen(ifnet_t interface)
1190 {
1191 return ((interface == NULL) ? 0 : interface->if_data.ifi_addrlen);
1192 }
1193
1194 errno_t
1195 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1196 {
1197 if (interface == NULL)
1198 return (EINVAL);
1199
1200 interface->if_data.ifi_hdrlen = hdrlen;
1201 return (0);
1202 }
1203
1204 u_char
1205 ifnet_hdrlen(ifnet_t interface)
1206 {
1207 return ((interface == NULL) ? 0 : interface->if_data.ifi_hdrlen);
1208 }
1209
1210 errno_t
1211 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1212 {
1213 if (interface == NULL)
1214 return (EINVAL);
1215
1216 interface->if_data.ifi_metric = metric;
1217 return (0);
1218 }
1219
1220 u_int32_t
1221 ifnet_metric(ifnet_t interface)
1222 {
1223 return ((interface == NULL) ? 0 : interface->if_data.ifi_metric);
1224 }
1225
1226 errno_t
1227 ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate)
1228 {
1229 if (ifp == NULL)
1230 return (EINVAL);
1231
1232 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1233 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1234
1235 /* Pin if_baudrate to 32 bits until we can change the storage size */
1236 ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate;
1237
1238 return (0);
1239 }
1240
1241 u_int64_t
1242 ifnet_baudrate(struct ifnet *ifp)
1243 {
1244 return ((ifp == NULL) ? 0 : ifp->if_baudrate);
1245 }
1246
1247 errno_t
1248 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1249 struct if_bandwidths *input_bw)
1250 {
1251 if (ifp == NULL)
1252 return (EINVAL);
1253
1254 /* set input values first (if any), as output values depend on them */
1255 if (input_bw != NULL)
1256 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1257
1258 if (output_bw != NULL)
1259 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1260
1261 return (0);
1262 }
1263
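/*
 * Example (illustrative sketch): reporting a symmetric 1 Gbps link to the
 * stack.  Values are in bits per second; using the same value for eff_bw
 * and max_bw is an assumption for a fixed-rate link.
 */
static errno_t
exmpl_report_gige_link(ifnet_t ifp)
{
	struct if_bandwidths bw;

	bzero(&bw, sizeof (bw));
	bw.eff_bw = 1000ULL * 1000 * 1000;	/* effective, bps */
	bw.max_bw = 1000ULL * 1000 * 1000;	/* theoretical max, bps */

	/* sets both directions; if_baudrate is pinned to 32 bits above */
	return (ifnet_set_bandwidths(ifp, &bw, &bw));
}
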
1264 static void
1265 ifnet_set_link_status_outbw(struct ifnet *ifp)
1266 {
1267 struct if_wifi_status_v1 *sr;
1268 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1269 if (ifp->if_output_bw.eff_bw != 0) {
1270 sr->valid_bitmask |=
1271 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1272 sr->ul_effective_bandwidth =
1273 ifp->if_output_bw.eff_bw;
1274 }
1275 if (ifp->if_output_bw.max_bw != 0) {
1276 sr->valid_bitmask |=
1277 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1278 sr->ul_max_bandwidth =
1279 ifp->if_output_bw.max_bw;
1280 }
1281 }
1282
1283 errno_t
1284 ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1285 boolean_t locked)
1286 {
1287 struct if_bandwidths old_bw;
1288 struct ifclassq *ifq;
1289 u_int64_t br;
1290
1291 VERIFY(ifp != NULL && bw != NULL);
1292
1293 ifq = &ifp->if_snd;
1294 if (!locked)
1295 IFCQ_LOCK(ifq);
1296 IFCQ_LOCK_ASSERT_HELD(ifq);
1297
1298 old_bw = ifp->if_output_bw;
1299 if (bw->eff_bw != 0)
1300 ifp->if_output_bw.eff_bw = bw->eff_bw;
1301 if (bw->max_bw != 0)
1302 ifp->if_output_bw.max_bw = bw->max_bw;
1303 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
1304 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1305 else if (ifp->if_output_bw.eff_bw == 0)
1306 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1307
1308 /* Pin if_baudrate to 32 bits */
1309 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1310 if (br != 0)
1311 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
1312
1313 /* Adjust queue parameters if needed */
1314 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1315 old_bw.max_bw != ifp->if_output_bw.max_bw)
1316 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1317
1318 if (!locked)
1319 IFCQ_UNLOCK(ifq);
1320
1321 /*
1322 * If this is a Wifi interface, update the values in
1323 * if_link_status structure also.
1324 */
1325 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1326 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1327 ifnet_set_link_status_outbw(ifp);
1328 lck_rw_done(&ifp->if_link_status_lock);
1329 }
1330
1331 return (0);
1332 }
1333
1334 static void
1335 ifnet_set_link_status_inbw(struct ifnet *ifp)
1336 {
1337 struct if_wifi_status_v1 *sr;
1338
1339 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1340 if (ifp->if_input_bw.eff_bw != 0) {
1341 sr->valid_bitmask |=
1342 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1343 sr->dl_effective_bandwidth =
1344 ifp->if_input_bw.eff_bw;
1345 }
1346 if (ifp->if_input_bw.max_bw != 0) {
1347 sr->valid_bitmask |=
1348 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1349 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw;
1350 }
1351 }
1352
1353 errno_t
1354 ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1355 {
1356 struct if_bandwidths old_bw;
1357
1358 VERIFY(ifp != NULL && bw != NULL);
1359
1360 old_bw = ifp->if_input_bw;
1361 if (bw->eff_bw != 0)
1362 ifp->if_input_bw.eff_bw = bw->eff_bw;
1363 if (bw->max_bw != 0)
1364 ifp->if_input_bw.max_bw = bw->max_bw;
1365 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
1366 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1367 else if (ifp->if_input_bw.eff_bw == 0)
1368 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1369
1370 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1371 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1372 ifnet_set_link_status_inbw(ifp);
1373 lck_rw_done(&ifp->if_link_status_lock);
1374 }
1375
1376 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1377 old_bw.max_bw != ifp->if_input_bw.max_bw)
1378 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1379
1380 return (0);
1381 }
1382
1383 u_int64_t
1384 ifnet_output_linkrate(struct ifnet *ifp)
1385 {
1386 struct ifclassq *ifq = &ifp->if_snd;
1387 u_int64_t rate;
1388
1389 IFCQ_LOCK_ASSERT_HELD(ifq);
1390
1391 rate = ifp->if_output_bw.eff_bw;
1392 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1393 u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
1394 VERIFY(tbr_rate > 0);
1395 rate = MIN(rate, ifp->if_snd.ifcq_tbr.tbr_rate_raw);
1396 }
1397
1398 return (rate);
1399 }
1400
1401 u_int64_t
1402 ifnet_input_linkrate(struct ifnet *ifp)
1403 {
1404 return (ifp->if_input_bw.eff_bw);
1405 }
1406
1407 errno_t
1408 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1409 struct if_bandwidths *input_bw)
1410 {
1411 if (ifp == NULL)
1412 return (EINVAL);
1413
1414 if (output_bw != NULL)
1415 *output_bw = ifp->if_output_bw;
1416 if (input_bw != NULL)
1417 *input_bw = ifp->if_input_bw;
1418
1419 return (0);
1420 }
1421
1422 errno_t
1423 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1424 struct if_latencies *input_lt)
1425 {
1426 if (ifp == NULL)
1427 return (EINVAL);
1428
1429 if (output_lt != NULL)
1430 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1431
1432 if (input_lt != NULL)
1433 (void) ifnet_set_input_latencies(ifp, input_lt);
1434
1435 return (0);
1436 }
1437
1438 errno_t
1439 ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1440 boolean_t locked)
1441 {
1442 struct if_latencies old_lt;
1443 struct ifclassq *ifq;
1444
1445 VERIFY(ifp != NULL && lt != NULL);
1446
1447 ifq = &ifp->if_snd;
1448 if (!locked)
1449 IFCQ_LOCK(ifq);
1450 IFCQ_LOCK_ASSERT_HELD(ifq);
1451
1452 old_lt = ifp->if_output_lt;
1453 if (lt->eff_lt != 0)
1454 ifp->if_output_lt.eff_lt = lt->eff_lt;
1455 if (lt->max_lt != 0)
1456 ifp->if_output_lt.max_lt = lt->max_lt;
1457 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
1458 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1459 else if (ifp->if_output_lt.eff_lt == 0)
1460 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1461
1462 /* Adjust queue parameters if needed */
1463 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1464 old_lt.max_lt != ifp->if_output_lt.max_lt)
1465 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1466
1467 if (!locked)
1468 IFCQ_UNLOCK(ifq);
1469
1470 return (0);
1471 }
1472
1473 errno_t
1474 ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1475 {
1476 struct if_latencies old_lt;
1477
1478 VERIFY(ifp != NULL && lt != NULL);
1479
1480 old_lt = ifp->if_input_lt;
1481 if (lt->eff_lt != 0)
1482 ifp->if_input_lt.eff_lt = lt->eff_lt;
1483 if (lt->max_lt != 0)
1484 ifp->if_input_lt.max_lt = lt->max_lt;
1485 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
1486 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1487 else if (ifp->if_input_lt.eff_lt == 0)
1488 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1489
1490 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1491 old_lt.max_lt != ifp->if_input_lt.max_lt)
1492 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1493
1494 return (0);
1495 }
1496
1497 errno_t
1498 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1499 struct if_latencies *input_lt)
1500 {
1501 if (ifp == NULL)
1502 return (EINVAL);
1503
1504 if (output_lt != NULL)
1505 *output_lt = ifp->if_output_lt;
1506 if (input_lt != NULL)
1507 *input_lt = ifp->if_input_lt;
1508
1509 return (0);
1510 }
1511
1512 errno_t
1513 ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1514 {
1515 errno_t err;
1516
1517 if (ifp == NULL)
1518 return (EINVAL);
1519 else if (!ifnet_is_attached(ifp, 1))
1520 return (ENXIO);
1521
1522 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1523
1524 /* Release the io ref count */
1525 ifnet_decr_iorefcnt(ifp);
1526
1527 return (err);
1528 }
1529
1530 errno_t
1531 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1532 {
1533 errno_t err;
1534
1535 if (ifp == NULL || p == NULL)
1536 return (EINVAL);
1537 else if (!ifnet_is_attached(ifp, 1))
1538 return (ENXIO);
1539
1540 err = dlil_rxpoll_get_params(ifp, p);
1541
1542 /* Release the io ref count */
1543 ifnet_decr_iorefcnt(ifp);
1544
1545 return (err);
1546 }
1547
1548 errno_t
1549 ifnet_stat_increment(struct ifnet *ifp,
1550 const struct ifnet_stat_increment_param *s)
1551 {
1552 if (ifp == NULL)
1553 return (EINVAL);
1554
1555 if (s->packets_in != 0)
1556 atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1557 if (s->bytes_in != 0)
1558 atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1559 if (s->errors_in != 0)
1560 atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1561
1562 if (s->packets_out != 0)
1563 atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
1564 if (s->bytes_out != 0)
1565 atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1566 if (s->errors_out != 0)
1567 atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1568
1569 if (s->collisions != 0)
1570 atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
1571 if (s->dropped != 0)
1572 atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1573
1574 /* Touch the last change time. */
1575 TOUCHLASTCHANGE(&ifp->if_lastchange);
1576
1577 if (ifp->if_data_threshold != 0)
1578 ifnet_notify_data_threshold(ifp);
1579
1580 return (0);
1581 }
1582
1583 errno_t
1584 ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1585 u_int32_t bytes_in, u_int32_t errors_in)
1586 {
1587 if (ifp == NULL)
1588 return (EINVAL);
1589
1590 if (packets_in != 0)
1591 atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
1592 if (bytes_in != 0)
1593 atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
1594 if (errors_in != 0)
1595 atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
1596
1597 TOUCHLASTCHANGE(&ifp->if_lastchange);
1598
1599 if (ifp->if_data_threshold != 0)
1600 ifnet_notify_data_threshold(ifp);
1601
1602 return (0);
1603 }
1604
1605 errno_t
1606 ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1607 u_int32_t bytes_out, u_int32_t errors_out)
1608 {
1609 if (ifp == NULL)
1610 return (EINVAL);
1611
1612 if (packets_out != 0)
1613 atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
1614 if (bytes_out != 0)
1615 atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
1616 if (errors_out != 0)
1617 atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
1618
1619 TOUCHLASTCHANGE(&ifp->if_lastchange);
1620
1621 if (ifp->if_data_threshold != 0)
1622 ifnet_notify_data_threshold(ifp);
1623
1624 return (0);
1625 }
1626
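/*
 * Example (illustrative sketch): a hypothetical receive path accounting for
 * one successfully received frame before handing it up the stack.
 */
static void
exmpl_count_rx_frame(ifnet_t ifp, mbuf_t m)
{
	(void) ifnet_stat_increment_in(ifp, 1,
	    (u_int32_t)mbuf_pkthdr_len(m), 0);
}
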
1627 errno_t
1628 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1629 {
1630 if (ifp == NULL)
1631 return (EINVAL);
1632
1633 atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1634 atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1635 atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
1636 atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1637
1638 atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
1639 atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1640 atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
1641 atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1642
1643 atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
1644 atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1645 atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);
1646
1647 /* Touch the last change time. */
1648 TOUCHLASTCHANGE(&ifp->if_lastchange);
1649
1650 if (ifp->if_data_threshold != 0)
1651 ifnet_notify_data_threshold(ifp);
1652
1653 return (0);
1654 }
1655
1656 errno_t
1657 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1658 {
1659 if (ifp == NULL)
1660 return (EINVAL);
1661
1662 atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
1663 atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
1664 atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
1665 atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);
1666
1667 atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
1668 atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
1669 atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
1670 atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);
1671
1672 atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
1673 atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
1674 atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);
1675
1676 if (ifp->if_data_threshold != 0)
1677 ifnet_notify_data_threshold(ifp);
1678
1679 return (0);
1680 }
1681
1682 errno_t
1683 ifnet_touch_lastchange(ifnet_t interface)
1684 {
1685 if (interface == NULL)
1686 return (EINVAL);
1687
1688 TOUCHLASTCHANGE(&interface->if_lastchange);
1689
1690 return (0);
1691 }
1692
1693 errno_t
1694 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1695 {
1696 if (interface == NULL)
1697 return (EINVAL);
1698
1699 *last_change = interface->if_data.ifi_lastchange;
1700 /* Crude conversion from uptime to calendar time */
1701 last_change->tv_sec += boottime_sec();
1702
1703 return (0);
1704 }
1705
1706 errno_t
1707 ifnet_touch_lastupdown(ifnet_t interface)
1708 {
1709 if (interface == NULL) {
1710 return (EINVAL);
1711 }
1712
1713 TOUCHLASTCHANGE(&interface->if_lastupdown);
1714
1715 return (0);
1716 }
1717
1718 errno_t
1719 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
1720 {
1721 if (interface == NULL) {
1722 return (EINVAL);
1723 }
1724
1725 /* Calculate the delta */
1726 updown_delta->tv_sec = net_uptime();
1727 if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
1728 updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
1729 }
1730 updown_delta->tv_usec = 0;
1731
1732 return (0);
1733 }
1734
1735 errno_t
1736 ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
1737 {
1738 return (addresses == NULL ? EINVAL :
1739 ifnet_get_address_list_family(interface, addresses, 0));
1740 }
1741
1742 struct ifnet_addr_list {
1743 SLIST_ENTRY(ifnet_addr_list) ifal_le;
1744 struct ifaddr *ifal_ifa;
1745 };
1746
1747 errno_t
1748 ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
1749 sa_family_t family)
1750 {
1751 return (ifnet_get_address_list_family_internal(interface, addresses,
1752 family, 0, M_NOWAIT, 0));
1753 }
1754
1755 errno_t
1756 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
1757 {
1758 return (addresses == NULL ? EINVAL :
1759 ifnet_get_address_list_family_internal(interface, addresses,
1760 0, 0, M_NOWAIT, 1));
1761 }
1762
1763 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
1764
1765 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
1766
1767 __private_extern__ errno_t
1768 ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
1769 sa_family_t family, int detached, int how, int return_inuse_addrs)
1770 {
1771 SLIST_HEAD(, ifnet_addr_list) ifal_head;
1772 struct ifnet_addr_list *ifal, *ifal_tmp;
1773 struct ifnet *ifp;
1774 int count = 0;
1775 errno_t err = 0;
1776 int usecount = 0;
1777 int index = 0;
1778
1779 SLIST_INIT(&ifal_head);
1780
1781 if (addresses == NULL) {
1782 err = EINVAL;
1783 goto done;
1784 }
1785 *addresses = NULL;
1786
1787 if (detached) {
1788 /*
1789 * Interface has been detached, so skip the lookup
1790 * at ifnet_head and go directly to the inner loop.
1791 */
1792 ifp = interface;
1793 if (ifp == NULL) {
1794 err = EINVAL;
1795 goto done;
1796 }
1797 goto one;
1798 }
1799
1800 ifnet_head_lock_shared();
1801 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1802 if (interface != NULL && ifp != interface)
1803 continue;
1804 one:
1805 ifnet_lock_shared(ifp);
1806 if (interface == NULL || interface == ifp) {
1807 struct ifaddr *ifa;
1808 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1809 IFA_LOCK(ifa);
1810 if (family != 0 &&
1811 ifa->ifa_addr->sa_family != family) {
1812 IFA_UNLOCK(ifa);
1813 continue;
1814 }
1815 MALLOC(ifal, struct ifnet_addr_list *,
1816 sizeof (*ifal), M_TEMP, how);
1817 if (ifal == NULL) {
1818 IFA_UNLOCK(ifa);
1819 ifnet_lock_done(ifp);
1820 if (!detached)
1821 ifnet_head_done();
1822 err = ENOMEM;
1823 goto done;
1824 }
1825 ifal->ifal_ifa = ifa;
1826 IFA_ADDREF_LOCKED(ifa);
1827 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
1828 ++count;
1829 IFA_UNLOCK(ifa);
1830 }
1831 }
1832 ifnet_lock_done(ifp);
1833 if (detached)
1834 break;
1835 }
1836 if (!detached)
1837 ifnet_head_done();
1838
1839 if (count == 0) {
1840 err = ENXIO;
1841 goto done;
1842 }
1843 MALLOC(*addresses, ifaddr_t *, sizeof (ifaddr_t) * (count + 1),
1844 M_TEMP, how);
1845 if (*addresses == NULL) {
1846 err = ENOMEM;
1847 goto done;
1848 }
1849 bzero(*addresses, sizeof (ifaddr_t) * (count + 1));
1850
1851 done:
1852 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
1853 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
1854 if (err == 0) {
1855 if (return_inuse_addrs) {
1856 usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
1857 usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
1858 if (usecount) {
1859 (*addresses)[index] = ifal->ifal_ifa;
1860 index++;
1861 } else {
1862 IFA_REMREF(ifal->ifal_ifa);
1863 }
1864 } else {
1865 (*addresses)[--count] = ifal->ifal_ifa;
1866 }
1867 } else {
1868 IFA_REMREF(ifal->ifal_ifa);
1869 }
1870 FREE(ifal, M_TEMP);
1871 }
1872
1873 VERIFY(err == 0 || *addresses == NULL);
1874 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
1875 VERIFY(return_inuse_addrs == 1);
1876 FREE(*addresses, M_TEMP);
1877 err = ENXIO;
1878 }
1879 return (err);
1880 }
1881
1882 void
1883 ifnet_free_address_list(ifaddr_t *addresses)
1884 {
1885 int i;
1886
1887 if (addresses == NULL)
1888 return;
1889
1890 for (i = 0; addresses[i] != NULL; i++)
1891 IFA_REMREF(addresses[i]);
1892
1893 FREE(addresses, M_TEMP);
1894 }
1895
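/*
 * Example (illustrative sketch): walking the IPv4 addresses assigned to an
 * interface.  The array returned above is NULL-terminated and every entry
 * is referenced, so ifnet_free_address_list() must be called when done.
 */
static void
exmpl_walk_inet_addrs(ifnet_t ifp)
{
	ifaddr_t *addrs;
	int i;

	if (ifnet_get_address_list_family(ifp, &addrs, AF_INET) != 0)
		return;

	for (i = 0; addrs[i] != NULL; i++) {
		/* inspect addrs[i], e.g. via ifaddr_address() */
	}

	ifnet_free_address_list(addrs);
}
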
1896 void *
1897 ifnet_lladdr(ifnet_t interface)
1898 {
1899 struct ifaddr *ifa;
1900 void *lladdr;
1901
1902 if (interface == NULL)
1903 return (NULL);
1904
1905 /*
1906 * if_lladdr points to the permanent link address of
1907 * the interface and it never gets deallocated; internal
1908 * code should simply use IF_LLADDR() for performance.
1909 */
1910 ifa = interface->if_lladdr;
1911 IFA_LOCK_SPIN(ifa);
1912 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
1913 IFA_UNLOCK(ifa);
1914
1915 return (lladdr);
1916 }
1917
1918 errno_t
1919 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
1920 size_t *out_len)
1921 {
1922 if (interface == NULL || addr == NULL || out_len == NULL)
1923 return (EINVAL);
1924
1925 *out_len = interface->if_broadcast.length;
1926
1927 if (buffer_len < interface->if_broadcast.length)
1928 return (EMSGSIZE);
1929
1930 if (interface->if_broadcast.length == 0)
1931 return (ENXIO);
1932
1933 if (interface->if_broadcast.length <=
1934 sizeof (interface->if_broadcast.u.buffer)) {
1935 bcopy(interface->if_broadcast.u.buffer, addr,
1936 interface->if_broadcast.length);
1937 } else {
1938 bcopy(interface->if_broadcast.u.ptr, addr,
1939 interface->if_broadcast.length);
1940 }
1941
1942 return (0);
1943 }
1944
1945 static errno_t
1946 ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
1947 size_t lladdr_len, kauth_cred_t *credp)
1948 {
1949 const u_int8_t *bytes;
1950 size_t bytes_len;
1951 struct ifaddr *ifa;
1952 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
1953 errno_t error = 0;
1954
1955 /*
1956 * Make sure to accommodate the largest possible
1957 * size of SA(if_lladdr)->sa_len.
1958 */
1959 _CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1));
1960
1961 if (interface == NULL || lladdr == NULL)
1962 return (EINVAL);
1963
1964 ifa = interface->if_lladdr;
1965 IFA_LOCK_SPIN(ifa);
1966 bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
1967 IFA_UNLOCK(ifa);
1968
1969 bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
1970 if (bytes_len != lladdr_len) {
1971 bzero(lladdr, lladdr_len);
1972 error = EMSGSIZE;
1973 } else {
1974 bcopy(bytes, lladdr, bytes_len);
1975 }
1976
1977 return (error);
1978 }
1979
1980 errno_t
1981 ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1982 {
1983 return (ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1984 NULL));
1985 }
1986
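/*
 * Example (illustrative sketch): copying a 6-byte Ethernet link-layer
 * address into a caller-supplied buffer.  As shown above, EMSGSIZE is
 * returned if the interface's address is not exactly the requested length.
 */
static errno_t
exmpl_copy_ether_addr(ifnet_t ifp, u_int8_t mac[6])
{
	return (ifnet_lladdr_copy_bytes(ifp, mac, 6 /* ETHER_ADDR_LEN */));
}
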
1987 errno_t
1988 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1989 {
1990 #if CONFIG_MACF
1991 kauth_cred_t cred;
1992 net_thread_marks_t marks;
1993 #endif
1994 kauth_cred_t *credp;
1995 errno_t error;
1996
1997 credp = NULL;
1998 #if CONFIG_MACF
1999 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
2000 cred = kauth_cred_proc_ref(current_proc());
2001 credp = &cred;
2002 #else
2003 credp = NULL;
2004 #endif
2005
2006 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2007 credp);
2008
2009 #if CONFIG_MACF
2010 kauth_cred_unref(credp);
2011 net_thread_marks_pop(marks);
2012 #endif
2013
2014 return (error);
2015 }
2016
2017 static errno_t
2018 ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
2019 size_t lladdr_len, u_char new_type, int apply_type)
2020 {
2021 struct ifaddr *ifa;
2022 errno_t error = 0;
2023
2024 if (interface == NULL)
2025 return (EINVAL);
2026
2027 ifnet_head_lock_shared();
2028 ifnet_lock_exclusive(interface);
2029 if (lladdr_len != 0 &&
2030 (lladdr_len != interface->if_addrlen || lladdr == 0)) {
2031 ifnet_lock_done(interface);
2032 ifnet_head_done();
2033 return (EINVAL);
2034 }
2035 ifa = ifnet_addrs[interface->if_index - 1];
2036 if (ifa != NULL) {
2037 struct sockaddr_dl *sdl;
2038
2039 IFA_LOCK_SPIN(ifa);
2040 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2041 if (lladdr_len != 0) {
2042 bcopy(lladdr, LLADDR(sdl), lladdr_len);
2043 } else {
2044 bzero(LLADDR(sdl), interface->if_addrlen);
2045 }
2046 sdl->sdl_alen = lladdr_len;
2047
2048 if (apply_type) {
2049 sdl->sdl_type = new_type;
2050 }
2051 IFA_UNLOCK(ifa);
2052 } else {
2053 error = ENXIO;
2054 }
2055 ifnet_lock_done(interface);
2056 ifnet_head_done();
2057
2058 /* Generate a kernel event */
2059 if (error == 0) {
2060 intf_event_enqueue_nwk_wq_entry(interface, NULL,
2061 INTF_EVENT_CODE_LLADDR_UPDATE);
2062 dlil_post_msg(interface, KEV_DL_SUBCLASS,
2063 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
2064 }
2065
2066 return (error);
2067 }
2068
2069 errno_t
2070 ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
2071 {
2072 return (ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0));
2073 }
2074
2075 errno_t
2076 ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
2077 size_t lladdr_len, u_char type)
2078 {
2079 return (ifnet_set_lladdr_internal(interface, lladdr,
2080 lladdr_len, type, 1));
2081 }
2082
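/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): ifnet_set_lladdr() requires the new address to match the
 * interface's if_addrlen (6 for Ethernet).  The locally administered
 * address and the helper name below are examples only.
 */
static errno_t
example_set_local_admin_mac(ifnet_t ifp)
{
	/* 0x02 in the first octet marks a locally administered address */
	const u_int8_t mac[ETHER_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

	return (ifnet_set_lladdr(ifp, mac, sizeof (mac)));
}
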
2083 errno_t
2084 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2085 ifmultiaddr_t *ifmap)
2086 {
2087 if (interface == NULL || maddr == NULL)
2088 return (EINVAL);
2089
2090 /* Don't let users screw up protocols' entries. */
2091 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
2092 return (EINVAL);
2093
2094 return (if_addmulti_anon(interface, maddr, ifmap));
2095 }
2096
2097 errno_t
2098 ifnet_remove_multicast(ifmultiaddr_t ifma)
2099 {
2100 struct sockaddr *maddr;
2101
2102 if (ifma == NULL)
2103 return (EINVAL);
2104
2105 maddr = ifma->ifma_addr;
2106 /* Don't let users screw up protocols' entries. */
2107 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
2108 return (EINVAL);
2109
2110 return (if_delmulti_anon(ifma->ifma_ifp, maddr));
2111 }
2112
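/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): one common way to join a link-layer group on an Ethernet-style
 * interface is to pass an AF_UNSPEC sockaddr carrying the 6-byte group
 * address in sa_data; an AF_LINK sockaddr_dl works as well.  Treat the
 * AF_UNSPEC encoding as an assumption about the underlying family; the
 * group address and helper name are examples only.
 */
static errno_t
example_join_ether_group(ifnet_t ifp, ifmultiaddr_t *ifmap)
{
	struct sockaddr sa;
	const u_int8_t group[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	bzero(&sa, sizeof (sa));
	sa.sa_len = sizeof (sa);
	sa.sa_family = AF_UNSPEC;
	bcopy(group, sa.sa_data, sizeof (group));

	/* On success the caller owns a reference on *ifmap */
	return (ifnet_add_multicast(ifp, &sa, ifmap));
}
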
2113 errno_t
2114 ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
2115 {
2116 int count = 0;
2117 int cmax = 0;
2118 struct ifmultiaddr *addr;
2119
2120 if (ifp == NULL || addresses == NULL)
2121 return (EINVAL);
2122
2123 ifnet_lock_shared(ifp);
2124 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2125 cmax++;
2126 }
2127
2128 MALLOC(*addresses, ifmultiaddr_t *, sizeof (ifmultiaddr_t) * (cmax + 1),
2129 M_TEMP, M_NOWAIT);
2130 if (*addresses == NULL) {
2131 ifnet_lock_done(ifp);
2132 return (ENOMEM);
2133 }
2134
2135 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2136 if (count + 1 > cmax)
2137 break;
2138 (*addresses)[count] = (ifmultiaddr_t)addr;
2139 ifmaddr_reference((*addresses)[count]);
2140 count++;
2141 }
2142 (*addresses)[cmax] = NULL;
2143 ifnet_lock_done(ifp);
2144
2145 return (0);
2146 }
2147
2148 void
2149 ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2150 {
2151 int i;
2152
2153 if (addresses == NULL)
2154 return;
2155
2156 for (i = 0; addresses[i] != NULL; i++)
2157 ifmaddr_release(addresses[i]);
2158
2159 FREE(addresses, M_TEMP);
2160 }
2161
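/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the array returned by ifnet_get_multicast_list() is
 * NULL-terminated and every entry is referenced, so the whole array must
 * be handed back to ifnet_free_multicast_list().  The helper name is
 * hypothetical.
 */
static void
example_walk_multicast_list(ifnet_t ifp)
{
	ifmultiaddr_t *addrs = NULL;
	struct sockaddr_storage ss;
	int i;

	if (ifnet_get_multicast_list(ifp, &addrs) != 0)
		return;

	for (i = 0; addrs[i] != NULL; i++) {
		if (ifmaddr_address(addrs[i], (struct sockaddr *)&ss,
		    sizeof (ss)) == 0) {
			printf("%s: entry %d has address family %u\n",
			    __func__, i, ss.ss_family);
		}
	}
	ifnet_free_multicast_list(addrs);
}
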
2162 errno_t
2163 ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2164 {
2165 struct ifnet *ifp;
2166 int namelen;
2167
2168 if (ifname == NULL)
2169 return (EINVAL);
2170
2171 namelen = strlen(ifname);
2172
2173 *ifpp = NULL;
2174
2175 ifnet_head_lock_shared();
2176 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2177 struct ifaddr *ifa;
2178 struct sockaddr_dl *ll_addr;
2179
2180 ifa = ifnet_addrs[ifp->if_index - 1];
2181 if (ifa == NULL)
2182 continue;
2183
2184 IFA_LOCK(ifa);
2185 ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2186
2187 if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
2188 ifname, ll_addr->sdl_nlen) == 0) {
2189 IFA_UNLOCK(ifa);
2190 *ifpp = ifp;
2191 ifnet_reference(*ifpp);
2192 break;
2193 }
2194 IFA_UNLOCK(ifa);
2195 }
2196 ifnet_head_done();
2197
2198 return ((ifp == NULL) ? ENXIO : 0);
2199 }
2200
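/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): ifnet_find_by_name() returns a referenced ifnet_t, so the caller
 * must drop that reference with ifnet_release().  "en0" is only an
 * example interface name.
 */
static void
example_lookup_interface(void)
{
	ifnet_t ifp = NULL;

	if (ifnet_find_by_name("en0", &ifp) == 0) {
		printf("%s: found %s%u\n", __func__,
		    ifnet_name(ifp), ifnet_unit(ifp));
		ifnet_release(ifp);
	}
}
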
2201 errno_t
2202 ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2203 {
2204 return (ifnet_list_get_common(family, FALSE, list, count));
2205 }
2206
2207 __private_extern__ errno_t
2208 ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2209 {
2210 return (ifnet_list_get_common(family, TRUE, list, count));
2211 }
2212
2213 struct ifnet_list {
2214 SLIST_ENTRY(ifnet_list) ifl_le;
2215 struct ifnet *ifl_ifp;
2216 };
2217
2218 static errno_t
2219 ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
2220 u_int32_t *count)
2221 {
2222 #pragma unused(get_all)
2223 SLIST_HEAD(, ifnet_list) ifl_head;
2224 struct ifnet_list *ifl, *ifl_tmp;
2225 struct ifnet *ifp;
2226 int cnt = 0;
2227 errno_t err = 0;
2228
2229 SLIST_INIT(&ifl_head);
2230
2231 if (list == NULL || count == NULL) {
2232 err = EINVAL;
2233 goto done;
2234 }
2235 *count = 0;
2236 *list = NULL;
2237
2238 ifnet_head_lock_shared();
2239 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2240 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2241 MALLOC(ifl, struct ifnet_list *, sizeof (*ifl),
2242 M_TEMP, M_NOWAIT);
2243 if (ifl == NULL) {
2244 ifnet_head_done();
2245 err = ENOMEM;
2246 goto done;
2247 }
2248 ifl->ifl_ifp = ifp;
2249 ifnet_reference(ifp);
2250 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2251 ++cnt;
2252 }
2253 }
2254 ifnet_head_done();
2255
2256 if (cnt == 0) {
2257 err = ENXIO;
2258 goto done;
2259 }
2260
2261 MALLOC(*list, ifnet_t *, sizeof (ifnet_t) * (cnt + 1),
2262 M_TEMP, M_NOWAIT);
2263 if (*list == NULL) {
2264 err = ENOMEM;
2265 goto done;
2266 }
2267 bzero(*list, sizeof (ifnet_t) * (cnt + 1));
2268 *count = cnt;
2269
2270 done:
2271 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2272 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2273 if (err == 0)
2274 (*list)[--cnt] = ifl->ifl_ifp;
2275 else
2276 ifnet_release(ifl->ifl_ifp);
2277 FREE(ifl, M_TEMP);
2278 }
2279
2280 return (err);
2281 }
2282
2283 void
2284 ifnet_list_free(ifnet_t *interfaces)
2285 {
2286 int i;
2287
2288 if (interfaces == NULL)
2289 return;
2290
2291 for (i = 0; interfaces[i]; i++)
2292 ifnet_release(interfaces[i]);
2293
2294 FREE(interfaces, M_TEMP);
2295 }
2296
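/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): ifnet_list_get() hands back an array of referenced interfaces;
 * a single call to ifnet_list_free() releases both the references and
 * the array.  The helper name is hypothetical.
 */
static void
example_walk_all_interfaces(void)
{
	ifnet_t *list = NULL;
	u_int32_t count = 0;
	u_int32_t i;

	if (ifnet_list_get(IFNET_FAMILY_ANY, &list, &count) != 0)
		return;

	for (i = 0; i < count; i++) {
		printf("%s: %s%u has index %u\n", __func__,
		    ifnet_name(list[i]), ifnet_unit(list[i]),
		    ifnet_index(list[i]));
	}
	ifnet_list_free(list);
}
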
2297 /*************************************************************************/
2298 /* ifaddr_t accessors */
2299 /*************************************************************************/
2300
2301 errno_t
2302 ifaddr_reference(ifaddr_t ifa)
2303 {
2304 if (ifa == NULL)
2305 return (EINVAL);
2306
2307 IFA_ADDREF(ifa);
2308 return (0);
2309 }
2310
2311 errno_t
2312 ifaddr_release(ifaddr_t ifa)
2313 {
2314 if (ifa == NULL)
2315 return (EINVAL);
2316
2317 IFA_REMREF(ifa);
2318 return (0);
2319 }
2320
2321 sa_family_t
2322 ifaddr_address_family(ifaddr_t ifa)
2323 {
2324 sa_family_t family = 0;
2325
2326 if (ifa != NULL) {
2327 IFA_LOCK_SPIN(ifa);
2328 if (ifa->ifa_addr != NULL)
2329 family = ifa->ifa_addr->sa_family;
2330 IFA_UNLOCK(ifa);
2331 }
2332 return (family);
2333 }
2334
2335 errno_t
2336 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2337 {
2338 u_int32_t copylen;
2339
2340 if (ifa == NULL || out_addr == NULL)
2341 return (EINVAL);
2342
2343 IFA_LOCK_SPIN(ifa);
2344 if (ifa->ifa_addr == NULL) {
2345 IFA_UNLOCK(ifa);
2346 return (ENOTSUP);
2347 }
2348
2349 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2350 ifa->ifa_addr->sa_len : addr_size;
2351 bcopy(ifa->ifa_addr, out_addr, copylen);
2352
2353 if (ifa->ifa_addr->sa_len > addr_size) {
2354 IFA_UNLOCK(ifa);
2355 return (EMSGSIZE);
2356 }
2357
2358 IFA_UNLOCK(ifa);
2359 return (0);
2360 }
2361
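/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a sockaddr_storage is large enough for any address family, which
 * makes it a convenient destination buffer for ifaddr_address().  The
 * helper name is hypothetical.
 */
static sa_family_t
example_copy_ifaddr(ifaddr_t ifa, struct sockaddr_storage *ss)
{
	bzero(ss, sizeof (*ss));
	if (ifaddr_address(ifa, (struct sockaddr *)ss, sizeof (*ss)) != 0)
		return (AF_UNSPEC);

	return (ss->ss_family);
}
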
2362 errno_t
2363 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2364 {
2365 u_int32_t copylen;
2366
2367 if (ifa == NULL || out_addr == NULL)
2368 return (EINVAL);
2369
2370 IFA_LOCK_SPIN(ifa);
2371 if (ifa->ifa_dstaddr == NULL) {
2372 IFA_UNLOCK(ifa);
2373 return (ENOTSUP);
2374 }
2375
2376 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2377 ifa->ifa_dstaddr->sa_len : addr_size;
2378 bcopy(ifa->ifa_dstaddr, out_addr, copylen);
2379
2380 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2381 IFA_UNLOCK(ifa);
2382 return (EMSGSIZE);
2383 }
2384
2385 IFA_UNLOCK(ifa);
2386 return (0);
2387 }
2388
2389 errno_t
2390 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2391 {
2392 u_int32_t copylen;
2393
2394 if (ifa == NULL || out_addr == NULL)
2395 return (EINVAL);
2396
2397 IFA_LOCK_SPIN(ifa);
2398 if (ifa->ifa_netmask == NULL) {
2399 IFA_UNLOCK(ifa);
2400 return (ENOTSUP);
2401 }
2402
2403 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2404 ifa->ifa_netmask->sa_len : addr_size;
2405 bcopy(ifa->ifa_netmask, out_addr, copylen);
2406
2407 if (ifa->ifa_netmask->sa_len > addr_size) {
2408 IFA_UNLOCK(ifa);
2409 return (EMSGSIZE);
2410 }
2411
2412 IFA_UNLOCK(ifa);
2413 return (0);
2414 }
2415
2416 ifnet_t
2417 ifaddr_ifnet(ifaddr_t ifa)
2418 {
2419 struct ifnet *ifp;
2420
2421 if (ifa == NULL)
2422 return (NULL);
2423
2424 /* ifa_ifp is set once at creation time; it is never changed */
2425 ifp = ifa->ifa_ifp;
2426
2427 return (ifp);
2428 }
2429
2430 ifaddr_t
2431 ifaddr_withaddr(const struct sockaddr *address)
2432 {
2433 if (address == NULL)
2434 return (NULL);
2435
2436 return (ifa_ifwithaddr(address));
2437 }
2438
2439 ifaddr_t
2440 ifaddr_withdstaddr(const struct sockaddr *address)
2441 {
2442 if (address == NULL)
2443 return (NULL);
2444
2445 return (ifa_ifwithdstaddr(address));
2446 }
2447
2448 ifaddr_t
2449 ifaddr_withnet(const struct sockaddr *net)
2450 {
2451 if (net == NULL)
2452 return (NULL);
2453
2454 return (ifa_ifwithnet(net));
2455 }
2456
2457 ifaddr_t
2458 ifaddr_withroute(int flags, const struct sockaddr *destination,
2459 const struct sockaddr *gateway)
2460 {
2461 if (destination == NULL || gateway == NULL)
2462 return (NULL);
2463
2464 return (ifa_ifwithroute(flags, destination, gateway));
2465 }
2466
2467 ifaddr_t
2468 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2469 {
2470 if (addr == NULL || interface == NULL)
2471 return (NULL);
2472
2473 return (ifaof_ifpforaddr_select(addr, interface));
2474 }
2475
2476 errno_t
2477 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2478 {
2479 if (ifmaddr == NULL)
2480 return (EINVAL);
2481
2482 IFMA_ADDREF(ifmaddr);
2483 return (0);
2484 }
2485
2486 errno_t
2487 ifmaddr_release(ifmultiaddr_t ifmaddr)
2488 {
2489 if (ifmaddr == NULL)
2490 return (EINVAL);
2491
2492 IFMA_REMREF(ifmaddr);
2493 return (0);
2494 }
2495
2496 errno_t
2497 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2498 u_int32_t addr_size)
2499 {
2500 u_int32_t copylen;
2501
2502 if (ifma == NULL || out_addr == NULL)
2503 return (EINVAL);
2504
2505 IFMA_LOCK(ifma);
2506 if (ifma->ifma_addr == NULL) {
2507 IFMA_UNLOCK(ifma);
2508 return (ENOTSUP);
2509 }
2510
2511 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2512 ifma->ifma_addr->sa_len : addr_size);
2513 bcopy(ifma->ifma_addr, out_addr, copylen);
2514
2515 if (ifma->ifma_addr->sa_len > addr_size) {
2516 IFMA_UNLOCK(ifma);
2517 return (EMSGSIZE);
2518 }
2519 IFMA_UNLOCK(ifma);
2520 return (0);
2521 }
2522
2523 errno_t
2524 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2525 u_int32_t addr_size)
2526 {
2527 struct ifmultiaddr *ifma_ll;
2528
2529 if (ifma == NULL || out_addr == NULL)
2530 return (EINVAL);
2531 if ((ifma_ll = ifma->ifma_ll) == NULL)
2532 return (ENOTSUP);
2533
2534 return (ifmaddr_address(ifma_ll, out_addr, addr_size));
2535 }
2536
2537 ifnet_t
2538 ifmaddr_ifnet(ifmultiaddr_t ifma)
2539 {
2540 return ((ifma == NULL) ? NULL : ifma->ifma_ifp);
2541 }
2542
2543 /**************************************************************************/
2544 /* interface cloner */
2545 /**************************************************************************/
2546
2547 errno_t
2548 ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
2549 if_clone_t *ifcloner)
2550 {
2551 errno_t error = 0;
2552 struct if_clone *ifc = NULL;
2553 size_t namelen;
2554
2555 if (cloner_params == NULL || ifcloner == NULL ||
2556 cloner_params->ifc_name == NULL ||
2557 cloner_params->ifc_create == NULL ||
2558 cloner_params->ifc_destroy == NULL ||
2559 (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
2560 error = EINVAL;
2561 goto fail;
2562 }
2563
2564 if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
2565 printf("%s: already a cloner for %s\n", __func__,
2566 cloner_params->ifc_name);
2567 error = EEXIST;
2568 goto fail;
2569 }
2570
2571 /* Make room for name string */
2572 ifc = _MALLOC(sizeof (struct if_clone) + IFNAMSIZ + 1, M_CLONE,
2573 M_WAITOK | M_ZERO);
2574 if (ifc == NULL) {
2575 printf("%s: _MALLOC failed\n", __func__);
2576 error = ENOBUFS;
2577 goto fail;
2578 }
2579 strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
2580 ifc->ifc_name = (char *)(ifc + 1);
2581 ifc->ifc_namelen = namelen;
2582 ifc->ifc_maxunit = IF_MAXUNIT;
2583 ifc->ifc_create = cloner_params->ifc_create;
2584 ifc->ifc_destroy = cloner_params->ifc_destroy;
2585
2586 error = if_clone_attach(ifc);
2587 if (error != 0) {
2588 printf("%s: if_clone_attach failed %d\n", __func__, error);
2589 goto fail;
2590 }
2591 *ifcloner = ifc;
2592
2593 return (0);
2594 fail:
2595 if (ifc != NULL)
2596 FREE(ifc, M_CLONE);
2597 return (error);
2598 }
2599
2600 errno_t
2601 ifnet_clone_detach(if_clone_t ifcloner)
2602 {
2603 errno_t error = 0;
2604 struct if_clone *ifc = ifcloner;
2605
2606 if (ifc == NULL || ifc->ifc_name == NULL)
2607 return (EINVAL);
2608
2609 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2610 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2611 error = EINVAL;
2612 goto fail;
2613 }
2614
2615 if_clone_detach(ifc);
2616
2617 FREE(ifc, M_CLONE);
2618
2619 fail:
2620 return (error);
2621 }
2622
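/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a cloning driver registers create/destroy callbacks through
 * ifnet_clone_attach().  The callback signatures below follow the
 * ifnet_clone_create_func/ifnet_clone_destroy_func typedefs in
 * kpi_interface.h and should be treated as an assumption; the "myif"
 * name and the stub bodies are examples only.
 */
static errno_t
example_clone_create(if_clone_t ifcloner, u_int32_t unit, void *params)
{
#pragma unused(ifcloner, unit, params)
	/* allocate and attach an ifnet for the requested unit here */
	return (0);
}

static errno_t
example_clone_destroy(ifnet_t ifp)
{
#pragma unused(ifp)
	/* detach and free the ifnet here */
	return (0);
}

static errno_t
example_register_cloner(if_clone_t *cloner)
{
	struct ifnet_clone_params params;

	bzero(&params, sizeof (params));
	params.ifc_name = "myif";
	params.ifc_create = example_clone_create;
	params.ifc_destroy = example_clone_destroy;

	return (ifnet_clone_attach(&params, cloner));
}
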
2623 /**************************************************************************/
2624 /* misc */
2625 /**************************************************************************/
2626
2627 errno_t
2628 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
2629 u_int32_t flags, u_int8_t *bitfield)
2630 {
2631 u_int32_t ifindex;
2632 u_int32_t inp_flags = 0;
2633
2634 if (bitfield == NULL)
2635 return (EINVAL);
2636
2637 switch (protocol) {
2638 case PF_UNSPEC:
2639 case PF_INET:
2640 case PF_INET6:
2641 break;
2642 default:
2643 return (EINVAL);
2644 }
2645
2646 /* bit string is long enough to hold 16-bit port values */
2647 bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
2648
2649 if_ports_used_update_wakeuuid(ifp);
2650
2651
2652 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ?
2653 INPCB_GET_PORTS_USED_WILDCARDOK : 0);
2654 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ?
2655 INPCB_GET_PORTS_USED_NOWAKEUPOK : 0);
2656 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY) ?
2657 INPCB_GET_PORTS_USED_RECVANYIFONLY : 0);
2658 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY) ?
2659 INPCB_GET_PORTS_USED_EXTBGIDLEONLY : 0);
2660 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY) ?
2661 INPCB_GET_PORTS_USED_ACTIVEONLY : 0);
2662
2663 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2664
2665 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY))
2666 udp_get_ports_used(ifindex, protocol, inp_flags,
2667 bitfield);
2668
2669 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY))
2670 tcp_get_ports_used(ifindex, protocol, inp_flags,
2671 bitfield);
2672
2673 return (0);
2674 }
2675
2676 errno_t
2677 ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
2678 {
2679 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
2680 return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
2681 bitfield));
2682 }
2683
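/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the bitfield handed to ifnet_get_local_ports() must cover the
 * whole 16-bit port space, i.e. 65536 bits (8192 bytes); checking one
 * port is then a simple bit lookup.  The helper name is hypothetical.
 */
static boolean_t
example_is_local_port_in_use(ifnet_t ifp, u_int16_t port)
{
	u_int8_t *bitfield;
	boolean_t in_use = FALSE;

	MALLOC(bitfield, u_int8_t *, 65536 / 8, M_TEMP, M_WAITOK | M_ZERO);
	if (bitfield == NULL)
		return (FALSE);

	if (ifnet_get_local_ports(ifp, bitfield) == 0)
		in_use = (bitfield[port >> 3] & (1 << (port & 0x7))) != 0;

	FREE(bitfield, M_TEMP);
	return (in_use);
}
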
2684 errno_t
2685 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
2686 int lqm, int npm, u_int8_t srvinfo[48])
2687 {
2688 if (ifp == NULL || sa == NULL || srvinfo == NULL)
2689 return (EINVAL);
2690 if (sa->sa_len > sizeof(struct sockaddr_storage))
2691 return (EINVAL);
2692 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2693 return (EINVAL);
2694
2695 dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
2696 return (0);
2697 }
2698
2699 errno_t
2700 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
2701 {
2702 if (ifp == NULL || sa == NULL)
2703 return (EINVAL);
2704 if (sa->sa_len > sizeof(struct sockaddr_storage))
2705 return (EINVAL);
2706 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2707 return (EINVAL);
2708
2709 dlil_node_absent(ifp, sa);
2710 return (0);
2711 }
2712
2713 errno_t
2714 ifnet_notice_master_elected(ifnet_t ifp)
2715 {
2716 if (ifp == NULL)
2717 return (EINVAL);
2718
2719 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
2720 return (0);
2721 }
2722
2723 errno_t
2724 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
2725 {
2726 #pragma unused(val)
2727
2728 m_do_tx_compl_callback(m, ifp);
2729
2730 return (0);
2731 }
2732
2733 errno_t
2734 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
2735 {
2736 m_do_tx_compl_callback(m, ifp);
2737
2738 return (0);
2739 }
2740
2741 errno_t
2742 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
2743 u_int8_t info[IFNET_MODARGLEN])
2744 {
2745 if (ifp == NULL || modid == NULL)
2746 return (EINVAL);
2747
2748 dlil_report_issues(ifp, modid, info);
2749 return (0);
2750 }
2751
2752 errno_t
2753 ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
2754 {
2755 ifnet_t odifp = NULL;
2756
2757 if (ifp == NULL)
2758 return (EINVAL);
2759 else if (!ifnet_is_attached(ifp, 1))
2760 return (ENXIO);
2761
2762 ifnet_lock_exclusive(ifp);
2763 odifp = ifp->if_delegated.ifp;
2764 if (odifp != NULL && odifp == delegated_ifp) {
2765 /* delegate info is unchanged; nothing more to do */
2766 ifnet_lock_done(ifp);
2767 goto done;
2768 }
2769 /* Test if this delegate interface would cause a loop */
2770 ifnet_t delegate_check_ifp = delegated_ifp;
2771 while (delegate_check_ifp != NULL) {
2772 if (delegate_check_ifp == ifp) {
2773 printf("%s: delegating to %s would cause a loop\n",
2774 ifp->if_xname, delegated_ifp->if_xname);
2775 ifnet_lock_done(ifp);
2776 goto done;
2777 }
2778 delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
2779 }
2780 bzero(&ifp->if_delegated, sizeof (ifp->if_delegated));
2781 if (delegated_ifp != NULL && ifp != delegated_ifp) {
2782 ifp->if_delegated.ifp = delegated_ifp;
2783 ifnet_reference(delegated_ifp);
2784 ifp->if_delegated.type = delegated_ifp->if_type;
2785 ifp->if_delegated.family = delegated_ifp->if_family;
2786 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
2787 ifp->if_delegated.expensive =
2788 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
2789
2790 /*
2791 * Propagate flags related to ECN from the delegated interface
2792 */
2793 ifp->if_eflags &= ~(IFEF_ECN_ENABLE|IFEF_ECN_DISABLE);
2794 ifp->if_eflags |= (delegated_ifp->if_eflags &
2795 (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE));
2796
2797 printf("%s: is now delegating %s (type 0x%x, family %u, "
2798 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
2799 delegated_ifp->if_type, delegated_ifp->if_family,
2800 delegated_ifp->if_subfamily);
2801 }
2802
2803 ifnet_lock_done(ifp);
2804
2805 if (odifp != NULL) {
2806 if (odifp != delegated_ifp) {
2807 printf("%s: is no longer delegating %s\n",
2808 ifp->if_xname, odifp->if_xname);
2809 }
2810 ifnet_release(odifp);
2811 }
2812
2813 /* Generate a kernel event */
2814 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);
2815
2816 done:
2817 /* Release the io ref count */
2818 ifnet_decr_iorefcnt(ifp);
2819
2820 return (0);
2821 }
2822
2823 errno_t
2824 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
2825 {
2826 if (ifp == NULL || pdelegated_ifp == NULL)
2827 return (EINVAL);
2828 else if (!ifnet_is_attached(ifp, 1))
2829 return (ENXIO);
2830
2831 ifnet_lock_shared(ifp);
2832 if (ifp->if_delegated.ifp != NULL)
2833 ifnet_reference(ifp->if_delegated.ifp);
2834 *pdelegated_ifp = ifp->if_delegated.ifp;
2835 ifnet_lock_done(ifp);
2836
2837 /* Release the io ref count */
2838 ifnet_decr_iorefcnt(ifp);
2839
2840 return (0);
2841 }
2842
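/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): ifnet_get_delegate() returns a referenced delegate (or NULL), so
 * a non-NULL result must be released with ifnet_release().  The helper
 * name is hypothetical.
 */
static void
example_log_delegate(ifnet_t ifp)
{
	ifnet_t delegate = NULL;

	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
		printf("%s: %s delegates to %s\n", __func__,
		    ifp->if_xname, delegate->if_xname);
		ifnet_release(delegate);
	}
}
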
2843 errno_t
2844 ifnet_get_keepalive_offload_frames(ifnet_t ifp,
2845 struct ifnet_keepalive_offload_frame *frames_array,
2846 u_int32_t frames_array_count, size_t frame_data_offset,
2847 u_int32_t *used_frames_count)
2848 {
2849 u_int32_t i;
2850
2851 if (frames_array == NULL || used_frames_count == NULL ||
2852 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE)
2853 return (EINVAL);
2854
2855 /* frame_data_offset should be 32-bit aligned */
2856 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
2857 frame_data_offset)
2858 return (EINVAL);
2859
2860 *used_frames_count = 0;
2861 if (frames_array_count == 0)
2862 return (0);
2863
2864 for (i = 0; i < frames_array_count; i++) {
2865 struct ifnet_keepalive_offload_frame *frame = frames_array + i;
2866
2867 bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
2868 }
2869
2870 /* First collect IPSec related keep-alive frames */
2871 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
2872 frames_array, frames_array_count, frame_data_offset);
2873
2874 /* If there is more room, collect other UDP keep-alive frames */
2875 if (*used_frames_count < frames_array_count)
2876 udp_fill_keepalive_offload_frames(ifp, frames_array,
2877 frames_array_count, frame_data_offset,
2878 used_frames_count);
2879
2880 /* If there is more room, collect other TCP keep-alive frames */
2881 if (*used_frames_count < frames_array_count)
2882 tcp_fill_keepalive_offload_frames(ifp, frames_array,
2883 frames_array_count, frame_data_offset,
2884 used_frames_count);
2885
2886 VERIFY(*used_frames_count <= frames_array_count);
2887
2888 return (0);
2889 }
2890
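/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver with keep-alive offload support typically sizes a local
 * frame array, asks the stack to fill it, and programs the returned
 * frames into its hardware.  The array size and the frame_data_offset of
 * zero are examples only.
 */
static void
example_fetch_keepalive_frames(ifnet_t ifp)
{
	struct ifnet_keepalive_offload_frame frames[8];
	u_int32_t used = 0;
	u_int32_t i;

	if (ifnet_get_keepalive_offload_frames(ifp, frames,
	    sizeof (frames) / sizeof (frames[0]), 0, &used) != 0)
		return;

	for (i = 0; i < used; i++) {
		/* hand frames[i] to the hardware offload engine here */
	}
}
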
2891 errno_t
2892 ifnet_link_status_report(ifnet_t ifp, const void *buffer,
2893 size_t buffer_len)
2894 {
2895 struct if_link_status *ifsr;
2896 errno_t err = 0;
2897
2898 if (ifp == NULL || buffer == NULL || buffer_len == 0)
2899 return (EINVAL);
2900
2901 ifnet_lock_shared(ifp);
2902
2903 /*
2904 * Make sure that the interface is attached; there is no need
2905 * to take a reference because this call comes from the driver.
2906 */
2907 if (!ifnet_is_attached(ifp, 0)) {
2908 ifnet_lock_done(ifp);
2909 return (ENXIO);
2910 }
2911
2912 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
2913
2914 /*
2915 * If this is the first status report then allocate memory
2916 * to store it.
2917 */
2918 if (ifp->if_link_status == NULL) {
2919 MALLOC(ifp->if_link_status, struct if_link_status *,
2920 sizeof(struct if_link_status), M_TEMP, M_ZERO);
2921 if (ifp->if_link_status == NULL) {
2922 err = ENOMEM;
2923 goto done;
2924 }
2925 }
2926
2927 ifsr = __DECONST(struct if_link_status *, buffer);
2928
2929 if (ifp->if_type == IFT_CELLULAR) {
2930 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
2931 /*
2932 * Currently we have a single version -- if it does
2933 * not match, return ENOTSUP.
2934 */
2935 if (ifsr->ifsr_version !=
2936 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
2937 err = ENOTSUP;
2938 goto done;
2939 }
2940
2941 if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
2942 err = EINVAL;
2943 goto done;
2944 }
2945
2946 if_cell_sr =
2947 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2948 new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2949 /* Check if we need to act on any new notifications */
2950 if ((new_cell_sr->valid_bitmask &
2951 IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
2952 new_cell_sr->mss_recommended !=
2953 if_cell_sr->mss_recommended) {
2954 atomic_bitset_32(&tcbinfo.ipi_flags,
2955 INPCBINFO_UPDATE_MSS);
2956 inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
2957 #if NECP
2958 necp_update_all_clients();
2959 #endif
2960 }
2961
2962 /* Finally copy the new information */
2963 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2964 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2965 if_cell_sr->valid_bitmask = 0;
2966 bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
2967
2968 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2969 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
2970
2971 /* Check version */
2972 if (ifsr->ifsr_version !=
2973 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
2974 err = ENOTSUP;
2975 goto done;
2976 }
2977
2978 if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
2979 err = EINVAL;
2980 goto done;
2981 }
2982
2983 if_wifi_sr =
2984 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2985 new_wifi_sr =
2986 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2987 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2988 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2989 if_wifi_sr->valid_bitmask = 0;
2990 bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));
2991
2992 /*
2993 * Update the bandwidth values if we got recent values
2994 * reported through the other KPI.
2995 */
2996 if (!(new_wifi_sr->valid_bitmask &
2997 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
2998 ifp->if_output_bw.max_bw > 0) {
2999 if_wifi_sr->valid_bitmask |=
3000 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
3001 if_wifi_sr->ul_max_bandwidth =
3002 ifp->if_output_bw.max_bw;
3003 }
3004 if (!(new_wifi_sr->valid_bitmask &
3005 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
3006 ifp->if_output_bw.eff_bw > 0) {
3007 if_wifi_sr->valid_bitmask |=
3008 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
3009 if_wifi_sr->ul_effective_bandwidth =
3010 ifp->if_output_bw.eff_bw;
3011 }
3012 if (!(new_wifi_sr->valid_bitmask &
3013 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
3014 ifp->if_input_bw.max_bw > 0) {
3015 if_wifi_sr->valid_bitmask |=
3016 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
3017 if_wifi_sr->dl_max_bandwidth =
3018 ifp->if_input_bw.max_bw;
3019 }
3020 if (!(new_wifi_sr->valid_bitmask &
3021 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
3022 ifp->if_input_bw.eff_bw > 0) {
3023 if_wifi_sr->valid_bitmask |=
3024 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
3025 if_wifi_sr->dl_effective_bandwidth =
3026 ifp->if_input_bw.eff_bw;
3027 }
3028 }
3029
3030 done:
3031 lck_rw_done(&ifp->if_link_status_lock);
3032 ifnet_lock_done(ifp);
3033 return (err);
3034 }
3035
3036 /*************************************************************************/
3037 /* Fastlane QoS Capable */
3038 /*************************************************************************/
3039
3040 errno_t
3041 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3042 {
3043 if (interface == NULL)
3044 return (EINVAL);
3045
3046 if_set_qosmarking_mode(interface,
3047 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3048
3049 return (0);
3050 }
3051
3052 errno_t
3053 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3054 {
3055 if (interface == NULL || capable == NULL)
3056 return (EINVAL);
3057 if (interface->if_eflags & IFEF_QOSMARKING_CAPABLE)
3058 *capable = true;
3059 else
3060 *capable = false;
3061 return (0);
3062 }
3063
3064 errno_t
3065 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3066 {
3067 int64_t bytes;
3068
3069 if (interface == NULL || unsent_bytes == NULL)
3070 return (EINVAL);
3071
3072 bytes = *unsent_bytes = 0;
3073
3074 if (!IF_FULLY_ATTACHED(interface))
3075 return (ENXIO);
3076
3077 bytes = interface->if_sndbyte_unsent;
3078
3079 if (interface->if_eflags & IFEF_TXSTART)
3080 bytes += IFCQ_BYTES(&interface->if_snd);
3081 *unsent_bytes = bytes;
3082
3083 return (0);
3084 }
3085
3086 errno_t
3087 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3088 {
3089 if (ifp == NULL || buf_status == NULL)
3090 return (EINVAL);
3091
3092 bzero(buf_status, sizeof (*buf_status));
3093
3094 if (!IF_FULLY_ATTACHED(ifp))
3095 return (ENXIO);
3096
3097 if (ifp->if_eflags & IFEF_TXSTART)
3098 buf_status->buf_interface = IFCQ_BYTES(&ifp->if_snd);
3099
3100 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3101 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3102
3103 return (0);
3104 }
3105
3106 void
3107 ifnet_normalise_unsent_data(void)
3108 {
3109 struct ifnet *ifp;
3110
3111 ifnet_head_lock_shared();
3112 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3113 ifnet_lock_exclusive(ifp);
3114 if (!IF_FULLY_ATTACHED(ifp)) {
3115 ifnet_lock_done(ifp);
3116 continue;
3117 }
3118 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3119 ifnet_lock_done(ifp);
3120 continue;
3121 }
3122
3123 if (ifp->if_sndbyte_total > 0 ||
3124 IFCQ_BYTES(&ifp->if_snd) > 0)
3125 ifp->if_unsent_data_cnt++;
3126
3127 ifnet_lock_done(ifp);
3128 }
3129 ifnet_head_done();
3130 }