1 /*
2 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "kpi_interface.h"
30
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #include <netinet6/mld6_var.h>
72 #include <netkey/key.h>
73 #include <stdbool.h>
74
75 #include "net/net_str_id.h"
76
77 #if CONFIG_MACF
78 #include <sys/kauth.h>
79 #include <security/mac_framework.h>
80 #endif
81
82
83 #undef ifnet_allocate
84 errno_t ifnet_allocate(const struct ifnet_init_params *init,
85 ifnet_t *ifp);
86
87 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
88 ifnet_t *ifp, bool is_internal);
89
90
91 #define TOUCHLASTCHANGE(__if_lastchange) { \
92 (__if_lastchange)->tv_sec = (time_t)net_uptime(); \
93 (__if_lastchange)->tv_usec = 0; \
94 }
95
96 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
97 struct ifnet_llreach_info *);
98 static void ifnet_kpi_free(ifnet_t);
99 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
100 u_int32_t *);
101 static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
102 u_char, int);
103 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
104
105 /*
106 * Temporary workaround until we have real reference counting
107 *
108 * We keep the call to dlil_if_release (which should really be called
109 * "recycle") transparent by invoking it from our if_free function
110 * pointer. The client's original detach function is stashed so that
111 * we can still call it.
112 */
113 static void
114 ifnet_kpi_free(ifnet_t ifp)
115 {
116 ifnet_detached_func detach_func = ifp->if_kpi_storage;
117
118 if (detach_func != NULL) {
119 detach_func(ifp);
120 }
121
122 ifnet_dispose(ifp);
123 }
124
125 errno_t
126 ifnet_allocate_common(const struct ifnet_init_params *init,
127 ifnet_t *ifp, bool is_internal)
128 {
129 struct ifnet_init_eparams einit;
130
131 bzero(&einit, sizeof(einit));
132
133 einit.ver = IFNET_INIT_CURRENT_VERSION;
134 einit.len = sizeof(einit);
135 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
136 if (!is_internal) {
137 einit.flags |= IFNET_INIT_ALLOC_KPI;
138 }
139 einit.uniqueid = init->uniqueid;
140 einit.uniqueid_len = init->uniqueid_len;
141 einit.name = init->name;
142 einit.unit = init->unit;
143 einit.family = init->family;
144 einit.type = init->type;
145 einit.output = init->output;
146 einit.demux = init->demux;
147 einit.add_proto = init->add_proto;
148 einit.del_proto = init->del_proto;
149 einit.check_multi = init->check_multi;
150 einit.framer = init->framer;
151 einit.softc = init->softc;
152 einit.ioctl = init->ioctl;
153 einit.set_bpf_tap = init->set_bpf_tap;
154 einit.detach = init->detach;
155 einit.event = init->event;
156 einit.broadcast_addr = init->broadcast_addr;
157 einit.broadcast_len = init->broadcast_len;
158
159 return ifnet_allocate_extended(&einit, ifp);
160 }
161
162 errno_t
163 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
164 {
165 return ifnet_allocate_common(init, ifp, true);
166 }
167
168 errno_t
169 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
170 {
171 return ifnet_allocate_common(init, ifp, false);
172 }
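/*
 * Illustrative sketch (not part of the original source): how a
 * hypothetical driver might fill in ifnet_init_params and call
 * ifnet_allocate(). The callback names (mydrv_output, mydrv_demux,
 * mydrv_add_proto, mydrv_del_proto) and the softc pointer "sc" are
 * placeholders, not real symbols.
 *
 *    struct ifnet_init_params init;
 *    ifnet_t ifp;
 *    errno_t err;
 *
 *    bzero(&init, sizeof(init));
 *    init.name = "mydrv";
 *    init.unit = 0;
 *    init.family = IFNET_FAMILY_ETHERNET;
 *    init.type = IFT_ETHER;
 *    init.output = mydrv_output;
 *    init.demux = mydrv_demux;
 *    init.add_proto = mydrv_add_proto;
 *    init.del_proto = mydrv_del_proto;
 *    init.softc = sc;
 *    err = ifnet_allocate(&init, &ifp);
 *
 * On success the caller holds a reference on ifp and would typically go
 * on to set the MTU, flags and link-layer address before attaching.
 */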
173
174 errno_t
175 ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
176 ifnet_t *interface)
177 {
178 struct ifnet_init_eparams einit;
179 struct ifnet *ifp = NULL;
180 char if_xname[IFXNAMSIZ] = {0};
181 int error;
182
183 einit = *einit0;
184
185 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
186 einit.len < sizeof(einit)) {
187 return EINVAL;
188 }
189
190 if (einit.family == 0 || einit.name == NULL ||
191 strlen(einit.name) >= IFNAMSIZ ||
192 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
193 return EINVAL;
194 }
195
196
197 if (einit.flags & IFNET_INIT_LEGACY) {
198 if (einit.output == NULL ||
199 (einit.flags & IFNET_INIT_INPUT_POLL)) {
200 return EINVAL;
201 }
202 einit.pre_enqueue = NULL;
203 einit.start = NULL;
204 einit.output_ctl = NULL;
205 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
206 einit.input_poll = NULL;
207 einit.input_ctl = NULL;
208 } else {
209 if (einit.start == NULL) {
210 return EINVAL;
211 }
212
213 einit.output = NULL;
214 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
215 return EINVAL;
216 }
217
218 if (einit.flags & IFNET_INIT_INPUT_POLL) {
219 if (einit.input_poll == NULL || einit.input_ctl == NULL) {
220 return EINVAL;
221 }
222 } else {
223 einit.input_poll = NULL;
224 einit.input_ctl = NULL;
225 }
226 }
227
228 if (einit.type > UCHAR_MAX) {
229 return EINVAL;
230 }
231
232 if (einit.unit > SHRT_MAX) {
233 return EINVAL;
234 }
235
236 /* Initialize external name (name + unit) */
237 (void) snprintf(if_xname, sizeof(if_xname), "%s%d",
238 einit.name, einit.unit);
239
240 if (einit.uniqueid == NULL) {
241 einit.uniqueid = if_xname;
242 einit.uniqueid_len = (uint32_t)strlen(if_xname);
243 }
244
245 error = dlil_if_acquire(einit.family, einit.uniqueid,
246 einit.uniqueid_len, if_xname, &ifp);
247
248 if (error == 0) {
249 uint64_t br;
250
251 /*
252 * Cast ifp->if_name to non-const. dlil_if_acquire sets it up
253 * to point to storage of at least IFNAMSIZ bytes. It is safe
254 * to write to this.
255 */
256 strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
257 ifp->if_type = (u_char)einit.type;
258 ifp->if_family = einit.family;
259 ifp->if_subfamily = einit.subfamily;
260 ifp->if_unit = (short)einit.unit;
261 ifp->if_output = einit.output;
262 ifp->if_pre_enqueue = einit.pre_enqueue;
263 ifp->if_start = einit.start;
264 ifp->if_output_ctl = einit.output_ctl;
265 ifp->if_output_sched_model = einit.output_sched_model;
266 ifp->if_output_bw.eff_bw = einit.output_bw;
267 ifp->if_output_bw.max_bw = einit.output_bw_max;
268 ifp->if_output_lt.eff_lt = einit.output_lt;
269 ifp->if_output_lt.max_lt = einit.output_lt_max;
270 ifp->if_input_poll = einit.input_poll;
271 ifp->if_input_ctl = einit.input_ctl;
272 ifp->if_input_bw.eff_bw = einit.input_bw;
273 ifp->if_input_bw.max_bw = einit.input_bw_max;
274 ifp->if_input_lt.eff_lt = einit.input_lt;
275 ifp->if_input_lt.max_lt = einit.input_lt_max;
276 ifp->if_demux = einit.demux;
277 ifp->if_add_proto = einit.add_proto;
278 ifp->if_del_proto = einit.del_proto;
279 ifp->if_check_multi = einit.check_multi;
280 ifp->if_framer_legacy = einit.framer;
281 ifp->if_framer = einit.framer_extended;
282 ifp->if_softc = einit.softc;
283 ifp->if_ioctl = einit.ioctl;
284 ifp->if_set_bpf_tap = einit.set_bpf_tap;
285 ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
286 ifp->if_event = einit.event;
287 ifp->if_kpi_storage = einit.detach;
288
289 /* Initialize Network ID */
290 ifp->network_id_len = 0;
291 bzero(&ifp->network_id, sizeof(ifp->network_id));
292
293 /* Initialize external name (name + unit) */
294 snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
295 "%s", if_xname);
296
297 /*
298 * On embedded, framer() is already in the extended form;
299 * we simply use it as is, unless the caller specifies
300 * framer_extended() which will then override it.
301 *
302 * On non-embedded, framer() has long been exposed as part
303 * of the public KPI, and therefore its signature must
304 * remain the same (without the pre- and postpend length
305 * parameters). We special-case ether_frameout, such that
306 * it gets mapped to its extended variant. All other cases
307 * utilize the stub routine which will simply return zeroes
308 * for those new parameters.
309 *
310 * Internally, DLIL will only use the extended callback
311 * variant which is represented by if_framer.
312 */
313 #if !XNU_TARGET_OS_OSX
314 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
315 ifp->if_framer = ifp->if_framer_legacy;
316 }
317 #else /* XNU_TARGET_OS_OSX */
318 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
319 if (ifp->if_framer_legacy == ether_frameout) {
320 ifp->if_framer = ether_frameout_extended;
321 } else {
322 ifp->if_framer = ifnet_framer_stub;
323 }
324 }
325 #endif /* XNU_TARGET_OS_OSX */
326
327 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
328 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
329 } else if (ifp->if_output_bw.eff_bw == 0) {
330 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
331 }
332
333 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
334 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
335 } else if (ifp->if_input_bw.eff_bw == 0) {
336 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
337 }
338
339 if (ifp->if_output_bw.max_bw == 0) {
340 ifp->if_output_bw = ifp->if_input_bw;
341 } else if (ifp->if_input_bw.max_bw == 0) {
342 ifp->if_input_bw = ifp->if_output_bw;
343 }
344
345 /* Pin if_baudrate to 32 bits */
346 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
347 if (br != 0) {
348 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
349 }
350
351 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
352 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
353 } else if (ifp->if_output_lt.eff_lt == 0) {
354 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
355 }
356
357 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
358 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
359 } else if (ifp->if_input_lt.eff_lt == 0) {
360 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
361 }
362
363 if (ifp->if_output_lt.max_lt == 0) {
364 ifp->if_output_lt = ifp->if_input_lt;
365 } else if (ifp->if_input_lt.max_lt == 0) {
366 ifp->if_input_lt = ifp->if_output_lt;
367 }
368
369 if (ifp->if_ioctl == NULL) {
370 ifp->if_ioctl = ifp_if_ioctl;
371 }
372
373 if_clear_eflags(ifp, -1);
374 if (ifp->if_start != NULL) {
375 if_set_eflags(ifp, IFEF_TXSTART);
376 if (ifp->if_pre_enqueue == NULL) {
377 ifp->if_pre_enqueue = ifnet_enqueue;
378 }
379 ifp->if_output = ifp->if_pre_enqueue;
380 }
381
382 if (ifp->if_input_poll != NULL) {
383 if_set_eflags(ifp, IFEF_RXPOLL);
384 }
385
386 ifp->if_output_dlil = dlil_output_handler;
387 ifp->if_input_dlil = dlil_input_handler;
388
389 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
390 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
391 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
392 ifp->if_input_ctl == NULL));
393 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
394 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
395
396 if (einit.broadcast_len && einit.broadcast_addr) {
397 if (einit.broadcast_len >
398 sizeof(ifp->if_broadcast.u.buffer)) {
399 MALLOC(ifp->if_broadcast.u.ptr, u_char *,
400 einit.broadcast_len, M_IFADDR, M_NOWAIT);
401 if (ifp->if_broadcast.u.ptr == NULL) {
402 error = ENOMEM;
403 } else {
404 bcopy(einit.broadcast_addr,
405 ifp->if_broadcast.u.ptr,
406 einit.broadcast_len);
407 }
408 } else {
409 bcopy(einit.broadcast_addr,
410 ifp->if_broadcast.u.buffer,
411 einit.broadcast_len);
412 }
413 ifp->if_broadcast.length = einit.broadcast_len;
414 } else {
415 bzero(&ifp->if_broadcast, sizeof(ifp->if_broadcast));
416 }
417
418 if_clear_xflags(ifp, -1);
419 /* legacy interface */
420 if_set_xflags(ifp, IFXF_LEGACY);
421
422 /*
423 * The output target queue delay is specified in milliseconds;
424 * convert it to nanoseconds.
425 */
426 IFCQ_TARGET_QDELAY(&ifp->if_snd) =
427 einit.output_target_qdelay * 1000 * 1000;
428 IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;
429
430 ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
431 einit.start_delay_timeout);
432
433 IFCQ_PKT_DROP_LIMIT(&ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;
434
435 /*
436 * Set the embryonic flag; this will be cleared
437 * later, once the interface is fully attached.
438 */
439 ifp->if_refflags = IFRF_EMBRYONIC;
440
441 /*
442 * Count the newly allocated ifnet
443 */
444 OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
445 INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
446 if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
447 if_set_xflags(ifp, IFXF_ALLOC_KPI);
448 } else {
449 OSIncrementAtomic64(
450 &net_api_stats.nas_ifnet_alloc_os_count);
451 INC_ATOMIC_INT64_LIM(
452 net_api_stats.nas_ifnet_alloc_os_total);
453 }
454
455 if (error == 0) {
456 *interface = ifp;
457 // temporary - this should be done in dlil_if_acquire
458 ifnet_reference(ifp);
459 } else {
460 dlil_if_release(ifp);
461 *interface = NULL;
462 }
463 }
464 return error;
465 }
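/*
 * Illustrative sketch (not part of the original source): allocating a
 * "new model" interface with ifnet_allocate_extended(). In contrast to
 * the legacy path, no output callback is supplied; the driver provides
 * a start routine and the kernel-managed send queue feeds it.
 * mydrv_start, mydrv_demux and related names are placeholders.
 *
 *    struct ifnet_init_eparams einit;
 *    ifnet_t ifp;
 *    errno_t err;
 *
 *    bzero(&einit, sizeof(einit));
 *    einit.ver = IFNET_INIT_CURRENT_VERSION;
 *    einit.len = sizeof(einit);
 *    einit.flags = IFNET_INIT_NX_NOAUTO;
 *    einit.name = "mydrv";
 *    einit.unit = 0;
 *    einit.family = IFNET_FAMILY_ETHERNET;
 *    einit.type = IFT_ETHER;
 *    einit.start = mydrv_start;
 *    einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
 *    einit.sndq_maxlen = 128;
 *    einit.demux = mydrv_demux;
 *    einit.add_proto = mydrv_add_proto;
 *    einit.del_proto = mydrv_del_proto;
 *    err = ifnet_allocate_extended(&einit, &ifp);
 */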
466
467 errno_t
468 ifnet_reference(ifnet_t ifp)
469 {
470 return dlil_if_ref(ifp);
471 }
472
473 void
474 ifnet_dispose(ifnet_t ifp)
475 {
476 if (ifp->if_broadcast.length > sizeof(ifp->if_broadcast.u.buffer)) {
477 FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
478 ifp->if_broadcast.u.ptr = NULL;
479 }
480
481 dlil_if_release(ifp);
482 }
483
484 errno_t
485 ifnet_release(ifnet_t ifp)
486 {
487 return dlil_if_free(ifp);
488 }
489
490 errno_t
491 ifnet_interface_family_find(const char *module_string,
492 ifnet_family_t *family_id)
493 {
494 if (module_string == NULL || family_id == NULL) {
495 return EINVAL;
496 }
497
498 return net_str_id_find_internal(module_string, family_id,
499 NSI_IF_FAM_ID, 1);
500 }
501
502 void *
503 ifnet_softc(ifnet_t interface)
504 {
505 return (interface == NULL) ? NULL : interface->if_softc;
506 }
507
508 const char *
509 ifnet_name(ifnet_t interface)
510 {
511 return (interface == NULL) ? NULL : interface->if_name;
512 }
513
514 ifnet_family_t
515 ifnet_family(ifnet_t interface)
516 {
517 return (interface == NULL) ? 0 : interface->if_family;
518 }
519
520 ifnet_subfamily_t
521 ifnet_subfamily(ifnet_t interface)
522 {
523 return (interface == NULL) ? 0 : interface->if_subfamily;
524 }
525
526 u_int32_t
527 ifnet_unit(ifnet_t interface)
528 {
529 return (interface == NULL) ? (u_int32_t)0xffffffff :
530 (u_int32_t)interface->if_unit;
531 }
532
533 u_int32_t
534 ifnet_index(ifnet_t interface)
535 {
536 return (interface == NULL) ? (u_int32_t)0xffffffff :
537 interface->if_index;
538 }
539
540 errno_t
541 ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
542 {
543 uint16_t old_flags;
544
545 if (interface == NULL) {
546 return EINVAL;
547 }
548
549 ifnet_lock_exclusive(interface);
550
551 /* If we are modifying the up/down state, call if_updown */
552 if ((mask & IFF_UP) != 0) {
553 if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
554 }
555
556 old_flags = interface->if_flags;
557 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
558 /* If we are modifying the multicast flag, set/unset the silent flag */
559 if ((old_flags & IFF_MULTICAST) !=
560 (interface->if_flags & IFF_MULTICAST)) {
561 #if INET
562 if (IGMP_IFINFO(interface) != NULL) {
563 igmp_initsilent(interface, IGMP_IFINFO(interface));
564 }
565 #endif /* INET */
566 if (MLD_IFINFO(interface) != NULL) {
567 mld6_initsilent(interface, MLD_IFINFO(interface));
568 }
569 }
570
571 ifnet_lock_done(interface);
572
573 return 0;
574 }
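/*
 * Illustrative sketch (not part of the original source): marking an
 * interface up once the hardware is ready. Passing IFF_UP in both the
 * new flags and the mask limits the change to that single bit, and the
 * IFF_UP case above additionally triggers if_updown().
 *
 *    (void) ifnet_set_flags(ifp, IFF_UP, IFF_UP);
 */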
575
576 u_int16_t
577 ifnet_flags(ifnet_t interface)
578 {
579 return (interface == NULL) ? 0 : interface->if_flags;
580 }
581
582 /*
583 * This routine ensures the following:
584 *
585 * If IFEF_AWDL is set by the caller, also set the rest of flags as
586 * defined in IFEF_AWDL_MASK.
587 *
588 * If IFEF_AWDL has been set on the interface and the caller attempts
589 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
590 * return failure.
591 *
592 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
593 * on the interface.
594 *
595 * All other flags not associated with AWDL are not affected.
596 *
597 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
598 */
599 static errno_t
600 ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
601 {
602 u_int32_t eflags;
603
604 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
605
606 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
607
608 if (ifp->if_eflags & IFEF_AWDL) {
609 if (eflags & IFEF_AWDL) {
610 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
611 return EINVAL;
612 }
613 } else {
614 *new_eflags &= ~IFEF_AWDL_MASK;
615 *mask |= IFEF_AWDL_MASK;
616 }
617 } else if (eflags & IFEF_AWDL) {
618 *new_eflags |= IFEF_AWDL_MASK;
619 *mask |= IFEF_AWDL_MASK;
620 } else if (eflags & IFEF_AWDL_RESTRICTED &&
621 !(ifp->if_eflags & IFEF_AWDL)) {
622 return EINVAL;
623 }
624
625 return 0;
626 }
627
628 errno_t
629 ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
630 {
631 uint32_t oeflags;
632 struct kev_msg ev_msg;
633 struct net_event_data ev_data;
634
635 if (interface == NULL) {
636 return EINVAL;
637 }
638
639 bzero(&ev_msg, sizeof(ev_msg));
640 ifnet_lock_exclusive(interface);
641 /*
642 * Sanity checks for IFEF_AWDL and its related flags.
643 */
644 if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
645 ifnet_lock_done(interface);
646 return EINVAL;
647 }
648 /*
649 * Interface advisory reporting is currently supported only for
650 * Skywalk-native interfaces.
651 */
652 if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
653 ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
654 ifnet_lock_done(interface);
655 return EINVAL;
656 }
657 oeflags = interface->if_eflags;
658 if_clear_eflags(interface, mask);
659 if (new_flags != 0) {
660 if_set_eflags(interface, (new_flags & mask));
661 }
662 ifnet_lock_done(interface);
663 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
664 !(oeflags & IFEF_AWDL_RESTRICTED)) {
665 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
666 /*
667 * The interface is now restricted to applications that have
668 * the entitlement.
669 * The check for the entitlement will be done in the data
670 * path, so we don't have to do anything here.
671 */
672 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
673 !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
674 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
675 }
676 /*
677 * Notify configd so that it has a chance to perform better
678 * reachability detection.
679 */
680 if (ev_msg.event_code) {
681 bzero(&ev_data, sizeof(ev_data));
682 ev_msg.vendor_code = KEV_VENDOR_APPLE;
683 ev_msg.kev_class = KEV_NETWORK_CLASS;
684 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
685 strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
686 ev_data.if_family = interface->if_family;
687 ev_data.if_unit = interface->if_unit;
688 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
689 ev_msg.dv[0].data_ptr = &ev_data;
690 ev_msg.dv[1].data_length = 0;
691 dlil_post_complete_msg(interface, &ev_msg);
692 }
693
694 return 0;
695 }
696
697 u_int32_t
698 ifnet_eflags(ifnet_t interface)
699 {
700 return (interface == NULL) ? 0 : interface->if_eflags;
701 }
702
703 errno_t
704 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
705 {
706 int before, after;
707
708 if (ifp == NULL) {
709 return EINVAL;
710 }
711
712 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
713 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
714
715 /*
716 * If this is called prior to ifnet attach, the actual work will
717 * be done at attach time. Otherwise, if it is called after
718 * ifnet detach, then it is a no-op.
719 */
720 if (!ifnet_is_attached(ifp, 0)) {
721 ifp->if_idle_new_flags = new_flags;
722 ifp->if_idle_new_flags_mask = mask;
723 return 0;
724 } else {
725 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
726 }
727
728 before = ifp->if_idle_flags;
729 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
730 after = ifp->if_idle_flags;
731
732 if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
733 ifp->if_want_aggressive_drain != 0) {
734 ifp->if_want_aggressive_drain = 0;
735 } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
736 ifp->if_want_aggressive_drain++;
737 }
738
739 return 0;
740 }
741
742 errno_t
743 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
744 {
745 errno_t err;
746
747 lck_mtx_lock(rnh_lock);
748 ifnet_lock_exclusive(ifp);
749 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
750 ifnet_lock_done(ifp);
751 lck_mtx_unlock(rnh_lock);
752
753 return err;
754 }
755
756 u_int32_t
757 ifnet_idle_flags(ifnet_t ifp)
758 {
759 return (ifp == NULL) ? 0 : ifp->if_idle_flags;
760 }
761
762 errno_t
763 ifnet_set_link_quality(ifnet_t ifp, int quality)
764 {
765 errno_t err = 0;
766
767 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
768 err = EINVAL;
769 goto done;
770 }
771
772 if (!ifnet_is_attached(ifp, 0)) {
773 err = ENXIO;
774 goto done;
775 }
776
777 if_lqm_update(ifp, quality, 0);
778
779 done:
780 return err;
781 }
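/*
 * Illustrative sketch (not part of the original source): a driver
 * reporting improved link quality after (re)association, assuming the
 * IFNET_LQM_THRESH_GOOD constant from <net/if.h>. The value must lie
 * within [IFNET_LQM_MIN, IFNET_LQM_MAX] or EINVAL is returned.
 *
 *    (void) ifnet_set_link_quality(ifp, IFNET_LQM_THRESH_GOOD);
 */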
782
783 int
784 ifnet_link_quality(ifnet_t ifp)
785 {
786 int lqm;
787
788 if (ifp == NULL) {
789 return IFNET_LQM_THRESH_OFF;
790 }
791
792 ifnet_lock_shared(ifp);
793 lqm = ifp->if_interface_state.lqm_state;
794 ifnet_lock_done(ifp);
795
796 return lqm;
797 }
798
799 errno_t
800 ifnet_set_interface_state(ifnet_t ifp,
801 struct if_interface_state *if_interface_state)
802 {
803 errno_t err = 0;
804
805 if (ifp == NULL || if_interface_state == NULL) {
806 err = EINVAL;
807 goto done;
808 }
809
810 if (!ifnet_is_attached(ifp, 0)) {
811 err = ENXIO;
812 goto done;
813 }
814
815 if_state_update(ifp, if_interface_state);
816
817 done:
818 return err;
819 }
820
821 errno_t
822 ifnet_get_interface_state(ifnet_t ifp,
823 struct if_interface_state *if_interface_state)
824 {
825 errno_t err = 0;
826
827 if (ifp == NULL || if_interface_state == NULL) {
828 err = EINVAL;
829 goto done;
830 }
831
832 if (!ifnet_is_attached(ifp, 0)) {
833 err = ENXIO;
834 goto done;
835 }
836
837 if_get_state(ifp, if_interface_state);
838
839 done:
840 return err;
841 }
842
843
844 static errno_t
845 ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
846 struct ifnet_llreach_info *iflri)
847 {
848 if (ifp == NULL || iflri == NULL) {
849 return EINVAL;
850 }
851
852 VERIFY(af == AF_INET || af == AF_INET6);
853
854 return ifnet_llreach_get_defrouter(ifp, af, iflri);
855 }
856
857 errno_t
858 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
859 {
860 return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
861 }
862
863 errno_t
864 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
865 {
866 return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
867 }
868
869 errno_t
870 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
871 u_int32_t mask)
872 {
873 errno_t error = 0;
874 int tmp;
875
876 if (ifp == NULL) {
877 return EINVAL;
878 }
879
880 ifnet_lock_exclusive(ifp);
881 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
882 if ((tmp & ~IFCAP_VALID)) {
883 error = EINVAL;
884 } else {
885 ifp->if_capabilities = tmp;
886 }
887 ifnet_lock_done(ifp);
888
889 return error;
890 }
891
892 u_int32_t
893 ifnet_capabilities_supported(ifnet_t ifp)
894 {
895 return (ifp == NULL) ? 0 : ifp->if_capabilities;
896 }
897
898
899 errno_t
900 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
901 u_int32_t mask)
902 {
903 errno_t error = 0;
904 int tmp;
905 struct kev_msg ev_msg;
906 struct net_event_data ev_data;
907
908 if (ifp == NULL) {
909 return EINVAL;
910 }
911
912 ifnet_lock_exclusive(ifp);
913 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
914 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
915 error = EINVAL;
916 } else {
917 ifp->if_capenable = tmp;
918 }
919 ifnet_lock_done(ifp);
920
921 /* Notify application of the change */
922 bzero(&ev_data, sizeof(struct net_event_data));
923 bzero(&ev_msg, sizeof(struct kev_msg));
924 ev_msg.vendor_code = KEV_VENDOR_APPLE;
925 ev_msg.kev_class = KEV_NETWORK_CLASS;
926 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
927
928 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
929 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
930 ev_data.if_family = ifp->if_family;
931 ev_data.if_unit = (u_int32_t)ifp->if_unit;
932 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
933 ev_msg.dv[0].data_ptr = &ev_data;
934 ev_msg.dv[1].data_length = 0;
935 dlil_post_complete_msg(ifp, &ev_msg);
936
937 return error;
938 }
939
940 u_int32_t
941 ifnet_capabilities_enabled(ifnet_t ifp)
942 {
943 return (ifp == NULL) ? 0 : ifp->if_capenable;
944 }
945
946 static const ifnet_offload_t offload_mask =
947 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
948 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
949 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
950 IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
951 IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
952 IFNET_SW_TIMESTAMP);
953
954 static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
955
956 errno_t
957 ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
958 {
959 u_int32_t ifcaps = 0;
960
961 if (interface == NULL) {
962 return EINVAL;
963 }
964
965 ifnet_lock_exclusive(interface);
966 interface->if_hwassist = (offload & offload_mask);
967
968 /*
969 * Hardware capable of partial checksum offload is
970 * flexible enough to handle any transports utilizing
971 * Internet Checksumming. Include those transports
972 * here, and leave the final decision to IP.
973 */
974 if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
975 interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
976 IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
977 }
978 if (dlil_verbose) {
979 log(LOG_DEBUG, "%s: set offload flags=%b\n",
980 if_name(interface),
981 interface->if_hwassist, IFNET_OFFLOADF_BITS);
982 }
983 ifnet_lock_done(interface);
984
985 if ((offload & any_offload_csum)) {
986 ifcaps |= IFCAP_HWCSUM;
987 }
988 if ((offload & IFNET_TSO_IPV4)) {
989 ifcaps |= IFCAP_TSO4;
990 }
991 if ((offload & IFNET_TSO_IPV6)) {
992 ifcaps |= IFCAP_TSO6;
993 }
994 if ((offload & IFNET_VLAN_MTU)) {
995 ifcaps |= IFCAP_VLAN_MTU;
996 }
997 if ((offload & IFNET_VLAN_TAGGING)) {
998 ifcaps |= IFCAP_VLAN_HWTAGGING;
999 }
1000 if ((offload & IFNET_TX_STATUS)) {
1001 ifcaps |= IFCAP_TXSTATUS;
1002 }
1003 if ((offload & IFNET_HW_TIMESTAMP)) {
1004 ifcaps |= IFCAP_HW_TIMESTAMP;
1005 }
1006 if ((offload & IFNET_SW_TIMESTAMP)) {
1007 ifcaps |= IFCAP_SW_TIMESTAMP;
1008 }
1009 if ((offload & IFNET_CSUM_PARTIAL)) {
1010 ifcaps |= IFCAP_CSUM_PARTIAL;
1011 }
1012 if ((offload & IFNET_CSUM_ZERO_INVERT)) {
1013 ifcaps |= IFCAP_CSUM_ZERO_INVERT;
1014 }
1015 if (ifcaps != 0) {
1016 (void) ifnet_set_capabilities_supported(interface, ifcaps,
1017 IFCAP_VALID);
1018 (void) ifnet_set_capabilities_enabled(interface, ifcaps,
1019 IFCAP_VALID);
1020 }
1021
1022 return 0;
1023 }
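/*
 * Illustrative sketch (not part of the original source): a driver
 * advertising IPv4/TCP/UDP checksum offload plus IPv4 TSO. Note that
 * ifnet_set_offload() replaces the full offload set rather than OR-ing
 * into it, and that enabling TSO normally goes hand in hand with a call
 * to ifnet_set_tso_mtu() for the corresponding address family.
 *
 *    (void) ifnet_set_offload(ifp,
 *        IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
 *        IFNET_VLAN_MTU | IFNET_TSO_IPV4);
 *    (void) ifnet_set_tso_mtu(ifp, AF_INET, 16 * 1024);
 */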
1024
1025 ifnet_offload_t
1026 ifnet_offload(ifnet_t interface)
1027 {
1028 return (interface == NULL) ?
1029 0 : (interface->if_hwassist & offload_mask);
1030 }
1031
1032 errno_t
1033 ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
1034 {
1035 errno_t error = 0;
1036
1037 if (interface == NULL || mtuLen < interface->if_mtu) {
1038 return EINVAL;
1039 }
1040
1041 switch (family) {
1042 case AF_INET:
1043 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1044 interface->if_tso_v4_mtu = mtuLen;
1045 } else {
1046 error = EINVAL;
1047 }
1048 break;
1049
1050 case AF_INET6:
1051 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1052 interface->if_tso_v6_mtu = mtuLen;
1053 } else {
1054 error = EINVAL;
1055 }
1056 break;
1057
1058 default:
1059 error = EPROTONOSUPPORT;
1060 break;
1061 }
1062
1063 return error;
1064 }
1065
1066 errno_t
1067 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1068 {
1069 errno_t error = 0;
1070
1071 if (interface == NULL || mtuLen == NULL) {
1072 return EINVAL;
1073 }
1074
1075 switch (family) {
1076 case AF_INET:
1077 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1078 *mtuLen = interface->if_tso_v4_mtu;
1079 } else {
1080 error = EINVAL;
1081 }
1082 break;
1083
1084 case AF_INET6:
1085 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1086 *mtuLen = interface->if_tso_v6_mtu;
1087 } else {
1088 error = EINVAL;
1089 }
1090 break;
1091
1092 default:
1093 error = EPROTONOSUPPORT;
1094 break;
1095 }
1096
1097 return error;
1098 }
1099
1100 errno_t
1101 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1102 {
1103 struct kev_msg ev_msg;
1104 struct net_event_data ev_data;
1105
1106 bzero(&ev_data, sizeof(struct net_event_data));
1107 bzero(&ev_msg, sizeof(struct kev_msg));
1108
1109 if (interface == NULL) {
1110 return EINVAL;
1111 }
1112
1113 /* Do not accept wacky values */
1114 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
1115 return EINVAL;
1116 }
1117
1118 if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1119 if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1120 if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1121 } else {
1122 if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1123 }
1124 }
1125
1126 (void) ifnet_touch_lastchange(interface);
1127
1128 /* Notify application of the change */
1129 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1130 ev_msg.kev_class = KEV_NETWORK_CLASS;
1131 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1132
1133 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
1134 strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1135 ev_data.if_family = interface->if_family;
1136 ev_data.if_unit = (u_int32_t)interface->if_unit;
1137 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1138 ev_msg.dv[0].data_ptr = &ev_data;
1139 ev_msg.dv[1].data_length = 0;
1140 dlil_post_complete_msg(interface, &ev_msg);
1141
1142 return 0;
1143 }
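/*
 * Illustrative sketch (not part of the original source): enabling
 * wake-on-magic-packet. Bits outside IF_WAKE_VALID_FLAGS are rejected
 * above, and a KEV_DL_WAKEFLAGS_CHANGED event is posted as a side
 * effect of the change.
 *
 *    (void) ifnet_set_wake_flags(ifp,
 *        IF_WAKE_ON_MAGIC_PACKET, IF_WAKE_ON_MAGIC_PACKET);
 */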
1144
1145 u_int32_t
1146 ifnet_get_wake_flags(ifnet_t interface)
1147 {
1148 u_int32_t flags = 0;
1149
1150 if (interface == NULL) {
1151 return 0;
1152 }
1153
1154 if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1155 flags |= IF_WAKE_ON_MAGIC_PACKET;
1156 }
1157
1158 return flags;
1159 }
1160
1161 /*
1162 * Should MIB data store a copy?
1163 */
1164 errno_t
1165 ifnet_set_link_mib_data(ifnet_t interface, void *mibData, uint32_t mibLen)
1166 {
1167 if (interface == NULL) {
1168 return EINVAL;
1169 }
1170
1171 ifnet_lock_exclusive(interface);
1172 interface->if_linkmib = (void*)mibData;
1173 interface->if_linkmiblen = mibLen;
1174 ifnet_lock_done(interface);
1175 return 0;
1176 }
1177
1178 errno_t
1179 ifnet_get_link_mib_data(ifnet_t interface, void *mibData, uint32_t *mibLen)
1180 {
1181 errno_t result = 0;
1182
1183 if (interface == NULL) {
1184 return EINVAL;
1185 }
1186
1187 ifnet_lock_shared(interface);
1188 if (*mibLen < interface->if_linkmiblen) {
1189 result = EMSGSIZE;
1190 }
1191 if (result == 0 && interface->if_linkmib == NULL) {
1192 result = ENOTSUP;
1193 }
1194
1195 if (result == 0) {
1196 *mibLen = interface->if_linkmiblen;
1197 bcopy(interface->if_linkmib, mibData, *mibLen);
1198 }
1199 ifnet_lock_done(interface);
1200
1201 return result;
1202 }
1203
1204 uint32_t
1205 ifnet_get_link_mib_data_length(ifnet_t interface)
1206 {
1207 return (interface == NULL) ? 0 : interface->if_linkmiblen;
1208 }
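/*
 * Illustrative sketch (not part of the original source): reading link
 * MIB data published by a driver through ifnet_set_link_mib_data().
 * The 256-byte buffer is an arbitrary placeholder size.
 *
 *    uint8_t buf[256];
 *    uint32_t len = sizeof(buf);
 *    errno_t err;
 *
 *    err = ifnet_get_link_mib_data(ifp, buf, &len);
 *
 * A return of EMSGSIZE means the buffer was too small (use
 * ifnet_get_link_mib_data_length() to size it); on success, len is
 * updated to the number of valid bytes copied into buf.
 */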
1209
1210 errno_t
1211 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1212 mbuf_t m, void *route, const struct sockaddr *dest)
1213 {
1214 if (interface == NULL || protocol_family == 0 || m == NULL) {
1215 if (m != NULL) {
1216 mbuf_freem_list(m);
1217 }
1218 return EINVAL;
1219 }
1220 return dlil_output(interface, protocol_family, m, route, dest, 0, NULL);
1221 }
1222
1223 errno_t
1224 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1225 {
1226 if (interface == NULL || m == NULL) {
1227 if (m != NULL) {
1228 mbuf_freem_list(m);
1229 }
1230 return EINVAL;
1231 }
1232 return dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL);
1233 }
1234
1235 errno_t
1236 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1237 {
1238 if (interface == NULL) {
1239 return EINVAL;
1240 }
1241
1242 interface->if_mtu = mtu;
1243 return 0;
1244 }
1245
1246 u_int32_t
1247 ifnet_mtu(ifnet_t interface)
1248 {
1249 return (interface == NULL) ? 0 : interface->if_mtu;
1250 }
1251
1252 u_char
1253 ifnet_type(ifnet_t interface)
1254 {
1255 return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1256 }
1257
1258 errno_t
1259 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1260 {
1261 if (interface == NULL) {
1262 return EINVAL;
1263 }
1264
1265 interface->if_data.ifi_addrlen = addrlen;
1266 return 0;
1267 }
1268
1269 u_char
1270 ifnet_addrlen(ifnet_t interface)
1271 {
1272 return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1273 }
1274
1275 errno_t
1276 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1277 {
1278 if (interface == NULL) {
1279 return EINVAL;
1280 }
1281
1282 interface->if_data.ifi_hdrlen = hdrlen;
1283 return 0;
1284 }
1285
1286 u_char
1287 ifnet_hdrlen(ifnet_t interface)
1288 {
1289 return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1290 }
1291
1292 errno_t
1293 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1294 {
1295 if (interface == NULL) {
1296 return EINVAL;
1297 }
1298
1299 interface->if_data.ifi_metric = metric;
1300 return 0;
1301 }
1302
1303 u_int32_t
1304 ifnet_metric(ifnet_t interface)
1305 {
1306 return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1307 }
1308
1309 errno_t
1310 ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
1311 {
1312 if (ifp == NULL) {
1313 return EINVAL;
1314 }
1315
1316 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1317 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1318
1319 /* Pin if_baudrate to 32 bits until we can change the storage size */
1320 ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;
1321
1322 return 0;
1323 }
1324
1325 u_int64_t
1326 ifnet_baudrate(struct ifnet *ifp)
1327 {
1328 return (ifp == NULL) ? 0 : ifp->if_baudrate;
1329 }
1330
1331 errno_t
1332 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1333 struct if_bandwidths *input_bw)
1334 {
1335 if (ifp == NULL) {
1336 return EINVAL;
1337 }
1338
1339 /* set input values first (if any), as output values depend on them */
1340 if (input_bw != NULL) {
1341 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1342 }
1343
1344 if (output_bw != NULL) {
1345 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1346 }
1347
1348 return 0;
1349 }
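/*
 * Illustrative sketch (not part of the original source): a driver
 * reporting a link rate change. The eff_bw/max_bw fields are assumed to
 * be in bits per second; fields left at zero are treated as "no change"
 * by the setters below.
 *
 *    struct if_bandwidths out_bw, in_bw;
 *
 *    bzero(&out_bw, sizeof(out_bw));
 *    bzero(&in_bw, sizeof(in_bw));
 *    out_bw.max_bw = 1000000000;
 *    out_bw.eff_bw = 600000000;
 *    in_bw.max_bw = 1000000000;
 *    in_bw.eff_bw = 800000000;
 *    (void) ifnet_set_bandwidths(ifp, &out_bw, &in_bw);
 */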
1350
1351 static void
1352 ifnet_set_link_status_outbw(struct ifnet *ifp)
1353 {
1354 struct if_wifi_status_v1 *sr;
1355 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1356 if (ifp->if_output_bw.eff_bw != 0) {
1357 sr->valid_bitmask |=
1358 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1359 sr->ul_effective_bandwidth =
1360 ifp->if_output_bw.eff_bw > UINT32_MAX ?
1361 UINT32_MAX :
1362 (uint32_t)ifp->if_output_bw.eff_bw;
1363 }
1364 if (ifp->if_output_bw.max_bw != 0) {
1365 sr->valid_bitmask |=
1366 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1367 sr->ul_max_bandwidth =
1368 ifp->if_output_bw.max_bw > UINT32_MAX ?
1369 UINT32_MAX :
1370 (uint32_t)ifp->if_output_bw.max_bw;
1371 }
1372 }
1373
1374 errno_t
1375 ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1376 boolean_t locked)
1377 {
1378 struct if_bandwidths old_bw;
1379 struct ifclassq *ifq;
1380 u_int64_t br;
1381
1382 VERIFY(ifp != NULL && bw != NULL);
1383
1384 ifq = &ifp->if_snd;
1385 if (!locked) {
1386 IFCQ_LOCK(ifq);
1387 }
1388 IFCQ_LOCK_ASSERT_HELD(ifq);
1389
1390 old_bw = ifp->if_output_bw;
1391 if (bw->eff_bw != 0) {
1392 ifp->if_output_bw.eff_bw = bw->eff_bw;
1393 }
1394 if (bw->max_bw != 0) {
1395 ifp->if_output_bw.max_bw = bw->max_bw;
1396 }
1397 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
1398 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1399 } else if (ifp->if_output_bw.eff_bw == 0) {
1400 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1401 }
1402
1403 /* Pin if_baudrate to 32 bits */
1404 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1405 if (br != 0) {
1406 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
1407 }
1408
1409 /* Adjust queue parameters if needed */
1410 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1411 old_bw.max_bw != ifp->if_output_bw.max_bw) {
1412 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1413 }
1414
1415 if (!locked) {
1416 IFCQ_UNLOCK(ifq);
1417 }
1418
1419 /*
1420 * If this is a Wi-Fi interface, also update the values in
1421 * the if_link_status structure.
1422 */
1423 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1424 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1425 ifnet_set_link_status_outbw(ifp);
1426 lck_rw_done(&ifp->if_link_status_lock);
1427 }
1428
1429 return 0;
1430 }
1431
1432 static void
1433 ifnet_set_link_status_inbw(struct ifnet *ifp)
1434 {
1435 struct if_wifi_status_v1 *sr;
1436
1437 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1438 if (ifp->if_input_bw.eff_bw != 0) {
1439 sr->valid_bitmask |=
1440 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1441 sr->dl_effective_bandwidth =
1442 ifp->if_input_bw.eff_bw > UINT32_MAX ?
1443 UINT32_MAX :
1444 (uint32_t)ifp->if_input_bw.eff_bw;
1445 }
1446 if (ifp->if_input_bw.max_bw != 0) {
1447 sr->valid_bitmask |=
1448 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1449 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
1450 UINT32_MAX :
1451 (uint32_t)ifp->if_input_bw.max_bw;
1452 }
1453 }
1454
1455 errno_t
1456 ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1457 {
1458 struct if_bandwidths old_bw;
1459
1460 VERIFY(ifp != NULL && bw != NULL);
1461
1462 old_bw = ifp->if_input_bw;
1463 if (bw->eff_bw != 0) {
1464 ifp->if_input_bw.eff_bw = bw->eff_bw;
1465 }
1466 if (bw->max_bw != 0) {
1467 ifp->if_input_bw.max_bw = bw->max_bw;
1468 }
1469 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
1470 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1471 } else if (ifp->if_input_bw.eff_bw == 0) {
1472 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1473 }
1474
1475 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1476 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1477 ifnet_set_link_status_inbw(ifp);
1478 lck_rw_done(&ifp->if_link_status_lock);
1479 }
1480
1481 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1482 old_bw.max_bw != ifp->if_input_bw.max_bw) {
1483 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1484 }
1485
1486 return 0;
1487 }
1488
1489 u_int64_t
1490 ifnet_output_linkrate(struct ifnet *ifp)
1491 {
1492 struct ifclassq *ifq = &ifp->if_snd;
1493 u_int64_t rate;
1494
1495 IFCQ_LOCK_ASSERT_HELD(ifq);
1496
1497 rate = ifp->if_output_bw.eff_bw;
1498 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1499 u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
1500 VERIFY(tbr_rate > 0);
1501 rate = MIN(rate, ifp->if_snd.ifcq_tbr.tbr_rate_raw);
1502 }
1503
1504 return rate;
1505 }
1506
1507 u_int64_t
1508 ifnet_input_linkrate(struct ifnet *ifp)
1509 {
1510 return ifp->if_input_bw.eff_bw;
1511 }
1512
1513 errno_t
1514 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1515 struct if_bandwidths *input_bw)
1516 {
1517 if (ifp == NULL) {
1518 return EINVAL;
1519 }
1520
1521 if (output_bw != NULL) {
1522 *output_bw = ifp->if_output_bw;
1523 }
1524 if (input_bw != NULL) {
1525 *input_bw = ifp->if_input_bw;
1526 }
1527
1528 return 0;
1529 }
1530
1531 errno_t
1532 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1533 struct if_latencies *input_lt)
1534 {
1535 if (ifp == NULL) {
1536 return EINVAL;
1537 }
1538
1539 if (output_lt != NULL) {
1540 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1541 }
1542
1543 if (input_lt != NULL) {
1544 (void) ifnet_set_input_latencies(ifp, input_lt);
1545 }
1546
1547 return 0;
1548 }
1549
1550 errno_t
1551 ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1552 boolean_t locked)
1553 {
1554 struct if_latencies old_lt;
1555 struct ifclassq *ifq;
1556
1557 VERIFY(ifp != NULL && lt != NULL);
1558
1559 ifq = &ifp->if_snd;
1560 if (!locked) {
1561 IFCQ_LOCK(ifq);
1562 }
1563 IFCQ_LOCK_ASSERT_HELD(ifq);
1564
1565 old_lt = ifp->if_output_lt;
1566 if (lt->eff_lt != 0) {
1567 ifp->if_output_lt.eff_lt = lt->eff_lt;
1568 }
1569 if (lt->max_lt != 0) {
1570 ifp->if_output_lt.max_lt = lt->max_lt;
1571 }
1572 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
1573 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1574 } else if (ifp->if_output_lt.eff_lt == 0) {
1575 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1576 }
1577
1578 /* Adjust queue parameters if needed */
1579 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1580 old_lt.max_lt != ifp->if_output_lt.max_lt) {
1581 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1582 }
1583
1584 if (!locked) {
1585 IFCQ_UNLOCK(ifq);
1586 }
1587
1588 return 0;
1589 }
1590
1591 errno_t
1592 ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1593 {
1594 struct if_latencies old_lt;
1595
1596 VERIFY(ifp != NULL && lt != NULL);
1597
1598 old_lt = ifp->if_input_lt;
1599 if (lt->eff_lt != 0) {
1600 ifp->if_input_lt.eff_lt = lt->eff_lt;
1601 }
1602 if (lt->max_lt != 0) {
1603 ifp->if_input_lt.max_lt = lt->max_lt;
1604 }
1605 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
1606 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1607 } else if (ifp->if_input_lt.eff_lt == 0) {
1608 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1609 }
1610
1611 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1612 old_lt.max_lt != ifp->if_input_lt.max_lt) {
1613 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1614 }
1615
1616 return 0;
1617 }
1618
1619 errno_t
1620 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1621 struct if_latencies *input_lt)
1622 {
1623 if (ifp == NULL) {
1624 return EINVAL;
1625 }
1626
1627 if (output_lt != NULL) {
1628 *output_lt = ifp->if_output_lt;
1629 }
1630 if (input_lt != NULL) {
1631 *input_lt = ifp->if_input_lt;
1632 }
1633
1634 return 0;
1635 }
1636
1637 errno_t
1638 ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1639 {
1640 errno_t err;
1641
1642 if (ifp == NULL) {
1643 return EINVAL;
1644 } else if (!ifnet_is_attached(ifp, 1)) {
1645 return ENXIO;
1646 }
1647
1648 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1649
1650 /* Release the io ref count */
1651 ifnet_decr_iorefcnt(ifp);
1652
1653 return err;
1654 }
1655
1656 errno_t
1657 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1658 {
1659 errno_t err;
1660
1661 if (ifp == NULL || p == NULL) {
1662 return EINVAL;
1663 } else if (!ifnet_is_attached(ifp, 1)) {
1664 return ENXIO;
1665 }
1666
1667 err = dlil_rxpoll_get_params(ifp, p);
1668
1669 /* Release the io ref count */
1670 ifnet_decr_iorefcnt(ifp);
1671
1672 return err;
1673 }
1674
1675 errno_t
1676 ifnet_stat_increment(struct ifnet *ifp,
1677 const struct ifnet_stat_increment_param *s)
1678 {
1679 if (ifp == NULL) {
1680 return EINVAL;
1681 }
1682
1683 if (s->packets_in != 0) {
1684 atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1685 }
1686 if (s->bytes_in != 0) {
1687 atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1688 }
1689 if (s->errors_in != 0) {
1690 atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1691 }
1692
1693 if (s->packets_out != 0) {
1694 atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
1695 }
1696 if (s->bytes_out != 0) {
1697 atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1698 }
1699 if (s->errors_out != 0) {
1700 atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1701 }
1702
1703 if (s->collisions != 0) {
1704 atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
1705 }
1706 if (s->dropped != 0) {
1707 atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1708 }
1709
1710 /* Touch the last change time. */
1711 TOUCHLASTCHANGE(&ifp->if_lastchange);
1712
1713 if (ifp->if_data_threshold != 0) {
1714 ifnet_notify_data_threshold(ifp);
1715 }
1716
1717 return 0;
1718 }
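/*
 * Illustrative sketch (not part of the original source): a driver
 * charging a received frame and a transmit error against the interface
 * counters in a single call. Fields left at zero are simply skipped by
 * the checks above. rx_len is a placeholder for the length of the
 * received frame.
 *
 *    struct ifnet_stat_increment_param incr;
 *
 *    bzero(&incr, sizeof(incr));
 *    incr.packets_in = 1;
 *    incr.bytes_in = rx_len;
 *    incr.errors_out = 1;
 *    (void) ifnet_stat_increment(ifp, &incr);
 */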
1719
1720 errno_t
1721 ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1722 u_int32_t bytes_in, u_int32_t errors_in)
1723 {
1724 if (ifp == NULL) {
1725 return EINVAL;
1726 }
1727
1728 if (packets_in != 0) {
1729 atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
1730 }
1731 if (bytes_in != 0) {
1732 atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
1733 }
1734 if (errors_in != 0) {
1735 atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
1736 }
1737
1738 TOUCHLASTCHANGE(&ifp->if_lastchange);
1739
1740 if (ifp->if_data_threshold != 0) {
1741 ifnet_notify_data_threshold(ifp);
1742 }
1743
1744 return 0;
1745 }
1746
1747 errno_t
1748 ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1749 u_int32_t bytes_out, u_int32_t errors_out)
1750 {
1751 if (ifp == NULL) {
1752 return EINVAL;
1753 }
1754
1755 if (packets_out != 0) {
1756 atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
1757 }
1758 if (bytes_out != 0) {
1759 atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
1760 }
1761 if (errors_out != 0) {
1762 atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
1763 }
1764
1765 TOUCHLASTCHANGE(&ifp->if_lastchange);
1766
1767 if (ifp->if_data_threshold != 0) {
1768 ifnet_notify_data_threshold(ifp);
1769 }
1770
1771 return 0;
1772 }
1773
1774 errno_t
1775 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1776 {
1777 if (ifp == NULL) {
1778 return EINVAL;
1779 }
1780
1781 atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1782 atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1783 atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
1784 atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1785
1786 atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
1787 atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1788 atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
1789 atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1790
1791 atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
1792 atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1793 atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);
1794
1795 /* Touch the last change time. */
1796 TOUCHLASTCHANGE(&ifp->if_lastchange);
1797
1798 if (ifp->if_data_threshold != 0) {
1799 ifnet_notify_data_threshold(ifp);
1800 }
1801
1802 return 0;
1803 }
1804
1805 errno_t
1806 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1807 {
1808 if (ifp == NULL) {
1809 return EINVAL;
1810 }
1811
1812 atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
1813 atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
1814 atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
1815 atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);
1816
1817 atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
1818 atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
1819 atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
1820 atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);
1821
1822 atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
1823 atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
1824 atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);
1825
1826 if (ifp->if_data_threshold != 0) {
1827 ifnet_notify_data_threshold(ifp);
1828 }
1829
1830 return 0;
1831 }
1832
1833 errno_t
1834 ifnet_touch_lastchange(ifnet_t interface)
1835 {
1836 if (interface == NULL) {
1837 return EINVAL;
1838 }
1839
1840 TOUCHLASTCHANGE(&interface->if_lastchange);
1841
1842 return 0;
1843 }
1844
1845 errno_t
1846 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1847 {
1848 if (interface == NULL) {
1849 return EINVAL;
1850 }
1851
1852 *last_change = interface->if_data.ifi_lastchange;
1853 /* Crude conversion from uptime to calendar time */
1854 last_change->tv_sec += boottime_sec();
1855
1856 return 0;
1857 }
1858
1859 errno_t
1860 ifnet_touch_lastupdown(ifnet_t interface)
1861 {
1862 if (interface == NULL) {
1863 return EINVAL;
1864 }
1865
1866 TOUCHLASTCHANGE(&interface->if_lastupdown);
1867
1868 return 0;
1869 }
1870
1871 errno_t
1872 ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
1873 {
1874 if (interface == NULL) {
1875 return EINVAL;
1876 }
1877
1878 /* Calculate the delta */
1879 updown_delta->tv_sec = (time_t)net_uptime();
1880 if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
1881 updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
1882 }
1883 updown_delta->tv_usec = 0;
1884
1885 return 0;
1886 }
1887
1888 errno_t
1889 ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
1890 {
1891 return addresses == NULL ? EINVAL :
1892 ifnet_get_address_list_family(interface, addresses, 0);
1893 }
1894
1895 struct ifnet_addr_list {
1896 SLIST_ENTRY(ifnet_addr_list) ifal_le;
1897 struct ifaddr *ifal_ifa;
1898 };
1899
1900 errno_t
1901 ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
1902 sa_family_t family)
1903 {
1904 return ifnet_get_address_list_family_internal(interface, addresses,
1905 family, 0, M_NOWAIT, 0);
1906 }
1907
1908 errno_t
1909 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
1910 {
1911 return addresses == NULL ? EINVAL :
1912 ifnet_get_address_list_family_internal(interface, addresses,
1913 0, 0, M_NOWAIT, 1);
1914 }
1915
1916 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
1917
1918 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
1919
1920 __private_extern__ errno_t
1921 ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
1922 sa_family_t family, int detached, int how, int return_inuse_addrs)
1923 {
1924 SLIST_HEAD(, ifnet_addr_list) ifal_head;
1925 struct ifnet_addr_list *ifal, *ifal_tmp;
1926 struct ifnet *ifp;
1927 int count = 0;
1928 errno_t err = 0;
1929 int usecount = 0;
1930 int index = 0;
1931
1932 SLIST_INIT(&ifal_head);
1933
1934 if (addresses == NULL) {
1935 err = EINVAL;
1936 goto done;
1937 }
1938 *addresses = NULL;
1939
1940 if (detached) {
1941 /*
1942 * Interface has been detached, so skip the lookup
1943 * at ifnet_head and go directly to the inner loop.
1944 */
1945 ifp = interface;
1946 if (ifp == NULL) {
1947 err = EINVAL;
1948 goto done;
1949 }
1950 goto one;
1951 }
1952
1953 ifnet_head_lock_shared();
1954 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1955 if (interface != NULL && ifp != interface) {
1956 continue;
1957 }
1958 one:
1959 ifnet_lock_shared(ifp);
1960 if (interface == NULL || interface == ifp) {
1961 struct ifaddr *ifa;
1962 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1963 IFA_LOCK(ifa);
1964 if (family != 0 &&
1965 ifa->ifa_addr->sa_family != family) {
1966 IFA_UNLOCK(ifa);
1967 continue;
1968 }
1969 MALLOC(ifal, struct ifnet_addr_list *,
1970 sizeof(*ifal), M_TEMP, how);
1971 if (ifal == NULL) {
1972 IFA_UNLOCK(ifa);
1973 ifnet_lock_done(ifp);
1974 if (!detached) {
1975 ifnet_head_done();
1976 }
1977 err = ENOMEM;
1978 goto done;
1979 }
1980 ifal->ifal_ifa = ifa;
1981 IFA_ADDREF_LOCKED(ifa);
1982 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
1983 ++count;
1984 IFA_UNLOCK(ifa);
1985 }
1986 }
1987 ifnet_lock_done(ifp);
1988 if (detached) {
1989 break;
1990 }
1991 }
1992 if (!detached) {
1993 ifnet_head_done();
1994 }
1995
1996 if (count == 0) {
1997 err = ENXIO;
1998 goto done;
1999 }
2000 MALLOC(*addresses, ifaddr_t *, sizeof(ifaddr_t) * (count + 1),
2001 M_TEMP, how);
2002 if (*addresses == NULL) {
2003 err = ENOMEM;
2004 goto done;
2005 }
2006 bzero(*addresses, sizeof(ifaddr_t) * (count + 1));
2007
2008 done:
2009 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
2010 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
2011 if (err == 0) {
2012 if (return_inuse_addrs) {
2013 usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
2014 usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
2015 if (usecount) {
2016 (*addresses)[index] = ifal->ifal_ifa;
2017 index++;
2018 } else {
2019 IFA_REMREF(ifal->ifal_ifa);
2020 }
2021 } else {
2022 (*addresses)[--count] = ifal->ifal_ifa;
2023 }
2024 } else {
2025 IFA_REMREF(ifal->ifal_ifa);
2026 }
2027 FREE(ifal, M_TEMP);
2028 }
2029
2030 VERIFY(err == 0 || *addresses == NULL);
2031 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
2032 VERIFY(return_inuse_addrs == 1);
2033 FREE(*addresses, M_TEMP);
2034 err = ENXIO;
2035 }
2036 return err;
2037 }
2038
2039 void
2040 ifnet_free_address_list(ifaddr_t *addresses)
2041 {
2042 int i;
2043
2044 if (addresses == NULL) {
2045 return;
2046 }
2047
2048 for (i = 0; addresses[i] != NULL; i++) {
2049 IFA_REMREF(addresses[i]);
2050 }
2051
2052 FREE(addresses, M_TEMP);
2053 }
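/*
 * Illustrative sketch (not part of the original source): walking the
 * NULL-terminated array returned by ifnet_get_address_list() and then
 * dropping the references with ifnet_free_address_list(). Here the
 * loop merely counts the addresses.
 *
 *    ifaddr_t *addrs;
 *    int count = 0;
 *
 *    if (ifnet_get_address_list(ifp, &addrs) == 0) {
 *        while (addrs[count] != NULL) {
 *            count++;
 *        }
 *        ifnet_free_address_list(addrs);
 *    }
 */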
2054
2055 void *
2056 ifnet_lladdr(ifnet_t interface)
2057 {
2058 struct ifaddr *ifa;
2059 void *lladdr;
2060
2061 if (interface == NULL) {
2062 return NULL;
2063 }
2064
2065 /*
2066 * if_lladdr points to the permanent link address of
2067 * the interface and it never gets deallocated; internal
2068 * code should simply use IF_LLADDR() for performance.
2069 */
2070 ifa = interface->if_lladdr;
2071 IFA_LOCK_SPIN(ifa);
2072 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
2073 IFA_UNLOCK(ifa);
2074
2075 return lladdr;
2076 }
2077
2078 errno_t
2079 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
2080 size_t *out_len)
2081 {
2082 if (interface == NULL || addr == NULL || out_len == NULL) {
2083 return EINVAL;
2084 }
2085
2086 *out_len = interface->if_broadcast.length;
2087
2088 if (buffer_len < interface->if_broadcast.length) {
2089 return EMSGSIZE;
2090 }
2091
2092 if (interface->if_broadcast.length == 0) {
2093 return ENXIO;
2094 }
2095
2096 if (interface->if_broadcast.length <=
2097 sizeof(interface->if_broadcast.u.buffer)) {
2098 bcopy(interface->if_broadcast.u.buffer, addr,
2099 interface->if_broadcast.length);
2100 } else {
2101 bcopy(interface->if_broadcast.u.ptr, addr,
2102 interface->if_broadcast.length);
2103 }
2104
2105 return 0;
2106 }
2107
2108 static errno_t
2109 ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
2110 size_t lladdr_len, kauth_cred_t *credp)
2111 {
2112 const u_int8_t *bytes;
2113 size_t bytes_len;
2114 struct ifaddr *ifa;
2115 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
2116 errno_t error = 0;
2117
2118 /*
2119 * Make sure to accommodate the largest possible
2120 * size of SA(if_lladdr)->sa_len.
2121 */
2122 _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
2123
2124 if (interface == NULL || lladdr == NULL) {
2125 return EINVAL;
2126 }
2127
2128 ifa = interface->if_lladdr;
2129 IFA_LOCK_SPIN(ifa);
2130 bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
2131 IFA_UNLOCK(ifa);
2132
2133 bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
2134 if (bytes_len != lladdr_len) {
2135 bzero(lladdr, lladdr_len);
2136 error = EMSGSIZE;
2137 } else {
2138 bcopy(bytes, lladdr, bytes_len);
2139 }
2140
2141 return error;
2142 }
2143
2144 errno_t
2145 ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
2146 {
2147 return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2148 NULL);
2149 }
2150
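/*
 * Usage sketch, kept as a comment: copying the interface link-layer
 * address.  As ifnet_lladdr_copy_bytes_internal() above shows, the
 * destination length must match the address length exactly, otherwise the
 * buffer is zeroed and EMSGSIZE is returned; an Ethernet-style six-byte
 * address is assumed here.
 *
 *        static errno_t example_copy_mac(ifnet_t ifp,
 *            u_char mac[ETHER_ADDR_LEN])
 *        {
 *                // EMSGSIZE if the interface address is not exactly 6 bytes
 *                return ifnet_lladdr_copy_bytes(ifp, mac, ETHER_ADDR_LEN);
 *        }
 */
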
2151 errno_t
2152 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
2153 {
2154 #if CONFIG_MACF
2155 kauth_cred_t cred;
2156 net_thread_marks_t marks;
2157 #endif
2158 kauth_cred_t *credp;
2159 errno_t error;
2160
2161 credp = NULL;
2162 #if CONFIG_MACF
2163 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
2164 cred = kauth_cred_proc_ref(current_proc());
2165 credp = &cred;
2166 #else
2167 credp = NULL;
2168 #endif
2169
2170 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
2171 credp);
2172
2173 #if CONFIG_MACF
2174 kauth_cred_unref(credp);
2175 net_thread_marks_pop(marks);
2176 #endif
2177
2178 return error;
2179 }
2180
2181 static errno_t
2182 ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
2183 size_t lladdr_len, u_char new_type, int apply_type)
2184 {
2185 struct ifaddr *ifa;
2186 errno_t error = 0;
2187
2188 if (interface == NULL) {
2189 return EINVAL;
2190 }
2191
2192 ifnet_head_lock_shared();
2193 ifnet_lock_exclusive(interface);
2194 if (lladdr_len != 0 &&
2195 (lladdr_len != interface->if_addrlen || lladdr == NULL)) {
2196 ifnet_lock_done(interface);
2197 ifnet_head_done();
2198 return EINVAL;
2199 }
2200 ifa = ifnet_addrs[interface->if_index - 1];
2201 if (ifa != NULL) {
2202 struct sockaddr_dl *sdl;
2203
2204 IFA_LOCK_SPIN(ifa);
2205 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2206 if (lladdr_len != 0) {
2207 bcopy(lladdr, LLADDR(sdl), lladdr_len);
2208 } else {
2209 bzero(LLADDR(sdl), interface->if_addrlen);
2210 }
2211 /* the lladdr_len check against if_addrlen above ensures it fits in a u_char */
2212 sdl->sdl_alen = (u_char)lladdr_len;
2213
2214 if (apply_type) {
2215 sdl->sdl_type = new_type;
2216 }
2217 IFA_UNLOCK(ifa);
2218 } else {
2219 error = ENXIO;
2220 }
2221 ifnet_lock_done(interface);
2222 ifnet_head_done();
2223
2224 /* Generate a kernel event */
2225 if (error == 0) {
2226 intf_event_enqueue_nwk_wq_entry(interface, NULL,
2227 INTF_EVENT_CODE_LLADDR_UPDATE);
2228 dlil_post_msg(interface, KEV_DL_SUBCLASS,
2229 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
2230 }
2231
2232 return error;
2233 }
2234
2235 errno_t
2236 ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
2237 {
2238 return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
2239 }
2240
2241 errno_t
2242 ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
2243 size_t lladdr_len, u_char type)
2244 {
2245 return ifnet_set_lladdr_internal(interface, lladdr,
2246 lladdr_len, type, 1);
2247 }
2248
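/*
 * Usage sketch, kept as a comment: programming a new Ethernet MAC address.
 * The supplied length must equal the interface's if_addrlen (6 for
 * Ethernet) or the call fails with EINVAL; on success the
 * KEV_DL_LINK_ADDRESS_CHANGED event is posted, as above.  The helper name
 * is hypothetical.
 *
 *        static errno_t example_set_mac(ifnet_t ifp,
 *            const u_char mac[ETHER_ADDR_LEN])
 *        {
 *                return ifnet_set_lladdr(ifp, mac, ETHER_ADDR_LEN);
 *        }
 */
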
2249 errno_t
2250 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2251 ifmultiaddr_t *ifmap)
2252 {
2253 if (interface == NULL || maddr == NULL) {
2254 return EINVAL;
2255 }
2256
2257 /* Don't let users screw up protocols' entries. */
2258 switch (maddr->sa_family) {
2259 case AF_LINK: {
2260 const struct sockaddr_dl *sdl =
2261 (const struct sockaddr_dl *)(uintptr_t)maddr;
2262 if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
2263 (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
2264 offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
2265 return EINVAL;
2266 }
2267 break;
2268 }
2269 case AF_UNSPEC:
2270 if (maddr->sa_len < ETHER_ADDR_LEN +
2271 offsetof(struct sockaddr, sa_data)) {
2272 return EINVAL;
2273 }
2274 break;
2275 default:
2276 return EINVAL;
2277 }
2278
2279 return if_addmulti_anon(interface, maddr, ifmap);
2280 }
2281
2282 errno_t
2283 ifnet_remove_multicast(ifmultiaddr_t ifma)
2284 {
2285 struct sockaddr *maddr;
2286
2287 if (ifma == NULL) {
2288 return EINVAL;
2289 }
2290
2291 maddr = ifma->ifma_addr;
2292 /* Don't let users screw up protocols' entries. */
2293 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2294 return EINVAL;
2295 }
2296
2297 return if_delmulti_anon(ifma->ifma_ifp, maddr);
2298 }
2299
2300 errno_t
2301 ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
2302 {
2303 int count = 0;
2304 int cmax = 0;
2305 struct ifmultiaddr *addr;
2306
2307 if (ifp == NULL || addresses == NULL) {
2308 return EINVAL;
2309 }
2310
2311 ifnet_lock_shared(ifp);
2312 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2313 cmax++;
2314 }
2315
2316 MALLOC(*addresses, ifmultiaddr_t *, sizeof(ifmultiaddr_t) * (cmax + 1),
2317 M_TEMP, M_WAITOK);
2318 if (*addresses == NULL) {
2319 ifnet_lock_done(ifp);
2320 return ENOMEM;
2321 }
2322
2323 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2324 if (count + 1 > cmax) {
2325 break;
2326 }
2327 (*addresses)[count] = (ifmultiaddr_t)addr;
2328 ifmaddr_reference((*addresses)[count]);
2329 count++;
2330 }
2331 (*addresses)[cmax] = NULL;
2332 ifnet_lock_done(ifp);
2333
2334 return 0;
2335 }
2336
2337 void
2338 ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2339 {
2340 int i;
2341
2342 if (addresses == NULL) {
2343 return;
2344 }
2345
2346 for (i = 0; addresses[i] != NULL; i++) {
2347 ifmaddr_release(addresses[i]);
2348 }
2349
2350 FREE(addresses, M_TEMP);
2351 }
2352
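/*
 * Usage sketch, kept as a comment: walking an interface's multicast
 * memberships with the get/free pair above.  The helper name and the
 * logging are assumptions.
 *
 *        static void example_dump_multicast(ifnet_t ifp)
 *        {
 *                ifmultiaddr_t *maddrs = NULL;
 *                struct sockaddr_storage ss;
 *                int i;
 *
 *                if (ifnet_get_multicast_list(ifp, &maddrs) != 0)
 *                        return;
 *                for (i = 0; maddrs[i] != NULL; i++) {
 *                        if (ifmaddr_address(maddrs[i], (struct sockaddr *)&ss,
 *                            sizeof(ss)) == 0)
 *                                printf("multicast family %u\n", ss.ss_family);
 *                }
 *                ifnet_free_multicast_list(maddrs);      // drops every reference
 *        }
 */
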
2353 errno_t
2354 ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2355 {
2356 struct ifnet *ifp;
2357 size_t namelen;
2358
2359 if (ifname == NULL) {
2360 return EINVAL;
2361 }
2362
2363 namelen = strlen(ifname);
2364
2365 *ifpp = NULL;
2366
2367 ifnet_head_lock_shared();
2368 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2369 struct ifaddr *ifa;
2370 struct sockaddr_dl *ll_addr;
2371
2372 ifa = ifnet_addrs[ifp->if_index - 1];
2373 if (ifa == NULL) {
2374 continue;
2375 }
2376
2377 IFA_LOCK(ifa);
2378 ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2379
2380 if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
2381 ifname, ll_addr->sdl_nlen) == 0) {
2382 IFA_UNLOCK(ifa);
2383 *ifpp = ifp;
2384 ifnet_reference(*ifpp);
2385 break;
2386 }
2387 IFA_UNLOCK(ifa);
2388 }
2389 ifnet_head_done();
2390
2391 return (ifp == NULL) ? ENXIO : 0;
2392 }
2393
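/*
 * Usage sketch, kept as a comment: ifnet_find_by_name() returns the
 * interface with an extra reference, so the caller balances it with
 * ifnet_release().  The "en0" name is only illustrative, and the
 * ifnet_index() KPI from net/kpi_interface.h is assumed.
 *
 *        static void example_lookup_en0(void)
 *        {
 *                ifnet_t ifp = NULL;
 *
 *                if (ifnet_find_by_name("en0", &ifp) == 0) {
 *                        printf("en0 has index %u\n", ifnet_index(ifp));
 *                        ifnet_release(ifp);     // drop the reference taken above
 *                }
 *        }
 */
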
2394 errno_t
2395 ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2396 {
2397 return ifnet_list_get_common(family, FALSE, list, count);
2398 }
2399
2400 __private_extern__ errno_t
2401 ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2402 {
2403 return ifnet_list_get_common(family, TRUE, list, count);
2404 }
2405
2406 struct ifnet_list {
2407 SLIST_ENTRY(ifnet_list) ifl_le;
2408 struct ifnet *ifl_ifp;
2409 };
2410
2411 static errno_t
2412 ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
2413 u_int32_t *count)
2414 {
2415 #pragma unused(get_all)
2416 SLIST_HEAD(, ifnet_list) ifl_head;
2417 struct ifnet_list *ifl, *ifl_tmp;
2418 struct ifnet *ifp;
2419 int cnt = 0;
2420 errno_t err = 0;
2421
2422 SLIST_INIT(&ifl_head);
2423
2424 if (list == NULL || count == NULL) {
2425 err = EINVAL;
2426 goto done;
2427 }
2428 *count = 0;
2429 *list = NULL;
2430
2431 ifnet_head_lock_shared();
2432 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2433 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2434 MALLOC(ifl, struct ifnet_list *, sizeof(*ifl),
2435 M_TEMP, M_NOWAIT);
2436 if (ifl == NULL) {
2437 ifnet_head_done();
2438 err = ENOMEM;
2439 goto done;
2440 }
2441 ifl->ifl_ifp = ifp;
2442 ifnet_reference(ifp);
2443 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2444 ++cnt;
2445 }
2446 }
2447 ifnet_head_done();
2448
2449 if (cnt == 0) {
2450 err = ENXIO;
2451 goto done;
2452 }
2453
2454 MALLOC(*list, ifnet_t *, sizeof(ifnet_t) * (cnt + 1),
2455 M_TEMP, M_NOWAIT);
2456 if (*list == NULL) {
2457 err = ENOMEM;
2458 goto done;
2459 }
2460 bzero(*list, sizeof(ifnet_t) * (cnt + 1));
2461 *count = cnt;
2462
2463 done:
2464 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2465 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2466 if (err == 0) {
2467 (*list)[--cnt] = ifl->ifl_ifp;
2468 } else {
2469 ifnet_release(ifl->ifl_ifp);
2470 }
2471 FREE(ifl, M_TEMP);
2472 }
2473
2474 return err;
2475 }
2476
2477 void
2478 ifnet_list_free(ifnet_t *interfaces)
2479 {
2480 int i;
2481
2482 if (interfaces == NULL) {
2483 return;
2484 }
2485
2486 for (i = 0; interfaces[i]; i++) {
2487 ifnet_release(interfaces[i]);
2488 }
2489
2490 FREE(interfaces, M_TEMP);
2491 }
2492
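/*
 * Usage sketch, kept as a comment: the list-get/list-free pairing.
 * ifnet_list_get() references every interface it returns and
 * ifnet_list_free() releases them and frees the array; the
 * ifnet_name()/ifnet_unit() accessors from net/kpi_interface.h are assumed.
 *
 *        static void example_list_ethernet(void)
 *        {
 *                ifnet_t *list = NULL;
 *                u_int32_t count = 0, i;
 *
 *                if (ifnet_list_get(IFNET_FAMILY_ETHERNET, &list, &count) != 0)
 *                        return;
 *                for (i = 0; i < count; i++)
 *                        printf("%s%u\n", ifnet_name(list[i]), ifnet_unit(list[i]));
 *                ifnet_list_free(list);
 *        }
 */
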
2493 /*************************************************************************/
2494 /* ifaddr_t accessors */
2495 /*************************************************************************/
2496
2497 errno_t
2498 ifaddr_reference(ifaddr_t ifa)
2499 {
2500 if (ifa == NULL) {
2501 return EINVAL;
2502 }
2503
2504 IFA_ADDREF(ifa);
2505 return 0;
2506 }
2507
2508 errno_t
2509 ifaddr_release(ifaddr_t ifa)
2510 {
2511 if (ifa == NULL) {
2512 return EINVAL;
2513 }
2514
2515 IFA_REMREF(ifa);
2516 return 0;
2517 }
2518
2519 sa_family_t
2520 ifaddr_address_family(ifaddr_t ifa)
2521 {
2522 sa_family_t family = 0;
2523
2524 if (ifa != NULL) {
2525 IFA_LOCK_SPIN(ifa);
2526 if (ifa->ifa_addr != NULL) {
2527 family = ifa->ifa_addr->sa_family;
2528 }
2529 IFA_UNLOCK(ifa);
2530 }
2531 return family;
2532 }
2533
2534 errno_t
2535 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2536 {
2537 u_int32_t copylen;
2538
2539 if (ifa == NULL || out_addr == NULL) {
2540 return EINVAL;
2541 }
2542
2543 IFA_LOCK_SPIN(ifa);
2544 if (ifa->ifa_addr == NULL) {
2545 IFA_UNLOCK(ifa);
2546 return ENOTSUP;
2547 }
2548
2549 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2550 ifa->ifa_addr->sa_len : addr_size;
2551 bcopy(ifa->ifa_addr, out_addr, copylen);
2552
2553 if (ifa->ifa_addr->sa_len > addr_size) {
2554 IFA_UNLOCK(ifa);
2555 return EMSGSIZE;
2556 }
2557
2558 IFA_UNLOCK(ifa);
2559 return 0;
2560 }
2561
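/*
 * Usage sketch, kept as a comment: the address is copied even when it is
 * truncated, with EMSGSIZE reporting the truncation, so a sockaddr_storage
 * destination (always large enough) is the simplest pattern.  The helper
 * name is hypothetical.
 *
 *        static sa_family_t example_addr_family(ifaddr_t ifa)
 *        {
 *                struct sockaddr_storage ss;
 *
 *                if (ifaddr_address(ifa, (struct sockaddr *)&ss,
 *                    sizeof(ss)) != 0)
 *                        return AF_UNSPEC;
 *                return ss.ss_family;
 *        }
 */
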
2562 errno_t
2563 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2564 {
2565 u_int32_t copylen;
2566
2567 if (ifa == NULL || out_addr == NULL) {
2568 return EINVAL;
2569 }
2570
2571 IFA_LOCK_SPIN(ifa);
2572 if (ifa->ifa_dstaddr == NULL) {
2573 IFA_UNLOCK(ifa);
2574 return ENOTSUP;
2575 }
2576
2577 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2578 ifa->ifa_dstaddr->sa_len : addr_size;
2579 bcopy(ifa->ifa_dstaddr, out_addr, copylen);
2580
2581 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2582 IFA_UNLOCK(ifa);
2583 return EMSGSIZE;
2584 }
2585
2586 IFA_UNLOCK(ifa);
2587 return 0;
2588 }
2589
2590 errno_t
2591 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2592 {
2593 u_int32_t copylen;
2594
2595 if (ifa == NULL || out_addr == NULL) {
2596 return EINVAL;
2597 }
2598
2599 IFA_LOCK_SPIN(ifa);
2600 if (ifa->ifa_netmask == NULL) {
2601 IFA_UNLOCK(ifa);
2602 return ENOTSUP;
2603 }
2604
2605 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2606 ifa->ifa_netmask->sa_len : addr_size;
2607 bcopy(ifa->ifa_netmask, out_addr, copylen);
2608
2609 if (ifa->ifa_netmask->sa_len > addr_size) {
2610 IFA_UNLOCK(ifa);
2611 return EMSGSIZE;
2612 }
2613
2614 IFA_UNLOCK(ifa);
2615 return 0;
2616 }
2617
2618 ifnet_t
2619 ifaddr_ifnet(ifaddr_t ifa)
2620 {
2621 struct ifnet *ifp;
2622
2623 if (ifa == NULL) {
2624 return NULL;
2625 }
2626
2627 /* ifa_ifp is set once at creation time; it is never changed */
2628 ifp = ifa->ifa_ifp;
2629
2630 return ifp;
2631 }
2632
2633 ifaddr_t
2634 ifaddr_withaddr(const struct sockaddr *address)
2635 {
2636 if (address == NULL) {
2637 return NULL;
2638 }
2639
2640 return ifa_ifwithaddr(address);
2641 }
2642
2643 ifaddr_t
2644 ifaddr_withdstaddr(const struct sockaddr *address)
2645 {
2646 if (address == NULL) {
2647 return NULL;
2648 }
2649
2650 return ifa_ifwithdstaddr(address);
2651 }
2652
2653 ifaddr_t
2654 ifaddr_withnet(const struct sockaddr *net)
2655 {
2656 if (net == NULL) {
2657 return NULL;
2658 }
2659
2660 return ifa_ifwithnet(net);
2661 }
2662
2663 ifaddr_t
2664 ifaddr_withroute(int flags, const struct sockaddr *destination,
2665 const struct sockaddr *gateway)
2666 {
2667 if (destination == NULL || gateway == NULL) {
2668 return NULL;
2669 }
2670
2671 return ifa_ifwithroute(flags, destination, gateway);
2672 }
2673
2674 ifaddr_t
2675 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2676 {
2677 if (addr == NULL || interface == NULL) {
2678 return NULL;
2679 }
2680
2681 return ifaof_ifpforaddr_select(addr, interface);
2682 }
2683
2684 errno_t
2685 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2686 {
2687 if (ifmaddr == NULL) {
2688 return EINVAL;
2689 }
2690
2691 IFMA_ADDREF(ifmaddr);
2692 return 0;
2693 }
2694
2695 errno_t
2696 ifmaddr_release(ifmultiaddr_t ifmaddr)
2697 {
2698 if (ifmaddr == NULL) {
2699 return EINVAL;
2700 }
2701
2702 IFMA_REMREF(ifmaddr);
2703 return 0;
2704 }
2705
2706 errno_t
2707 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2708 u_int32_t addr_size)
2709 {
2710 u_int32_t copylen;
2711
2712 if (ifma == NULL || out_addr == NULL) {
2713 return EINVAL;
2714 }
2715
2716 IFMA_LOCK(ifma);
2717 if (ifma->ifma_addr == NULL) {
2718 IFMA_UNLOCK(ifma);
2719 return ENOTSUP;
2720 }
2721
2722 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2723 ifma->ifma_addr->sa_len : addr_size);
2724 bcopy(ifma->ifma_addr, out_addr, copylen);
2725
2726 if (ifma->ifma_addr->sa_len > addr_size) {
2727 IFMA_UNLOCK(ifma);
2728 return EMSGSIZE;
2729 }
2730 IFMA_UNLOCK(ifma);
2731 return 0;
2732 }
2733
2734 errno_t
2735 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2736 u_int32_t addr_size)
2737 {
2738 struct ifmultiaddr *ifma_ll;
2739
2740 if (ifma == NULL || out_addr == NULL) {
2741 return EINVAL;
2742 }
2743 if ((ifma_ll = ifma->ifma_ll) == NULL) {
2744 return ENOTSUP;
2745 }
2746
2747 return ifmaddr_address(ifma_ll, out_addr, addr_size);
2748 }
2749
2750 ifnet_t
2751 ifmaddr_ifnet(ifmultiaddr_t ifma)
2752 {
2753 return (ifma == NULL) ? NULL : ifma->ifma_ifp;
2754 }
2755
2756 /**************************************************************************/
2757 /* interface cloner */
2758 /**************************************************************************/
2759
2760 errno_t
2761 ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
2762 if_clone_t *ifcloner)
2763 {
2764 errno_t error = 0;
2765 struct if_clone *ifc = NULL;
2766 size_t namelen;
2767
2768 if (cloner_params == NULL || ifcloner == NULL ||
2769 cloner_params->ifc_name == NULL ||
2770 cloner_params->ifc_create == NULL ||
2771 cloner_params->ifc_destroy == NULL ||
2772 (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
2773 error = EINVAL;
2774 goto fail;
2775 }
2776
2777 if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
2778 printf("%s: already a cloner for %s\n", __func__,
2779 cloner_params->ifc_name);
2780 error = EEXIST;
2781 goto fail;
2782 }
2783
2784 /* Make room for name string */
2785 ifc = _MALLOC(sizeof(struct if_clone) + IFNAMSIZ + 1, M_CLONE,
2786 M_WAITOK | M_ZERO);
2787 if (ifc == NULL) {
2788 printf("%s: _MALLOC failed\n", __func__);
2789 error = ENOBUFS;
2790 goto fail;
2791 }
2792 strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
2793 ifc->ifc_name = (char *)(ifc + 1);
2794 ifc->ifc_namelen = namelen;
2795 ifc->ifc_maxunit = IF_MAXUNIT;
2796 ifc->ifc_create = cloner_params->ifc_create;
2797 ifc->ifc_destroy = cloner_params->ifc_destroy;
2798
2799 error = if_clone_attach(ifc);
2800 if (error != 0) {
2801 printf("%s: if_clone_attach failed %d\n", __func__, error);
2802 goto fail;
2803 }
2804 *ifcloner = ifc;
2805
2806 return 0;
2807 fail:
2808 if (ifc != NULL) {
2809 FREE(ifc, M_CLONE);
2810 }
2811 return error;
2812 }
2813
2814 errno_t
2815 ifnet_clone_detach(if_clone_t ifcloner)
2816 {
2817 errno_t error = 0;
2818 struct if_clone *ifc = ifcloner;
2819
2820 if (ifc == NULL || ifc->ifc_name == NULL) {
2821 return EINVAL;
2822 }
2823
2824 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2825 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2826 error = EINVAL;
2827 goto fail;
2828 }
2829
2830 if_clone_detach(ifc);
2831
2832 FREE(ifc, M_CLONE);
2833
2834 fail:
2835 return error;
2836 }
2837
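/*
 * Usage sketch, kept as a comment: registering an interface cloner.  The
 * callback names are placeholders, and their prototypes are assumed to
 * match the ifnet_clone_create_func/ifnet_clone_destroy_func types paired
 * with struct ifnet_clone_params in net/kpi_interface.h.
 *
 *        static if_clone_t example_cloner;
 *
 *        static errno_t example_clone_create(if_clone_t ifc, u_int32_t unit,
 *            void *params);
 *        static errno_t example_clone_destroy(ifnet_t ifp);
 *
 *        static errno_t example_register_cloner(void)
 *        {
 *                struct ifnet_clone_params p = {
 *                        .ifc_name    = "example",
 *                        .ifc_create  = example_clone_create,
 *                        .ifc_destroy = example_clone_destroy,
 *                };
 *
 *                return ifnet_clone_attach(&p, &example_cloner);
 *        }
 */
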
2838 /**************************************************************************/
2839 /* misc */
2840 /**************************************************************************/
2841
2842 errno_t
2843 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
2844 u_int32_t flags, u_int8_t *bitfield)
2845 {
2846 u_int32_t ifindex;
2847
2848 if (bitfield == NULL) {
2849 return EINVAL;
2850 }
2851
2852 switch (protocol) {
2853 case PF_UNSPEC:
2854 case PF_INET:
2855 case PF_INET6:
2856 break;
2857 default:
2858 return EINVAL;
2859 }
2860
2861 /* the caller's bit string must be large enough to hold all 16-bit port values */
2862 bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
2863
2864 if_ports_used_update_wakeuuid(ifp);
2865
2866
2867 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2868
2869 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
2870 udp_get_ports_used(ifindex, protocol, flags,
2871 bitfield);
2872 }
2873
2874 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
2875 tcp_get_ports_used(ifindex, protocol, flags,
2876 bitfield);
2877 }
2878
2879 return 0;
2880 }
2881
2882 errno_t
2883 ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
2884 {
2885 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
2886 return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
2887 bitfield);
2888 }
2889
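/*
 * Usage sketch, kept as a comment: querying the local ports in use on an
 * interface.  The bit string must cover all 65536 ports (8192 bytes);
 * bit_test() from <sys/bitstring.h> and the helper name are assumptions.
 *
 *        static boolean_t example_port_in_use(ifnet_t ifp, u_int16_t port)
 *        {
 *                u_int8_t *bitfield = NULL;
 *                boolean_t in_use = FALSE;
 *
 *                // one bit per possible 16-bit port: 8192 bytes, so allocate
 *                // rather than burn that much kernel stack
 *                MALLOC(bitfield, u_int8_t *, bitstr_size(65536), M_TEMP,
 *                    M_WAITOK | M_ZERO);
 *                if (bitfield == NULL)
 *                        return FALSE;
 *                if (ifnet_get_local_ports(ifp, bitfield) == 0)
 *                        in_use = bit_test(bitfield, port) ? TRUE : FALSE;
 *                FREE(bitfield, M_TEMP);
 *                return in_use;
 *        }
 */
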
2890 errno_t
2891 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
2892 int lqm, int npm, u_int8_t srvinfo[48])
2893 {
2894 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
2895 return EINVAL;
2896 }
2897 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
2898 return EINVAL;
2899 }
2900 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
2901 return EINVAL;
2902 }
2903
2904 return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
2905 }
2906
2907 errno_t
2908 ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
2909 int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
2910 {
2911 /* Support older version if sdl is NULL */
2912 if (sdl == NULL) {
2913 return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
2914 }
2915
2916 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
2917 return EINVAL;
2918 }
2919 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
2920 return EINVAL;
2921 }
2922
2923 if (sa->sa_family != AF_INET6) {
2924 return EINVAL;
2925 }
2926
2927 if (sdl->sdl_family != AF_LINK) {
2928 return EINVAL;
2929 }
2930
2931 return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
2932 }
2933
2934 errno_t
2935 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
2936 {
2937 if (ifp == NULL || sa == NULL) {
2938 return EINVAL;
2939 }
2940 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
2941 return EINVAL;
2942 }
2943 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
2944 return EINVAL;
2945 }
2946
2947 dlil_node_absent(ifp, sa);
2948 return 0;
2949 }
2950
2951 errno_t
2952 ifnet_notice_master_elected(ifnet_t ifp)
2953 {
2954 if (ifp == NULL) {
2955 return EINVAL;
2956 }
2957
2958 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
2959 return 0;
2960 }
2961
2962 errno_t
2963 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
2964 {
2965 #pragma unused(val)
2966
2967 m_do_tx_compl_callback(m, ifp);
2968
2969 return 0;
2970 }
2971
2972 errno_t
2973 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
2974 {
2975 m_do_tx_compl_callback(m, ifp);
2976
2977 return 0;
2978 }
2979
2980 errno_t
2981 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
2982 u_int8_t info[IFNET_MODARGLEN])
2983 {
2984 if (ifp == NULL || modid == NULL) {
2985 return EINVAL;
2986 }
2987
2988 dlil_report_issues(ifp, modid, info);
2989 return 0;
2990 }
2991
2992 errno_t
2993 ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
2994 {
2995 ifnet_t odifp = NULL;
2996
2997 if (ifp == NULL) {
2998 return EINVAL;
2999 } else if (!ifnet_is_attached(ifp, 1)) {
3000 return ENXIO;
3001 }
3002
3003 ifnet_lock_exclusive(ifp);
3004 odifp = ifp->if_delegated.ifp;
3005 if (odifp != NULL && odifp == delegated_ifp) {
3006 /* delegate info is unchanged; nothing more to do */
3007 ifnet_lock_done(ifp);
3008 goto done;
3009 }
3010 /* Test whether this delegate interface would cause a loop */
3011 ifnet_t delegate_check_ifp = delegated_ifp;
3012 while (delegate_check_ifp != NULL) {
3013 if (delegate_check_ifp == ifp) {
3014 printf("%s: delegating to %s would cause a loop\n",
3015 ifp->if_xname, delegated_ifp->if_xname);
3016 ifnet_lock_done(ifp);
3017 goto done;
3018 }
3019 delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
3020 }
3021 bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
3022 if (delegated_ifp != NULL && ifp != delegated_ifp) {
3023 uint32_t set_eflags;
3024
3025 ifp->if_delegated.ifp = delegated_ifp;
3026 ifnet_reference(delegated_ifp);
3027 ifp->if_delegated.type = delegated_ifp->if_type;
3028 ifp->if_delegated.family = delegated_ifp->if_family;
3029 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
3030 ifp->if_delegated.expensive =
3031 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
3032 ifp->if_delegated.constrained =
3033 delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;
3034
3035 /*
3036 * Propagate flags related to ECN from the delegated interface
3037 */
3038 if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
3039 set_eflags = (delegated_ifp->if_eflags &
3040 (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
3041 if_set_eflags(ifp, set_eflags);
3042 printf("%s: is now delegating %s (type 0x%x, family %u, "
3043 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
3044 delegated_ifp->if_type, delegated_ifp->if_family,
3045 delegated_ifp->if_subfamily);
3046 }
3047
3048 ifnet_lock_done(ifp);
3049
3050 if (odifp != NULL) {
3051 if (odifp != delegated_ifp) {
3052 printf("%s: is no longer delegating %s\n",
3053 ifp->if_xname, odifp->if_xname);
3054 }
3055 ifnet_release(odifp);
3056 }
3057
3058 /* Generate a kernel event */
3059 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);
3060
3061 done:
3062 /* Release the io ref count */
3063 ifnet_decr_iorefcnt(ifp);
3064
3065 return 0;
3066 }
3067
3068 errno_t
3069 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3070 {
3071 if (ifp == NULL || pdelegated_ifp == NULL) {
3072 return EINVAL;
3073 } else if (!ifnet_is_attached(ifp, 1)) {
3074 return ENXIO;
3075 }
3076
3077 ifnet_lock_shared(ifp);
3078 if (ifp->if_delegated.ifp != NULL) {
3079 ifnet_reference(ifp->if_delegated.ifp);
3080 }
3081 *pdelegated_ifp = ifp->if_delegated.ifp;
3082 ifnet_lock_done(ifp);
3083
3084 /* Release the io ref count */
3085 ifnet_decr_iorefcnt(ifp);
3086
3087 return 0;
3088 }
3089
3090 errno_t
3091 ifnet_get_keepalive_offload_frames(ifnet_t ifp,
3092 struct ifnet_keepalive_offload_frame *frames_array,
3093 u_int32_t frames_array_count, size_t frame_data_offset,
3094 u_int32_t *used_frames_count)
3095 {
3096 u_int32_t i;
3097
3098 if (frames_array == NULL || used_frames_count == NULL ||
3099 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3100 return EINVAL;
3101 }
3102
3103 /* frame_data_offset should be 32-bit aligned */
3104 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
3105 frame_data_offset) {
3106 return EINVAL;
3107 }
3108
3109 *used_frames_count = 0;
3110 if (frames_array_count == 0) {
3111 return 0;
3112 }
3113
3114 /* Keep-alive offload not required for CLAT interface */
3115 if (IS_INTF_CLAT46(ifp)) {
3116 return 0;
3117 }
3118
3119 for (i = 0; i < frames_array_count; i++) {
3120 struct ifnet_keepalive_offload_frame *frame = frames_array + i;
3121
3122 bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
3123 }
3124
3125 /* First collect IPsec related keep-alive frames */
3126 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
3127 frames_array, frames_array_count, frame_data_offset);
3128
3129 /* If there is more room, collect other UDP keep-alive frames */
3130 if (*used_frames_count < frames_array_count) {
3131 udp_fill_keepalive_offload_frames(ifp, frames_array,
3132 frames_array_count, frame_data_offset,
3133 used_frames_count);
3134 }
3135
3136 /* If there is more room, collect other TCP keep-alive frames */
3137 if (*used_frames_count < frames_array_count) {
3138 tcp_fill_keepalive_offload_frames(ifp, frames_array,
3139 frames_array_count, frame_data_offset,
3140 used_frames_count);
3141 }
3142
3143 VERIFY(*used_frames_count <= frames_array_count);
3144
3145 return 0;
3146 }
3147
3148 errno_t
3149 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3150 struct ifnet_keepalive_offload_frame *frame)
3151 {
3152 errno_t error = 0;
3153
3154 if (ifp == NULL || frame == NULL) {
3155 return EINVAL;
3156 }
3157
3158 if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3159 return EINVAL;
3160 }
3161 if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3162 frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3163 return EINVAL;
3164 }
3165 if (frame->local_port == 0 || frame->remote_port == 0) {
3166 return EINVAL;
3167 }
3168
3169 error = tcp_notify_kao_timeout(ifp, frame);
3170
3171 return error;
3172 }
3173
3174 errno_t
3175 ifnet_link_status_report(ifnet_t ifp, const void *buffer,
3176 size_t buffer_len)
3177 {
3178 struct if_link_status *ifsr;
3179 errno_t err = 0;
3180
3181 if (ifp == NULL || buffer == NULL || buffer_len == 0) {
3182 return EINVAL;
3183 }
3184
3185 ifnet_lock_shared(ifp);
3186
3187 /*
3188 * Make sure that the interface is attached; there is no need to
3189 * take a reference because this call comes from the driver.
3190 */
3191 if (!ifnet_is_attached(ifp, 0)) {
3192 ifnet_lock_done(ifp);
3193 return ENXIO;
3194 }
3195
3196 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
3197
3198 /*
3199 * If this is the first status report then allocate memory
3200 * to store it.
3201 */
3202 if (ifp->if_link_status == NULL) {
3203 MALLOC(ifp->if_link_status, struct if_link_status *,
3204 sizeof(struct if_link_status), M_TEMP, M_ZERO);
3205 if (ifp->if_link_status == NULL) {
3206 err = ENOMEM;
3207 goto done;
3208 }
3209 }
3210
3211 ifsr = __DECONST(struct if_link_status *, buffer);
3212
3213 if (ifp->if_type == IFT_CELLULAR) {
3214 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
3215 /*
3216 * Currently we have a single version -- if it does
3217 * not match, just return.
3218 */
3219 if (ifsr->ifsr_version !=
3220 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
3221 err = ENOTSUP;
3222 goto done;
3223 }
3224
3225 if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
3226 err = EINVAL;
3227 goto done;
3228 }
3229
3230 if_cell_sr =
3231 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3232 new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3233 /* Check if we need to act on any new notifications */
3234 if ((new_cell_sr->valid_bitmask &
3235 IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
3236 new_cell_sr->mss_recommended !=
3237 if_cell_sr->mss_recommended) {
3238 atomic_bitset_32(&tcbinfo.ipi_flags,
3239 INPCBINFO_UPDATE_MSS);
3240 inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
3241 #if NECP
3242 necp_update_all_clients();
3243 #endif
3244 }
3245
3246 /* Finally copy the new information */
3247 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
3248 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
3249 if_cell_sr->valid_bitmask = 0;
3250 bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
3251 } else if (IFNET_IS_WIFI(ifp)) {
3252 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
3253
3254 /* Check version */
3255 if (ifsr->ifsr_version !=
3256 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
3257 err = ENOTSUP;
3258 goto done;
3259 }
3260
3261 if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
3262 err = EINVAL;
3263 goto done;
3264 }
3265
3266 if_wifi_sr =
3267 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3268 new_wifi_sr =
3269 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3270 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
3271 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
3272 if_wifi_sr->valid_bitmask = 0;
3273 bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));
3274
3275 /*
3276 * Update the bandwidth values if we got recent values
3277 * reported through the other KPI.
3278 */
3279 if (!(new_wifi_sr->valid_bitmask &
3280 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
3281 ifp->if_output_bw.max_bw > 0) {
3282 if_wifi_sr->valid_bitmask |=
3283 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
3284 if_wifi_sr->ul_max_bandwidth =
3285 ifp->if_output_bw.max_bw > UINT32_MAX ?
3286 UINT32_MAX :
3287 (uint32_t)ifp->if_output_bw.max_bw;
3288 }
3289 if (!(new_wifi_sr->valid_bitmask &
3290 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
3291 ifp->if_output_bw.eff_bw > 0) {
3292 if_wifi_sr->valid_bitmask |=
3293 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
3294 if_wifi_sr->ul_effective_bandwidth =
3295 ifp->if_output_bw.eff_bw > UINT32_MAX ?
3296 UINT32_MAX :
3297 (uint32_t)ifp->if_output_bw.eff_bw;
3298 }
3299 if (!(new_wifi_sr->valid_bitmask &
3300 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
3301 ifp->if_input_bw.max_bw > 0) {
3302 if_wifi_sr->valid_bitmask |=
3303 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
3304 if_wifi_sr->dl_max_bandwidth =
3305 ifp->if_input_bw.max_bw > UINT32_MAX ?
3306 UINT32_MAX :
3307 (uint32_t)ifp->if_input_bw.max_bw;
3308 }
3309 if (!(new_wifi_sr->valid_bitmask &
3310 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
3311 ifp->if_input_bw.eff_bw > 0) {
3312 if_wifi_sr->valid_bitmask |=
3313 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
3314 if_wifi_sr->dl_effective_bandwidth =
3315 ifp->if_input_bw.eff_bw > UINT32_MAX ?
3316 UINT32_MAX :
3317 (uint32_t)ifp->if_input_bw.eff_bw;
3318 }
3319 }
3320
3321 done:
3322 lck_rw_done(&ifp->if_link_status_lock);
3323 ifnet_lock_done(ifp);
3324 return err;
3325 }
3326
3327 /*************************************************************************/
3328 /* Fastlane QoS Capable */
3329 /*************************************************************************/
3330
3331 errno_t
3332 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3333 {
3334 if (interface == NULL) {
3335 return EINVAL;
3336 }
3337
3338 if_set_qosmarking_mode(interface,
3339 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3340
3341 return 0;
3342 }
3343
3344 errno_t
3345 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3346 {
3347 if (interface == NULL || capable == NULL) {
3348 return EINVAL;
3349 }
3350 if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3351 *capable = true;
3352 } else {
3353 *capable = false;
3354 }
3355 return 0;
3356 }
3357
3358 errno_t
3359 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3360 {
3361 int64_t bytes;
3362
3363 if (interface == NULL || unsent_bytes == NULL) {
3364 return EINVAL;
3365 }
3366
3367 bytes = *unsent_bytes = 0;
3368
3369 if (!IF_FULLY_ATTACHED(interface)) {
3370 return ENXIO;
3371 }
3372
3373 bytes = interface->if_sndbyte_unsent;
3374
3375 if (interface->if_eflags & IFEF_TXSTART) {
3376 bytes += IFCQ_BYTES(&interface->if_snd);
3377 }
3378 *unsent_bytes = bytes;
3379
3380 return 0;
3381 }
3382
3383 errno_t
3384 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3385 {
3386 if (ifp == NULL || buf_status == NULL) {
3387 return EINVAL;
3388 }
3389
3390 bzero(buf_status, sizeof(*buf_status));
3391
3392 if (!IF_FULLY_ATTACHED(ifp)) {
3393 return ENXIO;
3394 }
3395
3396 if (ifp->if_eflags & IFEF_TXSTART) {
3397 buf_status->buf_interface = IFCQ_BYTES(&ifp->if_snd);
3398 }
3399
3400 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3401 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3402
3403 return 0;
3404 }
3405
3406 void
3407 ifnet_normalise_unsent_data(void)
3408 {
3409 struct ifnet *ifp;
3410
3411 ifnet_head_lock_shared();
3412 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3413 ifnet_lock_exclusive(ifp);
3414 if (!IF_FULLY_ATTACHED(ifp)) {
3415 ifnet_lock_done(ifp);
3416 continue;
3417 }
3418 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3419 ifnet_lock_done(ifp);
3420 continue;
3421 }
3422
3423 if (ifp->if_sndbyte_total > 0 ||
3424 IFCQ_BYTES(&ifp->if_snd) > 0) {
3425 ifp->if_unsent_data_cnt++;
3426 }
3427
3428 ifnet_lock_done(ifp);
3429 }
3430 ifnet_head_done();
3431 }
3432
3433 errno_t
3434 ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3435 {
3436 errno_t error;
3437
3438 error = if_set_low_power(ifp, on);
3439
3440 return error;
3441 }
3442
3443 errno_t
3444 ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3445 {
3446 if (ifp == NULL || on == NULL) {
3447 return EINVAL;
3448 }
3449
3450 *on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3451 return 0;
3452 }
3453
3454 /*************************************************************************/
3455 /* Interface advisory notifications */
3456 /*************************************************************************/
3457 errno_t
3458 ifnet_interface_advisory_report(ifnet_t ifp,
3459 const struct ifnet_interface_advisory *advisory)
3460 {
3461
3462 #pragma unused(ifp)
3463 #pragma unused(advisory)
3464 return ENOTSUP;
3465
3466 }