1 /*
2 * Copyright (c) 2004-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "kpi_interface.h"
30
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
45 #include <net/dlil.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
59 #include <sys/proc.h>
60 #include <sys/sysctl.h>
61 #include <sys/mbuf.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
68 #ifdef INET
69 #include <netinet/igmp_var.h>
70 #endif
71 #ifdef INET6
72 #include <netinet6/mld6_var.h>
73 #endif
74 #include <netkey/key.h>
75 #include <stdbool.h>
76
77 #include "net/net_str_id.h"
78
79 #if CONFIG_MACF
80 #include <sys/kauth.h>
81 #include <security/mac_framework.h>
82 #endif
83
84
85 #undef ifnet_allocate
86 errno_t ifnet_allocate(const struct ifnet_init_params *init,
87 ifnet_t *ifp);
88
89 static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
90 ifnet_t *ifp, bool is_internal);
91
92
93 #define TOUCHLASTCHANGE(__if_lastchange) { \
94 (__if_lastchange)->tv_sec = net_uptime(); \
95 (__if_lastchange)->tv_usec = 0; \
96 }
97
98 static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int,
99 struct ifnet_llreach_info *);
100 static void ifnet_kpi_free(ifnet_t);
101 static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
102 u_int32_t *);
103 static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
104 u_char, int);
105 static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
106
107 /*
108  * Temporary workaround until we have real reference counting
109 *
110 * We keep the bits about calling dlil_if_release (which should be
111 * called recycle) transparent by calling it from our if_free function
112 * pointer. We have to keep the client's original detach function
113 * somewhere so we can call it.
114 */
115 static void
116 ifnet_kpi_free(ifnet_t ifp)
117 {
118 ifnet_detached_func detach_func = ifp->if_kpi_storage;
119
120 if (detach_func != NULL)
121 detach_func(ifp);
122
123 if (ifp->if_broadcast.length > sizeof (ifp->if_broadcast.u.buffer)) {
124 FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
125 ifp->if_broadcast.u.ptr = NULL;
126 }
127
128 dlil_if_release(ifp);
129 }
130
131 errno_t
132 ifnet_allocate_common(const struct ifnet_init_params *init,
133 ifnet_t *ifp, bool is_internal)
134 {
135 struct ifnet_init_eparams einit;
136
137 bzero(&einit, sizeof (einit));
138
139 einit.ver = IFNET_INIT_CURRENT_VERSION;
140 einit.len = sizeof (einit);
141 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
142 if (!is_internal) {
143 einit.flags |= IFNET_INIT_ALLOC_KPI;
144 }
145 einit.uniqueid = init->uniqueid;
146 einit.uniqueid_len = init->uniqueid_len;
147 einit.name = init->name;
148 einit.unit = init->unit;
149 einit.family = init->family;
150 einit.type = init->type;
151 einit.output = init->output;
152 einit.demux = init->demux;
153 einit.add_proto = init->add_proto;
154 einit.del_proto = init->del_proto;
155 einit.check_multi = init->check_multi;
156 einit.framer = init->framer;
157 einit.softc = init->softc;
158 einit.ioctl = init->ioctl;
159 einit.set_bpf_tap = init->set_bpf_tap;
160 einit.detach = init->detach;
161 einit.event = init->event;
162 einit.broadcast_addr = init->broadcast_addr;
163 einit.broadcast_len = init->broadcast_len;
164
165 return (ifnet_allocate_extended(&einit, ifp));
166 }
167
168 errno_t
169 ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
170 {
171 return (ifnet_allocate_common(init, ifp, true));
172 }
173
174 errno_t
175 ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
176 {
177 return (ifnet_allocate_common(init, ifp, false));
178 }
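/*
 * A minimal usage sketch (not part of the build): how a hypothetical
 * driver might fill in struct ifnet_init_params and call ifnet_allocate()
 * above.  The "mydrv" name and the mydrv_* callbacks are placeholders for
 * the driver's own values; the returned ifnet still has to be attached
 * separately before it can carry traffic.
 */
#if 0
static errno_t
mydrv_ifnet_alloc(void *softc, ifnet_t *ifp)
{
	struct ifnet_init_params init;

	bzero(&init, sizeof (init));
	init.name = "mydrv";			/* hypothetical prefix */
	init.unit = 0;
	init.family = IFNET_FAMILY_ETHERNET;
	init.type = IFT_ETHER;
	init.output = mydrv_output;		/* hypothetical callbacks */
	init.demux = mydrv_demux;
	init.add_proto = mydrv_add_proto;
	init.del_proto = mydrv_del_proto;
	init.ioctl = mydrv_ioctl;
	init.detach = mydrv_detach;
	init.softc = softc;

	return (ifnet_allocate(&init, ifp));
}
#endif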
179
180 errno_t
181 ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
182 ifnet_t *interface)
183 {
184 struct ifnet_init_eparams einit;
185 struct ifnet *ifp = NULL;
186 char if_xname[IFXNAMSIZ] = {0};
187 int error;
188
189 einit = *einit0;
190
191 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
192 einit.len < sizeof (einit))
193 return (EINVAL);
194
195 if (einit.family == 0 || einit.name == NULL ||
196 strlen(einit.name) >= IFNAMSIZ ||
197 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0)
198 return (EINVAL);
199
200
201 if (einit.flags & IFNET_INIT_LEGACY) {
202 if (einit.output == NULL ||
203 (einit.flags & IFNET_INIT_INPUT_POLL))
204 return (EINVAL);
205
206 einit.pre_enqueue = NULL;
207 einit.start = NULL;
208 einit.output_ctl = NULL;
209 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
210 einit.input_poll = NULL;
211 einit.input_ctl = NULL;
212 } else {
213 if (einit.start == NULL)
214 return (EINVAL);
215
216 einit.output = NULL;
217 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX)
218 return (EINVAL);
219
220 if (einit.flags & IFNET_INIT_INPUT_POLL) {
221 if (einit.input_poll == NULL || einit.input_ctl == NULL)
222 return (EINVAL);
223 } else {
224 einit.input_poll = NULL;
225 einit.input_ctl = NULL;
226 }
227 }
228
229
230 /* Initialize external name (name + unit) */
231 (void) snprintf(if_xname, sizeof (if_xname), "%s%d",
232 einit.name, einit.unit);
233
234 if (einit.uniqueid == NULL) {
235 einit.uniqueid = if_xname;
236 einit.uniqueid_len = strlen(if_xname);
237 }
238
239 error = dlil_if_acquire(einit.family, einit.uniqueid,
240 einit.uniqueid_len, if_xname, &ifp);
241
242 if (error == 0) {
243 u_int64_t br;
244
245 /*
246                  * Cast ifp->if_name as non-const. dlil_if_acquire sets it up
247 * to point to storage of at least IFNAMSIZ bytes. It is safe
248 * to write to this.
249 */
250 strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
251 ifp->if_type = einit.type;
252 ifp->if_family = einit.family;
253 ifp->if_subfamily = einit.subfamily;
254 ifp->if_unit = einit.unit;
255 ifp->if_output = einit.output;
256 ifp->if_pre_enqueue = einit.pre_enqueue;
257 ifp->if_start = einit.start;
258 ifp->if_output_ctl = einit.output_ctl;
259 ifp->if_output_sched_model = einit.output_sched_model;
260 ifp->if_output_bw.eff_bw = einit.output_bw;
261 ifp->if_output_bw.max_bw = einit.output_bw_max;
262 ifp->if_output_lt.eff_lt = einit.output_lt;
263 ifp->if_output_lt.max_lt = einit.output_lt_max;
264 ifp->if_input_poll = einit.input_poll;
265 ifp->if_input_ctl = einit.input_ctl;
266 ifp->if_input_bw.eff_bw = einit.input_bw;
267 ifp->if_input_bw.max_bw = einit.input_bw_max;
268 ifp->if_input_lt.eff_lt = einit.input_lt;
269 ifp->if_input_lt.max_lt = einit.input_lt_max;
270 ifp->if_demux = einit.demux;
271 ifp->if_add_proto = einit.add_proto;
272 ifp->if_del_proto = einit.del_proto;
273 ifp->if_check_multi = einit.check_multi;
274 ifp->if_framer_legacy = einit.framer;
275 ifp->if_framer = einit.framer_extended;
276 ifp->if_softc = einit.softc;
277 ifp->if_ioctl = einit.ioctl;
278 ifp->if_set_bpf_tap = einit.set_bpf_tap;
279 ifp->if_free = ifnet_kpi_free;
280 ifp->if_event = einit.event;
281 ifp->if_kpi_storage = einit.detach;
282
283 /* Initialize external name (name + unit) */
284 snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
285 "%s", if_xname);
286
287 /*
288 * On embedded, framer() is already in the extended form;
289 * we simply use it as is, unless the caller specifies
290 * framer_extended() which will then override it.
291 *
292 * On non-embedded, framer() has long been exposed as part
293 * of the public KPI, and therefore its signature must
294 * remain the same (without the pre- and postpend length
295 * parameters.) We special case ether_frameout, such that
296 * it gets mapped to its extended variant. All other cases
297 * utilize the stub routine which will simply return zeroes
298 * for those new parameters.
299 *
300 * Internally, DLIL will only use the extended callback
301 * variant which is represented by if_framer.
302 */
303 #if CONFIG_EMBEDDED
304 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL)
305 ifp->if_framer = ifp->if_framer_legacy;
306 #else /* !CONFIG_EMBEDDED */
307 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
308 if (ifp->if_framer_legacy == ether_frameout)
309 ifp->if_framer = ether_frameout_extended;
310 else
311 ifp->if_framer = ifnet_framer_stub;
312 }
313 #endif /* !CONFIG_EMBEDDED */
314
315 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
316 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
317 else if (ifp->if_output_bw.eff_bw == 0)
318 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
319
320 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
321 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
322 else if (ifp->if_input_bw.eff_bw == 0)
323 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
324
325 if (ifp->if_output_bw.max_bw == 0)
326 ifp->if_output_bw = ifp->if_input_bw;
327 else if (ifp->if_input_bw.max_bw == 0)
328 ifp->if_input_bw = ifp->if_output_bw;
329
330 /* Pin if_baudrate to 32 bits */
331 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
332 if (br != 0)
333 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
334
335 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
336 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
337 else if (ifp->if_output_lt.eff_lt == 0)
338 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
339
340 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
341 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
342 else if (ifp->if_input_lt.eff_lt == 0)
343 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
344
345 if (ifp->if_output_lt.max_lt == 0)
346 ifp->if_output_lt = ifp->if_input_lt;
347 else if (ifp->if_input_lt.max_lt == 0)
348 ifp->if_input_lt = ifp->if_output_lt;
349
350 if (ifp->if_ioctl == NULL)
351 ifp->if_ioctl = ifp_if_ioctl;
352
353 ifp->if_eflags = 0;
354 if (ifp->if_start != NULL) {
355 ifp->if_eflags |= IFEF_TXSTART;
356 if (ifp->if_pre_enqueue == NULL)
357 ifp->if_pre_enqueue = ifnet_enqueue;
358 ifp->if_output = ifp->if_pre_enqueue;
359 } else {
360 ifp->if_eflags &= ~IFEF_TXSTART;
361 }
362
363 if (ifp->if_input_poll != NULL)
364 ifp->if_eflags |= IFEF_RXPOLL;
365 else
366 ifp->if_eflags &= ~IFEF_RXPOLL;
367
368 ifp->if_output_dlil = dlil_output_handler;
369 ifp->if_input_dlil = dlil_input_handler;
370
371 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
372 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
373 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
374 ifp->if_input_ctl == NULL));
375 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
376 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
377
378 if (einit.broadcast_len && einit.broadcast_addr) {
379 if (einit.broadcast_len >
380 sizeof (ifp->if_broadcast.u.buffer)) {
381 MALLOC(ifp->if_broadcast.u.ptr, u_char *,
382 einit.broadcast_len, M_IFADDR, M_NOWAIT);
383 if (ifp->if_broadcast.u.ptr == NULL) {
384 error = ENOMEM;
385 } else {
386 bcopy(einit.broadcast_addr,
387 ifp->if_broadcast.u.ptr,
388 einit.broadcast_len);
389 }
390 } else {
391 bcopy(einit.broadcast_addr,
392 ifp->if_broadcast.u.buffer,
393 einit.broadcast_len);
394 }
395 ifp->if_broadcast.length = einit.broadcast_len;
396 } else {
397 bzero(&ifp->if_broadcast, sizeof (ifp->if_broadcast));
398 }
399
400 ifp->if_xflags = 0;
401
402 /*
403                  * output target queue delay is specified in milliseconds;
404                  * convert it to nanoseconds
405 */
406 IFCQ_TARGET_QDELAY(&ifp->if_snd) =
407 einit.output_target_qdelay * 1000 * 1000;
408 IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;
409
410 ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
411 einit.start_delay_timeout);
412
413 IFCQ_PKT_DROP_LIMIT(&ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;
414
415 /*
416 * Set embryonic flag; this will be cleared
417 * later when it is fully attached.
418 */
419 ifp->if_refflags = IFRF_EMBRYONIC;
420
421 /*
422 * Count the newly allocated ifnet
423 */
424 OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
425 INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
426 if (einit.flags & IFNET_INIT_ALLOC_KPI) {
427 ifp->if_xflags |= IFXF_ALLOC_KPI;
428 } else {
429 OSIncrementAtomic64(
430 &net_api_stats.nas_ifnet_alloc_os_count);
431 INC_ATOMIC_INT64_LIM(
432 net_api_stats.nas_ifnet_alloc_os_total);
433 }
434
435 if (error == 0) {
436 *interface = ifp;
437 // temporary - this should be done in dlil_if_acquire
438 ifnet_reference(ifp);
439 } else {
440 dlil_if_release(ifp);
441 *interface = NULL;
442 }
443 }
444 return (error);
445 }
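/*
 * A companion sketch (not part of the build) for the extended path above:
 * when IFNET_INIT_LEGACY is not set, ifnet_allocate_extended() requires a
 * start callback in place of an output callback.  mydrv_start and the other
 * mydrv_* names are hypothetical, and the sndq_maxlen value is arbitrary.
 */
#if 0
static errno_t
mydrv_ifnet_alloc_extended(ifnet_t *ifp)
{
	struct ifnet_init_eparams einit;

	bzero(&einit, sizeof (einit));
	einit.ver = IFNET_INIT_CURRENT_VERSION;
	einit.len = sizeof (einit);
	einit.flags = IFNET_INIT_NX_NOAUTO;	/* no IFNET_INIT_LEGACY */
	einit.name = "mydrv";
	einit.unit = 0;
	einit.family = IFNET_FAMILY_ETHERNET;
	einit.type = IFT_ETHER;
	einit.start = mydrv_start;		/* required without LEGACY */
	einit.demux = mydrv_demux;
	einit.add_proto = mydrv_add_proto;
	einit.del_proto = mydrv_del_proto;
	einit.sndq_maxlen = 128;		/* arbitrary example */

	return (ifnet_allocate_extended(&einit, ifp));
}
#endif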
446
447 errno_t
448 ifnet_reference(ifnet_t ifp)
449 {
450 return (dlil_if_ref(ifp));
451 }
452
453 errno_t
454 ifnet_release(ifnet_t ifp)
455 {
456 return (dlil_if_free(ifp));
457 }
458
459 errno_t
460 ifnet_interface_family_find(const char *module_string,
461 ifnet_family_t *family_id)
462 {
463 if (module_string == NULL || family_id == NULL)
464 return (EINVAL);
465
466 return (net_str_id_find_internal(module_string, family_id,
467 NSI_IF_FAM_ID, 1));
468 }
469
470 void *
471 ifnet_softc(ifnet_t interface)
472 {
473 return ((interface == NULL) ? NULL : interface->if_softc);
474 }
475
476 const char *
477 ifnet_name(ifnet_t interface)
478 {
479 return ((interface == NULL) ? NULL : interface->if_name);
480 }
481
482 ifnet_family_t
483 ifnet_family(ifnet_t interface)
484 {
485 return ((interface == NULL) ? 0 : interface->if_family);
486 }
487
488 ifnet_subfamily_t
489 ifnet_subfamily(ifnet_t interface)
490 {
491 return ((interface == NULL) ? 0 : interface->if_subfamily);
492 }
493
494 u_int32_t
495 ifnet_unit(ifnet_t interface)
496 {
497 return ((interface == NULL) ? (u_int32_t)0xffffffff :
498 (u_int32_t)interface->if_unit);
499 }
500
501 u_int32_t
502 ifnet_index(ifnet_t interface)
503 {
504 return ((interface == NULL) ? (u_int32_t)0xffffffff :
505 interface->if_index);
506 }
507
508 errno_t
509 ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
510 {
511 uint16_t old_flags;
512
513 if (interface == NULL)
514 return (EINVAL);
515
516 ifnet_lock_exclusive(interface);
517
518 /* If we are modifying the up/down state, call if_updown */
519 if ((mask & IFF_UP) != 0) {
520 if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
521 }
522
523 old_flags = interface->if_flags;
524 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
525 /* If we are modifying the multicast flag, set/unset the silent flag */
526 if ((old_flags & IFF_MULTICAST) !=
527 (interface->if_flags & IFF_MULTICAST)) {
528 #if INET
529 if (IGMP_IFINFO(interface) != NULL)
530 igmp_initsilent(interface, IGMP_IFINFO(interface));
531 #endif /* INET */
532 #if INET6
533 if (MLD_IFINFO(interface) != NULL)
534 mld6_initsilent(interface, MLD_IFINFO(interface));
535 #endif /* INET6 */
536 }
537
538 ifnet_lock_done(interface);
539
540 return (0);
541 }
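/*
 * Usage sketch: toggling a hypothetical interface up or down through
 * ifnet_set_flags() above.  Only the bits selected by the mask argument
 * are modified; everything else is preserved.
 */
#if 0
static void
mydrv_set_up(ifnet_t ifp, bool up)
{
	(void) ifnet_set_flags(ifp, up ? IFF_UP : 0, IFF_UP);
}
#endif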
542
543 u_int16_t
544 ifnet_flags(ifnet_t interface)
545 {
546 return ((interface == NULL) ? 0 : interface->if_flags);
547 }
548
549 /*
550 * This routine ensures the following:
551 *
552 * If IFEF_AWDL is set by the caller, also set the rest of flags as
553 * defined in IFEF_AWDL_MASK.
554 *
555 * If IFEF_AWDL has been set on the interface and the caller attempts
556 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
557 * return failure.
558 *
559 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
560 * on the interface.
561 *
562 * All other flags not associated with AWDL are not affected.
563 *
564 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
565 */
566 static errno_t
567 ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
568 {
569 u_int32_t eflags;
570
571 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
572
573 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
574
575 if (ifp->if_eflags & IFEF_AWDL) {
576 if (eflags & IFEF_AWDL) {
577 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK)
578 return (EINVAL);
579 } else {
580 *new_eflags &= ~IFEF_AWDL_MASK;
581 *mask |= IFEF_AWDL_MASK;
582 }
583 } else if (eflags & IFEF_AWDL) {
584 *new_eflags |= IFEF_AWDL_MASK;
585 *mask |= IFEF_AWDL_MASK;
586 } else if (eflags & IFEF_AWDL_RESTRICTED &&
587 !(ifp->if_eflags & IFEF_AWDL))
588 return (EINVAL);
589
590 return (0);
591 }
592
593 errno_t
594 ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
595 {
596 uint32_t oeflags;
597 struct kev_msg ev_msg;
598 struct net_event_data ev_data;
599
600 if (interface == NULL)
601 return (EINVAL);
602
603 bzero(&ev_msg, sizeof(ev_msg));
604 ifnet_lock_exclusive(interface);
605 /*
606 * Sanity checks for IFEF_AWDL and its related flags.
607 */
608 if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
609 ifnet_lock_done(interface);
610 return (EINVAL);
611 }
612 oeflags = interface->if_eflags;
613 interface->if_eflags =
614 (new_flags & mask) | (interface->if_eflags & ~mask);
615 ifnet_lock_done(interface);
616 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
617 !(oeflags & IFEF_AWDL_RESTRICTED)) {
618 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
619 /*
620 * The interface is now restricted to applications that have
621 * the entitlement.
622 * The check for the entitlement will be done in the data
623 * path, so we don't have to do anything here.
624 */
625 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
626 !(interface->if_eflags & IFEF_AWDL_RESTRICTED))
627 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
628 /*
629 * Notify configd so that it has a chance to perform better
630 * reachability detection.
631 */
632 if (ev_msg.event_code) {
633 bzero(&ev_data, sizeof(ev_data));
634 ev_msg.vendor_code = KEV_VENDOR_APPLE;
635 ev_msg.kev_class = KEV_NETWORK_CLASS;
636 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
637 strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
638 ev_data.if_family = interface->if_family;
639 ev_data.if_unit = interface->if_unit;
640 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
641 ev_msg.dv[0].data_ptr = &ev_data;
642 ev_msg.dv[1].data_length = 0;
643 dlil_post_complete_msg(interface, &ev_msg);
644 }
645
646 return (0);
647 }
648
649 u_int32_t
650 ifnet_eflags(ifnet_t interface)
651 {
652 return ((interface == NULL) ? 0 : interface->if_eflags);
653 }
654
655 errno_t
656 ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
657 {
658 int before, after;
659
660 if (ifp == NULL)
661 return (EINVAL);
662
663 LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
664 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
665
666 /*
667 * If this is called prior to ifnet attach, the actual work will
668 * be done at attach time. Otherwise, if it is called after
669 * ifnet detach, then it is a no-op.
670 */
671 if (!ifnet_is_attached(ifp, 0)) {
672 ifp->if_idle_new_flags = new_flags;
673 ifp->if_idle_new_flags_mask = mask;
674 return (0);
675 } else {
676 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
677 }
678
679 before = ifp->if_idle_flags;
680 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
681 after = ifp->if_idle_flags;
682
683 if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
684 ifp->if_want_aggressive_drain != 0) {
685 ifp->if_want_aggressive_drain = 0;
686 } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
687 ifp->if_want_aggressive_drain++;
688 }
689
690 return (0);
691 }
692
693 errno_t
694 ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
695 {
696 errno_t err;
697
698 lck_mtx_lock(rnh_lock);
699 ifnet_lock_exclusive(ifp);
700 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
701 ifnet_lock_done(ifp);
702 lck_mtx_unlock(rnh_lock);
703
704 return (err);
705 }
706
707 u_int32_t
708 ifnet_idle_flags(ifnet_t ifp)
709 {
710 return ((ifp == NULL) ? 0 : ifp->if_idle_flags);
711 }
712
713 errno_t
714 ifnet_set_link_quality(ifnet_t ifp, int quality)
715 {
716 errno_t err = 0;
717
718 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
719 err = EINVAL;
720 goto done;
721 }
722
723 if (!ifnet_is_attached(ifp, 0)) {
724 err = ENXIO;
725 goto done;
726 }
727
728 if_lqm_update(ifp, quality, 0);
729
730 done:
731 return (err);
732 }
733
734 int
735 ifnet_link_quality(ifnet_t ifp)
736 {
737 int lqm;
738
739 if (ifp == NULL)
740 return (IFNET_LQM_THRESH_OFF);
741
742 ifnet_lock_shared(ifp);
743 lqm = ifp->if_interface_state.lqm_state;
744 ifnet_lock_done(ifp);
745
746 return (lqm);
747 }
748
749 errno_t
750 ifnet_set_interface_state(ifnet_t ifp,
751 struct if_interface_state *if_interface_state)
752 {
753 errno_t err = 0;
754
755 if (ifp == NULL || if_interface_state == NULL) {
756 err = EINVAL;
757 goto done;
758 }
759
760 if (!ifnet_is_attached(ifp, 0)) {
761 err = ENXIO;
762 goto done;
763 }
764
765 if_state_update(ifp, if_interface_state);
766
767 done:
768 return (err);
769 }
770
771 errno_t
772 ifnet_get_interface_state(ifnet_t ifp,
773 struct if_interface_state *if_interface_state)
774 {
775 errno_t err = 0;
776
777 if (ifp == NULL || if_interface_state == NULL) {
778 err = EINVAL;
779 goto done;
780 }
781
782 if (!ifnet_is_attached(ifp, 0)) {
783 err = ENXIO;
784 goto done;
785 }
786
787 if_get_state(ifp, if_interface_state);
788
789 done:
790 return (err);
791 }
792
793
794 static errno_t
795 ifnet_defrouter_llreachinfo(ifnet_t ifp, int af,
796 struct ifnet_llreach_info *iflri)
797 {
798 if (ifp == NULL || iflri == NULL)
799 return (EINVAL);
800
801 VERIFY(af == AF_INET || af == AF_INET6);
802
803 return (ifnet_llreach_get_defrouter(ifp, af, iflri));
804 }
805
806 errno_t
807 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
808 {
809 return (ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri));
810 }
811
812 errno_t
813 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
814 {
815 return (ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri));
816 }
817
818 errno_t
819 ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
820 u_int32_t mask)
821 {
822 errno_t error = 0;
823 int tmp;
824
825 if (ifp == NULL)
826 return (EINVAL);
827
828 ifnet_lock_exclusive(ifp);
829 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
830 if ((tmp & ~IFCAP_VALID))
831 error = EINVAL;
832 else
833 ifp->if_capabilities = tmp;
834 ifnet_lock_done(ifp);
835
836 return (error);
837 }
838
839 u_int32_t
840 ifnet_capabilities_supported(ifnet_t ifp)
841 {
842 return ((ifp == NULL) ? 0 : ifp->if_capabilities);
843 }
844
845
846 errno_t
847 ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
848 u_int32_t mask)
849 {
850 errno_t error = 0;
851 int tmp;
852 struct kev_msg ev_msg;
853 struct net_event_data ev_data;
854
855 if (ifp == NULL)
856 return (EINVAL);
857
858 ifnet_lock_exclusive(ifp);
859 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
860 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities))
861 error = EINVAL;
862 else
863 ifp->if_capenable = tmp;
864 ifnet_lock_done(ifp);
865
866 /* Notify application of the change */
867 bzero(&ev_data, sizeof (struct net_event_data));
868 bzero(&ev_msg, sizeof (struct kev_msg));
869 ev_msg.vendor_code = KEV_VENDOR_APPLE;
870 ev_msg.kev_class = KEV_NETWORK_CLASS;
871 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
872
873 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
874 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
875 ev_data.if_family = ifp->if_family;
876 ev_data.if_unit = (u_int32_t)ifp->if_unit;
877 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
878 ev_msg.dv[0].data_ptr = &ev_data;
879 ev_msg.dv[1].data_length = 0;
880 dlil_post_complete_msg(ifp, &ev_msg);
881
882 return (error);
883 }
884
885 u_int32_t
886 ifnet_capabilities_enabled(ifnet_t ifp)
887 {
888 return ((ifp == NULL) ? 0 : ifp->if_capenable);
889 }
890
891 static const ifnet_offload_t offload_mask =
892 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
893 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
894 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
895 IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
896 IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
897 IFNET_SW_TIMESTAMP);
898
899 static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
900
901 errno_t
902 ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
903 {
904 u_int32_t ifcaps = 0;
905
906 if (interface == NULL)
907 return (EINVAL);
908
909 ifnet_lock_exclusive(interface);
910 interface->if_hwassist = (offload & offload_mask);
911
912 /*
913 * Hardware capable of partial checksum offload is
914 * flexible enough to handle any transports utilizing
915 * Internet Checksumming. Include those transports
916 * here, and leave the final decision to IP.
917 */
918 if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
919 interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
920 IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
921 }
922 if (dlil_verbose) {
923 log(LOG_DEBUG, "%s: set offload flags=%b\n",
924 if_name(interface),
925 interface->if_hwassist, IFNET_OFFLOADF_BITS);
926 }
927 ifnet_lock_done(interface);
928
929 if ((offload & any_offload_csum))
930 ifcaps |= IFCAP_HWCSUM;
931 if ((offload & IFNET_TSO_IPV4))
932 ifcaps |= IFCAP_TSO4;
933 if ((offload & IFNET_TSO_IPV6))
934 ifcaps |= IFCAP_TSO6;
935 if ((offload & IFNET_VLAN_MTU))
936 ifcaps |= IFCAP_VLAN_MTU;
937 if ((offload & IFNET_VLAN_TAGGING))
938 ifcaps |= IFCAP_VLAN_HWTAGGING;
939 if ((offload & IFNET_TX_STATUS))
940 ifcaps |= IFCAP_TXSTATUS;
941 if ((offload & IFNET_HW_TIMESTAMP))
942 ifcaps |= IFCAP_HW_TIMESTAMP;
943 if ((offload & IFNET_SW_TIMESTAMP))
944 ifcaps |= IFCAP_SW_TIMESTAMP;
945 if ((offload & IFNET_CSUM_PARTIAL))
946 ifcaps |= IFCAP_CSUM_PARTIAL;
947 if ((offload & IFNET_CSUM_ZERO_INVERT))
948 ifcaps |= IFCAP_CSUM_ZERO_INVERT;
949 if (ifcaps != 0) {
950 (void) ifnet_set_capabilities_supported(interface, ifcaps,
951 IFCAP_VALID);
952 (void) ifnet_set_capabilities_enabled(interface, ifcaps,
953 IFCAP_VALID);
954 }
955
956 return (0);
957 }
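/*
 * Usage sketch: a hypothetical driver advertising IPv4/TCP/UDP checksum
 * offload through ifnet_set_offload() above; the matching IFCAP_* bits
 * are derived by that routine.
 */
#if 0
static void
mydrv_enable_csum_offload(ifnet_t ifp)
{
	(void) ifnet_set_offload(ifp,
	    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP);
}
#endif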
958
959 ifnet_offload_t
960 ifnet_offload(ifnet_t interface)
961 {
962 return ((interface == NULL) ?
963 0 : (interface->if_hwassist & offload_mask));
964 }
965
966 errno_t
967 ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
968 {
969 errno_t error = 0;
970
971 if (interface == NULL || mtuLen < interface->if_mtu)
972 return (EINVAL);
973
974 switch (family) {
975 case AF_INET:
976 if (interface->if_hwassist & IFNET_TSO_IPV4)
977 interface->if_tso_v4_mtu = mtuLen;
978 else
979 error = EINVAL;
980 break;
981
982 case AF_INET6:
983 if (interface->if_hwassist & IFNET_TSO_IPV6)
984 interface->if_tso_v6_mtu = mtuLen;
985 else
986 error = EINVAL;
987 break;
988
989 default:
990 error = EPROTONOSUPPORT;
991 break;
992 }
993
994 return (error);
995 }
996
997 errno_t
998 ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
999 {
1000 errno_t error = 0;
1001
1002 if (interface == NULL || mtuLen == NULL)
1003 return (EINVAL);
1004
1005 switch (family) {
1006 case AF_INET:
1007 if (interface->if_hwassist & IFNET_TSO_IPV4)
1008 *mtuLen = interface->if_tso_v4_mtu;
1009 else
1010 error = EINVAL;
1011 break;
1012
1013 case AF_INET6:
1014 if (interface->if_hwassist & IFNET_TSO_IPV6)
1015 *mtuLen = interface->if_tso_v6_mtu;
1016 else
1017 error = EINVAL;
1018 break;
1019
1020 default:
1021 error = EPROTONOSUPPORT;
1022 break;
1023 }
1024
1025 return (error);
1026 }
1027
1028 errno_t
1029 ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1030 {
1031 struct kev_msg ev_msg;
1032 struct net_event_data ev_data;
1033
1034 bzero(&ev_data, sizeof (struct net_event_data));
1035 bzero(&ev_msg, sizeof (struct kev_msg));
1036
1037 if (interface == NULL)
1038 return (EINVAL);
1039
1040 /* Do not accept wacky values */
1041 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS)
1042 return (EINVAL);
1043
1044 ifnet_lock_exclusive(interface);
1045
1046 if (mask & IF_WAKE_ON_MAGIC_PACKET) {
1047 if (properties & IF_WAKE_ON_MAGIC_PACKET)
1048 interface->if_xflags |= IFXF_WAKE_ON_MAGIC_PACKET;
1049 else
1050 interface->if_xflags &= ~IFXF_WAKE_ON_MAGIC_PACKET;
1051 }
1052
1053 ifnet_lock_done(interface);
1054
1055 (void) ifnet_touch_lastchange(interface);
1056
1057 /* Notify application of the change */
1058 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1059 ev_msg.kev_class = KEV_NETWORK_CLASS;
1060 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1061
1062 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
1063 strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1064 ev_data.if_family = interface->if_family;
1065 ev_data.if_unit = (u_int32_t)interface->if_unit;
1066 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
1067 ev_msg.dv[0].data_ptr = &ev_data;
1068 ev_msg.dv[1].data_length = 0;
1069 dlil_post_complete_msg(interface, &ev_msg);
1070
1071 return (0);
1072 }
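/*
 * Usage sketch: enabling wake-on-magic-packet on a hypothetical interface
 * via ifnet_set_wake_flags() above.
 */
#if 0
static void
mydrv_enable_wake_on_lan(ifnet_t ifp)
{
	(void) ifnet_set_wake_flags(ifp, IF_WAKE_ON_MAGIC_PACKET,
	    IF_WAKE_ON_MAGIC_PACKET);
}
#endif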
1073
1074 u_int32_t
1075 ifnet_get_wake_flags(ifnet_t interface)
1076 {
1077 u_int32_t flags = 0;
1078
1079 if (interface == NULL)
1080 return (0);
1081
1082 if (interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET)
1083 flags |= IF_WAKE_ON_MAGIC_PACKET;
1084
1085 return (flags);
1086 }
1087
1088 /*
1089 * Should MIB data store a copy?
1090 */
1091 errno_t
1092 ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen)
1093 {
1094 if (interface == NULL)
1095 return (EINVAL);
1096
1097 ifnet_lock_exclusive(interface);
1098 interface->if_linkmib = (void*)mibData;
1099 interface->if_linkmiblen = mibLen;
1100 ifnet_lock_done(interface);
1101 return (0);
1102 }
1103
1104 errno_t
1105 ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen)
1106 {
1107 errno_t result = 0;
1108
1109 if (interface == NULL)
1110 return (EINVAL);
1111
1112 ifnet_lock_shared(interface);
1113 if (*mibLen < interface->if_linkmiblen)
1114 result = EMSGSIZE;
1115 if (result == 0 && interface->if_linkmib == NULL)
1116 result = ENOTSUP;
1117
1118 if (result == 0) {
1119 *mibLen = interface->if_linkmiblen;
1120 bcopy(interface->if_linkmib, mibData, *mibLen);
1121 }
1122 ifnet_lock_done(interface);
1123
1124 return (result);
1125 }
1126
1127 u_int32_t
1128 ifnet_get_link_mib_data_length(ifnet_t interface)
1129 {
1130 return ((interface == NULL) ? 0 : interface->if_linkmiblen);
1131 }
1132
1133 errno_t
1134 ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1135 mbuf_t m, void *route, const struct sockaddr *dest)
1136 {
1137 if (interface == NULL || protocol_family == 0 || m == NULL) {
1138 if (m != NULL)
1139 mbuf_freem_list(m);
1140 return (EINVAL);
1141 }
1142 return (dlil_output(interface, protocol_family, m, route, dest, 0, NULL));
1143 }
1144
1145 errno_t
1146 ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1147 {
1148 if (interface == NULL || m == NULL) {
1149 if (m != NULL)
1150 mbuf_freem_list(m);
1151 return (EINVAL);
1152 }
1153 return (dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL));
1154 }
1155
1156 errno_t
1157 ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1158 {
1159 if (interface == NULL)
1160 return (EINVAL);
1161
1162 interface->if_mtu = mtu;
1163 return (0);
1164 }
1165
1166 u_int32_t
1167 ifnet_mtu(ifnet_t interface)
1168 {
1169 return ((interface == NULL) ? 0 : interface->if_mtu);
1170 }
1171
1172 u_char
1173 ifnet_type(ifnet_t interface)
1174 {
1175 return ((interface == NULL) ? 0 : interface->if_data.ifi_type);
1176 }
1177
1178 errno_t
1179 ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1180 {
1181 if (interface == NULL)
1182 return (EINVAL);
1183
1184 interface->if_data.ifi_addrlen = addrlen;
1185 return (0);
1186 }
1187
1188 u_char
1189 ifnet_addrlen(ifnet_t interface)
1190 {
1191 return ((interface == NULL) ? 0 : interface->if_data.ifi_addrlen);
1192 }
1193
1194 errno_t
1195 ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1196 {
1197 if (interface == NULL)
1198 return (EINVAL);
1199
1200 interface->if_data.ifi_hdrlen = hdrlen;
1201 return (0);
1202 }
1203
1204 u_char
1205 ifnet_hdrlen(ifnet_t interface)
1206 {
1207 return ((interface == NULL) ? 0 : interface->if_data.ifi_hdrlen);
1208 }
1209
1210 errno_t
1211 ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1212 {
1213 if (interface == NULL)
1214 return (EINVAL);
1215
1216 interface->if_data.ifi_metric = metric;
1217 return (0);
1218 }
1219
1220 u_int32_t
1221 ifnet_metric(ifnet_t interface)
1222 {
1223 return ((interface == NULL) ? 0 : interface->if_data.ifi_metric);
1224 }
1225
1226 errno_t
1227 ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate)
1228 {
1229 if (ifp == NULL)
1230 return (EINVAL);
1231
1232 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1233 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1234
1235 /* Pin if_baudrate to 32 bits until we can change the storage size */
1236 ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate;
1237
1238 return (0);
1239 }
1240
1241 u_int64_t
1242 ifnet_baudrate(struct ifnet *ifp)
1243 {
1244 return ((ifp == NULL) ? 0 : ifp->if_baudrate);
1245 }
1246
1247 errno_t
1248 ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1249 struct if_bandwidths *input_bw)
1250 {
1251 if (ifp == NULL)
1252 return (EINVAL);
1253
1254 /* set input values first (if any), as output values depend on them */
1255 if (input_bw != NULL)
1256 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1257
1258 if (output_bw != NULL)
1259 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1260
1261 return (0);
1262 }
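/*
 * Usage sketch: reporting the link rate of a hypothetical 1 Gbit/s
 * interface through ifnet_set_bandwidths() above.  The values assume
 * bits per second, matching how the result is pinned into if_baudrate.
 */
#if 0
static void
mydrv_report_link_rate(ifnet_t ifp)
{
	struct if_bandwidths bw;

	bzero(&bw, sizeof (bw));
	bw.eff_bw = 1000000000ULL;	/* assumed bits per second */
	bw.max_bw = 1000000000ULL;

	/* Apply the same figure to both directions. */
	(void) ifnet_set_bandwidths(ifp, &bw, &bw);
}
#endif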
1263
1264 static void
1265 ifnet_set_link_status_outbw(struct ifnet *ifp)
1266 {
1267 struct if_wifi_status_v1 *sr;
1268 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1269 if (ifp->if_output_bw.eff_bw != 0) {
1270 sr->valid_bitmask |=
1271 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1272 sr->ul_effective_bandwidth =
1273 ifp->if_output_bw.eff_bw;
1274 }
1275 if (ifp->if_output_bw.max_bw != 0) {
1276 sr->valid_bitmask |=
1277 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1278 sr->ul_max_bandwidth =
1279 ifp->if_output_bw.max_bw;
1280 }
1281 }
1282
1283 errno_t
1284 ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1285 boolean_t locked)
1286 {
1287 struct if_bandwidths old_bw;
1288 struct ifclassq *ifq;
1289 u_int64_t br;
1290
1291 VERIFY(ifp != NULL && bw != NULL);
1292
1293 ifq = &ifp->if_snd;
1294 if (!locked)
1295 IFCQ_LOCK(ifq);
1296 IFCQ_LOCK_ASSERT_HELD(ifq);
1297
1298 old_bw = ifp->if_output_bw;
1299 if (bw->eff_bw != 0)
1300 ifp->if_output_bw.eff_bw = bw->eff_bw;
1301 if (bw->max_bw != 0)
1302 ifp->if_output_bw.max_bw = bw->max_bw;
1303 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
1304 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1305 else if (ifp->if_output_bw.eff_bw == 0)
1306 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1307
1308 /* Pin if_baudrate to 32 bits */
1309 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1310 if (br != 0)
1311 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
1312
1313 /* Adjust queue parameters if needed */
1314 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1315 old_bw.max_bw != ifp->if_output_bw.max_bw)
1316 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1317
1318 if (!locked)
1319 IFCQ_UNLOCK(ifq);
1320
1321 /*
1322 * If this is a Wifi interface, update the values in
1323 * if_link_status structure also.
1324 */
1325 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1326 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1327 ifnet_set_link_status_outbw(ifp);
1328 lck_rw_done(&ifp->if_link_status_lock);
1329 }
1330
1331 return (0);
1332 }
1333
1334 static void
1335 ifnet_set_link_status_inbw(struct ifnet *ifp)
1336 {
1337 struct if_wifi_status_v1 *sr;
1338
1339 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1340 if (ifp->if_input_bw.eff_bw != 0) {
1341 sr->valid_bitmask |=
1342 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1343 sr->dl_effective_bandwidth =
1344 ifp->if_input_bw.eff_bw;
1345 }
1346 if (ifp->if_input_bw.max_bw != 0) {
1347 sr->valid_bitmask |=
1348 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1349 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw;
1350 }
1351 }
1352
1353 errno_t
1354 ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1355 {
1356 struct if_bandwidths old_bw;
1357
1358 VERIFY(ifp != NULL && bw != NULL);
1359
1360 old_bw = ifp->if_input_bw;
1361 if (bw->eff_bw != 0)
1362 ifp->if_input_bw.eff_bw = bw->eff_bw;
1363 if (bw->max_bw != 0)
1364 ifp->if_input_bw.max_bw = bw->max_bw;
1365 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
1366 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1367 else if (ifp->if_input_bw.eff_bw == 0)
1368 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1369
1370 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1371 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1372 ifnet_set_link_status_inbw(ifp);
1373 lck_rw_done(&ifp->if_link_status_lock);
1374 }
1375
1376 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1377 old_bw.max_bw != ifp->if_input_bw.max_bw)
1378 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1379
1380 return (0);
1381 }
1382
1383 u_int64_t
1384 ifnet_output_linkrate(struct ifnet *ifp)
1385 {
1386 struct ifclassq *ifq = &ifp->if_snd;
1387 u_int64_t rate;
1388
1389 IFCQ_LOCK_ASSERT_HELD(ifq);
1390
1391 rate = ifp->if_output_bw.eff_bw;
1392 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1393 u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
1394 VERIFY(tbr_rate > 0);
1395 rate = MIN(rate, ifp->if_snd.ifcq_tbr.tbr_rate_raw);
1396 }
1397
1398 return (rate);
1399 }
1400
1401 u_int64_t
1402 ifnet_input_linkrate(struct ifnet *ifp)
1403 {
1404 return (ifp->if_input_bw.eff_bw);
1405 }
1406
1407 errno_t
1408 ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1409 struct if_bandwidths *input_bw)
1410 {
1411 if (ifp == NULL)
1412 return (EINVAL);
1413
1414 if (output_bw != NULL)
1415 *output_bw = ifp->if_output_bw;
1416 if (input_bw != NULL)
1417 *input_bw = ifp->if_input_bw;
1418
1419 return (0);
1420 }
1421
1422 errno_t
1423 ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1424 struct if_latencies *input_lt)
1425 {
1426 if (ifp == NULL)
1427 return (EINVAL);
1428
1429 if (output_lt != NULL)
1430 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1431
1432 if (input_lt != NULL)
1433 (void) ifnet_set_input_latencies(ifp, input_lt);
1434
1435 return (0);
1436 }
1437
1438 errno_t
1439 ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1440 boolean_t locked)
1441 {
1442 struct if_latencies old_lt;
1443 struct ifclassq *ifq;
1444
1445 VERIFY(ifp != NULL && lt != NULL);
1446
1447 ifq = &ifp->if_snd;
1448 if (!locked)
1449 IFCQ_LOCK(ifq);
1450 IFCQ_LOCK_ASSERT_HELD(ifq);
1451
1452 old_lt = ifp->if_output_lt;
1453 if (lt->eff_lt != 0)
1454 ifp->if_output_lt.eff_lt = lt->eff_lt;
1455 if (lt->max_lt != 0)
1456 ifp->if_output_lt.max_lt = lt->max_lt;
1457 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
1458 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1459 else if (ifp->if_output_lt.eff_lt == 0)
1460 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1461
1462 /* Adjust queue parameters if needed */
1463 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1464 old_lt.max_lt != ifp->if_output_lt.max_lt)
1465 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1466
1467 if (!locked)
1468 IFCQ_UNLOCK(ifq);
1469
1470 return (0);
1471 }
1472
1473 errno_t
1474 ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1475 {
1476 struct if_latencies old_lt;
1477
1478 VERIFY(ifp != NULL && lt != NULL);
1479
1480 old_lt = ifp->if_input_lt;
1481 if (lt->eff_lt != 0)
1482 ifp->if_input_lt.eff_lt = lt->eff_lt;
1483 if (lt->max_lt != 0)
1484 ifp->if_input_lt.max_lt = lt->max_lt;
1485 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
1486 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1487 else if (ifp->if_input_lt.eff_lt == 0)
1488 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1489
1490 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1491 old_lt.max_lt != ifp->if_input_lt.max_lt)
1492 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1493
1494 return (0);
1495 }
1496
1497 errno_t
1498 ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1499 struct if_latencies *input_lt)
1500 {
1501 if (ifp == NULL)
1502 return (EINVAL);
1503
1504 if (output_lt != NULL)
1505 *output_lt = ifp->if_output_lt;
1506 if (input_lt != NULL)
1507 *input_lt = ifp->if_input_lt;
1508
1509 return (0);
1510 }
1511
1512 errno_t
1513 ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1514 {
1515 errno_t err;
1516
1517 if (ifp == NULL)
1518 return (EINVAL);
1519 else if (!ifnet_is_attached(ifp, 1))
1520 return (ENXIO);
1521
1522 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1523
1524 /* Release the io ref count */
1525 ifnet_decr_iorefcnt(ifp);
1526
1527 return (err);
1528 }
1529
1530 errno_t
1531 ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1532 {
1533 errno_t err;
1534
1535 if (ifp == NULL || p == NULL)
1536 return (EINVAL);
1537 else if (!ifnet_is_attached(ifp, 1))
1538 return (ENXIO);
1539
1540 err = dlil_rxpoll_get_params(ifp, p);
1541
1542 /* Release the io ref count */
1543 ifnet_decr_iorefcnt(ifp);
1544
1545 return (err);
1546 }
1547
1548 errno_t
1549 ifnet_stat_increment(struct ifnet *ifp,
1550 const struct ifnet_stat_increment_param *s)
1551 {
1552 if (ifp == NULL)
1553 return (EINVAL);
1554
1555 if (s->packets_in != 0)
1556 atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1557 if (s->bytes_in != 0)
1558 atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1559 if (s->errors_in != 0)
1560 atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1561
1562 if (s->packets_out != 0)
1563 atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
1564 if (s->bytes_out != 0)
1565 atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1566 if (s->errors_out != 0)
1567 atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1568
1569 if (s->collisions != 0)
1570 atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
1571 if (s->dropped != 0)
1572 atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1573
1574 /* Touch the last change time. */
1575 TOUCHLASTCHANGE(&ifp->if_lastchange);
1576
1577 if (ifp->if_data_threshold != 0)
1578 ifnet_notify_data_threshold(ifp);
1579
1580 return (0);
1581 }
1582
1583 errno_t
1584 ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1585 u_int32_t bytes_in, u_int32_t errors_in)
1586 {
1587 if (ifp == NULL)
1588 return (EINVAL);
1589
1590 if (packets_in != 0)
1591 atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
1592 if (bytes_in != 0)
1593 atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
1594 if (errors_in != 0)
1595 atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
1596
1597 TOUCHLASTCHANGE(&ifp->if_lastchange);
1598
1599 if (ifp->if_data_threshold != 0)
1600 ifnet_notify_data_threshold(ifp);
1601
1602 return (0);
1603 }
1604
1605 errno_t
1606 ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1607 u_int32_t bytes_out, u_int32_t errors_out)
1608 {
1609 if (ifp == NULL)
1610 return (EINVAL);
1611
1612 if (packets_out != 0)
1613 atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
1614 if (bytes_out != 0)
1615 atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
1616 if (errors_out != 0)
1617 atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
1618
1619 TOUCHLASTCHANGE(&ifp->if_lastchange);
1620
1621 if (ifp->if_data_threshold != 0)
1622 ifnet_notify_data_threshold(ifp);
1623
1624 return (0);
1625 }
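/*
 * Usage sketch: a hypothetical receive completion path crediting one
 * inbound packet to the interface counters with ifnet_stat_increment_in()
 * above.  mbuf_pkthdr_len() is the mbuf KPI accessor for the packet length.
 */
#if 0
static void
mydrv_rx_complete(ifnet_t ifp, mbuf_t m)
{
	(void) ifnet_stat_increment_in(ifp, 1,
	    (u_int32_t)mbuf_pkthdr_len(m), 0);
}
#endif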
1626
1627 errno_t
1628 ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1629 {
1630 if (ifp == NULL)
1631 return (EINVAL);
1632
1633 atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1634 atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1635 atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
1636 atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1637
1638 atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
1639 atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1640 atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
1641 atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1642
1643 atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
1644 atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1645 atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);
1646
1647 /* Touch the last change time. */
1648 TOUCHLASTCHANGE(&ifp->if_lastchange);
1649
1650 if (ifp->if_data_threshold != 0)
1651 ifnet_notify_data_threshold(ifp);
1652
1653 return (0);
1654 }
1655
1656 errno_t
1657 ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1658 {
1659 if (ifp == NULL)
1660 return (EINVAL);
1661
1662 atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
1663 atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
1664 atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
1665 atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);
1666
1667 atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
1668 atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
1669 atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
1670 atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);
1671
1672 atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
1673 atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
1674 atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);
1675
1676 if (ifp->if_data_threshold != 0)
1677 ifnet_notify_data_threshold(ifp);
1678
1679 return (0);
1680 }
1681
1682 errno_t
1683 ifnet_touch_lastchange(ifnet_t interface)
1684 {
1685 if (interface == NULL)
1686 return (EINVAL);
1687
1688 TOUCHLASTCHANGE(&interface->if_lastchange);
1689
1690 return (0);
1691 }
1692
1693 errno_t
1694 ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1695 {
1696 if (interface == NULL)
1697 return (EINVAL);
1698
1699 *last_change = interface->if_data.ifi_lastchange;
1700 /* Crude conversion from uptime to calendar time */
1701 last_change->tv_sec += boottime_sec();
1702
1703 return (0);
1704 }
1705
1706 errno_t
1707 ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
1708 {
1709 return (addresses == NULL ? EINVAL :
1710 ifnet_get_address_list_family(interface, addresses, 0));
1711 }
1712
1713 struct ifnet_addr_list {
1714 SLIST_ENTRY(ifnet_addr_list) ifal_le;
1715 struct ifaddr *ifal_ifa;
1716 };
1717
1718 errno_t
1719 ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
1720 sa_family_t family)
1721 {
1722 return (ifnet_get_address_list_family_internal(interface, addresses,
1723 family, 0, M_NOWAIT, 0));
1724 }
1725
1726 errno_t
1727 ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
1728 {
1729 return (addresses == NULL ? EINVAL :
1730 ifnet_get_address_list_family_internal(interface, addresses,
1731 0, 0, M_NOWAIT, 1));
1732 }
1733
1734 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
1735
1736 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
1737
1738 __private_extern__ errno_t
1739 ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
1740 sa_family_t family, int detached, int how, int return_inuse_addrs)
1741 {
1742 SLIST_HEAD(, ifnet_addr_list) ifal_head;
1743 struct ifnet_addr_list *ifal, *ifal_tmp;
1744 struct ifnet *ifp;
1745 int count = 0;
1746 errno_t err = 0;
1747 int usecount = 0;
1748 int index = 0;
1749
1750 SLIST_INIT(&ifal_head);
1751
1752 if (addresses == NULL) {
1753 err = EINVAL;
1754 goto done;
1755 }
1756 *addresses = NULL;
1757
1758 if (detached) {
1759 /*
1760 * Interface has been detached, so skip the lookup
1761 * at ifnet_head and go directly to inner loop.
1762 */
1763 ifp = interface;
1764 if (ifp == NULL) {
1765 err = EINVAL;
1766 goto done;
1767 }
1768 goto one;
1769 }
1770
1771 ifnet_head_lock_shared();
1772 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1773 if (interface != NULL && ifp != interface)
1774 continue;
1775 one:
1776 ifnet_lock_shared(ifp);
1777 if (interface == NULL || interface == ifp) {
1778 struct ifaddr *ifa;
1779 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1780 IFA_LOCK(ifa);
1781 if (family != 0 &&
1782 ifa->ifa_addr->sa_family != family) {
1783 IFA_UNLOCK(ifa);
1784 continue;
1785 }
1786 MALLOC(ifal, struct ifnet_addr_list *,
1787 sizeof (*ifal), M_TEMP, how);
1788 if (ifal == NULL) {
1789 IFA_UNLOCK(ifa);
1790 ifnet_lock_done(ifp);
1791 if (!detached)
1792 ifnet_head_done();
1793 err = ENOMEM;
1794 goto done;
1795 }
1796 ifal->ifal_ifa = ifa;
1797 IFA_ADDREF_LOCKED(ifa);
1798 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
1799 ++count;
1800 IFA_UNLOCK(ifa);
1801 }
1802 }
1803 ifnet_lock_done(ifp);
1804 if (detached)
1805 break;
1806 }
1807 if (!detached)
1808 ifnet_head_done();
1809
1810 if (count == 0) {
1811 err = ENXIO;
1812 goto done;
1813 }
1814 MALLOC(*addresses, ifaddr_t *, sizeof (ifaddr_t) * (count + 1),
1815 M_TEMP, how);
1816 if (*addresses == NULL) {
1817 err = ENOMEM;
1818 goto done;
1819 }
1820 bzero(*addresses, sizeof (ifaddr_t) * (count + 1));
1821
1822 done:
1823 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
1824 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
1825 if (err == 0) {
1826 if (return_inuse_addrs) {
1827 usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
1828 usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
1829 if (usecount) {
1830 (*addresses)[index] = ifal->ifal_ifa;
1831 index++;
1832 } else {
1833 IFA_REMREF(ifal->ifal_ifa);
1834 }
1835 } else {
1836 (*addresses)[--count] = ifal->ifal_ifa;
1837 }
1838 } else {
1839 IFA_REMREF(ifal->ifal_ifa);
1840 }
1841 FREE(ifal, M_TEMP);
1842 }
1843
1844 VERIFY(err == 0 || *addresses == NULL);
1845 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
1846 VERIFY(return_inuse_addrs == 1);
1847 FREE(*addresses, M_TEMP);
1848 err = ENXIO;
1849 }
1850 return (err);
1851 }
1852
1853 void
1854 ifnet_free_address_list(ifaddr_t *addresses)
1855 {
1856 int i;
1857
1858 if (addresses == NULL)
1859 return;
1860
1861 for (i = 0; addresses[i] != NULL; i++)
1862 IFA_REMREF(addresses[i]);
1863
1864 FREE(addresses, M_TEMP);
1865 }
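/*
 * Usage sketch: walking the AF_INET addresses of a hypothetical interface
 * with ifnet_get_address_list_family() above.  The returned array is
 * NULL-terminated and must be released with ifnet_free_address_list().
 */
#if 0
static void
mydrv_walk_inet_addresses(ifnet_t ifp)
{
	ifaddr_t *addrs;
	int i;

	if (ifnet_get_address_list_family(ifp, &addrs, AF_INET) != 0)
		return;

	for (i = 0; addrs[i] != NULL; i++) {
		/* inspect addrs[i] here */
	}
	ifnet_free_address_list(addrs);
}
#endif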
1866
1867 void *
1868 ifnet_lladdr(ifnet_t interface)
1869 {
1870 struct ifaddr *ifa;
1871 void *lladdr;
1872
1873 if (interface == NULL)
1874 return (NULL);
1875
1876 /*
1877 * if_lladdr points to the permanent link address of
1878 * the interface and it never gets deallocated; internal
1879 * code should simply use IF_LLADDR() for performance.
1880 */
1881 ifa = interface->if_lladdr;
1882 IFA_LOCK_SPIN(ifa);
1883 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
1884 IFA_UNLOCK(ifa);
1885
1886 return (lladdr);
1887 }
1888
1889 errno_t
1890 ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
1891 size_t *out_len)
1892 {
1893 if (interface == NULL || addr == NULL || out_len == NULL)
1894 return (EINVAL);
1895
1896 *out_len = interface->if_broadcast.length;
1897
1898 if (buffer_len < interface->if_broadcast.length)
1899 return (EMSGSIZE);
1900
1901 if (interface->if_broadcast.length == 0)
1902 return (ENXIO);
1903
1904 if (interface->if_broadcast.length <=
1905 sizeof (interface->if_broadcast.u.buffer)) {
1906 bcopy(interface->if_broadcast.u.buffer, addr,
1907 interface->if_broadcast.length);
1908 } else {
1909 bcopy(interface->if_broadcast.u.ptr, addr,
1910 interface->if_broadcast.length);
1911 }
1912
1913 return (0);
1914 }
1915
1916 static errno_t
1917 ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
1918 size_t lladdr_len, kauth_cred_t *credp)
1919 {
1920 const u_int8_t *bytes;
1921 size_t bytes_len;
1922 struct ifaddr *ifa;
1923 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
1924 errno_t error = 0;
1925
1926 /*
1927          * Make sure to accommodate the largest possible
1928 * size of SA(if_lladdr)->sa_len.
1929 */
1930 _CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1));
1931
1932 if (interface == NULL || lladdr == NULL)
1933 return (EINVAL);
1934
1935 ifa = interface->if_lladdr;
1936 IFA_LOCK_SPIN(ifa);
1937 bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
1938 IFA_UNLOCK(ifa);
1939
1940 bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
1941 if (bytes_len != lladdr_len) {
1942 bzero(lladdr, lladdr_len);
1943 error = EMSGSIZE;
1944 } else {
1945 bcopy(bytes, lladdr, bytes_len);
1946 }
1947
1948 return (error);
1949 }
1950
1951 errno_t
1952 ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1953 {
1954 return (ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1955 NULL));
1956 }
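/*
 * Usage sketch: copying a 6-byte (Ethernet-sized) link-layer address out
 * of a hypothetical interface with ifnet_lladdr_copy_bytes() above; the
 * call returns EMSGSIZE if the stored address has a different length.
 */
#if 0
static errno_t
mydrv_copy_mac(ifnet_t ifp, u_int8_t mac[6])
{
	return (ifnet_lladdr_copy_bytes(ifp, mac, 6));
}
#endif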
1957
1958 errno_t
1959 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1960 {
1961 #if CONFIG_MACF
1962 kauth_cred_t cred;
1963 net_thread_marks_t marks;
1964 #endif
1965 kauth_cred_t *credp;
1966 errno_t error;
1967
1968 credp = NULL;
1969 #if CONFIG_MACF
1970 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
1971 cred = kauth_cred_proc_ref(current_proc());
1972 credp = &cred;
1973 #else
1974 credp = NULL;
1975 #endif
1976
1977 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1978 credp);
1979
1980 #if CONFIG_MACF
1981 kauth_cred_unref(credp);
1982 net_thread_marks_pop(marks);
1983 #endif
1984
1985 return (error);
1986 }
1987
1988 static errno_t
1989 ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
1990 size_t lladdr_len, u_char new_type, int apply_type)
1991 {
1992 struct ifaddr *ifa;
1993 errno_t error = 0;
1994
1995 if (interface == NULL)
1996 return (EINVAL);
1997
1998 ifnet_head_lock_shared();
1999 ifnet_lock_exclusive(interface);
2000 if (lladdr_len != 0 &&
2001 (lladdr_len != interface->if_addrlen || lladdr == 0)) {
2002 ifnet_lock_done(interface);
2003 ifnet_head_done();
2004 return (EINVAL);
2005 }
2006 ifa = ifnet_addrs[interface->if_index - 1];
2007 if (ifa != NULL) {
2008 struct sockaddr_dl *sdl;
2009
2010 IFA_LOCK_SPIN(ifa);
2011 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2012 if (lladdr_len != 0) {
2013 bcopy(lladdr, LLADDR(sdl), lladdr_len);
2014 } else {
2015 bzero(LLADDR(sdl), interface->if_addrlen);
2016 }
2017 sdl->sdl_alen = lladdr_len;
2018
2019 if (apply_type) {
2020 sdl->sdl_type = new_type;
2021 }
2022 IFA_UNLOCK(ifa);
2023 } else {
2024 error = ENXIO;
2025 }
2026 ifnet_lock_done(interface);
2027 ifnet_head_done();
2028
2029 /* Generate a kernel event */
2030 if (error == 0) {
2031 intf_event_enqueue_nwk_wq_entry(interface, NULL,
2032 INTF_EVENT_CODE_LLADDR_UPDATE);
2033 dlil_post_msg(interface, KEV_DL_SUBCLASS,
2034 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
2035 }
2036
2037 return (error);
2038 }
2039
2040 errno_t
2041 ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
2042 {
2043 return (ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0));
2044 }
2045
2046 errno_t
2047 ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
2048 size_t lladdr_len, u_char type)
2049 {
2050 return (ifnet_set_lladdr_internal(interface, lladdr,
2051 lladdr_len, type, 1));
2052 }
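
/*
 * Usage sketch (hypothetical caller): assign a locally administered MAC
 * address.  The supplied length must equal the interface's if_addrlen or
 * the call fails with EINVAL.
 *
 *	static const u_char new_mac[ETHER_ADDR_LEN] =
 *	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	(void) ifnet_set_lladdr(ifp, new_mac, sizeof (new_mac));
 */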
2053
2054 errno_t
2055 ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2056 ifmultiaddr_t *ifmap)
2057 {
2058 if (interface == NULL || maddr == NULL)
2059 return (EINVAL);
2060
2061 /* Don't let users screw up protocols' entries. */
2062 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
2063 return (EINVAL);
2064
2065 return (if_addmulti_anon(interface, maddr, ifmap));
2066 }
2067
2068 errno_t
2069 ifnet_remove_multicast(ifmultiaddr_t ifma)
2070 {
2071 struct sockaddr *maddr;
2072
2073 if (ifma == NULL)
2074 return (EINVAL);
2075
2076 maddr = ifma->ifma_addr;
2077 /* Don't let users screw up protocols' entries. */
2078 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
2079 return (EINVAL);
2080
2081 return (if_delmulti_anon(ifma->ifma_ifp, maddr));
2082 }
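
/*
 * Usage sketch (hypothetical caller): join and leave a link-layer
 * multicast group using the classic BSD AF_UNSPEC convention, where the
 * raw group address is carried in sa_data.  The exact sockaddr layout
 * expected is ultimately the link layer's convention.
 *
 *	struct sockaddr sa;
 *	ifmultiaddr_t ifma;
 *	static const u_char group[ETHER_ADDR_LEN] =
 *	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *
 *	bzero(&sa, sizeof (sa));
 *	sa.sa_len = sizeof (sa);
 *	sa.sa_family = AF_UNSPEC;
 *	bcopy(group, sa.sa_data, sizeof (group));
 *
 *	if (ifnet_add_multicast(ifp, &sa, &ifma) == 0)
 *		(void) ifnet_remove_multicast(ifma);
 */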
2083
2084 errno_t
2085 ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
2086 {
2087 int count = 0;
2088 int cmax = 0;
2089 struct ifmultiaddr *addr;
2090
2091 if (ifp == NULL || addresses == NULL)
2092 return (EINVAL);
2093
2094 ifnet_lock_shared(ifp);
2095 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2096 cmax++;
2097 }
2098
2099 MALLOC(*addresses, ifmultiaddr_t *, sizeof (ifmultiaddr_t) * (cmax + 1),
2100 M_TEMP, M_NOWAIT);
2101 if (*addresses == NULL) {
2102 ifnet_lock_done(ifp);
2103 return (ENOMEM);
2104 }
2105
2106 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2107 if (count + 1 > cmax)
2108 break;
2109 (*addresses)[count] = (ifmultiaddr_t)addr;
2110 ifmaddr_reference((*addresses)[count]);
2111 count++;
2112 }
2113 (*addresses)[cmax] = NULL;
2114 ifnet_lock_done(ifp);
2115
2116 return (0);
2117 }
2118
2119 void
2120 ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2121 {
2122 int i;
2123
2124 if (addresses == NULL)
2125 return;
2126
2127 for (i = 0; addresses[i] != NULL; i++)
2128 ifmaddr_release(addresses[i]);
2129
2130 FREE(addresses, M_TEMP);
2131 }
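
/*
 * Usage sketch (hypothetical caller): the list returned by
 * ifnet_get_multicast_list() is NULL-terminated and holds a reference on
 * every entry; ifnet_free_multicast_list() drops those references.
 *
 *	ifmultiaddr_t *maddrs;
 *	int i;
 *
 *	if (ifnet_get_multicast_list(ifp, &maddrs) == 0) {
 *		for (i = 0; maddrs[i] != NULL; i++) {
 *			struct sockaddr_storage ss;
 *
 *			if (ifmaddr_address(maddrs[i],
 *			    (struct sockaddr *)&ss, sizeof (ss)) == 0) {
 *				// inspect the multicast address in ss
 *			}
 *		}
 *		ifnet_free_multicast_list(maddrs);
 *	}
 */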
2132
2133 errno_t
2134 ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2135 {
2136 struct ifnet *ifp;
2137 int namelen;
2138
2139 if (ifname == NULL)
2140 return (EINVAL);
2141
2142 namelen = strlen(ifname);
2143
2144 *ifpp = NULL;
2145
2146 ifnet_head_lock_shared();
2147 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2148 struct ifaddr *ifa;
2149 struct sockaddr_dl *ll_addr;
2150
2151 ifa = ifnet_addrs[ifp->if_index - 1];
2152 if (ifa == NULL)
2153 continue;
2154
2155 IFA_LOCK(ifa);
2156 ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2157
2158 if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
2159 ifname, ll_addr->sdl_nlen) == 0) {
2160 IFA_UNLOCK(ifa);
2161 *ifpp = ifp;
2162 ifnet_reference(*ifpp);
2163 break;
2164 }
2165 IFA_UNLOCK(ifa);
2166 }
2167 ifnet_head_done();
2168
2169 return ((ifp == NULL) ? ENXIO : 0);
2170 }
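
/*
 * Usage sketch (hypothetical caller): a successful lookup returns the
 * interface with a reference held, which must be dropped with
 * ifnet_release() when done.
 *
 *	ifnet_t ifp;
 *
 *	if (ifnet_find_by_name("en0", &ifp) == 0) {
 *		// use ifp ...
 *		ifnet_release(ifp);
 *	}
 */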
2171
2172 errno_t
2173 ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2174 {
2175 return (ifnet_list_get_common(family, FALSE, list, count));
2176 }
2177
2178 __private_extern__ errno_t
2179 ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2180 {
2181 return (ifnet_list_get_common(family, TRUE, list, count));
2182 }
2183
2184 struct ifnet_list {
2185 SLIST_ENTRY(ifnet_list) ifl_le;
2186 struct ifnet *ifl_ifp;
2187 };
2188
2189 static errno_t
2190 ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
2191 u_int32_t *count)
2192 {
2193 #pragma unused(get_all)
2194 SLIST_HEAD(, ifnet_list) ifl_head;
2195 struct ifnet_list *ifl, *ifl_tmp;
2196 struct ifnet *ifp;
2197 int cnt = 0;
2198 errno_t err = 0;
2199
2200 SLIST_INIT(&ifl_head);
2201
2202 if (list == NULL || count == NULL) {
2203 err = EINVAL;
2204 goto done;
2205 }
2206 *count = 0;
2207 *list = NULL;
2208
2209 ifnet_head_lock_shared();
2210 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2211 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2212 MALLOC(ifl, struct ifnet_list *, sizeof (*ifl),
2213 M_TEMP, M_NOWAIT);
2214 if (ifl == NULL) {
2215 ifnet_head_done();
2216 err = ENOMEM;
2217 goto done;
2218 }
2219 ifl->ifl_ifp = ifp;
2220 ifnet_reference(ifp);
2221 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2222 ++cnt;
2223 }
2224 }
2225 ifnet_head_done();
2226
2227 if (cnt == 0) {
2228 err = ENXIO;
2229 goto done;
2230 }
2231
2232 MALLOC(*list, ifnet_t *, sizeof (ifnet_t) * (cnt + 1),
2233 M_TEMP, M_NOWAIT);
2234 if (*list == NULL) {
2235 err = ENOMEM;
2236 goto done;
2237 }
2238 bzero(*list, sizeof (ifnet_t) * (cnt + 1));
2239 *count = cnt;
2240
2241 done:
2242 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2243 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2244 if (err == 0)
2245 (*list)[--cnt] = ifl->ifl_ifp;
2246 else
2247 ifnet_release(ifl->ifl_ifp);
2248 FREE(ifl, M_TEMP);
2249 }
2250
2251 return (err);
2252 }
2253
2254 void
2255 ifnet_list_free(ifnet_t *interfaces)
2256 {
2257 int i;
2258
2259 if (interfaces == NULL)
2260 return;
2261
2262 for (i = 0; interfaces[i]; i++)
2263 ifnet_release(interfaces[i]);
2264
2265 FREE(interfaces, M_TEMP);
2266 }
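
/*
 * Usage sketch (hypothetical caller): the array returned by
 * ifnet_list_get() is NULL-terminated and every entry is referenced;
 * ifnet_list_free() releases the entries and frees the array.
 *
 *	ifnet_t *list;
 *	u_int32_t count, i;
 *
 *	if (ifnet_list_get(IFNET_FAMILY_ETHERNET, &list, &count) == 0) {
 *		for (i = 0; i < count; i++) {
 *			// inspect list[i]
 *		}
 *		ifnet_list_free(list);
 *	}
 */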
2267
2268 /*************************************************************************/
2269 /* ifaddr_t accessors */
2270 /*************************************************************************/
2271
2272 errno_t
2273 ifaddr_reference(ifaddr_t ifa)
2274 {
2275 if (ifa == NULL)
2276 return (EINVAL);
2277
2278 IFA_ADDREF(ifa);
2279 return (0);
2280 }
2281
2282 errno_t
2283 ifaddr_release(ifaddr_t ifa)
2284 {
2285 if (ifa == NULL)
2286 return (EINVAL);
2287
2288 IFA_REMREF(ifa);
2289 return (0);
2290 }
2291
2292 sa_family_t
2293 ifaddr_address_family(ifaddr_t ifa)
2294 {
2295 sa_family_t family = 0;
2296
2297 if (ifa != NULL) {
2298 IFA_LOCK_SPIN(ifa);
2299 if (ifa->ifa_addr != NULL)
2300 family = ifa->ifa_addr->sa_family;
2301 IFA_UNLOCK(ifa);
2302 }
2303 return (family);
2304 }
2305
2306 errno_t
2307 ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2308 {
2309 u_int32_t copylen;
2310
2311 if (ifa == NULL || out_addr == NULL)
2312 return (EINVAL);
2313
2314 IFA_LOCK_SPIN(ifa);
2315 if (ifa->ifa_addr == NULL) {
2316 IFA_UNLOCK(ifa);
2317 return (ENOTSUP);
2318 }
2319
2320 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2321 ifa->ifa_addr->sa_len : addr_size;
2322 bcopy(ifa->ifa_addr, out_addr, copylen);
2323
2324 if (ifa->ifa_addr->sa_len > addr_size) {
2325 IFA_UNLOCK(ifa);
2326 return (EMSGSIZE);
2327 }
2328
2329 IFA_UNLOCK(ifa);
2330 return (0);
2331 }
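
/*
 * Usage sketch (hypothetical caller): copy into a sockaddr_storage so
 * that any address family fits; EMSGSIZE indicates the output buffer was
 * too small and only a truncated copy was made.
 *
 *	struct sockaddr_storage ss;
 *
 *	if (ifaddr_address(ifa, (struct sockaddr *)&ss, sizeof (ss)) == 0) {
 *		// ss now holds the interface address
 *	}
 */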
2332
2333 errno_t
2334 ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2335 {
2336 u_int32_t copylen;
2337
2338 if (ifa == NULL || out_addr == NULL)
2339 return (EINVAL);
2340
2341 IFA_LOCK_SPIN(ifa);
2342 if (ifa->ifa_dstaddr == NULL) {
2343 IFA_UNLOCK(ifa);
2344 return (ENOTSUP);
2345 }
2346
2347 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2348 ifa->ifa_dstaddr->sa_len : addr_size;
2349 bcopy(ifa->ifa_dstaddr, out_addr, copylen);
2350
2351 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2352 IFA_UNLOCK(ifa);
2353 return (EMSGSIZE);
2354 }
2355
2356 IFA_UNLOCK(ifa);
2357 return (0);
2358 }
2359
2360 errno_t
2361 ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2362 {
2363 u_int32_t copylen;
2364
2365 if (ifa == NULL || out_addr == NULL)
2366 return (EINVAL);
2367
2368 IFA_LOCK_SPIN(ifa);
2369 if (ifa->ifa_netmask == NULL) {
2370 IFA_UNLOCK(ifa);
2371 return (ENOTSUP);
2372 }
2373
2374 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2375 ifa->ifa_netmask->sa_len : addr_size;
2376 bcopy(ifa->ifa_netmask, out_addr, copylen);
2377
2378 if (ifa->ifa_netmask->sa_len > addr_size) {
2379 IFA_UNLOCK(ifa);
2380 return (EMSGSIZE);
2381 }
2382
2383 IFA_UNLOCK(ifa);
2384 return (0);
2385 }
2386
2387 ifnet_t
2388 ifaddr_ifnet(ifaddr_t ifa)
2389 {
2390 struct ifnet *ifp;
2391
2392 if (ifa == NULL)
2393 return (NULL);
2394
2395 /* ifa_ifp is set once at creation time; it is never changed */
2396 ifp = ifa->ifa_ifp;
2397
2398 return (ifp);
2399 }
2400
2401 ifaddr_t
2402 ifaddr_withaddr(const struct sockaddr *address)
2403 {
2404 if (address == NULL)
2405 return (NULL);
2406
2407 return (ifa_ifwithaddr(address));
2408 }
2409
2410 ifaddr_t
2411 ifaddr_withdstaddr(const struct sockaddr *address)
2412 {
2413 if (address == NULL)
2414 return (NULL);
2415
2416 return (ifa_ifwithdstaddr(address));
2417 }
2418
2419 ifaddr_t
2420 ifaddr_withnet(const struct sockaddr *net)
2421 {
2422 if (net == NULL)
2423 return (NULL);
2424
2425 return (ifa_ifwithnet(net));
2426 }
2427
2428 ifaddr_t
2429 ifaddr_withroute(int flags, const struct sockaddr *destination,
2430 const struct sockaddr *gateway)
2431 {
2432 if (destination == NULL || gateway == NULL)
2433 return (NULL);
2434
2435 return (ifa_ifwithroute(flags, destination, gateway));
2436 }
2437
2438 ifaddr_t
2439 ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2440 {
2441 if (addr == NULL || interface == NULL)
2442 return (NULL);
2443
2444 return (ifaof_ifpforaddr(addr, interface));
2445 }
2446
2447 errno_t
2448 ifmaddr_reference(ifmultiaddr_t ifmaddr)
2449 {
2450 if (ifmaddr == NULL)
2451 return (EINVAL);
2452
2453 IFMA_ADDREF(ifmaddr);
2454 return (0);
2455 }
2456
2457 errno_t
2458 ifmaddr_release(ifmultiaddr_t ifmaddr)
2459 {
2460 if (ifmaddr == NULL)
2461 return (EINVAL);
2462
2463 IFMA_REMREF(ifmaddr);
2464 return (0);
2465 }
2466
2467 errno_t
2468 ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2469 u_int32_t addr_size)
2470 {
2471 u_int32_t copylen;
2472
2473 if (ifma == NULL || out_addr == NULL)
2474 return (EINVAL);
2475
2476 IFMA_LOCK(ifma);
2477 if (ifma->ifma_addr == NULL) {
2478 IFMA_UNLOCK(ifma);
2479 return (ENOTSUP);
2480 }
2481
2482 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2483 ifma->ifma_addr->sa_len : addr_size);
2484 bcopy(ifma->ifma_addr, out_addr, copylen);
2485
2486 if (ifma->ifma_addr->sa_len > addr_size) {
2487 IFMA_UNLOCK(ifma);
2488 return (EMSGSIZE);
2489 }
2490 IFMA_UNLOCK(ifma);
2491 return (0);
2492 }
2493
2494 errno_t
2495 ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2496 u_int32_t addr_size)
2497 {
2498 struct ifmultiaddr *ifma_ll;
2499
2500 if (ifma == NULL || out_addr == NULL)
2501 return (EINVAL);
2502 if ((ifma_ll = ifma->ifma_ll) == NULL)
2503 return (ENOTSUP);
2504
2505 return (ifmaddr_address(ifma_ll, out_addr, addr_size));
2506 }
2507
2508 ifnet_t
2509 ifmaddr_ifnet(ifmultiaddr_t ifma)
2510 {
2511 return ((ifma == NULL) ? NULL : ifma->ifma_ifp);
2512 }
2513
2514 /**************************************************************************/
2515 /* interface cloner */
2516 /**************************************************************************/
2517
2518 errno_t
2519 ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
2520 if_clone_t *ifcloner)
2521 {
2522 errno_t error = 0;
2523 struct if_clone *ifc = NULL;
2524 size_t namelen;
2525
2526 if (cloner_params == NULL || ifcloner == NULL ||
2527 cloner_params->ifc_name == NULL ||
2528 cloner_params->ifc_create == NULL ||
2529 cloner_params->ifc_destroy == NULL ||
2530 (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
2531 error = EINVAL;
2532 goto fail;
2533 }
2534
2535 if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
2536 printf("%s: already a cloner for %s\n", __func__,
2537 cloner_params->ifc_name);
2538 error = EEXIST;
2539 goto fail;
2540 }
2541
2542 /* Make room for name string */
2543 ifc = _MALLOC(sizeof (struct if_clone) + IFNAMSIZ + 1, M_CLONE,
2544 M_WAITOK | M_ZERO);
2545 if (ifc == NULL) {
2546 printf("%s: _MALLOC failed\n", __func__);
2547 error = ENOBUFS;
2548 goto fail;
2549 }
2550 strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
2551 ifc->ifc_name = (char *)(ifc + 1);
2552 ifc->ifc_namelen = namelen;
2553 ifc->ifc_maxunit = IF_MAXUNIT;
2554 ifc->ifc_create = cloner_params->ifc_create;
2555 ifc->ifc_destroy = cloner_params->ifc_destroy;
2556
2557 error = if_clone_attach(ifc);
2558 if (error != 0) {
2559 printf("%s: if_clone_attach failed %d\n", __func__, error);
2560 goto fail;
2561 }
2562 *ifcloner = ifc;
2563
2564 return (0);
2565 fail:
2566 if (ifc != NULL)
2567 FREE(ifc, M_CLONE);
2568 return (error);
2569 }
2570
2571 errno_t
2572 ifnet_clone_detach(if_clone_t ifcloner)
2573 {
2574 errno_t error = 0;
2575 struct if_clone *ifc = ifcloner;
2576
2577 if (ifc == NULL || ifc->ifc_name == NULL)
2578 return (EINVAL);
2579
2580 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2581 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2582 error = EINVAL;
2583 goto fail;
2584 }
2585
2586 if_clone_detach(ifc);
2587
2588 FREE(ifc, M_CLONE);
2589
2590 fail:
2591 return (error);
2592 }
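
/*
 * Usage sketch (hypothetical cloner registration): my_create and
 * my_destroy stand in for driver-supplied ifc_create/ifc_destroy
 * callbacks; "myif" is a made-up cloner name shorter than IFNAMSIZ.
 *
 *	struct ifnet_clone_params params = {
 *		.ifc_name	= "myif",
 *		.ifc_create	= my_create,
 *		.ifc_destroy	= my_destroy,
 *	};
 *	if_clone_t cloner;
 *
 *	if (ifnet_clone_attach(&params, &cloner) == 0) {
 *		// "myif<unit>" interfaces may now be created
 *		(void) ifnet_clone_detach(cloner);
 *	}
 */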
2593
2594 /**************************************************************************/
2595 /* misc */
2596 /**************************************************************************/
2597
2598 errno_t
2599 ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
2600 u_int32_t flags, u_int8_t *bitfield)
2601 {
2602 u_int32_t ifindex;
2603 u_int32_t inp_flags = 0;
2604
2605 if (bitfield == NULL)
2606 return (EINVAL);
2607
2608 switch (protocol) {
2609 case PF_UNSPEC:
2610 case PF_INET:
2611 case PF_INET6:
2612 break;
2613 default:
2614 return (EINVAL);
2615 }
2616
2617 /* bit string is long enough to hold 16-bit port values */
2618 bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));
2619
2620 if_ports_used_update_wakeuuid(ifp);
2621
2622
2623 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ?
2624 INPCB_GET_PORTS_USED_WILDCARDOK : 0);
2625 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ?
2626 INPCB_GET_PORTS_USED_NOWAKEUPOK : 0);
2627 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY) ?
2628 INPCB_GET_PORTS_USED_RECVANYIFONLY : 0);
2629 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY) ?
2630 INPCB_GET_PORTS_USED_EXTBGIDLEONLY : 0);
2631 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY) ?
2632 INPCB_GET_PORTS_USED_ACTIVEONLY : 0);
2633
2634 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2635
2636 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY))
2637 udp_get_ports_used(ifindex, protocol, inp_flags,
2638 bitfield);
2639
2640 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY))
2641 tcp_get_ports_used(ifindex, protocol, inp_flags,
2642 bitfield);
2643
2644 return (0);
2645 }
2646
2647 errno_t
2648 ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
2649 {
2650 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
2651 return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
2652 bitfield));
2653 }
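
/*
 * Usage sketch (hypothetical caller): the bitfield carries one bit per
 * TCP/UDP port, so it must be bitstr_size(IP_PORTRANGE_SIZE), i.e. 8192
 * bytes.  The port-22 check is illustrative only.
 *
 *	u_int8_t *ports;
 *
 *	MALLOC(ports, u_int8_t *, bitstr_size(IP_PORTRANGE_SIZE),
 *	    M_TEMP, M_WAITOK | M_ZERO);
 *	if (ports != NULL) {
 *		if (ifnet_get_local_ports(ifp, ports) == 0 &&
 *		    bit_test(ports, 22)) {
 *			// a local socket on ifp is bound to port 22
 *		}
 *		FREE(ports, M_TEMP);
 *	}
 */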
2654
2655 errno_t
2656 ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
2657 int lqm, int npm, u_int8_t srvinfo[48])
2658 {
2659 if (ifp == NULL || sa == NULL || srvinfo == NULL)
2660 return (EINVAL);
2661 if (sa->sa_len > sizeof(struct sockaddr_storage))
2662 return (EINVAL);
2663 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2664 return (EINVAL);
2665
2666 dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
2667 return (0);
2668 }
2669
2670 errno_t
2671 ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
2672 {
2673 if (ifp == NULL || sa == NULL)
2674 return (EINVAL);
2675 if (sa->sa_len > sizeof(struct sockaddr_storage))
2676 return (EINVAL);
2677 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2678 return (EINVAL);
2679
2680 dlil_node_absent(ifp, sa);
2681 return (0);
2682 }
2683
2684 errno_t
2685 ifnet_notice_master_elected(ifnet_t ifp)
2686 {
2687 if (ifp == NULL)
2688 return (EINVAL);
2689
2690 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
2691 return (0);
2692 }
2693
2694 errno_t
2695 ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
2696 {
2697 #pragma unused(val)
2698
2699 m_do_tx_compl_callback(m, ifp);
2700
2701 return (0);
2702 }
2703
2704 errno_t
2705 ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
2706 {
2707 m_do_tx_compl_callback(m, ifp);
2708
2709 return (0);
2710 }
2711
2712 errno_t
2713 ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
2714 u_int8_t info[IFNET_MODARGLEN])
2715 {
2716 if (ifp == NULL || modid == NULL)
2717 return (EINVAL);
2718
2719 dlil_report_issues(ifp, modid, info);
2720 return (0);
2721 }
2722
2723 errno_t
2724 ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
2725 {
2726 ifnet_t odifp = NULL;
2727
2728 if (ifp == NULL)
2729 return (EINVAL);
2730 else if (!ifnet_is_attached(ifp, 1))
2731 return (ENXIO);
2732
2733 ifnet_lock_exclusive(ifp);
2734 odifp = ifp->if_delegated.ifp;
2735 if (odifp != NULL && odifp == delegated_ifp) {
2736 /* delegate info is unchanged; nothing more to do */
2737 ifnet_lock_done(ifp);
2738 goto done;
2739 }
2740 /* Test if this delegate interface would cause a loop */
2741 ifnet_t delegate_check_ifp = delegated_ifp;
2742 while (delegate_check_ifp != NULL) {
2743 if (delegate_check_ifp == ifp) {
2744 printf("%s: delegating to %s would cause a loop\n",
2745 ifp->if_xname, delegated_ifp->if_xname);
2746 ifnet_lock_done(ifp);
2747 goto done;
2748 }
2749 delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
2750 }
2751 bzero(&ifp->if_delegated, sizeof (ifp->if_delegated));
2752 if (delegated_ifp != NULL && ifp != delegated_ifp) {
2753 ifp->if_delegated.ifp = delegated_ifp;
2754 ifnet_reference(delegated_ifp);
2755 ifp->if_delegated.type = delegated_ifp->if_type;
2756 ifp->if_delegated.family = delegated_ifp->if_family;
2757 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
2758 ifp->if_delegated.expensive =
2759 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
2760
2761 /*
2762 * Propagate flags related to ECN from delegated interface
2763 */
2764 ifp->if_eflags &= ~(IFEF_ECN_ENABLE|IFEF_ECN_DISABLE);
2765 ifp->if_eflags |= (delegated_ifp->if_eflags &
2766 (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE));
2767
2768 printf("%s: is now delegating %s (type 0x%x, family %u, "
2769 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
2770 delegated_ifp->if_type, delegated_ifp->if_family,
2771 delegated_ifp->if_subfamily);
2772 }
2773
2774 ifnet_lock_done(ifp);
2775
2776 if (odifp != NULL) {
2777 if (odifp != delegated_ifp) {
2778 printf("%s: is no longer delegating %s\n",
2779 ifp->if_xname, odifp->if_xname);
2780 }
2781 ifnet_release(odifp);
2782 }
2783
2784 /* Generate a kernel event */
2785 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);
2786
2787 done:
2788 /* Release the io ref count */
2789 ifnet_decr_iorefcnt(ifp);
2790
2791 return (0);
2792 }
2793
2794 errno_t
2795 ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
2796 {
2797 if (ifp == NULL || pdelegated_ifp == NULL)
2798 return (EINVAL);
2799 else if (!ifnet_is_attached(ifp, 1))
2800 return (ENXIO);
2801
2802 ifnet_lock_shared(ifp);
2803 if (ifp->if_delegated.ifp != NULL)
2804 ifnet_reference(ifp->if_delegated.ifp);
2805 *pdelegated_ifp = ifp->if_delegated.ifp;
2806 ifnet_lock_done(ifp);
2807
2808 /* Release the io ref count */
2809 ifnet_decr_iorefcnt(ifp);
2810
2811 return (0);
2812 }
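
/*
 * Usage sketch (hypothetical caller): if a delegate exists it is returned
 * with a reference held, which the caller must drop.
 *
 *	ifnet_t delegate = NULL;
 *
 *	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
 *		// inspect the delegate ...
 *		ifnet_release(delegate);
 *	}
 */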
2813
2814 errno_t
2815 ifnet_get_keepalive_offload_frames(ifnet_t ifp,
2816 struct ifnet_keepalive_offload_frame *frames_array,
2817 u_int32_t frames_array_count, size_t frame_data_offset,
2818 u_int32_t *used_frames_count)
2819 {
2820 u_int32_t i;
2821
2822 if (frames_array == NULL || used_frames_count == NULL ||
2823 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE)
2824 return (EINVAL);
2825
2826 /* frame_data_offset should be 32-bit aligned */
2827 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
2828 frame_data_offset)
2829 return (EINVAL);
2830
2831 *used_frames_count = 0;
2832 if (frames_array_count == 0)
2833 return (0);
2834
2835 for (i = 0; i < frames_array_count; i++) {
2836 struct ifnet_keepalive_offload_frame *frame = frames_array + i;
2837
2838 bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
2839 }
2840
2841 /* First collect IPsec-related keep-alive frames */
2842 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
2843 frames_array, frames_array_count, frame_data_offset);
2844
2845 /* If there is more room, collect other UDP keep-alive frames */
2846 if (*used_frames_count < frames_array_count)
2847 udp_fill_keepalive_offload_frames(ifp, frames_array,
2848 frames_array_count, frame_data_offset,
2849 used_frames_count);
2850
2851 /* If there is more room, collect other TCP keep-alive frames */
2852 if (*used_frames_count < frames_array_count)
2853 tcp_fill_keepalive_offload_frames(ifp, frames_array,
2854 frames_array_count, frame_data_offset,
2855 used_frames_count);
2856
2857 VERIFY(*used_frames_count <= frames_array_count);
2858
2859 return (0);
2860 }
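
/*
 * Usage sketch (hypothetical driver): the frames array is caller
 * allocated; on return, used tells how many slots were filled with
 * IPsec/UDP/TCP keep-alive frames, in that order of priority.
 *
 *	struct ifnet_keepalive_offload_frame frames[8];
 *	u_int32_t used = 0;
 *
 *	if (ifnet_get_keepalive_offload_frames(ifp, frames, 8, 0, &used) == 0) {
 *		// program frames[0..used-1] into the hardware offload engine
 *	}
 */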
2861
2862 errno_t
2863 ifnet_link_status_report(ifnet_t ifp, const void *buffer,
2864 size_t buffer_len)
2865 {
2866 struct if_link_status *ifsr;
2867 errno_t err = 0;
2868
2869 if (ifp == NULL || buffer == NULL || buffer_len == 0)
2870 return (EINVAL);
2871
2872 ifnet_lock_shared(ifp);
2873
2874 /*
2875 * Make sure that the interface is attached; there is no need
2876 * to take a reference because this call comes from the driver.
2877 */
2878 if (!ifnet_is_attached(ifp, 0)) {
2879 ifnet_lock_done(ifp);
2880 return (ENXIO);
2881 }
2882
2883 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
2884
2885 /*
2886 * If this is the first status report then allocate memory
2887 * to store it.
2888 */
2889 if (ifp->if_link_status == NULL) {
2890 MALLOC(ifp->if_link_status, struct if_link_status *,
2891 sizeof(struct if_link_status), M_TEMP, M_ZERO);
2892 if (ifp->if_link_status == NULL) {
2893 err = ENOMEM;
2894 goto done;
2895 }
2896 }
2897
2898 ifsr = __DECONST(struct if_link_status *, buffer);
2899
2900 if (ifp->if_type == IFT_CELLULAR) {
2901 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
2902 /*
2903 * Currently we have a single version -- if it does
2904 * not match, just return.
2905 */
2906 if (ifsr->ifsr_version !=
2907 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
2908 err = ENOTSUP;
2909 goto done;
2910 }
2911
2912 if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
2913 err = EINVAL;
2914 goto done;
2915 }
2916
2917 if_cell_sr =
2918 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2919 new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2920 /* Check if we need to act on any new notifications */
2921 if ((new_cell_sr->valid_bitmask &
2922 IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
2923 new_cell_sr->mss_recommended !=
2924 if_cell_sr->mss_recommended) {
2925 atomic_bitset_32(&tcbinfo.ipi_flags,
2926 INPCBINFO_UPDATE_MSS);
2927 inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
2928 #if NECP
2929 necp_update_all_clients();
2930 #endif
2931 }
2932
2933 /* Finally copy the new information */
2934 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2935 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2936 if_cell_sr->valid_bitmask = 0;
2937 bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
2938
2939 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2940 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
2941
2942 /* Check version */
2943 if (ifsr->ifsr_version !=
2944 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
2945 err = ENOTSUP;
2946 goto done;
2947 }
2948
2949 if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
2950 err = EINVAL;
2951 goto done;
2952 }
2953
2954 if_wifi_sr =
2955 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2956 new_wifi_sr =
2957 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2958 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2959 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2960 if_wifi_sr->valid_bitmask = 0;
2961 bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));
2962
2963 /*
2964 * Update the bandwidth values if we got recent values
2965 * reported through the other KPI.
2966 */
2967 if (!(new_wifi_sr->valid_bitmask &
2968 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
2969 ifp->if_output_bw.max_bw > 0) {
2970 if_wifi_sr->valid_bitmask |=
2971 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
2972 if_wifi_sr->ul_max_bandwidth =
2973 ifp->if_output_bw.max_bw;
2974 }
2975 if (!(new_wifi_sr->valid_bitmask &
2976 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
2977 ifp->if_output_bw.eff_bw > 0) {
2978 if_wifi_sr->valid_bitmask |=
2979 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2980 if_wifi_sr->ul_effective_bandwidth =
2981 ifp->if_output_bw.eff_bw;
2982 }
2983 if (!(new_wifi_sr->valid_bitmask &
2984 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
2985 ifp->if_input_bw.max_bw > 0) {
2986 if_wifi_sr->valid_bitmask |=
2987 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
2988 if_wifi_sr->dl_max_bandwidth =
2989 ifp->if_input_bw.max_bw;
2990 }
2991 if (!(new_wifi_sr->valid_bitmask &
2992 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
2993 ifp->if_input_bw.eff_bw > 0) {
2994 if_wifi_sr->valid_bitmask |=
2995 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2996 if_wifi_sr->dl_effective_bandwidth =
2997 ifp->if_input_bw.eff_bw;
2998 }
2999 }
3000
3001 done:
3002 lck_rw_done(&ifp->if_link_status_lock);
3003 ifnet_lock_done(ifp);
3004 return (err);
3005 }
3006
3007 /*************************************************************************/
3008 /* Fastlane QoS Capable */
3009 /*************************************************************************/
3010
3011 errno_t
3012 ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3013 {
3014 if (interface == NULL)
3015 return (EINVAL);
3016
3017 if_set_qosmarking_mode(interface,
3018 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3019
3020 return (0);
3021 }
3022
3023 errno_t
3024 ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3025 {
3026 if (interface == NULL || capable == NULL)
3027 return (EINVAL);
3028 if (interface->if_eflags & IFEF_QOSMARKING_CAPABLE)
3029 *capable = true;
3030 else
3031 *capable = false;
3032 return (0);
3033 }
3034
3035 errno_t
3036 ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3037 {
3038 int64_t bytes;
3039
3040 if (interface == NULL || unsent_bytes == NULL)
3041 return (EINVAL);
3042
3043 bytes = *unsent_bytes = 0;
3044
3045 if (!IF_FULLY_ATTACHED(interface))
3046 return (ENXIO);
3047
3048 bytes = interface->if_sndbyte_unsent;
3049
3050 if (interface->if_eflags & IFEF_TXSTART)
3051 bytes += IFCQ_BYTES(&interface->if_snd);
3052 *unsent_bytes = bytes;
3053
3054 return (0);
3055 }
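
/*
 * Usage sketch (hypothetical caller):
 *
 *	int64_t pending = 0;
 *
 *	if (ifnet_get_unsent_bytes(ifp, &pending) == 0 && pending > 0) {
 *		// data is still queued on the interface send queue
 *	}
 */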
3056
3057 errno_t
3058 ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3059 {
3060 if (ifp == NULL || buf_status == NULL)
3061 return (EINVAL);
3062
3063 bzero(buf_status, sizeof (*buf_status));
3064
3065 if (!IF_FULLY_ATTACHED(ifp))
3066 return (ENXIO);
3067
3068 if (ifp->if_eflags & IFEF_TXSTART)
3069 buf_status->buf_interface = IFCQ_BYTES(&ifp->if_snd);
3070
3071 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3072 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3073
3074 return (0);
3075 }
3076
3077 void
3078 ifnet_normalise_unsent_data(void)
3079 {
3080 struct ifnet *ifp;
3081
3082 ifnet_head_lock_shared();
3083 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3084 ifnet_lock_exclusive(ifp);
3085 if (!IF_FULLY_ATTACHED(ifp)) {
3086 ifnet_lock_done(ifp);
3087 continue;
3088 }
3089 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3090 ifnet_lock_done(ifp);
3091 continue;
3092 }
3093
3094 if (ifp->if_sndbyte_total > 0 ||
3095 IFCQ_BYTES(&ifp->if_snd) > 0)
3096 ifp->if_unsent_data_cnt++;
3097
3098 ifnet_lock_done(ifp);
3099 }
3100 ifnet_head_done();
3101 }