/*
 * Copyright (c) 2004-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include "kpi_interface.h"

#include <sys/queue.h>
#include <sys/param.h>  /* for definition of NULL */
#include <kern/debug.h> /* for panic */
#include <sys/errno.h>
#include <sys/socket.h>
#include <sys/kern_event.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kpi_mbuf.h>
#include <sys/mcache.h>
#include <sys/protosw.h>
#include <sys/syslog.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/dlil.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/if_llreach.h>
#include <net/if_ether.h>
#include <net/net_api_stats.h>
#include <net/route.h>
#include <net/if_ports_used.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <kern/locks.h>
#include <kern/clock.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/in_pcb.h>
#include <netinet/igmp_var.h>
#include <netinet6/mld6_var.h>
#include <netkey/key.h>

#include "net/net_str_id.h"

#include <sys/kauth.h>
#include <security/mac_framework.h>
errno_t ifnet_allocate(const struct ifnet_init_params *init,
    ifnet_t *ifp);
static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
    ifnet_t *ifp, bool is_internal);

#define TOUCHLASTCHANGE(__if_lastchange) {                              \
    (__if_lastchange)->tv_sec = (time_t)net_uptime();                   \
    (__if_lastchange)->tv_usec = 0;                                     \
}

static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
    struct ifnet_llreach_info *);
static void ifnet_kpi_free(ifnet_t);
static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
    u_int32_t *);
static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
    u_char, int);
static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);

/*
 * Temporary work around until we have real reference counting
 *
 * We keep the bits about calling dlil_if_release (which should be
 * called recycle) transparent by calling it from our if_free function
 * pointer.  We have to keep the client's original detach function
 * somewhere so we can call it.
 */
static void
ifnet_kpi_free(ifnet_t ifp)
{
    ifnet_detached_func detach_func = ifp->if_kpi_storage;

    if (detach_func != NULL) {
        detach_func(ifp);
    }
}
static errno_t
ifnet_allocate_common(const struct ifnet_init_params *init,
    ifnet_t *ifp, bool is_internal)
{
    struct ifnet_init_eparams einit;

    bzero(&einit, sizeof(einit));

    einit.ver = IFNET_INIT_CURRENT_VERSION;
    einit.len = sizeof(einit);
    einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
    if (!is_internal) {
        einit.flags |= IFNET_INIT_ALLOC_KPI;
    }
    einit.uniqueid = init->uniqueid;
    einit.uniqueid_len = init->uniqueid_len;
    einit.name = init->name;
    einit.unit = init->unit;
    einit.family = init->family;
    einit.type = init->type;
    einit.output = init->output;
    einit.demux = init->demux;
    einit.add_proto = init->add_proto;
    einit.del_proto = init->del_proto;
    einit.check_multi = init->check_multi;
    einit.framer = init->framer;
    einit.softc = init->softc;
    einit.ioctl = init->ioctl;
    einit.set_bpf_tap = init->set_bpf_tap;
    einit.detach = init->detach;
    einit.event = init->event;
    einit.broadcast_addr = init->broadcast_addr;
    einit.broadcast_len = init->broadcast_len;

    return ifnet_allocate_extended(&einit, ifp);
}

errno_t
ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
{
    return ifnet_allocate_common(init, ifp, true);
}

errno_t
ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
{
    return ifnet_allocate_common(init, ifp, false);
}
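/*
 * Illustrative usage sketch (not part of the original source): a network
 * driver typically fills out a struct ifnet_init_params and calls
 * ifnet_allocate() as shown below.  The "mydrv_*" callbacks and softc
 * pointer are hypothetical names used only for illustration.
 *
 *	struct ifnet_init_params init;
 *	ifnet_t ifp;
 *	errno_t err;
 *
 *	bzero(&init, sizeof(init));
 *	init.name       = "mydrv";
 *	init.unit       = 0;
 *	init.family     = IFNET_FAMILY_ETHERNET;
 *	init.type       = IFT_ETHER;
 *	init.output     = mydrv_output;
 *	init.demux      = mydrv_demux;
 *	init.add_proto  = mydrv_add_proto;
 *	init.del_proto  = mydrv_del_proto;
 *	init.softc      = mydrv_softc;
 *	init.ioctl      = mydrv_ioctl;
 *	init.detach     = mydrv_detach;
 *	err = ifnet_allocate(&init, &ifp);
 */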
errno_t
ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
    ifnet_t *interface)
{
    struct ifnet_init_eparams einit;
    struct ifnet *ifp = NULL;
    char if_xname[IFXNAMSIZ] = {0};
    errno_t error;

    einit = *einit0;

    if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
        einit.len < sizeof(einit)) {
        return EINVAL;
    }

    if (einit.family == 0 || einit.name == NULL ||
        strlen(einit.name) >= IFNAMSIZ ||
        (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
        return EINVAL;
    }

    if (einit.flags & IFNET_INIT_LEGACY) {
        if (einit.output == NULL ||
            (einit.flags & IFNET_INIT_INPUT_POLL)) {
            return EINVAL;
        }
        einit.pre_enqueue = NULL;
        einit.output_ctl = NULL;
        einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
        einit.input_poll = NULL;
        einit.input_ctl = NULL;
    } else {
        if (einit.start == NULL) {
            return EINVAL;
        }
        if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
            return EINVAL;
        }
        if (einit.flags & IFNET_INIT_INPUT_POLL) {
            if (einit.input_poll == NULL || einit.input_ctl == NULL) {
                return EINVAL;
            }
        } else {
            einit.input_poll = NULL;
            einit.input_ctl = NULL;
        }
    }

    if (einit.type > UCHAR_MAX) {
        return EINVAL;
    }

    if (einit.unit > SHRT_MAX) {
        return EINVAL;
    }

    /* Initialize external name (name + unit) */
    (void) snprintf(if_xname, sizeof(if_xname), "%s%d",
        einit.name, einit.unit);

    if (einit.uniqueid == NULL) {
        einit.uniqueid = if_xname;
        einit.uniqueid_len = (uint32_t)strlen(if_xname);
    }

    error = dlil_if_acquire(einit.family, einit.uniqueid,
        einit.uniqueid_len, if_xname, &ifp);

    if (error == 0) {
        uint64_t br;

        /*
         * Cast ifp->if_name as non const. dlil_if_acquire sets it up
         * to point to storage of at least IFNAMSIZ bytes. It is safe
         * to write to this.
         */
        strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
        ifp->if_type = (u_char)einit.type;
        ifp->if_family = einit.family;
        ifp->if_subfamily = einit.subfamily;
        ifp->if_unit = (short)einit.unit;
        ifp->if_output = einit.output;
        ifp->if_pre_enqueue = einit.pre_enqueue;
        ifp->if_start = einit.start;
        ifp->if_output_ctl = einit.output_ctl;
        ifp->if_output_sched_model = einit.output_sched_model;
        ifp->if_output_bw.eff_bw = einit.output_bw;
        ifp->if_output_bw.max_bw = einit.output_bw_max;
        ifp->if_output_lt.eff_lt = einit.output_lt;
        ifp->if_output_lt.max_lt = einit.output_lt_max;
        ifp->if_input_poll = einit.input_poll;
        ifp->if_input_ctl = einit.input_ctl;
        ifp->if_input_bw.eff_bw = einit.input_bw;
        ifp->if_input_bw.max_bw = einit.input_bw_max;
        ifp->if_input_lt.eff_lt = einit.input_lt;
        ifp->if_input_lt.max_lt = einit.input_lt_max;
        ifp->if_demux = einit.demux;
        ifp->if_add_proto = einit.add_proto;
        ifp->if_del_proto = einit.del_proto;
        ifp->if_check_multi = einit.check_multi;
        ifp->if_framer_legacy = einit.framer;
        ifp->if_framer = einit.framer_extended;
        ifp->if_softc = einit.softc;
        ifp->if_ioctl = einit.ioctl;
        ifp->if_set_bpf_tap = einit.set_bpf_tap;
        ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
        ifp->if_event = einit.event;
        ifp->if_kpi_storage = einit.detach;

        /* Initialize Network ID */
        ifp->network_id_len = 0;
        bzero(&ifp->network_id, sizeof(ifp->network_id));

        /* Initialize external name (name + unit) */
        snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
            "%s", if_xname);

        /*
         * On embedded, framer() is already in the extended form;
         * we simply use it as is, unless the caller specifies
         * framer_extended() which will then override it.
         *
         * On non-embedded, framer() has long been exposed as part
         * of the public KPI, and therefore its signature must
         * remain the same (without the pre- and postpend length
         * parameters.)  We special case ether_frameout, such that
         * it gets mapped to its extended variant.  All other cases
         * utilize the stub routine which will simply return zeroes
         * for those new parameters.
         *
         * Internally, DLIL will only use the extended callback
         * variant which is represented by if_framer.
         */
#if !XNU_TARGET_OS_OSX
        if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
            ifp->if_framer = ifp->if_framer_legacy;
        }
#else /* XNU_TARGET_OS_OSX */
        if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
            if (ifp->if_framer_legacy == ether_frameout) {
                ifp->if_framer = ether_frameout_extended;
            } else {
                ifp->if_framer = ifnet_framer_stub;
            }
        }
#endif /* XNU_TARGET_OS_OSX */

        if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
            ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
        } else if (ifp->if_output_bw.eff_bw == 0) {
            ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
        }

        if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
            ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
        } else if (ifp->if_input_bw.eff_bw == 0) {
            ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
        }

        if (ifp->if_output_bw.max_bw == 0) {
            ifp->if_output_bw = ifp->if_input_bw;
        } else if (ifp->if_input_bw.max_bw == 0) {
            ifp->if_input_bw = ifp->if_output_bw;
        }

        /* Pin if_baudrate to 32 bits */
        br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
        if (br != 0) {
            ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
        }

        if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
            ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
        } else if (ifp->if_output_lt.eff_lt == 0) {
            ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
        }

        if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
            ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
        } else if (ifp->if_input_lt.eff_lt == 0) {
            ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
        }

        if (ifp->if_output_lt.max_lt == 0) {
            ifp->if_output_lt = ifp->if_input_lt;
        } else if (ifp->if_input_lt.max_lt == 0) {
            ifp->if_input_lt = ifp->if_output_lt;
        }

        if (ifp->if_ioctl == NULL) {
            ifp->if_ioctl = ifp_if_ioctl;
        }

        if_clear_eflags(ifp, -1);
        if (ifp->if_start != NULL) {
            if_set_eflags(ifp, IFEF_TXSTART);
            if (ifp->if_pre_enqueue == NULL) {
                ifp->if_pre_enqueue = ifnet_enqueue;
            }
            ifp->if_output = ifp->if_pre_enqueue;
        }

        if (ifp->if_input_poll != NULL) {
            if_set_eflags(ifp, IFEF_RXPOLL);
        }

        ifp->if_output_dlil = dlil_output_handler;
        ifp->if_input_dlil = dlil_input_handler;

        VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
            (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
            ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
            ifp->if_input_ctl == NULL));
        VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
            (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));

        if (einit.broadcast_len && einit.broadcast_addr) {
            if (einit.broadcast_len >
                sizeof(ifp->if_broadcast.u.buffer)) {
                MALLOC(ifp->if_broadcast.u.ptr, u_char *,
                    einit.broadcast_len, M_IFADDR, M_NOWAIT);
                if (ifp->if_broadcast.u.ptr == NULL) {
                    error = ENOMEM;
                } else {
                    bcopy(einit.broadcast_addr,
                        ifp->if_broadcast.u.ptr,
                        einit.broadcast_len);
                }
            } else {
                bcopy(einit.broadcast_addr,
                    ifp->if_broadcast.u.buffer,
                    einit.broadcast_len);
            }
            ifp->if_broadcast.length = einit.broadcast_len;
        } else {
            bzero(&ifp->if_broadcast, sizeof(ifp->if_broadcast));
        }

        if_clear_xflags(ifp, -1);
        /* legacy interface */
        if_set_xflags(ifp, IFXF_LEGACY);

        /*
         * output target queue delay is specified in millisecond
         * convert it to nanoseconds
         */
        IFCQ_TARGET_QDELAY(&ifp->if_snd) =
            einit.output_target_qdelay * 1000 * 1000;
        IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;

        ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
            einit.start_delay_timeout);

        IFCQ_PKT_DROP_LIMIT(&ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;

        /*
         * Set embryonic flag; this will be cleared
         * later when it is fully attached.
         */
        ifp->if_refflags = IFRF_EMBRYONIC;

        /*
         * Count the newly allocated ifnet
         */
        OSIncrementAtomic64(&net_api_stats.nas_ifnet_alloc_count);
        INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
        if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
            if_set_xflags(ifp, IFXF_ALLOC_KPI);
        } else {
            OSIncrementAtomic64(
                &net_api_stats.nas_ifnet_alloc_os_count);
            INC_ATOMIC_INT64_LIM(
                net_api_stats.nas_ifnet_alloc_os_total);
        }

        *interface = ifp;
        // temporary - this should be done in dlil_if_acquire
        ifnet_reference(ifp);
    } else {
        dlil_if_release(ifp);
        *interface = NULL;
    }
    return error;
}
errno_t
ifnet_reference(ifnet_t ifp)
{
    return dlil_if_ref(ifp);
}

void
ifnet_dispose(ifnet_t ifp)
{
    if (ifp->if_broadcast.length > sizeof(ifp->if_broadcast.u.buffer)) {
        FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
        ifp->if_broadcast.u.ptr = NULL;
    }

    dlil_if_release(ifp);
}

errno_t
ifnet_release(ifnet_t ifp)
{
    return dlil_if_free(ifp);
}
errno_t
ifnet_interface_family_find(const char *module_string,
    ifnet_family_t *family_id)
{
    if (module_string == NULL || family_id == NULL) {
        return EINVAL;
    }

    return net_str_id_find_internal(module_string, family_id,
        NSI_IF_FAM_ID, 1);
}

void *
ifnet_softc(ifnet_t interface)
{
    return (interface == NULL) ? NULL : interface->if_softc;
}

const char *
ifnet_name(ifnet_t interface)
{
    return (interface == NULL) ? NULL : interface->if_name;
}

ifnet_family_t
ifnet_family(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_family;
}

ifnet_subfamily_t
ifnet_subfamily(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_subfamily;
}

u_int32_t
ifnet_unit(ifnet_t interface)
{
    return (interface == NULL) ? (u_int32_t)0xffffffff :
           (u_int32_t)interface->if_unit;
}

u_int32_t
ifnet_index(ifnet_t interface)
{
    return (interface == NULL) ? (u_int32_t)0xffffffff :
           (u_int32_t)interface->if_index;
}

errno_t
ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
{
    u_int16_t old_flags;

    if (interface == NULL) {
        return EINVAL;
    }

    ifnet_lock_exclusive(interface);

    /* If we are modifying the up/down state, call if_updown */
    if ((mask & IFF_UP) != 0) {
        if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
    }

    old_flags = interface->if_flags;
    interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
    /* If we are modifying the multicast flag, set/unset the silent flag */
    if ((old_flags & IFF_MULTICAST) !=
        (interface->if_flags & IFF_MULTICAST)) {
        if (IGMP_IFINFO(interface) != NULL) {
            igmp_initsilent(interface, IGMP_IFINFO(interface));
        }
        if (MLD_IFINFO(interface) != NULL) {
            mld6_initsilent(interface, MLD_IFINFO(interface));
        }
    }

    ifnet_lock_done(interface);

    return 0;
}

u_int16_t
ifnet_flags(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_flags;
}
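/*
 * Illustrative usage sketch (not part of the original source): marking an
 * interface administratively up only changes the bits selected by the mask,
 * so unrelated flags are preserved.
 *
 *	errno_t err;
 *
 *	err = ifnet_set_flags(ifp, IFF_UP, IFF_UP);
 *	if (err == 0) {
 *		printf("%s%u flags now 0x%x\n", ifnet_name(ifp),
 *		    ifnet_unit(ifp), ifnet_flags(ifp));
 *	}
 */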
/*
 * This routine ensures the following:
 *
 * If IFEF_AWDL is set by the caller, also set the rest of flags as
 * defined in IFEF_AWDL_MASK.
 *
 * If IFEF_AWDL has been set on the interface and the caller attempts
 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
 * return failure.
 *
 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
 * on the interface.
 *
 * All other flags not associated with AWDL are not affected.
 *
 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
 */
static errno_t
ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
{
    u_int32_t eflags;

    ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);

    eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));

    if (ifp->if_eflags & IFEF_AWDL) {
        if (eflags & IFEF_AWDL) {
            if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
                return EINVAL;
            }
        } else {
            *new_eflags &= ~IFEF_AWDL_MASK;
            *mask |= IFEF_AWDL_MASK;
        }
    } else if (eflags & IFEF_AWDL) {
        *new_eflags |= IFEF_AWDL_MASK;
        *mask |= IFEF_AWDL_MASK;
    } else if (eflags & IFEF_AWDL_RESTRICTED &&
        !(ifp->if_eflags & IFEF_AWDL)) {
        return EINVAL;
    }
    return 0;
}

errno_t
ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
{
    u_int32_t oeflags;
    struct kev_msg ev_msg;
    struct net_event_data ev_data;

    if (interface == NULL) {
        return EINVAL;
    }

    bzero(&ev_msg, sizeof(ev_msg));
    ifnet_lock_exclusive(interface);
    /*
     * Sanity checks for IFEF_AWDL and its related flags.
     */
    if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
        ifnet_lock_done(interface);
        return EINVAL;
    }
    /*
     * Currently Interface advisory reporting is supported only for
     * skywalk interfaces.
     */
    if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
        ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
        ifnet_lock_done(interface);
        return EINVAL;
    }
    oeflags = interface->if_eflags;
    if_clear_eflags(interface, mask);
    if (new_flags != 0) {
        if_set_eflags(interface, (new_flags & mask));
    }
    ifnet_lock_done(interface);
    if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
        !(oeflags & IFEF_AWDL_RESTRICTED)) {
        ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
        /*
         * The interface is now restricted to applications that have
         * the entitlement.
         * The check for the entitlement will be done in the data
         * path, so we don't have to do anything here.
         */
    } else if (oeflags & IFEF_AWDL_RESTRICTED &&
        !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
        ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
    }
    /*
     * Notify configd so that it has a chance to perform better
     * reachability detection.
     */
    if (ev_msg.event_code) {
        bzero(&ev_data, sizeof(ev_data));
        ev_msg.vendor_code = KEV_VENDOR_APPLE;
        ev_msg.kev_class = KEV_NETWORK_CLASS;
        ev_msg.kev_subclass = KEV_DL_SUBCLASS;
        strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
        ev_data.if_family = interface->if_family;
        ev_data.if_unit = interface->if_unit;
        ev_msg.dv[0].data_length = sizeof(struct net_event_data);
        ev_msg.dv[0].data_ptr = &ev_data;
        ev_msg.dv[1].data_length = 0;
        dlil_post_complete_msg(interface, &ev_msg);
    }

    return 0;
}

u_int32_t
ifnet_eflags(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_eflags;
}

static errno_t
ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
{
    int before, after;

    if (ifp == NULL) {
        return EINVAL;
    }

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
    ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);

    /*
     * If this is called prior to ifnet attach, the actual work will
     * be done at attach time.  Otherwise, if it is called after
     * ifnet detach, then it is a no-op.
     */
    if (!ifnet_is_attached(ifp, 0)) {
        ifp->if_idle_new_flags = new_flags;
        ifp->if_idle_new_flags_mask = mask;
        return 0;
    }

    ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;

    before = ifp->if_idle_flags;
    ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
    after = ifp->if_idle_flags;

    if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
        ifp->if_want_aggressive_drain != 0) {
        ifp->if_want_aggressive_drain = 0;
    } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
        ifp->if_want_aggressive_drain++;
    }

    return 0;
}

errno_t
ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
{
    errno_t err;

    lck_mtx_lock(rnh_lock);
    ifnet_lock_exclusive(ifp);
    err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
    ifnet_lock_done(ifp);
    lck_mtx_unlock(rnh_lock);

    return err;
}

u_int32_t
ifnet_idle_flags(ifnet_t ifp)
{
    return (ifp == NULL) ? 0 : ifp->if_idle_flags;
}

errno_t
ifnet_set_link_quality(ifnet_t ifp, int quality)
{
    errno_t err = 0;

    if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
        err = EINVAL;
        goto done;
    }

    if (!ifnet_is_attached(ifp, 0)) {
        err = ENXIO;
        goto done;
    }

    if_lqm_update(ifp, quality, 0);

done:
    return err;
}

int
ifnet_link_quality(ifnet_t ifp)
{
    int lqm;

    if (ifp == NULL) {
        return IFNET_LQM_THRESH_OFF;
    }

    ifnet_lock_shared(ifp);
    lqm = ifp->if_interface_state.lqm_state;
    ifnet_lock_done(ifp);

    return lqm;
}

errno_t
ifnet_set_interface_state(ifnet_t ifp,
    struct if_interface_state *if_interface_state)
{
    errno_t err = 0;

    if (ifp == NULL || if_interface_state == NULL) {
        err = EINVAL;
        goto done;
    }

    if (!ifnet_is_attached(ifp, 0)) {
        err = ENXIO;
        goto done;
    }

    if_state_update(ifp, if_interface_state);

done:
    return err;
}

errno_t
ifnet_get_interface_state(ifnet_t ifp,
    struct if_interface_state *if_interface_state)
{
    errno_t err = 0;

    if (ifp == NULL || if_interface_state == NULL) {
        err = EINVAL;
        goto done;
    }

    if (!ifnet_is_attached(ifp, 0)) {
        err = ENXIO;
        goto done;
    }

    if_get_state(ifp, if_interface_state);

done:
    return err;
}

static errno_t
ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
    struct ifnet_llreach_info *iflri)
{
    if (ifp == NULL || iflri == NULL) {
        return EINVAL;
    }

    VERIFY(af == AF_INET || af == AF_INET6);

    return ifnet_llreach_get_defrouter(ifp, af, iflri);
}

errno_t
ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
{
    return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
}

errno_t
ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
{
    return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
}

errno_t
ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
    u_int32_t mask)
{
    errno_t error = 0;
    int tmp;

    if (ifp == NULL) {
        return EINVAL;
    }

    ifnet_lock_exclusive(ifp);
    tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
    if ((tmp & ~IFCAP_VALID)) {
        error = EINVAL;
    } else {
        ifp->if_capabilities = tmp;
    }
    ifnet_lock_done(ifp);

    return error;
}

u_int32_t
ifnet_capabilities_supported(ifnet_t ifp)
{
    return (ifp == NULL) ? 0 : ifp->if_capabilities;
}

errno_t
ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
    u_int32_t mask)
{
    errno_t error = 0;
    int tmp;
    struct kev_msg ev_msg;
    struct net_event_data ev_data;

    if (ifp == NULL) {
        return EINVAL;
    }

    ifnet_lock_exclusive(ifp);
    tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
    if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
        error = EINVAL;
    } else {
        ifp->if_capenable = tmp;
    }
    ifnet_lock_done(ifp);

    /* Notify application of the change */
    bzero(&ev_data, sizeof(struct net_event_data));
    bzero(&ev_msg, sizeof(struct kev_msg));
    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_DL_SUBCLASS;

    ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
    strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
    ev_data.if_family = ifp->if_family;
    ev_data.if_unit = (u_int32_t)ifp->if_unit;
    ev_msg.dv[0].data_length = sizeof(struct net_event_data);
    ev_msg.dv[0].data_ptr = &ev_data;
    ev_msg.dv[1].data_length = 0;
    dlil_post_complete_msg(ifp, &ev_msg);

    return error;
}

u_int32_t
ifnet_capabilities_enabled(ifnet_t ifp)
{
    return (ifp == NULL) ? 0 : ifp->if_capenable;
}

static const ifnet_offload_t offload_mask =
    (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
    IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
    IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
    IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
    IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
    IFNET_SW_TIMESTAMP);

static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;

errno_t
ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
{
    u_int32_t ifcaps = 0;

    if (interface == NULL) {
        return EINVAL;
    }

    ifnet_lock_exclusive(interface);
    interface->if_hwassist = (offload & offload_mask);

    /*
     * Hardware capable of partial checksum offload is
     * flexible enough to handle any transports utilizing
     * Internet Checksumming.  Include those transports
     * here, and leave the final decision to IP.
     */
    if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
        interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
            IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
    }
    log(LOG_DEBUG, "%s: set offload flags=%b\n",
        if_name(interface),
        interface->if_hwassist, IFNET_OFFLOADF_BITS);
    ifnet_lock_done(interface);

    if ((offload & any_offload_csum)) {
        ifcaps |= IFCAP_HWCSUM;
    }
    if ((offload & IFNET_TSO_IPV4)) {
        ifcaps |= IFCAP_TSO4;
    }
    if ((offload & IFNET_TSO_IPV6)) {
        ifcaps |= IFCAP_TSO6;
    }
    if ((offload & IFNET_VLAN_MTU)) {
        ifcaps |= IFCAP_VLAN_MTU;
    }
    if ((offload & IFNET_VLAN_TAGGING)) {
        ifcaps |= IFCAP_VLAN_HWTAGGING;
    }
    if ((offload & IFNET_TX_STATUS)) {
        ifcaps |= IFCAP_TXSTATUS;
    }
    if ((offload & IFNET_HW_TIMESTAMP)) {
        ifcaps |= IFCAP_HW_TIMESTAMP;
    }
    if ((offload & IFNET_SW_TIMESTAMP)) {
        ifcaps |= IFCAP_SW_TIMESTAMP;
    }
    if ((offload & IFNET_CSUM_PARTIAL)) {
        ifcaps |= IFCAP_CSUM_PARTIAL;
    }
    if ((offload & IFNET_CSUM_ZERO_INVERT)) {
        ifcaps |= IFCAP_CSUM_ZERO_INVERT;
    }
    (void) ifnet_set_capabilities_supported(interface, ifcaps,
        IFCAP_VALID);
    (void) ifnet_set_capabilities_enabled(interface, ifcaps,
        IFCAP_VALID);

    return 0;
}

ifnet_offload_t
ifnet_offload(ifnet_t interface)
{
    return (interface == NULL) ?
           0 : (interface->if_hwassist & offload_mask);
}
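/*
 * Illustrative usage sketch (not part of the original source): a driver
 * that supports IPv4/TCP/UDP checksum offload and TSO would advertise it
 * once at attach time; ifnet_set_offload() derives the matching IFCAP_*
 * capability bits automatically.
 *
 *	ifnet_offload_t offload = IFNET_CSUM_IP | IFNET_CSUM_TCP |
 *	    IFNET_CSUM_UDP | IFNET_TSO_IPV4 | IFNET_MULTIPAGES;
 *
 *	(void) ifnet_set_offload(ifp, offload);
 */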
errno_t
ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
{
    errno_t error = 0;

    if (interface == NULL || mtuLen < interface->if_mtu) {
        return EINVAL;
    }

    switch (family) {
    case AF_INET:
        if (interface->if_hwassist & IFNET_TSO_IPV4) {
            interface->if_tso_v4_mtu = mtuLen;
        } else {
            error = EINVAL;
        }
        break;

    case AF_INET6:
        if (interface->if_hwassist & IFNET_TSO_IPV6) {
            interface->if_tso_v6_mtu = mtuLen;
        } else {
            error = EINVAL;
        }
        break;

    default:
        error = EPROTONOSUPPORT;
        break;
    }

    return error;
}

errno_t
ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
{
    errno_t error = 0;

    if (interface == NULL || mtuLen == NULL) {
        return EINVAL;
    }

    switch (family) {
    case AF_INET:
        if (interface->if_hwassist & IFNET_TSO_IPV4) {
            *mtuLen = interface->if_tso_v4_mtu;
        } else {
            error = EINVAL;
        }
        break;

    case AF_INET6:
        if (interface->if_hwassist & IFNET_TSO_IPV6) {
            *mtuLen = interface->if_tso_v6_mtu;
        } else {
            error = EINVAL;
        }
        break;

    default:
        error = EPROTONOSUPPORT;
        break;
    }

    return error;
}

errno_t
ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
{
    struct kev_msg ev_msg;
    struct net_event_data ev_data;

    bzero(&ev_data, sizeof(struct net_event_data));
    bzero(&ev_msg, sizeof(struct kev_msg));

    if (interface == NULL) {
        return EINVAL;
    }

    /* Do not accept wacky values */
    if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
        return EINVAL;
    }

    if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
        if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
            if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
        } else {
            if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
        }
    }

    (void) ifnet_touch_lastchange(interface);

    /* Notify application of the change */
    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = KEV_DL_SUBCLASS;

    ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
    strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
    ev_data.if_family = interface->if_family;
    ev_data.if_unit = (u_int32_t)interface->if_unit;
    ev_msg.dv[0].data_length = sizeof(struct net_event_data);
    ev_msg.dv[0].data_ptr = &ev_data;
    ev_msg.dv[1].data_length = 0;
    dlil_post_complete_msg(interface, &ev_msg);

    return 0;
}

u_int32_t
ifnet_get_wake_flags(ifnet_t interface)
{
    u_int32_t flags = 0;

    if (interface == NULL) {
        return 0;
    }

    if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
        flags |= IF_WAKE_ON_MAGIC_PACKET;
    }

    return flags;
}

/*
 * Should MIB data store a copy?
 */
errno_t
ifnet_set_link_mib_data(ifnet_t interface, void *mibData, uint32_t mibLen)
{
    if (interface == NULL) {
        return EINVAL;
    }

    ifnet_lock_exclusive(interface);
    interface->if_linkmib = (void*)mibData;
    interface->if_linkmiblen = mibLen;
    ifnet_lock_done(interface);
    return 0;
}

errno_t
ifnet_get_link_mib_data(ifnet_t interface, void *mibData, uint32_t *mibLen)
{
    errno_t result = 0;

    if (interface == NULL) {
        return EINVAL;
    }

    ifnet_lock_shared(interface);
    if (*mibLen < interface->if_linkmiblen) {
        result = EMSGSIZE;
    }
    if (result == 0 && interface->if_linkmib == NULL) {
        result = ENOTSUP;
    }

    if (result == 0) {
        *mibLen = interface->if_linkmiblen;
        bcopy(interface->if_linkmib, mibData, *mibLen);
    }
    ifnet_lock_done(interface);

    return result;
}

uint32_t
ifnet_get_link_mib_data_length(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_linkmiblen;
}

errno_t
ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
    mbuf_t m, void *route, const struct sockaddr *dest)
{
    if (interface == NULL || protocol_family == 0 || m == NULL) {
        if (m != NULL) {
            mbuf_freem_list(m);
        }
        return EINVAL;
    }
    return dlil_output(interface, protocol_family, m, route, dest, 0, NULL);
}

errno_t
ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
{
    if (interface == NULL || m == NULL) {
        if (m != NULL) {
            mbuf_freem_list(m);
        }
        return EINVAL;
    }
    return dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL);
}

errno_t
ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
{
    if (interface == NULL) {
        return EINVAL;
    }

    interface->if_mtu = mtu;
    return 0;
}

u_int32_t
ifnet_mtu(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_mtu;
}
u_int8_t
ifnet_type(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_data.ifi_type;
}

errno_t
ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
{
    if (interface == NULL) {
        return EINVAL;
    }

    interface->if_data.ifi_addrlen = addrlen;
    return 0;
}

u_int8_t
ifnet_addrlen(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
}

errno_t
ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
{
    if (interface == NULL) {
        return EINVAL;
    }

    interface->if_data.ifi_hdrlen = hdrlen;
    return 0;
}

u_int8_t
ifnet_hdrlen(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
}

errno_t
ifnet_set_metric(ifnet_t interface, u_int32_t metric)
{
    if (interface == NULL) {
        return EINVAL;
    }

    interface->if_data.ifi_metric = metric;
    return 0;
}

u_int32_t
ifnet_metric(ifnet_t interface)
{
    return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
}

errno_t
ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
        ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;

    /* Pin if_baudrate to 32 bits until we can change the storage size */
    ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;

    return 0;
}

u_int64_t
ifnet_baudrate(struct ifnet *ifp)
{
    return (ifp == NULL) ? 0 : ifp->if_baudrate;
}

errno_t
ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
    struct if_bandwidths *input_bw)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    /* set input values first (if any), as output values depend on them */
    if (input_bw != NULL) {
        (void) ifnet_set_input_bandwidths(ifp, input_bw);
    }

    if (output_bw != NULL) {
        (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
    }

    return 0;
}

static void
ifnet_set_link_status_outbw(struct ifnet *ifp)
{
    struct if_wifi_status_v1 *sr;
    sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
    if (ifp->if_output_bw.eff_bw != 0) {
        sr->valid_bitmask |=
            IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
        sr->ul_effective_bandwidth =
            ifp->if_output_bw.eff_bw > UINT32_MAX ?
            UINT32_MAX :
            (uint32_t)ifp->if_output_bw.eff_bw;
    }
    if (ifp->if_output_bw.max_bw != 0) {
        sr->valid_bitmask |=
            IF_WIFI_UL_MAX_BANDWIDTH_VALID;
        sr->ul_max_bandwidth =
            ifp->if_output_bw.max_bw > UINT32_MAX ?
            UINT32_MAX :
            (uint32_t)ifp->if_output_bw.max_bw;
    }
}

errno_t
ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
    boolean_t locked)
{
    struct if_bandwidths old_bw;
    struct ifclassq *ifq;
    u_int64_t br;

    VERIFY(ifp != NULL && bw != NULL);

    ifq = &ifp->if_snd;
    if (!locked) {
        IFCQ_LOCK(ifq);
    }
    IFCQ_LOCK_ASSERT_HELD(ifq);

    old_bw = ifp->if_output_bw;
    if (bw->eff_bw != 0) {
        ifp->if_output_bw.eff_bw = bw->eff_bw;
    }
    if (bw->max_bw != 0) {
        ifp->if_output_bw.max_bw = bw->max_bw;
    }
    if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
        ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
    } else if (ifp->if_output_bw.eff_bw == 0) {
        ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
    }

    /* Pin if_baudrate to 32 bits */
    br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
    if (br != 0) {
        ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
    }

    /* Adjust queue parameters if needed */
    if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
        old_bw.max_bw != ifp->if_output_bw.max_bw) {
        ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
    }

    if (!locked) {
        IFCQ_UNLOCK(ifq);
    }

    /*
     * If this is a Wifi interface, update the values in
     * if_link_status structure also.
     */
    if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
        lck_rw_lock_exclusive(&ifp->if_link_status_lock);
        ifnet_set_link_status_outbw(ifp);
        lck_rw_done(&ifp->if_link_status_lock);
    }

    return 0;
}

static void
ifnet_set_link_status_inbw(struct ifnet *ifp)
{
    struct if_wifi_status_v1 *sr;

    sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
    if (ifp->if_input_bw.eff_bw != 0) {
        sr->valid_bitmask |=
            IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
        sr->dl_effective_bandwidth =
            ifp->if_input_bw.eff_bw > UINT32_MAX ?
            UINT32_MAX :
            (uint32_t)ifp->if_input_bw.eff_bw;
    }
    if (ifp->if_input_bw.max_bw != 0) {
        sr->valid_bitmask |=
            IF_WIFI_DL_MAX_BANDWIDTH_VALID;
        sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
            UINT32_MAX :
            (uint32_t)ifp->if_input_bw.max_bw;
    }
}

errno_t
ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
{
    struct if_bandwidths old_bw;

    VERIFY(ifp != NULL && bw != NULL);

    old_bw = ifp->if_input_bw;
    if (bw->eff_bw != 0) {
        ifp->if_input_bw.eff_bw = bw->eff_bw;
    }
    if (bw->max_bw != 0) {
        ifp->if_input_bw.max_bw = bw->max_bw;
    }
    if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
        ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
    } else if (ifp->if_input_bw.eff_bw == 0) {
        ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
    }

    if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
        lck_rw_lock_exclusive(&ifp->if_link_status_lock);
        ifnet_set_link_status_inbw(ifp);
        lck_rw_done(&ifp->if_link_status_lock);
    }

    if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
        old_bw.max_bw != ifp->if_input_bw.max_bw) {
        ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
    }

    return 0;
}

u_int64_t
ifnet_output_linkrate(struct ifnet *ifp)
{
    struct ifclassq *ifq = &ifp->if_snd;
    u_int64_t rate;

    IFCQ_LOCK_ASSERT_HELD(ifq);

    rate = ifp->if_output_bw.eff_bw;
    if (IFCQ_TBR_IS_ENABLED(ifq)) {
        u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
        VERIFY(tbr_rate > 0);
        rate = MIN(rate, ifp->if_snd.ifcq_tbr.tbr_rate_raw);
    }

    return rate;
}

u_int64_t
ifnet_input_linkrate(struct ifnet *ifp)
{
    return ifp->if_input_bw.eff_bw;
}

errno_t
ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
    struct if_bandwidths *input_bw)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (output_bw != NULL) {
        *output_bw = ifp->if_output_bw;
    }
    if (input_bw != NULL) {
        *input_bw = ifp->if_input_bw;
    }

    return 0;
}
errno_t
ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
    struct if_latencies *input_lt)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (output_lt != NULL) {
        (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
    }

    if (input_lt != NULL) {
        (void) ifnet_set_input_latencies(ifp, input_lt);
    }

    return 0;
}

errno_t
ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
    boolean_t locked)
{
    struct if_latencies old_lt;
    struct ifclassq *ifq;

    VERIFY(ifp != NULL && lt != NULL);

    ifq = &ifp->if_snd;
    if (!locked) {
        IFCQ_LOCK(ifq);
    }
    IFCQ_LOCK_ASSERT_HELD(ifq);

    old_lt = ifp->if_output_lt;
    if (lt->eff_lt != 0) {
        ifp->if_output_lt.eff_lt = lt->eff_lt;
    }
    if (lt->max_lt != 0) {
        ifp->if_output_lt.max_lt = lt->max_lt;
    }
    if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
        ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
    } else if (ifp->if_output_lt.eff_lt == 0) {
        ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
    }

    /* Adjust queue parameters if needed */
    if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
        old_lt.max_lt != ifp->if_output_lt.max_lt) {
        ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
    }

    if (!locked) {
        IFCQ_UNLOCK(ifq);
    }

    return 0;
}

errno_t
ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
{
    struct if_latencies old_lt;

    VERIFY(ifp != NULL && lt != NULL);

    old_lt = ifp->if_input_lt;
    if (lt->eff_lt != 0) {
        ifp->if_input_lt.eff_lt = lt->eff_lt;
    }
    if (lt->max_lt != 0) {
        ifp->if_input_lt.max_lt = lt->max_lt;
    }
    if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
        ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
    } else if (ifp->if_input_lt.eff_lt == 0) {
        ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
    }

    if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
        old_lt.max_lt != ifp->if_input_lt.max_lt) {
        ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
    }

    return 0;
}

errno_t
ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
    struct if_latencies *input_lt)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (output_lt != NULL) {
        *output_lt = ifp->if_output_lt;
    }
    if (input_lt != NULL) {
        *input_lt = ifp->if_input_lt;
    }

    return 0;
}

errno_t
ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
{
    errno_t err;

    if (ifp == NULL) {
        err = EINVAL;
    } else if (!ifnet_is_attached(ifp, 1)) {
        err = ENXIO;
    } else {
        err = dlil_rxpoll_set_params(ifp, p, FALSE);
        /* Release the io ref count */
        ifnet_decr_iorefcnt(ifp);
    }

    return err;
}

errno_t
ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
{
    errno_t err;

    if (ifp == NULL || p == NULL) {
        err = EINVAL;
    } else if (!ifnet_is_attached(ifp, 1)) {
        err = ENXIO;
    } else {
        err = dlil_rxpoll_get_params(ifp, p);
        /* Release the io ref count */
        ifnet_decr_iorefcnt(ifp);
    }

    return err;
}
errno_t
ifnet_stat_increment(struct ifnet *ifp,
    const struct ifnet_stat_increment_param *s)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (s->packets_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
    }
    if (s->bytes_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
    }
    if (s->errors_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
    }

    if (s->packets_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
    }
    if (s->bytes_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
    }
    if (s->errors_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
    }

    if (s->collisions != 0) {
        atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
    }
    if (s->dropped != 0) {
        atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
    }

    /* Touch the last change time. */
    TOUCHLASTCHANGE(&ifp->if_lastchange);

    if (ifp->if_data_threshold != 0) {
        ifnet_notify_data_threshold(ifp);
    }

    return 0;
}

errno_t
ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
    u_int32_t bytes_in, u_int32_t errors_in)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (packets_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
    }
    if (bytes_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
    }
    if (errors_in != 0) {
        atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
    }

    TOUCHLASTCHANGE(&ifp->if_lastchange);

    if (ifp->if_data_threshold != 0) {
        ifnet_notify_data_threshold(ifp);
    }

    return 0;
}

errno_t
ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
    u_int32_t bytes_out, u_int32_t errors_out)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    if (packets_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
    }
    if (bytes_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
    }
    if (errors_out != 0) {
        atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
    }

    TOUCHLASTCHANGE(&ifp->if_lastchange);

    if (ifp->if_data_threshold != 0) {
        ifnet_notify_data_threshold(ifp);
    }

    return 0;
}

errno_t
ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
    atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
    atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
    atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);

    atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
    atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
    atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
    atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);

    atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
    atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
    atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);

    /* Touch the last change time. */
    TOUCHLASTCHANGE(&ifp->if_lastchange);

    if (ifp->if_data_threshold != 0) {
        ifnet_notify_data_threshold(ifp);
    }

    return 0;
}

errno_t
ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
{
    if (ifp == NULL) {
        return EINVAL;
    }

    atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
    atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
    atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
    atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);

    atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
    atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
    atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
    atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);

    atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
    atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
    atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);

    if (ifp->if_data_threshold != 0) {
        ifnet_notify_data_threshold(ifp);
    }

    return 0;
}
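/*
 * Illustrative usage sketch (not part of the original source): a driver's
 * receive path would typically account completed packets in bulk rather
 * than per packet, e.g. once per interrupt or per input batch.
 *
 *	u_int32_t pkts = 0, bytes = 0, errs = 0;
 *
 *	... count the packets handed to the stack ...
 *
 *	(void) ifnet_stat_increment_in(ifp, pkts, bytes, errs);
 */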
errno_t
ifnet_touch_lastchange(ifnet_t interface)
{
    if (interface == NULL) {
        return EINVAL;
    }

    TOUCHLASTCHANGE(&interface->if_lastchange);

    return 0;
}

errno_t
ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
{
    if (interface == NULL) {
        return EINVAL;
    }

    *last_change = interface->if_data.ifi_lastchange;
    /* Crude conversion from uptime to calendar time */
    last_change->tv_sec += boottime_sec();

    return 0;
}

errno_t
ifnet_touch_lastupdown(ifnet_t interface)
{
    if (interface == NULL) {
        return EINVAL;
    }

    TOUCHLASTCHANGE(&interface->if_lastupdown);

    return 0;
}

errno_t
ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
{
    if (interface == NULL) {
        return EINVAL;
    }

    /* Calculate the delta */
    updown_delta->tv_sec = (time_t)net_uptime();
    if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
        updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
    }
    updown_delta->tv_usec = 0;

    return 0;
}
errno_t
ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
{
    return addresses == NULL ? EINVAL :
           ifnet_get_address_list_family(interface, addresses, 0);
}

struct ifnet_addr_list {
    SLIST_ENTRY(ifnet_addr_list)    ifal_le;
    struct ifaddr                   *ifal_ifa;
};

errno_t
ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
    sa_family_t family)
{
    return ifnet_get_address_list_family_internal(interface, addresses,
               family, 0, M_NOWAIT, 0);
}

errno_t
ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
{
    return addresses == NULL ? EINVAL :
           ifnet_get_address_list_family_internal(interface, addresses,
               0, 0, M_NOWAIT, 1);
}

extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);

extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);

__private_extern__ errno_t
ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
    sa_family_t family, int detached, int how, int return_inuse_addrs)
{
    SLIST_HEAD(, ifnet_addr_list) ifal_head;
    struct ifnet_addr_list *ifal, *ifal_tmp;
    struct ifnet *ifp;
    int count = 0;
    errno_t err = 0;
    int usecount = 0;
    int index = 0;

    SLIST_INIT(&ifal_head);

    if (addresses == NULL) {
        err = EINVAL;
        goto done;
    }
    *addresses = NULL;

    if (detached) {
        /*
         * Interface has been detached, so skip the lookup
         * at ifnet_head and go directly to inner loop.
         */
        ifp = interface;
        if (ifp == NULL) {
            err = EINVAL;
            goto done;
        }
        goto one;
    }

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        if (interface != NULL && ifp != interface) {
            continue;
        }
one:
        ifnet_lock_shared(ifp);
        if (interface == NULL || interface == ifp) {
            struct ifaddr *ifa;
            TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                IFA_LOCK(ifa);
                if (family != 0 &&
                    ifa->ifa_addr->sa_family != family) {
                    IFA_UNLOCK(ifa);
                    continue;
                }
                MALLOC(ifal, struct ifnet_addr_list *,
                    sizeof(*ifal), M_TEMP, how);
                if (ifal == NULL) {
                    IFA_UNLOCK(ifa);
                    ifnet_lock_done(ifp);
                    if (!detached) {
                        ifnet_head_done();
                    }
                    err = ENOMEM;
                    goto done;
                }
                ifal->ifal_ifa = ifa;
                IFA_ADDREF_LOCKED(ifa);
                SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
                IFA_UNLOCK(ifa);
                count++;
            }
        }
        ifnet_lock_done(ifp);
        if (detached) {
            break;
        }
    }
    if (!detached) {
        ifnet_head_done();
    }

    if (count == 0) {
        err = ENXIO;
        goto done;
    }
    MALLOC(*addresses, ifaddr_t *, sizeof(ifaddr_t) * (count + 1),
        M_TEMP, how);
    if (*addresses == NULL) {
        err = ENOMEM;
        goto done;
    }
    bzero(*addresses, sizeof(ifaddr_t) * (count + 1));

done:
    SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
        SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
        if (err == 0) {
            if (return_inuse_addrs) {
                usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
                usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
                if (usecount) {
                    (*addresses)[index] = ifal->ifal_ifa;
                    index++;
                } else {
                    IFA_REMREF(ifal->ifal_ifa);
                }
            } else {
                (*addresses)[--count] = ifal->ifal_ifa;
            }
        } else {
            IFA_REMREF(ifal->ifal_ifa);
        }
        FREE(ifal, M_TEMP);
    }

    VERIFY(err == 0 || *addresses == NULL);
    if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
        VERIFY(return_inuse_addrs == 1);
        FREE(*addresses, M_TEMP);
        err = ENXIO;
    }
    return err;
}

void
ifnet_free_address_list(ifaddr_t *addresses)
{
    int i;

    if (addresses == NULL) {
        return;
    }

    for (i = 0; addresses[i] != NULL; i++) {
        IFA_REMREF(addresses[i]);
    }

    FREE(addresses, M_TEMP);
}
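/*
 * Illustrative usage sketch (not part of the original source): callers walk
 * the NULL-terminated array returned by the address list accessors and must
 * balance it with ifnet_free_address_list(), which drops the references
 * taken above.
 *
 *	ifaddr_t *addrs;
 *	int i;
 *
 *	if (ifnet_get_address_list_family(ifp, &addrs, AF_INET) == 0) {
 *		for (i = 0; addrs[i] != NULL; i++) {
 *			... inspect addrs[i] via ifaddr_address() ...
 *		}
 *		ifnet_free_address_list(addrs);
 *	}
 */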
void *
ifnet_lladdr(ifnet_t interface)
{
    struct ifaddr *ifa;
    void *lladdr;

    if (interface == NULL) {
        return NULL;
    }

    /*
     * if_lladdr points to the permanent link address of
     * the interface and it never gets deallocated; internal
     * code should simply use IF_LLADDR() for performance.
     */
    ifa = interface->if_lladdr;
    IFA_LOCK_SPIN(ifa);
    lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
    IFA_UNLOCK(ifa);

    return lladdr;
}

errno_t
ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
    size_t *out_len)
{
    if (interface == NULL || addr == NULL || out_len == NULL) {
        return EINVAL;
    }

    *out_len = interface->if_broadcast.length;

    if (buffer_len < interface->if_broadcast.length) {
        return EMSGSIZE;
    }

    if (interface->if_broadcast.length == 0) {
        return ENXIO;
    }

    if (interface->if_broadcast.length <=
        sizeof(interface->if_broadcast.u.buffer)) {
        bcopy(interface->if_broadcast.u.buffer, addr,
            interface->if_broadcast.length);
    } else {
        bcopy(interface->if_broadcast.u.ptr, addr,
            interface->if_broadcast.length);
    }

    return 0;
}

static errno_t
ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
    size_t lladdr_len, kauth_cred_t *credp)
{
    const u_int8_t *bytes;
    size_t bytes_len;
    errno_t error = 0;
    uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
    struct ifaddr *ifa;

    /*
     * Make sure to accomodate the largest possible
     * size of SA(if_lladdr)->sa_len.
     */
    _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));

    if (interface == NULL || lladdr == NULL) {
        return EINVAL;
    }

    ifa = interface->if_lladdr;
    IFA_LOCK_SPIN(ifa);
    bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
    IFA_UNLOCK(ifa);

    bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
    if (bytes_len != lladdr_len) {
        bzero(lladdr, lladdr_len);
        error = EMSGSIZE;
    } else {
        bcopy(bytes, lladdr, bytes_len);
    }

    return error;
}

errno_t
ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
{
    return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
               NULL);
}

errno_t
ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
{
    errno_t error;
    net_thread_marks_t marks;
    kauth_cred_t cred;
    kauth_cred_t *credp;

    marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
    cred = kauth_cred_proc_ref(current_proc());
    credp = &cred;

    error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
        credp);

    kauth_cred_unref(credp);
    net_thread_marks_pop(marks);

    return error;
}

static errno_t
ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
    size_t lladdr_len, u_char new_type, int apply_type)
{
    struct ifaddr *ifa;
    errno_t error = 0;

    if (interface == NULL) {
        return EINVAL;
    }

    ifnet_head_lock_shared();
    ifnet_lock_exclusive(interface);
    if (lladdr_len != 0 &&
        (lladdr_len != interface->if_addrlen || lladdr == 0)) {
        ifnet_lock_done(interface);
        ifnet_head_done();
        return EINVAL;
    }
    ifa = ifnet_addrs[interface->if_index - 1];
    if (ifa != NULL) {
        struct sockaddr_dl *sdl;

        IFA_LOCK_SPIN(ifa);
        sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
        if (lladdr_len != 0) {
            bcopy(lladdr, LLADDR(sdl), lladdr_len);
        } else {
            bzero(LLADDR(sdl), interface->if_addrlen);
        }
        /* lladdr_len-check with if_addrlen makes sure it fits in u_char */
        sdl->sdl_alen = (u_char)lladdr_len;

        if (apply_type) {
            sdl->sdl_type = new_type;
        }
        IFA_UNLOCK(ifa);
    } else {
        error = ENXIO;
    }
    ifnet_lock_done(interface);
    ifnet_head_done();

    /* Generate a kernel event */
    if (error == 0) {
        intf_event_enqueue_nwk_wq_entry(interface, NULL,
            INTF_EVENT_CODE_LLADDR_UPDATE);
        dlil_post_msg(interface, KEV_DL_SUBCLASS,
            KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
    }

    return error;
}

errno_t
ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
{
    return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0);
}

errno_t
ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
    size_t lladdr_len, u_char type)
{
    return ifnet_set_lladdr_internal(interface, lladdr,
               lladdr_len, type, 1);
}
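/*
 * Illustrative usage sketch (not part of the original source): a driver
 * changing its MAC address passes a buffer whose length matches the
 * interface's address length (6 bytes for Ethernet).
 *
 *	u_char mac[ETHER_ADDR_LEN] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
 *
 *	errno_t err = ifnet_set_lladdr(ifp, mac, sizeof(mac));
 */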
errno_t
ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
    ifmultiaddr_t *ifmap)
{
    if (interface == NULL || maddr == NULL) {
        return EINVAL;
    }

    /* Don't let users screw up protocols' entries. */
    switch (maddr->sa_family) {
    case AF_LINK: {
        const struct sockaddr_dl *sdl =
            (const struct sockaddr_dl *)(uintptr_t)maddr;
        if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
            (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
            offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
            return EINVAL;
        }
        break;
    }
    case AF_UNSPEC:
        if (maddr->sa_len < ETHER_ADDR_LEN +
            offsetof(struct sockaddr, sa_data)) {
            return EINVAL;
        }
        break;
    default:
        return EINVAL;
    }

    return if_addmulti_anon(interface, maddr, ifmap);
}

errno_t
ifnet_remove_multicast(ifmultiaddr_t ifma)
{
    struct sockaddr *maddr;

    if (ifma == NULL || ifma->ifma_ifp == NULL) {
        return EINVAL;
    }

    maddr = ifma->ifma_addr;
    /* Don't let users screw up protocols' entries. */
    if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
        return EINVAL;
    }

    return if_delmulti_anon(ifma->ifma_ifp, maddr);
}

errno_t
ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
{
    int count = 0;
    int cmax = 0;
    struct ifmultiaddr *addr;

    if (ifp == NULL || addresses == NULL) {
        return EINVAL;
    }

    ifnet_lock_shared(ifp);
    LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
        cmax++;
    }

    MALLOC(*addresses, ifmultiaddr_t *, sizeof(ifmultiaddr_t) * (cmax + 1),
        M_TEMP, M_NOWAIT);
    if (*addresses == NULL) {
        ifnet_lock_done(ifp);
        return ENOMEM;
    }

    LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
        if (count + 1 > cmax) {
            break;
        }
        (*addresses)[count] = (ifmultiaddr_t)addr;
        ifmaddr_reference((*addresses)[count]);
        count++;
    }
    (*addresses)[cmax] = NULL;
    ifnet_lock_done(ifp);

    return 0;
}

void
ifnet_free_multicast_list(ifmultiaddr_t *addresses)
{
    int i;

    if (addresses == NULL) {
        return;
    }

    for (i = 0; addresses[i] != NULL; i++) {
        ifmaddr_release(addresses[i]);
    }

    FREE(addresses, M_TEMP);
}
errno_t
ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
{
    struct ifnet *ifp;
    size_t namelen;

    if (ifname == NULL) {
        return EINVAL;
    }

    namelen = strlen(ifname);

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        struct ifaddr *ifa;
        struct sockaddr_dl *ll_addr;

        ifa = ifnet_addrs[ifp->if_index - 1];
        if (ifa == NULL) {
            continue;
        }

        IFA_LOCK(ifa);
        ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;

        if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
            ifname, ll_addr->sdl_nlen) == 0) {
            IFA_UNLOCK(ifa);
            *ifpp = ifp;
            ifnet_reference(*ifpp);
            break;
        }
        IFA_UNLOCK(ifa);
    }
    ifnet_head_done();

    return (ifp == NULL) ? ENXIO : 0;
}

errno_t
ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
{
    return ifnet_list_get_common(family, FALSE, list, count);
}

__private_extern__ errno_t
ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
{
    return ifnet_list_get_common(family, TRUE, list, count);
}

struct ifnet_list {
    SLIST_ENTRY(ifnet_list) ifl_le;
    struct ifnet            *ifl_ifp;
};

static errno_t
ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
    u_int32_t *count)
{
#pragma unused(get_all)
    SLIST_HEAD(, ifnet_list) ifl_head;
    struct ifnet_list *ifl, *ifl_tmp;
    struct ifnet *ifp;
    int cnt = 0;
    errno_t err = 0;

    SLIST_INIT(&ifl_head);

    if (list == NULL || count == NULL) {
        err = EINVAL;
        goto done;
    }
    *count = 0;
    *list = NULL;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
        if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
            MALLOC(ifl, struct ifnet_list *, sizeof(*ifl),
                M_TEMP, M_NOWAIT);
            if (ifl == NULL) {
                ifnet_head_done();
                err = ENOMEM;
                goto done;
            }
            ifl->ifl_ifp = ifp;
            ifnet_reference(ifp);
            SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
            cnt++;
        }
    }
    ifnet_head_done();

    if (cnt == 0) {
        err = ENXIO;
        goto done;
    }

    MALLOC(*list, ifnet_t *, sizeof(ifnet_t) * (cnt + 1),
        M_TEMP, M_NOWAIT);
    if (*list == NULL) {
        err = ENOMEM;
        goto done;
    }
    bzero(*list, sizeof(ifnet_t) * (cnt + 1));
    *count = cnt;

done:
    SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
        SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
        if (err == 0) {
            (*list)[--cnt] = ifl->ifl_ifp;
        } else {
            ifnet_release(ifl->ifl_ifp);
        }
        FREE(ifl, M_TEMP);
    }

    return err;
}

void
ifnet_list_free(ifnet_t *interfaces)
{
    int i;

    if (interfaces == NULL) {
        return;
    }

    for (i = 0; interfaces[i]; i++) {
        ifnet_release(interfaces[i]);
    }

    FREE(interfaces, M_TEMP);
}
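/*
 * Illustrative usage sketch (not part of the original source): enumerating
 * all Ethernet interfaces.  The returned array is NULL-terminated and each
 * entry holds a reference that ifnet_list_free() releases.
 *
 *	ifnet_t *list;
 *	u_int32_t count, i;
 *
 *	if (ifnet_list_get(IFNET_FAMILY_ETHERNET, &list, &count) == 0) {
 *		for (i = 0; i < count; i++) {
 *			printf("%s%u\n", ifnet_name(list[i]),
 *			    ifnet_unit(list[i]));
 *		}
 *		ifnet_list_free(list);
 *	}
 */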
/*************************************************************************/
/* ifaddr_t accessors                                                    */
/*************************************************************************/

errno_t
ifaddr_reference(ifaddr_t ifa)
{
    if (ifa == NULL) {
        return EINVAL;
    }

    IFA_ADDREF(ifa);
    return 0;
}

errno_t
ifaddr_release(ifaddr_t ifa)
{
    if (ifa == NULL) {
        return EINVAL;
    }

    IFA_REMREF(ifa);
    return 0;
}

sa_family_t
ifaddr_address_family(ifaddr_t ifa)
{
    sa_family_t family = 0;

    if (ifa != NULL) {
        IFA_LOCK_SPIN(ifa);
        if (ifa->ifa_addr != NULL) {
            family = ifa->ifa_addr->sa_family;
        }
        IFA_UNLOCK(ifa);
    }
    return family;
}

errno_t
ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
    u_int32_t copylen;

    if (ifa == NULL || out_addr == NULL) {
        return EINVAL;
    }

    IFA_LOCK(ifa);
    if (ifa->ifa_addr == NULL) {
        IFA_UNLOCK(ifa);
        return ENOTSUP;
    }

    copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
        ifa->ifa_addr->sa_len : addr_size;
    bcopy(ifa->ifa_addr, out_addr, copylen);

    if (ifa->ifa_addr->sa_len > addr_size) {
        IFA_UNLOCK(ifa);
        return EMSGSIZE;
    }

    IFA_UNLOCK(ifa);
    return 0;
}

errno_t
ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
    u_int32_t copylen;

    if (ifa == NULL || out_addr == NULL) {
        return EINVAL;
    }

    IFA_LOCK(ifa);
    if (ifa->ifa_dstaddr == NULL) {
        IFA_UNLOCK(ifa);
        return ENOTSUP;
    }

    copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
        ifa->ifa_dstaddr->sa_len : addr_size;
    bcopy(ifa->ifa_dstaddr, out_addr, copylen);

    if (ifa->ifa_dstaddr->sa_len > addr_size) {
        IFA_UNLOCK(ifa);
        return EMSGSIZE;
    }

    IFA_UNLOCK(ifa);
    return 0;
}

errno_t
ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
    u_int32_t copylen;

    if (ifa == NULL || out_addr == NULL) {
        return EINVAL;
    }

    IFA_LOCK(ifa);
    if (ifa->ifa_netmask == NULL) {
        IFA_UNLOCK(ifa);
        return ENOTSUP;
    }

    copylen = addr_size >= ifa->ifa_netmask->sa_len ?
        ifa->ifa_netmask->sa_len : addr_size;
    bcopy(ifa->ifa_netmask, out_addr, copylen);

    if (ifa->ifa_netmask->sa_len > addr_size) {
        IFA_UNLOCK(ifa);
        return EMSGSIZE;
    }

    IFA_UNLOCK(ifa);
    return 0;
}

ifnet_t
ifaddr_ifnet(ifaddr_t ifa)
{
    struct ifnet *ifp;

    if (ifa == NULL) {
        return NULL;
    }

    /* ifa_ifp is set once at creation time; it is never changed */
    ifp = ifa->ifa_ifp;

    return ifp;
}

ifaddr_t
ifaddr_withaddr(const struct sockaddr *address)
{
    if (address == NULL) {
        return NULL;
    }
    return ifa_ifwithaddr(address);
}

ifaddr_t
ifaddr_withdstaddr(const struct sockaddr *address)
{
    if (address == NULL) {
        return NULL;
    }
    return ifa_ifwithdstaddr(address);
}

ifaddr_t
ifaddr_withnet(const struct sockaddr *net)
{
    if (net == NULL) {
        return NULL;
    }
    return ifa_ifwithnet(net);
}

ifaddr_t
ifaddr_withroute(int flags, const struct sockaddr *destination,
    const struct sockaddr *gateway)
{
    if (destination == NULL || gateway == NULL) {
        return NULL;
    }
    return ifa_ifwithroute(flags, destination, gateway);
}

ifaddr_t
ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
{
    if (addr == NULL || interface == NULL) {
        return NULL;
    }
    return ifaof_ifpforaddr_select(addr, interface);
}

errno_t
ifmaddr_reference(ifmultiaddr_t ifmaddr)
{
    if (ifmaddr == NULL) {
        return EINVAL;
    }

    IFMA_ADDREF(ifmaddr);
    return 0;
}

errno_t
ifmaddr_release(ifmultiaddr_t ifmaddr)
{
    if (ifmaddr == NULL) {
        return EINVAL;
    }

    IFMA_REMREF(ifmaddr);
    return 0;
}

errno_t
ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
    u_int32_t addr_size)
{
    u_int32_t copylen;

    if (ifma == NULL || out_addr == NULL) {
        return EINVAL;
    }

    IFMA_LOCK(ifma);
    if (ifma->ifma_addr == NULL) {
        IFMA_UNLOCK(ifma);
        return ENOTSUP;
    }

    copylen = (addr_size >= ifma->ifma_addr->sa_len ?
        ifma->ifma_addr->sa_len : addr_size);
    bcopy(ifma->ifma_addr, out_addr, copylen);

    if (ifma->ifma_addr->sa_len > addr_size) {
        IFMA_UNLOCK(ifma);
        return EMSGSIZE;
    }
    IFMA_UNLOCK(ifma);
    return 0;
}

errno_t
ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
    u_int32_t addr_size)
{
    struct ifmultiaddr *ifma_ll;

    if (ifma == NULL || out_addr == NULL) {
        return EINVAL;
    }
    if ((ifma_ll = ifma->ifma_ll) == NULL) {
        return ENOTSUP;
    }

    return ifmaddr_address(ifma_ll, out_addr, addr_size);
}

ifnet_t
ifmaddr_ifnet(ifmultiaddr_t ifma)
{
    return (ifma == NULL) ? NULL : ifma->ifma_ifp;
}
/**************************************************************************/
/* interface cloner */
/**************************************************************************/

errno_t
ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
    if_clone_t *ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = NULL;
	size_t namelen;

	if (cloner_params == NULL || ifcloner == NULL ||
	    cloner_params->ifc_name == NULL ||
	    cloner_params->ifc_create == NULL ||
	    cloner_params->ifc_destroy == NULL ||
	    (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
		error = EINVAL;
		goto fail;
	}

	if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
		printf("%s: already a cloner for %s\n", __func__,
		    cloner_params->ifc_name);
		error = EEXIST;
		goto fail;
	}

	/* Make room for name string */
	ifc = _MALLOC(sizeof(struct if_clone) + IFNAMSIZ + 1, M_CLONE,
	    M_WAITOK | M_ZERO);
	if (ifc == NULL) {
		printf("%s: _MALLOC failed\n", __func__);
		error = ENOBUFS;
		goto fail;
	}
	strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
	ifc->ifc_name = (char *)(ifc + 1);
	ifc->ifc_namelen = namelen;
	ifc->ifc_maxunit = IF_MAXUNIT;
	ifc->ifc_create = cloner_params->ifc_create;
	ifc->ifc_destroy = cloner_params->ifc_destroy;

	error = if_clone_attach(ifc);
	if (error != 0) {
		printf("%s: if_clone_attach failed %d\n", __func__, error);
		goto fail;
	}
	*ifcloner = ifc;

	return 0;

fail:
	if (ifc != NULL) {
		FREE(ifc, M_CLONE);
	}
	return error;
}

errno_t
ifnet_clone_detach(if_clone_t ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = ifcloner;

	if (ifc == NULL || ifc->ifc_name == NULL) {
		return EINVAL;
	}

	if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
		printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
		error = EINVAL;
		goto fail;
	}

	if_clone_detach(ifc);

	FREE(ifc, M_CLONE);

fail:
	return error;
}
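
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * register a cloner with ifnet_clone_attach().  The callback bodies, the
 * interface name "example", and the helper names are hypothetical; the
 * parameter layout is assumed to follow the ifnet_clone_params and
 * ifnet_clone_*_func declarations in kpi_interface.h.
 */
static errno_t
example_clone_create(if_clone_t ifcloner, u_int32_t unit, void *params)
{
#pragma unused(ifcloner, unit, params)
	/* allocate and attach an ifnet for this unit */
	return 0;
}

static errno_t
example_clone_destroy(ifnet_t interface)
{
#pragma unused(interface)
	/* detach and free the ifnet */
	return 0;
}

static errno_t __unused
example_register_cloner(if_clone_t *cloner)
{
	struct ifnet_clone_params params = {
		.ifc_name = "example",
		.ifc_create = example_clone_create,
		.ifc_destroy = example_clone_destroy,
	};

	return ifnet_clone_attach(&params, cloner);
}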
/**************************************************************************/
/* misc */
/**************************************************************************/

errno_t
ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
    u_int32_t flags, u_int8_t *bitfield)
{
	u_int32_t ifindex;

	if (bitfield == NULL) {
		return EINVAL;
	}

	/* bit string is long enough to hold 16-bit port values */
	bzero(bitfield, bitstr_size(IP_PORTRANGE_SIZE));

	if_ports_used_update_wakeuuid(ifp);

	ifindex = (ifp != NULL) ? ifp->if_index : 0;

	if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
		udp_get_ports_used(ifindex, protocol, flags,
		    bitfield);
	}

	if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
		tcp_get_ports_used(ifindex, protocol, flags,
		    bitfield);
	}

	return 0;
}

errno_t
ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
{
	u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;

	return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
	    bitfield);
}
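
/*
 * Illustrative sketch (not part of the original file): querying the local
 * port bitmap.  The bitfield must cover all 65536 16-bit port values
 * (bitstr_size(IP_PORTRANGE_SIZE) bytes), so it is heap-allocated here.
 * The helper name example_port_in_use() and the use of bit_test() from
 * <sys/bitstring.h> are assumptions of this sketch.
 */
static errno_t __unused
example_port_in_use(ifnet_t ifp, u_int16_t port, boolean_t *in_use)
{
	u_int8_t *bitfield;
	errno_t err;

	bitfield = _MALLOC(bitstr_size(IP_PORTRANGE_SIZE), M_TEMP,
	    M_WAITOK | M_ZERO);
	if (bitfield == NULL) {
		return ENOMEM;
	}

	err = ifnet_get_local_ports(ifp, bitfield);
	if (err == 0) {
		*in_use = bit_test(bitfield, port) ? TRUE : FALSE;
	}

	FREE(bitfield, M_TEMP);
	return err;
}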
errno_t
ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
    int lqm, int npm, u_int8_t srvinfo[48])
{
	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
		return EINVAL;
	}
	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
		return EINVAL;
	}
	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
		return EINVAL;
	}

	return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
}

errno_t
ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
    int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
{
	/* Support older version if sdl is NULL */
	if (sdl == NULL) {
		return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
	}
	if (ifp == NULL || sa == NULL || srvinfo == NULL) {
		return EINVAL;
	}
	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
		return EINVAL;
	}
	if (sa->sa_family != AF_INET6) {
		return EINVAL;
	}
	if (sdl->sdl_family != AF_LINK) {
		return EINVAL;
	}

	return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
}

errno_t
ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
{
	if (ifp == NULL || sa == NULL) {
		return EINVAL;
	}
	if (sa->sa_len > sizeof(struct sockaddr_storage)) {
		return EINVAL;
	}
	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
		return EINVAL;
	}

	dlil_node_absent(ifp, sa);
	return 0;
}

errno_t
ifnet_notice_master_elected(ifnet_t ifp)
{
	if (ifp == NULL) {
		return EINVAL;
	}

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
	return 0;
}

errno_t
ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
{
#pragma unused(val)

	m_do_tx_compl_callback(m, ifp);

	return 0;
}

errno_t
ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
{
	m_do_tx_compl_callback(m, ifp);

	return 0;
}

errno_t
ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
    u_int8_t info[IFNET_MODARGLEN])
{
	if (ifp == NULL || modid == NULL) {
		return EINVAL;
	}

	dlil_report_issues(ifp, modid, info);
	return 0;
}
errno_t
ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
{
	ifnet_t odifp = NULL;

	if (ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_is_attached(ifp, 1)) {
		return ENXIO;
	}

	ifnet_lock_exclusive(ifp);
	odifp = ifp->if_delegated.ifp;
	if (odifp != NULL && odifp == delegated_ifp) {
		/* delegate info is unchanged; nothing more to do */
		ifnet_lock_done(ifp);
		goto done;
	}

	// Test if this delegate interface would cause a loop
	ifnet_t delegate_check_ifp = delegated_ifp;
	while (delegate_check_ifp != NULL) {
		if (delegate_check_ifp == ifp) {
			printf("%s: delegating to %s would cause a loop\n",
			    ifp->if_xname, delegated_ifp->if_xname);
			ifnet_lock_done(ifp);
			goto done;
		}
		delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
	}

	bzero(&ifp->if_delegated, sizeof(ifp->if_delegated));
	if (delegated_ifp != NULL && ifp != delegated_ifp) {
		uint32_t set_eflags;

		ifp->if_delegated.ifp = delegated_ifp;
		ifnet_reference(delegated_ifp);
		ifp->if_delegated.type = delegated_ifp->if_type;
		ifp->if_delegated.family = delegated_ifp->if_family;
		ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
		ifp->if_delegated.expensive =
		    delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
		ifp->if_delegated.constrained =
		    delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;

		/*
		 * Propagate flags related to ECN from delegated interface
		 */
		if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
		set_eflags = (delegated_ifp->if_eflags &
		    (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
		if_set_eflags(ifp, set_eflags);

		printf("%s: is now delegating %s (type 0x%x, family %u, "
		    "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
		    delegated_ifp->if_type, delegated_ifp->if_family,
		    delegated_ifp->if_subfamily);
	}

	ifnet_lock_done(ifp);

	if (odifp != NULL) {
		if (odifp != delegated_ifp) {
			printf("%s: is no longer delegating %s\n",
			    ifp->if_xname, odifp->if_xname);
		}
		ifnet_release(odifp);
	}

	/* Generate a kernel event */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);

done:
	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return 0;
}
errno_t
ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
{
	if (ifp == NULL || pdelegated_ifp == NULL) {
		return EINVAL;
	} else if (!ifnet_is_attached(ifp, 1)) {
		return ENXIO;
	}

	ifnet_lock_shared(ifp);
	if (ifp->if_delegated.ifp != NULL) {
		ifnet_reference(ifp->if_delegated.ifp);
	}
	*pdelegated_ifp = ifp->if_delegated.ifp;
	ifnet_lock_done(ifp);

	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return 0;
}
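
/*
 * Illustrative sketch (not part of the original file): reading the current
 * delegate.  ifnet_get_delegate() hands back a referenced ifnet (or NULL),
 * so the caller must balance it with ifnet_release(); the helper name
 * example_log_delegate() is hypothetical.
 */
static void __unused
example_log_delegate(ifnet_t ifp)
{
	ifnet_t delegate = NULL;

	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
		printf("%s delegates to %s\n",
		    ifp->if_xname, delegate->if_xname);
		ifnet_release(delegate);
	}
}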
errno_t
ifnet_get_keepalive_offload_frames(ifnet_t ifp,
    struct ifnet_keepalive_offload_frame *frames_array,
    u_int32_t frames_array_count, size_t frame_data_offset,
    u_int32_t *used_frames_count)
{
	u_int32_t i;

	if (frames_array == NULL || used_frames_count == NULL ||
	    frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
		return EINVAL;
	}

	/* frame_data_offset should be 32-bit aligned */
	if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
	    frame_data_offset) {
		return EINVAL;
	}

	*used_frames_count = 0;
	if (frames_array_count == 0) {
		return 0;
	}

	/* Keep-alive offload not required for CLAT interface */
	if (IS_INTF_CLAT46(ifp)) {
		return 0;
	}

	for (i = 0; i < frames_array_count; i++) {
		struct ifnet_keepalive_offload_frame *frame = frames_array + i;

		bzero(frame, sizeof(struct ifnet_keepalive_offload_frame));
	}

	/* First collect IPsec related keep-alive frames */
	*used_frames_count = key_fill_offload_frames_for_savs(ifp,
	    frames_array, frames_array_count, frame_data_offset);

	/* If there is more room, collect other UDP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		udp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}

	/* If there is more room, collect other TCP keep-alive frames */
	if (*used_frames_count < frames_array_count) {
		tcp_fill_keepalive_offload_frames(ifp, frames_array,
		    frames_array_count, frame_data_offset,
		    used_frames_count);
	}
	VERIFY(*used_frames_count <= frames_array_count);

	return 0;
}
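
/*
 * Illustrative sketch (not part of the original file): a driver collecting
 * keep-alive frames before entering a low-power state.  The array size of 4
 * and the helper name example_collect_keepalives() are arbitrary for the
 * example; frame_data_offset must be 32-bit aligned as enforced above.
 */
static errno_t __unused
example_collect_keepalives(ifnet_t ifp)
{
	struct ifnet_keepalive_offload_frame frames[4];
	u_int32_t used = 0;
	errno_t err;

	err = ifnet_get_keepalive_offload_frames(ifp, frames,
	    sizeof(frames) / sizeof(frames[0]), 0, &used);
	if (err == 0) {
		/* program the first 'used' frames into the hardware */
	}
	return err;
}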
errno_t
ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
    struct ifnet_keepalive_offload_frame *frame)
{
	errno_t error = 0;

	if (ifp == NULL || frame == NULL) {
		return EINVAL;
	}

	if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
		return EINVAL;
	}
	if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
	    frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
		return EINVAL;
	}
	if (frame->local_port == 0 || frame->remote_port == 0) {
		return EINVAL;
	}

	error = tcp_notify_kao_timeout(ifp, frame);

	return error;
}
errno_t
ifnet_link_status_report(ifnet_t ifp, const void *buffer,
    size_t buffer_len)
{
	struct if_link_status *ifsr;
	errno_t err = 0;

	if (ifp == NULL || buffer == NULL || buffer_len == 0) {
		return EINVAL;
	}

	ifnet_lock_shared(ifp);

	/*
	 * Make sure that the interface is attached but there is no need
	 * to take a reference because this call is coming from the driver.
	 */
	if (!ifnet_is_attached(ifp, 0)) {
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ifp->if_link_status_lock);

	/*
	 * If this is the first status report then allocate memory
	 * to store it.
	 */
	if (ifp->if_link_status == NULL) {
		MALLOC(ifp->if_link_status, struct if_link_status *,
		    sizeof(struct if_link_status), M_TEMP, M_ZERO);
		if (ifp->if_link_status == NULL) {
			err = ENOMEM;
			goto done;
		}
	}

	ifsr = __DECONST(struct if_link_status *, buffer);

	if (ifp->if_type == IFT_CELLULAR) {
		struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
		/*
		 * Currently we have a single version -- if it does
		 * not match, just return.
		 */
		if (ifsr->ifsr_version !=
		    IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
			err = EINVAL;
			goto done;
		}

		if_cell_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
		/* Check if we need to act on any new notifications */
		if ((new_cell_sr->valid_bitmask &
		    IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
		    new_cell_sr->mss_recommended !=
		    if_cell_sr->mss_recommended) {
			atomic_bitset_32(&tcbinfo.ipi_flags,
			    INPCBINFO_UPDATE_MSS);
			inpcb_timer_sched(&tcbinfo, INPCB_TIMER_FAST);
			necp_update_all_clients();
		}

		/* Finally copy the new information */
		ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
		if_cell_sr->valid_bitmask = 0;
		bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
	} else if (IFNET_IS_WIFI(ifp)) {
		struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;

		/* Check version */
		if (ifsr->ifsr_version !=
		    IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
			err = ENOTSUP;
			goto done;
		}

		if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
			err = EINVAL;
			goto done;
		}

		if_wifi_sr =
		    &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		new_wifi_sr =
		    &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
		ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
		ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
		if_wifi_sr->valid_bitmask = 0;
		bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));

		/*
		 * Update the bandwidth values if we got recent values
		 * reported through the other KPI.
		 */
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->ul_max_bandwidth =
			    ifp->if_output_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_output_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->ul_effective_bandwidth =
			    ifp->if_output_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_output_bw.eff_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.max_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_MAX_BANDWIDTH_VALID;
			if_wifi_sr->dl_max_bandwidth =
			    ifp->if_input_bw.max_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.max_bw;
		}
		if (!(new_wifi_sr->valid_bitmask &
		    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
		    ifp->if_input_bw.eff_bw > 0) {
			if_wifi_sr->valid_bitmask |=
			    IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			if_wifi_sr->dl_effective_bandwidth =
			    ifp->if_input_bw.eff_bw > UINT32_MAX ?
			    UINT32_MAX :
			    (uint32_t)ifp->if_input_bw.eff_bw;
		}
	} else {
		err = EINVAL;
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
	ifnet_lock_done(ifp);

	return err;
}
/*************************************************************************/
/* Fastlane QoS Ca */
/*************************************************************************/

errno_t
ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
{
	if (interface == NULL) {
		return EINVAL;
	}

	if_set_qosmarking_mode(interface,
	    capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);

	return 0;
}

errno_t
ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
{
	if (interface == NULL || capable == NULL) {
		return EINVAL;
	}
	if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
		*capable = true;
	} else {
		*capable = false;
	}
	return 0;
}
errno_t
ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
{
	int64_t bytes;

	if (interface == NULL || unsent_bytes == NULL) {
		return EINVAL;
	}

	bytes = *unsent_bytes = 0;

	if (!IF_FULLY_ATTACHED(interface)) {
		return ENXIO;
	}

	bytes = interface->if_sndbyte_unsent;

	if (interface->if_eflags & IFEF_TXSTART) {
		bytes += IFCQ_BYTES(&interface->if_snd);
	}
	*unsent_bytes = bytes;

	return 0;
}
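
/*
 * Illustrative sketch (not part of the original file): polling the unsent
 * byte count, e.g. from a flow-control heuristic.  The helper name
 * example_tx_backlogged() and the 64 KB threshold are hypothetical.
 */
static boolean_t __unused
example_tx_backlogged(ifnet_t ifp)
{
	int64_t unsent = 0;

	if (ifnet_get_unsent_bytes(ifp, &unsent) != 0) {
		return FALSE;
	}
	return (unsent > 64 * 1024) ? TRUE : FALSE;
}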
errno_t
ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
{
	if (ifp == NULL || buf_status == NULL) {
		return EINVAL;
	}

	bzero(buf_status, sizeof(*buf_status));

	if (!IF_FULLY_ATTACHED(ifp)) {
		return ENXIO;
	}

	if (ifp->if_eflags & IFEF_TXSTART) {
		buf_status->buf_interface = IFCQ_BYTES(&ifp->if_snd);
	}

	buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
	    (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;

	return 0;
}

void
ifnet_normalise_unsent_data(void)
{
	struct ifnet *ifp;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		ifnet_lock_exclusive(ifp);
		if (!IF_FULLY_ATTACHED(ifp)) {
			ifnet_lock_done(ifp);
			continue;
		}
		if (!(ifp->if_eflags & IFEF_TXSTART)) {
			ifnet_lock_done(ifp);
			continue;
		}

		if (ifp->if_sndbyte_total > 0 ||
		    IFCQ_BYTES(&ifp->if_snd) > 0) {
			ifp->if_unsent_data_cnt++;
		}

		ifnet_lock_done(ifp);
	}
	ifnet_head_done();
}
errno_t
ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
{
	errno_t error;

	error = if_set_low_power(ifp, on);

	return error;
}

errno_t
ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
{
	if (ifp == NULL || on == NULL) {
		return EINVAL;
	}

	*on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);

	return 0;
}
/*************************************************************************/
/* Interface advisory notifications */
/*************************************************************************/

errno_t
ifnet_interface_advisory_report(ifnet_t ifp,
    const struct ifnet_interface_advisory *advisory)
{
#pragma unused(advisory)