/*
 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include "kpi_interface.h"
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/net_api_stats.h>
52 #include <net/route.h>
53 #include <net/if_ports_used.h>
54 #include <libkern/libkern.h>
55 #include <libkern/OSAtomic.h>
56 #include <kern/locks.h>
57 #include <kern/clock.h>
58 #include <sys/sockio.h>
60 #include <sys/sysctl.h>
62 #include <netinet/ip_var.h>
63 #include <netinet/udp.h>
64 #include <netinet/udp_var.h>
65 #include <netinet/tcp.h>
66 #include <netinet/tcp_var.h>
67 #include <netinet/in_pcb.h>
69 #include <netinet/igmp_var.h>
72 #include <netinet6/mld6_var.h>
74 #include <netkey/key.h>
77 #include "net/net_str_id.h"
80 #include <sys/kauth.h>
81 #include <security/mac_framework.h>
86 errno_t
ifnet_allocate(const struct ifnet_init_params
*init
,
89 static errno_t
ifnet_allocate_common(const struct ifnet_init_params
*init
,
90 ifnet_t
*ifp
, bool is_internal
);
93 #define TOUCHLASTCHANGE(__if_lastchange) { \
94 (__if_lastchange)->tv_sec = net_uptime(); \
95 (__if_lastchange)->tv_usec = 0; \
98 static errno_t
ifnet_defrouter_llreachinfo(ifnet_t
, int,
99 struct ifnet_llreach_info
*);
100 static void ifnet_kpi_free(ifnet_t
);
101 static errno_t
ifnet_list_get_common(ifnet_family_t
, boolean_t
, ifnet_t
**,
103 static errno_t
ifnet_set_lladdr_internal(ifnet_t
, const void *, size_t,
105 static errno_t
ifnet_awdl_check_eflags(ifnet_t
, u_int32_t
*, u_int32_t
*);
108 * Temporary work around until we have real reference counting
110 * We keep the bits about calling dlil_if_release (which should be
111 * called recycle) transparent by calling it from our if_free function
112 * pointer. We have to keep the client's original detach function
113 * somewhere so we can call it.
116 ifnet_kpi_free(ifnet_t ifp
)
118 ifnet_detached_func detach_func
= ifp
->if_kpi_storage
;
120 if (detach_func
!= NULL
) {
124 if (ifp
->if_broadcast
.length
> sizeof(ifp
->if_broadcast
.u
.buffer
)) {
125 FREE(ifp
->if_broadcast
.u
.ptr
, M_IFADDR
);
126 ifp
->if_broadcast
.u
.ptr
= NULL
;
129 dlil_if_release(ifp
);
133 ifnet_allocate_common(const struct ifnet_init_params
*init
,
134 ifnet_t
*ifp
, bool is_internal
)
136 struct ifnet_init_eparams einit
;
138 bzero(&einit
, sizeof(einit
));
140 einit
.ver
= IFNET_INIT_CURRENT_VERSION
;
141 einit
.len
= sizeof(einit
);
142 einit
.flags
= IFNET_INIT_LEGACY
| IFNET_INIT_NX_NOAUTO
;
144 einit
.flags
|= IFNET_INIT_ALLOC_KPI
;
146 einit
.uniqueid
= init
->uniqueid
;
147 einit
.uniqueid_len
= init
->uniqueid_len
;
148 einit
.name
= init
->name
;
149 einit
.unit
= init
->unit
;
150 einit
.family
= init
->family
;
151 einit
.type
= init
->type
;
152 einit
.output
= init
->output
;
153 einit
.demux
= init
->demux
;
154 einit
.add_proto
= init
->add_proto
;
155 einit
.del_proto
= init
->del_proto
;
156 einit
.check_multi
= init
->check_multi
;
157 einit
.framer
= init
->framer
;
158 einit
.softc
= init
->softc
;
159 einit
.ioctl
= init
->ioctl
;
160 einit
.set_bpf_tap
= init
->set_bpf_tap
;
161 einit
.detach
= init
->detach
;
162 einit
.event
= init
->event
;
163 einit
.broadcast_addr
= init
->broadcast_addr
;
164 einit
.broadcast_len
= init
->broadcast_len
;
166 return ifnet_allocate_extended(&einit
, ifp
);
170 ifnet_allocate_internal(const struct ifnet_init_params
*init
, ifnet_t
*ifp
)
172 return ifnet_allocate_common(init
, ifp
, true);
176 ifnet_allocate(const struct ifnet_init_params
*init
, ifnet_t
*ifp
)
178 return ifnet_allocate_common(init
, ifp
, false);
182 ifnet_allocate_extended(const struct ifnet_init_eparams
*einit0
,
185 struct ifnet_init_eparams einit
;
186 struct ifnet
*ifp
= NULL
;
187 char if_xname
[IFXNAMSIZ
] = {0};
192 if (einit
.ver
!= IFNET_INIT_CURRENT_VERSION
||
193 einit
.len
< sizeof(einit
)) {
197 if (einit
.family
== 0 || einit
.name
== NULL
||
198 strlen(einit
.name
) >= IFNAMSIZ
||
199 (einit
.type
& 0xFFFFFF00) != 0 || einit
.type
== 0) {
204 if (einit
.flags
& IFNET_INIT_LEGACY
) {
205 if (einit
.output
== NULL
||
206 (einit
.flags
& IFNET_INIT_INPUT_POLL
)) {
209 einit
.pre_enqueue
= NULL
;
211 einit
.output_ctl
= NULL
;
212 einit
.output_sched_model
= IFNET_SCHED_MODEL_NORMAL
;
213 einit
.input_poll
= NULL
;
214 einit
.input_ctl
= NULL
;
216 if (einit
.start
== NULL
) {
221 if (einit
.output_sched_model
>= IFNET_SCHED_MODEL_MAX
) {
225 if (einit
.flags
& IFNET_INIT_INPUT_POLL
) {
226 if (einit
.input_poll
== NULL
|| einit
.input_ctl
== NULL
) {
230 einit
.input_poll
= NULL
;
231 einit
.input_ctl
= NULL
;
235 /* Initialize external name (name + unit) */
236 (void) snprintf(if_xname
, sizeof(if_xname
), "%s%d",
237 einit
.name
, einit
.unit
);
239 if (einit
.uniqueid
== NULL
) {
240 einit
.uniqueid
= if_xname
;
241 einit
.uniqueid_len
= strlen(if_xname
);
244 error
= dlil_if_acquire(einit
.family
, einit
.uniqueid
,
245 einit
.uniqueid_len
, if_xname
, &ifp
);
251 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
252 * to point to storage of at least IFNAMSIZ bytes. It is safe
255 strlcpy(__DECONST(char *, ifp
->if_name
), einit
.name
, IFNAMSIZ
);
256 ifp
->if_type
= einit
.type
;
257 ifp
->if_family
= einit
.family
;
258 ifp
->if_subfamily
= einit
.subfamily
;
259 ifp
->if_unit
= einit
.unit
;
260 ifp
->if_output
= einit
.output
;
261 ifp
->if_pre_enqueue
= einit
.pre_enqueue
;
262 ifp
->if_start
= einit
.start
;
263 ifp
->if_output_ctl
= einit
.output_ctl
;
264 ifp
->if_output_sched_model
= einit
.output_sched_model
;
265 ifp
->if_output_bw
.eff_bw
= einit
.output_bw
;
266 ifp
->if_output_bw
.max_bw
= einit
.output_bw_max
;
267 ifp
->if_output_lt
.eff_lt
= einit
.output_lt
;
268 ifp
->if_output_lt
.max_lt
= einit
.output_lt_max
;
269 ifp
->if_input_poll
= einit
.input_poll
;
270 ifp
->if_input_ctl
= einit
.input_ctl
;
271 ifp
->if_input_bw
.eff_bw
= einit
.input_bw
;
272 ifp
->if_input_bw
.max_bw
= einit
.input_bw_max
;
273 ifp
->if_input_lt
.eff_lt
= einit
.input_lt
;
274 ifp
->if_input_lt
.max_lt
= einit
.input_lt_max
;
275 ifp
->if_demux
= einit
.demux
;
276 ifp
->if_add_proto
= einit
.add_proto
;
277 ifp
->if_del_proto
= einit
.del_proto
;
278 ifp
->if_check_multi
= einit
.check_multi
;
279 ifp
->if_framer_legacy
= einit
.framer
;
280 ifp
->if_framer
= einit
.framer_extended
;
281 ifp
->if_softc
= einit
.softc
;
282 ifp
->if_ioctl
= einit
.ioctl
;
283 ifp
->if_set_bpf_tap
= einit
.set_bpf_tap
;
284 ifp
->if_free
= ifnet_kpi_free
;
285 ifp
->if_event
= einit
.event
;
286 ifp
->if_kpi_storage
= einit
.detach
;
288 /* Initialize external name (name + unit) */
289 snprintf(__DECONST(char *, ifp
->if_xname
), IFXNAMSIZ
,
293 * On embedded, framer() is already in the extended form;
294 * we simply use it as is, unless the caller specifies
295 * framer_extended() which will then override it.
297 * On non-embedded, framer() has long been exposed as part
298 * of the public KPI, and therefore its signature must
299 * remain the same (without the pre- and postpend length
300 * parameters.) We special case ether_frameout, such that
301 * it gets mapped to its extended variant. All other cases
302 * utilize the stub routine which will simply return zeroes
303 * for those new parameters.
305 * Internally, DLIL will only use the extended callback
306 * variant which is represented by if_framer.
309 if (ifp
->if_framer
== NULL
&& ifp
->if_framer_legacy
!= NULL
) {
310 ifp
->if_framer
= ifp
->if_framer_legacy
;
312 #else /* !CONFIG_EMBEDDED */
313 if (ifp
->if_framer
== NULL
&& ifp
->if_framer_legacy
!= NULL
) {
314 if (ifp
->if_framer_legacy
== ether_frameout
) {
315 ifp
->if_framer
= ether_frameout_extended
;
317 ifp
->if_framer
= ifnet_framer_stub
;
320 #endif /* !CONFIG_EMBEDDED */
322 if (ifp
->if_output_bw
.eff_bw
> ifp
->if_output_bw
.max_bw
) {
323 ifp
->if_output_bw
.max_bw
= ifp
->if_output_bw
.eff_bw
;
324 } else if (ifp
->if_output_bw
.eff_bw
== 0) {
325 ifp
->if_output_bw
.eff_bw
= ifp
->if_output_bw
.max_bw
;
328 if (ifp
->if_input_bw
.eff_bw
> ifp
->if_input_bw
.max_bw
) {
329 ifp
->if_input_bw
.max_bw
= ifp
->if_input_bw
.eff_bw
;
330 } else if (ifp
->if_input_bw
.eff_bw
== 0) {
331 ifp
->if_input_bw
.eff_bw
= ifp
->if_input_bw
.max_bw
;
334 if (ifp
->if_output_bw
.max_bw
== 0) {
335 ifp
->if_output_bw
= ifp
->if_input_bw
;
336 } else if (ifp
->if_input_bw
.max_bw
== 0) {
337 ifp
->if_input_bw
= ifp
->if_output_bw
;
340 /* Pin if_baudrate to 32 bits */
341 br
= MAX(ifp
->if_output_bw
.max_bw
, ifp
->if_input_bw
.max_bw
);
343 ifp
->if_baudrate
= (br
> 0xFFFFFFFF) ? 0xFFFFFFFF : br
;
346 if (ifp
->if_output_lt
.eff_lt
> ifp
->if_output_lt
.max_lt
) {
347 ifp
->if_output_lt
.max_lt
= ifp
->if_output_lt
.eff_lt
;
348 } else if (ifp
->if_output_lt
.eff_lt
== 0) {
349 ifp
->if_output_lt
.eff_lt
= ifp
->if_output_lt
.max_lt
;
352 if (ifp
->if_input_lt
.eff_lt
> ifp
->if_input_lt
.max_lt
) {
353 ifp
->if_input_lt
.max_lt
= ifp
->if_input_lt
.eff_lt
;
354 } else if (ifp
->if_input_lt
.eff_lt
== 0) {
355 ifp
->if_input_lt
.eff_lt
= ifp
->if_input_lt
.max_lt
;
358 if (ifp
->if_output_lt
.max_lt
== 0) {
359 ifp
->if_output_lt
= ifp
->if_input_lt
;
360 } else if (ifp
->if_input_lt
.max_lt
== 0) {
361 ifp
->if_input_lt
= ifp
->if_output_lt
;
364 if (ifp
->if_ioctl
== NULL
) {
365 ifp
->if_ioctl
= ifp_if_ioctl
;
369 if (ifp
->if_start
!= NULL
) {
370 ifp
->if_eflags
|= IFEF_TXSTART
;
371 if (ifp
->if_pre_enqueue
== NULL
) {
372 ifp
->if_pre_enqueue
= ifnet_enqueue
;
374 ifp
->if_output
= ifp
->if_pre_enqueue
;
376 ifp
->if_eflags
&= ~IFEF_TXSTART
;
379 if (ifp
->if_input_poll
!= NULL
) {
380 ifp
->if_eflags
|= IFEF_RXPOLL
;
382 ifp
->if_eflags
&= ~IFEF_RXPOLL
;
385 ifp
->if_output_dlil
= dlil_output_handler
;
386 ifp
->if_input_dlil
= dlil_input_handler
;
388 VERIFY(!(einit
.flags
& IFNET_INIT_LEGACY
) ||
389 (ifp
->if_pre_enqueue
== NULL
&& ifp
->if_start
== NULL
&&
390 ifp
->if_output_ctl
== NULL
&& ifp
->if_input_poll
== NULL
&&
391 ifp
->if_input_ctl
== NULL
));
392 VERIFY(!(einit
.flags
& IFNET_INIT_INPUT_POLL
) ||
393 (ifp
->if_input_poll
!= NULL
&& ifp
->if_input_ctl
!= NULL
));
395 if (einit
.broadcast_len
&& einit
.broadcast_addr
) {
396 if (einit
.broadcast_len
>
397 sizeof(ifp
->if_broadcast
.u
.buffer
)) {
398 MALLOC(ifp
->if_broadcast
.u
.ptr
, u_char
*,
399 einit
.broadcast_len
, M_IFADDR
, M_NOWAIT
);
400 if (ifp
->if_broadcast
.u
.ptr
== NULL
) {
403 bcopy(einit
.broadcast_addr
,
404 ifp
->if_broadcast
.u
.ptr
,
405 einit
.broadcast_len
);
408 bcopy(einit
.broadcast_addr
,
409 ifp
->if_broadcast
.u
.buffer
,
410 einit
.broadcast_len
);
412 ifp
->if_broadcast
.length
= einit
.broadcast_len
;
414 bzero(&ifp
->if_broadcast
, sizeof(ifp
->if_broadcast
));
418 /* legacy interface */
419 ifp
->if_xflags
|= IFXF_LEGACY
;
422 * output target queue delay is specified in millisecond
423 * convert it to nanoseconds
425 IFCQ_TARGET_QDELAY(&ifp
->if_snd
) =
426 einit
.output_target_qdelay
* 1000 * 1000;
427 IFCQ_MAXLEN(&ifp
->if_snd
) = einit
.sndq_maxlen
;
429 ifnet_enqueue_multi_setup(ifp
, einit
.start_delay_qlen
,
430 einit
.start_delay_timeout
);
432 IFCQ_PKT_DROP_LIMIT(&ifp
->if_snd
) = IFCQ_DEFAULT_PKT_DROP_LIMIT
;
435 * Set embryonic flag; this will be cleared
436 * later when it is fully attached.
438 ifp
->if_refflags
= IFRF_EMBRYONIC
;
441 * Count the newly allocated ifnet
443 OSIncrementAtomic64(&net_api_stats
.nas_ifnet_alloc_count
);
444 INC_ATOMIC_INT64_LIM(net_api_stats
.nas_ifnet_alloc_total
);
445 if (einit
.flags
& IFNET_INIT_ALLOC_KPI
) {
446 ifp
->if_xflags
|= IFXF_ALLOC_KPI
;
449 &net_api_stats
.nas_ifnet_alloc_os_count
);
450 INC_ATOMIC_INT64_LIM(
451 net_api_stats
.nas_ifnet_alloc_os_total
);
456 // temporary - this should be done in dlil_if_acquire
457 ifnet_reference(ifp
);
459 dlil_if_release(ifp
);
467 ifnet_reference(ifnet_t ifp
)
469 return dlil_if_ref(ifp
);
473 ifnet_release(ifnet_t ifp
)
475 return dlil_if_free(ifp
);
479 ifnet_interface_family_find(const char *module_string
,
480 ifnet_family_t
*family_id
)
482 if (module_string
== NULL
|| family_id
== NULL
) {
486 return net_str_id_find_internal(module_string
, family_id
,
491 ifnet_softc(ifnet_t interface
)
493 return (interface
== NULL
) ? NULL
: interface
->if_softc
;
497 ifnet_name(ifnet_t interface
)
499 return (interface
== NULL
) ? NULL
: interface
->if_name
;
503 ifnet_family(ifnet_t interface
)
505 return (interface
== NULL
) ? 0 : interface
->if_family
;
509 ifnet_subfamily(ifnet_t interface
)
511 return (interface
== NULL
) ? 0 : interface
->if_subfamily
;
515 ifnet_unit(ifnet_t interface
)
517 return (interface
== NULL
) ? (u_int32_t
)0xffffffff :
518 (u_int32_t
)interface
->if_unit
;
522 ifnet_index(ifnet_t interface
)
524 return (interface
== NULL
) ? (u_int32_t
)0xffffffff :
529 ifnet_set_flags(ifnet_t interface
, u_int16_t new_flags
, u_int16_t mask
)
533 if (interface
== NULL
) {
537 ifnet_lock_exclusive(interface
);
539 /* If we are modifying the up/down state, call if_updown */
540 if ((mask
& IFF_UP
) != 0) {
541 if_updown(interface
, (new_flags
& IFF_UP
) == IFF_UP
);
544 old_flags
= interface
->if_flags
;
545 interface
->if_flags
= (new_flags
& mask
) | (interface
->if_flags
& ~mask
);
546 /* If we are modifying the multicast flag, set/unset the silent flag */
547 if ((old_flags
& IFF_MULTICAST
) !=
548 (interface
->if_flags
& IFF_MULTICAST
)) {
550 if (IGMP_IFINFO(interface
) != NULL
) {
551 igmp_initsilent(interface
, IGMP_IFINFO(interface
));
555 if (MLD_IFINFO(interface
) != NULL
) {
556 mld6_initsilent(interface
, MLD_IFINFO(interface
));
561 ifnet_lock_done(interface
);
567 ifnet_flags(ifnet_t interface
)
569 return (interface
== NULL
) ? 0 : interface
->if_flags
;
573 * This routine ensures the following:
575 * If IFEF_AWDL is set by the caller, also set the rest of flags as
576 * defined in IFEF_AWDL_MASK.
578 * If IFEF_AWDL has been set on the interface and the caller attempts
579 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
582 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
585 * All other flags not associated with AWDL are not affected.
587 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
590 ifnet_awdl_check_eflags(ifnet_t ifp
, u_int32_t
*new_eflags
, u_int32_t
*mask
)
594 ifnet_lock_assert(ifp
, IFNET_LCK_ASSERT_EXCLUSIVE
);
596 eflags
= (*new_eflags
& *mask
) | (ifp
->if_eflags
& ~(*mask
));
598 if (ifp
->if_eflags
& IFEF_AWDL
) {
599 if (eflags
& IFEF_AWDL
) {
600 if ((eflags
& IFEF_AWDL_MASK
) != IFEF_AWDL_MASK
) {
604 *new_eflags
&= ~IFEF_AWDL_MASK
;
605 *mask
|= IFEF_AWDL_MASK
;
607 } else if (eflags
& IFEF_AWDL
) {
608 *new_eflags
|= IFEF_AWDL_MASK
;
609 *mask
|= IFEF_AWDL_MASK
;
610 } else if (eflags
& IFEF_AWDL_RESTRICTED
&&
611 !(ifp
->if_eflags
& IFEF_AWDL
)) {
619 ifnet_set_eflags(ifnet_t interface
, u_int32_t new_flags
, u_int32_t mask
)
622 struct kev_msg ev_msg
;
623 struct net_event_data ev_data
;
625 if (interface
== NULL
) {
629 bzero(&ev_msg
, sizeof(ev_msg
));
630 ifnet_lock_exclusive(interface
);
632 * Sanity checks for IFEF_AWDL and its related flags.
634 if (ifnet_awdl_check_eflags(interface
, &new_flags
, &mask
) != 0) {
635 ifnet_lock_done(interface
);
639 * Currently Interface advisory reporting is supported only for
642 if ((((new_flags
& mask
) & IFEF_ADV_REPORT
) != 0) &&
643 ((interface
->if_eflags
& IFEF_SKYWALK_NATIVE
) == 0)) {
646 oeflags
= interface
->if_eflags
;
647 interface
->if_eflags
=
648 (new_flags
& mask
) | (interface
->if_eflags
& ~mask
);
649 ifnet_lock_done(interface
);
650 if (interface
->if_eflags
& IFEF_AWDL_RESTRICTED
&&
651 !(oeflags
& IFEF_AWDL_RESTRICTED
)) {
652 ev_msg
.event_code
= KEV_DL_AWDL_RESTRICTED
;
654 * The interface is now restricted to applications that have
656 * The check for the entitlement will be done in the data
657 * path, so we don't have to do anything here.
659 } else if (oeflags
& IFEF_AWDL_RESTRICTED
&&
660 !(interface
->if_eflags
& IFEF_AWDL_RESTRICTED
)) {
661 ev_msg
.event_code
= KEV_DL_AWDL_UNRESTRICTED
;
664 * Notify configd so that it has a chance to perform better
665 * reachability detection.
667 if (ev_msg
.event_code
) {
668 bzero(&ev_data
, sizeof(ev_data
));
669 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
670 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
671 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
672 strlcpy(ev_data
.if_name
, interface
->if_name
, IFNAMSIZ
);
673 ev_data
.if_family
= interface
->if_family
;
674 ev_data
.if_unit
= interface
->if_unit
;
675 ev_msg
.dv
[0].data_length
= sizeof(struct net_event_data
);
676 ev_msg
.dv
[0].data_ptr
= &ev_data
;
677 ev_msg
.dv
[1].data_length
= 0;
678 dlil_post_complete_msg(interface
, &ev_msg
);
685 ifnet_eflags(ifnet_t interface
)
687 return (interface
== NULL
) ? 0 : interface
->if_eflags
;
691 ifnet_set_idle_flags_locked(ifnet_t ifp
, u_int32_t new_flags
, u_int32_t mask
)
699 LCK_MTX_ASSERT(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
700 ifnet_lock_assert(ifp
, IFNET_LCK_ASSERT_EXCLUSIVE
);
703 * If this is called prior to ifnet attach, the actual work will
704 * be done at attach time. Otherwise, if it is called after
705 * ifnet detach, then it is a no-op.
707 if (!ifnet_is_attached(ifp
, 0)) {
708 ifp
->if_idle_new_flags
= new_flags
;
709 ifp
->if_idle_new_flags_mask
= mask
;
712 ifp
->if_idle_new_flags
= ifp
->if_idle_new_flags_mask
= 0;
715 before
= ifp
->if_idle_flags
;
716 ifp
->if_idle_flags
= (new_flags
& mask
) | (ifp
->if_idle_flags
& ~mask
);
717 after
= ifp
->if_idle_flags
;
719 if ((after
- before
) < 0 && ifp
->if_idle_flags
== 0 &&
720 ifp
->if_want_aggressive_drain
!= 0) {
721 ifp
->if_want_aggressive_drain
= 0;
722 } else if ((after
- before
) > 0 && ifp
->if_want_aggressive_drain
== 0) {
723 ifp
->if_want_aggressive_drain
++;
730 ifnet_set_idle_flags(ifnet_t ifp
, u_int32_t new_flags
, u_int32_t mask
)
734 lck_mtx_lock(rnh_lock
);
735 ifnet_lock_exclusive(ifp
);
736 err
= ifnet_set_idle_flags_locked(ifp
, new_flags
, mask
);
737 ifnet_lock_done(ifp
);
738 lck_mtx_unlock(rnh_lock
);
744 ifnet_idle_flags(ifnet_t ifp
)
746 return (ifp
== NULL
) ? 0 : ifp
->if_idle_flags
;
750 ifnet_set_link_quality(ifnet_t ifp
, int quality
)
754 if (ifp
== NULL
|| quality
< IFNET_LQM_MIN
|| quality
> IFNET_LQM_MAX
) {
759 if (!ifnet_is_attached(ifp
, 0)) {
764 if_lqm_update(ifp
, quality
, 0);
771 ifnet_link_quality(ifnet_t ifp
)
776 return IFNET_LQM_THRESH_OFF
;
779 ifnet_lock_shared(ifp
);
780 lqm
= ifp
->if_interface_state
.lqm_state
;
781 ifnet_lock_done(ifp
);
787 ifnet_set_interface_state(ifnet_t ifp
,
788 struct if_interface_state
*if_interface_state
)
792 if (ifp
== NULL
|| if_interface_state
== NULL
) {
797 if (!ifnet_is_attached(ifp
, 0)) {
802 if_state_update(ifp
, if_interface_state
);
809 ifnet_get_interface_state(ifnet_t ifp
,
810 struct if_interface_state
*if_interface_state
)
814 if (ifp
== NULL
|| if_interface_state
== NULL
) {
819 if (!ifnet_is_attached(ifp
, 0)) {
824 if_get_state(ifp
, if_interface_state
);
832 ifnet_defrouter_llreachinfo(ifnet_t ifp
, int af
,
833 struct ifnet_llreach_info
*iflri
)
835 if (ifp
== NULL
|| iflri
== NULL
) {
839 VERIFY(af
== AF_INET
|| af
== AF_INET6
);
841 return ifnet_llreach_get_defrouter(ifp
, af
, iflri
);
845 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp
, struct ifnet_llreach_info
*iflri
)
847 return ifnet_defrouter_llreachinfo(ifp
, AF_INET
, iflri
);
851 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp
, struct ifnet_llreach_info
*iflri
)
853 return ifnet_defrouter_llreachinfo(ifp
, AF_INET6
, iflri
);
857 ifnet_set_capabilities_supported(ifnet_t ifp
, u_int32_t new_caps
,
867 ifnet_lock_exclusive(ifp
);
868 tmp
= (new_caps
& mask
) | (ifp
->if_capabilities
& ~mask
);
869 if ((tmp
& ~IFCAP_VALID
)) {
872 ifp
->if_capabilities
= tmp
;
874 ifnet_lock_done(ifp
);
880 ifnet_capabilities_supported(ifnet_t ifp
)
882 return (ifp
== NULL
) ? 0 : ifp
->if_capabilities
;
887 ifnet_set_capabilities_enabled(ifnet_t ifp
, u_int32_t new_caps
,
892 struct kev_msg ev_msg
;
893 struct net_event_data ev_data
;
899 ifnet_lock_exclusive(ifp
);
900 tmp
= (new_caps
& mask
) | (ifp
->if_capenable
& ~mask
);
901 if ((tmp
& ~IFCAP_VALID
) || (tmp
& ~ifp
->if_capabilities
)) {
904 ifp
->if_capenable
= tmp
;
906 ifnet_lock_done(ifp
);
908 /* Notify application of the change */
909 bzero(&ev_data
, sizeof(struct net_event_data
));
910 bzero(&ev_msg
, sizeof(struct kev_msg
));
911 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
912 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
913 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
915 ev_msg
.event_code
= KEV_DL_IFCAP_CHANGED
;
916 strlcpy(&ev_data
.if_name
[0], ifp
->if_name
, IFNAMSIZ
);
917 ev_data
.if_family
= ifp
->if_family
;
918 ev_data
.if_unit
= (u_int32_t
)ifp
->if_unit
;
919 ev_msg
.dv
[0].data_length
= sizeof(struct net_event_data
);
920 ev_msg
.dv
[0].data_ptr
= &ev_data
;
921 ev_msg
.dv
[1].data_length
= 0;
922 dlil_post_complete_msg(ifp
, &ev_msg
);
928 ifnet_capabilities_enabled(ifnet_t ifp
)
930 return (ifp
== NULL
) ? 0 : ifp
->if_capenable
;
933 static const ifnet_offload_t offload_mask
=
934 (IFNET_CSUM_IP
| IFNET_CSUM_TCP
| IFNET_CSUM_UDP
| IFNET_CSUM_FRAGMENT
|
935 IFNET_IP_FRAGMENT
| IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
|
936 IFNET_IPV6_FRAGMENT
| IFNET_CSUM_PARTIAL
| IFNET_CSUM_ZERO_INVERT
|
937 IFNET_VLAN_TAGGING
| IFNET_VLAN_MTU
| IFNET_MULTIPAGES
|
938 IFNET_TSO_IPV4
| IFNET_TSO_IPV6
| IFNET_TX_STATUS
| IFNET_HW_TIMESTAMP
|
941 static const ifnet_offload_t any_offload_csum
= IFNET_CHECKSUMF
;
944 ifnet_set_offload(ifnet_t interface
, ifnet_offload_t offload
)
946 u_int32_t ifcaps
= 0;
948 if (interface
== NULL
) {
952 ifnet_lock_exclusive(interface
);
953 interface
->if_hwassist
= (offload
& offload_mask
);
956 * Hardware capable of partial checksum offload is
957 * flexible enough to handle any transports utilizing
958 * Internet Checksumming. Include those transports
959 * here, and leave the final decision to IP.
961 if (interface
->if_hwassist
& IFNET_CSUM_PARTIAL
) {
962 interface
->if_hwassist
|= (IFNET_CSUM_TCP
| IFNET_CSUM_UDP
|
963 IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
);
966 log(LOG_DEBUG
, "%s: set offload flags=%b\n",
968 interface
->if_hwassist
, IFNET_OFFLOADF_BITS
);
970 ifnet_lock_done(interface
);
972 if ((offload
& any_offload_csum
)) {
973 ifcaps
|= IFCAP_HWCSUM
;
975 if ((offload
& IFNET_TSO_IPV4
)) {
976 ifcaps
|= IFCAP_TSO4
;
978 if ((offload
& IFNET_TSO_IPV6
)) {
979 ifcaps
|= IFCAP_TSO6
;
981 if ((offload
& IFNET_VLAN_MTU
)) {
982 ifcaps
|= IFCAP_VLAN_MTU
;
984 if ((offload
& IFNET_VLAN_TAGGING
)) {
985 ifcaps
|= IFCAP_VLAN_HWTAGGING
;
987 if ((offload
& IFNET_TX_STATUS
)) {
988 ifcaps
|= IFCAP_TXSTATUS
;
990 if ((offload
& IFNET_HW_TIMESTAMP
)) {
991 ifcaps
|= IFCAP_HW_TIMESTAMP
;
993 if ((offload
& IFNET_SW_TIMESTAMP
)) {
994 ifcaps
|= IFCAP_SW_TIMESTAMP
;
996 if ((offload
& IFNET_CSUM_PARTIAL
)) {
997 ifcaps
|= IFCAP_CSUM_PARTIAL
;
999 if ((offload
& IFNET_CSUM_ZERO_INVERT
)) {
1000 ifcaps
|= IFCAP_CSUM_ZERO_INVERT
;
1003 (void) ifnet_set_capabilities_supported(interface
, ifcaps
,
1005 (void) ifnet_set_capabilities_enabled(interface
, ifcaps
,
1013 ifnet_offload(ifnet_t interface
)
1015 return (interface
== NULL
) ?
1016 0 : (interface
->if_hwassist
& offload_mask
);
1020 ifnet_set_tso_mtu(ifnet_t interface
, sa_family_t family
, u_int32_t mtuLen
)
1024 if (interface
== NULL
|| mtuLen
< interface
->if_mtu
) {
1030 if (interface
->if_hwassist
& IFNET_TSO_IPV4
) {
1031 interface
->if_tso_v4_mtu
= mtuLen
;
1038 if (interface
->if_hwassist
& IFNET_TSO_IPV6
) {
1039 interface
->if_tso_v6_mtu
= mtuLen
;
1046 error
= EPROTONOSUPPORT
;
1054 ifnet_get_tso_mtu(ifnet_t interface
, sa_family_t family
, u_int32_t
*mtuLen
)
1058 if (interface
== NULL
|| mtuLen
== NULL
) {
1064 if (interface
->if_hwassist
& IFNET_TSO_IPV4
) {
1065 *mtuLen
= interface
->if_tso_v4_mtu
;
1072 if (interface
->if_hwassist
& IFNET_TSO_IPV6
) {
1073 *mtuLen
= interface
->if_tso_v6_mtu
;
1080 error
= EPROTONOSUPPORT
;
1088 ifnet_set_wake_flags(ifnet_t interface
, u_int32_t properties
, u_int32_t mask
)
1090 struct kev_msg ev_msg
;
1091 struct net_event_data ev_data
;
1093 bzero(&ev_data
, sizeof(struct net_event_data
));
1094 bzero(&ev_msg
, sizeof(struct kev_msg
));
1096 if (interface
== NULL
) {
1100 /* Do not accept wacky values */
1101 if ((properties
& mask
) & ~IF_WAKE_VALID_FLAGS
) {
1105 ifnet_lock_exclusive(interface
);
1107 if (mask
& IF_WAKE_ON_MAGIC_PACKET
) {
1108 if (properties
& IF_WAKE_ON_MAGIC_PACKET
) {
1109 interface
->if_xflags
|= IFXF_WAKE_ON_MAGIC_PACKET
;
1111 interface
->if_xflags
&= ~IFXF_WAKE_ON_MAGIC_PACKET
;
1115 ifnet_lock_done(interface
);
1117 (void) ifnet_touch_lastchange(interface
);
1119 /* Notify application of the change */
1120 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
1121 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
1122 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
1124 ev_msg
.event_code
= KEV_DL_WAKEFLAGS_CHANGED
;
1125 strlcpy(&ev_data
.if_name
[0], interface
->if_name
, IFNAMSIZ
);
1126 ev_data
.if_family
= interface
->if_family
;
1127 ev_data
.if_unit
= (u_int32_t
)interface
->if_unit
;
1128 ev_msg
.dv
[0].data_length
= sizeof(struct net_event_data
);
1129 ev_msg
.dv
[0].data_ptr
= &ev_data
;
1130 ev_msg
.dv
[1].data_length
= 0;
1131 dlil_post_complete_msg(interface
, &ev_msg
);
1137 ifnet_get_wake_flags(ifnet_t interface
)
1139 u_int32_t flags
= 0;
1141 if (interface
== NULL
) {
1145 if (interface
->if_xflags
& IFXF_WAKE_ON_MAGIC_PACKET
) {
1146 flags
|= IF_WAKE_ON_MAGIC_PACKET
;
1153 * Should MIB data store a copy?
1156 ifnet_set_link_mib_data(ifnet_t interface
, void *mibData
, u_int32_t mibLen
)
1158 if (interface
== NULL
) {
1162 ifnet_lock_exclusive(interface
);
1163 interface
->if_linkmib
= (void*)mibData
;
1164 interface
->if_linkmiblen
= mibLen
;
1165 ifnet_lock_done(interface
);
1170 ifnet_get_link_mib_data(ifnet_t interface
, void *mibData
, u_int32_t
*mibLen
)
1174 if (interface
== NULL
) {
1178 ifnet_lock_shared(interface
);
1179 if (*mibLen
< interface
->if_linkmiblen
) {
1182 if (result
== 0 && interface
->if_linkmib
== NULL
) {
1187 *mibLen
= interface
->if_linkmiblen
;
1188 bcopy(interface
->if_linkmib
, mibData
, *mibLen
);
1190 ifnet_lock_done(interface
);
1196 ifnet_get_link_mib_data_length(ifnet_t interface
)
1198 return (interface
== NULL
) ? 0 : interface
->if_linkmiblen
;
1202 ifnet_output(ifnet_t interface
, protocol_family_t protocol_family
,
1203 mbuf_t m
, void *route
, const struct sockaddr
*dest
)
1205 if (interface
== NULL
|| protocol_family
== 0 || m
== NULL
) {
1211 return dlil_output(interface
, protocol_family
, m
, route
, dest
, 0, NULL
);
1215 ifnet_output_raw(ifnet_t interface
, protocol_family_t protocol_family
, mbuf_t m
)
1217 if (interface
== NULL
|| m
== NULL
) {
1223 return dlil_output(interface
, protocol_family
, m
, NULL
, NULL
, 1, NULL
);
1227 ifnet_set_mtu(ifnet_t interface
, u_int32_t mtu
)
1229 if (interface
== NULL
) {
1233 interface
->if_mtu
= mtu
;
1238 ifnet_mtu(ifnet_t interface
)
1240 return (interface
== NULL
) ? 0 : interface
->if_mtu
;
1244 ifnet_type(ifnet_t interface
)
1246 return (interface
== NULL
) ? 0 : interface
->if_data
.ifi_type
;
1250 ifnet_set_addrlen(ifnet_t interface
, u_char addrlen
)
1252 if (interface
== NULL
) {
1256 interface
->if_data
.ifi_addrlen
= addrlen
;
1261 ifnet_addrlen(ifnet_t interface
)
1263 return (interface
== NULL
) ? 0 : interface
->if_data
.ifi_addrlen
;
1267 ifnet_set_hdrlen(ifnet_t interface
, u_char hdrlen
)
1269 if (interface
== NULL
) {
1273 interface
->if_data
.ifi_hdrlen
= hdrlen
;
1278 ifnet_hdrlen(ifnet_t interface
)
1280 return (interface
== NULL
) ? 0 : interface
->if_data
.ifi_hdrlen
;
1284 ifnet_set_metric(ifnet_t interface
, u_int32_t metric
)
1286 if (interface
== NULL
) {
1290 interface
->if_data
.ifi_metric
= metric
;
1295 ifnet_metric(ifnet_t interface
)
1297 return (interface
== NULL
) ? 0 : interface
->if_data
.ifi_metric
;
1301 ifnet_set_baudrate(struct ifnet
*ifp
, u_int64_t baudrate
)
1307 ifp
->if_output_bw
.max_bw
= ifp
->if_input_bw
.max_bw
=
1308 ifp
->if_output_bw
.eff_bw
= ifp
->if_input_bw
.eff_bw
= baudrate
;
1310 /* Pin if_baudrate to 32 bits until we can change the storage size */
1311 ifp
->if_baudrate
= (baudrate
> 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate
;
1317 ifnet_baudrate(struct ifnet
*ifp
)
1319 return (ifp
== NULL
) ? 0 : ifp
->if_baudrate
;
1323 ifnet_set_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*output_bw
,
1324 struct if_bandwidths
*input_bw
)
1330 /* set input values first (if any), as output values depend on them */
1331 if (input_bw
!= NULL
) {
1332 (void) ifnet_set_input_bandwidths(ifp
, input_bw
);
1335 if (output_bw
!= NULL
) {
1336 (void) ifnet_set_output_bandwidths(ifp
, output_bw
, FALSE
);
1343 ifnet_set_link_status_outbw(struct ifnet
*ifp
)
1345 struct if_wifi_status_v1
*sr
;
1346 sr
= &ifp
->if_link_status
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
1347 if (ifp
->if_output_bw
.eff_bw
!= 0) {
1348 sr
->valid_bitmask
|=
1349 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
;
1350 sr
->ul_effective_bandwidth
=
1351 ifp
->if_output_bw
.eff_bw
;
1353 if (ifp
->if_output_bw
.max_bw
!= 0) {
1354 sr
->valid_bitmask
|=
1355 IF_WIFI_UL_MAX_BANDWIDTH_VALID
;
1356 sr
->ul_max_bandwidth
=
1357 ifp
->if_output_bw
.max_bw
;
1362 ifnet_set_output_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*bw
,
1365 struct if_bandwidths old_bw
;
1366 struct ifclassq
*ifq
;
1369 VERIFY(ifp
!= NULL
&& bw
!= NULL
);
1375 IFCQ_LOCK_ASSERT_HELD(ifq
);
1377 old_bw
= ifp
->if_output_bw
;
1378 if (bw
->eff_bw
!= 0) {
1379 ifp
->if_output_bw
.eff_bw
= bw
->eff_bw
;
1381 if (bw
->max_bw
!= 0) {
1382 ifp
->if_output_bw
.max_bw
= bw
->max_bw
;
1384 if (ifp
->if_output_bw
.eff_bw
> ifp
->if_output_bw
.max_bw
) {
1385 ifp
->if_output_bw
.max_bw
= ifp
->if_output_bw
.eff_bw
;
1386 } else if (ifp
->if_output_bw
.eff_bw
== 0) {
1387 ifp
->if_output_bw
.eff_bw
= ifp
->if_output_bw
.max_bw
;
1390 /* Pin if_baudrate to 32 bits */
1391 br
= MAX(ifp
->if_output_bw
.max_bw
, ifp
->if_input_bw
.max_bw
);
1393 ifp
->if_baudrate
= (br
> 0xFFFFFFFF) ? 0xFFFFFFFF : br
;
1396 /* Adjust queue parameters if needed */
1397 if (old_bw
.eff_bw
!= ifp
->if_output_bw
.eff_bw
||
1398 old_bw
.max_bw
!= ifp
->if_output_bw
.max_bw
) {
1399 ifnet_update_sndq(ifq
, CLASSQ_EV_LINK_BANDWIDTH
);
1407 * If this is a Wifi interface, update the values in
1408 * if_link_status structure also.
1410 if (IFNET_IS_WIFI(ifp
) && ifp
->if_link_status
!= NULL
) {
1411 lck_rw_lock_exclusive(&ifp
->if_link_status_lock
);
1412 ifnet_set_link_status_outbw(ifp
);
1413 lck_rw_done(&ifp
->if_link_status_lock
);
1420 ifnet_set_link_status_inbw(struct ifnet
*ifp
)
1422 struct if_wifi_status_v1
*sr
;
1424 sr
= &ifp
->if_link_status
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
1425 if (ifp
->if_input_bw
.eff_bw
!= 0) {
1426 sr
->valid_bitmask
|=
1427 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
;
1428 sr
->dl_effective_bandwidth
=
1429 ifp
->if_input_bw
.eff_bw
;
1431 if (ifp
->if_input_bw
.max_bw
!= 0) {
1432 sr
->valid_bitmask
|=
1433 IF_WIFI_DL_MAX_BANDWIDTH_VALID
;
1434 sr
->dl_max_bandwidth
= ifp
->if_input_bw
.max_bw
;
1439 ifnet_set_input_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*bw
)
1441 struct if_bandwidths old_bw
;
1443 VERIFY(ifp
!= NULL
&& bw
!= NULL
);
1445 old_bw
= ifp
->if_input_bw
;
1446 if (bw
->eff_bw
!= 0) {
1447 ifp
->if_input_bw
.eff_bw
= bw
->eff_bw
;
1449 if (bw
->max_bw
!= 0) {
1450 ifp
->if_input_bw
.max_bw
= bw
->max_bw
;
1452 if (ifp
->if_input_bw
.eff_bw
> ifp
->if_input_bw
.max_bw
) {
1453 ifp
->if_input_bw
.max_bw
= ifp
->if_input_bw
.eff_bw
;
1454 } else if (ifp
->if_input_bw
.eff_bw
== 0) {
1455 ifp
->if_input_bw
.eff_bw
= ifp
->if_input_bw
.max_bw
;
1458 if (IFNET_IS_WIFI(ifp
) && ifp
->if_link_status
!= NULL
) {
1459 lck_rw_lock_exclusive(&ifp
->if_link_status_lock
);
1460 ifnet_set_link_status_inbw(ifp
);
1461 lck_rw_done(&ifp
->if_link_status_lock
);
1464 if (old_bw
.eff_bw
!= ifp
->if_input_bw
.eff_bw
||
1465 old_bw
.max_bw
!= ifp
->if_input_bw
.max_bw
) {
1466 ifnet_update_rcv(ifp
, CLASSQ_EV_LINK_BANDWIDTH
);
1473 ifnet_output_linkrate(struct ifnet
*ifp
)
1475 struct ifclassq
*ifq
= &ifp
->if_snd
;
1478 IFCQ_LOCK_ASSERT_HELD(ifq
);
1480 rate
= ifp
->if_output_bw
.eff_bw
;
1481 if (IFCQ_TBR_IS_ENABLED(ifq
)) {
1482 u_int64_t tbr_rate
= ifp
->if_snd
.ifcq_tbr
.tbr_rate_raw
;
1483 VERIFY(tbr_rate
> 0);
1484 rate
= MIN(rate
, ifp
->if_snd
.ifcq_tbr
.tbr_rate_raw
);
1491 ifnet_input_linkrate(struct ifnet
*ifp
)
1493 return ifp
->if_input_bw
.eff_bw
;
1497 ifnet_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*output_bw
,
1498 struct if_bandwidths
*input_bw
)
1504 if (output_bw
!= NULL
) {
1505 *output_bw
= ifp
->if_output_bw
;
1507 if (input_bw
!= NULL
) {
1508 *input_bw
= ifp
->if_input_bw
;
1515 ifnet_set_latencies(struct ifnet
*ifp
, struct if_latencies
*output_lt
,
1516 struct if_latencies
*input_lt
)
1522 if (output_lt
!= NULL
) {
1523 (void) ifnet_set_output_latencies(ifp
, output_lt
, FALSE
);
1526 if (input_lt
!= NULL
) {
1527 (void) ifnet_set_input_latencies(ifp
, input_lt
);
1534 ifnet_set_output_latencies(struct ifnet
*ifp
, struct if_latencies
*lt
,
1537 struct if_latencies old_lt
;
1538 struct ifclassq
*ifq
;
1540 VERIFY(ifp
!= NULL
&& lt
!= NULL
);
1546 IFCQ_LOCK_ASSERT_HELD(ifq
);
1548 old_lt
= ifp
->if_output_lt
;
1549 if (lt
->eff_lt
!= 0) {
1550 ifp
->if_output_lt
.eff_lt
= lt
->eff_lt
;
1552 if (lt
->max_lt
!= 0) {
1553 ifp
->if_output_lt
.max_lt
= lt
->max_lt
;
1555 if (ifp
->if_output_lt
.eff_lt
> ifp
->if_output_lt
.max_lt
) {
1556 ifp
->if_output_lt
.max_lt
= ifp
->if_output_lt
.eff_lt
;
1557 } else if (ifp
->if_output_lt
.eff_lt
== 0) {
1558 ifp
->if_output_lt
.eff_lt
= ifp
->if_output_lt
.max_lt
;
1561 /* Adjust queue parameters if needed */
1562 if (old_lt
.eff_lt
!= ifp
->if_output_lt
.eff_lt
||
1563 old_lt
.max_lt
!= ifp
->if_output_lt
.max_lt
) {
1564 ifnet_update_sndq(ifq
, CLASSQ_EV_LINK_LATENCY
);
1575 ifnet_set_input_latencies(struct ifnet
*ifp
, struct if_latencies
*lt
)
1577 struct if_latencies old_lt
;
1579 VERIFY(ifp
!= NULL
&& lt
!= NULL
);
1581 old_lt
= ifp
->if_input_lt
;
1582 if (lt
->eff_lt
!= 0) {
1583 ifp
->if_input_lt
.eff_lt
= lt
->eff_lt
;
1585 if (lt
->max_lt
!= 0) {
1586 ifp
->if_input_lt
.max_lt
= lt
->max_lt
;
1588 if (ifp
->if_input_lt
.eff_lt
> ifp
->if_input_lt
.max_lt
) {
1589 ifp
->if_input_lt
.max_lt
= ifp
->if_input_lt
.eff_lt
;
1590 } else if (ifp
->if_input_lt
.eff_lt
== 0) {
1591 ifp
->if_input_lt
.eff_lt
= ifp
->if_input_lt
.max_lt
;
1594 if (old_lt
.eff_lt
!= ifp
->if_input_lt
.eff_lt
||
1595 old_lt
.max_lt
!= ifp
->if_input_lt
.max_lt
) {
1596 ifnet_update_rcv(ifp
, CLASSQ_EV_LINK_LATENCY
);
1603 ifnet_latencies(struct ifnet
*ifp
, struct if_latencies
*output_lt
,
1604 struct if_latencies
*input_lt
)
1610 if (output_lt
!= NULL
) {
1611 *output_lt
= ifp
->if_output_lt
;
1613 if (input_lt
!= NULL
) {
1614 *input_lt
= ifp
->if_input_lt
;
1621 ifnet_set_poll_params(struct ifnet
*ifp
, struct ifnet_poll_params
*p
)
1627 } else if (!ifnet_is_attached(ifp
, 1)) {
1631 err
= dlil_rxpoll_set_params(ifp
, p
, FALSE
);
1633 /* Release the io ref count */
1634 ifnet_decr_iorefcnt(ifp
);
1640 ifnet_poll_params(struct ifnet
*ifp
, struct ifnet_poll_params
*p
)
1644 if (ifp
== NULL
|| p
== NULL
) {
1646 } else if (!ifnet_is_attached(ifp
, 1)) {
1650 err
= dlil_rxpoll_get_params(ifp
, p
);
1652 /* Release the io ref count */
1653 ifnet_decr_iorefcnt(ifp
);
1659 ifnet_stat_increment(struct ifnet
*ifp
,
1660 const struct ifnet_stat_increment_param
*s
)
1666 if (s
->packets_in
!= 0) {
1667 atomic_add_64(&ifp
->if_data
.ifi_ipackets
, s
->packets_in
);
1669 if (s
->bytes_in
!= 0) {
1670 atomic_add_64(&ifp
->if_data
.ifi_ibytes
, s
->bytes_in
);
1672 if (s
->errors_in
!= 0) {
1673 atomic_add_64(&ifp
->if_data
.ifi_ierrors
, s
->errors_in
);
1676 if (s
->packets_out
!= 0) {
1677 atomic_add_64(&ifp
->if_data
.ifi_opackets
, s
->packets_out
);
1679 if (s
->bytes_out
!= 0) {
1680 atomic_add_64(&ifp
->if_data
.ifi_obytes
, s
->bytes_out
);
1682 if (s
->errors_out
!= 0) {
1683 atomic_add_64(&ifp
->if_data
.ifi_oerrors
, s
->errors_out
);
1686 if (s
->collisions
!= 0) {
1687 atomic_add_64(&ifp
->if_data
.ifi_collisions
, s
->collisions
);
1689 if (s
->dropped
!= 0) {
1690 atomic_add_64(&ifp
->if_data
.ifi_iqdrops
, s
->dropped
);
1693 /* Touch the last change time. */
1694 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1696 if (ifp
->if_data_threshold
!= 0) {
1697 ifnet_notify_data_threshold(ifp
);
1704 ifnet_stat_increment_in(struct ifnet
*ifp
, u_int32_t packets_in
,
1705 u_int32_t bytes_in
, u_int32_t errors_in
)
1711 if (packets_in
!= 0) {
1712 atomic_add_64(&ifp
->if_data
.ifi_ipackets
, packets_in
);
1714 if (bytes_in
!= 0) {
1715 atomic_add_64(&ifp
->if_data
.ifi_ibytes
, bytes_in
);
1717 if (errors_in
!= 0) {
1718 atomic_add_64(&ifp
->if_data
.ifi_ierrors
, errors_in
);
1721 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1723 if (ifp
->if_data_threshold
!= 0) {
1724 ifnet_notify_data_threshold(ifp
);
1731 ifnet_stat_increment_out(struct ifnet
*ifp
, u_int32_t packets_out
,
1732 u_int32_t bytes_out
, u_int32_t errors_out
)
1738 if (packets_out
!= 0) {
1739 atomic_add_64(&ifp
->if_data
.ifi_opackets
, packets_out
);
1741 if (bytes_out
!= 0) {
1742 atomic_add_64(&ifp
->if_data
.ifi_obytes
, bytes_out
);
1744 if (errors_out
!= 0) {
1745 atomic_add_64(&ifp
->if_data
.ifi_oerrors
, errors_out
);
1748 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1750 if (ifp
->if_data_threshold
!= 0) {
1751 ifnet_notify_data_threshold(ifp
);
1758 ifnet_set_stat(struct ifnet
*ifp
, const struct ifnet_stats_param
*s
)
1764 atomic_set_64(&ifp
->if_data
.ifi_ipackets
, s
->packets_in
);
1765 atomic_set_64(&ifp
->if_data
.ifi_ibytes
, s
->bytes_in
);
1766 atomic_set_64(&ifp
->if_data
.ifi_imcasts
, s
->multicasts_in
);
1767 atomic_set_64(&ifp
->if_data
.ifi_ierrors
, s
->errors_in
);
1769 atomic_set_64(&ifp
->if_data
.ifi_opackets
, s
->packets_out
);
1770 atomic_set_64(&ifp
->if_data
.ifi_obytes
, s
->bytes_out
);
1771 atomic_set_64(&ifp
->if_data
.ifi_omcasts
, s
->multicasts_out
);
1772 atomic_set_64(&ifp
->if_data
.ifi_oerrors
, s
->errors_out
);
1774 atomic_set_64(&ifp
->if_data
.ifi_collisions
, s
->collisions
);
1775 atomic_set_64(&ifp
->if_data
.ifi_iqdrops
, s
->dropped
);
1776 atomic_set_64(&ifp
->if_data
.ifi_noproto
, s
->no_protocol
);
1778 /* Touch the last change time. */
1779 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1781 if (ifp
->if_data_threshold
!= 0) {
1782 ifnet_notify_data_threshold(ifp
);
1789 ifnet_stat(struct ifnet
*ifp
, struct ifnet_stats_param
*s
)
1795 atomic_get_64(s
->packets_in
, &ifp
->if_data
.ifi_ipackets
);
1796 atomic_get_64(s
->bytes_in
, &ifp
->if_data
.ifi_ibytes
);
1797 atomic_get_64(s
->multicasts_in
, &ifp
->if_data
.ifi_imcasts
);
1798 atomic_get_64(s
->errors_in
, &ifp
->if_data
.ifi_ierrors
);
1800 atomic_get_64(s
->packets_out
, &ifp
->if_data
.ifi_opackets
);
1801 atomic_get_64(s
->bytes_out
, &ifp
->if_data
.ifi_obytes
);
1802 atomic_get_64(s
->multicasts_out
, &ifp
->if_data
.ifi_omcasts
);
1803 atomic_get_64(s
->errors_out
, &ifp
->if_data
.ifi_oerrors
);
1805 atomic_get_64(s
->collisions
, &ifp
->if_data
.ifi_collisions
);
1806 atomic_get_64(s
->dropped
, &ifp
->if_data
.ifi_iqdrops
);
1807 atomic_get_64(s
->no_protocol
, &ifp
->if_data
.ifi_noproto
);
1809 if (ifp
->if_data_threshold
!= 0) {
1810 ifnet_notify_data_threshold(ifp
);
1817 ifnet_touch_lastchange(ifnet_t interface
)
1819 if (interface
== NULL
) {
1823 TOUCHLASTCHANGE(&interface
->if_lastchange
);
1829 ifnet_lastchange(ifnet_t interface
, struct timeval
*last_change
)
1831 if (interface
== NULL
) {
1835 *last_change
= interface
->if_data
.ifi_lastchange
;
1836 /* Crude conversion from uptime to calendar time */
1837 last_change
->tv_sec
+= boottime_sec();
1843 ifnet_touch_lastupdown(ifnet_t interface
)
1845 if (interface
== NULL
) {
1849 TOUCHLASTCHANGE(&interface
->if_lastupdown
);
1855 ifnet_updown_delta(ifnet_t interface
, struct timeval
*updown_delta
)
1857 if (interface
== NULL
) {
1861 /* Calculate the delta */
1862 updown_delta
->tv_sec
= net_uptime();
1863 if (updown_delta
->tv_sec
> interface
->if_data
.ifi_lastupdown
.tv_sec
) {
1864 updown_delta
->tv_sec
-= interface
->if_data
.ifi_lastupdown
.tv_sec
;
1866 updown_delta
->tv_usec
= 0;
1872 ifnet_get_address_list(ifnet_t interface
, ifaddr_t
**addresses
)
1874 return addresses
== NULL
? EINVAL
:
1875 ifnet_get_address_list_family(interface
, addresses
, 0);
1878 struct ifnet_addr_list
{
1879 SLIST_ENTRY(ifnet_addr_list
) ifal_le
;
1880 struct ifaddr
*ifal_ifa
;
1884 ifnet_get_address_list_family(ifnet_t interface
, ifaddr_t
**addresses
,
1887 return ifnet_get_address_list_family_internal(interface
, addresses
,
1888 family
, 0, M_NOWAIT
, 0);
1892 ifnet_get_inuse_address_list(ifnet_t interface
, ifaddr_t
**addresses
)
1894 return addresses
== NULL
? EINVAL
:
1895 ifnet_get_address_list_family_internal(interface
, addresses
,
1899 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr
*ifa
);
1901 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr
*ifa
);
1903 __private_extern__ errno_t
1904 ifnet_get_address_list_family_internal(ifnet_t interface
, ifaddr_t
**addresses
,
1905 sa_family_t family
, int detached
, int how
, int return_inuse_addrs
)
1907 SLIST_HEAD(, ifnet_addr_list
) ifal_head
;
1908 struct ifnet_addr_list
*ifal
, *ifal_tmp
;
1915 SLIST_INIT(&ifal_head
);
1917 if (addresses
== NULL
) {
1925 * Interface has been detached, so skip the lookup
1926 * at ifnet_head and go directly to inner loop.
1936 ifnet_head_lock_shared();
1937 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
1938 if (interface
!= NULL
&& ifp
!= interface
) {
1942 ifnet_lock_shared(ifp
);
1943 if (interface
== NULL
|| interface
== ifp
) {
1945 TAILQ_FOREACH(ifa
, &ifp
->if_addrhead
, ifa_link
) {
1948 ifa
->ifa_addr
->sa_family
!= family
) {
1952 MALLOC(ifal
, struct ifnet_addr_list
*,
1953 sizeof(*ifal
), M_TEMP
, how
);
1956 ifnet_lock_done(ifp
);
1963 ifal
->ifal_ifa
= ifa
;
1964 IFA_ADDREF_LOCKED(ifa
);
1965 SLIST_INSERT_HEAD(&ifal_head
, ifal
, ifal_le
);
1970 ifnet_lock_done(ifp
);
1983 MALLOC(*addresses
, ifaddr_t
*, sizeof(ifaddr_t
) * (count
+ 1),
1985 if (*addresses
== NULL
) {
1989 bzero(*addresses
, sizeof(ifaddr_t
) * (count
+ 1));
1992 SLIST_FOREACH_SAFE(ifal
, &ifal_head
, ifal_le
, ifal_tmp
) {
1993 SLIST_REMOVE(&ifal_head
, ifal
, ifnet_addr_list
, ifal_le
);
1995 if (return_inuse_addrs
) {
1996 usecount
= tcp_find_anypcb_byaddr(ifal
->ifal_ifa
);
1997 usecount
+= udp_find_anypcb_byaddr(ifal
->ifal_ifa
);
1999 (*addresses
)[index
] = ifal
->ifal_ifa
;
2002 IFA_REMREF(ifal
->ifal_ifa
);
2005 (*addresses
)[--count
] = ifal
->ifal_ifa
;
2008 IFA_REMREF(ifal
->ifal_ifa
);
2013 VERIFY(err
== 0 || *addresses
== NULL
);
2014 if ((err
== 0) && (count
) && ((*addresses
)[0] == NULL
)) {
2015 VERIFY(return_inuse_addrs
== 1);
2016 FREE(*addresses
, M_TEMP
);
2023 ifnet_free_address_list(ifaddr_t
*addresses
)
2027 if (addresses
== NULL
) {
2031 for (i
= 0; addresses
[i
] != NULL
; i
++) {
2032 IFA_REMREF(addresses
[i
]);
2035 FREE(addresses
, M_TEMP
);
2039 ifnet_lladdr(ifnet_t interface
)
2044 if (interface
== NULL
) {
2049 * if_lladdr points to the permanent link address of
2050 * the interface and it never gets deallocated; internal
2051 * code should simply use IF_LLADDR() for performance.
2053 ifa
= interface
->if_lladdr
;
2055 lladdr
= LLADDR(SDL((void *)ifa
->ifa_addr
));
2062 ifnet_llbroadcast_copy_bytes(ifnet_t interface
, void *addr
, size_t buffer_len
,
2065 if (interface
== NULL
|| addr
== NULL
|| out_len
== NULL
) {
2069 *out_len
= interface
->if_broadcast
.length
;
2071 if (buffer_len
< interface
->if_broadcast
.length
) {
2075 if (interface
->if_broadcast
.length
== 0) {
2079 if (interface
->if_broadcast
.length
<=
2080 sizeof(interface
->if_broadcast
.u
.buffer
)) {
2081 bcopy(interface
->if_broadcast
.u
.buffer
, addr
,
2082 interface
->if_broadcast
.length
);
2084 bcopy(interface
->if_broadcast
.u
.ptr
, addr
,
2085 interface
->if_broadcast
.length
);
2092 ifnet_lladdr_copy_bytes_internal(ifnet_t interface
, void *lladdr
,
2093 size_t lladdr_len
, kauth_cred_t
*credp
)
2095 const u_int8_t
*bytes
;
2098 uint8_t sdlbuf
[SOCK_MAXADDRLEN
+ 1];
2102 * Make sure to accomodate the largest possible
2103 * size of SA(if_lladdr)->sa_len.
2105 _CASSERT(sizeof(sdlbuf
) == (SOCK_MAXADDRLEN
+ 1));
2107 if (interface
== NULL
|| lladdr
== NULL
) {
2111 ifa
= interface
->if_lladdr
;
2113 bcopy(ifa
->ifa_addr
, &sdlbuf
, SDL(ifa
->ifa_addr
)->sdl_len
);
2116 bytes
= dlil_ifaddr_bytes(SDL(&sdlbuf
), &bytes_len
, credp
);
2117 if (bytes_len
!= lladdr_len
) {
2118 bzero(lladdr
, lladdr_len
);
2121 bcopy(bytes
, lladdr
, bytes_len
);
2128 ifnet_lladdr_copy_bytes(ifnet_t interface
, void *lladdr
, size_t length
)
2130 return ifnet_lladdr_copy_bytes_internal(interface
, lladdr
, length
,
2135 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface
, void *lladdr
, size_t length
)
2139 net_thread_marks_t marks
;
2141 kauth_cred_t
*credp
;
2146 marks
= net_thread_marks_push(NET_THREAD_CKREQ_LLADDR
);
2147 cred
= kauth_cred_proc_ref(current_proc());
2153 error
= ifnet_lladdr_copy_bytes_internal(interface
, lladdr
, length
,
2157 kauth_cred_unref(credp
);
2158 net_thread_marks_pop(marks
);
2165 ifnet_set_lladdr_internal(ifnet_t interface
, const void *lladdr
,
2166 size_t lladdr_len
, u_char new_type
, int apply_type
)
2171 if (interface
== NULL
) {
2175 ifnet_head_lock_shared();
2176 ifnet_lock_exclusive(interface
);
2177 if (lladdr_len
!= 0 &&
2178 (lladdr_len
!= interface
->if_addrlen
|| lladdr
== 0)) {
2179 ifnet_lock_done(interface
);
2183 ifa
= ifnet_addrs
[interface
->if_index
- 1];
2185 struct sockaddr_dl
*sdl
;
2188 sdl
= (struct sockaddr_dl
*)(void *)ifa
->ifa_addr
;
2189 if (lladdr_len
!= 0) {
2190 bcopy(lladdr
, LLADDR(sdl
), lladdr_len
);
2192 bzero(LLADDR(sdl
), interface
->if_addrlen
);
2194 sdl
->sdl_alen
= lladdr_len
;
2197 sdl
->sdl_type
= new_type
;
2203 ifnet_lock_done(interface
);
2206 /* Generate a kernel event */
2208 intf_event_enqueue_nwk_wq_entry(interface
, NULL
,
2209 INTF_EVENT_CODE_LLADDR_UPDATE
);
2210 dlil_post_msg(interface
, KEV_DL_SUBCLASS
,
2211 KEV_DL_LINK_ADDRESS_CHANGED
, NULL
, 0);
2218 ifnet_set_lladdr(ifnet_t interface
, const void* lladdr
, size_t lladdr_len
)
2220 return ifnet_set_lladdr_internal(interface
, lladdr
, lladdr_len
, 0, 0);
2224 ifnet_set_lladdr_and_type(ifnet_t interface
, const void* lladdr
,
2225 size_t lladdr_len
, u_char type
)
2227 return ifnet_set_lladdr_internal(interface
, lladdr
,
2228 lladdr_len
, type
, 1);
2232 ifnet_add_multicast(ifnet_t interface
, const struct sockaddr
*maddr
,
2233 ifmultiaddr_t
*ifmap
)
2235 if (interface
== NULL
|| maddr
== NULL
) {
2239 /* Don't let users screw up protocols' entries. */
2240 switch (maddr
->sa_family
) {
2242 const struct sockaddr_dl
*sdl
=
2243 (const struct sockaddr_dl
*)(uintptr_t)maddr
;
2244 if (sdl
->sdl_len
< sizeof(struct sockaddr_dl
) ||
2245 (sdl
->sdl_nlen
+ sdl
->sdl_alen
+ sdl
->sdl_slen
+
2246 offsetof(struct sockaddr_dl
, sdl_data
) > sdl
->sdl_len
)) {
2252 if (maddr
->sa_len
< ETHER_ADDR_LEN
+
2253 offsetof(struct sockaddr
, sa_data
)) {
2261 return if_addmulti_anon(interface
, maddr
, ifmap
);
2265 ifnet_remove_multicast(ifmultiaddr_t ifma
)
2267 struct sockaddr
*maddr
;
2273 maddr
= ifma
->ifma_addr
;
2274 /* Don't let users screw up protocols' entries. */
2275 if (maddr
->sa_family
!= AF_UNSPEC
&& maddr
->sa_family
!= AF_LINK
) {
2279 return if_delmulti_anon(ifma
->ifma_ifp
, maddr
);
2283 ifnet_get_multicast_list(ifnet_t ifp
, ifmultiaddr_t
**addresses
)
2287 struct ifmultiaddr
*addr
;
2289 if (ifp
== NULL
|| addresses
== NULL
) {
2293 ifnet_lock_shared(ifp
);
2294 LIST_FOREACH(addr
, &ifp
->if_multiaddrs
, ifma_link
) {
2298 MALLOC(*addresses
, ifmultiaddr_t
*, sizeof(ifmultiaddr_t
) * (cmax
+ 1),
2300 if (*addresses
== NULL
) {
2301 ifnet_lock_done(ifp
);
2305 LIST_FOREACH(addr
, &ifp
->if_multiaddrs
, ifma_link
) {
2306 if (count
+ 1 > cmax
) {
2309 (*addresses
)[count
] = (ifmultiaddr_t
)addr
;
2310 ifmaddr_reference((*addresses
)[count
]);
2313 (*addresses
)[cmax
] = NULL
;
2314 ifnet_lock_done(ifp
);
2320 ifnet_free_multicast_list(ifmultiaddr_t
*addresses
)
2324 if (addresses
== NULL
) {
2328 for (i
= 0; addresses
[i
] != NULL
; i
++) {
2329 ifmaddr_release(addresses
[i
]);
2332 FREE(addresses
, M_TEMP
);
2336 ifnet_find_by_name(const char *ifname
, ifnet_t
*ifpp
)
2341 if (ifname
== NULL
) {
2345 namelen
= strlen(ifname
);
2349 ifnet_head_lock_shared();
2350 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2352 struct sockaddr_dl
*ll_addr
;
2354 ifa
= ifnet_addrs
[ifp
->if_index
- 1];
2360 ll_addr
= (struct sockaddr_dl
*)(void *)ifa
->ifa_addr
;
2362 if (namelen
== ll_addr
->sdl_nlen
&& strncmp(ll_addr
->sdl_data
,
2363 ifname
, ll_addr
->sdl_nlen
) == 0) {
2366 ifnet_reference(*ifpp
);
2373 return (ifp
== NULL
) ? ENXIO
: 0;
2377 ifnet_list_get(ifnet_family_t family
, ifnet_t
**list
, u_int32_t
*count
)
2379 return ifnet_list_get_common(family
, FALSE
, list
, count
);
2382 __private_extern__ errno_t
2383 ifnet_list_get_all(ifnet_family_t family
, ifnet_t
**list
, u_int32_t
*count
)
2385 return ifnet_list_get_common(family
, TRUE
, list
, count
);
2389 SLIST_ENTRY(ifnet_list
) ifl_le
;
2390 struct ifnet
*ifl_ifp
;
2394 ifnet_list_get_common(ifnet_family_t family
, boolean_t get_all
, ifnet_t
**list
,
2397 #pragma unused(get_all)
2398 SLIST_HEAD(, ifnet_list
) ifl_head
;
2399 struct ifnet_list
*ifl
, *ifl_tmp
;
2404 SLIST_INIT(&ifl_head
);
2406 if (list
== NULL
|| count
== NULL
) {
2413 ifnet_head_lock_shared();
2414 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2415 if (family
== IFNET_FAMILY_ANY
|| ifp
->if_family
== family
) {
2416 MALLOC(ifl
, struct ifnet_list
*, sizeof(*ifl
),
2424 ifnet_reference(ifp
);
2425 SLIST_INSERT_HEAD(&ifl_head
, ifl
, ifl_le
);
2436 MALLOC(*list
, ifnet_t
*, sizeof(ifnet_t
) * (cnt
+ 1),
2438 if (*list
== NULL
) {
2442 bzero(*list
, sizeof(ifnet_t
) * (cnt
+ 1));
2446 SLIST_FOREACH_SAFE(ifl
, &ifl_head
, ifl_le
, ifl_tmp
) {
2447 SLIST_REMOVE(&ifl_head
, ifl
, ifnet_list
, ifl_le
);
2449 (*list
)[--cnt
] = ifl
->ifl_ifp
;
2451 ifnet_release(ifl
->ifl_ifp
);
2460 ifnet_list_free(ifnet_t
*interfaces
)
2464 if (interfaces
== NULL
) {
2468 for (i
= 0; interfaces
[i
]; i
++) {
2469 ifnet_release(interfaces
[i
]);
2472 FREE(interfaces
, M_TEMP
);
2475 /*************************************************************************/
2476 /* ifaddr_t accessors */
2477 /*************************************************************************/
2480 ifaddr_reference(ifaddr_t ifa
)
2491 ifaddr_release(ifaddr_t ifa
)
2502 ifaddr_address_family(ifaddr_t ifa
)
2504 sa_family_t family
= 0;
2508 if (ifa
->ifa_addr
!= NULL
) {
2509 family
= ifa
->ifa_addr
->sa_family
;
2517 ifaddr_address(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2521 if (ifa
== NULL
|| out_addr
== NULL
) {
2526 if (ifa
->ifa_addr
== NULL
) {
2531 copylen
= (addr_size
>= ifa
->ifa_addr
->sa_len
) ?
2532 ifa
->ifa_addr
->sa_len
: addr_size
;
2533 bcopy(ifa
->ifa_addr
, out_addr
, copylen
);
2535 if (ifa
->ifa_addr
->sa_len
> addr_size
) {
2545 ifaddr_dstaddress(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2549 if (ifa
== NULL
|| out_addr
== NULL
) {
2554 if (ifa
->ifa_dstaddr
== NULL
) {
2559 copylen
= (addr_size
>= ifa
->ifa_dstaddr
->sa_len
) ?
2560 ifa
->ifa_dstaddr
->sa_len
: addr_size
;
2561 bcopy(ifa
->ifa_dstaddr
, out_addr
, copylen
);
2563 if (ifa
->ifa_dstaddr
->sa_len
> addr_size
) {
2573 ifaddr_netmask(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2577 if (ifa
== NULL
|| out_addr
== NULL
) {
2582 if (ifa
->ifa_netmask
== NULL
) {
2587 copylen
= addr_size
>= ifa
->ifa_netmask
->sa_len
?
2588 ifa
->ifa_netmask
->sa_len
: addr_size
;
2589 bcopy(ifa
->ifa_netmask
, out_addr
, copylen
);
2591 if (ifa
->ifa_netmask
->sa_len
> addr_size
) {
2601 ifaddr_ifnet(ifaddr_t ifa
)
2609 /* ifa_ifp is set once at creation time; it is never changed */
2616 ifaddr_withaddr(const struct sockaddr
*address
)
2618 if (address
== NULL
) {
2622 return ifa_ifwithaddr(address
);
2626 ifaddr_withdstaddr(const struct sockaddr
*address
)
2628 if (address
== NULL
) {
2632 return ifa_ifwithdstaddr(address
);
2636 ifaddr_withnet(const struct sockaddr
*net
)
2642 return ifa_ifwithnet(net
);
2646 ifaddr_withroute(int flags
, const struct sockaddr
*destination
,
2647 const struct sockaddr
*gateway
)
2649 if (destination
== NULL
|| gateway
== NULL
) {
2653 return ifa_ifwithroute(flags
, destination
, gateway
);
2657 ifaddr_findbestforaddr(const struct sockaddr
*addr
, ifnet_t interface
)
2659 if (addr
== NULL
|| interface
== NULL
) {
2663 return ifaof_ifpforaddr_select(addr
, interface
);
2667 ifmaddr_reference(ifmultiaddr_t ifmaddr
)
2669 if (ifmaddr
== NULL
) {
2673 IFMA_ADDREF(ifmaddr
);
2678 ifmaddr_release(ifmultiaddr_t ifmaddr
)
2680 if (ifmaddr
== NULL
) {
2684 IFMA_REMREF(ifmaddr
);
2689 ifmaddr_address(ifmultiaddr_t ifma
, struct sockaddr
*out_addr
,
2690 u_int32_t addr_size
)
2694 if (ifma
== NULL
|| out_addr
== NULL
) {
2699 if (ifma
->ifma_addr
== NULL
) {
2704 copylen
= (addr_size
>= ifma
->ifma_addr
->sa_len
?
2705 ifma
->ifma_addr
->sa_len
: addr_size
);
2706 bcopy(ifma
->ifma_addr
, out_addr
, copylen
);
2708 if (ifma
->ifma_addr
->sa_len
> addr_size
) {
2717 ifmaddr_lladdress(ifmultiaddr_t ifma
, struct sockaddr
*out_addr
,
2718 u_int32_t addr_size
)
2720 struct ifmultiaddr
*ifma_ll
;
2722 if (ifma
== NULL
|| out_addr
== NULL
) {
2725 if ((ifma_ll
= ifma
->ifma_ll
) == NULL
) {
2729 return ifmaddr_address(ifma_ll
, out_addr
, addr_size
);
2733 ifmaddr_ifnet(ifmultiaddr_t ifma
)
2735 return (ifma
== NULL
) ? NULL
: ifma
->ifma_ifp
;
2738 /**************************************************************************/
2739 /* interface cloner */
2740 /**************************************************************************/
2743 ifnet_clone_attach(struct ifnet_clone_params
*cloner_params
,
2744 if_clone_t
*ifcloner
)
2747 struct if_clone
*ifc
= NULL
;
2750 if (cloner_params
== NULL
|| ifcloner
== NULL
||
2751 cloner_params
->ifc_name
== NULL
||
2752 cloner_params
->ifc_create
== NULL
||
2753 cloner_params
->ifc_destroy
== NULL
||
2754 (namelen
= strlen(cloner_params
->ifc_name
)) >= IFNAMSIZ
) {
2759 if (if_clone_lookup(cloner_params
->ifc_name
, NULL
) != NULL
) {
2760 printf("%s: already a cloner for %s\n", __func__
,
2761 cloner_params
->ifc_name
);
2766 /* Make room for name string */
2767 ifc
= _MALLOC(sizeof(struct if_clone
) + IFNAMSIZ
+ 1, M_CLONE
,
2770 printf("%s: _MALLOC failed\n", __func__
);
2774 strlcpy((char *)(ifc
+ 1), cloner_params
->ifc_name
, IFNAMSIZ
+ 1);
2775 ifc
->ifc_name
= (char *)(ifc
+ 1);
2776 ifc
->ifc_namelen
= namelen
;
2777 ifc
->ifc_maxunit
= IF_MAXUNIT
;
2778 ifc
->ifc_create
= cloner_params
->ifc_create
;
2779 ifc
->ifc_destroy
= cloner_params
->ifc_destroy
;
2781 error
= if_clone_attach(ifc
);
2783 printf("%s: if_clone_attach failed %d\n", __func__
, error
);
2797 ifnet_clone_detach(if_clone_t ifcloner
)
2800 struct if_clone
*ifc
= ifcloner
;
2802 if (ifc
== NULL
|| ifc
->ifc_name
== NULL
) {
2806 if ((if_clone_lookup(ifc
->ifc_name
, NULL
)) == NULL
) {
2807 printf("%s: no cloner for %s\n", __func__
, ifc
->ifc_name
);
2812 if_clone_detach(ifc
);
2820 /**************************************************************************/
2822 /**************************************************************************/
2825 ifnet_get_local_ports_extended(ifnet_t ifp
, protocol_family_t protocol
,
2826 u_int32_t flags
, u_int8_t
*bitfield
)
2830 if (bitfield
== NULL
) {
2843 /* bit string is long enough to hold 16-bit port values */
2844 bzero(bitfield
, bitstr_size(IP_PORTRANGE_SIZE
));
2846 if_ports_used_update_wakeuuid(ifp
);
2849 ifindex
= (ifp
!= NULL
) ? ifp
->if_index
: 0;
2851 if (!(flags
& IFNET_GET_LOCAL_PORTS_TCPONLY
)) {
2852 udp_get_ports_used(ifindex
, protocol
, flags
,
2856 if (!(flags
& IFNET_GET_LOCAL_PORTS_UDPONLY
)) {
2857 tcp_get_ports_used(ifindex
, protocol
, flags
,
2865 ifnet_get_local_ports(ifnet_t ifp
, u_int8_t
*bitfield
)
2867 u_int32_t flags
= IFNET_GET_LOCAL_PORTS_WILDCARDOK
;
2868 return ifnet_get_local_ports_extended(ifp
, PF_UNSPEC
, flags
,
2873 ifnet_notice_node_presence(ifnet_t ifp
, struct sockaddr
*sa
, int32_t rssi
,
2874 int lqm
, int npm
, u_int8_t srvinfo
[48])
2876 if (ifp
== NULL
|| sa
== NULL
|| srvinfo
== NULL
) {
2879 if (sa
->sa_len
> sizeof(struct sockaddr_storage
)) {
2882 if (sa
->sa_family
!= AF_LINK
&& sa
->sa_family
!= AF_INET6
) {
2886 return dlil_node_present(ifp
, sa
, rssi
, lqm
, npm
, srvinfo
);
2890 ifnet_notice_node_presence_v2(ifnet_t ifp
, struct sockaddr
*sa
, struct sockaddr_dl
*sdl
,
2891 int32_t rssi
, int lqm
, int npm
, u_int8_t srvinfo
[48])
2893 /* Support older version if sdl is NULL */
2895 return ifnet_notice_node_presence(ifp
, sa
, rssi
, lqm
, npm
, srvinfo
);
2898 if (ifp
== NULL
|| sa
== NULL
|| srvinfo
== NULL
) {
2901 if (sa
->sa_len
> sizeof(struct sockaddr_storage
)) {
2905 if (sa
->sa_family
!= AF_INET6
) {
2909 if (sdl
->sdl_family
!= AF_LINK
) {
2913 return dlil_node_present_v2(ifp
, sa
, sdl
, rssi
, lqm
, npm
, srvinfo
);
2917 ifnet_notice_node_absence(ifnet_t ifp
, struct sockaddr
*sa
)
2919 if (ifp
== NULL
|| sa
== NULL
) {
2922 if (sa
->sa_len
> sizeof(struct sockaddr_storage
)) {
2925 if (sa
->sa_family
!= AF_LINK
&& sa
->sa_family
!= AF_INET6
) {
2929 dlil_node_absent(ifp
, sa
);
2934 ifnet_notice_master_elected(ifnet_t ifp
)
2940 dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_MASTER_ELECTED
, NULL
, 0);
2945 ifnet_tx_compl_status(ifnet_t ifp
, mbuf_t m
, tx_compl_val_t val
)
2949 m_do_tx_compl_callback(m
, ifp
);
2955 ifnet_tx_compl(ifnet_t ifp
, mbuf_t m
)
2957 m_do_tx_compl_callback(m
, ifp
);
2963 ifnet_report_issues(ifnet_t ifp
, u_int8_t modid
[IFNET_MODIDLEN
],
2964 u_int8_t info
[IFNET_MODARGLEN
])
2966 if (ifp
== NULL
|| modid
== NULL
) {
2970 dlil_report_issues(ifp
, modid
, info
);
2975 ifnet_set_delegate(ifnet_t ifp
, ifnet_t delegated_ifp
)
2977 ifnet_t odifp
= NULL
;
2981 } else if (!ifnet_is_attached(ifp
, 1)) {
2985 ifnet_lock_exclusive(ifp
);
2986 odifp
= ifp
->if_delegated
.ifp
;
2987 if (odifp
!= NULL
&& odifp
== delegated_ifp
) {
2988 /* delegate info is unchanged; nothing more to do */
2989 ifnet_lock_done(ifp
);
2992 // Test if this delegate interface would cause a loop
2993 ifnet_t delegate_check_ifp
= delegated_ifp
;
2994 while (delegate_check_ifp
!= NULL
) {
2995 if (delegate_check_ifp
== ifp
) {
2996 printf("%s: delegating to %s would cause a loop\n",
2997 ifp
->if_xname
, delegated_ifp
->if_xname
);
2998 ifnet_lock_done(ifp
);
3001 delegate_check_ifp
= delegate_check_ifp
->if_delegated
.ifp
;
3003 bzero(&ifp
->if_delegated
, sizeof(ifp
->if_delegated
));
3004 if (delegated_ifp
!= NULL
&& ifp
!= delegated_ifp
) {
3005 ifp
->if_delegated
.ifp
= delegated_ifp
;
3006 ifnet_reference(delegated_ifp
);
3007 ifp
->if_delegated
.type
= delegated_ifp
->if_type
;
3008 ifp
->if_delegated
.family
= delegated_ifp
->if_family
;
3009 ifp
->if_delegated
.subfamily
= delegated_ifp
->if_subfamily
;
3010 ifp
->if_delegated
.expensive
=
3011 delegated_ifp
->if_eflags
& IFEF_EXPENSIVE
? 1 : 0;
3012 ifp
->if_delegated
.constrained
=
3013 delegated_ifp
->if_xflags
& IFXF_CONSTRAINED
? 1 : 0;
3016 * Propogate flags related to ECN from delegated interface
3018 ifp
->if_eflags
&= ~(IFEF_ECN_ENABLE
| IFEF_ECN_DISABLE
);
3019 ifp
->if_eflags
|= (delegated_ifp
->if_eflags
&
3020 (IFEF_ECN_ENABLE
| IFEF_ECN_DISABLE
));
3022 printf("%s: is now delegating %s (type 0x%x, family %u, "
3023 "sub-family %u)\n", ifp
->if_xname
, delegated_ifp
->if_xname
,
3024 delegated_ifp
->if_type
, delegated_ifp
->if_family
,
3025 delegated_ifp
->if_subfamily
);
3028 ifnet_lock_done(ifp
);
3030 if (odifp
!= NULL
) {
3031 if (odifp
!= delegated_ifp
) {
3032 printf("%s: is no longer delegating %s\n",
3033 ifp
->if_xname
, odifp
->if_xname
);
3035 ifnet_release(odifp
);
3038 /* Generate a kernel event */
3039 dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_IFDELEGATE_CHANGED
, NULL
, 0);
3042 /* Release the io ref count */
3043 ifnet_decr_iorefcnt(ifp
);
3049 ifnet_get_delegate(ifnet_t ifp
, ifnet_t
*pdelegated_ifp
)
3051 if (ifp
== NULL
|| pdelegated_ifp
== NULL
) {
3053 } else if (!ifnet_is_attached(ifp
, 1)) {
3057 ifnet_lock_shared(ifp
);
3058 if (ifp
->if_delegated
.ifp
!= NULL
) {
3059 ifnet_reference(ifp
->if_delegated
.ifp
);
3061 *pdelegated_ifp
= ifp
->if_delegated
.ifp
;
3062 ifnet_lock_done(ifp
);
3064 /* Release the io ref count */
3065 ifnet_decr_iorefcnt(ifp
);
3071 ifnet_get_keepalive_offload_frames(ifnet_t ifp
,
3072 struct ifnet_keepalive_offload_frame
*frames_array
,
3073 u_int32_t frames_array_count
, size_t frame_data_offset
,
3074 u_int32_t
*used_frames_count
)
3078 if (frames_array
== NULL
|| used_frames_count
== NULL
||
3079 frame_data_offset
>= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE
) {
3083 /* frame_data_offset should be 32-bit aligned */
3084 if (P2ROUNDUP(frame_data_offset
, sizeof(u_int32_t
)) !=
3085 frame_data_offset
) {
3089 *used_frames_count
= 0;
3090 if (frames_array_count
== 0) {
3094 /* Keep-alive offload not required for CLAT interface */
3095 if (IS_INTF_CLAT46(ifp
)) {
3099 for (i
= 0; i
< frames_array_count
; i
++) {
3100 struct ifnet_keepalive_offload_frame
*frame
= frames_array
+ i
;
3102 bzero(frame
, sizeof(struct ifnet_keepalive_offload_frame
));
3105 /* First collect IPsec related keep-alive frames */
3106 *used_frames_count
= key_fill_offload_frames_for_savs(ifp
,
3107 frames_array
, frames_array_count
, frame_data_offset
);
3109 /* If there is more room, collect other UDP keep-alive frames */
3110 if (*used_frames_count
< frames_array_count
) {
3111 udp_fill_keepalive_offload_frames(ifp
, frames_array
,
3112 frames_array_count
, frame_data_offset
,
3116 /* If there is more room, collect other TCP keep-alive frames */
3117 if (*used_frames_count
< frames_array_count
) {
3118 tcp_fill_keepalive_offload_frames(ifp
, frames_array
,
3119 frames_array_count
, frame_data_offset
,
3123 VERIFY(*used_frames_count
<= frames_array_count
);
3129 ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp
,
3130 struct ifnet_keepalive_offload_frame
*frame
)
3134 if (ifp
== NULL
|| frame
== NULL
) {
3138 if (frame
->type
!= IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP
) {
3141 if (frame
->ether_type
!= IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4
&&
3142 frame
->ether_type
!= IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6
) {
3145 if (frame
->local_port
== 0 || frame
->remote_port
== 0) {
3149 error
= tcp_notify_kao_timeout(ifp
, frame
);
3155 ifnet_link_status_report(ifnet_t ifp
, const void *buffer
,
3158 struct if_link_status
*ifsr
;
3161 if (ifp
== NULL
|| buffer
== NULL
|| buffer_len
== 0) {
3165 ifnet_lock_shared(ifp
);
3168 * Make sure that the interface is attached but there is no need
3169 * to take a reference because this call is coming from the driver.
3171 if (!ifnet_is_attached(ifp
, 0)) {
3172 ifnet_lock_done(ifp
);
3176 lck_rw_lock_exclusive(&ifp
->if_link_status_lock
);
3179 * If this is the first status report then allocate memory
3182 if (ifp
->if_link_status
== NULL
) {
3183 MALLOC(ifp
->if_link_status
, struct if_link_status
*,
3184 sizeof(struct if_link_status
), M_TEMP
, M_ZERO
);
3185 if (ifp
->if_link_status
== NULL
) {
3191 ifsr
= __DECONST(struct if_link_status
*, buffer
);
3193 if (ifp
->if_type
== IFT_CELLULAR
) {
3194 struct if_cellular_status_v1
*if_cell_sr
, *new_cell_sr
;
3196 * Currently we have a single version -- if it does
3197 * not match, just return.
3199 if (ifsr
->ifsr_version
!=
3200 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION
) {
3205 if (ifsr
->ifsr_len
!= sizeof(*if_cell_sr
)) {
3211 &ifp
->if_link_status
->ifsr_u
.ifsr_cell
.if_cell_u
.if_status_v1
;
3212 new_cell_sr
= &ifsr
->ifsr_u
.ifsr_cell
.if_cell_u
.if_status_v1
;
3213 /* Check if we need to act on any new notifications */
3214 if ((new_cell_sr
->valid_bitmask
&
3215 IF_CELL_UL_MSS_RECOMMENDED_VALID
) &&
3216 new_cell_sr
->mss_recommended
!=
3217 if_cell_sr
->mss_recommended
) {
3218 atomic_bitset_32(&tcbinfo
.ipi_flags
,
3219 INPCBINFO_UPDATE_MSS
);
3220 inpcb_timer_sched(&tcbinfo
, INPCB_TIMER_FAST
);
3222 necp_update_all_clients();
3226 /* Finally copy the new information */
3227 ifp
->if_link_status
->ifsr_version
= ifsr
->ifsr_version
;
3228 ifp
->if_link_status
->ifsr_len
= ifsr
->ifsr_len
;
3229 if_cell_sr
->valid_bitmask
= 0;
3230 bcopy(new_cell_sr
, if_cell_sr
, sizeof(*if_cell_sr
));
3231 } else if (IFNET_IS_WIFI(ifp
)) {
3232 struct if_wifi_status_v1
*if_wifi_sr
, *new_wifi_sr
;
3235 if (ifsr
->ifsr_version
!=
3236 IF_WIFI_STATUS_REPORT_CURRENT_VERSION
) {
3241 if (ifsr
->ifsr_len
!= sizeof(*if_wifi_sr
)) {
3247 &ifp
->if_link_status
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
3249 &ifsr
->ifsr_u
.ifsr_wifi
.if_wifi_u
.if_status_v1
;
3250 ifp
->if_link_status
->ifsr_version
= ifsr
->ifsr_version
;
3251 ifp
->if_link_status
->ifsr_len
= ifsr
->ifsr_len
;
3252 if_wifi_sr
->valid_bitmask
= 0;
3253 bcopy(new_wifi_sr
, if_wifi_sr
, sizeof(*if_wifi_sr
));
3256 * Update the bandwidth values if we got recent values
3257 * reported through the other KPI.
3259 if (!(new_wifi_sr
->valid_bitmask
&
3260 IF_WIFI_UL_MAX_BANDWIDTH_VALID
) &&
3261 ifp
->if_output_bw
.max_bw
> 0) {
3262 if_wifi_sr
->valid_bitmask
|=
3263 IF_WIFI_UL_MAX_BANDWIDTH_VALID
;
3264 if_wifi_sr
->ul_max_bandwidth
=
3265 ifp
->if_output_bw
.max_bw
;
3267 if (!(new_wifi_sr
->valid_bitmask
&
3268 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
) &&
3269 ifp
->if_output_bw
.eff_bw
> 0) {
3270 if_wifi_sr
->valid_bitmask
|=
3271 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID
;
3272 if_wifi_sr
->ul_effective_bandwidth
=
3273 ifp
->if_output_bw
.eff_bw
;
3275 if (!(new_wifi_sr
->valid_bitmask
&
3276 IF_WIFI_DL_MAX_BANDWIDTH_VALID
) &&
3277 ifp
->if_input_bw
.max_bw
> 0) {
3278 if_wifi_sr
->valid_bitmask
|=
3279 IF_WIFI_DL_MAX_BANDWIDTH_VALID
;
3280 if_wifi_sr
->dl_max_bandwidth
=
3281 ifp
->if_input_bw
.max_bw
;
3283 if (!(new_wifi_sr
->valid_bitmask
&
3284 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
) &&
3285 ifp
->if_input_bw
.eff_bw
> 0) {
3286 if_wifi_sr
->valid_bitmask
|=
3287 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID
;
3288 if_wifi_sr
->dl_effective_bandwidth
=
3289 ifp
->if_input_bw
.eff_bw
;
3294 lck_rw_done(&ifp
->if_link_status_lock
);
3295 ifnet_lock_done(ifp
);
3299 /*************************************************************************/
3300 /* Fastlane QoS Capable */
3301 /*************************************************************************/
3304 ifnet_set_fastlane_capable(ifnet_t interface
, boolean_t capable
)
3306 if (interface
== NULL
) {
3310 if_set_qosmarking_mode(interface
,
3311 capable
? IFRTYPE_QOSMARKING_FASTLANE
: IFRTYPE_QOSMARKING_MODE_NONE
);
3317 ifnet_get_fastlane_capable(ifnet_t interface
, boolean_t
*capable
)
3319 if (interface
== NULL
|| capable
== NULL
) {
3322 if (interface
->if_qosmarking_mode
== IFRTYPE_QOSMARKING_FASTLANE
) {
3331 ifnet_get_unsent_bytes(ifnet_t interface
, int64_t *unsent_bytes
)
3335 if (interface
== NULL
|| unsent_bytes
== NULL
) {
3339 bytes
= *unsent_bytes
= 0;
3341 if (!IF_FULLY_ATTACHED(interface
)) {
3345 bytes
= interface
->if_sndbyte_unsent
;
3347 if (interface
->if_eflags
& IFEF_TXSTART
) {
3348 bytes
+= IFCQ_BYTES(&interface
->if_snd
);
3350 *unsent_bytes
= bytes
;
3356 ifnet_get_buffer_status(const ifnet_t ifp
, ifnet_buffer_status_t
*buf_status
)
3358 if (ifp
== NULL
|| buf_status
== NULL
) {
3362 bzero(buf_status
, sizeof(*buf_status
));
3364 if (!IF_FULLY_ATTACHED(ifp
)) {
3368 if (ifp
->if_eflags
& IFEF_TXSTART
) {
3369 buf_status
->buf_interface
= IFCQ_BYTES(&ifp
->if_snd
);
3372 buf_status
->buf_sndbuf
= ((buf_status
->buf_interface
!= 0) ||
3373 (ifp
->if_sndbyte_unsent
!= 0)) ? 1 : 0;
3379 ifnet_normalise_unsent_data(void)
3383 ifnet_head_lock_shared();
3384 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
3385 ifnet_lock_exclusive(ifp
);
3386 if (!IF_FULLY_ATTACHED(ifp
)) {
3387 ifnet_lock_done(ifp
);
3390 if (!(ifp
->if_eflags
& IFEF_TXSTART
)) {
3391 ifnet_lock_done(ifp
);
3395 if (ifp
->if_sndbyte_total
> 0 ||
3396 IFCQ_BYTES(&ifp
->if_snd
) > 0) {
3397 ifp
->if_unsent_data_cnt
++;
3400 ifnet_lock_done(ifp
);
3406 ifnet_set_low_power_mode(ifnet_t ifp
, boolean_t on
)
3410 error
= if_set_low_power(ifp
, on
);
3416 ifnet_get_low_power_mode(ifnet_t ifp
, boolean_t
*on
)
3418 if (ifp
== NULL
|| on
== NULL
) {
3422 *on
= !!(ifp
->if_xflags
& IFXF_LOW_POWER
);
3427 /*************************************************************************/
3428 /* Interface advisory notifications */
3429 /*************************************************************************/
3431 ifnet_interface_advisory_report(ifnet_t ifp
,
3432 const struct ifnet_interface_advisory
*advisory
)
3436 #pragma unused(advisory)