2 * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include "kpi_interface.h"
31 #include <sys/queue.h>
32 #include <sys/param.h> /* for definition of NULL */
33 #include <kern/debug.h> /* for panic */
34 #include <sys/errno.h>
35 #include <sys/socket.h>
36 #include <sys/kern_event.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39 #include <sys/kpi_mbuf.h>
40 #include <sys/mcache.h>
41 #include <sys/protosw.h>
42 #include <sys/syslog.h>
43 #include <net/if_var.h>
44 #include <net/if_dl.h>
46 #include <net/if_types.h>
47 #include <net/if_dl.h>
48 #include <net/if_arp.h>
49 #include <net/if_llreach.h>
50 #include <net/if_ether.h>
51 #include <net/route.h>
52 #include <libkern/libkern.h>
53 #include <libkern/OSAtomic.h>
54 #include <kern/locks.h>
55 #include <kern/clock.h>
56 #include <sys/sockio.h>
58 #include <sys/sysctl.h>
60 #include <netinet/ip_var.h>
61 #include <netinet/udp.h>
62 #include <netinet/udp_var.h>
63 #include <netinet/tcp.h>
64 #include <netinet/tcp_var.h>
65 #include <netinet/in_pcb.h>
67 #include <netinet/igmp_var.h>
70 #include <netinet6/mld6_var.h>
73 #include "net/net_str_id.h"
76 #include <sys/kauth.h>
77 #include <security/mac_framework.h>
/*
 * Stamp a timeval with the current uptime (seconds resolution only).
 * Used to record the last-change time of interface statistics.
 */
#define	TOUCHLASTCHANGE(__if_lastchange) {				\
	(__if_lastchange)->tv_sec = net_uptime();			\
	(__if_lastchange)->tv_usec = 0;					\
}
85 static errno_t
ifnet_defrouter_llreachinfo(ifnet_t
, int,
86 struct ifnet_llreach_info
*);
87 static void ifnet_kpi_free(ifnet_t
);
88 static errno_t
ifnet_list_get_common(ifnet_family_t
, boolean_t
, ifnet_t
**,
90 static errno_t
ifnet_set_lladdr_internal(ifnet_t
, const void *, size_t,
92 static errno_t
ifnet_awdl_check_eflags(ifnet_t
, u_int32_t
*, u_int32_t
*);
95 * Temporary work around until we have real reference counting
97 * We keep the bits about calling dlil_if_release (which should be
98 * called recycle) transparent by calling it from our if_free function
99 * pointer. We have to keep the client's original detach function
100 * somewhere so we can call it.
103 ifnet_kpi_free(ifnet_t ifp
)
105 ifnet_detached_func detach_func
= ifp
->if_kpi_storage
;
107 if (detach_func
!= NULL
)
110 if (ifp
->if_broadcast
.length
> sizeof (ifp
->if_broadcast
.u
.buffer
)) {
111 FREE(ifp
->if_broadcast
.u
.ptr
, M_IFADDR
);
112 ifp
->if_broadcast
.u
.ptr
= NULL
;
115 dlil_if_release(ifp
);
119 ifnet_allocate(const struct ifnet_init_params
*init
, ifnet_t
*interface
)
121 struct ifnet_init_eparams einit
;
123 bzero(&einit
, sizeof (einit
));
125 einit
.ver
= IFNET_INIT_CURRENT_VERSION
;
126 einit
.len
= sizeof (einit
);
127 einit
.flags
= IFNET_INIT_LEGACY
;
128 einit
.uniqueid
= init
->uniqueid
;
129 einit
.uniqueid_len
= init
->uniqueid_len
;
130 einit
.name
= init
->name
;
131 einit
.unit
= init
->unit
;
132 einit
.family
= init
->family
;
133 einit
.type
= init
->type
;
134 einit
.output
= init
->output
;
135 einit
.demux
= init
->demux
;
136 einit
.add_proto
= init
->add_proto
;
137 einit
.del_proto
= init
->del_proto
;
138 einit
.check_multi
= init
->check_multi
;
139 einit
.framer
= init
->framer
;
140 einit
.softc
= init
->softc
;
141 einit
.ioctl
= init
->ioctl
;
142 einit
.set_bpf_tap
= init
->set_bpf_tap
;
143 einit
.detach
= init
->detach
;
144 einit
.event
= init
->event
;
145 einit
.broadcast_addr
= init
->broadcast_addr
;
146 einit
.broadcast_len
= init
->broadcast_len
;
148 return (ifnet_allocate_extended(&einit
, interface
));
152 ifnet_allocate_extended(const struct ifnet_init_eparams
*einit0
,
155 struct ifnet_init_eparams einit
;
156 struct ifnet
*ifp
= NULL
;
161 if (einit
.ver
!= IFNET_INIT_CURRENT_VERSION
||
162 einit
.len
< sizeof (einit
))
165 if (einit
.family
== 0 || einit
.name
== NULL
||
166 strlen(einit
.name
) >= IFNAMSIZ
||
167 (einit
.type
& 0xFFFFFF00) != 0 || einit
.type
== 0)
170 if (einit
.flags
& IFNET_INIT_LEGACY
) {
171 if (einit
.output
== NULL
|| einit
.flags
!= IFNET_INIT_LEGACY
)
174 einit
.pre_enqueue
= NULL
;
176 einit
.output_ctl
= NULL
;
177 einit
.output_sched_model
= IFNET_SCHED_MODEL_NORMAL
;
178 einit
.input_poll
= NULL
;
179 einit
.input_ctl
= NULL
;
181 if (einit
.start
== NULL
)
185 if (einit
.output_sched_model
>= IFNET_SCHED_MODEL_MAX
)
188 if (einit
.flags
& IFNET_INIT_INPUT_POLL
) {
189 if (einit
.input_poll
== NULL
|| einit
.input_ctl
== NULL
)
192 einit
.input_poll
= NULL
;
193 einit
.input_ctl
= NULL
;
197 error
= dlil_if_acquire(einit
.family
, einit
.uniqueid
,
198 einit
.uniqueid_len
, &ifp
);
204 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
205 * to point to storage of at least IFNAMSIZ bytes. It is safe
208 strlcpy(__DECONST(char *, ifp
->if_name
), einit
.name
, IFNAMSIZ
);
209 ifp
->if_type
= einit
.type
;
210 ifp
->if_family
= einit
.family
;
211 ifp
->if_subfamily
= einit
.subfamily
;
212 ifp
->if_unit
= einit
.unit
;
213 ifp
->if_output
= einit
.output
;
214 ifp
->if_pre_enqueue
= einit
.pre_enqueue
;
215 ifp
->if_start
= einit
.start
;
216 ifp
->if_output_ctl
= einit
.output_ctl
;
217 ifp
->if_output_sched_model
= einit
.output_sched_model
;
218 ifp
->if_output_bw
.eff_bw
= einit
.output_bw
;
219 ifp
->if_output_bw
.max_bw
= einit
.output_bw_max
;
220 ifp
->if_output_lt
.eff_lt
= einit
.output_lt
;
221 ifp
->if_output_lt
.max_lt
= einit
.output_lt_max
;
222 ifp
->if_input_poll
= einit
.input_poll
;
223 ifp
->if_input_ctl
= einit
.input_ctl
;
224 ifp
->if_input_bw
.eff_bw
= einit
.input_bw
;
225 ifp
->if_input_bw
.max_bw
= einit
.input_bw_max
;
226 ifp
->if_input_lt
.eff_lt
= einit
.input_lt
;
227 ifp
->if_input_lt
.max_lt
= einit
.input_lt_max
;
228 ifp
->if_demux
= einit
.demux
;
229 ifp
->if_add_proto
= einit
.add_proto
;
230 ifp
->if_del_proto
= einit
.del_proto
;
231 ifp
->if_check_multi
= einit
.check_multi
;
232 ifp
->if_framer_legacy
= einit
.framer
;
233 ifp
->if_framer
= einit
.framer_extended
;
234 ifp
->if_softc
= einit
.softc
;
235 ifp
->if_ioctl
= einit
.ioctl
;
236 ifp
->if_set_bpf_tap
= einit
.set_bpf_tap
;
237 ifp
->if_free
= ifnet_kpi_free
;
238 ifp
->if_event
= einit
.event
;
239 ifp
->if_kpi_storage
= einit
.detach
;
241 /* Initialize external name (name + unit) */
242 snprintf(__DECONST(char *, ifp
->if_xname
), IFXNAMSIZ
,
243 "%s%d", ifp
->if_name
, ifp
->if_unit
);
246 * On embedded, framer() is already in the extended form;
247 * we simply use it as is, unless the caller specifies
248 * framer_extended() which will then override it.
250 * On non-embedded, framer() has long been exposed as part
251 * of the public KPI, and therefore its signature must
252 * remain the same (without the pre- and postpend length
253 * parameters.) We special case ether_frameout, such that
254 * it gets mapped to its extended variant. All other cases
255 * utilize the stub routine which will simply return zeroes
256 * for those new parameters.
258 * Internally, DLIL will only use the extended callback
259 * variant which is represented by if_framer.
261 if (ifp
->if_framer
== NULL
&& ifp
->if_framer_legacy
!= NULL
) {
262 if (ifp
->if_framer_legacy
== ether_frameout
)
263 ifp
->if_framer
= ether_frameout_extended
;
265 ifp
->if_framer
= ifnet_framer_stub
;
268 if (ifp
->if_output_bw
.eff_bw
> ifp
->if_output_bw
.max_bw
)
269 ifp
->if_output_bw
.max_bw
= ifp
->if_output_bw
.eff_bw
;
270 else if (ifp
->if_output_bw
.eff_bw
== 0)
271 ifp
->if_output_bw
.eff_bw
= ifp
->if_output_bw
.max_bw
;
273 if (ifp
->if_input_bw
.eff_bw
> ifp
->if_input_bw
.max_bw
)
274 ifp
->if_input_bw
.max_bw
= ifp
->if_input_bw
.eff_bw
;
275 else if (ifp
->if_input_bw
.eff_bw
== 0)
276 ifp
->if_input_bw
.eff_bw
= ifp
->if_input_bw
.max_bw
;
278 if (ifp
->if_output_bw
.max_bw
== 0)
279 ifp
->if_output_bw
= ifp
->if_input_bw
;
280 else if (ifp
->if_input_bw
.max_bw
== 0)
281 ifp
->if_input_bw
= ifp
->if_output_bw
;
283 /* Pin if_baudrate to 32 bits */
284 br
= MAX(ifp
->if_output_bw
.max_bw
, ifp
->if_input_bw
.max_bw
);
286 ifp
->if_baudrate
= (br
> 0xFFFFFFFF) ? 0xFFFFFFFF : br
;
288 if (ifp
->if_output_lt
.eff_lt
> ifp
->if_output_lt
.max_lt
)
289 ifp
->if_output_lt
.max_lt
= ifp
->if_output_lt
.eff_lt
;
290 else if (ifp
->if_output_lt
.eff_lt
== 0)
291 ifp
->if_output_lt
.eff_lt
= ifp
->if_output_lt
.max_lt
;
293 if (ifp
->if_input_lt
.eff_lt
> ifp
->if_input_lt
.max_lt
)
294 ifp
->if_input_lt
.max_lt
= ifp
->if_input_lt
.eff_lt
;
295 else if (ifp
->if_input_lt
.eff_lt
== 0)
296 ifp
->if_input_lt
.eff_lt
= ifp
->if_input_lt
.max_lt
;
298 if (ifp
->if_output_lt
.max_lt
== 0)
299 ifp
->if_output_lt
= ifp
->if_input_lt
;
300 else if (ifp
->if_input_lt
.max_lt
== 0)
301 ifp
->if_input_lt
= ifp
->if_output_lt
;
303 if (ifp
->if_ioctl
== NULL
)
304 ifp
->if_ioctl
= ifp_if_ioctl
;
306 if (ifp
->if_start
!= NULL
) {
307 ifp
->if_eflags
|= IFEF_TXSTART
;
308 if (ifp
->if_pre_enqueue
== NULL
)
309 ifp
->if_pre_enqueue
= ifnet_enqueue
;
310 ifp
->if_output
= ifp
->if_pre_enqueue
;
312 ifp
->if_eflags
&= ~IFEF_TXSTART
;
315 if (ifp
->if_input_poll
!= NULL
)
316 ifp
->if_eflags
|= IFEF_RXPOLL
;
318 ifp
->if_eflags
&= ~IFEF_RXPOLL
;
320 VERIFY(!(einit
.flags
& IFNET_INIT_LEGACY
) ||
321 (ifp
->if_pre_enqueue
== NULL
&& ifp
->if_start
== NULL
&&
322 ifp
->if_output_ctl
== NULL
&& ifp
->if_input_poll
== NULL
&&
323 ifp
->if_input_ctl
== NULL
));
324 VERIFY(!(einit
.flags
& IFNET_INIT_INPUT_POLL
) ||
325 (ifp
->if_input_poll
!= NULL
&& ifp
->if_input_ctl
!= NULL
));
327 if (einit
.broadcast_len
&& einit
.broadcast_addr
) {
328 if (einit
.broadcast_len
>
329 sizeof (ifp
->if_broadcast
.u
.buffer
)) {
330 MALLOC(ifp
->if_broadcast
.u
.ptr
, u_char
*,
331 einit
.broadcast_len
, M_IFADDR
, M_NOWAIT
);
332 if (ifp
->if_broadcast
.u
.ptr
== NULL
) {
335 bcopy(einit
.broadcast_addr
,
336 ifp
->if_broadcast
.u
.ptr
,
337 einit
.broadcast_len
);
340 bcopy(einit
.broadcast_addr
,
341 ifp
->if_broadcast
.u
.buffer
,
342 einit
.broadcast_len
);
344 ifp
->if_broadcast
.length
= einit
.broadcast_len
;
346 bzero(&ifp
->if_broadcast
, sizeof (ifp
->if_broadcast
));
349 IFCQ_TARGET_QDELAY(&ifp
->if_snd
) =
350 einit
.output_target_qdelay
;
351 IFCQ_MAXLEN(&ifp
->if_snd
) = einit
.sndq_maxlen
;
355 // temporary - this should be done in dlil_if_acquire
356 ifnet_reference(ifp
);
358 dlil_if_release(ifp
);
364 * Note: We should do something here to indicate that we haven't been
365 * attached yet. By doing so, we can catch the case in ifnet_release
366 * where the reference count reaches zero and call the recycle
367 * function. If the interface is attached, the interface will be
368 * recycled when the interface's if_free function is called. If the
369 * interface is never attached, the if_free function will never be
370 * called and the interface will never be recycled.
377 ifnet_reference(ifnet_t ifp
)
379 return (dlil_if_ref(ifp
));
383 ifnet_release(ifnet_t ifp
)
385 return (dlil_if_free(ifp
));
389 ifnet_interface_family_find(const char *module_string
,
390 ifnet_family_t
*family_id
)
392 if (module_string
== NULL
|| family_id
== NULL
)
395 return (net_str_id_find_internal(module_string
, family_id
,
400 ifnet_softc(ifnet_t interface
)
402 return ((interface
== NULL
) ? NULL
: interface
->if_softc
);
406 ifnet_name(ifnet_t interface
)
408 return ((interface
== NULL
) ? NULL
: interface
->if_name
);
412 ifnet_family(ifnet_t interface
)
414 return ((interface
== NULL
) ? 0 : interface
->if_family
);
418 ifnet_subfamily(ifnet_t interface
)
420 return ((interface
== NULL
) ? 0 : interface
->if_subfamily
);
424 ifnet_unit(ifnet_t interface
)
426 return ((interface
== NULL
) ? (u_int32_t
)0xffffffff :
427 (u_int32_t
)interface
->if_unit
);
431 ifnet_index(ifnet_t interface
)
433 return ((interface
== NULL
) ? (u_int32_t
)0xffffffff :
434 interface
->if_index
);
438 ifnet_set_flags(ifnet_t interface
, u_int16_t new_flags
, u_int16_t mask
)
442 if (interface
== NULL
)
445 ifnet_lock_exclusive(interface
);
447 /* If we are modifying the up/down state, call if_updown */
448 if ((mask
& IFF_UP
) != 0) {
449 if_updown(interface
, (new_flags
& IFF_UP
) == IFF_UP
);
452 old_flags
= interface
->if_flags
;
453 interface
->if_flags
= (new_flags
& mask
) | (interface
->if_flags
& ~mask
);
454 /* If we are modifying the multicast flag, set/unset the silent flag */
455 if ((old_flags
& IFF_MULTICAST
) !=
456 (interface
->if_flags
& IFF_MULTICAST
)) {
458 if (IGMP_IFINFO(interface
) != NULL
)
459 igmp_initsilent(interface
, IGMP_IFINFO(interface
));
462 if (MLD_IFINFO(interface
) != NULL
)
463 mld6_initsilent(interface
, MLD_IFINFO(interface
));
467 ifnet_lock_done(interface
);
473 ifnet_flags(ifnet_t interface
)
475 return ((interface
== NULL
) ? 0 : interface
->if_flags
);
479 * This routine ensures the following:
481 * If IFEF_AWDL is set by the caller, also set the rest of flags as
482 * defined in IFEF_AWDL_MASK.
484 * If IFEF_AWDL has been set on the interface and the caller attempts
485 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
488 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
491 * All other flags not associated with AWDL are not affected.
493 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
496 ifnet_awdl_check_eflags(ifnet_t ifp
, u_int32_t
*new_eflags
, u_int32_t
*mask
)
500 ifnet_lock_assert(ifp
, IFNET_LCK_ASSERT_EXCLUSIVE
);
502 eflags
= (*new_eflags
& *mask
) | (ifp
->if_eflags
& ~(*mask
));
504 if (ifp
->if_eflags
& IFEF_AWDL
) {
505 if (eflags
& IFEF_AWDL
) {
506 if ((eflags
& IFEF_AWDL_MASK
) != IFEF_AWDL_MASK
)
509 *new_eflags
&= ~IFEF_AWDL_MASK
;
510 *mask
|= IFEF_AWDL_MASK
;
512 } else if (eflags
& IFEF_AWDL
) {
513 *new_eflags
|= IFEF_AWDL_MASK
;
514 *mask
|= IFEF_AWDL_MASK
;
515 } else if (eflags
& IFEF_AWDL_RESTRICTED
&&
516 !(ifp
->if_eflags
& IFEF_AWDL
))
523 ifnet_set_eflags(ifnet_t interface
, u_int32_t new_flags
, u_int32_t mask
)
526 struct kev_msg ev_msg
;
527 struct net_event_data ev_data
;
529 if (interface
== NULL
)
532 bzero(&ev_msg
, sizeof(ev_msg
));
533 ifnet_lock_exclusive(interface
);
535 * Sanity checks for IFEF_AWDL and its related flags.
537 if (ifnet_awdl_check_eflags(interface
, &new_flags
, &mask
) != 0) {
538 ifnet_lock_done(interface
);
541 oeflags
= interface
->if_eflags
;
542 interface
->if_eflags
=
543 (new_flags
& mask
) | (interface
->if_eflags
& ~mask
);
544 ifnet_lock_done(interface
);
545 if (interface
->if_eflags
& IFEF_AWDL_RESTRICTED
&&
546 !(oeflags
& IFEF_AWDL_RESTRICTED
)) {
547 ev_msg
.event_code
= KEV_DL_AWDL_RESTRICTED
;
549 * The interface is now restricted to applications that have
551 * The check for the entitlement will be done in the data
552 * path, so we don't have to do anything here.
554 } else if (oeflags
& IFEF_AWDL_RESTRICTED
&&
555 !(interface
->if_eflags
& IFEF_AWDL_RESTRICTED
))
556 ev_msg
.event_code
= KEV_DL_AWDL_UNRESTRICTED
;
558 * Notify configd so that it has a chance to perform better
559 * reachability detection.
561 if (ev_msg
.event_code
) {
562 bzero(&ev_data
, sizeof(ev_data
));
563 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
564 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
565 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
566 strlcpy(ev_data
.if_name
, interface
->if_name
, IFNAMSIZ
);
567 ev_data
.if_family
= interface
->if_family
;
568 ev_data
.if_unit
= interface
->if_unit
;
569 ev_msg
.dv
[0].data_length
= sizeof(struct net_event_data
);
570 ev_msg
.dv
[0].data_ptr
= &ev_data
;
571 ev_msg
.dv
[1].data_length
= 0;
572 kev_post_msg(&ev_msg
);
579 ifnet_eflags(ifnet_t interface
)
581 return ((interface
== NULL
) ? 0 : interface
->if_eflags
);
585 ifnet_set_idle_flags_locked(ifnet_t ifp
, u_int32_t new_flags
, u_int32_t mask
)
592 lck_mtx_assert(rnh_lock
, LCK_MTX_ASSERT_OWNED
);
593 ifnet_lock_assert(ifp
, IFNET_LCK_ASSERT_EXCLUSIVE
);
596 * If this is called prior to ifnet attach, the actual work will
597 * be done at attach time. Otherwise, if it is called after
598 * ifnet detach, then it is a no-op.
600 if (!ifnet_is_attached(ifp
, 0)) {
601 ifp
->if_idle_new_flags
= new_flags
;
602 ifp
->if_idle_new_flags_mask
= mask
;
605 ifp
->if_idle_new_flags
= ifp
->if_idle_new_flags_mask
= 0;
608 before
= ifp
->if_idle_flags
;
609 ifp
->if_idle_flags
= (new_flags
& mask
) | (ifp
->if_idle_flags
& ~mask
);
610 after
= ifp
->if_idle_flags
;
612 if ((after
- before
) < 0 && ifp
->if_idle_flags
== 0 &&
613 ifp
->if_want_aggressive_drain
!= 0) {
614 ifp
->if_want_aggressive_drain
= 0;
615 if (ifnet_aggressive_drainers
== 0)
616 panic("%s: ifp=%p negative aggdrain!", __func__
, ifp
);
617 } else if ((after
- before
) > 0 && ifp
->if_want_aggressive_drain
== 0) {
618 ifp
->if_want_aggressive_drain
++;
619 if (++ifnet_aggressive_drainers
== 0)
620 panic("%s: ifp=%p wraparound aggdrain!", __func__
, ifp
);
627 ifnet_set_idle_flags(ifnet_t ifp
, u_int32_t new_flags
, u_int32_t mask
)
631 lck_mtx_lock(rnh_lock
);
632 ifnet_lock_exclusive(ifp
);
633 err
= ifnet_set_idle_flags_locked(ifp
, new_flags
, mask
);
634 ifnet_lock_done(ifp
);
635 lck_mtx_unlock(rnh_lock
);
641 ifnet_idle_flags(ifnet_t ifp
)
643 return ((ifp
== NULL
) ? 0 : ifp
->if_idle_flags
);
647 ifnet_set_link_quality(ifnet_t ifp
, int quality
)
651 if (ifp
== NULL
|| quality
< IFNET_LQM_MIN
|| quality
> IFNET_LQM_MAX
) {
656 if (!ifnet_is_attached(ifp
, 0)) {
661 if_lqm_update(ifp
, quality
);
668 ifnet_link_quality(ifnet_t ifp
)
673 return (IFNET_LQM_THRESH_OFF
);
675 ifnet_lock_shared(ifp
);
677 ifnet_lock_done(ifp
);
683 ifnet_defrouter_llreachinfo(ifnet_t ifp
, int af
,
684 struct ifnet_llreach_info
*iflri
)
686 if (ifp
== NULL
|| iflri
== NULL
)
689 VERIFY(af
== AF_INET
|| af
== AF_INET6
);
691 return (ifnet_llreach_get_defrouter(ifp
, af
, iflri
));
695 ifnet_inet_defrouter_llreachinfo(ifnet_t ifp
, struct ifnet_llreach_info
*iflri
)
697 return (ifnet_defrouter_llreachinfo(ifp
, AF_INET
, iflri
));
701 ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp
, struct ifnet_llreach_info
*iflri
)
703 return (ifnet_defrouter_llreachinfo(ifp
, AF_INET6
, iflri
));
707 ifnet_set_capabilities_supported(ifnet_t ifp
, u_int32_t new_caps
,
716 ifnet_lock_exclusive(ifp
);
717 tmp
= (new_caps
& mask
) | (ifp
->if_capabilities
& ~mask
);
718 if ((tmp
& ~IFCAP_VALID
))
721 ifp
->if_capabilities
= tmp
;
722 ifnet_lock_done(ifp
);
728 ifnet_capabilities_supported(ifnet_t ifp
)
730 return ((ifp
== NULL
) ? 0 : ifp
->if_capabilities
);
735 ifnet_set_capabilities_enabled(ifnet_t ifp
, u_int32_t new_caps
,
740 struct kev_msg ev_msg
;
741 struct net_event_data ev_data
;
746 ifnet_lock_exclusive(ifp
);
747 tmp
= (new_caps
& mask
) | (ifp
->if_capenable
& ~mask
);
748 if ((tmp
& ~IFCAP_VALID
) || (tmp
& ~ifp
->if_capabilities
))
751 ifp
->if_capenable
= tmp
;
752 ifnet_lock_done(ifp
);
754 /* Notify application of the change */
755 bzero(&ev_data
, sizeof (struct net_event_data
));
756 bzero(&ev_msg
, sizeof (struct kev_msg
));
757 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
758 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
759 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
761 ev_msg
.event_code
= KEV_DL_IFCAP_CHANGED
;
762 strlcpy(&ev_data
.if_name
[0], ifp
->if_name
, IFNAMSIZ
);
763 ev_data
.if_family
= ifp
->if_family
;
764 ev_data
.if_unit
= (u_int32_t
)ifp
->if_unit
;
765 ev_msg
.dv
[0].data_length
= sizeof (struct net_event_data
);
766 ev_msg
.dv
[0].data_ptr
= &ev_data
;
767 ev_msg
.dv
[1].data_length
= 0;
768 kev_post_msg(&ev_msg
);
774 ifnet_capabilities_enabled(ifnet_t ifp
)
776 return ((ifp
== NULL
) ? 0 : ifp
->if_capenable
);
779 static const ifnet_offload_t offload_mask
=
780 (IFNET_CSUM_IP
| IFNET_CSUM_TCP
| IFNET_CSUM_UDP
| IFNET_CSUM_FRAGMENT
|
781 IFNET_IP_FRAGMENT
| IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
|
782 IFNET_IPV6_FRAGMENT
| IFNET_CSUM_PARTIAL
| IFNET_VLAN_TAGGING
|
783 IFNET_VLAN_MTU
| IFNET_MULTIPAGES
| IFNET_TSO_IPV4
| IFNET_TSO_IPV6
|
786 static const ifnet_offload_t any_offload_csum
=
787 (IFNET_CSUM_IP
| IFNET_CSUM_TCP
| IFNET_CSUM_UDP
| IFNET_CSUM_FRAGMENT
|
788 IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
| IFNET_CSUM_PARTIAL
);
791 ifnet_set_offload(ifnet_t interface
, ifnet_offload_t offload
)
793 u_int32_t ifcaps
= 0;
795 if (interface
== NULL
)
798 ifnet_lock_exclusive(interface
);
799 interface
->if_hwassist
= (offload
& offload_mask
);
801 * Hardware capable of partial checksum offload is
802 * flexible enough to handle any transports utilizing
803 * Internet Checksumming. Include those transports
804 * here, and leave the final decision to IP.
806 if (interface
->if_hwassist
& IFNET_CSUM_PARTIAL
) {
807 interface
->if_hwassist
|= (IFNET_CSUM_TCP
| IFNET_CSUM_UDP
|
808 IFNET_CSUM_TCPIPV6
| IFNET_CSUM_UDPIPV6
);
811 log(LOG_DEBUG
, "%s: set offload flags=%b\n",
813 interface
->if_hwassist
, IFNET_OFFLOADF_BITS
);
815 ifnet_lock_done(interface
);
817 if ((offload
& any_offload_csum
))
818 ifcaps
|= IFCAP_HWCSUM
;
819 if ((offload
& IFNET_TSO_IPV4
))
820 ifcaps
|= IFCAP_TSO4
;
821 if ((offload
& IFNET_TSO_IPV6
))
822 ifcaps
|= IFCAP_TSO6
;
823 if ((offload
& IFNET_VLAN_MTU
))
824 ifcaps
|= IFCAP_VLAN_MTU
;
825 if ((offload
& IFNET_VLAN_TAGGING
))
826 ifcaps
|= IFCAP_VLAN_HWTAGGING
;
827 if ((offload
& IFNET_TX_STATUS
))
828 ifcaps
|= IFNET_TX_STATUS
;
830 (void) ifnet_set_capabilities_supported(interface
, ifcaps
,
832 (void) ifnet_set_capabilities_enabled(interface
, ifcaps
,
840 ifnet_offload(ifnet_t interface
)
842 return ((interface
== NULL
) ?
843 0 : (interface
->if_hwassist
& offload_mask
));
847 ifnet_set_tso_mtu(ifnet_t interface
, sa_family_t family
, u_int32_t mtuLen
)
851 if (interface
== NULL
|| mtuLen
< interface
->if_mtu
)
856 if (interface
->if_hwassist
& IFNET_TSO_IPV4
)
857 interface
->if_tso_v4_mtu
= mtuLen
;
863 if (interface
->if_hwassist
& IFNET_TSO_IPV6
)
864 interface
->if_tso_v6_mtu
= mtuLen
;
870 error
= EPROTONOSUPPORT
;
878 ifnet_get_tso_mtu(ifnet_t interface
, sa_family_t family
, u_int32_t
*mtuLen
)
882 if (interface
== NULL
|| mtuLen
== NULL
)
887 if (interface
->if_hwassist
& IFNET_TSO_IPV4
)
888 *mtuLen
= interface
->if_tso_v4_mtu
;
894 if (interface
->if_hwassist
& IFNET_TSO_IPV6
)
895 *mtuLen
= interface
->if_tso_v6_mtu
;
901 error
= EPROTONOSUPPORT
;
909 ifnet_set_wake_flags(ifnet_t interface
, u_int32_t properties
, u_int32_t mask
)
911 struct kev_msg ev_msg
;
912 struct net_event_data ev_data
;
914 bzero(&ev_data
, sizeof (struct net_event_data
));
915 bzero(&ev_msg
, sizeof (struct kev_msg
));
917 if (interface
== NULL
)
920 /* Do not accept wacky values */
921 if ((properties
& mask
) & ~IF_WAKE_VALID_FLAGS
)
924 ifnet_lock_exclusive(interface
);
926 interface
->if_wake_properties
=
927 (properties
& mask
) | (interface
->if_wake_properties
& ~mask
);
929 ifnet_lock_done(interface
);
931 (void) ifnet_touch_lastchange(interface
);
933 /* Notify application of the change */
934 ev_msg
.vendor_code
= KEV_VENDOR_APPLE
;
935 ev_msg
.kev_class
= KEV_NETWORK_CLASS
;
936 ev_msg
.kev_subclass
= KEV_DL_SUBCLASS
;
938 ev_msg
.event_code
= KEV_DL_WAKEFLAGS_CHANGED
;
939 strlcpy(&ev_data
.if_name
[0], interface
->if_name
, IFNAMSIZ
);
940 ev_data
.if_family
= interface
->if_family
;
941 ev_data
.if_unit
= (u_int32_t
)interface
->if_unit
;
942 ev_msg
.dv
[0].data_length
= sizeof (struct net_event_data
);
943 ev_msg
.dv
[0].data_ptr
= &ev_data
;
944 ev_msg
.dv
[1].data_length
= 0;
945 kev_post_msg(&ev_msg
);
951 ifnet_get_wake_flags(ifnet_t interface
)
953 return ((interface
== NULL
) ? 0 : interface
->if_wake_properties
);
957 * Should MIB data store a copy?
960 ifnet_set_link_mib_data(ifnet_t interface
, void *mibData
, u_int32_t mibLen
)
962 if (interface
== NULL
)
965 ifnet_lock_exclusive(interface
);
966 interface
->if_linkmib
= (void*)mibData
;
967 interface
->if_linkmiblen
= mibLen
;
968 ifnet_lock_done(interface
);
973 ifnet_get_link_mib_data(ifnet_t interface
, void *mibData
, u_int32_t
*mibLen
)
977 if (interface
== NULL
)
980 ifnet_lock_shared(interface
);
981 if (*mibLen
< interface
->if_linkmiblen
)
983 if (result
== 0 && interface
->if_linkmib
== NULL
)
987 *mibLen
= interface
->if_linkmiblen
;
988 bcopy(interface
->if_linkmib
, mibData
, *mibLen
);
990 ifnet_lock_done(interface
);
996 ifnet_get_link_mib_data_length(ifnet_t interface
)
998 return ((interface
== NULL
) ? 0 : interface
->if_linkmiblen
);
1002 ifnet_output(ifnet_t interface
, protocol_family_t protocol_family
,
1003 mbuf_t m
, void *route
, const struct sockaddr
*dest
)
1005 if (interface
== NULL
|| protocol_family
== 0 || m
== NULL
) {
1010 return (dlil_output(interface
, protocol_family
, m
, route
, dest
, 0, NULL
));
1014 ifnet_output_raw(ifnet_t interface
, protocol_family_t protocol_family
, mbuf_t m
)
1016 if (interface
== NULL
|| m
== NULL
) {
1021 return (dlil_output(interface
, protocol_family
, m
, NULL
, NULL
, 1, NULL
));
1025 ifnet_set_mtu(ifnet_t interface
, u_int32_t mtu
)
1027 if (interface
== NULL
)
1030 interface
->if_mtu
= mtu
;
1035 ifnet_mtu(ifnet_t interface
)
1037 return ((interface
== NULL
) ? 0 : interface
->if_mtu
);
1041 ifnet_type(ifnet_t interface
)
1043 return ((interface
== NULL
) ? 0 : interface
->if_data
.ifi_type
);
1047 ifnet_set_addrlen(ifnet_t interface
, u_char addrlen
)
1049 if (interface
== NULL
)
1052 interface
->if_data
.ifi_addrlen
= addrlen
;
1057 ifnet_addrlen(ifnet_t interface
)
1059 return ((interface
== NULL
) ? 0 : interface
->if_data
.ifi_addrlen
);
1063 ifnet_set_hdrlen(ifnet_t interface
, u_char hdrlen
)
1065 if (interface
== NULL
)
1068 interface
->if_data
.ifi_hdrlen
= hdrlen
;
1073 ifnet_hdrlen(ifnet_t interface
)
1075 return ((interface
== NULL
) ? 0 : interface
->if_data
.ifi_hdrlen
);
1079 ifnet_set_metric(ifnet_t interface
, u_int32_t metric
)
1081 if (interface
== NULL
)
1084 interface
->if_data
.ifi_metric
= metric
;
1089 ifnet_metric(ifnet_t interface
)
1091 return ((interface
== NULL
) ? 0 : interface
->if_data
.ifi_metric
);
1095 ifnet_set_baudrate(struct ifnet
*ifp
, u_int64_t baudrate
)
1100 ifp
->if_output_bw
.max_bw
= ifp
->if_input_bw
.max_bw
=
1101 ifp
->if_output_bw
.eff_bw
= ifp
->if_input_bw
.eff_bw
= baudrate
;
1103 /* Pin if_baudrate to 32 bits until we can change the storage size */
1104 ifp
->if_baudrate
= (baudrate
> 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate
;
1110 ifnet_baudrate(struct ifnet
*ifp
)
1112 return ((ifp
== NULL
) ? 0 : ifp
->if_baudrate
);
1116 ifnet_set_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*output_bw
,
1117 struct if_bandwidths
*input_bw
)
1122 /* set input values first (if any), as output values depend on them */
1123 if (input_bw
!= NULL
)
1124 (void) ifnet_set_input_bandwidths(ifp
, input_bw
);
1126 if (output_bw
!= NULL
)
1127 (void) ifnet_set_output_bandwidths(ifp
, output_bw
, FALSE
);
1133 ifnet_set_output_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*bw
,
1136 struct if_bandwidths old_bw
;
1137 struct ifclassq
*ifq
;
1140 VERIFY(ifp
!= NULL
&& bw
!= NULL
);
1145 IFCQ_LOCK_ASSERT_HELD(ifq
);
1147 old_bw
= ifp
->if_output_bw
;
1148 if (bw
->eff_bw
!= 0)
1149 ifp
->if_output_bw
.eff_bw
= bw
->eff_bw
;
1150 if (bw
->max_bw
!= 0)
1151 ifp
->if_output_bw
.max_bw
= bw
->max_bw
;
1152 if (ifp
->if_output_bw
.eff_bw
> ifp
->if_output_bw
.max_bw
)
1153 ifp
->if_output_bw
.max_bw
= ifp
->if_output_bw
.eff_bw
;
1154 else if (ifp
->if_output_bw
.eff_bw
== 0)
1155 ifp
->if_output_bw
.eff_bw
= ifp
->if_output_bw
.max_bw
;
1157 /* Pin if_baudrate to 32 bits */
1158 br
= MAX(ifp
->if_output_bw
.max_bw
, ifp
->if_input_bw
.max_bw
);
1160 ifp
->if_baudrate
= (br
> 0xFFFFFFFF) ? 0xFFFFFFFF : br
;
1162 /* Adjust queue parameters if needed */
1163 if (old_bw
.eff_bw
!= ifp
->if_output_bw
.eff_bw
||
1164 old_bw
.max_bw
!= ifp
->if_output_bw
.max_bw
)
1165 ifnet_update_sndq(ifq
, CLASSQ_EV_LINK_BANDWIDTH
);
1174 ifnet_set_input_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*bw
)
1176 struct if_bandwidths old_bw
;
1178 VERIFY(ifp
!= NULL
&& bw
!= NULL
);
1180 old_bw
= ifp
->if_input_bw
;
1181 if (bw
->eff_bw
!= 0)
1182 ifp
->if_input_bw
.eff_bw
= bw
->eff_bw
;
1183 if (bw
->max_bw
!= 0)
1184 ifp
->if_input_bw
.max_bw
= bw
->max_bw
;
1185 if (ifp
->if_input_bw
.eff_bw
> ifp
->if_input_bw
.max_bw
)
1186 ifp
->if_input_bw
.max_bw
= ifp
->if_input_bw
.eff_bw
;
1187 else if (ifp
->if_input_bw
.eff_bw
== 0)
1188 ifp
->if_input_bw
.eff_bw
= ifp
->if_input_bw
.max_bw
;
1190 if (old_bw
.eff_bw
!= ifp
->if_input_bw
.eff_bw
||
1191 old_bw
.max_bw
!= ifp
->if_input_bw
.max_bw
)
1192 ifnet_update_rcv(ifp
, CLASSQ_EV_LINK_BANDWIDTH
);
1198 ifnet_output_linkrate(struct ifnet
*ifp
)
1200 struct ifclassq
*ifq
= &ifp
->if_snd
;
1203 IFCQ_LOCK_ASSERT_HELD(ifq
);
1205 rate
= ifp
->if_output_bw
.eff_bw
;
1206 if (IFCQ_TBR_IS_ENABLED(ifq
)) {
1207 u_int64_t tbr_rate
= ifp
->if_snd
.ifcq_tbr
.tbr_rate_raw
;
1208 VERIFY(tbr_rate
> 0);
1209 rate
= MIN(rate
, ifp
->if_snd
.ifcq_tbr
.tbr_rate_raw
);
1216 ifnet_input_linkrate(struct ifnet
*ifp
)
1218 return (ifp
->if_input_bw
.eff_bw
);
1222 ifnet_bandwidths(struct ifnet
*ifp
, struct if_bandwidths
*output_bw
,
1223 struct if_bandwidths
*input_bw
)
1228 if (output_bw
!= NULL
)
1229 *output_bw
= ifp
->if_output_bw
;
1230 if (input_bw
!= NULL
)
1231 *input_bw
= ifp
->if_input_bw
;
1237 ifnet_set_latencies(struct ifnet
*ifp
, struct if_latencies
*output_lt
,
1238 struct if_latencies
*input_lt
)
1243 if (output_lt
!= NULL
)
1244 (void) ifnet_set_output_latencies(ifp
, output_lt
, FALSE
);
1246 if (input_lt
!= NULL
)
1247 (void) ifnet_set_input_latencies(ifp
, input_lt
);
1253 ifnet_set_output_latencies(struct ifnet
*ifp
, struct if_latencies
*lt
,
1256 struct if_latencies old_lt
;
1257 struct ifclassq
*ifq
;
1259 VERIFY(ifp
!= NULL
&& lt
!= NULL
);
1264 IFCQ_LOCK_ASSERT_HELD(ifq
);
1266 old_lt
= ifp
->if_output_lt
;
1267 if (lt
->eff_lt
!= 0)
1268 ifp
->if_output_lt
.eff_lt
= lt
->eff_lt
;
1269 if (lt
->max_lt
!= 0)
1270 ifp
->if_output_lt
.max_lt
= lt
->max_lt
;
1271 if (ifp
->if_output_lt
.eff_lt
> ifp
->if_output_lt
.max_lt
)
1272 ifp
->if_output_lt
.max_lt
= ifp
->if_output_lt
.eff_lt
;
1273 else if (ifp
->if_output_lt
.eff_lt
== 0)
1274 ifp
->if_output_lt
.eff_lt
= ifp
->if_output_lt
.max_lt
;
1276 /* Adjust queue parameters if needed */
1277 if (old_lt
.eff_lt
!= ifp
->if_output_lt
.eff_lt
||
1278 old_lt
.max_lt
!= ifp
->if_output_lt
.max_lt
)
1279 ifnet_update_sndq(ifq
, CLASSQ_EV_LINK_LATENCY
);
1288 ifnet_set_input_latencies(struct ifnet
*ifp
, struct if_latencies
*lt
)
1290 struct if_latencies old_lt
;
1292 VERIFY(ifp
!= NULL
&& lt
!= NULL
);
1294 old_lt
= ifp
->if_input_lt
;
1295 if (lt
->eff_lt
!= 0)
1296 ifp
->if_input_lt
.eff_lt
= lt
->eff_lt
;
1297 if (lt
->max_lt
!= 0)
1298 ifp
->if_input_lt
.max_lt
= lt
->max_lt
;
1299 if (ifp
->if_input_lt
.eff_lt
> ifp
->if_input_lt
.max_lt
)
1300 ifp
->if_input_lt
.max_lt
= ifp
->if_input_lt
.eff_lt
;
1301 else if (ifp
->if_input_lt
.eff_lt
== 0)
1302 ifp
->if_input_lt
.eff_lt
= ifp
->if_input_lt
.max_lt
;
1304 if (old_lt
.eff_lt
!= ifp
->if_input_lt
.eff_lt
||
1305 old_lt
.max_lt
!= ifp
->if_input_lt
.max_lt
)
1306 ifnet_update_rcv(ifp
, CLASSQ_EV_LINK_LATENCY
);
1312 ifnet_latencies(struct ifnet
*ifp
, struct if_latencies
*output_lt
,
1313 struct if_latencies
*input_lt
)
1318 if (output_lt
!= NULL
)
1319 *output_lt
= ifp
->if_output_lt
;
1320 if (input_lt
!= NULL
)
1321 *input_lt
= ifp
->if_input_lt
;
1327 ifnet_set_poll_params(struct ifnet
*ifp
, struct ifnet_poll_params
*p
)
1333 else if (!ifnet_is_attached(ifp
, 1))
1336 err
= dlil_rxpoll_set_params(ifp
, p
, FALSE
);
1338 /* Release the io ref count */
1339 ifnet_decr_iorefcnt(ifp
);
1345 ifnet_poll_params(struct ifnet
*ifp
, struct ifnet_poll_params
*p
)
1349 if (ifp
== NULL
|| p
== NULL
)
1351 else if (!ifnet_is_attached(ifp
, 1))
1354 err
= dlil_rxpoll_get_params(ifp
, p
);
1356 /* Release the io ref count */
1357 ifnet_decr_iorefcnt(ifp
);
1363 ifnet_stat_increment(struct ifnet
*ifp
,
1364 const struct ifnet_stat_increment_param
*s
)
1369 if (s
->packets_in
!= 0)
1370 atomic_add_64(&ifp
->if_data
.ifi_ipackets
, s
->packets_in
);
1371 if (s
->bytes_in
!= 0)
1372 atomic_add_64(&ifp
->if_data
.ifi_ibytes
, s
->bytes_in
);
1373 if (s
->errors_in
!= 0)
1374 atomic_add_64(&ifp
->if_data
.ifi_ierrors
, s
->errors_in
);
1376 if (s
->packets_out
!= 0)
1377 atomic_add_64(&ifp
->if_data
.ifi_opackets
, s
->packets_out
);
1378 if (s
->bytes_out
!= 0)
1379 atomic_add_64(&ifp
->if_data
.ifi_obytes
, s
->bytes_out
);
1380 if (s
->errors_out
!= 0)
1381 atomic_add_64(&ifp
->if_data
.ifi_oerrors
, s
->errors_out
);
1383 if (s
->collisions
!= 0)
1384 atomic_add_64(&ifp
->if_data
.ifi_collisions
, s
->collisions
);
1385 if (s
->dropped
!= 0)
1386 atomic_add_64(&ifp
->if_data
.ifi_iqdrops
, s
->dropped
);
1388 /* Touch the last change time. */
1389 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1395 ifnet_stat_increment_in(struct ifnet
*ifp
, u_int32_t packets_in
,
1396 u_int32_t bytes_in
, u_int32_t errors_in
)
1401 if (packets_in
!= 0)
1402 atomic_add_64(&ifp
->if_data
.ifi_ipackets
, packets_in
);
1404 atomic_add_64(&ifp
->if_data
.ifi_ibytes
, bytes_in
);
1406 atomic_add_64(&ifp
->if_data
.ifi_ierrors
, errors_in
);
1408 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1414 ifnet_stat_increment_out(struct ifnet
*ifp
, u_int32_t packets_out
,
1415 u_int32_t bytes_out
, u_int32_t errors_out
)
1420 if (packets_out
!= 0)
1421 atomic_add_64(&ifp
->if_data
.ifi_opackets
, packets_out
);
1423 atomic_add_64(&ifp
->if_data
.ifi_obytes
, bytes_out
);
1424 if (errors_out
!= 0)
1425 atomic_add_64(&ifp
->if_data
.ifi_oerrors
, errors_out
);
1427 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1433 ifnet_set_stat(struct ifnet
*ifp
, const struct ifnet_stats_param
*s
)
1438 atomic_set_64(&ifp
->if_data
.ifi_ipackets
, s
->packets_in
);
1439 atomic_set_64(&ifp
->if_data
.ifi_ibytes
, s
->bytes_in
);
1440 atomic_set_64(&ifp
->if_data
.ifi_imcasts
, s
->multicasts_in
);
1441 atomic_set_64(&ifp
->if_data
.ifi_ierrors
, s
->errors_in
);
1443 atomic_set_64(&ifp
->if_data
.ifi_opackets
, s
->packets_out
);
1444 atomic_set_64(&ifp
->if_data
.ifi_obytes
, s
->bytes_out
);
1445 atomic_set_64(&ifp
->if_data
.ifi_omcasts
, s
->multicasts_out
);
1446 atomic_set_64(&ifp
->if_data
.ifi_oerrors
, s
->errors_out
);
1448 atomic_set_64(&ifp
->if_data
.ifi_collisions
, s
->collisions
);
1449 atomic_set_64(&ifp
->if_data
.ifi_iqdrops
, s
->dropped
);
1450 atomic_set_64(&ifp
->if_data
.ifi_noproto
, s
->no_protocol
);
1452 /* Touch the last change time. */
1453 TOUCHLASTCHANGE(&ifp
->if_lastchange
);
1459 ifnet_stat(struct ifnet
*ifp
, struct ifnet_stats_param
*s
)
1464 atomic_get_64(s
->packets_in
, &ifp
->if_data
.ifi_ipackets
);
1465 atomic_get_64(s
->bytes_in
, &ifp
->if_data
.ifi_ibytes
);
1466 atomic_get_64(s
->multicasts_in
, &ifp
->if_data
.ifi_imcasts
);
1467 atomic_get_64(s
->errors_in
, &ifp
->if_data
.ifi_ierrors
);
1469 atomic_get_64(s
->packets_out
, &ifp
->if_data
.ifi_opackets
);
1470 atomic_get_64(s
->bytes_out
, &ifp
->if_data
.ifi_obytes
);
1471 atomic_get_64(s
->multicasts_out
, &ifp
->if_data
.ifi_omcasts
);
1472 atomic_get_64(s
->errors_out
, &ifp
->if_data
.ifi_oerrors
);
1474 atomic_get_64(s
->collisions
, &ifp
->if_data
.ifi_collisions
);
1475 atomic_get_64(s
->dropped
, &ifp
->if_data
.ifi_iqdrops
);
1476 atomic_get_64(s
->no_protocol
, &ifp
->if_data
.ifi_noproto
);
1482 ifnet_touch_lastchange(ifnet_t interface
)
1484 if (interface
== NULL
)
1487 TOUCHLASTCHANGE(&interface
->if_lastchange
);
1493 ifnet_lastchange(ifnet_t interface
, struct timeval
*last_change
)
1495 if (interface
== NULL
)
1498 *last_change
= interface
->if_data
.ifi_lastchange
;
1499 /* Crude conversion from uptime to calendar time */
1500 last_change
->tv_sec
+= boottime_sec();
1506 ifnet_get_address_list(ifnet_t interface
, ifaddr_t
**addresses
)
1508 return (addresses
== NULL
? EINVAL
:
1509 ifnet_get_address_list_family(interface
, addresses
, 0));
1512 struct ifnet_addr_list
{
1513 SLIST_ENTRY(ifnet_addr_list
) ifal_le
;
1514 struct ifaddr
*ifal_ifa
;
1518 ifnet_get_address_list_family(ifnet_t interface
, ifaddr_t
**addresses
,
1521 return (ifnet_get_address_list_family_internal(interface
, addresses
,
1522 family
, 0, M_NOWAIT
, 0));
1526 ifnet_get_inuse_address_list(ifnet_t interface
, ifaddr_t
**addresses
)
1528 return (addresses
== NULL
? EINVAL
:
1529 ifnet_get_address_list_family_internal(interface
, addresses
,
1530 0, 0, M_NOWAIT
, 1));
1533 extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr
*ifa
);
1535 extern uint32_t udp_find_anypcb_byaddr(struct ifaddr
*ifa
);
1537 __private_extern__ errno_t
1538 ifnet_get_address_list_family_internal(ifnet_t interface
, ifaddr_t
**addresses
,
1539 sa_family_t family
, int detached
, int how
, int return_inuse_addrs
)
1541 SLIST_HEAD(, ifnet_addr_list
) ifal_head
;
1542 struct ifnet_addr_list
*ifal
, *ifal_tmp
;
1549 SLIST_INIT(&ifal_head
);
1551 if (addresses
== NULL
) {
1559 * Interface has been detached, so skip the lookup
1560 * at ifnet_head and go directly to inner loop.
1570 ifnet_head_lock_shared();
1571 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
1572 if (interface
!= NULL
&& ifp
!= interface
)
1575 ifnet_lock_shared(ifp
);
1576 if (interface
== NULL
|| interface
== ifp
) {
1578 TAILQ_FOREACH(ifa
, &ifp
->if_addrhead
, ifa_link
) {
1581 ifa
->ifa_addr
->sa_family
!= family
) {
1585 MALLOC(ifal
, struct ifnet_addr_list
*,
1586 sizeof (*ifal
), M_TEMP
, how
);
1589 ifnet_lock_done(ifp
);
1595 ifal
->ifal_ifa
= ifa
;
1596 IFA_ADDREF_LOCKED(ifa
);
1597 SLIST_INSERT_HEAD(&ifal_head
, ifal
, ifal_le
);
1602 ifnet_lock_done(ifp
);
1613 MALLOC(*addresses
, ifaddr_t
*, sizeof (ifaddr_t
) * (count
+ 1),
1615 if (*addresses
== NULL
) {
1619 bzero(*addresses
, sizeof (ifaddr_t
) * (count
+ 1));
1622 SLIST_FOREACH_SAFE(ifal
, &ifal_head
, ifal_le
, ifal_tmp
) {
1623 SLIST_REMOVE(&ifal_head
, ifal
, ifnet_addr_list
, ifal_le
);
1625 if (return_inuse_addrs
) {
1626 usecount
= tcp_find_anypcb_byaddr(ifal
->ifal_ifa
);
1627 usecount
+= udp_find_anypcb_byaddr(ifal
->ifal_ifa
);
1629 (*addresses
)[index
] = ifal
->ifal_ifa
;
1633 IFA_REMREF(ifal
->ifal_ifa
);
1635 (*addresses
)[--count
] = ifal
->ifal_ifa
;
1639 IFA_REMREF(ifal
->ifal_ifa
);
1644 VERIFY(err
== 0 || *addresses
== NULL
);
1645 if ((err
== 0) && (count
) && ((*addresses
)[0] == NULL
)) {
1646 VERIFY(return_inuse_addrs
== 1);
1647 FREE(*addresses
, M_TEMP
);
1654 ifnet_free_address_list(ifaddr_t
*addresses
)
1658 if (addresses
== NULL
)
1661 for (i
= 0; addresses
[i
] != NULL
; i
++)
1662 IFA_REMREF(addresses
[i
]);
1664 FREE(addresses
, M_TEMP
);
1668 ifnet_lladdr(ifnet_t interface
)
1673 if (interface
== NULL
)
1677 * if_lladdr points to the permanent link address of
1678 * the interface and it never gets deallocated; internal
1679 * code should simply use IF_LLADDR() for performance.
1681 ifa
= interface
->if_lladdr
;
1683 lladdr
= LLADDR(SDL((void *)ifa
->ifa_addr
));
1690 ifnet_llbroadcast_copy_bytes(ifnet_t interface
, void *addr
, size_t buffer_len
,
1693 if (interface
== NULL
|| addr
== NULL
|| out_len
== NULL
)
1696 *out_len
= interface
->if_broadcast
.length
;
1698 if (buffer_len
< interface
->if_broadcast
.length
)
1701 if (interface
->if_broadcast
.length
== 0)
1704 if (interface
->if_broadcast
.length
<=
1705 sizeof (interface
->if_broadcast
.u
.buffer
)) {
1706 bcopy(interface
->if_broadcast
.u
.buffer
, addr
,
1707 interface
->if_broadcast
.length
);
1709 bcopy(interface
->if_broadcast
.u
.ptr
, addr
,
1710 interface
->if_broadcast
.length
);
1717 ifnet_lladdr_copy_bytes_internal(ifnet_t interface
, void *lladdr
,
1718 size_t lladdr_len
, kauth_cred_t
*credp
)
1720 const u_int8_t
*bytes
;
1723 uint8_t sdlbuf
[SOCK_MAXADDRLEN
+ 1];
1727 * Make sure to accomodate the largest possible
1728 * size of SA(if_lladdr)->sa_len.
1730 _CASSERT(sizeof (sdlbuf
) == (SOCK_MAXADDRLEN
+ 1));
1732 if (interface
== NULL
|| lladdr
== NULL
)
1735 ifa
= interface
->if_lladdr
;
1737 bcopy(ifa
->ifa_addr
, &sdlbuf
, SDL(ifa
->ifa_addr
)->sdl_len
);
1740 bytes
= dlil_ifaddr_bytes(SDL(&sdlbuf
), &bytes_len
, credp
);
1741 if (bytes_len
!= lladdr_len
) {
1742 bzero(lladdr
, lladdr_len
);
1745 bcopy(bytes
, lladdr
, bytes_len
);
1752 ifnet_lladdr_copy_bytes(ifnet_t interface
, void *lladdr
, size_t length
)
1754 return (ifnet_lladdr_copy_bytes_internal(interface
, lladdr
, length
,
1759 ifnet_guarded_lladdr_copy_bytes(ifnet_t interface
, void *lladdr
, size_t length
)
1763 net_thread_marks_t marks
;
1765 kauth_cred_t
*credp
;
1770 marks
= net_thread_marks_push(NET_THREAD_CKREQ_LLADDR
);
1771 cred
= kauth_cred_proc_ref(current_proc());
1777 error
= ifnet_lladdr_copy_bytes_internal(interface
, lladdr
, length
,
1781 kauth_cred_unref(credp
);
1782 net_thread_marks_pop(marks
);
1789 ifnet_set_lladdr_internal(ifnet_t interface
, const void *lladdr
,
1790 size_t lladdr_len
, u_char new_type
, int apply_type
)
1795 if (interface
== NULL
)
1798 ifnet_head_lock_shared();
1799 ifnet_lock_exclusive(interface
);
1800 if (lladdr_len
!= 0 &&
1801 (lladdr_len
!= interface
->if_addrlen
|| lladdr
== 0)) {
1802 ifnet_lock_done(interface
);
1806 ifa
= ifnet_addrs
[interface
->if_index
- 1];
1808 struct sockaddr_dl
*sdl
;
1811 sdl
= (struct sockaddr_dl
*)(void *)ifa
->ifa_addr
;
1812 if (lladdr_len
!= 0) {
1813 bcopy(lladdr
, LLADDR(sdl
), lladdr_len
);
1815 bzero(LLADDR(sdl
), interface
->if_addrlen
);
1817 sdl
->sdl_alen
= lladdr_len
;
1820 sdl
->sdl_type
= new_type
;
1826 ifnet_lock_done(interface
);
1829 /* Generate a kernel event */
1831 dlil_post_msg(interface
, KEV_DL_SUBCLASS
,
1832 KEV_DL_LINK_ADDRESS_CHANGED
, NULL
, 0);
1839 ifnet_set_lladdr(ifnet_t interface
, const void* lladdr
, size_t lladdr_len
)
1841 return (ifnet_set_lladdr_internal(interface
, lladdr
, lladdr_len
, 0, 0));
1845 ifnet_set_lladdr_and_type(ifnet_t interface
, const void* lladdr
,
1846 size_t lladdr_len
, u_char type
)
1848 return (ifnet_set_lladdr_internal(interface
, lladdr
,
1849 lladdr_len
, type
, 1));
1853 ifnet_add_multicast(ifnet_t interface
, const struct sockaddr
*maddr
,
1854 ifmultiaddr_t
*ifmap
)
1856 if (interface
== NULL
|| maddr
== NULL
)
1859 /* Don't let users screw up protocols' entries. */
1860 if (maddr
->sa_family
!= AF_UNSPEC
&& maddr
->sa_family
!= AF_LINK
)
1863 return (if_addmulti_anon(interface
, maddr
, ifmap
));
1867 ifnet_remove_multicast(ifmultiaddr_t ifma
)
1869 struct sockaddr
*maddr
;
1874 maddr
= ifma
->ifma_addr
;
1875 /* Don't let users screw up protocols' entries. */
1876 if (maddr
->sa_family
!= AF_UNSPEC
&& maddr
->sa_family
!= AF_LINK
)
1879 return (if_delmulti_anon(ifma
->ifma_ifp
, maddr
));
1883 ifnet_get_multicast_list(ifnet_t ifp
, ifmultiaddr_t
**addresses
)
1887 struct ifmultiaddr
*addr
;
1889 if (ifp
== NULL
|| addresses
== NULL
)
1892 ifnet_lock_shared(ifp
);
1893 LIST_FOREACH(addr
, &ifp
->if_multiaddrs
, ifma_link
) {
1897 MALLOC(*addresses
, ifmultiaddr_t
*, sizeof (ifmultiaddr_t
) * (cmax
+ 1),
1899 if (*addresses
== NULL
) {
1900 ifnet_lock_done(ifp
);
1904 LIST_FOREACH(addr
, &ifp
->if_multiaddrs
, ifma_link
) {
1905 if (count
+ 1 > cmax
)
1907 (*addresses
)[count
] = (ifmultiaddr_t
)addr
;
1908 ifmaddr_reference((*addresses
)[count
]);
1911 (*addresses
)[cmax
] = NULL
;
1912 ifnet_lock_done(ifp
);
1918 ifnet_free_multicast_list(ifmultiaddr_t
*addresses
)
1922 if (addresses
== NULL
)
1925 for (i
= 0; addresses
[i
] != NULL
; i
++)
1926 ifmaddr_release(addresses
[i
]);
1928 FREE(addresses
, M_TEMP
);
1932 ifnet_find_by_name(const char *ifname
, ifnet_t
*ifpp
)
1940 namelen
= strlen(ifname
);
1944 ifnet_head_lock_shared();
1945 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
1947 struct sockaddr_dl
*ll_addr
;
1949 ifa
= ifnet_addrs
[ifp
->if_index
- 1];
1954 ll_addr
= (struct sockaddr_dl
*)(void *)ifa
->ifa_addr
;
1956 if (namelen
== ll_addr
->sdl_nlen
&& strncmp(ll_addr
->sdl_data
,
1957 ifname
, ll_addr
->sdl_nlen
) == 0) {
1960 ifnet_reference(*ifpp
);
1967 return ((ifp
== NULL
) ? ENXIO
: 0);
1971 ifnet_list_get(ifnet_family_t family
, ifnet_t
**list
, u_int32_t
*count
)
1973 return (ifnet_list_get_common(family
, FALSE
, list
, count
));
1976 __private_extern__ errno_t
1977 ifnet_list_get_all(ifnet_family_t family
, ifnet_t
**list
, u_int32_t
*count
)
1979 return (ifnet_list_get_common(family
, TRUE
, list
, count
));
1983 SLIST_ENTRY(ifnet_list
) ifl_le
;
1984 struct ifnet
*ifl_ifp
;
1988 ifnet_list_get_common(ifnet_family_t family
, boolean_t get_all
, ifnet_t
**list
,
1991 #pragma unused(get_all)
1992 SLIST_HEAD(, ifnet_list
) ifl_head
;
1993 struct ifnet_list
*ifl
, *ifl_tmp
;
1998 SLIST_INIT(&ifl_head
);
2000 if (list
== NULL
|| count
== NULL
) {
2007 ifnet_head_lock_shared();
2008 TAILQ_FOREACH(ifp
, &ifnet_head
, if_link
) {
2009 if (family
== IFNET_FAMILY_ANY
|| ifp
->if_family
== family
) {
2010 MALLOC(ifl
, struct ifnet_list
*, sizeof (*ifl
),
2018 ifnet_reference(ifp
);
2019 SLIST_INSERT_HEAD(&ifl_head
, ifl
, ifl_le
);
2030 MALLOC(*list
, ifnet_t
*, sizeof (ifnet_t
) * (cnt
+ 1),
2032 if (*list
== NULL
) {
2036 bzero(*list
, sizeof (ifnet_t
) * (cnt
+ 1));
2040 SLIST_FOREACH_SAFE(ifl
, &ifl_head
, ifl_le
, ifl_tmp
) {
2041 SLIST_REMOVE(&ifl_head
, ifl
, ifnet_list
, ifl_le
);
2043 (*list
)[--cnt
] = ifl
->ifl_ifp
;
2045 ifnet_release(ifl
->ifl_ifp
);
2053 ifnet_list_free(ifnet_t
*interfaces
)
2057 if (interfaces
== NULL
)
2060 for (i
= 0; interfaces
[i
]; i
++)
2061 ifnet_release(interfaces
[i
]);
2063 FREE(interfaces
, M_TEMP
);
2067 ifnet_transmit_burst_start(ifnet_t ifp
, mbuf_t pkt
)
2070 uint32_t orig_flags
;
2072 if (ifp
== NULL
|| !(pkt
->m_flags
& M_PKTHDR
))
2075 orig_flags
= OSBitOrAtomic(IF_MEASURED_BW_INPROGRESS
,
2077 if (orig_flags
& IF_MEASURED_BW_INPROGRESS
) {
2078 /* There is already a measurement in progress; skip this one */
2082 ifp
->if_bw
.start_seq
= pkt
->m_pkthdr
.pkt_bwseq
;
2083 ifp
->if_bw
.start_ts
= mach_absolute_time();
2084 #else /*!MEASURE_BW */
2085 #pragma unused(ifp, pkt)
2086 #endif /* !MEASURE_BW */
2090 ifnet_transmit_burst_end(ifnet_t ifp
, mbuf_t pkt
)
2093 uint64_t oseq
, ots
, bytes
, ts
, t
;
2096 if ( ifp
== NULL
|| !(pkt
->m_flags
& M_PKTHDR
))
2099 flags
= OSBitOrAtomic(IF_MEASURED_BW_CALCULATION
, &ifp
->if_bw
.flags
);
2101 /* If a calculation is already in progress, just return */
2102 if (flags
& IF_MEASURED_BW_CALCULATION
)
2105 /* Check if a measurement was started at all */
2106 if (!(flags
& IF_MEASURED_BW_INPROGRESS
)) {
2108 * It is an error to call burst_end before burst_start.
2109 * Reset the calculation flag and return.
2114 oseq
= pkt
->m_pkthdr
.pkt_bwseq
;
2115 ots
= mach_absolute_time();
2117 if (ifp
->if_bw
.start_seq
> 0 && oseq
> ifp
->if_bw
.start_seq
) {
2118 ts
= ots
- ifp
->if_bw
.start_ts
;
2120 absolutetime_to_nanoseconds(ts
, &t
);
2121 bytes
= oseq
- ifp
->if_bw
.start_seq
;
2122 ifp
->if_bw
.bytes
= bytes
;
2128 /* Compute bandwidth as bytes/ms */
2129 bw
= (bytes
* NSEC_PER_MSEC
) / t
;
2131 if (ifp
->if_bw
.bw
> 0) {
2134 shft
= if_bw_smoothing_val
;
2135 /* Compute EWMA of bw */
2136 ifp
->if_bw
.bw
= (bw
+
2137 ((ifp
->if_bw
.bw
<< shft
) -
2138 ifp
->if_bw
.bw
)) >> shft
;
2144 ifp
->if_bw
.last_seq
= oseq
;
2145 ifp
->if_bw
.last_ts
= ots
;
2150 flags
= ~(IF_MEASURED_BW_INPROGRESS
| IF_MEASURED_BW_CALCULATION
);
2151 OSBitAndAtomic(flags
, &ifp
->if_bw
.flags
);
2152 #else /* !MEASURE_BW */
2153 #pragma unused(ifp, pkt)
2154 #endif /* !MEASURE_BW */
2157 /****************************************************************************/
2158 /* ifaddr_t accessors */
2159 /****************************************************************************/
2162 ifaddr_reference(ifaddr_t ifa
)
2172 ifaddr_release(ifaddr_t ifa
)
2182 ifaddr_address_family(ifaddr_t ifa
)
2184 sa_family_t family
= 0;
2188 if (ifa
->ifa_addr
!= NULL
)
2189 family
= ifa
->ifa_addr
->sa_family
;
2196 ifaddr_address(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2200 if (ifa
== NULL
|| out_addr
== NULL
)
2204 if (ifa
->ifa_addr
== NULL
) {
2209 copylen
= (addr_size
>= ifa
->ifa_addr
->sa_len
) ?
2210 ifa
->ifa_addr
->sa_len
: addr_size
;
2211 bcopy(ifa
->ifa_addr
, out_addr
, copylen
);
2213 if (ifa
->ifa_addr
->sa_len
> addr_size
) {
2223 ifaddr_dstaddress(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2227 if (ifa
== NULL
|| out_addr
== NULL
)
2231 if (ifa
->ifa_dstaddr
== NULL
) {
2236 copylen
= (addr_size
>= ifa
->ifa_dstaddr
->sa_len
) ?
2237 ifa
->ifa_dstaddr
->sa_len
: addr_size
;
2238 bcopy(ifa
->ifa_dstaddr
, out_addr
, copylen
);
2240 if (ifa
->ifa_dstaddr
->sa_len
> addr_size
) {
2250 ifaddr_netmask(ifaddr_t ifa
, struct sockaddr
*out_addr
, u_int32_t addr_size
)
2254 if (ifa
== NULL
|| out_addr
== NULL
)
2258 if (ifa
->ifa_netmask
== NULL
) {
2263 copylen
= addr_size
>= ifa
->ifa_netmask
->sa_len
?
2264 ifa
->ifa_netmask
->sa_len
: addr_size
;
2265 bcopy(ifa
->ifa_netmask
, out_addr
, copylen
);
2267 if (ifa
->ifa_netmask
->sa_len
> addr_size
) {
2277 ifaddr_ifnet(ifaddr_t ifa
)
2284 /* ifa_ifp is set once at creation time; it is never changed */
2291 ifaddr_withaddr(const struct sockaddr
*address
)
2293 if (address
== NULL
)
2296 return (ifa_ifwithaddr(address
));
2300 ifaddr_withdstaddr(const struct sockaddr
*address
)
2302 if (address
== NULL
)
2305 return (ifa_ifwithdstaddr(address
));
2309 ifaddr_withnet(const struct sockaddr
*net
)
2314 return (ifa_ifwithnet(net
));
2318 ifaddr_withroute(int flags
, const struct sockaddr
*destination
,
2319 const struct sockaddr
*gateway
)
2321 if (destination
== NULL
|| gateway
== NULL
)
2324 return (ifa_ifwithroute(flags
, destination
, gateway
));
2328 ifaddr_findbestforaddr(const struct sockaddr
*addr
, ifnet_t interface
)
2330 if (addr
== NULL
|| interface
== NULL
)
2333 return (ifaof_ifpforaddr(addr
, interface
));
2337 ifmaddr_reference(ifmultiaddr_t ifmaddr
)
2339 if (ifmaddr
== NULL
)
2342 IFMA_ADDREF(ifmaddr
);
2347 ifmaddr_release(ifmultiaddr_t ifmaddr
)
2349 if (ifmaddr
== NULL
)
2352 IFMA_REMREF(ifmaddr
);
2357 ifmaddr_address(ifmultiaddr_t ifma
, struct sockaddr
*out_addr
,
2358 u_int32_t addr_size
)
2362 if (ifma
== NULL
|| out_addr
== NULL
)
2366 if (ifma
->ifma_addr
== NULL
) {
2371 copylen
= (addr_size
>= ifma
->ifma_addr
->sa_len
?
2372 ifma
->ifma_addr
->sa_len
: addr_size
);
2373 bcopy(ifma
->ifma_addr
, out_addr
, copylen
);
2375 if (ifma
->ifma_addr
->sa_len
> addr_size
) {
2384 ifmaddr_lladdress(ifmultiaddr_t ifma
, struct sockaddr
*out_addr
,
2385 u_int32_t addr_size
)
2387 struct ifmultiaddr
*ifma_ll
;
2389 if (ifma
== NULL
|| out_addr
== NULL
)
2391 if ((ifma_ll
= ifma
->ifma_ll
) == NULL
)
2394 return (ifmaddr_address(ifma_ll
, out_addr
, addr_size
));
2398 ifmaddr_ifnet(ifmultiaddr_t ifma
)
2400 return ((ifma
== NULL
) ? NULL
: ifma
->ifma_ifp
);
2403 /******************************************************************************/
2404 /* interface cloner */
2405 /******************************************************************************/
2408 ifnet_clone_attach(struct ifnet_clone_params
*cloner_params
,
2409 if_clone_t
*ifcloner
)
2412 struct if_clone
*ifc
= NULL
;
2415 if (cloner_params
== NULL
|| ifcloner
== NULL
||
2416 cloner_params
->ifc_name
== NULL
||
2417 cloner_params
->ifc_create
== NULL
||
2418 cloner_params
->ifc_destroy
== NULL
||
2419 (namelen
= strlen(cloner_params
->ifc_name
)) >= IFNAMSIZ
) {
2424 if (if_clone_lookup(cloner_params
->ifc_name
, NULL
) != NULL
) {
2425 printf("%s: already a cloner for %s\n", __func__
,
2426 cloner_params
->ifc_name
);
2431 /* Make room for name string */
2432 ifc
= _MALLOC(sizeof (struct if_clone
) + IFNAMSIZ
+ 1, M_CLONE
,
2435 printf("%s: _MALLOC failed\n", __func__
);
2439 strlcpy((char *)(ifc
+ 1), cloner_params
->ifc_name
, IFNAMSIZ
+ 1);
2440 ifc
->ifc_name
= (char *)(ifc
+ 1);
2441 ifc
->ifc_namelen
= namelen
;
2442 ifc
->ifc_maxunit
= IF_MAXUNIT
;
2443 ifc
->ifc_create
= cloner_params
->ifc_create
;
2444 ifc
->ifc_destroy
= cloner_params
->ifc_destroy
;
2446 error
= if_clone_attach(ifc
);
2448 printf("%s: if_clone_attach failed %d\n", __func__
, error
);
2461 ifnet_clone_detach(if_clone_t ifcloner
)
2464 struct if_clone
*ifc
= ifcloner
;
2466 if (ifc
== NULL
|| ifc
->ifc_name
== NULL
)
2469 if ((if_clone_lookup(ifc
->ifc_name
, NULL
)) == NULL
) {
2470 printf("%s: no cloner for %s\n", __func__
, ifc
->ifc_name
);
2475 if_clone_detach(ifc
);
2483 /******************************************************************************/
2485 /******************************************************************************/
2488 ifnet_get_local_ports_extended(ifnet_t ifp
, protocol_family_t protocol
,
2489 u_int32_t flags
, u_int8_t
*bitfield
)
2492 u_int32_t inp_flags
= 0;
2494 inp_flags
|= ((flags
& IFNET_GET_LOCAL_PORTS_WILDCARDOK
) ?
2495 INPCB_GET_PORTS_USED_WILDCARDOK
: 0);
2496 inp_flags
|= ((flags
& IFNET_GET_LOCAL_PORTS_NOWAKEUPOK
) ?
2497 INPCB_GET_PORTS_USED_NOWAKEUPOK
: 0);
2499 if (bitfield
== NULL
)
2511 /* bit string is long enough to hold 16-bit port values */
2512 bzero(bitfield
, bitstr_size(65536));
2514 ifindex
= (ifp
!= NULL
) ? ifp
->if_index
: 0;
2516 if (!(flags
& IFNET_GET_LOCAL_PORTS_TCPONLY
))
2517 udp_get_ports_used(ifindex
, protocol
, inp_flags
, bitfield
);
2519 if (!(flags
& IFNET_GET_LOCAL_PORTS_UDPONLY
))
2520 tcp_get_ports_used(ifindex
, protocol
, inp_flags
, bitfield
);
2526 ifnet_get_local_ports(ifnet_t ifp
, u_int8_t
*bitfield
)
2528 u_int32_t flags
= IFNET_GET_LOCAL_PORTS_WILDCARDOK
;
2529 return (ifnet_get_local_ports_extended(ifp
, PF_UNSPEC
, flags
,
2534 ifnet_notice_node_presence(ifnet_t ifp
, struct sockaddr
* sa
, int32_t rssi
,
2535 int lqm
, int npm
, u_int8_t srvinfo
[48])
2537 if (ifp
== NULL
|| sa
== NULL
|| srvinfo
== NULL
)
2539 if (sa
->sa_len
> sizeof(struct sockaddr_storage
))
2541 if (sa
->sa_family
!= AF_LINK
&& sa
->sa_family
!= AF_INET6
)
2544 dlil_node_present(ifp
, sa
, rssi
, lqm
, npm
, srvinfo
);
2549 ifnet_notice_node_absence(ifnet_t ifp
, struct sockaddr
* sa
)
2551 if (ifp
== NULL
|| sa
== NULL
)
2553 if (sa
->sa_len
> sizeof(struct sockaddr_storage
))
2555 if (sa
->sa_family
!= AF_LINK
&& sa
->sa_family
!= AF_INET6
)
2558 dlil_node_absent(ifp
, sa
);
2563 ifnet_notice_master_elected(ifnet_t ifp
)
2568 dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_MASTER_ELECTED
, NULL
, 0);
2573 ifnet_tx_compl_status(ifnet_t ifp
, mbuf_t m
, tx_compl_val_t val
)
2575 #pragma unused(ifp, m, val)
2576 /* Dummy function to be implemented XXX */
2581 ifnet_report_issues(ifnet_t ifp
, u_int8_t modid
[IFNET_MODIDLEN
],
2582 u_int8_t info
[IFNET_MODARGLEN
])
2584 if (ifp
== NULL
|| modid
== NULL
)
2587 dlil_report_issues(ifp
, modid
, info
);
2592 ifnet_set_delegate(ifnet_t ifp
, ifnet_t delegated_ifp
)
2594 ifnet_t odifp
= NULL
;
2598 else if (!ifnet_is_attached(ifp
, 1))
2601 ifnet_lock_exclusive(ifp
);
2602 odifp
= ifp
->if_delegated
.ifp
;
2603 if (odifp
!= NULL
&& odifp
== delegated_ifp
) {
2604 /* delegate info is unchanged; nothing more to do */
2605 ifnet_lock_done(ifp
);
2608 bzero(&ifp
->if_delegated
, sizeof (ifp
->if_delegated
));
2609 if (delegated_ifp
!= NULL
&& ifp
!= delegated_ifp
) {
2610 ifp
->if_delegated
.ifp
= delegated_ifp
;
2611 ifnet_reference(delegated_ifp
);
2612 ifp
->if_delegated
.type
= delegated_ifp
->if_type
;
2613 ifp
->if_delegated
.family
= delegated_ifp
->if_family
;
2614 ifp
->if_delegated
.subfamily
= delegated_ifp
->if_subfamily
;
2615 ifp
->if_delegated
.expensive
=
2616 delegated_ifp
->if_eflags
& IFEF_EXPENSIVE
? 1 : 0;
2617 printf("%s: is now delegating %s (type 0x%x, family %u, "
2618 "sub-family %u)\n", ifp
->if_xname
, delegated_ifp
->if_xname
,
2619 delegated_ifp
->if_type
, delegated_ifp
->if_family
,
2620 delegated_ifp
->if_subfamily
);
2622 ifnet_lock_done(ifp
);
2624 if (odifp
!= NULL
) {
2625 if (odifp
!= delegated_ifp
) {
2626 printf("%s: is no longer delegating %s\n",
2627 ifp
->if_xname
, odifp
->if_xname
);
2629 ifnet_release(odifp
);
2632 /* Generate a kernel event */
2633 dlil_post_msg(ifp
, KEV_DL_SUBCLASS
, KEV_DL_IFDELEGATE_CHANGED
, NULL
, 0);
2636 /* Release the io ref count */
2637 ifnet_decr_iorefcnt(ifp
);
2643 ifnet_get_delegate(ifnet_t ifp
, ifnet_t
*pdelegated_ifp
)
2645 if (ifp
== NULL
|| pdelegated_ifp
== NULL
)
2647 else if (!ifnet_is_attached(ifp
, 1))
2650 ifnet_lock_shared(ifp
);
2651 if (ifp
->if_delegated
.ifp
!= NULL
)
2652 ifnet_reference(ifp
->if_delegated
.ifp
);
2653 *pdelegated_ifp
= ifp
->if_delegated
.ifp
;
2654 ifnet_lock_done(ifp
);
2656 /* Release the io ref count */
2657 ifnet_decr_iorefcnt(ifp
);
2662 extern u_int32_t
key_fill_offload_frames_for_savs (ifnet_t ifp
,
2663 struct ipsec_offload_frame
*frames_array
, u_int32_t frames_array_count
,
2664 size_t frame_data_offset
);
2667 ifnet_get_ipsec_offload_frames(ifnet_t ifp
,
2668 struct ipsec_offload_frame
*frames_array
,
2669 u_int32_t frames_array_count
,
2670 size_t frame_data_offset
,
2671 u_int32_t
*used_frames_count
)
2673 if (frames_array
== NULL
|| used_frames_count
== NULL
) {
2677 *used_frames_count
= 0;
2679 if (frames_array_count
== 0) {
2683 *used_frames_count
= key_fill_offload_frames_for_savs(ifp
,
2684 frames_array
, frames_array_count
, frame_data_offset
);