xnu-3247.10.11.tar.gz
[apple/xnu.git] / bsd / net / kpi_interface.c
1/*
2 * Copyright (c) 2004-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include "kpi_interface.h"
30
31#include <sys/queue.h>
32#include <sys/param.h> /* for definition of NULL */
33#include <kern/debug.h> /* for panic */
34#include <sys/errno.h>
35#include <sys/socket.h>
36#include <sys/kern_event.h>
37#include <sys/kernel.h>
38#include <sys/malloc.h>
39#include <sys/kpi_mbuf.h>
40#include <sys/mcache.h>
41#include <sys/protosw.h>
42#include <sys/syslog.h>
43#include <net/if_var.h>
44#include <net/if_dl.h>
45#include <net/dlil.h>
46#include <net/if_types.h>
47#include <net/if_dl.h>
48#include <net/if_arp.h>
49#include <net/if_llreach.h>
50#include <net/if_ether.h>
51#include <net/route.h>
52#include <libkern/libkern.h>
53#include <libkern/OSAtomic.h>
54#include <kern/locks.h>
55#include <kern/clock.h>
56#include <sys/sockio.h>
57#include <sys/proc.h>
58#include <sys/sysctl.h>
59#include <sys/mbuf.h>
60#include <netinet/ip_var.h>
61#include <netinet/udp.h>
62#include <netinet/udp_var.h>
63#include <netinet/tcp.h>
64#include <netinet/tcp_var.h>
65#include <netinet/in_pcb.h>
66#ifdef INET
67#include <netinet/igmp_var.h>
68#endif
69#ifdef INET6
70#include <netinet6/mld6_var.h>
71#endif
72
73#include "net/net_str_id.h"
74
75#if CONFIG_MACF
76#include <sys/kauth.h>
77#include <security/mac_framework.h>
78#endif
79
80#define TOUCHLASTCHANGE(__if_lastchange) { \
81 (__if_lastchange)->tv_sec = net_uptime(); \
82 (__if_lastchange)->tv_usec = 0; \
83}
84
85static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int,
86 struct ifnet_llreach_info *);
87static void ifnet_kpi_free(ifnet_t);
88static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
89 u_int32_t *);
90static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
91 u_char, int);
92static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
93
94/*
95 * Temporary workaround until we have real reference counting.
96 *
97 * We keep the details of calling dlil_if_release (which should be
98 * called "recycle") transparent by calling it from our if_free function
99 * pointer. We have to keep the client's original detach function
100 * somewhere so that we can call it.
101 */
102static void
103ifnet_kpi_free(ifnet_t ifp)
104{
105 ifnet_detached_func detach_func = ifp->if_kpi_storage;
106
107 if (detach_func != NULL)
108 detach_func(ifp);
109
110 if (ifp->if_broadcast.length > sizeof (ifp->if_broadcast.u.buffer)) {
111 FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
112 ifp->if_broadcast.u.ptr = NULL;
113 }
114
115 dlil_if_release(ifp);
116}
117
118errno_t
119ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *interface)
120{
121 struct ifnet_init_eparams einit;
122
123 bzero(&einit, sizeof (einit));
124
125 einit.ver = IFNET_INIT_CURRENT_VERSION;
126 einit.len = sizeof (einit);
127 einit.flags = IFNET_INIT_LEGACY;
128 einit.uniqueid = init->uniqueid;
129 einit.uniqueid_len = init->uniqueid_len;
130 einit.name = init->name;
131 einit.unit = init->unit;
132 einit.family = init->family;
133 einit.type = init->type;
134 einit.output = init->output;
135 einit.demux = init->demux;
136 einit.add_proto = init->add_proto;
137 einit.del_proto = init->del_proto;
138 einit.check_multi = init->check_multi;
139 einit.framer = init->framer;
140 einit.softc = init->softc;
141 einit.ioctl = init->ioctl;
142 einit.set_bpf_tap = init->set_bpf_tap;
143 einit.detach = init->detach;
144 einit.event = init->event;
145 einit.broadcast_addr = init->broadcast_addr;
146 einit.broadcast_len = init->broadcast_len;
147
148 return (ifnet_allocate_extended(&einit, interface));
149}
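
/*
 * Illustrative sketch (not part of this file): how a network driver might
 * use ifnet_allocate() with the legacy init parameters above.  The names
 * mydrv_output, mydrv_demux, mydrv_add_proto, mydrv_del_proto,
 * mydrv_detach, the softc pointer sc and the "mydrv" interface name are
 * hypothetical placeholders; the fields are the usual ones from
 * <net/kpi_interface.h>.
 *
 *	struct ifnet_init_params init;
 *	ifnet_t ifp;
 *	errno_t err;
 *
 *	bzero(&init, sizeof (init));
 *	init.name = "mydrv";
 *	init.unit = 0;
 *	init.family = IFNET_FAMILY_ETHERNET;
 *	init.type = IFT_ETHER;
 *	init.output = mydrv_output;	// required for the legacy model
 *	init.demux = mydrv_demux;
 *	init.add_proto = mydrv_add_proto;
 *	init.del_proto = mydrv_del_proto;
 *	init.detach = mydrv_detach;	// called back from ifnet_kpi_free()
 *	init.softc = sc;
 *	err = ifnet_allocate(&init, &ifp);
 */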
150
151errno_t
152ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
153 ifnet_t *interface)
154{
155 struct ifnet_init_eparams einit;
156 struct ifnet *ifp = NULL;
157 int error;
158
159 einit = *einit0;
160
161 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
162 einit.len < sizeof (einit))
163 return (EINVAL);
164
165 if (einit.family == 0 || einit.name == NULL ||
166 strlen(einit.name) >= IFNAMSIZ ||
167 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0)
168 return (EINVAL);
169
170 if (einit.flags & IFNET_INIT_LEGACY) {
171 if (einit.output == NULL || einit.flags != IFNET_INIT_LEGACY)
172 return (EINVAL);
173
174 einit.pre_enqueue = NULL;
175 einit.start = NULL;
176 einit.output_ctl = NULL;
177 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
178 einit.input_poll = NULL;
179 einit.input_ctl = NULL;
180 } else {
181 if (einit.start == NULL)
182 return (EINVAL);
183
184 einit.output = NULL;
185 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX)
186 return (EINVAL);
187
188 if (einit.flags & IFNET_INIT_INPUT_POLL) {
189 if (einit.input_poll == NULL || einit.input_ctl == NULL)
190 return (EINVAL);
191 } else {
192 einit.input_poll = NULL;
193 einit.input_ctl = NULL;
194 }
195 }
196
197 error = dlil_if_acquire(einit.family, einit.uniqueid,
198 einit.uniqueid_len, &ifp);
199
200 if (error == 0) {
201 u_int64_t br;
202
203 /*
204 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
205 * to point to storage of at least IFNAMSIZ bytes. It is safe
206 * to write to this.
207 */
208 strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
209 ifp->if_type = einit.type;
210 ifp->if_family = einit.family;
211 ifp->if_subfamily = einit.subfamily;
212 ifp->if_unit = einit.unit;
213 ifp->if_output = einit.output;
214 ifp->if_pre_enqueue = einit.pre_enqueue;
215 ifp->if_start = einit.start;
216 ifp->if_output_ctl = einit.output_ctl;
217 ifp->if_output_sched_model = einit.output_sched_model;
218 ifp->if_output_bw.eff_bw = einit.output_bw;
219 ifp->if_output_bw.max_bw = einit.output_bw_max;
220 ifp->if_output_lt.eff_lt = einit.output_lt;
221 ifp->if_output_lt.max_lt = einit.output_lt_max;
222 ifp->if_input_poll = einit.input_poll;
223 ifp->if_input_ctl = einit.input_ctl;
224 ifp->if_input_bw.eff_bw = einit.input_bw;
225 ifp->if_input_bw.max_bw = einit.input_bw_max;
226 ifp->if_input_lt.eff_lt = einit.input_lt;
227 ifp->if_input_lt.max_lt = einit.input_lt_max;
228 ifp->if_demux = einit.demux;
229 ifp->if_add_proto = einit.add_proto;
230 ifp->if_del_proto = einit.del_proto;
231 ifp->if_check_multi = einit.check_multi;
232 ifp->if_framer_legacy = einit.framer;
233 ifp->if_framer = einit.framer_extended;
234 ifp->if_softc = einit.softc;
235 ifp->if_ioctl = einit.ioctl;
236 ifp->if_set_bpf_tap = einit.set_bpf_tap;
237 ifp->if_free = ifnet_kpi_free;
238 ifp->if_event = einit.event;
239 ifp->if_kpi_storage = einit.detach;
240
241 /* Initialize external name (name + unit) */
242 snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
243 "%s%d", ifp->if_name, ifp->if_unit);
244
245 /*
246 * On embedded, framer() is already in the extended form;
247 * we simply use it as is, unless the caller specifies
248 * framer_extended() which will then override it.
249 *
250 * On non-embedded, framer() has long been exposed as part
251 * of the public KPI, and therefore its signature must
252 * remain the same (without the pre- and postpend length
253 * parameters.) We special case ether_frameout, such that
254 * it gets mapped to its extended variant. All other cases
255 * utilize the stub routine which will simply return zeroes
256 * for those new parameters.
257 *
258 * Internally, DLIL will only use the extended callback
259 * variant which is represented by if_framer.
260 */
261 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
262 if (ifp->if_framer_legacy == ether_frameout)
263 ifp->if_framer = ether_frameout_extended;
264 else
265 ifp->if_framer = ifnet_framer_stub;
266 }
267
268 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
269 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
270 else if (ifp->if_output_bw.eff_bw == 0)
271 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
272
273 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
274 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
275 else if (ifp->if_input_bw.eff_bw == 0)
276 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
277
278 if (ifp->if_output_bw.max_bw == 0)
279 ifp->if_output_bw = ifp->if_input_bw;
280 else if (ifp->if_input_bw.max_bw == 0)
281 ifp->if_input_bw = ifp->if_output_bw;
282
283 /* Pin if_baudrate to 32 bits */
284 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
285 if (br != 0)
286 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
287
288 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
289 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
290 else if (ifp->if_output_lt.eff_lt == 0)
291 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
292
293 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
294 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
295 else if (ifp->if_input_lt.eff_lt == 0)
296 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
297
298 if (ifp->if_output_lt.max_lt == 0)
299 ifp->if_output_lt = ifp->if_input_lt;
300 else if (ifp->if_input_lt.max_lt == 0)
301 ifp->if_input_lt = ifp->if_output_lt;
302
303 if (ifp->if_ioctl == NULL)
304 ifp->if_ioctl = ifp_if_ioctl;
305
306 if (ifp->if_start != NULL) {
307 ifp->if_eflags |= IFEF_TXSTART;
308 if (ifp->if_pre_enqueue == NULL)
309 ifp->if_pre_enqueue = ifnet_enqueue;
310 ifp->if_output = ifp->if_pre_enqueue;
311 } else {
312 ifp->if_eflags &= ~IFEF_TXSTART;
313 }
314
315 if (ifp->if_input_poll != NULL)
316 ifp->if_eflags |= IFEF_RXPOLL;
317 else
318 ifp->if_eflags &= ~IFEF_RXPOLL;
319
320 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
321 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
322 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
323 ifp->if_input_ctl == NULL));
324 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
325 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
326
327 if (einit.broadcast_len && einit.broadcast_addr) {
328 if (einit.broadcast_len >
329 sizeof (ifp->if_broadcast.u.buffer)) {
330 MALLOC(ifp->if_broadcast.u.ptr, u_char *,
331 einit.broadcast_len, M_IFADDR, M_NOWAIT);
332 if (ifp->if_broadcast.u.ptr == NULL) {
333 error = ENOMEM;
334 } else {
335 bcopy(einit.broadcast_addr,
336 ifp->if_broadcast.u.ptr,
337 einit.broadcast_len);
338 }
339 } else {
340 bcopy(einit.broadcast_addr,
341 ifp->if_broadcast.u.buffer,
342 einit.broadcast_len);
343 }
344 ifp->if_broadcast.length = einit.broadcast_len;
345 } else {
346 bzero(&ifp->if_broadcast, sizeof (ifp->if_broadcast));
347 }
348
349 /*
350 * The output target queue delay is specified in milliseconds;
351 * convert it to nanoseconds.
352 */
353 IFCQ_TARGET_QDELAY(&ifp->if_snd) =
354 einit.output_target_qdelay * 1000 * 1000;
355 IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;
356
357 if (einit.start_delay_qlen > 0 &&
358 einit.start_delay_timeout > 0) {
359 ifp->if_eflags |= IFEF_ENQUEUE_MULTI;
360 ifp->if_start_delay_qlen =
361 min(100, einit.start_delay_qlen);
362 ifp->if_start_delay_timeout =
363 min(20000, einit.start_delay_timeout);
364 /* convert timeout to nanoseconds */
365 ifp->if_start_delay_timeout *= 1000;
366 }
367
368 if (error == 0) {
369 *interface = ifp;
370 // temporary - this should be done in dlil_if_acquire
371 ifnet_reference(ifp);
372 } else {
373 dlil_if_release(ifp);
374 *interface = NULL;
375 }
376 }
377
378 /*
379 * Note: We should do something here to indicate that we haven't been
380 * attached yet. By doing so, we can catch the case in ifnet_release
381 * where the reference count reaches zero and call the recycle
382 * function. If the interface is attached, the interface will be
383 * recycled when the interface's if_free function is called. If the
384 * interface is never attached, the if_free function will never be
385 * called and the interface will never be recycled.
386 */
387
388 return (error);
389}
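
/*
 * Illustrative sketch (not part of this file): allocating an interface
 * with the newer output model, where DLIL enqueues packets on if_snd and
 * calls the driver's start routine.  mydrv_start and the other mydrv_*
 * names are hypothetical placeholders.
 *
 *	struct ifnet_init_eparams ep;
 *	ifnet_t ifp;
 *	errno_t err;
 *
 *	bzero(&ep, sizeof (ep));
 *	ep.ver = IFNET_INIT_CURRENT_VERSION;
 *	ep.len = sizeof (ep);
 *	ep.flags = 0;			// not IFNET_INIT_LEGACY
 *	ep.name = "mydrv";
 *	ep.unit = 0;
 *	ep.family = IFNET_FAMILY_ETHERNET;
 *	ep.type = IFT_ETHER;
 *	ep.start = mydrv_start;		// required when not legacy
 *	ep.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
 *	ep.demux = mydrv_demux;
 *	ep.add_proto = mydrv_add_proto;
 *	ep.del_proto = mydrv_del_proto;
 *	ep.softc = sc;
 *	err = ifnet_allocate_extended(&ep, &ifp);
 */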
390
391errno_t
392ifnet_reference(ifnet_t ifp)
393{
394 return (dlil_if_ref(ifp));
395}
396
397errno_t
398ifnet_release(ifnet_t ifp)
399{
400 return (dlil_if_free(ifp));
401}
402
403errno_t
404ifnet_interface_family_find(const char *module_string,
405 ifnet_family_t *family_id)
406{
407 if (module_string == NULL || family_id == NULL)
408 return (EINVAL);
409
410 return (net_str_id_find_internal(module_string, family_id,
411 NSI_IF_FAM_ID, 1));
412}
413
414void *
415ifnet_softc(ifnet_t interface)
416{
417 return ((interface == NULL) ? NULL : interface->if_softc);
418}
419
420const char *
421ifnet_name(ifnet_t interface)
422{
423 return ((interface == NULL) ? NULL : interface->if_name);
424}
425
426ifnet_family_t
427ifnet_family(ifnet_t interface)
428{
429 return ((interface == NULL) ? 0 : interface->if_family);
430}
431
432ifnet_subfamily_t
433ifnet_subfamily(ifnet_t interface)
434{
435 return ((interface == NULL) ? 0 : interface->if_subfamily);
436}
437
438u_int32_t
439ifnet_unit(ifnet_t interface)
440{
441 return ((interface == NULL) ? (u_int32_t)0xffffffff :
442 (u_int32_t)interface->if_unit);
443}
444
445u_int32_t
446ifnet_index(ifnet_t interface)
447{
448 return ((interface == NULL) ? (u_int32_t)0xffffffff :
449 interface->if_index);
450}
451
452errno_t
453ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
454{
455 uint16_t old_flags;
456
457 if (interface == NULL)
458 return (EINVAL);
459
460 ifnet_lock_exclusive(interface);
461
462 /* If we are modifying the up/down state, call if_updown */
463 if ((mask & IFF_UP) != 0) {
464 if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
465 }
466
467 old_flags = interface->if_flags;
468 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
469 /* If we are modifying the multicast flag, set/unset the silent flag */
470 if ((old_flags & IFF_MULTICAST) !=
471 (interface->if_flags & IFF_MULTICAST)) {
472#if INET
473 if (IGMP_IFINFO(interface) != NULL)
474 igmp_initsilent(interface, IGMP_IFINFO(interface));
475#endif /* INET */
476#if INET6
477 if (MLD_IFINFO(interface) != NULL)
478 mld6_initsilent(interface, MLD_IFINFO(interface));
479#endif /* INET6 */
480 }
481
482 ifnet_lock_done(interface);
483
484 return (0);
485}
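
/*
 * Usage sketch (illustrative only): ifnet_set_flags() modifies just the
 * bits selected by the mask, so a caller can mark an interface up and
 * running without disturbing its other flags:
 *
 *	(void) ifnet_set_flags(ifp, IFF_UP | IFF_RUNNING,
 *	    IFF_UP | IFF_RUNNING);
 *
 * Passing new_flags of 0 with the same mask clears those bits instead.
 */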
486
487u_int16_t
488ifnet_flags(ifnet_t interface)
489{
490 return ((interface == NULL) ? 0 : interface->if_flags);
491}
492
493/*
494 * This routine ensures the following:
495 *
496 * If IFEF_AWDL is set by the caller, also set the rest of the flags
497 * defined in IFEF_AWDL_MASK.
498 *
499 * If IFEF_AWDL has been set on the interface and the caller attempts
500 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
501 * return failure.
502 *
503 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
504 * on the interface.
505 *
506 * All other flags not associated with AWDL are not affected.
507 *
508 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
509 */
510static errno_t
511ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
512{
513 u_int32_t eflags;
514
515 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
516
517 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
518
519 if (ifp->if_eflags & IFEF_AWDL) {
520 if (eflags & IFEF_AWDL) {
521 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK)
522 return (EINVAL);
523 } else {
524 *new_eflags &= ~IFEF_AWDL_MASK;
525 *mask |= IFEF_AWDL_MASK;
526 }
527 } else if (eflags & IFEF_AWDL) {
528 *new_eflags |= IFEF_AWDL_MASK;
529 *mask |= IFEF_AWDL_MASK;
530 } else if (eflags & IFEF_AWDL_RESTRICTED &&
531 !(ifp->if_eflags & IFEF_AWDL))
532 return (EINVAL);
533
534 return (0);
535}
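
/*
 * Worked example of the rules above (illustrative only): a caller that
 * sets only IFEF_AWDL, e.g.
 *
 *	(void) ifnet_set_eflags(ifp, IFEF_AWDL, IFEF_AWDL);
 *
 * ends up with all of IFEF_AWDL_MASK set, because ifnet_awdl_check_eflags()
 * widens both new_eflags and the mask.  Conversely, once IFEF_AWDL is set
 * on the interface, a request that would clear only part of IFEF_AWDL_MASK
 * is rejected with EINVAL.
 */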
536
537errno_t
538ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
539{
540 uint32_t oeflags;
541 struct kev_msg ev_msg;
542 struct net_event_data ev_data;
543
544 if (interface == NULL)
545 return (EINVAL);
546
547 bzero(&ev_msg, sizeof(ev_msg));
548 ifnet_lock_exclusive(interface);
549 /*
550 * Sanity checks for IFEF_AWDL and its related flags.
551 */
552 if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
553 ifnet_lock_done(interface);
554 return (EINVAL);
555 }
556 oeflags = interface->if_eflags;
557 interface->if_eflags =
558 (new_flags & mask) | (interface->if_eflags & ~mask);
559 ifnet_lock_done(interface);
560 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
561 !(oeflags & IFEF_AWDL_RESTRICTED)) {
562 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
563 /*
564 * The interface is now restricted to applications that have
565 * the entitlement.
566 * The check for the entitlement will be done in the data
567 * path, so we don't have to do anything here.
568 */
569 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
570 !(interface->if_eflags & IFEF_AWDL_RESTRICTED))
571 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
572 /*
573 * Notify configd so that it has a chance to perform better
574 * reachability detection.
575 */
576 if (ev_msg.event_code) {
577 bzero(&ev_data, sizeof(ev_data));
578 ev_msg.vendor_code = KEV_VENDOR_APPLE;
579 ev_msg.kev_class = KEV_NETWORK_CLASS;
580 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
581 strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
582 ev_data.if_family = interface->if_family;
583 ev_data.if_unit = interface->if_unit;
584 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
585 ev_msg.dv[0].data_ptr = &ev_data;
586 ev_msg.dv[1].data_length = 0;
587 kev_post_msg(&ev_msg);
588 }
589
590 return (0);
591}
592
593u_int32_t
594ifnet_eflags(ifnet_t interface)
595{
596 return ((interface == NULL) ? 0 : interface->if_eflags);
597}
598
599errno_t
600ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
601{
602 int before, after;
603
604 if (ifp == NULL)
605 return (EINVAL);
606
607 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
608 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
609
610 /*
611 * If this is called prior to ifnet attach, the actual work will
612 * be done at attach time. Otherwise, if it is called after
613 * ifnet detach, then it is a no-op.
614 */
615 if (!ifnet_is_attached(ifp, 0)) {
616 ifp->if_idle_new_flags = new_flags;
617 ifp->if_idle_new_flags_mask = mask;
618 return (0);
619 } else {
620 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
621 }
622
623 before = ifp->if_idle_flags;
624 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
625 after = ifp->if_idle_flags;
626
627 if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
628 ifp->if_want_aggressive_drain != 0) {
629 ifp->if_want_aggressive_drain = 0;
630 if (ifnet_aggressive_drainers == 0)
631 panic("%s: ifp=%p negative aggdrain!", __func__, ifp);
632 } else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
633 ifp->if_want_aggressive_drain++;
634 if (++ifnet_aggressive_drainers == 0)
635 panic("%s: ifp=%p wraparound aggdrain!", __func__, ifp);
636 }
637
638 return (0);
639}
640
641errno_t
642ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
643{
644 errno_t err;
645
646 lck_mtx_lock(rnh_lock);
647 ifnet_lock_exclusive(ifp);
648 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
649 ifnet_lock_done(ifp);
650 lck_mtx_unlock(rnh_lock);
651
652 return (err);
653}
654
655u_int32_t
656ifnet_idle_flags(ifnet_t ifp)
657{
658 return ((ifp == NULL) ? 0 : ifp->if_idle_flags);
659}
660
661errno_t
662ifnet_set_link_quality(ifnet_t ifp, int quality)
663{
664 errno_t err = 0;
665
666 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
667 err = EINVAL;
668 goto done;
669 }
670
671 if (!ifnet_is_attached(ifp, 0)) {
672 err = ENXIO;
673 goto done;
674 }
675
676 if_lqm_update(ifp, quality, 0);
677
678done:
679 return (err);
680}
681
682int
683ifnet_link_quality(ifnet_t ifp)
684{
685 int lqm;
686
687 if (ifp == NULL)
688 return (IFNET_LQM_THRESH_OFF);
689
690 ifnet_lock_shared(ifp);
691 lqm = ifp->if_interface_state.lqm_state;
692 ifnet_lock_done(ifp);
693
694 return (lqm);
695}
696
697errno_t
698ifnet_set_interface_state(ifnet_t ifp,
699 struct if_interface_state *if_interface_state)
700{
701 errno_t err = 0;
702
703 if (ifp == NULL || if_interface_state == NULL) {
704 err = EINVAL;
705 goto done;
706 }
707
708 if (!ifnet_is_attached(ifp, 0)) {
709 err = ENXIO;
710 goto done;
711 }
712
713 if_state_update(ifp, if_interface_state);
714
715done:
716 return (err);
717}
718
719errno_t
720ifnet_get_interface_state(ifnet_t ifp,
721 struct if_interface_state *if_interface_state)
722{
723 errno_t err = 0;
724
725 if (ifp == NULL || if_interface_state == NULL) {
726 err = EINVAL;
727 goto done;
728 }
729
730 if (!ifnet_is_attached(ifp, 0)) {
731 err = ENXIO;
732 goto done;
733 }
734
735 if_get_state(ifp, if_interface_state);
736
737done:
738 return (err);
739}
740
741
742static errno_t
743ifnet_defrouter_llreachinfo(ifnet_t ifp, int af,
744 struct ifnet_llreach_info *iflri)
745{
746 if (ifp == NULL || iflri == NULL)
747 return (EINVAL);
748
749 VERIFY(af == AF_INET || af == AF_INET6);
750
751 return (ifnet_llreach_get_defrouter(ifp, af, iflri));
752}
753
754errno_t
755ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
756{
757 return (ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri));
758}
759
760errno_t
761ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
762{
763 return (ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri));
764}
765
766errno_t
767ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
768 u_int32_t mask)
769{
770 errno_t error = 0;
771 int tmp;
772
773 if (ifp == NULL)
774 return (EINVAL);
775
776 ifnet_lock_exclusive(ifp);
777 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
778 if ((tmp & ~IFCAP_VALID))
779 error = EINVAL;
780 else
781 ifp->if_capabilities = tmp;
782 ifnet_lock_done(ifp);
783
784 return (error);
785}
786
787u_int32_t
788ifnet_capabilities_supported(ifnet_t ifp)
789{
790 return ((ifp == NULL) ? 0 : ifp->if_capabilities);
791}
792
793
794errno_t
795ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
796 u_int32_t mask)
797{
798 errno_t error = 0;
799 int tmp;
800 struct kev_msg ev_msg;
801 struct net_event_data ev_data;
802
803 if (ifp == NULL)
804 return (EINVAL);
805
806 ifnet_lock_exclusive(ifp);
807 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
808 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities))
809 error = EINVAL;
810 else
811 ifp->if_capenable = tmp;
812 ifnet_lock_done(ifp);
813
814 /* Notify application of the change */
815 bzero(&ev_data, sizeof (struct net_event_data));
816 bzero(&ev_msg, sizeof (struct kev_msg));
817 ev_msg.vendor_code = KEV_VENDOR_APPLE;
818 ev_msg.kev_class = KEV_NETWORK_CLASS;
819 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
820
821 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
822 strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
823 ev_data.if_family = ifp->if_family;
824 ev_data.if_unit = (u_int32_t)ifp->if_unit;
825 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
826 ev_msg.dv[0].data_ptr = &ev_data;
827 ev_msg.dv[1].data_length = 0;
828 kev_post_msg(&ev_msg);
829
830 return (error);
831}
832
833u_int32_t
834ifnet_capabilities_enabled(ifnet_t ifp)
835{
836 return ((ifp == NULL) ? 0 : ifp->if_capenable);
837}
838
839static const ifnet_offload_t offload_mask =
840 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
841 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
842 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_VLAN_TAGGING |
843 IFNET_VLAN_MTU | IFNET_MULTIPAGES | IFNET_TSO_IPV4 | IFNET_TSO_IPV6 |
844 IFNET_TX_STATUS);
845
846static const ifnet_offload_t any_offload_csum =
847 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
848 IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_CSUM_PARTIAL);
849
850errno_t
851ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
852{
853 u_int32_t ifcaps = 0;
854
855 if (interface == NULL)
856 return (EINVAL);
857
858 ifnet_lock_exclusive(interface);
859 interface->if_hwassist = (offload & offload_mask);
860 /*
861 * Hardware capable of partial checksum offload is
862 * flexible enough to handle any transports utilizing
863 * Internet Checksumming. Include those transports
864 * here, and leave the final decision to IP.
865 */
866 if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
867 interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
868 IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
869 }
870 if (dlil_verbose) {
871 log(LOG_DEBUG, "%s: set offload flags=%b\n",
872 if_name(interface),
873 interface->if_hwassist, IFNET_OFFLOADF_BITS);
874 }
875 ifnet_lock_done(interface);
876
877 if ((offload & any_offload_csum))
878 ifcaps |= IFCAP_HWCSUM;
879 if ((offload & IFNET_TSO_IPV4))
880 ifcaps |= IFCAP_TSO4;
881 if ((offload & IFNET_TSO_IPV6))
882 ifcaps |= IFCAP_TSO6;
883 if ((offload & IFNET_VLAN_MTU))
884 ifcaps |= IFCAP_VLAN_MTU;
885 if ((offload & IFNET_VLAN_TAGGING))
886 ifcaps |= IFCAP_VLAN_HWTAGGING;
887 if ((offload & IFNET_TX_STATUS))
888 ifcaps |= IFCAP_TXSTATUS;
889 if (ifcaps != 0) {
890 (void) ifnet_set_capabilities_supported(interface, ifcaps,
891 IFCAP_VALID);
892 (void) ifnet_set_capabilities_enabled(interface, ifcaps,
893 IFCAP_VALID);
894 }
895
896 return (0);
897}
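
/*
 * Usage sketch (illustrative only): a driver whose hardware can checksum
 * TCP/UDP over IPv4 and segment TCP might advertise
 *
 *	(void) ifnet_set_offload(ifp,
 *	    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
 *	    IFNET_TSO_IPV4 | IFNET_VLAN_MTU);
 *
 * which also updates the IFCAP_* supported/enabled capabilities as shown
 * above.
 */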
898
899ifnet_offload_t
900ifnet_offload(ifnet_t interface)
901{
902 return ((interface == NULL) ?
903 0 : (interface->if_hwassist & offload_mask));
904}
905
906errno_t
907ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
908{
909 errno_t error = 0;
910
911 if (interface == NULL || mtuLen < interface->if_mtu)
912 return (EINVAL);
913
914 switch (family) {
915 case AF_INET:
916 if (interface->if_hwassist & IFNET_TSO_IPV4)
917 interface->if_tso_v4_mtu = mtuLen;
918 else
919 error = EINVAL;
920 break;
921
922 case AF_INET6:
923 if (interface->if_hwassist & IFNET_TSO_IPV6)
924 interface->if_tso_v6_mtu = mtuLen;
925 else
926 error = EINVAL;
927 break;
928
929 default:
930 error = EPROTONOSUPPORT;
931 break;
932 }
933
934 return (error);
935}
936
937errno_t
938ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
939{
940 errno_t error = 0;
941
942 if (interface == NULL || mtuLen == NULL)
943 return (EINVAL);
944
945 switch (family) {
946 case AF_INET:
947 if (interface->if_hwassist & IFNET_TSO_IPV4)
948 *mtuLen = interface->if_tso_v4_mtu;
949 else
950 error = EINVAL;
951 break;
952
953 case AF_INET6:
954 if (interface->if_hwassist & IFNET_TSO_IPV6)
955 *mtuLen = interface->if_tso_v6_mtu;
956 else
957 error = EINVAL;
958 break;
959
960 default:
961 error = EPROTONOSUPPORT;
962 break;
963 }
964
965 return (error);
966}
967
968errno_t
969ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
970{
971 struct kev_msg ev_msg;
972 struct net_event_data ev_data;
973
974 bzero(&ev_data, sizeof (struct net_event_data));
975 bzero(&ev_msg, sizeof (struct kev_msg));
976
977 if (interface == NULL)
978 return (EINVAL);
979
980 /* Do not accept wacky values */
981 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS)
982 return (EINVAL);
983
984 ifnet_lock_exclusive(interface);
985
986 interface->if_wake_properties =
987 (properties & mask) | (interface->if_wake_properties & ~mask);
988
989 ifnet_lock_done(interface);
990
991 (void) ifnet_touch_lastchange(interface);
992
993 /* Notify application of the change */
994 ev_msg.vendor_code = KEV_VENDOR_APPLE;
995 ev_msg.kev_class = KEV_NETWORK_CLASS;
996 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
997
998 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
999 strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
1000 ev_data.if_family = interface->if_family;
1001 ev_data.if_unit = (u_int32_t)interface->if_unit;
1002 ev_msg.dv[0].data_length = sizeof (struct net_event_data);
1003 ev_msg.dv[0].data_ptr = &ev_data;
1004 ev_msg.dv[1].data_length = 0;
1005 kev_post_msg(&ev_msg);
1006
1007 return (0);
1008}
1009
1010u_int32_t
1011ifnet_get_wake_flags(ifnet_t interface)
1012{
1013 return ((interface == NULL) ? 0 : interface->if_wake_properties);
1014}
1015
1016/*
1017 * Should MIB data store a copy?
1018 */
1019errno_t
1020ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen)
1021{
1022 if (interface == NULL)
1023 return (EINVAL);
1024
1025 ifnet_lock_exclusive(interface);
1026 interface->if_linkmib = (void*)mibData;
1027 interface->if_linkmiblen = mibLen;
1028 ifnet_lock_done(interface);
1029 return (0);
1030}
1031
1032errno_t
1033ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen)
1034{
1035 errno_t result = 0;
1036
1037 if (interface == NULL)
1038 return (EINVAL);
1039
1040 ifnet_lock_shared(interface);
1041 if (*mibLen < interface->if_linkmiblen)
1042 result = EMSGSIZE;
1043 if (result == 0 && interface->if_linkmib == NULL)
1044 result = ENOTSUP;
1045
1046 if (result == 0) {
1047 *mibLen = interface->if_linkmiblen;
1048 bcopy(interface->if_linkmib, mibData, *mibLen);
1049 }
1050 ifnet_lock_done(interface);
1051
1052 return (result);
1053}
1054
1055u_int32_t
1056ifnet_get_link_mib_data_length(ifnet_t interface)
1057{
1058 return ((interface == NULL) ? 0 : interface->if_linkmiblen);
1059}
1060
1061errno_t
1062ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1063 mbuf_t m, void *route, const struct sockaddr *dest)
1064{
1065 if (interface == NULL || protocol_family == 0 || m == NULL) {
1066 if (m != NULL)
1067 mbuf_freem_list(m);
1068 return (EINVAL);
1069 }
1070 return (dlil_output(interface, protocol_family, m, route, dest, 0, NULL));
1071}
1072
1073errno_t
1074ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1075{
1076 if (interface == NULL || m == NULL) {
1077 if (m != NULL)
1078 mbuf_freem_list(m);
1079 return (EINVAL);
1080 }
1081 return (dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL));
1082}
1083
1084errno_t
1085ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1086{
1087 if (interface == NULL)
1088 return (EINVAL);
1089
1090 interface->if_mtu = mtu;
1091 return (0);
1092}
1093
1094u_int32_t
1095ifnet_mtu(ifnet_t interface)
1096{
1097 return ((interface == NULL) ? 0 : interface->if_mtu);
1098}
1099
1100u_char
1101ifnet_type(ifnet_t interface)
1102{
1103 return ((interface == NULL) ? 0 : interface->if_data.ifi_type);
1104}
1105
1106errno_t
1107ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1108{
1109 if (interface == NULL)
1110 return (EINVAL);
1111
1112 interface->if_data.ifi_addrlen = addrlen;
1113 return (0);
1114}
1115
1116u_char
1117ifnet_addrlen(ifnet_t interface)
1118{
1119 return ((interface == NULL) ? 0 : interface->if_data.ifi_addrlen);
1120}
1121
1122errno_t
1123ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1124{
1125 if (interface == NULL)
1126 return (EINVAL);
1127
1128 interface->if_data.ifi_hdrlen = hdrlen;
1129 return (0);
1130}
1131
1132u_char
1133ifnet_hdrlen(ifnet_t interface)
1134{
1135 return ((interface == NULL) ? 0 : interface->if_data.ifi_hdrlen);
1136}
1137
1138errno_t
1139ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1140{
1141 if (interface == NULL)
1142 return (EINVAL);
1143
1144 interface->if_data.ifi_metric = metric;
1145 return (0);
1146}
1147
1148u_int32_t
1149ifnet_metric(ifnet_t interface)
1150{
1151 return ((interface == NULL) ? 0 : interface->if_data.ifi_metric);
1152}
1153
1154errno_t
1155ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate)
1156{
1157 if (ifp == NULL)
1158 return (EINVAL);
1159
1160 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1161 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1162
1163 /* Pin if_baudrate to 32 bits until we can change the storage size */
1164 ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate;
1165
1166 return (0);
1167}
1168
1169u_int64_t
1170ifnet_baudrate(struct ifnet *ifp)
1171{
1172 return ((ifp == NULL) ? 0 : ifp->if_baudrate);
1173}
1174
1175errno_t
1176ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1177 struct if_bandwidths *input_bw)
1178{
1179 if (ifp == NULL)
1180 return (EINVAL);
1181
1182 /* set input values first (if any), as output values depend on them */
1183 if (input_bw != NULL)
1184 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1185
1186 if (output_bw != NULL)
1187 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1188
1189 return (0);
1190}
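
/*
 * Usage sketch (illustrative only): a driver reporting a link with a
 * 100 Mbps maximum and a 40 Mbps currently effective bandwidth in each
 * direction.  The values are assumed to be in bits per second, matching
 * the if_baudrate pinning above.
 *
 *	struct if_bandwidths bw;
 *
 *	bw.max_bw = 100 * 1000 * 1000;
 *	bw.eff_bw = 40 * 1000 * 1000;
 *	(void) ifnet_set_bandwidths(ifp, &bw, &bw);
 */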
1191
1192static void
1193ifnet_set_link_status_outbw(struct ifnet *ifp)
1194{
1195 struct if_wifi_status_v1 *sr;
1196 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1197 if (ifp->if_output_bw.eff_bw != 0) {
1198 sr->valid_bitmask |=
1199 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1200 sr->ul_effective_bandwidth =
1201 ifp->if_output_bw.eff_bw;
1202 }
1203 if (ifp->if_output_bw.max_bw != 0) {
1204 sr->valid_bitmask |=
1205 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1206 sr->ul_max_bandwidth =
1207 ifp->if_output_bw.max_bw;
1208 }
1209}
1210
1211errno_t
1212ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1213 boolean_t locked)
1214{
1215 struct if_bandwidths old_bw;
1216 struct ifclassq *ifq;
1217 u_int64_t br;
1218
1219 VERIFY(ifp != NULL && bw != NULL);
1220
1221 ifq = &ifp->if_snd;
1222 if (!locked)
1223 IFCQ_LOCK(ifq);
1224 IFCQ_LOCK_ASSERT_HELD(ifq);
1225
1226 old_bw = ifp->if_output_bw;
1227 if (bw->eff_bw != 0)
1228 ifp->if_output_bw.eff_bw = bw->eff_bw;
1229 if (bw->max_bw != 0)
1230 ifp->if_output_bw.max_bw = bw->max_bw;
1231 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
1232 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1233 else if (ifp->if_output_bw.eff_bw == 0)
1234 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1235
1236 /* Pin if_baudrate to 32 bits */
1237 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1238 if (br != 0)
1239 ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
1240
1241 /* Adjust queue parameters if needed */
1242 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1243 old_bw.max_bw != ifp->if_output_bw.max_bw)
1244 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1245
1246 if (!locked)
1247 IFCQ_UNLOCK(ifq);
1248
1249 /*
1250 * If this is a Wi-Fi interface, also update the values in the
1251 * if_link_status structure.
1252 */
1253 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1254 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1255 ifnet_set_link_status_outbw(ifp);
1256 lck_rw_done(&ifp->if_link_status_lock);
1257 }
1258
1259 return (0);
1260}
1261
1262static void
1263ifnet_set_link_status_inbw(struct ifnet *ifp)
1264{
1265 struct if_wifi_status_v1 *sr;
1266
1267 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1268 if (ifp->if_input_bw.eff_bw != 0) {
1269 sr->valid_bitmask |=
1270 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1271 sr->dl_effective_bandwidth =
1272 ifp->if_input_bw.eff_bw;
1273 }
1274 if (ifp->if_input_bw.max_bw != 0) {
1275 sr->valid_bitmask |=
1276 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1277 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw;
1278 }
1279}
1280
1281errno_t
1282ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1283{
1284 struct if_bandwidths old_bw;
1285
1286 VERIFY(ifp != NULL && bw != NULL);
1287
1288 old_bw = ifp->if_input_bw;
1289 if (bw->eff_bw != 0)
1290 ifp->if_input_bw.eff_bw = bw->eff_bw;
1291 if (bw->max_bw != 0)
1292 ifp->if_input_bw.max_bw = bw->max_bw;
1293 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
1294 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1295 else if (ifp->if_input_bw.eff_bw == 0)
1296 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1297
1298 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1299 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
1300 ifnet_set_link_status_inbw(ifp);
1301 lck_rw_done(&ifp->if_link_status_lock);
1302 }
1303
1304 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1305 old_bw.max_bw != ifp->if_input_bw.max_bw)
1306 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1307
1308 return (0);
1309}
1310
1311u_int64_t
1312ifnet_output_linkrate(struct ifnet *ifp)
1313{
1314 struct ifclassq *ifq = &ifp->if_snd;
1315 u_int64_t rate;
1316
1317 IFCQ_LOCK_ASSERT_HELD(ifq);
1318
1319 rate = ifp->if_output_bw.eff_bw;
1320 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1321 u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
1322 VERIFY(tbr_rate > 0);
1323 rate = MIN(rate, tbr_rate);
1324 }
1325
1326 return (rate);
1327}
1328
1329u_int64_t
1330ifnet_input_linkrate(struct ifnet *ifp)
1331{
1332 return (ifp->if_input_bw.eff_bw);
1333}
1334
1335errno_t
1336ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1337 struct if_bandwidths *input_bw)
1338{
1339 if (ifp == NULL)
1340 return (EINVAL);
1341
1342 if (output_bw != NULL)
1343 *output_bw = ifp->if_output_bw;
1344 if (input_bw != NULL)
1345 *input_bw = ifp->if_input_bw;
1346
1347 return (0);
1348}
1349
1350errno_t
1351ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1352 struct if_latencies *input_lt)
1353{
1354 if (ifp == NULL)
1355 return (EINVAL);
1356
1357 if (output_lt != NULL)
1358 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1359
1360 if (input_lt != NULL)
1361 (void) ifnet_set_input_latencies(ifp, input_lt);
1362
1363 return (0);
1364}
1365
1366errno_t
1367ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1368 boolean_t locked)
1369{
1370 struct if_latencies old_lt;
1371 struct ifclassq *ifq;
1372
1373 VERIFY(ifp != NULL && lt != NULL);
1374
1375 ifq = &ifp->if_snd;
1376 if (!locked)
1377 IFCQ_LOCK(ifq);
1378 IFCQ_LOCK_ASSERT_HELD(ifq);
1379
1380 old_lt = ifp->if_output_lt;
1381 if (lt->eff_lt != 0)
1382 ifp->if_output_lt.eff_lt = lt->eff_lt;
1383 if (lt->max_lt != 0)
1384 ifp->if_output_lt.max_lt = lt->max_lt;
1385 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
1386 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1387 else if (ifp->if_output_lt.eff_lt == 0)
1388 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1389
1390 /* Adjust queue parameters if needed */
1391 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1392 old_lt.max_lt != ifp->if_output_lt.max_lt)
1393 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1394
1395 if (!locked)
1396 IFCQ_UNLOCK(ifq);
1397
1398 return (0);
1399}
1400
1401errno_t
1402ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1403{
1404 struct if_latencies old_lt;
1405
1406 VERIFY(ifp != NULL && lt != NULL);
1407
1408 old_lt = ifp->if_input_lt;
1409 if (lt->eff_lt != 0)
1410 ifp->if_input_lt.eff_lt = lt->eff_lt;
1411 if (lt->max_lt != 0)
1412 ifp->if_input_lt.max_lt = lt->max_lt;
1413 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
1414 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1415 else if (ifp->if_input_lt.eff_lt == 0)
1416 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1417
1418 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1419 old_lt.max_lt != ifp->if_input_lt.max_lt)
1420 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1421
1422 return (0);
1423}
1424
1425errno_t
1426ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1427 struct if_latencies *input_lt)
1428{
1429 if (ifp == NULL)
1430 return (EINVAL);
1431
1432 if (output_lt != NULL)
1433 *output_lt = ifp->if_output_lt;
1434 if (input_lt != NULL)
1435 *input_lt = ifp->if_input_lt;
1436
1437 return (0);
1438}
1439
1440errno_t
1441ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1442{
1443 errno_t err;
1444
1445 if (ifp == NULL)
1446 return (EINVAL);
1447 else if (!ifnet_is_attached(ifp, 1))
1448 return (ENXIO);
1449
1450 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1451
1452 /* Release the io ref count */
1453 ifnet_decr_iorefcnt(ifp);
1454
1455 return (err);
1456}
1457
1458errno_t
1459ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1460{
1461 errno_t err;
1462
1463 if (ifp == NULL || p == NULL)
1464 return (EINVAL);
1465 else if (!ifnet_is_attached(ifp, 1))
1466 return (ENXIO);
1467
1468 err = dlil_rxpoll_get_params(ifp, p);
1469
1470 /* Release the io ref count */
1471 ifnet_decr_iorefcnt(ifp);
1472
1473 return (err);
1474}
1475
1476errno_t
1477ifnet_stat_increment(struct ifnet *ifp,
1478 const struct ifnet_stat_increment_param *s)
1479{
1480 if (ifp == NULL)
1481 return (EINVAL);
1482
1483 if (s->packets_in != 0)
1484 atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1485 if (s->bytes_in != 0)
1486 atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1487 if (s->errors_in != 0)
1488 atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1489
1490 if (s->packets_out != 0)
1491 atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
1492 if (s->bytes_out != 0)
1493 atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1494 if (s->errors_out != 0)
1495 atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1496
1497 if (s->collisions != 0)
1498 atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
1499 if (s->dropped != 0)
1500 atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1501
1502 /* Touch the last change time. */
1503 TOUCHLASTCHANGE(&ifp->if_lastchange);
1504
1505 return (0);
1506}
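
/*
 * Usage sketch (illustrative only): a driver that just handed one received
 * frame of rxlen bytes to the stack could account for it with either
 *
 *	struct ifnet_stat_increment_param st;
 *
 *	bzero(&st, sizeof (st));
 *	st.packets_in = 1;
 *	st.bytes_in = rxlen;
 *	(void) ifnet_stat_increment(ifp, &st);
 *
 * or, equivalently, with ifnet_stat_increment_in(ifp, 1, rxlen, 0) below.
 */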
1507
1508errno_t
1509ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1510 u_int32_t bytes_in, u_int32_t errors_in)
1511{
1512 if (ifp == NULL)
1513 return (EINVAL);
1514
1515 if (packets_in != 0)
1516 atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
1517 if (bytes_in != 0)
1518 atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
1519 if (errors_in != 0)
1520 atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
1521
1522 TOUCHLASTCHANGE(&ifp->if_lastchange);
1523
1524 return (0);
1525}
1526
1527errno_t
1528ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1529 u_int32_t bytes_out, u_int32_t errors_out)
1530{
1531 if (ifp == NULL)
1532 return (EINVAL);
1533
1534 if (packets_out != 0)
1535 atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
1536 if (bytes_out != 0)
1537 atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
1538 if (errors_out != 0)
1539 atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
1540
1541 TOUCHLASTCHANGE(&ifp->if_lastchange);
1542
1543 return (0);
1544}
1545
1546errno_t
1547ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1548{
1549 if (ifp == NULL)
1550 return (EINVAL);
1551
1552 atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1553 atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1554 atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
1555 atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1556
1557 atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
1558 atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1559 atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
1560 atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1561
1562 atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
1563 atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1564 atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);
1565
1566 /* Touch the last change time. */
1567 TOUCHLASTCHANGE(&ifp->if_lastchange);
1568
1569 return (0);
1570}
1571
1572errno_t
1573ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1574{
1575 if (ifp == NULL)
1576 return (EINVAL);
1577
1578 atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
1579 atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
1580 atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
1581 atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);
1582
1583 atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
1584 atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
1585 atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
1586 atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);
1587
1588 atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
1589 atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
1590 atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);
1591
1592 return (0);
1593}
1594
1595errno_t
1596ifnet_touch_lastchange(ifnet_t interface)
1597{
1598 if (interface == NULL)
1599 return (EINVAL);
1600
1601 TOUCHLASTCHANGE(&interface->if_lastchange);
1602
1603 return (0);
1604}
1605
1606errno_t
1607ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1608{
1609 if (interface == NULL)
1610 return (EINVAL);
1611
1612 *last_change = interface->if_data.ifi_lastchange;
1613 /* Crude conversion from uptime to calendar time */
1614 last_change->tv_sec += boottime_sec();
1615
1616 return (0);
1617}
1618
1619errno_t
1620ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
1621{
1622 return (addresses == NULL ? EINVAL :
1623 ifnet_get_address_list_family(interface, addresses, 0));
1624}
1625
1626struct ifnet_addr_list {
1627 SLIST_ENTRY(ifnet_addr_list) ifal_le;
1628 struct ifaddr *ifal_ifa;
1629};
1630
1631errno_t
1632ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
1633 sa_family_t family)
1634{
1635 return (ifnet_get_address_list_family_internal(interface, addresses,
1636 family, 0, M_NOWAIT, 0));
1637}
1638
1639errno_t
1640ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
1641{
1642 return (addresses == NULL ? EINVAL :
1643 ifnet_get_address_list_family_internal(interface, addresses,
1644 0, 0, M_NOWAIT, 1));
1645}
1646
1647extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
1648
1649extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
1650
1651__private_extern__ errno_t
1652ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
1653 sa_family_t family, int detached, int how, int return_inuse_addrs)
1654{
1655 SLIST_HEAD(, ifnet_addr_list) ifal_head;
1656 struct ifnet_addr_list *ifal, *ifal_tmp;
1657 struct ifnet *ifp;
1658 int count = 0;
1659 errno_t err = 0;
1660 int usecount = 0;
1661 int index = 0;
1662
1663 SLIST_INIT(&ifal_head);
1664
1665 if (addresses == NULL) {
1666 err = EINVAL;
1667 goto done;
1668 }
1669 *addresses = NULL;
1670
1671 if (detached) {
1672 /*
1673 * Interface has been detached, so skip the lookup
1674 * at ifnet_head and go directly to the inner loop.
1675 */
1676 ifp = interface;
1677 if (ifp == NULL) {
1678 err = EINVAL;
1679 goto done;
1680 }
1681 goto one;
1682 }
1683
1684 ifnet_head_lock_shared();
1685 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1686 if (interface != NULL && ifp != interface)
1687 continue;
1688one:
1689 ifnet_lock_shared(ifp);
1690 if (interface == NULL || interface == ifp) {
1691 struct ifaddr *ifa;
1692 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1693 IFA_LOCK(ifa);
1694 if (family != 0 &&
1695 ifa->ifa_addr->sa_family != family) {
1696 IFA_UNLOCK(ifa);
1697 continue;
1698 }
1699 MALLOC(ifal, struct ifnet_addr_list *,
1700 sizeof (*ifal), M_TEMP, how);
1701 if (ifal == NULL) {
1702 IFA_UNLOCK(ifa);
1703 ifnet_lock_done(ifp);
1704 if (!detached)
1705 ifnet_head_done();
1706 err = ENOMEM;
1707 goto done;
1708 }
1709 ifal->ifal_ifa = ifa;
1710 IFA_ADDREF_LOCKED(ifa);
1711 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
1712 ++count;
1713 IFA_UNLOCK(ifa);
1714 }
1715 }
1716 ifnet_lock_done(ifp);
1717 if (detached)
1718 break;
1719 }
1720 if (!detached)
1721 ifnet_head_done();
1722
1723 if (count == 0) {
1724 err = ENXIO;
1725 goto done;
1726 }
1727 MALLOC(*addresses, ifaddr_t *, sizeof (ifaddr_t) * (count + 1),
1728 M_TEMP, how);
1729 if (*addresses == NULL) {
1730 err = ENOMEM;
1731 goto done;
1732 }
1733 bzero(*addresses, sizeof (ifaddr_t) * (count + 1));
1734
1735done:
1736 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
1737 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
1738 if (err == 0) {
1739 if (return_inuse_addrs) {
1740 usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
1741 usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
1742 if (usecount) {
1743 (*addresses)[index] = ifal->ifal_ifa;
1744 index++;
1745 } else {
1746 IFA_REMREF(ifal->ifal_ifa);
1747 }
1748 } else {
1749 (*addresses)[--count] = ifal->ifal_ifa;
1750 }
1751 } else {
1752 IFA_REMREF(ifal->ifal_ifa);
1753 }
1754 FREE(ifal, M_TEMP);
1755 }
1756
1757 VERIFY(err == 0 || *addresses == NULL);
1758 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
1759 VERIFY(return_inuse_addrs == 1);
1760 FREE(*addresses, M_TEMP);
1761 err = ENXIO;
1762 }
1763 return (err);
1764}
1765
1766void
1767ifnet_free_address_list(ifaddr_t *addresses)
1768{
1769 int i;
1770
1771 if (addresses == NULL)
1772 return;
1773
1774 for (i = 0; addresses[i] != NULL; i++)
1775 IFA_REMREF(addresses[i]);
1776
1777 FREE(addresses, M_TEMP);
1778}
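
/*
 * Usage sketch (illustrative only): ifnet_get_address_list() hands back a
 * NULL-terminated array of referenced ifaddr_t entries, which the caller
 * must release with ifnet_free_address_list():
 *
 *	ifaddr_t *addrs;
 *	int i;
 *
 *	if (ifnet_get_address_list(ifp, &addrs) == 0) {
 *		for (i = 0; addrs[i] != NULL; i++) {
 *			// inspect addrs[i]
 *		}
 *		ifnet_free_address_list(addrs);
 *	}
 */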
1779
1780void *
1781ifnet_lladdr(ifnet_t interface)
1782{
1783 struct ifaddr *ifa;
1784 void *lladdr;
1785
1786 if (interface == NULL)
1787 return (NULL);
1788
1789 /*
1790 * if_lladdr points to the permanent link address of
1791 * the interface and it never gets deallocated; internal
1792 * code should simply use IF_LLADDR() for performance.
1793 */
1794 ifa = interface->if_lladdr;
1795 IFA_LOCK_SPIN(ifa);
1796 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
1797 IFA_UNLOCK(ifa);
1798
1799 return (lladdr);
1800}
1801
1802errno_t
1803ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
1804 size_t *out_len)
1805{
1806 if (interface == NULL || addr == NULL || out_len == NULL)
1807 return (EINVAL);
1808
1809 *out_len = interface->if_broadcast.length;
1810
1811 if (buffer_len < interface->if_broadcast.length)
1812 return (EMSGSIZE);
1813
1814 if (interface->if_broadcast.length == 0)
1815 return (ENXIO);
1816
1817 if (interface->if_broadcast.length <=
1818 sizeof (interface->if_broadcast.u.buffer)) {
1819 bcopy(interface->if_broadcast.u.buffer, addr,
1820 interface->if_broadcast.length);
1821 } else {
1822 bcopy(interface->if_broadcast.u.ptr, addr,
1823 interface->if_broadcast.length);
1824 }
1825
1826 return (0);
1827}
1828
1829static errno_t
1830ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
1831 size_t lladdr_len, kauth_cred_t *credp)
1832{
1833 const u_int8_t *bytes;
1834 size_t bytes_len;
1835 struct ifaddr *ifa;
1836 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
1837 errno_t error = 0;
1838
1839 /*
1840 * Make sure to accommodate the largest possible
1841 * size of SA(if_lladdr)->sa_len.
1842 */
1843 _CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1));
1844
1845 if (interface == NULL || lladdr == NULL)
1846 return (EINVAL);
1847
1848 ifa = interface->if_lladdr;
1849 IFA_LOCK_SPIN(ifa);
1850 bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
1851 IFA_UNLOCK(ifa);
1852
1853 bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
1854 if (bytes_len != lladdr_len) {
1855 bzero(lladdr, lladdr_len);
1856 error = EMSGSIZE;
1857 } else {
1858 bcopy(bytes, lladdr, bytes_len);
1859 }
1860
1861 return (error);
1862}
1863
1864errno_t
1865ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1866{
1867 return (ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1868 NULL));
1869}
1870
1871errno_t
1872ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1873{
1874#if CONFIG_MACF
1875 kauth_cred_t cred;
1876 net_thread_marks_t marks;
1877#endif
1878 kauth_cred_t *credp;
1879 errno_t error;
1880
1881 credp = NULL;
1882#if CONFIG_MACF
1883 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
1884 cred = kauth_cred_proc_ref(current_proc());
1885 credp = &cred;
1886#else
1887 credp = NULL;
1888#endif
1889
1890 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1891 credp);
1892
1893#if CONFIG_MACF
1894 kauth_cred_unref(credp);
1895 net_thread_marks_pop(marks);
1896#endif
1897
1898 return (error);
1899}
1900
1901static errno_t
1902ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
1903 size_t lladdr_len, u_char new_type, int apply_type)
1904{
1905 struct ifaddr *ifa;
1906 errno_t error = 0;
1907
1908 if (interface == NULL)
1909 return (EINVAL);
1910
1911 ifnet_head_lock_shared();
1912 ifnet_lock_exclusive(interface);
1913 if (lladdr_len != 0 &&
1914 (lladdr_len != interface->if_addrlen || lladdr == 0)) {
1915 ifnet_lock_done(interface);
1916 ifnet_head_done();
1917 return (EINVAL);
1918 }
1919 ifa = ifnet_addrs[interface->if_index - 1];
1920 if (ifa != NULL) {
1921 struct sockaddr_dl *sdl;
1922
1923 IFA_LOCK_SPIN(ifa);
1924 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
1925 if (lladdr_len != 0) {
1926 bcopy(lladdr, LLADDR(sdl), lladdr_len);
1927 } else {
1928 bzero(LLADDR(sdl), interface->if_addrlen);
1929 }
1930 sdl->sdl_alen = lladdr_len;
1931
1932 if (apply_type) {
1933 sdl->sdl_type = new_type;
1934 }
1935 IFA_UNLOCK(ifa);
1936 } else {
1937 error = ENXIO;
1938 }
1939 ifnet_lock_done(interface);
1940 ifnet_head_done();
1941
1942 /* Generate a kernel event */
1943 if (error == 0) {
1944 dlil_post_msg(interface, KEV_DL_SUBCLASS,
1945 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
1946 }
1947
1948 return (error);
1949}
1950
1951errno_t
1952ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
1953{
1954 return (ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0));
1955}
1956
1957errno_t
1958ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
1959 size_t lladdr_len, u_char type)
1960{
1961 return (ifnet_set_lladdr_internal(interface, lladdr,
1962 lladdr_len, type, 1));
1963}
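
/*
 * Usage sketch (illustrative only): setting a six-byte Ethernet-style link
 * address.  The length must match the interface's if_addrlen, and the
 * address value here is a hypothetical placeholder.
 *
 *	static const u_char lladdr[6] =
 *	    { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
 *
 *	(void) ifnet_set_lladdr(ifp, lladdr, sizeof (lladdr));
 */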
1964
1965errno_t
1966ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
1967 ifmultiaddr_t *ifmap)
1968{
1969 if (interface == NULL || maddr == NULL)
1970 return (EINVAL);
1971
1972 /* Don't let users screw up protocols' entries. */
1973 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
1974 return (EINVAL);
1975
1976 return (if_addmulti_anon(interface, maddr, ifmap));
1977}
1978
1979errno_t
1980ifnet_remove_multicast(ifmultiaddr_t ifma)
1981{
1982 struct sockaddr *maddr;
1983
1984 if (ifma == NULL)
1985 return (EINVAL);
1986
1987 maddr = ifma->ifma_addr;
1988 /* Don't let users screw up protocols' entries. */
1989 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
1990 return (EINVAL);
1991
1992 return (if_delmulti_anon(ifma->ifma_ifp, maddr));
1993}
1994
1995errno_t
1996ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
1997{
1998 int count = 0;
1999 int cmax = 0;
2000 struct ifmultiaddr *addr;
2001
2002 if (ifp == NULL || addresses == NULL)
2003 return (EINVAL);
2004
2005 ifnet_lock_shared(ifp);
2006 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2007 cmax++;
2008 }
2009
2010 MALLOC(*addresses, ifmultiaddr_t *, sizeof (ifmultiaddr_t) * (cmax + 1),
2011 M_TEMP, M_NOWAIT);
2012 if (*addresses == NULL) {
2013 ifnet_lock_done(ifp);
2014 return (ENOMEM);
2015 }
2016
2017 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2018 if (count + 1 > cmax)
2019 break;
2020 (*addresses)[count] = (ifmultiaddr_t)addr;
2021 ifmaddr_reference((*addresses)[count]);
2022 count++;
2023 }
2024 (*addresses)[cmax] = NULL;
2025 ifnet_lock_done(ifp);
2026
2027 return (0);
2028}
2029
2030void
2031ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2032{
2033 int i;
2034
2035 if (addresses == NULL)
2036 return;
2037
2038 for (i = 0; addresses[i] != NULL; i++)
2039 ifmaddr_release(addresses[i]);
2040
2041 FREE(addresses, M_TEMP);
2042}
2043
2044errno_t
2045ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2046{
2047 struct ifnet *ifp;
2048 int namelen;
2049
2050 if (ifname == NULL)
2051 return (EINVAL);
2052
2053 namelen = strlen(ifname);
2054
2055 *ifpp = NULL;
2056
2057 ifnet_head_lock_shared();
2058 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2059 struct ifaddr *ifa;
2060 struct sockaddr_dl *ll_addr;
2061
2062 ifa = ifnet_addrs[ifp->if_index - 1];
2063 if (ifa == NULL)
2064 continue;
2065
2066 IFA_LOCK(ifa);
2067 ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2068
2069 if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
2070 ifname, ll_addr->sdl_nlen) == 0) {
2071 IFA_UNLOCK(ifa);
2072 *ifpp = ifp;
2073 ifnet_reference(*ifpp);
2074 break;
2075 }
2076 IFA_UNLOCK(ifa);
2077 }
2078 ifnet_head_done();
2079
2080 return ((ifp == NULL) ? ENXIO : 0);
2081}
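/*
 * Illustrative usage (not part of the original source): looking up an
 * interface by its BSD name.  The name "en0" is only an example; on success
 * the caller owns a reference that must be dropped with ifnet_release().
 *
 *	static void
 *	example_lookup(void)
 *	{
 *		ifnet_t ifp;
 *
 *		if (ifnet_find_by_name("en0", &ifp) == 0) {
 *			printf("found %s%u, index %u\n", ifnet_name(ifp),
 *			    ifnet_unit(ifp), ifnet_index(ifp));
 *			ifnet_release(ifp);
 *		}
 *	}
 */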
2082
2083errno_t
2084ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2085{
2086 return (ifnet_list_get_common(family, FALSE, list, count));
2087}
2088
2089__private_extern__ errno_t
2090ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2091{
2092 return (ifnet_list_get_common(family, TRUE, list, count));
2093}
2094
2095struct ifnet_list {
2096 SLIST_ENTRY(ifnet_list) ifl_le;
2097 struct ifnet *ifl_ifp;
2098};
2099
2100static errno_t
2101ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
2102 u_int32_t *count)
2103{
2104#pragma unused(get_all)
2105 SLIST_HEAD(, ifnet_list) ifl_head;
2106 struct ifnet_list *ifl, *ifl_tmp;
2107 struct ifnet *ifp;
2108 int cnt = 0;
2109 errno_t err = 0;
2110
2111 SLIST_INIT(&ifl_head);
2112
2113 if (list == NULL || count == NULL) {
2114 err = EINVAL;
2115 goto done;
2116 }
2117 *count = 0;
2118 *list = NULL;
2119
2120 ifnet_head_lock_shared();
2121 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2122 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2123 MALLOC(ifl, struct ifnet_list *, sizeof (*ifl),
2124 M_TEMP, M_NOWAIT);
2125 if (ifl == NULL) {
2126 ifnet_head_done();
2127 err = ENOMEM;
2128 goto done;
2129 }
2130 ifl->ifl_ifp = ifp;
2131 ifnet_reference(ifp);
2132 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2133 ++cnt;
2134 }
2135 }
2136 ifnet_head_done();
2137
2138 if (cnt == 0) {
2139 err = ENXIO;
2140 goto done;
2141 }
2142
2143 MALLOC(*list, ifnet_t *, sizeof (ifnet_t) * (cnt + 1),
2144 M_TEMP, M_NOWAIT);
2145 if (*list == NULL) {
2146 err = ENOMEM;
2147 goto done;
2148 }
2149 bzero(*list, sizeof (ifnet_t) * (cnt + 1));
2150 *count = cnt;
2151
2152done:
2153 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2154 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2155 if (err == 0)
2156 (*list)[--cnt] = ifl->ifl_ifp;
2157 else
2158 ifnet_release(ifl->ifl_ifp);
2159 FREE(ifl, M_TEMP);
2160 }
2161
2162 return (err);
2163}
2164
2165void
2166ifnet_list_free(ifnet_t *interfaces)
2167{
2168 int i;
2169
2170 if (interfaces == NULL)
2171 return;
2172
2173 for (i = 0; interfaces[i]; i++)
2174 ifnet_release(interfaces[i]);
2175
2176 FREE(interfaces, M_TEMP);
2177}
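/*
 * Illustrative usage (not part of the original source): enumerating the
 * attached interfaces of one family.  The returned array is NULL-terminated
 * and every entry is referenced, so a single ifnet_list_free() call releases
 * the references and the array itself.
 *
 *	static void
 *	example_list_ethernet(void)
 *	{
 *		ifnet_t *list;
 *		u_int32_t count, i;
 *
 *		if (ifnet_list_get(IFNET_FAMILY_ETHERNET, &list, &count) != 0)
 *			return;
 *
 *		for (i = 0; i < count; i++)
 *			printf("%s%u is attached\n", ifnet_name(list[i]),
 *			    ifnet_unit(list[i]));
 *
 *		ifnet_list_free(list);
 *	}
 */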
2178
2179void
2180ifnet_transmit_burst_start(ifnet_t ifp, mbuf_t pkt)
2181{
2182#if MEASURE_BW
2183 uint32_t orig_flags;
2184
2185 if (ifp == NULL || !(pkt->m_flags & M_PKTHDR))
2186 return;
2187
2188 orig_flags = OSBitOrAtomic(IF_MEASURED_BW_INPROGRESS,
2189 &ifp->if_bw.flags);
2190 if (orig_flags & IF_MEASURED_BW_INPROGRESS) {
2191 /* There is already a measurement in progress; skip this one */
2192 return;
2193 }
2194
2195 ifp->if_bw.start_seq = pkt->m_pkthdr.pkt_bwseq;
2196 ifp->if_bw.start_ts = mach_absolute_time();
2197#else /*!MEASURE_BW */
2198#pragma unused(ifp, pkt)
2199#endif /* !MEASURE_BW */
2200}
2201
2202void
2203ifnet_transmit_burst_end(ifnet_t ifp, mbuf_t pkt)
2204{
2205#if MEASURE_BW
2206 uint64_t oseq, ots, bytes, ts, t;
2207 uint32_t flags;
2208
2209 if (ifp == NULL || !(pkt->m_flags & M_PKTHDR))
2210 return;
2211
2212 flags = OSBitOrAtomic(IF_MEASURED_BW_CALCULATION, &ifp->if_bw.flags);
2213
2214 /* If a calculation is already in progress, just return */
2215 if (flags & IF_MEASURED_BW_CALCULATION)
2216 return;
2217
2218 /* Check if a measurement was started at all */
2219 if (!(flags & IF_MEASURED_BW_INPROGRESS)) {
2220 /*
2221 * It is an error to call burst_end before burst_start.
2222 * Reset the calculation flag and return.
2223 */
2224 goto done;
2225 }
2226
2227 oseq = pkt->m_pkthdr.pkt_bwseq;
2228 ots = mach_absolute_time();
2229
2230 if (ifp->if_bw.start_seq > 0 && oseq > ifp->if_bw.start_seq) {
2231 ts = ots - ifp->if_bw.start_ts;
2232 if (ts > 0) {
2233 absolutetime_to_nanoseconds(ts, &t);
2234 bytes = oseq - ifp->if_bw.start_seq;
2235 ifp->if_bw.bytes = bytes;
2236 ifp->if_bw.ts = ts;
2237
2238 if (t > 0) {
2239 uint64_t bw = 0;
2240
2241 /* Compute bandwidth as bytes/ms */
2242 bw = (bytes * NSEC_PER_MSEC) / t;
2243 if (bw > 0) {
2244 if (ifp->if_bw.bw > 0) {
2245 u_int32_t shft;
2246
2247 shft = if_bw_smoothing_val;
2248 /* Compute EWMA of bw */
2249 ifp->if_bw.bw = (bw +
2250 ((ifp->if_bw.bw << shft) -
2251 ifp->if_bw.bw)) >> shft;
2252 } else {
2253 ifp->if_bw.bw = bw;
2254 }
2255 }
2256 }
2257 ifp->if_bw.last_seq = oseq;
2258 ifp->if_bw.last_ts = ots;
2259 }
2260 }
2261
2262done:
2263 flags = ~(IF_MEASURED_BW_INPROGRESS | IF_MEASURED_BW_CALCULATION);
2264 OSBitAndAtomic(flags, &ifp->if_bw.flags);
2265#else /* !MEASURE_BW */
2266#pragma unused(ifp, pkt)
2267#endif /* !MEASURE_BW */
2268}
2269
2270/****************************************************************************/
2271/* ifaddr_t accessors */
2272/****************************************************************************/
2273
2274errno_t
2275ifaddr_reference(ifaddr_t ifa)
2276{
2277 if (ifa == NULL)
2278 return (EINVAL);
2279
2280 IFA_ADDREF(ifa);
2281 return (0);
2282}
2283
2284errno_t
2285ifaddr_release(ifaddr_t ifa)
2286{
2287 if (ifa == NULL)
2288 return (EINVAL);
2289
2290 IFA_REMREF(ifa);
2291 return (0);
2292}
2293
2294sa_family_t
2295ifaddr_address_family(ifaddr_t ifa)
2296{
2297 sa_family_t family = 0;
2298
2299 if (ifa != NULL) {
2300 IFA_LOCK_SPIN(ifa);
2301 if (ifa->ifa_addr != NULL)
2302 family = ifa->ifa_addr->sa_family;
2303 IFA_UNLOCK(ifa);
2304 }
2305 return (family);
2306}
2307
2308errno_t
2309ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2310{
2311 u_int32_t copylen;
2312
2313 if (ifa == NULL || out_addr == NULL)
2314 return (EINVAL);
2315
2316 IFA_LOCK_SPIN(ifa);
2317 if (ifa->ifa_addr == NULL) {
2318 IFA_UNLOCK(ifa);
2319 return (ENOTSUP);
2320 }
2321
2322 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2323 ifa->ifa_addr->sa_len : addr_size;
2324 bcopy(ifa->ifa_addr, out_addr, copylen);
2325
2326 if (ifa->ifa_addr->sa_len > addr_size) {
2327 IFA_UNLOCK(ifa);
2328 return (EMSGSIZE);
2329 }
2330
2331 IFA_UNLOCK(ifa);
2332 return (0);
2333}
2334
2335errno_t
2336ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2337{
2338 u_int32_t copylen;
2339
2340 if (ifa == NULL || out_addr == NULL)
2341 return (EINVAL);
2342
2343 IFA_LOCK_SPIN(ifa);
2344 if (ifa->ifa_dstaddr == NULL) {
2345 IFA_UNLOCK(ifa);
2346 return (ENOTSUP);
2347 }
2348
2349 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2350 ifa->ifa_dstaddr->sa_len : addr_size;
2351 bcopy(ifa->ifa_dstaddr, out_addr, copylen);
2352
2353 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2354 IFA_UNLOCK(ifa);
2355 return (EMSGSIZE);
2356 }
2357
2358 IFA_UNLOCK(ifa);
2359 return (0);
2360}
2361
2362errno_t
2363ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2364{
2365 u_int32_t copylen;
2366
2367 if (ifa == NULL || out_addr == NULL)
2368 return (EINVAL);
2369
2370 IFA_LOCK_SPIN(ifa);
2371 if (ifa->ifa_netmask == NULL) {
2372 IFA_UNLOCK(ifa);
2373 return (ENOTSUP);
2374 }
2375
2376 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2377 ifa->ifa_netmask->sa_len : addr_size;
2378 bcopy(ifa->ifa_netmask, out_addr, copylen);
2379
2380 if (ifa->ifa_netmask->sa_len > addr_size) {
2381 IFA_UNLOCK(ifa);
2382 return (EMSGSIZE);
2383 }
2384
2385 IFA_UNLOCK(ifa);
2386 return (0);
2387}
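/*
 * Illustrative usage (not part of the original source): copying the address
 * and netmask of an ifaddr_t into caller-owned storage.  Passing a
 * sockaddr_storage guarantees the buffer is large enough, so EMSGSIZE is not
 * expected here; how the caller obtained the ifaddr_t is outside this sketch.
 *
 *	static void
 *	example_read_ifaddr(ifaddr_t ifa)
 *	{
 *		struct sockaddr_storage addr, mask;
 *
 *		if (ifaddr_address(ifa, (struct sockaddr *)&addr,
 *		    sizeof (addr)) != 0)
 *			return;
 *		if (ifaddr_netmask(ifa, (struct sockaddr *)&mask,
 *		    sizeof (mask)) != 0)
 *			return;
 *		printf("address family %u, netmask length %u\n",
 *		    addr.ss_family, mask.ss_len);
 *	}
 */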
2388
2389ifnet_t
2390ifaddr_ifnet(ifaddr_t ifa)
2391{
2392 struct ifnet *ifp;
2393
2394 if (ifa == NULL)
2395 return (NULL);
2396
2397 /* ifa_ifp is set once at creation time; it is never changed */
2398 ifp = ifa->ifa_ifp;
2399
2400 return (ifp);
2401}
2402
2403ifaddr_t
2404ifaddr_withaddr(const struct sockaddr *address)
2405{
2406 if (address == NULL)
2407 return (NULL);
2408
2409 return (ifa_ifwithaddr(address));
2410}
2411
2412ifaddr_t
2413ifaddr_withdstaddr(const struct sockaddr *address)
2414{
2415 if (address == NULL)
2416 return (NULL);
2417
2418 return (ifa_ifwithdstaddr(address));
2419}
2420
2421ifaddr_t
2422ifaddr_withnet(const struct sockaddr *net)
2423{
2424 if (net == NULL)
2425 return (NULL);
2426
2427 return (ifa_ifwithnet(net));
2428}
2429
2430ifaddr_t
2431ifaddr_withroute(int flags, const struct sockaddr *destination,
2432 const struct sockaddr *gateway)
2433{
2434 if (destination == NULL || gateway == NULL)
2435 return (NULL);
2436
2437 return (ifa_ifwithroute(flags, destination, gateway));
2438}
2439
2440ifaddr_t
2441ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2442{
2443 if (addr == NULL || interface == NULL)
2444 return (NULL);
2445
2446 return (ifaof_ifpforaddr(addr, interface));
2447}
2448
2449errno_t
2450ifmaddr_reference(ifmultiaddr_t ifmaddr)
2451{
2452 if (ifmaddr == NULL)
2453 return (EINVAL);
2454
2455 IFMA_ADDREF(ifmaddr);
2456 return (0);
2457}
2458
2459errno_t
2460ifmaddr_release(ifmultiaddr_t ifmaddr)
2461{
2462 if (ifmaddr == NULL)
2463 return (EINVAL);
2464
2465 IFMA_REMREF(ifmaddr);
2466 return (0);
2467}
2468
2469errno_t
2470ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2471 u_int32_t addr_size)
2472{
2473 u_int32_t copylen;
2474
2475 if (ifma == NULL || out_addr == NULL)
2476 return (EINVAL);
2477
2478 IFMA_LOCK(ifma);
2479 if (ifma->ifma_addr == NULL) {
2480 IFMA_UNLOCK(ifma);
2481 return (ENOTSUP);
2482 }
2483
2484 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2485 ifma->ifma_addr->sa_len : addr_size);
2486 bcopy(ifma->ifma_addr, out_addr, copylen);
2487
2488 if (ifma->ifma_addr->sa_len > addr_size) {
2489 IFMA_UNLOCK(ifma);
2490 return (EMSGSIZE);
2491 }
2492 IFMA_UNLOCK(ifma);
2493 return (0);
2494}
2495
2496errno_t
2497ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2498 u_int32_t addr_size)
2499{
2500 struct ifmultiaddr *ifma_ll;
2501
2502 if (ifma == NULL || out_addr == NULL)
2503 return (EINVAL);
2504 if ((ifma_ll = ifma->ifma_ll) == NULL)
2505 return (ENOTSUP);
2506
2507 return (ifmaddr_address(ifma_ll, out_addr, addr_size));
2508}
2509
2510ifnet_t
2511ifmaddr_ifnet(ifmultiaddr_t ifma)
2512{
2513 return ((ifma == NULL) ? NULL : ifma->ifma_ifp);
2514}
2515
2516/******************************************************************************/
2517/* interface cloner */
2518/******************************************************************************/
2519
2520errno_t
2521ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
2522 if_clone_t *ifcloner)
2523{
2524 errno_t error = 0;
2525 struct if_clone *ifc = NULL;
2526 size_t namelen;
2527
2528 if (cloner_params == NULL || ifcloner == NULL ||
2529 cloner_params->ifc_name == NULL ||
2530 cloner_params->ifc_create == NULL ||
2531 cloner_params->ifc_destroy == NULL ||
2532 (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
2533 error = EINVAL;
2534 goto fail;
2535 }
2536
2537 if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
2538 printf("%s: already a cloner for %s\n", __func__,
2539 cloner_params->ifc_name);
2540 error = EEXIST;
2541 goto fail;
2542 }
2543
2544 /* Make room for name string */
2545 ifc = _MALLOC(sizeof (struct if_clone) + IFNAMSIZ + 1, M_CLONE,
2546 M_WAITOK | M_ZERO);
2547 if (ifc == NULL) {
2548 printf("%s: _MALLOC failed\n", __func__);
2549 error = ENOBUFS;
2550 goto fail;
2551 }
2552 strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
2553 ifc->ifc_name = (char *)(ifc + 1);
2554 ifc->ifc_namelen = namelen;
2555 ifc->ifc_maxunit = IF_MAXUNIT;
2556 ifc->ifc_create = cloner_params->ifc_create;
2557 ifc->ifc_destroy = cloner_params->ifc_destroy;
2558
2559 error = if_clone_attach(ifc);
2560 if (error != 0) {
2561 printf("%s: if_clone_attach failed %d\n", __func__, error);
2562 goto fail;
2563 }
2564 *ifcloner = ifc;
2565
2566 return (0);
2567fail:
2568 if (ifc != NULL)
2569 FREE(ifc, M_CLONE);
2570 return (error);
2571}
2572
2573errno_t
2574ifnet_clone_detach(if_clone_t ifcloner)
2575{
2576 errno_t error = 0;
2577 struct if_clone *ifc = ifcloner;
2578
2579 if (ifc == NULL || ifc->ifc_name == NULL)
2580 return (EINVAL);
2581
2582 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2583 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2584 error = EINVAL;
2585 goto fail;
2586 }
2587
2588 if_clone_detach(ifc);
2589
2590 FREE(ifc, M_CLONE);
2591
2592fail:
2593 return (error);
2594}
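/*
 * Illustrative usage (not part of the original source): registering a
 * hypothetical "foo" cloner.  The create/destroy callbacks are assumed to
 * match the ifnet_clone_create_func/ifnet_clone_destroy_func typedefs from
 * the KPI header; foo_clone_create() and foo_clone_destroy() are placeholders
 * supplied by the caller, not existing functions.
 *
 *	static if_clone_t foo_cloner = NULL;
 *
 *	static errno_t
 *	example_register_cloner(void)
 *	{
 *		struct ifnet_clone_params params;
 *
 *		bzero(&params, sizeof (params));
 *		params.ifc_name = "foo";
 *		params.ifc_create = foo_clone_create;
 *		params.ifc_destroy = foo_clone_destroy;
 *
 *		return (ifnet_clone_attach(&params, &foo_cloner));
 *	}
 *
 * Teardown mirrors this with ifnet_clone_detach(foo_cloner).
 */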
2595
2596/******************************************************************************/
2597/* misc */
2598/******************************************************************************/
2599
2600errno_t
2601ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
2602 u_int32_t flags, u_int8_t *bitfield)
2603{
2604 u_int32_t ifindex;
2605 u_int32_t inp_flags = 0;
2606
2607 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ?
2608 INPCB_GET_PORTS_USED_WILDCARDOK : 0);
2609 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ?
2610 INPCB_GET_PORTS_USED_NOWAKEUPOK : 0);
2611 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY) ?
2612 INPCB_GET_PORTS_USED_RECVANYIFONLY : 0);
2613 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY) ?
2614 INPCB_GET_PORTS_USED_EXTBGIDLEONLY : 0);
2615 inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY) ?
2616 INPCB_GET_PORTS_USED_ACTIVEONLY : 0);
2617
2618 if (bitfield == NULL)
2619 return (EINVAL);
2620
2621 switch (protocol) {
2622 case PF_UNSPEC:
2623 case PF_INET:
2624 case PF_INET6:
2625 break;
2626 default:
2627 return (EINVAL);
2628 }
2629
2630	/* the caller's bit string must be large enough to hold all 16-bit port values */
2631 bzero(bitfield, bitstr_size(65536));
2632
2633 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2634
2635 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY))
2636 udp_get_ports_used(ifindex, protocol, inp_flags, bitfield);
2637
2638 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY))
2639 tcp_get_ports_used(ifindex, protocol, inp_flags, bitfield);
2640
2641 return (0);
2642}
2643
2644errno_t
2645ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
2646{
2647 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
2648 return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
2649 bitfield));
2650}
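/*
 * Illustrative usage (not part of the original source): asking which local
 * ports are in use on an interface.  The bitfield must be large enough for
 * every 16-bit port, i.e. bitstr_size(65536) bytes; bit_test() from
 * <sys/bitstring.h> is used here to probe a single port.
 *
 *	static boolean_t
 *	example_port_in_use(ifnet_t ifp, u_int16_t port)
 *	{
 *		u_int8_t *bitfield;
 *		boolean_t in_use = FALSE;
 *
 *		MALLOC(bitfield, u_int8_t *, bitstr_size(65536), M_TEMP,
 *		    M_WAITOK | M_ZERO);
 *		if (bitfield == NULL)
 *			return (FALSE);
 *
 *		if (ifnet_get_local_ports(ifp, bitfield) == 0)
 *			in_use = bit_test(bitfield, port) ? TRUE : FALSE;
 *
 *		FREE(bitfield, M_TEMP);
 *		return (in_use);
 *	}
 */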
2651
2652errno_t
2653ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr* sa, int32_t rssi,
2654 int lqm, int npm, u_int8_t srvinfo[48])
2655{
2656 if (ifp == NULL || sa == NULL || srvinfo == NULL)
2657 return (EINVAL);
2658 if (sa->sa_len > sizeof(struct sockaddr_storage))
2659 return (EINVAL);
2660 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2661 return (EINVAL);
2662
2663 dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
2664 return (0);
2665}
2666
2667errno_t
2668ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr* sa)
2669{
2670 if (ifp == NULL || sa == NULL)
2671 return (EINVAL);
2672 if (sa->sa_len > sizeof(struct sockaddr_storage))
2673 return (EINVAL);
2674 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
2675 return (EINVAL);
2676
2677 dlil_node_absent(ifp, sa);
2678 return (0);
2679}
2680
2681errno_t
2682ifnet_notice_master_elected(ifnet_t ifp)
2683{
2684 if (ifp == NULL)
2685 return (EINVAL);
2686
2687 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
2688 return (0);
2689}
2690
2691errno_t
2692ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
2693{
2694#pragma unused(ifp, m, val)
2695 /* Dummy function to be implemented XXX */
2696 return (0);
2697}
2698
2699errno_t
2700ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
2701 u_int8_t info[IFNET_MODARGLEN])
2702{
2703 if (ifp == NULL || modid == NULL)
2704 return (EINVAL);
2705
2706 dlil_report_issues(ifp, modid, info);
2707 return (0);
2708}
2709
2710errno_t
2711ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
2712{
2713 ifnet_t odifp = NULL;
2714
2715 if (ifp == NULL)
2716 return (EINVAL);
2717 else if (!ifnet_is_attached(ifp, 1))
2718 return (ENXIO);
2719
2720 ifnet_lock_exclusive(ifp);
2721 odifp = ifp->if_delegated.ifp;
2722 if (odifp != NULL && odifp == delegated_ifp) {
2723 /* delegate info is unchanged; nothing more to do */
2724 ifnet_lock_done(ifp);
2725 goto done;
2726 }
2727 bzero(&ifp->if_delegated, sizeof (ifp->if_delegated));
2728 if (delegated_ifp != NULL && ifp != delegated_ifp) {
2729 ifp->if_delegated.ifp = delegated_ifp;
2730 ifnet_reference(delegated_ifp);
2731 ifp->if_delegated.type = delegated_ifp->if_type;
2732 ifp->if_delegated.family = delegated_ifp->if_family;
2733 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
2734 ifp->if_delegated.expensive =
2735 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
2736 printf("%s: is now delegating %s (type 0x%x, family %u, "
2737 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
2738 delegated_ifp->if_type, delegated_ifp->if_family,
2739 delegated_ifp->if_subfamily);
2740 }
2741 ifnet_lock_done(ifp);
2742
2743 if (odifp != NULL) {
2744 if (odifp != delegated_ifp) {
2745 printf("%s: is no longer delegating %s\n",
2746 ifp->if_xname, odifp->if_xname);
2747 }
2748 ifnet_release(odifp);
2749 }
2750
2751 /* Generate a kernel event */
2752 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);
2753
2754done:
2755 /* Release the io ref count */
2756 ifnet_decr_iorefcnt(ifp);
2757
2758 return (0);
2759}
2760
2761errno_t
2762ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
2763{
2764 if (ifp == NULL || pdelegated_ifp == NULL)
2765 return (EINVAL);
2766 else if (!ifnet_is_attached(ifp, 1))
2767 return (ENXIO);
2768
2769 ifnet_lock_shared(ifp);
2770 if (ifp->if_delegated.ifp != NULL)
2771 ifnet_reference(ifp->if_delegated.ifp);
2772 *pdelegated_ifp = ifp->if_delegated.ifp;
2773 ifnet_lock_done(ifp);
2774
2775 /* Release the io ref count */
2776 ifnet_decr_iorefcnt(ifp);
2777
2778 return (0);
2779}
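/*
 * Illustrative usage (not part of the original source): reading the delegate
 * of an interface.  A non-NULL result carries a reference that the caller
 * must drop with ifnet_release().
 *
 *	static void
 *	example_check_delegate(ifnet_t ifp)
 *	{
 *		ifnet_t delegate = NULL;
 *
 *		if (ifnet_get_delegate(ifp, &delegate) != 0)
 *			return;
 *
 *		if (delegate != NULL) {
 *			printf("%s%u delegates to %s%u\n",
 *			    ifnet_name(ifp), ifnet_unit(ifp),
 *			    ifnet_name(delegate), ifnet_unit(delegate));
 *			ifnet_release(delegate);
 *		}
 *	}
 */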
2780
2781extern u_int32_t
2782key_fill_offload_frames_for_savs(ifnet_t ifp,
2783 struct ifnet_keepalive_offload_frame *frames_array,
2784 u_int32_t frames_array_count, size_t frame_data_offset);
2785
2786extern void
2787udp_fill_keepalive_offload_frames(ifnet_t ifp,
2788 struct ifnet_keepalive_offload_frame *frames_array,
2789 u_int32_t frames_array_count, size_t frame_data_offset,
2790 u_int32_t *used_frames_count);
2791
2792errno_t
2793ifnet_get_keepalive_offload_frames(ifnet_t ifp,
2794 struct ifnet_keepalive_offload_frame *frames_array,
2795 u_int32_t frames_array_count, size_t frame_data_offset,
2796 u_int32_t *used_frames_count)
2797{
2798 if (frames_array == NULL || used_frames_count == NULL)
2799 return (EINVAL);
2800
2801 /* frame_data_offset should be 32-bit aligned */
2802 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t))
2803 != frame_data_offset)
2804 return (EINVAL);
2805
2806 *used_frames_count = 0;
2807 if (frames_array_count == 0)
2808 return (0);
2809
2810 /* First collect IPSec related keep-alive frames */
2811 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
2812 frames_array, frames_array_count, frame_data_offset);
2813
2814 /* If there is more room, collect other UDP keep-alive frames */
2815 if (*used_frames_count < frames_array_count)
2816 udp_fill_keepalive_offload_frames(ifp, frames_array,
2817 frames_array_count, frame_data_offset,
2818 used_frames_count);
2819
2820 VERIFY(*used_frames_count <= frames_array_count);
2821 return (0);
2822}
2823
2824errno_t
2825ifnet_link_status_report(ifnet_t ifp, const void *buffer,
2826 size_t buffer_len)
2827{
2828 struct if_link_status *ifsr;
2829 errno_t err = 0;
2830
2831 if (ifp == NULL || buffer == NULL || buffer_len == 0)
2832 return (EINVAL);
2833
2834 ifnet_lock_shared(ifp);
2835
2836 /*
2837	 * Make sure that the interface is attached; there is no need to
2838	 * take a reference because this call comes from the driver.
2839 */
2840 if (!ifnet_is_attached(ifp, 0)) {
2841 ifnet_lock_done(ifp);
2842 return (ENXIO);
2843 }
2844
2845 lck_rw_lock_exclusive(&ifp->if_link_status_lock);
2846
2847 /*
2848 * If this is the first status report then allocate memory
2849 * to store it.
2850 */
2851 if (ifp->if_link_status == NULL) {
2852 MALLOC(ifp->if_link_status, struct if_link_status *,
2853 sizeof(struct if_link_status), M_TEMP, M_ZERO);
2854 if (ifp->if_link_status == NULL) {
2855 err = ENOMEM;
2856 goto done;
2857 }
2858 }
2859
2860 ifsr = __DECONST(struct if_link_status *, buffer);
2861
2862 if (ifp->if_type == IFT_CELLULAR) {
2863 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
2864 /*
2865 * Currently we have a single version -- if it does
2866 * not match, just return.
2867 */
2868 if (ifsr->ifsr_version !=
2869 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
2870 err = ENOTSUP;
2871 goto done;
2872 }
2873
2874 if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
2875 err = EINVAL;
2876 goto done;
2877 }
2878
2879 if_cell_sr =
2880 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2881 new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
2882 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2883 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2884 if_cell_sr->valid_bitmask = 0;
2885 bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr));
2886 } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
2887 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
2888
2889 /* Check version */
2890 if (ifsr->ifsr_version !=
2891 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
2892 err = ENOTSUP;
2893 goto done;
2894 }
2895
2896 if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
2897 err = EINVAL;
2898 goto done;
2899 }
2900
2901 if_wifi_sr =
2902 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2903 new_wifi_sr =
2904 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
2905 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
2906 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
2907 if_wifi_sr->valid_bitmask = 0;
2908 bcopy(new_wifi_sr, if_wifi_sr, sizeof(*if_wifi_sr));
2909
2910 /*
2911 * Update the bandwidth values if we got recent values
2912 * reported through the other KPI.
2913 */
2914 if (!(new_wifi_sr->valid_bitmask &
2915 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
2916 ifp->if_output_bw.max_bw > 0) {
2917 if_wifi_sr->valid_bitmask |=
2918 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
2919 if_wifi_sr->ul_max_bandwidth =
2920 ifp->if_output_bw.max_bw;
2921 }
2922 if (!(new_wifi_sr->valid_bitmask &
2923 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
2924 ifp->if_output_bw.eff_bw > 0) {
2925 if_wifi_sr->valid_bitmask |=
2926 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
2927 if_wifi_sr->ul_effective_bandwidth =
2928 ifp->if_output_bw.eff_bw;
2929 }
2930 if (!(new_wifi_sr->valid_bitmask &
2931 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
2932 ifp->if_input_bw.max_bw > 0) {
2933 if_wifi_sr->valid_bitmask |=
2934 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
2935 if_wifi_sr->dl_max_bandwidth =
2936 ifp->if_input_bw.max_bw;
2937 }
2938 if (!(new_wifi_sr->valid_bitmask &
2939 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
2940 ifp->if_input_bw.eff_bw > 0) {
2941 if_wifi_sr->valid_bitmask |=
2942 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
2943 if_wifi_sr->dl_effective_bandwidth =
2944 ifp->if_input_bw.eff_bw;
2945 }
2946 }
2947
2948done:
2949 lck_rw_done(&ifp->if_link_status_lock);
2950 ifnet_lock_done(ifp);
2951 return (err);
2952}
2953
2954/*************************************************************************/
2955/* Packet preamble */
2956/*************************************************************************/
2957
2958#define MAX_IF_PACKET_PREAMBLE_LEN 32
2959
2960errno_t
2961ifnet_set_packetpreamblelen(ifnet_t interface, u_int32_t len)
2962{
2963 errno_t err = 0;
2964
2965 if (interface == NULL || len > MAX_IF_PACKET_PREAMBLE_LEN) {
2966 err = EINVAL;
2967 goto done;
2968 }
2969 interface->if_data.ifi_preamblelen = len;
2970done:
2971 return (err);
2972}
2973
2974u_int32_t
2975ifnet_packetpreamblelen(ifnet_t interface)
2976{
2977 return ((interface == NULL) ? 0 : interface->if_data.ifi_preamblelen);
2978}
2979
2980u_int32_t
2981ifnet_maxpacketpreamblelen(void)
2982{
2983 return (MAX_IF_PACKET_PREAMBLE_LEN);
2984}
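/*
 * Illustrative usage (not part of the original source): a driver that needs
 * extra headroom in front of each outgoing frame could advertise it through
 * this KPI.  The 8-byte value is only an example and must not exceed
 * ifnet_maxpacketpreamblelen().
 *
 *	static void
 *	example_set_preamble(ifnet_t ifp)
 *	{
 *		u_int32_t want = 8;
 *
 *		if (want <= ifnet_maxpacketpreamblelen())
 *			(void) ifnet_set_packetpreamblelen(ifp, want);
 *	}
 */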