bsd/net/dlil.c (from xnu-792.22.5)
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG       DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END       DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT  DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT  DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))

#define MAX_DL_TAGS         16
#define MAX_DLIL_FILTERS    16
#define MAX_FRAME_TYPE_SIZE 4   /* LONGWORDS */
#define MAX_LINKADDR        4   /* LONGWORDS */
#define M_NKE               M_IFADDR

#define PFILT(x)    ((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x)    ((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF printf
#else
#define DLIL_PRINTF kprintf
#endif

enum {
    kProtoKPI_DLIL = 0,
    kProtoKPI_v1 = 1
};

struct if_proto {
    SLIST_ENTRY(if_proto) next_hash;
    int refcount;
    int detaching;
    struct ifnet *ifp;
    struct domain *dl_domain;
    protocol_family_t protocol_family;
    int proto_kpi;
    union {
        struct {
            dl_input_func dl_input;
            dl_pre_output_func dl_pre_output;
            dl_event_func dl_event;
            dl_offer_func dl_offer;
            dl_ioctl_func dl_ioctl;
            dl_detached_func dl_detached;
        } dlil;
        struct {
            proto_media_input input;
            proto_media_preout pre_output;
            proto_media_event event;
            proto_media_ioctl ioctl;
            proto_media_detached detached;
            proto_media_resolve_multi resolve_multi;
            proto_media_send_arp send_arp;
        } v1;
    } kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);

struct dlil_ifnet {
    /* ifnet and drvr_ext are used by the stack and drivers;
       drvr_ext extends the public ifnet and must follow dl_if */
    struct ifnet dl_if;                 /* public ifnet */

    /* dlil private fields */
    TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnets are linked together */
                                        /* (this is not the ifnet list) */
    void *if_uniqueid;                  /* unique id identifying the interface */
    size_t if_uniqueid_len;             /* length of the unique id */
    char if_namestorage[IFNAMSIZ];      /* interface name storage */
};

struct ifnet_filter {
    TAILQ_ENTRY(ifnet_filter) filt_next;
    ifnet_t filt_ifp;
    int filt_detaching;

    const char *filt_name;
    void *filt_cookie;
    protocol_family_t filt_protocol;
    iff_input_func filt_input;
    iff_output_func filt_output;
    iff_event_func filt_event;
    iff_ioctl_func filt_ioctl;
    iff_detached_func filt_detached;
};

struct if_family_str {
    TAILQ_ENTRY(if_family_str) if_fam_next;
    u_long if_family;
    int refcnt;
    int flags;

#define DLIL_SHUTDOWN 1

    int (*add_if)(struct ifnet *ifp);
    int (*del_if)(struct ifnet *ifp);
    int (*init_if)(struct ifnet *ifp);
    int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
    ifnet_del_proto_func del_proto;
    ifnet_ioctl_func ifmod_ioctl;
    int (*shutdown)(void);
};

struct proto_family_str {
    TAILQ_ENTRY(proto_family_str) proto_fam_next;
    u_long proto_family;
    u_long if_family;
    int usecnt;

    int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
    int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
    kIfNetUseCount_MayBeZero = 0,
    kIfNetUseCount_MustNotBeZero = 1
};

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str) if_family_head;
static TAILQ_HEAD(, proto_family_str) proto_family_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_mtx_t *proto_family_mutex;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t *dlil_input_lock;
__private_extern__ thread_t dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr

int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
    union {
        const void* cval;
        void* val;
    } ret;

    ret.cval = ptr;
    return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
    unsigned long new_value;
    unsigned long old_value;
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read == dlil_writer_waiting)
        panic("dlil_read_begin - thread is already a writer");

    do {
again:
        old_value = dlil_read_count;

        if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0) {
            tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
            goto again;
        }

        new_value = old_value + 1;
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

    uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    OSDecrementAtomic((UInt32*)&dlil_read_count);
    uth->dlil_incremented_read--;
    if (dlil_read_count == dlil_writer_waiting)
        wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != 0) {
        return EDEADLK;
    }
    lck_mtx_lock(dlil_mutex);
    OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
    if (dlil_read_count == dlil_writer_waiting) {
        uth->dlil_incremented_read = dlil_writer_waiting;
        return 0;
    }
    else {
        tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
        goto again;
    }
}

static void
dlil_write_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != dlil_writer_waiting)
        panic("dlil_write_end - thread is not a writer");
    OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
    lck_mtx_unlock(dlil_mutex);
    uth->dlil_incremented_read = 0;
    wakeup(&dlil_read_count);
}
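
/*
 * Illustrative sketch (not part of the original source): the usage
 * discipline for the reader/writer lock above.  Readers bracket
 * traversals with dlil_read_begin()/dlil_read_end(); writers must
 * handle EDEADLK, which means this thread already holds the read
 * lock and the change has to be deferred to the delayed-detach
 * thread.  Guarded by #if 0 because it is illustration only.
 */
#if 0 /* example only */
static void
dlil_lock_usage_sketch(struct ifnet *ifp)
{
    struct ifnet_filter *filter;

    /* Reader side: safe to walk dlil-protected lists */
    dlil_read_begin();
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        /* inspect, but do not modify, the filter list */
    }
    dlil_read_end();

    /* Writer side: may fail with EDEADLK if we are already a reader */
    if (dlil_write_begin() == EDEADLK) {
        /* defer the modification to the delayed detach thread */
        dlil_detach_waiting = 1;
        wakeup(&dlil_detach_waiting);
    } else {
        /* safe to modify dlil data structures here */
        dlil_write_end();
    }
}
#endif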

#define PROTO_HASH_SLOTS 0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
    switch (protocol_family) {
        case PF_INET:
            return 0;
        case PF_INET6:
            return 1;
        case PF_APPLETALK:
            return 2;
        case PF_VLAN:
            return 3;
        default:
            return 4;
    }
}

static
struct if_family_str *find_family_module(u_long if_family)
{
    struct if_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
        if (mod->if_family == (if_family & 0xffff))
            break;
    }

    return mod;
}

static
struct proto_family_str*
find_proto_module(u_long proto_family, u_long if_family)
{
    struct proto_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
        if ((mod->proto_family == (proto_family & 0xffff))
            && (mod->if_family == (if_family & 0xffff)))
            break;
    }

    return mod;
}

static struct if_proto*
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
    struct if_proto *proto = NULL;
    u_long i = proto_hash_value(protocol_family);
    if (ifp->if_proto_hash) {
        proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
    }

    while (proto && proto->protocol_family != protocol_family) {
        proto = SLIST_NEXT(proto, next_hash);
    }

    return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
    OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
    int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

    if (oldval == 1) { /* This was the last reference */
        FREE(proto, M_IFADDR);
    }
}

__private_extern__ void
ifnet_lock_assert(
    __unused struct ifnet *ifp,
    __unused int what)
{
#if IFNET_RW_LOCK
    /*
     * Not implemented for rw locks.
     *
     * Function exists so when/if we use mutex we can
     * enable this check.
     */
#else
    lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_shared(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_exclusive(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_done(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared(void)
{
    lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive(void)
{
    lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done(void)
{
    lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *ifbyfamily(u_long family, short unit)
{
    struct ifnet *ifp;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link)
        if ((family == ifp->if_family) && (ifp->if_unit == unit))
            break;
    ifnet_head_done();

    return ifp;
}

static int dlil_ifp_proto_count(struct ifnet * ifp)
{
    int count = 0;
    int i;

    if (ifp->if_proto_hash != NULL) {
        for (i = 0; i < PROTO_HASH_SLOTS; i++) {
            struct if_proto *proto;
            SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                count++;
            }
        }
    }

    return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
    struct net_event_data *event_data, u_long event_data_len)
{
    struct net_event_data ev_data;
    struct kev_msg ev_msg;

    /*
     * A net event always starts with a net_event_data structure,
     * but the caller can generate a simple net event or
     * provide a longer event structure to post.
     */

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = event_subclass;
    ev_msg.event_code = event_code;

    if (event_data == 0) {
        event_data = &ev_data;
        event_data_len = sizeof(struct net_event_data);
    }

    strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
    event_data->if_family = ifp->if_family;
    event_data->if_unit = (unsigned long) ifp->if_unit;

    ev_msg.dv[0].data_length = event_data_len;
    ev_msg.dv[0].data_ptr = event_data;
    ev_msg.dv[1].data_length = 0;

    dlil_event_internal(ifp, &ev_msg);
}
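
/*
 * Illustrative sketch (not part of the original source): posting a
 * simple event through dlil_post_msg.  Passing a NULL event_data lets
 * dlil_post_msg supply a plain net_event_data header itself.  The
 * KEV_DL_LINK_ON event code is just an example here; guarded by
 * #if 0 because it is illustration only.
 */
#if 0 /* example only */
static void
dlil_post_msg_usage_sketch(struct ifnet *ifp)
{
    /* post "link up" for ifp with no extra payload */
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
}
#endif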

void dlil_init(void);
void
dlil_init(void)
{
    lck_grp_attr_t *grp_attributes = 0;
    lck_attr_t *lck_attributes = 0;
    lck_grp_t *input_lock_grp = 0;

    TAILQ_INIT(&dlil_ifnet_head);
    TAILQ_INIT(&if_family_head);
    TAILQ_INIT(&proto_family_head);
    TAILQ_INIT(&ifnet_head);

    /* Setup the lock groups we will use */
    grp_attributes = lck_grp_attr_alloc_init();

    dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
    ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
    input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
    lck_grp_attr_free(grp_attributes);
    grp_attributes = 0;

    /* Setup the lock attributes we will use */
    lck_attributes = lck_attr_alloc_init();

    ifnet_lock_attr = lck_attr_alloc_init();

    dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
    input_lock_grp = 0;

    ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
    proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

    lck_attr_free(lck_attributes);
    lck_attributes = 0;

    /*
     * Start up the dlil input thread once everything is initialized
     */
    (void) kernel_thread(kernel_task, dlil_input_thread);
    (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
    struct ifnet *ifp,
    const struct iff_filter *if_filter,
    interface_filter_t *filter_ref)
{
    int retval = 0;
    struct ifnet_filter *filter;

    MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
    if (filter == NULL)
        return ENOMEM;
    bzero(filter, sizeof(*filter));

    filter->filt_ifp = ifp;
    filter->filt_cookie = if_filter->iff_cookie;
    filter->filt_name = if_filter->iff_name;
    filter->filt_protocol = if_filter->iff_protocol;
    filter->filt_input = if_filter->iff_input;
    filter->filt_output = if_filter->iff_output;
    filter->filt_event = if_filter->iff_event;
    filter->filt_ioctl = if_filter->iff_ioctl;
    filter->filt_detached = if_filter->iff_detached;

    if ((retval = dlil_write_begin()) != 0) {
        /* Failed to acquire the write lock */
        FREE(filter, M_NKE);
        return retval;
    }
    TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
    dlil_write_end();
    *filter_ref = filter;
    return retval;
}
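
/*
 * Illustrative sketch (not part of the original source): how a client
 * attaches an interface filter.  The iff_* fields mirror the copies
 * made by dlil_attach_filter above; the example_* names and the
 * filter name string are hypothetical.  Guarded by #if 0 because it
 * is illustration only.
 */
#if 0 /* example only */
static errno_t
example_filt_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
    /* return 0 to pass the packet on, EJUSTRETURN to swallow it */
    return 0;
}

static void
example_attach_filter(struct ifnet *ifp)
{
    struct iff_filter example_filter;
    interface_filter_t example_ref;

    bzero(&example_filter, sizeof(example_filter));
    example_filter.iff_cookie = NULL;
    example_filter.iff_name = "com.example.filter"; /* hypothetical */
    example_filter.iff_protocol = 0;                /* 0 = all protocols */
    example_filter.iff_input = example_filt_input;

    if (dlil_attach_filter(ifp, &example_filter, &example_ref) == 0) {
        /* ... later: dlil_detach_filter(example_ref); */
    }
}
#endif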

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
    int retval = 0;

    if (detached == 0) {
        ifnet_t ifp = NULL;
        interface_filter_t entry = NULL;

        /* Take the write lock */
        retval = dlil_write_begin();
        if (retval != 0 && retval != EDEADLK)
            return retval;

        /*
         * At this point either we have the write lock (retval == 0)
         * or we couldn't get it (retval == EDEADLK) because someone
         * else up the stack is holding the read lock.  It is safe to
         * read, since either the read or the write lock is held.
         * Verify the filter parameter before proceeding.
         */
        ifnet_head_lock_shared();
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
                if (entry == filter)
                    break;
            }
            if (entry == filter)
                break;
        }
        ifnet_head_done();

        if (entry != filter) {
            /* filter parameter is not a valid filter ref */
            if (retval == 0) {
                dlil_write_end();
            }
            return EINVAL;
        }

        if (retval == EDEADLK) {
            /* Perform a delayed detach */
            filter->filt_detaching = 1;
            dlil_detach_waiting = 1;
            wakeup(&dlil_detach_waiting);
            return 0;
        }

        /* Remove the filter from the list */
        TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
        dlil_write_end();
    }

    /* Call the detached function if there is one */
    if (filter->filt_detached)
        filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

    /* Free the filter */
    FREE(filter, M_NKE);

    return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
    if (filter == NULL)
        return;
    dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
    __unused void* foo,
    __unused wait_result_t wait)
{
    while (1) {
        struct mbuf *m, *m_loop;

        lck_spin_lock(dlil_input_lock);
        m = dlil_input_mbuf_head;
        dlil_input_mbuf_head = NULL;
        dlil_input_mbuf_tail = NULL;
        m_loop = dlil_input_loop_head;
        dlil_input_loop_head = NULL;
        dlil_input_loop_tail = NULL;
        lck_spin_unlock(dlil_input_lock);

        /*
         * NOTE: we should consider adding thread-starvation
         * safeguards here if we have to deal with long chains
         * of packets.
         */
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
            m = m0;
        }
        m = m_loop;
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;
            struct ifnet *ifp = &loif[0];

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(ifp, m, header);
            m = m0;
        }

        proto_input_run();

        if (dlil_input_mbuf_head == NULL &&
            dlil_input_loop_head == NULL && inject_buckets == 0) {
            assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
            (void) thread_block(dlil_input_thread_continue);
            /* NOTREACHED */
        }
    }
}

void dlil_input_thread(void)
{
    register thread_t self = current_thread();

    ml_thread_policy(self, MACHINE_GROUP,
        (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

    dlil_initialized = 1;
    dlil_input_thread_ptr = current_thread();
    dlil_input_thread_continue(NULL, THREAD_RESTART);
}

int
dlil_input_with_stats(
    struct ifnet *ifp,
    struct mbuf *m_head,
    struct mbuf *m_tail,
    const struct ifnet_stat_increment_param *stats)
{
    /* WARNING
     * Because of loopbacked multicast we cannot stuff the ifp in
     * the rcvif of the packet header: loopback has its own dlil
     * input queue
     */

    lck_spin_lock(dlil_input_lock);
    if (ifp->if_type != IFT_LOOP) {
        if (dlil_input_mbuf_head == NULL)
            dlil_input_mbuf_head = m_head;
        else if (dlil_input_mbuf_tail != NULL)
            dlil_input_mbuf_tail->m_nextpkt = m_head;
        dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
    } else {
        if (dlil_input_loop_head == NULL)
            dlil_input_loop_head = m_head;
        else if (dlil_input_loop_tail != NULL)
            dlil_input_loop_tail->m_nextpkt = m_head;
        dlil_input_loop_tail = m_tail ? m_tail : m_head;
    }
    if (stats) {
        ifp->if_data.ifi_ipackets += stats->packets_in;
        ifp->if_data.ifi_ibytes += stats->bytes_in;
        ifp->if_data.ifi_ierrors += stats->errors_in;

        ifp->if_data.ifi_opackets += stats->packets_out;
        ifp->if_data.ifi_obytes += stats->bytes_out;
        ifp->if_data.ifi_oerrors += stats->errors_out;

        ifp->if_data.ifi_collisions += stats->collisions;
        ifp->if_data.ifi_iqdrops += stats->dropped;
    }
    lck_spin_unlock(dlil_input_lock);

    wakeup((caddr_t)&dlil_input_thread_wakeup);

    return 0;
}

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
    return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}

int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
    char *frame_header)
{
    int retval;
    struct if_proto *ifproto = 0;
    protocol_family_t protocol_family;
    struct ifnet_filter *filter;

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    /*
     * Lock the interface while we run through
     * the filters and the demux. This lock
     * protects the filter list and the demux list.
     */
    dlil_read_begin();

    /*
     * Call family demux module. If the demux module finds a match
     * for the frame it will fill-in the ifproto pointer.
     */

    retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
    if (retval != 0)
        protocol_family = 0;
    if (retval == EJUSTRETURN) {
        dlil_read_end();
        return 0;
    }

    /* DANGER!!! */
    if (m->m_flags & (M_BCAST|M_MCAST))
        ifp->if_imcasts++;

    /*
     * Run interface filters
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            int filter_result;
            if (filter->filt_input && (filter->filt_protocol == 0 ||
                filter->filt_protocol == protocol_family)) {
                filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

                if (filter_result) {
                    dlil_read_end();
                    if (filter_result == EJUSTRETURN) {
                        filter_result = 0;
                    }
                    else {
                        m_freem(m);
                    }

                    return filter_result;
                }
            }
        }
    }

    /* Demux is done, interface filters have been processed, unlock the mutex */
    if (retval || ((m->m_flags & M_PROMISC) != 0)) {
        dlil_read_end();
        if (retval != EJUSTRETURN) {
            m_freem(m);
            return retval;
        }
        else
            return 0;
    }

    ifproto = find_attached_proto(ifp, protocol_family);

    if (ifproto == 0) {
        dlil_read_end();
        DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
        m_freem(m);
        return 0;
    }

    /*
     * Hand the packet off to the protocol.
     */

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_lock(ifproto->dl_domain->dom_mtx);
    }

    if (ifproto->proto_kpi == kProtoKPI_DLIL)
        retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
            ifp, ifproto->protocol_family,
            TRUE);
    else
        retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
    }

    dlil_read_end();

    if (retval == EJUSTRETURN)
        retval = 0;
    else if (retval)
        m_freem(m);

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return retval;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
    struct ifnet_filter *filter;

    if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
        dlil_read_begin();

        /* Pass the event to the interface filters */
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if (filter->filt_event)
                filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
        }

        if (ifp->if_proto_hash) {
            int i;

            for (i = 0; i < PROTO_HASH_SLOTS; i++) {
                struct if_proto *proto;

                SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                    /* Pass the event to the protocol */
                    if (proto->proto_kpi == kProtoKPI_DLIL) {
                        if (proto->kpi.dlil.dl_event)
                            proto->kpi.dlil.dl_event(ifp, event);
                    }
                    else {
                        if (proto->kpi.v1.event)
                            proto->kpi.v1.event(ifp, proto->protocol_family, event);
                    }
                }
            }
        }

        dlil_read_end();

        /* Pass the event to the interface */
        if (ifp->if_event)
            ifp->if_event(ifp, event);

        if (ifp_unuse(ifp))
            ifp_use_reached_zero(ifp);
    }

    return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
    int result = 0;

    struct kev_msg kev_msg;

    kev_msg.vendor_code = event->vendor_code;
    kev_msg.kev_class = event->kev_class;
    kev_msg.kev_subclass = event->kev_subclass;
    kev_msg.event_code = event->event_code;
    kev_msg.dv[0].data_ptr = &event->event_data[0];
    kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
    kev_msg.dv[1].data_length = 0;

    result = dlil_event_internal(ifp, &kev_msg);

    return result;
}

int
dlil_output_list(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *packetlist,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int error, retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;
    struct if_proto *proto = 0;
    struct mbuf *m;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
#if BRIDGE
    if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
    if ((raw != 0) || proto_family != PF_INET) {
#endif
        while (packetlist) {
            m = packetlist;
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
            error = dlil_output(ifp, proto_family, m, route, dest, raw);
            if (error) {
                if (packetlist)
                    m_freem_list(packetlist);
                return (error);
            }
        }
        return (0);
    }

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;
    m = packetlist;
    packetlist = packetlist->m_nextpkt;
    m->m_nextpkt = NULL;

    proto = find_attached_proto(ifp, proto_family);
    if (proto == NULL) {
        retval = ENXIO;
        goto cleanup;
    }

    retval = 0;
    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_pre_output)
            retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }
    else {
        if (proto->kpi.v1.pre_output)
            retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }

    if (retval) {
        if (retval != EJUSTRETURN) {
            m_freem(m);
        }
        goto cleanup;
    }

    do {
        if (ifp->if_framer) {
            retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
            if (retval) {
                if (retval != EJUSTRETURN) {
                    m_freem(m);
                }
                goto cleanup;
            }
        }

        /*
         * Let interface filters (if any) do their thing ...
         */
        /* Do not pass VLAN tagged packets to filters PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
                if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                    filter->filt_output) {
                    retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                    if (retval) {
                        if (retval == EJUSTRETURN)
                            continue;
                        else {
                            m_freem(m);
                        }
                        goto cleanup;
                    }
                }
            }
        }

        /*
         * Finally, call the driver.
         */

        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
        retval = ifp->if_output(ifp, m);
        if (retval) {
            printf("dlil_output_list: output error retval = %x\n", retval);
            goto cleanup;
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

        m = packetlist;
        if (m) {
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
        }
    } while (m);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

cleanup:
    dlil_read_end();
    if (packetlist) /* if any packet left, clean up */
        m_freem_list(packetlist);
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
int
dlil_output(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *m,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    if (raw == 0) {
        struct if_proto *proto = 0;

        proto = find_attached_proto(ifp, proto_family);
        if (proto == NULL) {
            m_freem(m);
            retval = ENXIO;
            goto cleanup;
        }

        retval = 0;
        if (proto->proto_kpi == kProtoKPI_DLIL) {
            if (proto->kpi.dlil.dl_pre_output)
                retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }
        else {
            if (proto->kpi.v1.pre_output)
                retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }

        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

    /*
     * Call framing module
     */
    if ((raw == 0) && (ifp->if_framer)) {
        retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

#if BRIDGE
    /* !!!LOCKING!!!
     *
     * Need to consider how to handle this.
     */
    broken-locking
    if (do_bridge) {
        struct mbuf *m0 = m;
        struct ether_header *eh = mtod(m, struct ether_header *);

        if (m->m_pkthdr.rcvif)
            m->m_pkthdr.rcvif = NULL;
        ifp = bridge_dst_lookup(eh);
        bdg_forward(&m0, ifp);
        if (m0)
            m_freem(m0);

        return 0;
    }
#endif

    /*
     * Let interface filters (if any) do their thing ...
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                filter->filt_output) {
                retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                if (retval) {
                    if (retval != EJUSTRETURN)
                        m_freem(m);
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Finally, call the driver.
     */

    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
    retval = ifp->if_output(ifp, m);
    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

cleanup:
    dlil_read_end();
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}
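
/*
 * Illustrative sketch (not part of the original source): the lock
 * ordering rule described in the dlil_output comment above -- take
 * the protocol (domain) lock before any interface lock.  Guarded by
 * #if 0 because it is illustration only.
 */
#if 0 /* example only */
static void
example_lock_ordering(struct domain *dp, struct ifnet *ifp)
{
    lck_mtx_lock(dp->dom_mtx);  /* protocol lock first ... */
    ifnet_lock_shared(ifp);     /* ... then the interface lock */

    /* work with the protocol and the interface here */

    ifnet_lock_done(ifp);
    lck_mtx_unlock(dp->dom_mtx);
}
#endif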

int
dlil_ioctl(u_long proto_fam,
    struct ifnet *ifp,
    u_long ioctl_code,
    caddr_t ioctl_arg)
{
    struct ifnet_filter *filter;
    int retval = EOPNOTSUPP;
    int result = 0;
    struct if_family_str *if_family;
    int holding_read = 0;

    /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
    result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
    if (result != 0)
        return EOPNOTSUPP;

    dlil_read_begin();
    holding_read = 1;

    /* Run the interface filters first.
     * We want to run all filters before calling the protocol,
     * interface family, or interface.
     */
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
            filter->filt_ioctl != NULL) {
            result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /* Allow the protocol to handle the ioctl */
    if (proto_fam) {
        struct if_proto *proto = find_attached_proto(ifp, proto_fam);

        if (proto != 0) {
            result = EOPNOTSUPP;
            if (proto->proto_kpi == kProtoKPI_DLIL) {
                if (proto->kpi.dlil.dl_ioctl)
                    result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
            }
            else {
                if (proto->kpi.v1.ioctl)
                    result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
            }

            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Since we have incremented the use count on the ifp, we are guaranteed
     * that the ifp will not go away (the function pointers may not be changed).
     * We release the dlil read lock so the interface ioctl may trigger a
     * protocol attach. This happens with vlan and may occur with other virtual
     * interfaces.
     */
    dlil_read_end();
    holding_read = 0;

    /* retval is either 0 or EOPNOTSUPP */

    /*
     * Let the family handle this ioctl.
     * If it returns something non-zero and not EOPNOTSUPP, we're done.
     * If it returns zero, the ioctl was handled, so set retval to zero.
     */
    if_family = find_family_module(ifp->if_family);
    if ((if_family) && (if_family->ifmod_ioctl)) {
        result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

        /* Only update retval if no one has handled the ioctl */
        if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
            if (result == ENOTSUP)
                result = EOPNOTSUPP;
            retval = result;
            if (retval && retval != EOPNOTSUPP) {
                goto cleanup;
            }
        }
    }

    /*
     * Let the interface handle this ioctl.
     * If it returns EOPNOTSUPP, ignore that, we may have
     * already handled this in the protocol or family.
     */
    if (ifp->if_ioctl)
        result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

    /* Only update retval if no one has handled the ioctl */
    if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
        if (result == ENOTSUP)
            result = EOPNOTSUPP;
        retval = result;
        if (retval && retval != EOPNOTSUPP) {
            goto cleanup;
        }
    }

cleanup:
    if (holding_read)
        dlil_read_end();
    if (ifp_unuse(ifp))
        ifp_use_reached_zero(ifp);

    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
    ifnet_t ifp,
    bpf_tap_mode mode,
    bpf_packet_func callback)
{
    errno_t error = 0;

    dlil_read_begin();
    if (ifp->if_set_bpf_tap)
        error = ifp->if_set_bpf_tap(ifp, mode, callback);
    dlil_read_end();

    return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
    struct ifnet *ifp,
    const struct sockaddr *proto_addr,
    struct sockaddr *ll_addr,
    size_t ll_len)
{
    errno_t result = EOPNOTSUPP;
    struct if_proto *proto;
    const struct sockaddr *verify;

    dlil_read_begin();

    bzero(ll_addr, ll_len);

    /* Call the protocol first */
    proto = find_attached_proto(ifp, proto_addr->sa_family);
    if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
        proto->kpi.v1.resolve_multi != NULL) {
        result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
            (struct sockaddr_dl*)ll_addr, ll_len);
    }

    /* Let the interface verify the multicast address */
    if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
        if (result == 0)
            verify = ll_addr;
        else
            verify = proto_addr;
        result = ifp->if_check_multi(ifp, verify);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    struct if_proto *proto;
    errno_t result = 0;

    dlil_read_begin();

    proto = find_attached_proto(ifp, target_proto->sa_family);
    if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
        proto->kpi.v1.send_arp == NULL) {
        result = ENOTSUP;
    }
    else {
        result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    errno_t result = 0;

    if (target_proto == NULL || (sender_proto &&
        sender_proto->sa_family != target_proto->sa_family))
        return EINVAL;

    /*
     * If this is an ARP request and the target IP is IPv4LL,
     * send the request on all interfaces.
     */
    if (IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)
        && ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
        arpop == ARPOP_REQUEST) {
        ifnet_t *ifp_list;
        u_int32_t count;
        u_int32_t ifp_on;

        result = ENOTSUP;

        if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
            for (ifp_on = 0; ifp_on < count; ifp_on++) {
                errno_t new_result;
                ifaddr_t source_hw = NULL;
                ifaddr_t source_ip = NULL;
                struct sockaddr_in source_ip_copy;

                /*
                 * Only arp on interfaces marked for IPv4LL ARPing. This may
                 * mean that we don't ARP on the interface the subnet route
                 * points to.
                 */
                if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
                    continue;
                }

                source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

                /* Find the source IP address */
                ifnet_lock_shared(ifp_list[ifp_on]);
                TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
                    ifa_link) {
                    if (source_ip->ifa_addr &&
                        source_ip->ifa_addr->sa_family == AF_INET) {
                        break;
                    }
                }

                /* No IP Source, don't arp */
                if (source_ip == NULL) {
                    ifnet_lock_done(ifp_list[ifp_on]);
                    continue;
                }

                /* Copy the source IP address */
                source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

                ifnet_lock_done(ifp_list[ifp_on]);

                /* Send the ARP */
                new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
                    (struct sockaddr_dl*)source_hw->ifa_addr,
                    (struct sockaddr*)&source_ip_copy, NULL,
                    target_proto);

                if (result == ENOTSUP) {
                    result = new_result;
                }
            }
        }

        ifnet_list_free(ifp_list);
    }
    else {
        result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    return result;
}

static int
ifp_use(
    struct ifnet *ifp,
    int handle_zero)
{
    int old_value;
    int retval = 0;

    do {
        old_value = ifp->if_usecnt;
        if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
            retval = ENXIO; // ifp is invalid
            break;
        }
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

    return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between dlil_write_begin and
 * dlil_write_end. ifp_unuse needs to perform some operations after
 * dlil_write_end has been called. For this reason, anyone calling
 * ifp_unuse must call ifp_use_reached_zero if ifp_unuse returns a
 * non-zero value, and must do so only after it has called
 * dlil_write_end.
 */
static void
ifp_use_reached_zero(
    struct ifnet *ifp)
{
    struct if_family_str *if_family;
    ifnet_detached_func free_func;

    dlil_read_begin();

    if (ifp->if_usecnt != 0)
        panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

    /* Let BPF know we're detaching */
    bpfdetach(ifp);

    ifnet_head_lock_exclusive();
    ifnet_lock_exclusive(ifp);

    /* Remove ourselves from the list */
    TAILQ_REMOVE(&ifnet_head, ifp, if_link);
    ifnet_addrs[ifp->if_index - 1] = 0;

    /* ifp should be removed from the interface list */
    while (ifp->if_multiaddrs.lh_first) {
        struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

        /*
         * When the interface is gone, we will no longer
         * be listening on these multicasts. Various bits
         * of the stack may be referencing these multicasts,
         * release only our reference.
         */
        LIST_REMOVE(ifma, ifma_link);
        ifma->ifma_ifp = NULL;
        ifma_release(ifma);
    }
    ifnet_head_done();

    ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
    ifnet_lock_done(ifp);

    if_family = find_family_module(ifp->if_family);
    if (if_family && if_family->del_if)
        if_family->del_if(ifp);
#if 0
    if (--if_family->if_usecnt == 0) {
        if (if_family->shutdown)
            (*if_family->shutdown)();

        TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
        FREE(if_family, M_IFADDR);
    }
#endif

    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
    free_func = ifp->if_free;
    dlil_read_end();

    if (free_func)
        free_func(ifp);
}

static int
ifp_unuse(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
    if (oldval == 0)
        panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

    if (oldval > 1)
        return 0;

    if ((ifp->if_eflags & IFEF_DETACHING) == 0)
        panic("ifp_unuse: use count reached zero but detaching flag is not set!");

    return 1; /* caller must call ifp_use_reached_zero */
}
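
/*
 * Illustrative sketch (not part of the original source): the
 * ifp_unuse/ifp_use_reached_zero pairing described above.
 * ifp_use_reached_zero may only be called after dlil_write_end,
 * because it takes the read lock itself.  Guarded by #if 0 because
 * it is illustration only.
 */
#if 0 /* example only */
static void
example_drop_use(struct ifnet *ifp)
{
    int reached_zero;

    if (dlil_write_begin() != 0)
        return;                     /* caller would defer instead */

    /* ... detach a protocol or filter here ... */
    reached_zero = ifp_unuse(ifp);
    dlil_write_end();

    if (reached_zero)
        ifp_use_reached_zero(ifp);  /* safe: write lock released */
}
#endif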

void
ifp_reference(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
    if (oldval == 0)
        panic("ifp_release - refcount decremented past zero!");
}

extern lck_mtx_t *domain_proto_mtx;

static int
dlil_attach_protocol_internal(
    struct if_proto *proto,
    const struct ddesc_head_str *demux,
    const struct ifnet_demux_desc *demux_list,
    u_int32_t demux_count)
{
    struct ddesc_head_str temp_head;
    struct kev_dl_proto_data ev_pr_data;
    struct ifnet *ifp = proto->ifp;
    int retval = 0;
    u_long hash_value = proto_hash_value(proto->protocol_family);
    int if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
    void* free_me = NULL;

    /* setup some of the common values */
    {
        struct domain *dp;

        lck_mtx_lock(domain_proto_mtx);
        dp = domains;
        while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
            dp = dp->dom_next;
        proto->dl_domain = dp;
        lck_mtx_unlock(domain_proto_mtx);
    }

    /*
     * Convert the demux descriptors to a type the interface
     * will understand. Checking e_flags should be safe, this
     * flag won't change.
     */
    if (if_using_kpi && demux) {
        /* Convert the demux linked list to a demux_list */
        struct dlil_demux_desc *demux_entry;
        struct ifnet_demux_desc *temp_list = NULL;
        u_int32_t i = 0;

        TAILQ_FOREACH(demux_entry, demux, next) {
            i++;
        }

        temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
        free_me = temp_list;

        if (temp_list == NULL)
            return ENOMEM;

        i = 0;
        TAILQ_FOREACH(demux_entry, demux, next) {
            /* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
            if (demux_entry->type == 1 ||
                demux_entry->type == 2 ||
                demux_entry->type == 3) {
                FREE(free_me, M_TEMP);
                return ENOTSUP;
            }

            temp_list[i].type = demux_entry->type;
            temp_list[i].data = demux_entry->native_type;
            temp_list[i].datalen = demux_entry->variants.native_type_length;
            i++;
        }
        demux_count = i;
        demux_list = temp_list;
    }
    else if (!if_using_kpi && demux_list != NULL) {
        struct dlil_demux_desc *demux_entry;
        u_int32_t i = 0;

        demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
        free_me = demux_entry;
        if (demux_entry == NULL)
            return ENOMEM;

        TAILQ_INIT(&temp_head);

        for (i = 0; i < demux_count; i++) {
            demux_entry[i].type = demux_list[i].type;
            demux_entry[i].native_type = demux_list[i].data;
            demux_entry[i].variants.native_type_length = demux_list[i].datalen;
            TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
        }
        demux = &temp_head;
    }

    /*
     * Take the write lock to protect readers and exclude other writers.
     */
    dlil_write_begin();

    /* Check that the interface isn't currently detaching */
    ifnet_lock_shared(ifp);
    if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
        ifnet_lock_done(ifp);
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return ENXIO;
    }
    ifnet_lock_done(ifp);

    if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return EEXIST;
    }

    /*
     * Call family module add_proto routine so it can refine the
     * demux descriptors as it wishes.
     */
    if (if_using_kpi)
        retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
    else {
        retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
            _cast_non_const(demux));
    }
    if (retval) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return retval;
    }

    /*
     * We can't fail from this point on.
     * Increment the number of uses (protocol attachments + interface attached).
     */
    ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

    /*
     * Insert the protocol in the hash
     */
    {
        struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
        while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
            prev_proto = SLIST_NEXT(prev_proto, next_hash);
        if (prev_proto)
            SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
        else
            SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
    }

    /*
     * Add to if_proto list for this interface
     */
    if_proto_ref(proto);
    if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
        ifp->offercnt++;
    dlil_write_end();

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto->protocol_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));

    DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
        ifp->if_name, ifp->if_unit, retval);
    if (free_me)
        FREE(free_me, M_TEMP);
    return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
    const struct ifnet_attach_proto_param *proto_details)
{
    int retval = 0;
    struct if_proto *ifproto = NULL;

    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (ifproto == 0) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }
    bzero(ifproto, sizeof(*ifproto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = protocol;
    ifproto->proto_kpi = kProtoKPI_v1;
    ifproto->kpi.v1.input = proto_details->input;
    ifproto->kpi.v1.pre_output = proto_details->pre_output;
    ifproto->kpi.v1.event = proto_details->event;
    ifproto->kpi.v1.ioctl = proto_details->ioctl;
    ifproto->kpi.v1.detached = proto_details->detached;
    ifproto->kpi.v1.resolve_multi = proto_details->resolve;
    ifproto->kpi.v1.send_arp = proto_details->send_arp;

    retval = dlil_attach_protocol_internal(ifproto, NULL,
        proto_details->demux_list, proto_details->demux_count);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}

int
dlil_attach_protocol(struct dlil_proto_reg_str *proto)
{
    struct ifnet *ifp = NULL;
    struct if_proto *ifproto = NULL;
    int retval = 0;

    /*
     * Do everything we can before taking the write lock
     */

    if ((proto->protocol_family == 0) || (proto->interface_family == 0))
        return EINVAL;

    /*
     * Allocate and init a new if_proto structure
     */
    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (!ifproto) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }

    /* ifbyfamily returns us an ifp with an incremented if_usecnt */
    ifp = ifbyfamily(proto->interface_family, proto->unit_number);
    if (!ifp) {
        DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
            proto->interface_family, proto->unit_number);
        retval = ENXIO;
        goto end;
    }

    bzero(ifproto, sizeof(struct if_proto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = proto->protocol_family;
    ifproto->proto_kpi = kProtoKPI_DLIL;
    ifproto->kpi.dlil.dl_input = proto->input;
    ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
    ifproto->kpi.dlil.dl_event = proto->event;
    ifproto->kpi.dlil.dl_offer = proto->offer;
    ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
    ifproto->kpi.dlil.dl_detached = proto->detached;

    retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

static int
dlil_detach_protocol_internal(
    struct if_proto *proto)
{
    struct ifnet *ifp = proto->ifp;
    u_long proto_family = proto->protocol_family;
    struct kev_dl_proto_data ev_pr_data;

    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_detached)
            proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
    }
    else {
        if (proto->kpi.v1.detached)
            proto->kpi.v1.detached(ifp, proto->protocol_family);
    }
    if_proto_free(proto);

    /*
     * Cleanup routes that may still be in the routing table for that interface/protocol pair.
     */

    if_rtproto_del(ifp, proto_family);

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));
    return 0;
}
1987
1988 int
1989 dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
1990 {
1991 struct if_proto *proto = NULL;
1992 int retval = 0;
1993 int use_reached_zero = 0;
1994
1995
1996 if ((retval = dlil_write_begin()) != 0) {
1997 if (retval == EDEADLK) {
1998 retval = 0;
1999 dlil_read_begin();
2000 proto = find_attached_proto(ifp, proto_family);
2001 if (proto == 0) {
2002 retval = ENXIO;
2003 }
2004 else {
2005 proto->detaching = 1;
2006 dlil_detach_waiting = 1;
2007 wakeup(&dlil_detach_waiting);
2008 }
2009 dlil_read_end();
2010 }
2011 goto end;
2012 }
2013
2014 proto = find_attached_proto(ifp, proto_family);
2015
2016 if (proto == NULL) {
2017 retval = ENXIO;
2018 dlil_write_end();
2019 goto end;
2020 }
2021
2022 /*
2023 * Call family module del_proto
2024 */
2025
2026 if (ifp->if_del_proto)
2027 ifp->if_del_proto(ifp, proto->protocol_family);
2028
2029 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2030 ifp->offercnt--;
2031
2032 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);
2033
2034 /*
2035 * We can do the rest of the work outside of the write lock.
2036 */
2037 use_reached_zero = ifp_unuse(ifp);
2038 dlil_write_end();
2039
2040 dlil_detach_protocol_internal(proto);
2041
2042 /*
2043 * Only handle the case where the interface will go away after
2044 * we've sent the message. This way post message can send the
2045 * message to the interface safely.
2046 */
2047
2048 if (use_reached_zero)
2049 ifp_use_reached_zero(ifp);
2050
2051 end:
2052 return retval;
2053 }
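
/*
 * Illustrative sketch (not part of the original source): how a caller
 * might use dlil_detach_protocol above.  Note that the EDEADLK path
 * returns 0 after handing the work to the delayed detach thread, so a
 * return of 0 does not mean the protocol is already gone.  The
 * example_* names are hypothetical.
 */
#if 0
static void
example_detach_inet(struct ifnet *ifp)
{
	int err = dlil_detach_protocol(ifp, PF_INET);

	if (err == ENXIO)
		printf("example: PF_INET was not attached\n");
	/* err == 0: detached now, or queued for the delayed detach thread */
}
#endif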
2054
2055 /*
2056 * dlil_delayed_detach_thread is responsible for detaching
2057 * protocols, protocol filters, and interface filters after
2058 * an attempt was made to detach one of those items while
2059 * it was not safe to do so (i.e. called dlil_read_begin).
2060 *
2061 * This function will take the dlil write lock and walk
2062 * through each of the interfaces looking for items with
2063 * the detaching flag set. When an item is found, it is
2064 * detached from the interface and placed on a local list.
2065 * After all of the items have been collected, we drop the
2066 * write lock and perform the post detach. This is done
2067 * so we only have to take the write lock once.
2068 *
2069 * When detaching a protocol, if we find that we have
2070 * detached the very last protocol and need to call
2071 * ifp_use_reached_zero, we have to break out of the walk
2072 * and drop the write lock before making that call.
2073 */
2074
2075 static void
2076 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2077 {
2078 thread_t self = current_thread();
2079 int asserted = 0;
2080
2081 ml_thread_policy(self, MACHINE_GROUP,
2082 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2083
2084
2085 while (1) {
2086 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2087 struct ifnet *ifp;
2088 struct proto_hash_entry detached_protos;
2089 struct ifnet_filter_head detached_filters;
2090 struct if_proto *proto;
2091 struct if_proto *next_proto;
2092 struct ifnet_filter *filt;
2093 struct ifnet_filter *next_filt;
2094 int reached_zero;
2095
2096 reached_zero = 0;
2097
2098 /* Clear the detach waiting flag */
2099 dlil_detach_waiting = 0;
2100 TAILQ_INIT(&detached_filters);
2101 SLIST_INIT(&detached_protos);
2102
2103 ifnet_head_lock_shared();
2104 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2105 int i;
2106
2107 // Look for protocols and protocol filters
2108 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2109 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2110 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2111
2112 // Detach this protocol
2113 if (proto->detaching) {
2114 if (ifp->if_del_proto)
2115 ifp->if_del_proto(ifp, proto->protocol_family);
2116 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2117 ifp->offercnt--;
2118 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2119 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2120 reached_zero = ifp_unuse(ifp);
2121 if (reached_zero) {
2122 break;
2123 }
2124 }
2125 else {
2126 // Update prev_nextptr to point to our next ptr
2127 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2128 }
2129 }
2130 }
2131
2132 // look for interface filters that need to be detached
2133 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2134 next_filt = TAILQ_NEXT(filt, filt_next);
2135 if (filt->filt_detaching != 0) {
2136 // take this interface filter off the interface filter list
2137 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2138
2139 // put this interface filter on the detached filters list
2140 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2141 }
2142 }
2143
2144 if (ifp->if_delayed_detach) {
2145 ifp->if_delayed_detach = 0;
2146 reached_zero = ifp_unuse(ifp);
2147 }
2148
2149 if (reached_zero)
2150 break;
2151 }
2152 ifnet_head_done();
2153 dlil_write_end();
2154
2155 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2156 next_filt = TAILQ_NEXT(filt, filt_next);
2157 /*
2158 * dlil_detach_filter_internal won't remove an item from
2159 * the list if it is already detached (second parameter).
2160 * The item will be freed though.
2161 */
2162 dlil_detach_filter_internal(filt, 1);
2163 }
2164
2165 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2166 next_proto = SLIST_NEXT(proto, next_hash);
2167 dlil_detach_protocol_internal(proto);
2168 }
2169
2170 if (reached_zero) {
2171 ifp_use_reached_zero(ifp);
2172 dlil_detach_waiting = 1; // we may have missed something
2173 }
2174 }
2175
2176 if (!asserted && dlil_detach_waiting == 0) {
2177 asserted = 1;
2178 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2179 }
2180
2181 if (dlil_detach_waiting == 0) {
2182 asserted = 0;
2183 thread_block(dlil_delayed_detach_thread);
2184 }
2185 }
2186 }
2187
2188 static void
2189 dlil_call_delayed_detach_thread(void) {
2190 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2191 }
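
/*
 * Illustrative sketch (not part of the original source) of the
 * unlink-while-iterating idiom used in the protocol walk above:
 * tracking the previous element's next pointer lets the loop splice
 * out the current element of an SLIST without a separate "previous
 * element" variable.  example_elem/example_prune are hypothetical.
 */
#if 0
struct example_elem {
	SLIST_ENTRY(example_elem) link;
	int detaching;
};
SLIST_HEAD(example_head, example_elem);

static void
example_prune(struct example_head *head)
{
	struct example_elem *elem;
	struct example_elem **prev_nextptr = &SLIST_FIRST(head);

	for (elem = *prev_nextptr; elem; elem = *prev_nextptr) {
		if (elem->detaching) {
			/* splice out; prev_nextptr keeps its position */
			*prev_nextptr = SLIST_NEXT(elem, link);
		} else {
			/* advance to this element's own next pointer */
			prev_nextptr = &SLIST_NEXT(elem, link);
		}
	}
}
#endif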
2192
2193 extern int if_next_index(void);
2194
2195 __private_extern__ int
2196 dlil_if_attach_with_address(
2197 struct ifnet *ifp,
2198 const struct sockaddr_dl *ll_addr)
2199 {
2200 u_long interface_family = ifp->if_family;
2201 struct if_family_str *if_family = NULL;
2202 int stat;
2203 struct ifnet *tmp_if;
2204 struct proto_hash_entry *new_proto_list = NULL;
2205 int locked = 0;
2206
2207
2208 ifnet_head_lock_shared();
2209
2210 /* Verify we aren't already on the list */
2211 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2212 if (tmp_if == ifp) {
2213 ifnet_head_done();
2214 return EEXIST;
2215 }
2216 }
2217
2218 ifnet_head_done();
2219
2220 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2221 #if IFNET_RW_LOCK
2222 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2223 #else
2224 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2225 #endif
2226
2227 if (ifp->if_lock == 0) {
2228 return ENOMEM;
2229 }
2230
2231 // Only use family if this is not a KPI interface
2232 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2233 if_family = find_family_module(interface_family);
2234 }
2235
2236 /*
2237 * Allow interfaces without protocol families to attach
2238 * only if they have the necessary fields filled out.
2239 */
2240
2241 if ((if_family == 0) &&
2242 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2243 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
2244 interface_family);
2245 return ENODEV;
2246 }
2247
2248 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2249 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2250 M_NKE, M_WAITOK);
2251
2252 if (new_proto_list == 0) {
2253 return ENOBUFS;
2254 }
2255 }
2256
2257 dlil_write_begin();
2258 locked = 1;
2259
2260 /*
2261 * Call the family module to fill in the appropriate fields in the
2262 * ifnet structure.
2263 */
2264
2265 if (if_family) {
2266 stat = if_family->add_if(ifp);
2267 if (stat) {
2268 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2269 dlil_write_end();
2270 return stat;
2271 }
2272 ifp->if_add_proto_u.original = if_family->add_proto;
2273 ifp->if_del_proto = if_family->del_proto;
2274 if_family->refcnt++;
2275 }
2276
2277 ifp->offercnt = 0;
2278 TAILQ_INIT(&ifp->if_flt_head);
2279
2280
2281 if (new_proto_list) {
2282 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2283 ifp->if_proto_hash = new_proto_list;
2284 new_proto_list = 0;
2285 }
2286
2287 /* old_if_attach */
2288 {
2289 struct ifaddr *ifa = 0;
2290
2291 if (ifp->if_snd.ifq_maxlen == 0)
2292 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2293 TAILQ_INIT(&ifp->if_prefixhead);
2294 LIST_INIT(&ifp->if_multiaddrs);
2295 ifnet_touch_lastchange(ifp);
2296
2297 /* usecount to track attachment to the ifnet list */
2298 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2299
2300 /* Lock the list of interfaces */
2301 ifnet_head_lock_exclusive();
2302 ifnet_lock_exclusive(ifp);
2303
2304 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2305 char workbuf[64];
2306 int namelen, masklen, socksize, ifasize;
2307
2308 ifp->if_index = if_next_index();
2309
2310 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2311 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2312 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2313 socksize = masklen + ifp->if_addrlen;
2314 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2315 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2316 socksize = sizeof(struct sockaddr_dl);
2317 socksize = ROUNDUP(socksize);
2318 ifasize = sizeof(struct ifaddr) + 2 * socksize;
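			/*
			 * Worked example of the sizing above (illustrative;
			 * assumes sdl_data starts at offset 8 in struct
			 * sockaddr_dl, sizeof(struct sockaddr_dl) is 20, and
			 * a 4-byte long): for "en0" with a 6-byte link-layer
			 * address, namelen = 3, masklen = 8 + 3 = 11, and
			 * socksize = 11 + 6 = 17, which is bumped to 20 and
			 * rounded to 20, so ifasize ends up
			 * sizeof(struct ifaddr) + 40.
			 */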
2319 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2320 if (ifa) {
2321 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2322 ifnet_addrs[ifp->if_index - 1] = ifa;
2323 bzero(ifa, ifasize);
2324 sdl->sdl_len = socksize;
2325 sdl->sdl_family = AF_LINK;
2326 bcopy(workbuf, sdl->sdl_data, namelen);
2327 sdl->sdl_nlen = namelen;
2328 sdl->sdl_index = ifp->if_index;
2329 sdl->sdl_type = ifp->if_type;
2330 if (ll_addr) {
2331 sdl->sdl_alen = ll_addr->sdl_alen;
2332 if (ll_addr->sdl_alen != ifp->if_addrlen)
2333 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2334 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2335 }
2336 ifa->ifa_ifp = ifp;
2337 ifa->ifa_rtrequest = link_rtrequest;
2338 ifa->ifa_addr = (struct sockaddr*)sdl;
2339 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2340 ifa->ifa_netmask = (struct sockaddr*)sdl;
2341 sdl->sdl_len = masklen;
2342 while (namelen != 0)
2343 sdl->sdl_data[--namelen] = 0xff;
2344 }
2345 }
2346 else {
2347 /* preserve the first ifaddr */
2348 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2349 }
2350
2351
2352 TAILQ_INIT(&ifp->if_addrhead);
2353 ifa = ifnet_addrs[ifp->if_index - 1];
2354
2355 if (ifa) {
2356 /*
2357 * We don't use if_attach_ifa because we want
2358 * this address to be first on the list.
2359 */
2360 ifaref(ifa);
2361 ifa->ifa_debug |= IFA_ATTACHED;
2362 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2363 }
2364
2365 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2366 ifindex2ifnet[ifp->if_index] = ifp;
2367
2368 ifnet_head_done();
2369 }
2370 dlil_write_end();
2371
2372 if (if_family && if_family->init_if) {
2373 stat = if_family->init_if(ifp);
2374 if (stat) {
2375 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2376 }
2377 }
2378
2379 ifnet_lock_done(ifp);
2380 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
2381
2382 return 0;
2383 }
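
/*
 * Illustrative sketch (not part of the original source): attaching a
 * freshly initialized ifnet with its link-layer address.  The caller
 * is assumed to have filled in the sockaddr_dl; example_attach and
 * lladdr are hypothetical names.
 */
#if 0
static int
example_attach(struct ifnet *ifp, const struct sockaddr_dl *lladdr)
{
	int err = dlil_if_attach_with_address(ifp, lladdr);

	if (err == EEXIST)
		printf("example: interface is already attached\n");
	return err;
}
#endif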
2384
2385 int
2386 dlil_if_attach(struct ifnet *ifp)
2387 {
2388 return dlil_if_attach_with_address(ifp, NULL);
2389 }
2390
2391
2392 int
2393 dlil_if_detach(struct ifnet *ifp)
2394 {
2395 struct ifnet_filter *filter;
2396 struct ifnet_filter *filter_next;
2397 int zeroed = 0;
2398 int retval = 0;
2399 struct ifnet_filter_head fhead;
2400
2401
2402 ifnet_lock_exclusive(ifp);
2403
2404 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2405 /* Interface has already been detached */
2406 ifnet_lock_done(ifp);
2407 return ENXIO;
2408 }
2409
2410 /*
2411 * Indicate this interface is being detached.
2412 *
2413 * This should prevent protocols from attaching
2414 * from this point on. Interface will remain on
2415 * the list until all of the protocols are detached.
2416 */
2417 ifp->if_eflags |= IFEF_DETACHING;
2418 ifnet_lock_done(ifp);
2419
2420 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
2421
2422 if ((retval = dlil_write_begin()) != 0) {
2423 if (retval == EDEADLK) {
2424 retval = DLIL_WAIT_FOR_FREE;
2425
2426 /* We need to perform a delayed detach */
2427 ifp->if_delayed_detach = 1;
2428 dlil_detach_waiting = 1;
2429 wakeup(&dlil_detach_waiting);
2430 }
2431 return retval;
2432 }
2433
2434 /* Steal the list of interface filters */
2435 fhead = ifp->if_flt_head;
2436 TAILQ_INIT(&ifp->if_flt_head);
2437
2438 /* unuse the interface */
2439 zeroed = ifp_unuse(ifp);
2440
2441 dlil_write_end();
2442
2443 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2444 filter_next = TAILQ_NEXT(filter, filt_next);
2445 dlil_detach_filter_internal(filter, 1);
2446 }
2447
2448 if (zeroed == 0) {
2449 retval = DLIL_WAIT_FOR_FREE;
2450 }
2451 else
2452 {
2453 ifp_use_reached_zero(ifp);
2454 }
2455
2456 return retval;
2457 }
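
/*
 * Illustrative sketch (not part of the original source): starting an
 * interface detach and interpreting the result.  example_stop is a
 * hypothetical caller.
 */
#if 0
static void
example_stop(struct ifnet *ifp)
{
	int err = dlil_if_detach(ifp);

	if (err == DLIL_WAIT_FOR_FREE) {
		/*
		 * The detach is still in progress; wait for the
		 * interface-detached notification (or the if_free
		 * callback) before tearing down driver state.
		 */
	} else if (err == ENXIO) {
		/* a detach was already under way */
	}
}
#endif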
2458
2459
2460 int
2461 dlil_reg_if_modules(u_long interface_family,
2462 struct dlil_ifmod_reg_str *ifmod)
2463 {
2464 struct if_family_str *if_family;
2465
2466
2467 if (find_family_module(interface_family)) {
2468 DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
2469 interface_family);
2470 return EEXIST;
2471 }
2472
2473 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2474 (!ifmod->add_proto) || (!ifmod->del_proto)) {
2475 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
2476 return EINVAL;
2477 }
2478
2479 /*
2480 * The following is a gross hack to keep from breaking
2481 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2482 * does not zero the reserved fields in dlil_ifmod_reg_str.
2483 * As a result, we have to zero any function pointer that was
2484 * still a reserved field at the time Vicomsoft built their
2485 * kext. Radar #2974305
2486 */
2487 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
2488 if (interface_family == 123) { /* Vicom */
2489 ifmod->init_if = 0;
2490 } else {
2491 return EINVAL;
2492 }
2493 }
2494
2495 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2496 if (!if_family) {
2497 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
2498 return ENOMEM;
2499 }
2500
2501 bzero(if_family, sizeof(struct if_family_str));
2502
2503 if_family->if_family = interface_family & 0xffff;
2504 if_family->shutdown = ifmod->shutdown;
2505 if_family->add_if = ifmod->add_if;
2506 if_family->del_if = ifmod->del_if;
2507 if_family->init_if = ifmod->init_if;
2508 if_family->add_proto = ifmod->add_proto;
2509 if_family->del_proto = ifmod->del_proto;
2510 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
2511 if_family->refcnt = 1;
2512 if_family->flags = 0;
2513
2514 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
2515 return 0;
2516 }
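
/*
 * Illustrative sketch (not part of the original source): registering a
 * hypothetical interface family.  Given the reserved-field check above,
 * the important step is zeroing the whole dlil_ifmod_reg_str first.
 * EXAMPLE_IF_FAMILY and the example_* callbacks are assumptions; their
 * prototypes (omitted here) come from dlil.h.
 */
#if 0
static int
example_register_family(void)
{
	struct dlil_ifmod_reg_str ifmod;

	bzero(&ifmod, sizeof(ifmod));	/* keep the reserved fields zero */
	ifmod.add_if = example_add_if;		/* required */
	ifmod.del_if = example_del_if;		/* required */
	ifmod.add_proto = example_add_proto;	/* required */
	ifmod.del_proto = example_del_proto;	/* required */
	return dlil_reg_if_modules(EXAMPLE_IF_FAMILY, &ifmod);
}
#endif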
2517
2518 int dlil_dereg_if_modules(u_long interface_family)
2519 {
2520 struct if_family_str *if_family;
2521 int ret = 0;
2522
2523
2524 if_family = find_family_module(interface_family);
2525 if (if_family == 0) {
2526 return ENXIO;
2527 }
2528
2529 if (--if_family->refcnt == 0) {
2530 if (if_family->shutdown)
2531 (*if_family->shutdown)();
2532
2533 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2534 FREE(if_family, M_IFADDR);
2535 }
2536 else {
2537 if_family->flags |= DLIL_SHUTDOWN;
2538 ret = DLIL_WAIT_FOR_FREE;
2539 }
2540
2541 return ret;
2542 }
2543
2544
2545
2546 int
2547 dlil_reg_proto_module(
2548 u_long protocol_family,
2549 u_long interface_family,
2550 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2551 int (*detach)(struct ifnet *ifp, u_long protocol_family))
2552 {
2553 struct proto_family_str *proto_family;
2554
2555 if (attach == NULL) return EINVAL;
2556
2557 lck_mtx_lock(proto_family_mutex);
2558
2559 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2560 if (proto_family->proto_family == protocol_family &&
2561 proto_family->if_family == interface_family) {
2562 lck_mtx_unlock(proto_family_mutex);
2563 return EEXIST;
2564 }
2565 }
2566
2567 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2568 if (!proto_family) {
2569 lck_mtx_unlock(proto_family_mutex);
2570 return ENOMEM;
2571 }
2572
2573 bzero(proto_family, sizeof(struct proto_family_str));
2574 proto_family->proto_family = protocol_family;
2575 proto_family->if_family = interface_family & 0xffff;
2576 proto_family->attach_proto = attach;
2577 proto_family->detach_proto = detach;
2578
2579 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
2580 lck_mtx_unlock(proto_family_mutex);
2581 return 0;
2582 }
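
/*
 * Illustrative sketch (not part of the original source): registering
 * attach/detach handlers for a (protocol, interface) family pair.  The
 * example_proto_* functions are hypothetical; APPLE_IF_FAM_ETHERNET is
 * assumed to come from net/if_var.h.
 */
#if 0
static int example_proto_attach(struct ifnet *ifp, u_long protocol_family);
static int example_proto_detach(struct ifnet *ifp, u_long protocol_family);

static int
example_register_proto(void)
{
	/* detach may be NULL; dlil_unplumb_protocol then falls back
	   to dlil_detach_protocol (see below) */
	return dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
	    example_proto_attach, example_proto_detach);
}
#endif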
2583
2584 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2585 {
2586 struct proto_family_str *proto_family;
2587 int ret = 0;
2588
2589 lck_mtx_lock(proto_family_mutex);
2590
2591 proto_family = find_proto_module(protocol_family, interface_family);
2592 if (proto_family == 0) {
2593 lck_mtx_unlock(proto_family_mutex);
2594 return ENXIO;
2595 }
2596
2597 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2598 FREE(proto_family, M_IFADDR);
2599
2600 lck_mtx_unlock(proto_family_mutex);
2601 return ret;
2602 }
2603
2604 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
2605 {
2606 struct proto_family_str *proto_family;
2607 int ret = 0;
2608
2609 lck_mtx_lock(proto_family_mutex);
2610 proto_family = find_proto_module(protocol_family, ifp->if_family);
2611 if (proto_family == 0) {
2612 lck_mtx_unlock(proto_family_mutex);
2613 return ENXIO;
2614 }
2615
2616 ret = proto_family->attach_proto(ifp, protocol_family);
2617
2618 lck_mtx_unlock(proto_family_mutex);
2619 return ret;
2620 }
2621
2622
2623 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2624 {
2625 struct proto_family_str *proto_family;
2626 int ret = 0;
2627
2628 lck_mtx_lock(proto_family_mutex);
2629
2630 proto_family = find_proto_module(protocol_family, ifp->if_family);
2631 if (proto_family && proto_family->detach_proto)
2632 ret = proto_family->detach_proto(ifp, protocol_family);
2633 else
2634 ret = dlil_detach_protocol(ifp, protocol_family);
2635
2636 lck_mtx_unlock(proto_family_mutex);
2637 return ret;
2638 }
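
/*
 * Illustrative sketch (not part of the original source): plumbing a
 * protocol onto an interface and unplumbing it later.  example_ifp is
 * assumed to be an attached ifnet.
 */
#if 0
static void
example_plumb_cycle(struct ifnet *example_ifp)
{
	if (dlil_plumb_protocol(PF_INET, example_ifp) == 0) {
		/* ... protocol traffic flows here ... */
		(void)dlil_unplumb_protocol(PF_INET, example_ifp);
	}
}
#endif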
2639
2640 static errno_t
2641 dlil_recycle_ioctl(
2642 __unused ifnet_t ifnet_ptr,
2643 __unused u_int32_t ioctl_code,
2644 __unused void *ioctl_arg)
2645 {
2646 return EOPNOTSUPP;
2647 }
2648
2649 static int
2650 dlil_recycle_output(
2651 __unused struct ifnet *ifnet_ptr,
2652 struct mbuf *m)
2653 {
2654 m_freem(m);
2655 return 0;
2656 }
2657
2658 static void
2659 dlil_recycle_free(
2660 __unused ifnet_t ifnet_ptr)
2661 {
2662 }
2663
2664 static errno_t
2665 dlil_recycle_set_bpf_tap(
2666 __unused ifnet_t ifp,
2667 __unused bpf_tap_mode mode,
2668 __unused bpf_packet_func callback)
2669 {
2670 /* XXX not sure what to do here */
2671 return 0;
2672 }
2673
2674 int dlil_if_acquire(
2675 u_long family,
2676 const void *uniqueid,
2677 size_t uniqueid_len,
2678 struct ifnet **ifp)
2679 {
2680 struct ifnet *ifp1 = NULL;
2681 struct dlil_ifnet *dlifp1 = NULL;
2682 int ret = 0;
2683
2684 lck_mtx_lock(dlil_ifnet_mutex);
2685 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2686
2687 ifp1 = (struct ifnet *)dlifp1;
2688
2689 if (ifp1->if_family == family) {
2690
2691 /* same uniqueid and same len, or no uniqueid specified by either */
2692 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2693 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2694
2695 /* check for matching interface in use */
2696 if (ifp1->if_eflags & IFEF_INUSE) {
2697 if (uniqueid_len) {
2698 ret = EBUSY;
2699 goto end;
2700 }
2701 }
2702 else {
2703 if (!ifp1->if_lock)
2704 panic("ifp's lock is gone\n");
2705 ifnet_lock_exclusive(ifp1);
2706 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2707 ifnet_lock_done(ifp1);
2708 *ifp = ifp1;
2709 goto end;
2710 }
2711 }
2712 }
2713 }
2714
2715 /* no interface found, allocate a new one */
2716 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2717 if (dlifp1 == 0) {
2718 ret = ENOMEM;
2719 goto end;
2720 }
2721
2722 bzero(dlifp1, sizeof(*dlifp1));
2723
2724 if (uniqueid_len) {
2725 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2726 if (dlifp1->if_uniqueid == 0) {
2727 FREE(dlifp1, M_NKE);
2728 ret = ENOMEM;
2729 goto end;
2730 }
2731 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2732 dlifp1->if_uniqueid_len = uniqueid_len;
2733 }
2734
2735 ifp1 = (struct ifnet *)dlifp1;
2736 ifp1->if_eflags |= IFEF_INUSE;
2737 ifp1->if_name = dlifp1->if_namestorage;
2738
2739 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2740
2741 *ifp = ifp1;
2742
2743 end:
2744 lck_mtx_unlock(dlil_ifnet_mutex);
2745
2746 return ret;
2747 }
2748
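/*
 * Return an ifnet to the recycle pool: the function pointers are
 * redirected to the inert dlil_recycle_* stubs above so that any
 * straggling callers hit safe no-ops, and the interface name is copied
 * into storage owned by the enclosing dlil_ifnet.
 */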
2749 void dlil_if_release(struct ifnet *ifp)
2750 {
2751 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2752
2753
2754 /* Interface does not have a lock until it is attached - radar 3713951 */
2755 if (ifp->if_lock)
2756 ifnet_lock_exclusive(ifp);
2757 ifp->if_eflags &= ~IFEF_INUSE;
2758 ifp->if_ioctl = dlil_recycle_ioctl;
2759 ifp->if_output = dlil_recycle_output;
2760 ifp->if_free = dlil_recycle_free;
2761 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2762
2763 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2764 ifp->if_name = dlifp->if_namestorage;
2765 if (ifp->if_lock)
2766 ifnet_lock_done(ifp);
2767
2768 }
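
/*
 * Illustrative sketch (not part of the original source): the
 * acquire/attach life cycle, reusing a recycled ifnet with a matching
 * uniqueid when one exists.  EXAMPLE_IF_FAMILY and example_create are
 * hypothetical.
 */
#if 0
static int
example_create(struct ifnet **result)
{
	static const char uniqueid[] = "example0";
	struct ifnet *ifp = NULL;
	int err;

	err = dlil_if_acquire(EXAMPLE_IF_FAMILY, uniqueid,
	    sizeof(uniqueid), &ifp);
	if (err != 0)
		return err;	/* EBUSY: that uniqueid is still in use */

	/* ... fill in if_name, if_unit, if_family, callbacks ... */
	err = dlil_if_attach(ifp);
	if (err == 0)
		*result = ifp;
	else
		dlil_if_release(ifp);	/* put it back in the pool */
	return err;
}
#endif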