/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG       DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END       DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT  DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT  DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))

#define MAX_DL_TAGS         16
#define MAX_DLIL_FILTERS    16
#define MAX_FRAME_TYPE_SIZE 4   /* LONGWORDS */
#define MAX_LINKADDR        4   /* LONGWORDS */
#define M_NKE               M_IFADDR

#define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF printf
#else
#define DLIL_PRINTF kprintf
#endif

enum {
    kProtoKPI_DLIL = 0,
    kProtoKPI_v1 = 1
};

struct if_proto {
    SLIST_ENTRY(if_proto) next_hash;
    int                   refcount;
    int                   detaching;
    struct ifnet          *ifp;
    struct domain         *dl_domain;
    protocol_family_t     protocol_family;
    int                   proto_kpi;
    union {
        struct {
            dl_input_func      dl_input;
            dl_pre_output_func dl_pre_output;
            dl_event_func      dl_event;
            dl_offer_func      dl_offer;
            dl_ioctl_func      dl_ioctl;
            dl_detached_func   dl_detached;
        } dlil;
        struct {
            proto_media_input         input;
            proto_media_preout        pre_output;
            proto_media_event         event;
            proto_media_ioctl         ioctl;
            proto_media_detached      detached;
            proto_media_resolve_multi resolve_multi;
            proto_media_send_arp      send_arp;
        } v1;
    } kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);

struct dlil_ifnet {
    /* ifnet and drvr_ext are used by the stack and drivers;
       drvr_ext extends the public ifnet and must follow dl_if */
    struct ifnet dl_if;                 /* public ifnet */

    /* dlil private fields */
    TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnet structures are linked
                                           together; this is not the ifnet list */
    void   *if_uniqueid;                /* unique id identifying the interface */
    size_t if_uniqueid_len;             /* length of the unique id */
    char   if_namestorage[IFNAMSIZ];    /* interface name storage */
};

struct ifnet_filter {
    TAILQ_ENTRY(ifnet_filter) filt_next;
    ifnet_t filt_ifp;
    int     filt_detaching;

    const char        *filt_name;
    void              *filt_cookie;
    protocol_family_t filt_protocol;
    iff_input_func    filt_input;
    iff_output_func   filt_output;
    iff_event_func    filt_event;
    iff_ioctl_func    filt_ioctl;
    iff_detached_func filt_detached;
};

struct if_family_str {
    TAILQ_ENTRY(if_family_str) if_fam_next;
    u_long if_family;
    int    refcnt;
    int    flags;

#define DLIL_SHUTDOWN 1

    int (*add_if)(struct ifnet *ifp);
    int (*del_if)(struct ifnet *ifp);
    int (*init_if)(struct ifnet *ifp);
    int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
    ifnet_del_proto_func del_proto;
    ifnet_ioctl_func     ifmod_ioctl;
    int (*shutdown)(void);
};

struct proto_family_str {
    TAILQ_ENTRY(proto_family_str) proto_fam_next;
    u_long proto_family;
    u_long if_family;
    int    usecnt;

    int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
    int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
    kIfNetUseCount_MayBeZero = 0,
    kIfNetUseCount_MustNotBeZero = 1
};

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str) if_family_head;
static TAILQ_HEAD(, proto_family_str) proto_family_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_mtx_t *proto_family_mutex;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t *dlil_input_lock;
__private_extern__ thread_t dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr

int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
    union {
        const void* cval;
        void* val;
    } ret;

    ret.cval = ptr;
    return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
    unsigned long new_value;
    unsigned long old_value;
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read == dlil_writer_waiting)
        panic("dlil_read_begin - thread is already a writer");

    do {
again:
        old_value = dlil_read_count;

        if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0) {
            tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
            goto again;
        }

        new_value = old_value + 1;
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

    uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    OSDecrementAtomic((UInt32*)&dlil_read_count);
    uth->dlil_incremented_read--;
    if (dlil_read_count == dlil_writer_waiting)
        wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != 0) {
        return EDEADLK;
    }
    lck_mtx_lock(dlil_mutex);
    OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
    if (dlil_read_count == dlil_writer_waiting) {
        uth->dlil_incremented_read = dlil_writer_waiting;
        return 0;
    }
    else {
        tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
        goto again;
    }
}

static void
dlil_write_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != dlil_writer_waiting)
        panic("dlil_write_end - thread is not a writer");
    OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
    lck_mtx_unlock(dlil_mutex);
    uth->dlil_incremented_read = 0;
    wakeup(&dlil_read_count);
}
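
/*
 * Illustrative sketch (not compiled): the intended discipline for the
 * dlil read/write "lock" above. Readers may nest; a thread already
 * holding the read side must not try to become a writer, and
 * dlil_write_begin reports that case with EDEADLK so the caller can
 * fall back to a delayed (deferred) operation. The function names in
 * this sketch, other than the lock calls themselves, are hypothetical.
 */
#if 0
static void
example_reader(void)
{
    dlil_read_begin();
    /* ... walk the filter/protocol lists (read-only) ... */
    dlil_read_end();
}

static int
example_writer(void)
{
    int error = dlil_write_begin();
    if (error == EDEADLK) {
        /* this thread already holds the read lock; defer the mutation */
        return error;
    }
    /* ... safely modify the filter/protocol lists ... */
    dlil_write_end();
    return 0;
}
#endif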

#define PROTO_HASH_SLOTS 0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
    switch (protocol_family) {
    case PF_INET:
        return 0;
    case PF_INET6:
        return 1;
    case PF_APPLETALK:
        return 2;
    case PF_VLAN:
        return 3;
    default:
        return 4;
    }
}

static struct if_family_str *
find_family_module(u_long if_family)
{
    struct if_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
        if (mod->if_family == (if_family & 0xffff))
            break;
    }

    return mod;
}

static struct proto_family_str *
find_proto_module(u_long proto_family, u_long if_family)
{
    struct proto_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
        if ((mod->proto_family == (proto_family & 0xffff))
            && (mod->if_family == (if_family & 0xffff)))
            break;
    }

    return mod;
}

static struct if_proto *
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
    struct if_proto *proto = NULL;
    u_long i = proto_hash_value(protocol_family);
    if (ifp->if_proto_hash) {
        proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
    }

    while (proto && proto->protocol_family != protocol_family) {
        proto = SLIST_NEXT(proto, next_hash);
    }

    return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
    OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
    int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

    if (oldval == 1) { /* This was the last reference */
        FREE(proto, M_IFADDR);
    }
}

__private_extern__ void
ifnet_lock_assert(
    __unused struct ifnet *ifp,
    __unused int what)
{
#if IFNET_RW_LOCK
    /*
     * Not implemented for rw locks.
     *
     * Function exists so when/if we use mutex we can
     * enable this check.
     */
#else
    lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_shared(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_exclusive(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_done(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared()
{
    lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive()
{
    lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done()
{
    lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *
ifbyfamily(u_long family, short unit)
{
    struct ifnet *ifp;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link)
        if ((family == ifp->if_family) && (ifp->if_unit == unit))
            break;
    ifnet_head_done();

    return ifp;
}

static int
dlil_ifp_proto_count(struct ifnet *ifp)
{
    int count = 0;
    int i;

    if (ifp->if_proto_hash != NULL) {
        for (i = 0; i < PROTO_HASH_SLOTS; i++) {
            struct if_proto *proto;
            SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                count++;
            }
        }
    }

    return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
    struct net_event_data *event_data, u_long event_data_len)
{
    struct net_event_data ev_data;
    struct kev_msg ev_msg;

    /*
     * A net event always starts with a net_event_data structure,
     * but the caller can generate a simple net event or
     * provide a longer event structure to post.
     */

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = event_subclass;
    ev_msg.event_code = event_code;

    if (event_data == 0) {
        event_data = &ev_data;
        event_data_len = sizeof(struct net_event_data);
    }

    strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
    event_data->if_family = ifp->if_family;
    event_data->if_unit = (unsigned long) ifp->if_unit;

    ev_msg.dv[0].data_length = event_data_len;
    ev_msg.dv[0].data_ptr = event_data;
    ev_msg.dv[1].data_length = 0;

    dlil_event_internal(ifp, &ev_msg);
}
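
/*
 * Illustrative sketch (not compiled): posting a simple link event with
 * dlil_post_msg. Passing NULL event data makes dlil_post_msg supply a
 * minimal net_event_data on the caller's behalf; KEV_DL_LINK_ON is just
 * an example event code, and the function name is hypothetical.
 */
#if 0
static void
example_link_up(struct ifnet *ifp)
{
    /* interface name/family/unit are filled in by dlil_post_msg */
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
}
#endif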

void dlil_init(void);
void
dlil_init(void)
{
    lck_grp_attr_t *grp_attributes = 0;
    lck_attr_t *lck_attributes = 0;
    lck_grp_t *input_lock_grp = 0;

    TAILQ_INIT(&dlil_ifnet_head);
    TAILQ_INIT(&if_family_head);
    TAILQ_INIT(&proto_family_head);
    TAILQ_INIT(&ifnet_head);

    /* Setup the lock groups we will use */
    grp_attributes = lck_grp_attr_alloc_init();
    lck_grp_attr_setdefault(grp_attributes);

    dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
#if IFNET_RW_LOCK
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#else
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#endif
    ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
    input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
    lck_grp_attr_free(grp_attributes);
    grp_attributes = 0;

    /* Setup the lock attributes we will use */
    lck_attributes = lck_attr_alloc_init();
    lck_attr_setdefault(lck_attributes);

    ifnet_lock_attr = lck_attr_alloc_init();
    lck_attr_setdefault(ifnet_lock_attr);

    dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
    input_lock_grp = 0;

    ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
    proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

    lck_attr_free(lck_attributes);
    lck_attributes = 0;

    /*
     * Start up the dlil input thread once everything is initialized.
     */
    (void) kernel_thread(kernel_task, dlil_input_thread);
    (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
    struct ifnet *ifp,
    const struct iff_filter *if_filter,
    interface_filter_t *filter_ref)
{
    int retval = 0;
    struct ifnet_filter *filter;

    MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
    if (filter == NULL)
        return ENOMEM;
    bzero(filter, sizeof(*filter));

    filter->filt_ifp = ifp;
    filter->filt_cookie = if_filter->iff_cookie;
    filter->filt_name = if_filter->iff_name;
    filter->filt_protocol = if_filter->iff_protocol;
    filter->filt_input = if_filter->iff_input;
    filter->filt_output = if_filter->iff_output;
    filter->filt_event = if_filter->iff_event;
    filter->filt_ioctl = if_filter->iff_ioctl;
    filter->filt_detached = if_filter->iff_detached;

    if ((retval = dlil_write_begin()) != 0) {
        /* Failed to acquire the write lock */
        FREE(filter, M_NKE);
        return retval;
    }
    TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
    dlil_write_end();
    *filter_ref = filter;
    return retval;
}
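
/*
 * Illustrative sketch (not compiled): attaching an interface filter.
 * A caller fills in only the iff_filter callbacks it needs and keeps
 * the returned interface_filter_t for the later detach. The callback
 * and function names here are hypothetical.
 */
#if 0
static errno_t
example_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
    /* return EJUSTRETURN to swallow the packet, 0 to pass it along */
    return 0;
}

static int
example_attach(struct ifnet *ifp, interface_filter_t *ref)
{
    struct iff_filter filt;

    bzero(&filt, sizeof(filt));
    filt.iff_name = "com.example.filter";
    filt.iff_protocol = 0;          /* 0 == match all protocols */
    filt.iff_input = example_iff_input;
    return dlil_attach_filter(ifp, &filt, ref);
}
#endif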

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
    int retval = 0;

    if (detached == 0) {
        ifnet_t ifp = NULL;
        interface_filter_t entry = NULL;

        /* Take the write lock */
        retval = dlil_write_begin();
        if (retval != 0 && retval != EDEADLK)
            return retval;

        /*
         * At this point either we have the write lock (retval == 0)
         * or we couldn't get it (retval == EDEADLK) because someone
         * else up the stack is holding the read lock. It is safe to
         * read, since either the read or the write lock is held.
         * Verify the filter parameter before proceeding.
         */
        ifnet_head_lock_shared();
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
                if (entry == filter)
                    break;
            }
            if (entry == filter)
                break;
        }
        ifnet_head_done();

        if (entry != filter) {
            /* filter parameter is not a valid filter ref */
            if (retval == 0) {
                dlil_write_end();
            }
            return EINVAL;
        }

        if (retval == EDEADLK) {
            /* Perform a delayed detach */
            filter->filt_detaching = 1;
            dlil_detach_waiting = 1;
            wakeup(&dlil_detach_waiting);
            return 0;
        }

        /* Remove the filter from the list */
        TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
        dlil_write_end();
    }

    /* Call the detached function if there is one */
    if (filter->filt_detached)
        filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

    /* Free the filter */
    FREE(filter, M_NKE);

    return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
    if (filter == NULL)
        return;
    dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
    __unused void* foo,
    __unused wait_result_t wait)
{
    while (1) {
        struct mbuf *m, *m_loop;

        lck_spin_lock(dlil_input_lock);
        m = dlil_input_mbuf_head;
        dlil_input_mbuf_head = NULL;
        dlil_input_mbuf_tail = NULL;
        m_loop = dlil_input_loop_head;
        dlil_input_loop_head = NULL;
        dlil_input_loop_tail = NULL;
        lck_spin_unlock(dlil_input_lock);

        /*
         * NOTE warning %%% attention !!!!
         * We should think about putting some thread starvation safeguards if
         * we deal with long chains of packets.
         */
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
            m = m0;
        }
        m = m_loop;
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;
            struct ifnet *ifp = &loif[0];

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(ifp, m, header);
            m = m0;
        }

        proto_input_run();

        if (dlil_input_mbuf_head == NULL &&
            dlil_input_loop_head == NULL && inject_buckets == 0) {
            assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
            (void) thread_block(dlil_input_thread_continue);
            /* NOTREACHED */
        }
    }
}

void dlil_input_thread(void)
{
    register thread_t self = current_thread();

    ml_thread_policy(self, MACHINE_GROUP,
        (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

    dlil_initialized = 1;
    dlil_input_thread_ptr = current_thread();
    dlil_input_thread_continue(NULL, THREAD_RESTART);
}

int
dlil_input_with_stats(
    struct ifnet *ifp,
    struct mbuf *m_head,
    struct mbuf *m_tail,
    const struct ifnet_stat_increment_param *stats)
{
    /*
     * WARNING
     * Because of loopbacked multicast we cannot stuff the ifp in
     * the rcvif of the packet header: loopback has its own dlil
     * input queue
     */

    lck_spin_lock(dlil_input_lock);
    if (ifp->if_type != IFT_LOOP) {
        if (dlil_input_mbuf_head == NULL)
            dlil_input_mbuf_head = m_head;
        else if (dlil_input_mbuf_tail != NULL)
            dlil_input_mbuf_tail->m_nextpkt = m_head;
        dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
    } else {
        if (dlil_input_loop_head == NULL)
            dlil_input_loop_head = m_head;
        else if (dlil_input_loop_tail != NULL)
            dlil_input_loop_tail->m_nextpkt = m_head;
        dlil_input_loop_tail = m_tail ? m_tail : m_head;
    }
    if (stats) {
        ifp->if_data.ifi_ipackets += stats->packets_in;
        ifp->if_data.ifi_ibytes += stats->bytes_in;
        ifp->if_data.ifi_ierrors += stats->errors_in;

        ifp->if_data.ifi_opackets += stats->packets_out;
        ifp->if_data.ifi_obytes += stats->bytes_out;
        ifp->if_data.ifi_oerrors += stats->errors_out;

        ifp->if_data.ifi_collisions += stats->collisions;
        ifp->if_data.ifi_iqdrops += stats->dropped;
    }
    lck_spin_unlock(dlil_input_lock);

    wakeup((caddr_t)&dlil_input_thread_wakeup);

    return 0;
}

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
    return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}
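
/*
 * Illustrative sketch (not compiled): how a driver might hand a chain
 * of received packets to dlil_input_with_stats. Each mbuf is assumed
 * to already carry its receive interface in m_pkthdr.rcvif and its
 * frame header in m_pkthdr.header. The function name and parameters
 * other than the DLIL calls are hypothetical.
 */
#if 0
static void
example_rx_complete(struct ifnet *ifp, struct mbuf *head,
    struct mbuf *tail, u_int32_t npkts, u_int32_t nbytes)
{
    struct ifnet_stat_increment_param stats;

    bzero(&stats, sizeof(stats));
    stats.packets_in = npkts;
    stats.bytes_in = nbytes;
    /* queues the chain and wakes the dlil input thread */
    (void) dlil_input_with_stats(ifp, head, tail, &stats);
}
#endif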

int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
    char *frame_header)
{
    int retval;
    struct if_proto *ifproto = 0;
    protocol_family_t protocol_family;
    struct ifnet_filter *filter;

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    /*
     * Lock the interface while we run through
     * the filters and the demux. This lock
     * protects the filter list and the demux list.
     */
    dlil_read_begin();

    /*
     * Call the family demux module. If the demux module finds a match
     * for the frame it will fill in the protocol family.
     */
    retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
    if (retval != 0)
        protocol_family = 0;
    if (retval == EJUSTRETURN) {
        dlil_read_end();
        return 0;
    }

    /* DANGER!!! */
    if (m->m_flags & (M_BCAST|M_MCAST))
        ifp->if_imcasts++;

    /*
     * Run interface filters
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            int filter_result;
            if (filter->filt_input && (filter->filt_protocol == 0 ||
                filter->filt_protocol == protocol_family)) {
                filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

                if (filter_result) {
                    dlil_read_end();
                    if (filter_result == EJUSTRETURN) {
                        filter_result = 0;
                    }
                    else {
                        m_freem(m);
                    }

                    return filter_result;
                }
            }
        }
    }

    /* Demux is done, interface filters have been processed, unlock the mutex */
    if (retval || ((m->m_flags & M_PROMISC) != 0)) {
        dlil_read_end();
        if (retval != EJUSTRETURN) {
            m_freem(m);
            return retval;
        }
        else
            return 0;
    }

    ifproto = find_attached_proto(ifp, protocol_family);

    if (ifproto == 0) {
        dlil_read_end();
        DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
        m_freem(m);
        return 0;
    }

    /*
     * Hand the packet off to the protocol.
     */

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_lock(ifproto->dl_domain->dom_mtx);
    }

    if (ifproto->proto_kpi == kProtoKPI_DLIL)
        retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
            ifp, ifproto->protocol_family,
            TRUE);
    else
        retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
    }

    dlil_read_end();

    if (retval == EJUSTRETURN)
        retval = 0;
    else if (retval)
        m_freem(m);

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);
    return retval;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
    struct ifnet_filter *filter;

    if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
        dlil_read_begin();

        /* Pass the event to the interface filters */
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if (filter->filt_event)
                filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
        }

        if (ifp->if_proto_hash) {
            int i;

            for (i = 0; i < PROTO_HASH_SLOTS; i++) {
                struct if_proto *proto;

                SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                    /* Pass the event to the protocol */
                    if (proto->proto_kpi == kProtoKPI_DLIL) {
                        if (proto->kpi.dlil.dl_event)
                            proto->kpi.dlil.dl_event(ifp, event);
                    }
                    else {
                        if (proto->kpi.v1.event)
                            proto->kpi.v1.event(ifp, proto->protocol_family, event);
                    }
                }
            }
        }

        dlil_read_end();

        /* Pass the event to the interface */
        if (ifp->if_event)
            ifp->if_event(ifp, event);

        if (ifp_unuse(ifp))
            ifp_use_reached_zero(ifp);
    }

    return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
    int result = 0;
    struct kev_msg kev_msg;

    kev_msg.vendor_code = event->vendor_code;
    kev_msg.kev_class = event->kev_class;
    kev_msg.kev_subclass = event->kev_subclass;
    kev_msg.event_code = event->event_code;
    kev_msg.dv[0].data_ptr = &event->event_data[0];
    kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
    kev_msg.dv[1].data_length = 0;

    result = dlil_event_internal(ifp, &kev_msg);

    return result;
}

int
dlil_output_list(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *packetlist,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int error, retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;
    struct if_proto *proto = 0;
    struct mbuf *m;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
#if BRIDGE
    if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
    if ((raw != 0) || proto_family != PF_INET) {
#endif
        while (packetlist) {
            m = packetlist;
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
            error = dlil_output(ifp, proto_family, m, route, dest, raw);
            if (error) {
                if (packetlist)
                    m_freem_list(packetlist);
                return (error);
            }
        }
        return (0);
    }

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;
    m = packetlist;
    packetlist = packetlist->m_nextpkt;
    m->m_nextpkt = NULL;

    proto = find_attached_proto(ifp, proto_family);
    if (proto == NULL) {
        retval = ENXIO;
        goto cleanup;
    }

    retval = 0;
    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_pre_output)
            retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }
    else {
        if (proto->kpi.v1.pre_output)
            retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }

    if (retval) {
        if (retval != EJUSTRETURN) {
            m_freem(m);
        }
        goto cleanup;
    }

    do {
        if (ifp->if_framer) {
            retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
            if (retval) {
                if (retval != EJUSTRETURN) {
                    m_freem(m);
                }
                goto cleanup;
            }
        }

        /*
         * Let interface filters (if any) do their thing ...
         */
        /* Do not pass VLAN tagged packets to filters PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
                if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                    filter->filt_output) {
                    retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                    if (retval) {
                        if (retval == EJUSTRETURN)
                            continue;
                        else {
                            m_freem(m);
                        }
                        goto cleanup;
                    }
                }
            }
        }

        /*
         * Finally, call the driver.
         */
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
        retval = ifp->if_output(ifp, m);
        if (retval) {
            printf("dlil_output_list: output error retval = %x\n", retval);
            goto cleanup;
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

        m = packetlist;
        if (m) {
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
        }
    } while (m);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

cleanup:
    dlil_read_end();
    if (packetlist) /* if any packets are left, clean up */
        m_freem_list(packetlist);
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
int
dlil_output(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *m,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    if (raw == 0) {
        struct if_proto *proto = 0;

        proto = find_attached_proto(ifp, proto_family);
        if (proto == NULL) {
            m_freem(m);
            retval = ENXIO;
            goto cleanup;
        }

        retval = 0;
        if (proto->proto_kpi == kProtoKPI_DLIL) {
            if (proto->kpi.dlil.dl_pre_output)
                retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }
        else {
            if (proto->kpi.v1.pre_output)
                retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }

        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

    /*
     * Call framing module
     */
    if ((raw == 0) && (ifp->if_framer)) {
        retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

#if BRIDGE
    /* !!!LOCKING!!!
     *
     * Need to consider how to handle this.
     */
    broken-locking
    if (do_bridge) {
        struct mbuf *m0 = m;
        struct ether_header *eh = mtod(m, struct ether_header *);

        if (m->m_pkthdr.rcvif)
            m->m_pkthdr.rcvif = NULL;
        ifp = bridge_dst_lookup(eh);
        bdg_forward(&m0, ifp);
        if (m0)
            m_freem(m0);

        return 0;
    }
#endif

    /*
     * Let interface filters (if any) do their thing ...
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                filter->filt_output) {
                retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                if (retval) {
                    if (retval != EJUSTRETURN)
                        m_freem(m);
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Finally, call the driver.
     */
    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
    retval = ifp->if_output(ifp, m);
    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0);

cleanup:
    dlil_read_end();
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}
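
/*
 * Illustrative sketch (not compiled): a protocol sending one packet
 * through dlil_output. With raw == 0 the attached protocol's
 * pre_output callback and the interface framer run before the driver's
 * if_output; with raw != 0 the mbuf is assumed to already carry a
 * complete frame. The caller name and destination are hypothetical.
 */
#if 0
static int
example_send(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dest)
{
    /* framed by pre_output + if_framer before reaching the driver */
    return dlil_output(ifp, PF_INET, m, NULL, dest, 0);
}
#endif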

int
dlil_ioctl(u_long proto_fam,
    struct ifnet *ifp,
    u_long ioctl_code,
    caddr_t ioctl_arg)
{
    struct ifnet_filter *filter;
    int retval = EOPNOTSUPP;
    int result = 0;
    struct if_family_str *if_family;
    int holding_read = 0;

    /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
    result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
    if (result != 0)
        return EOPNOTSUPP;

    dlil_read_begin();
    holding_read = 1;

    /* Run the interface filters first.
     * We want to run all filters before calling the protocol,
     * interface family, or interface.
     */
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
            filter->filt_ioctl != NULL) {
            result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /* Allow the protocol to handle the ioctl */
    if (proto_fam) {
        struct if_proto *proto = find_attached_proto(ifp, proto_fam);

        if (proto != 0) {
            result = EOPNOTSUPP;
            if (proto->proto_kpi == kProtoKPI_DLIL) {
                if (proto->kpi.dlil.dl_ioctl)
                    result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
            }
            else {
                if (proto->kpi.v1.ioctl)
                    result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
            }

            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Since we have incremented the use count on the ifp, we are guaranteed
     * that the ifp will not go away (the function pointers may not be changed).
     * We release the dlil read lock so the interface ioctl may trigger a
     * protocol attach. This happens with vlan and may occur with other virtual
     * interfaces.
     */
    dlil_read_end();
    holding_read = 0;

    /* retval is either 0 or EOPNOTSUPP */

    /*
     * Let the family handle this ioctl.
     * If it returns something non-zero and not EOPNOTSUPP, we're done.
     * If it returns zero, the ioctl was handled, so set retval to zero.
     */
    if_family = find_family_module(ifp->if_family);
    if ((if_family) && (if_family->ifmod_ioctl)) {
        result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

        /* Only update retval if no one has handled the ioctl */
        if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
            if (result == ENOTSUP)
                result = EOPNOTSUPP;
            retval = result;
            if (retval && retval != EOPNOTSUPP) {
                goto cleanup;
            }
        }
    }

    /*
     * Let the interface handle this ioctl.
     * If it returns EOPNOTSUPP, ignore that, we may have
     * already handled this in the protocol or family.
     */
    if (ifp->if_ioctl)
        result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

    /* Only update retval if no one has handled the ioctl */
    if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
        if (result == ENOTSUP)
            result = EOPNOTSUPP;
        retval = result;
        if (retval && retval != EOPNOTSUPP) {
            goto cleanup;
        }
    }

cleanup:
    if (holding_read)
        dlil_read_end();
    if (ifp_unuse(ifp))
        ifp_use_reached_zero(ifp);

    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
    ifnet_t ifp,
    bpf_tap_mode mode,
    bpf_packet_func callback)
{
    errno_t error = 0;

    dlil_read_begin();
    if (ifp->if_set_bpf_tap)
        error = ifp->if_set_bpf_tap(ifp, mode, callback);
    dlil_read_end();

    return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
    struct ifnet *ifp,
    const struct sockaddr *proto_addr,
    struct sockaddr *ll_addr,
    size_t ll_len)
{
    errno_t result = EOPNOTSUPP;
    struct if_proto *proto;
    const struct sockaddr *verify;

    dlil_read_begin();

    bzero(ll_addr, ll_len);

    /* Call the protocol first */
    proto = find_attached_proto(ifp, proto_addr->sa_family);
    if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
        proto->kpi.v1.resolve_multi != NULL) {
        result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
            (struct sockaddr_dl*)ll_addr, ll_len);
    }

    /* Let the interface verify the multicast address */
    if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
        if (result == 0)
            verify = ll_addr;
        else
            verify = proto_addr;
        result = ifp->if_check_multi(ifp, verify);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    struct if_proto *proto;
    errno_t result = 0;

    dlil_read_begin();

    proto = find_attached_proto(ifp, target_proto->sa_family);
    if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
        proto->kpi.v1.send_arp == NULL) {
        result = ENOTSUP;
    }
    else {
        result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    errno_t result = 0;

    if (target_proto == NULL || (sender_proto &&
        sender_proto->sa_family != target_proto->sa_family))
        return EINVAL;

    /*
     * If this is an ARP request and the target IP is IPv4LL,
     * send the request on all interfaces.
     */
    if (IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)
        && ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
        arpop == ARPOP_REQUEST) {
        ifnet_t *ifp_list;
        u_int32_t count;
        u_int32_t ifp_on;

        result = ENOTSUP;

        if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
            for (ifp_on = 0; ifp_on < count; ifp_on++) {
                errno_t new_result;
                ifaddr_t source_hw = NULL;
                ifaddr_t source_ip = NULL;
                struct sockaddr_in source_ip_copy;

                /*
                 * Only ARP on interfaces marked for IPv4LL ARPing. This may
                 * mean that we don't ARP on the interface the subnet route
                 * points to.
                 */
                if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
                    continue;
                }

                source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

                /* Find the source IP address */
                ifnet_lock_shared(ifp_list[ifp_on]);
                TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
                    ifa_link) {
                    if (source_ip->ifa_addr &&
                        source_ip->ifa_addr->sa_family == AF_INET) {
                        break;
                    }
                }

                /* No IP source, don't ARP */
                if (source_ip == NULL) {
                    ifnet_lock_done(ifp_list[ifp_on]);
                    continue;
                }

                /* Copy the source IP address */
                source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

                ifnet_lock_done(ifp_list[ifp_on]);

                /* Send the ARP */
                new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
                    (struct sockaddr_dl*)source_hw->ifa_addr,
                    (struct sockaddr*)&source_ip_copy, NULL,
                    target_proto);

                if (result == ENOTSUP) {
                    result = new_result;
                }
            }
        }

        ifnet_list_free(ifp_list);
    }
    else {
        result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    return result;
}

static int
ifp_use(
    struct ifnet *ifp,
    int handle_zero)
{
    int old_value;
    int retval = 0;

    do {
        old_value = ifp->if_usecnt;
        if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
            retval = ENXIO; // ifp is invalid
            break;
        }
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

    return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */
static void
ifp_use_reached_zero(
    struct ifnet *ifp)
{
    struct if_family_str *if_family;
    ifnet_detached_func free_func;

    dlil_read_begin();

    if (ifp->if_usecnt != 0)
        panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

    /* Let BPF know we're detaching */
    bpfdetach(ifp);

    ifnet_head_lock_exclusive();
    ifnet_lock_exclusive(ifp);

    /* Remove ourselves from the list */
    TAILQ_REMOVE(&ifnet_head, ifp, if_link);
    ifnet_addrs[ifp->if_index - 1] = 0;

    /* ifp should be removed from the interface list */
    while (ifp->if_multiaddrs.lh_first) {
        struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

        /*
         * When the interface is gone, we will no longer
         * be listening on these multicasts. Various bits
         * of the stack may be referencing these multicasts,
         * so release only our reference.
         */
        LIST_REMOVE(ifma, ifma_link);
        ifma->ifma_ifp = NULL;
        ifma_release(ifma);
    }
    ifnet_head_done();

    ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
    ifnet_lock_done(ifp);

    if_family = find_family_module(ifp->if_family);
    if (if_family && if_family->del_if)
        if_family->del_if(ifp);
#if 0
    if (--if_family->if_usecnt == 0) {
        if (if_family->shutdown)
            (*if_family->shutdown)();

        TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
        FREE(if_family, M_IFADDR);
    }
#endif

    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
    free_func = ifp->if_free;
    dlil_read_end();

    if (free_func)
        free_func(ifp);
}

static int
ifp_unuse(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
    if (oldval == 0)
        panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

    if (oldval > 1)
        return 0;

    if ((ifp->if_eflags & IFEF_DETACHING) == 0)
        panic("ifp_unuse: use count reached zero but detaching flag is not set!");

    return 1; /* caller must call ifp_use_reached_zero */
}
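
/*
 * Illustrative sketch (not compiled): the calling contract documented
 * above ifp_use_reached_zero. ifp_unuse runs under the dlil write lock,
 * but ifp_use_reached_zero must only run after dlil_write_end. The
 * function name is hypothetical.
 */
#if 0
static int
example_drop_use(struct ifnet *ifp)
{
    int reached_zero;
    int error = dlil_write_begin();

    if (error != 0)
        return error;
    reached_zero = ifp_unuse(ifp);
    dlil_write_end();
    /* post-detach work happens outside the write lock */
    if (reached_zero)
        ifp_use_reached_zero(ifp);
    return 0;
}
#endif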

void
ifp_reference(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
    if (oldval == 0)
        panic("ifp_release - refcount decremented past zero!");
}

extern lck_mtx_t *domain_proto_mtx;

static int
dlil_attach_protocol_internal(
    struct if_proto *proto,
    const struct ddesc_head_str *demux,
    const struct ifnet_demux_desc *demux_list,
    u_int32_t demux_count)
{
    struct ddesc_head_str temp_head;
    struct kev_dl_proto_data ev_pr_data;
    struct ifnet *ifp = proto->ifp;
    int retval = 0;
    u_long hash_value = proto_hash_value(proto->protocol_family);
    int if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
    void* free_me = NULL;

    /* setup some of the common values */
    {
        lck_mtx_lock(domain_proto_mtx);
        struct domain *dp = domains;
        while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
            dp = dp->dom_next;
        proto->dl_domain = dp;
        lck_mtx_unlock(domain_proto_mtx);
    }

    /*
     * Convert the demux descriptors to a type the interface
     * will understand. Checking e_flags should be safe, this
     * flag won't change.
     */
    if (if_using_kpi && demux) {
        /* Convert the demux linked list to a demux_list */
        struct dlil_demux_desc *demux_entry;
        struct ifnet_demux_desc *temp_list = NULL;
        u_int32_t i = 0;

        TAILQ_FOREACH(demux_entry, demux, next) {
            i++;
        }

        temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
        free_me = temp_list;

        if (temp_list == NULL)
            return ENOMEM;

        i = 0;
        TAILQ_FOREACH(demux_entry, demux, next) {
            /* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
            if (demux_entry->type == 1 ||
                demux_entry->type == 2 ||
                demux_entry->type == 3) {
                FREE(free_me, M_TEMP);
                return ENOTSUP;
            }

            temp_list[i].type = demux_entry->type;
            temp_list[i].data = demux_entry->native_type;
            temp_list[i].datalen = demux_entry->variants.native_type_length;
            i++;
        }
        demux_count = i;
        demux_list = temp_list;
    }
    else if (!if_using_kpi && demux_list != NULL) {
        struct dlil_demux_desc *demux_entry;
        u_int32_t i = 0;

        demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
        free_me = demux_entry;
        if (demux_entry == NULL)
            return ENOMEM;

        TAILQ_INIT(&temp_head);

        for (i = 0; i < demux_count; i++) {
            demux_entry[i].type = demux_list[i].type;
            demux_entry[i].native_type = demux_list[i].data;
            demux_entry[i].variants.native_type_length = demux_list[i].datalen;
            TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
        }
        demux = &temp_head;
    }

    /*
     * Take the write lock to protect readers and exclude other writers.
     */
    dlil_write_begin();

    /* Check that the interface isn't currently detaching */
    ifnet_lock_shared(ifp);
    if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
        ifnet_lock_done(ifp);
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return ENXIO;
    }
    ifnet_lock_done(ifp);

    if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return EEXIST;
    }

    /*
     * Call the family module add_proto routine so it can refine the
     * demux descriptors as it wishes.
     */
    if (if_using_kpi)
        retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
    else {
        retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
            _cast_non_const(demux));
    }
    if (retval) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return retval;
    }

    /*
     * We can't fail from this point on.
     * Increment the number of uses (protocol attachments + interface attached).
     */
    ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

    /*
     * Insert the protocol in the hash
     */
    {
        struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
        while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
            prev_proto = SLIST_NEXT(prev_proto, next_hash);
        if (prev_proto)
            SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
        else
            SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
    }

    /*
     * Add to the if_proto list for this interface
     */
    if_proto_ref(proto);
    if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
        ifp->offercnt++;
    dlil_write_end();

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto->protocol_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));

    DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
        ifp->if_name, ifp->if_unit, retval);
    if (free_me)
        FREE(free_me, M_TEMP);
    return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
    const struct ifnet_attach_proto_param *proto_details)
{
    int retval = 0;
    struct if_proto *ifproto = NULL;

    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (ifproto == 0) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }
    bzero(ifproto, sizeof(*ifproto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = protocol;
    ifproto->proto_kpi = kProtoKPI_v1;
    ifproto->kpi.v1.input = proto_details->input;
    ifproto->kpi.v1.pre_output = proto_details->pre_output;
    ifproto->kpi.v1.event = proto_details->event;
    ifproto->kpi.v1.ioctl = proto_details->ioctl;
    ifproto->kpi.v1.detached = proto_details->detached;
    ifproto->kpi.v1.resolve_multi = proto_details->resolve;
    ifproto->kpi.v1.send_arp = proto_details->send_arp;

    retval = dlil_attach_protocol_internal(ifproto, NULL,
        proto_details->demux_list, proto_details->demux_count);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}
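
/*
 * Illustrative sketch (not compiled): attaching a protocol through the
 * KPI path above. The caller describes its callbacks (and, optionally,
 * demux_list/demux_count for if_add_proto) in an
 * ifnet_attach_proto_param. The callback and function names here are
 * hypothetical.
 */
#if 0
static errno_t
example_proto_input(ifnet_t ifp, protocol_family_t protocol,
    mbuf_t packet, char *header)
{
    /* consume or free the packet; return 0 on success */
    return 0;
}

static int
example_attach_proto(ifnet_t ifp)
{
    struct ifnet_attach_proto_param param;

    bzero(&param, sizeof(param));
    param.input = example_proto_input;
    return dlil_attach_protocol_kpi(ifp, PF_INET, &param);
}
#endif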

int
dlil_attach_protocol(struct dlil_proto_reg_str *proto)
{
    struct ifnet *ifp = NULL;
    struct if_proto *ifproto = NULL;
    int retval = 0;

    /*
     * Do everything we can before taking the write lock.
     */
    if ((proto->protocol_family == 0) || (proto->interface_family == 0))
        return EINVAL;

    /*
     * Allocate and init a new if_proto structure.
     */
    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (!ifproto) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }

    /* Look up the interface for this family/unit */
    ifp = ifbyfamily(proto->interface_family, proto->unit_number);
    if (!ifp) {
        DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
            proto->interface_family, proto->unit_number);
        retval = ENXIO;
        goto end;
    }

    bzero(ifproto, sizeof(struct if_proto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = proto->protocol_family;
    ifproto->proto_kpi = kProtoKPI_DLIL;
    ifproto->kpi.dlil.dl_input = proto->input;
    ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
    ifproto->kpi.dlil.dl_event = proto->event;
    ifproto->kpi.dlil.dl_offer = proto->offer;
    ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
    ifproto->kpi.dlil.dl_detached = proto->detached;

    retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

static int
dlil_detach_protocol_internal(
    struct if_proto *proto)
{
    struct ifnet *ifp = proto->ifp;
    u_long proto_family = proto->protocol_family;
    struct kev_dl_proto_data ev_pr_data;

    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_detached)
            proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
    }
    else {
        if (proto->kpi.v1.detached)
            proto->kpi.v1.detached(ifp, proto->protocol_family);
    }
    if_proto_free(proto);

    /*
     * Clean up routes that may still be in the routing table for that
     * interface/protocol pair.
     */
    if_rtproto_del(ifp, proto_family);

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));
    return 0;
}

int
dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
{
    struct if_proto *proto = NULL;
    int retval = 0;
    int use_reached_zero = 0;

    if ((retval = dlil_write_begin()) != 0) {
        if (retval == EDEADLK) {
            retval = 0;
            dlil_read_begin();
            proto = find_attached_proto(ifp, proto_family);
            if (proto == 0) {
                retval = ENXIO;
            }
            else {
                proto->detaching = 1;
                dlil_detach_waiting = 1;
                wakeup(&dlil_detach_waiting);
            }
            dlil_read_end();
        }
        goto end;
    }

    proto = find_attached_proto(ifp, proto_family);

    if (proto == NULL) {
        retval = ENXIO;
        dlil_write_end();
        goto end;
    }

    /*
     * Call family module del_proto
     */
    if (ifp->if_del_proto)
        ifp->if_del_proto(ifp, proto->protocol_family);

    if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
        ifp->offercnt--;

    SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);

    /*
     * We can do the rest of the work outside of the write lock.
     */
    use_reached_zero = ifp_unuse(ifp);
    dlil_write_end();

    dlil_detach_protocol_internal(proto);

    /*
     * Only handle the case where the interface will go away after
     * we've sent the message. This way post message can send the
     * message to the interface safely.
     */
    if (use_reached_zero)
        ifp_use_reached_zero(ifp);

end:
    return retval;
}
2052
2053 /*
2054 * dlil_delayed_detach_thread is responsible for detaching
2055 * protocols, protocol filters, and interface filters after
2056 * an attempt was made to detach one of those items while
2057 * it was not safe to do so (i.e. the caller was inside dlil_read_begin).
2058 *
2059 * This function will take the dlil write lock and walk
2060 * through each of the interfaces looking for items with
2061 * the detaching flag set. When an item is found, it is
2062 * detached from the interface and placed on a local list.
2063 * After all of the items have been collected, we drop the
2064 * write lock and perform the post-detach work. This is done
2065 * so we only have to take the write lock once.
2066 *
2067 * When detaching a protocol, if we find that we
2068 * have detached the very last protocol and we need to call
2069 * ifp_use_reached_zero, we have to break out of our work
2070 * to drop the write lock so we can call ifp_use_reached_zero.
2071 */
2072
2073 static void
2074 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2075 {
2076 thread_t self = current_thread();
2077 int asserted = 0;
2078
2079 ml_thread_policy(self, MACHINE_GROUP,
2080 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2081
2082
2083 while (1) {
2084 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2085 struct ifnet *ifp;
2086 struct proto_hash_entry detached_protos;
2087 struct ifnet_filter_head detached_filters;
2088 struct if_proto *proto;
2089 struct if_proto *next_proto;
2090 struct ifnet_filter *filt;
2091 struct ifnet_filter *next_filt;
2092 int reached_zero;
2093
2094 reached_zero = 0;
2095
2096 /* Clear the detach waiting flag */
2097 dlil_detach_waiting = 0;
2098 TAILQ_INIT(&detached_filters);
2099 SLIST_INIT(&detached_protos);
2100
2101 ifnet_head_lock_shared();
2102 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2103 int i;
2104
2105 // Look for protocols that have been marked for detaching
2106 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2107 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2108 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2109
2110 // Detach this protocol
2111 if (proto->detaching) {
2112 if (ifp->if_del_proto)
2113 ifp->if_del_proto(ifp, proto->protocol_family);
2114 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2115 ifp->offercnt--;
2116 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2117 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2118 reached_zero = ifp_unuse(ifp);
2119 if (reached_zero) {
2120 break;
2121 }
2122 }
2123 else {
2124 // Update prev_nextptr to point to our next ptr
2125 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2126 }
2127 }
2128 }
2129
2130 // look for interface filters that need to be detached
2131 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2132 next_filt = TAILQ_NEXT(filt, filt_next);
2133 if (filt->filt_detaching != 0) {
2134 // take this interface filter off the interface filter list
2135 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2136
2137 // put this interface filter on the detached filters list
2138 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2139 }
2140 }
2141
2142 if (ifp->if_delayed_detach) {
2143 ifp->if_delayed_detach = 0;
2144 reached_zero = ifp_unuse(ifp);
2145 }
2146
2147 if (reached_zero)
2148 break;
2149 }
2150 ifnet_head_done();
2151 dlil_write_end();
2152
2153 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2154 next_filt = TAILQ_NEXT(filt, filt_next);
2155 /*
2156 * With a non-zero second parameter, dlil_detach_filter_internal
2157 * skips removing the item from the interface's filter list (we
2158 * already removed it above) but still frees the item.
2159 */
2160 dlil_detach_filter_internal(filt, 1);
2161 }
2162
2163 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2164 next_proto = SLIST_NEXT(proto, next_hash);
2165 dlil_detach_protocol_internal(proto);
2166 }
2167
2168 if (reached_zero) {
2169 ifp_use_reached_zero(ifp);
2170 dlil_detach_waiting = 1; // we may have missed something
2171 }
2172 }
2173
2174 if (!asserted && dlil_detach_waiting == 0) {
2175 asserted = 1;
2176 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2177 }
2178
2179 if (dlil_detach_waiting == 0) {
2180 asserted = 0;
2181 thread_block(dlil_delayed_detach_thread);
2182 }
2183 }
2184 }
2185
2186 static void
2187 dlil_call_delayed_detach_thread(void) {
2188 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2189 }
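
/*
 * The wakeup side of the handshake, as used by dlil_detach_protocol and
 * dlil_if_detach above when dlil_write_begin() reports EDEADLK:
 *
 *	proto->detaching = 1;		(or ifp->if_delayed_detach = 1)
 *	dlil_detach_waiting = 1;
 *	wakeup(&dlil_detach_waiting);
 *
 * assert_wait() in the thread registers interest in &dlil_detach_waiting
 * before the flag is re-checked, so a wakeup() issued in that window is
 * not lost.
 */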
2190
2191 extern int if_next_index(void);
2192
2193 __private_extern__ int
2194 dlil_if_attach_with_address(
2195 struct ifnet *ifp,
2196 const struct sockaddr_dl *ll_addr)
2197 {
2198 u_long interface_family = ifp->if_family;
2199 struct if_family_str *if_family = NULL;
2200 int stat;
2201 struct ifnet *tmp_if;
2202 struct proto_hash_entry *new_proto_list = NULL;
2203 int locked = 0;
2204
2205
2206 ifnet_head_lock_shared();
2207
2208 /* Verify we aren't already on the list */
2209 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2210 if (tmp_if == ifp) {
2211 ifnet_head_done();
2212 return EEXIST;
2213 }
2214 }
2215
2216 ifnet_head_done();
2217
2218 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2219 #if IFNET_RW_LOCK
2220 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2221 #else
2222 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2223 #endif
2224
2225 if (ifp->if_lock == 0) {
2226 return ENOMEM;
2227 }
2228
2229 // Only use family if this is not a KPI interface
2230 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2231 if_family = find_family_module(interface_family);
2232 }
2233
2234 /*
2235 * Allow interfaces without protocol families to attach
2236 * only if they have the necessary fields filled out.
2237 */
2238
2239 if ((if_family == 0) &&
2240 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2241 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
2242 interface_family);
2243 return ENODEV;
2244 }
2245
2246 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2247 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2248 M_NKE, M_WAITOK);
2249
2250 if (new_proto_list == 0) {
2251 return ENOBUFS;
2252 }
2253 }
2254
2255 dlil_write_begin();
2256 locked = 1;
2257
2258 /*
2259 * Call the family module to fill in the appropriate fields in the
2260 * ifnet structure.
2261 */
2262
2263 if (if_family) {
2264 stat = if_family->add_if(ifp);
2265 if (stat) {
2266 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2267 dlil_write_end();
2268 return stat;
2269 }
2270 ifp->if_add_proto_u.original = if_family->add_proto;
2271 ifp->if_del_proto = if_family->del_proto;
2272 if_family->refcnt++;
2273 }
2274
2275 ifp->offercnt = 0;
2276 TAILQ_INIT(&ifp->if_flt_head);
2277
2278
2279 if (new_proto_list) {
2280 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2281 ifp->if_proto_hash = new_proto_list;
2282 new_proto_list = 0;
2283 }
2284
2285 /* old_if_attach */
2286 {
2287 struct ifaddr *ifa = 0;
2288
2289 if (ifp->if_snd.ifq_maxlen == 0)
2290 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2291 TAILQ_INIT(&ifp->if_prefixhead);
2292 LIST_INIT(&ifp->if_multiaddrs);
2293 ifnet_touch_lastchange(ifp);
2294
2295 /* usecount to track attachment to the ifnet list */
2296 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2297
2298 /* Lock the list of interfaces */
2299 ifnet_head_lock_exclusive();
2300 ifnet_lock_exclusive(ifp);
2301
2302 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2303 char workbuf[64];
2304 int namelen, masklen, socksize, ifasize;
2305
2306 ifp->if_index = if_next_index();
2307
2308 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2309 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2310 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2311 socksize = masklen + ifp->if_addrlen;
2312 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2313 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2314 socksize = sizeof(struct sockaddr_dl);
2315 socksize = ROUNDUP(socksize);
2316 ifasize = sizeof(struct ifaddr) + 2 * socksize;
2317 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2318 if (ifa) {
2319 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2320 ifnet_addrs[ifp->if_index - 1] = ifa;
2321 bzero(ifa, ifasize);
2322 sdl->sdl_len = socksize;
2323 sdl->sdl_family = AF_LINK;
2324 bcopy(workbuf, sdl->sdl_data, namelen);
2325 sdl->sdl_nlen = namelen;
2326 sdl->sdl_index = ifp->if_index;
2327 sdl->sdl_type = ifp->if_type;
2328 if (ll_addr) {
2329 sdl->sdl_alen = ll_addr->sdl_alen;
2330 if (ll_addr->sdl_alen != ifp->if_addrlen)
2331 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2332 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2333 }
2334 ifa->ifa_ifp = ifp;
2335 ifa->ifa_rtrequest = link_rtrequest;
2336 ifa->ifa_addr = (struct sockaddr*)sdl;
2337 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2338 ifa->ifa_netmask = (struct sockaddr*)sdl;
2339 sdl->sdl_len = masklen;
2340 while (namelen != 0)
2341 sdl->sdl_data[--namelen] = 0xff;
2342 }
2343 }
2344 else {
2345 /* preserve the first ifaddr */
2346 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2347 }
2348
2349
2350 TAILQ_INIT(&ifp->if_addrhead);
2351 ifa = ifnet_addrs[ifp->if_index - 1];
2352
2353 if (ifa) {
2354 /*
2355 * We don't use if_attach_ifa because we want
2356 * this address to be first on the list.
2357 */
2358 ifaref(ifa);
2359 ifa->ifa_debug |= IFA_ATTACHED;
2360 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2361 }
2362
2363 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2364 ifindex2ifnet[ifp->if_index] = ifp;
2365
2366 ifnet_head_done();
2367 }
2368 dlil_write_end();
2369
2370 if (if_family && if_family->init_if) {
2371 stat = if_family->init_if(ifp);
2372 if (stat) {
2373 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2374 }
2375 }
2376
2377 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
2378 ifnet_lock_done(ifp);
2379
2380 return 0;
2381 }
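
/*
 * Worked example of the sockaddr_dl sizing above, assuming the classic
 * 20-byte struct sockaddr_dl (sdl_data at offset 8), a 4-byte long, and
 * an Ethernet interface "en0" with if_addrlen == 6:
 *
 *	namelen  = strlen("en0")            =  3
 *	masklen  = 8 + 3                    = 11
 *	socksize = 11 + 6 = 17, raised to sizeof(struct sockaddr_dl) = 20,
 *	           and ROUNDUP(20)          = 20
 *	ifasize  = sizeof(struct ifaddr) + 2 * 20
 *
 * One allocation thus holds the ifaddr, the AF_LINK address, and the
 * netmask, whose first namelen bytes the loop above sets to 0xff.
 */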
2382
2383 int
2384 dlil_if_attach(struct ifnet *ifp)
2385 {
2386 return dlil_if_attach_with_address(ifp, NULL);
2387 }
2388
2389
2390 int
2391 dlil_if_detach(struct ifnet *ifp)
2392 {
2393 struct ifnet_filter *filter;
2394 struct ifnet_filter *filter_next;
2395 int zeroed = 0;
2396 int retval = 0;
2397 struct ifnet_filter_head fhead;
2398
2399
2400 ifnet_lock_exclusive(ifp);
2401
2402 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2403 /* Interface has already been detached */
2404 ifnet_lock_done(ifp);
2405 return ENXIO;
2406 }
2407
2408 /*
2409 * Indicate this interface is being detached.
2410 *
2411 * This should prevent protocols from attaching
2412 * from this point on. The interface will remain on
2413 * the list until all of the protocols are detached.
2414 */
2415 ifp->if_eflags |= IFEF_DETACHING;
2416 ifnet_lock_done(ifp);
2417
2418 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
2419
2420 if ((retval = dlil_write_begin()) != 0) {
2421 if (retval == EDEADLK) {
2422 retval = DLIL_WAIT_FOR_FREE;
2423
2424 /* We need to perform a delayed detach */
2425 ifp->if_delayed_detach = 1;
2426 dlil_detach_waiting = 1;
2427 wakeup(&dlil_detach_waiting);
2428 }
2429 return retval;
2430 }
2431
2432 /* Steal the list of interface filters */
2433 fhead = ifp->if_flt_head;
2434 TAILQ_INIT(&ifp->if_flt_head);
2435
2436 /* unuse the interface */
2437 zeroed = ifp_unuse(ifp);
2438
2439 dlil_write_end();
2440
2441 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2442 filter_next = TAILQ_NEXT(filter, filt_next);
2443 dlil_detach_filter_internal(filter, 1);
2444 }
2445
2446 if (zeroed == 0) {
2447 retval = DLIL_WAIT_FOR_FREE;
2448 }
2449 else
2450 {
2451 ifp_use_reached_zero(ifp);
2452 }
2453
2454 return retval;
2455 }
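
/*
 * Usage sketch (hypothetical driver routine): the caller must not reclaim
 * the ifnet on DLIL_WAIT_FOR_FREE; uses are still outstanding and cleanup
 * should be deferred to the driver-supplied if_free callback, which DLIL
 * is expected to invoke once the last use drops.
 */
static void
my_driver_stop(struct ifnet *ifp)
{
	switch (dlil_if_detach(ifp)) {
	case 0:				/* detached synchronously */
		break;
	case DLIL_WAIT_FOR_FREE:	/* if_free will fire later */
		break;
	case ENXIO:			/* detach already in progress */
		break;
	}
}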
2456
2457
2458 int
2459 dlil_reg_if_modules(u_long interface_family,
2460 struct dlil_ifmod_reg_str *ifmod)
2461 {
2462 struct if_family_str *if_family;
2463
2464
2465 if (find_family_module(interface_family)) {
2466 DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
2467 interface_family);
2468 return EEXIST;
2469 }
2470
2471 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2472 (!ifmod->add_proto) || (!ifmod->del_proto)) {
2473 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
2474 return EINVAL;
2475 }
2476
2477 /*
2478 * The following is a gross hack to keep from breaking
2479 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2480 * does not zero the reserved fields in dlil_ifmod_reg_str.
2481 * As a result, we have to zero any function that used to
2482 * be reserved fields at the time Vicomsoft built their
2483 * kext. Radar #2974305
2484 */
2485 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
2486 if (interface_family == 123) { /* Vicom */
2487 ifmod->init_if = 0;
2488 } else {
2489 return EINVAL;
2490 }
2491 }
2492
2493 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2494 if (!if_family) {
2495 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
2496 return ENOMEM;
2497 }
2498
2499 bzero(if_family, sizeof(struct if_family_str));
2500
2501 if_family->if_family = interface_family & 0xffff;
2502 if_family->shutdown = ifmod->shutdown;
2503 if_family->add_if = ifmod->add_if;
2504 if_family->del_if = ifmod->del_if;
2505 if_family->init_if = ifmod->init_if;
2506 if_family->add_proto = ifmod->add_proto;
2507 if_family->del_proto = ifmod->del_proto;
2508 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
2509 if_family->refcnt = 1;
2510 if_family->flags = 0;
2511
2512 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
2513 return 0;
2514 }
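
/*
 * Registration sketch (illustrative): the handlers and MY_IF_FAMILY are
 * hypothetical, but the four mandatory callbacks and the zeroed reserved
 * fields follow directly from the checks above.
 */
static int
my_family_register(void)
{
	struct dlil_ifmod_reg_str mod;

	bzero(&mod, sizeof(mod));	/* keeps reserved[] zero */
	mod.add_if    = my_add_if;
	mod.del_if    = my_del_if;
	mod.add_proto = my_add_proto;
	mod.del_proto = my_del_proto;
	return dlil_reg_if_modules(MY_IF_FAMILY, &mod);
}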
2515
2516 int dlil_dereg_if_modules(u_long interface_family)
2517 {
2518 struct if_family_str *if_family;
2519 int ret = 0;
2520
2521
2522 if_family = find_family_module(interface_family);
2523 if (if_family == 0) {
2524 return ENXIO;
2525 }
2526
2527 if (--if_family->refcnt == 0) {
2528 if (if_family->shutdown)
2529 (*if_family->shutdown)();
2530
2531 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2532 FREE(if_family, M_IFADDR);
2533 }
2534 else {
2535 if_family->flags |= DLIL_SHUTDOWN;
2536 ret = DLIL_WAIT_FOR_FREE;
2537 }
2538
2539 return ret;
2540 }
2541
2542
2543
2544 int
2545 dlil_reg_proto_module(
2546 u_long protocol_family,
2547 u_long interface_family,
2548 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2549 int (*detach)(struct ifnet *ifp, u_long protocol_family))
2550 {
2551 struct proto_family_str *proto_family;
2552
2553 if (attach == NULL) return EINVAL;
2554
2555 lck_mtx_lock(proto_family_mutex);
2556
2557 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2558 if (proto_family->proto_family == protocol_family &&
2559 proto_family->if_family == interface_family) {
2560 lck_mtx_unlock(proto_family_mutex);
2561 return EEXIST;
2562 }
2563 }
2564
2565 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2566 if (!proto_family) {
2567 lck_mtx_unlock(proto_family_mutex);
2568 return ENOMEM;
2569 }
2570
2571 bzero(proto_family, sizeof(struct proto_family_str));
2572 proto_family->proto_family = protocol_family;
2573 proto_family->if_family = interface_family & 0xffff;
2574 proto_family->attach_proto = attach;
2575 proto_family->detach_proto = detach;
2576
2577 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
2578 lck_mtx_unlock(proto_family_mutex);
2579 return 0;
2580 }
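
/*
 * Registration sketch (illustrative): pairing a protocol family with an
 * interface family.  my_attach_inet/my_detach_inet are hypothetical; the
 * attach handler is mandatory (see the NULL check above), while detach may
 * be NULL, in which case dlil_unplumb_protocol (below) falls back to
 * dlil_detach_protocol.
 */
static int my_attach_inet(struct ifnet *ifp, u_long protocol_family);
static int my_detach_inet(struct ifnet *ifp, u_long protocol_family);

static int
my_proto_register(u_long if_family)
{
	return dlil_reg_proto_module(PF_INET, if_family,
	    my_attach_inet, my_detach_inet);
}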
2581
2582 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2583 {
2584 struct proto_family_str *proto_family;
2585 int ret = 0;
2586
2587 lck_mtx_lock(proto_family_mutex);
2588
2589 proto_family = find_proto_module(protocol_family, interface_family);
2590 if (proto_family == 0) {
2591 lck_mtx_unlock(proto_family_mutex);
2592 return ENXIO;
2593 }
2594
2595 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2596 FREE(proto_family, M_IFADDR);
2597
2598 lck_mtx_unlock(proto_family_mutex);
2599 return ret;
2600 }
2601
2602 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
2603 {
2604 struct proto_family_str *proto_family;
2605 int ret = 0;
2606
2607 lck_mtx_lock(proto_family_mutex);
2608 proto_family = find_proto_module(protocol_family, ifp->if_family);
2609 if (proto_family == 0) {
2610 lck_mtx_unlock(proto_family_mutex);
2611 return ENXIO;
2612 }
2613
2614 ret = proto_family->attach_proto(ifp, protocol_family);
2615
2616 lck_mtx_unlock(proto_family_mutex);
2617 return ret;
2618 }
2619
2620
2621 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2622 {
2623 struct proto_family_str *proto_family;
2624 int ret = 0;
2625
2626 lck_mtx_lock(proto_family_mutex);
2627
2628 proto_family = find_proto_module(protocol_family, ifp->if_family);
2629 if (proto_family && proto_family->detach_proto)
2630 ret = proto_family->detach_proto(ifp, protocol_family);
2631 else
2632 ret = dlil_detach_protocol(ifp, protocol_family);
2633
2634 lck_mtx_unlock(proto_family_mutex);
2635 return ret;
2636 }
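
/*
 * Usage sketch: per-interface plumbing built on the registry above; the
 * wrapper and its error handling are illustrative.
 */
static int
my_plumb_inet(struct ifnet *ifp)
{
	int err = dlil_plumb_protocol(PF_INET, ifp);	/* runs attach_proto */
	if (err == ENXIO)
		printf("no PF_INET module for interface family %lu\n",
		       (u_long)ifp->if_family);
	return err;
}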
2637
2638 static errno_t
2639 dlil_recycle_ioctl(
2640 __unused ifnet_t ifnet_ptr,
2641 __unused u_int32_t ioctl_code,
2642 __unused void *ioctl_arg)
2643 {
2644 return EOPNOTSUPP;
2645 }
2646
2647 static int
2648 dlil_recycle_output(
2649 __unused struct ifnet *ifnet_ptr,
2650 struct mbuf *m)
2651 {
2652 m_freem(m);
2653 return 0;
2654 }
2655
2656 static void
2657 dlil_recycle_free(
2658 __unused ifnet_t ifnet_ptr)
2659 {
2660 }
2661
2662 static errno_t
2663 dlil_recycle_set_bpf_tap(
2664 __unused ifnet_t ifp,
2665 __unused bpf_tap_mode mode,
2666 __unused bpf_packet_func callback)
2667 {
2668 /* XXX not sure what to do here */
2669 return 0;
2670 }
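
/*
 * The four stubs above are installed by dlil_if_release (below) when an
 * ifnet is returned to the recycling pool: late callers that still hold a
 * pointer get harmless no-ops (ioctl fails with EOPNOTSUPP, output simply
 * frees the mbuf) instead of jumping into an unloaded driver.
 */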
2671
2672 int dlil_if_acquire(
2673 u_long family,
2674 const void *uniqueid,
2675 size_t uniqueid_len,
2676 struct ifnet **ifp)
2677 {
2678 struct ifnet *ifp1 = NULL;
2679 struct dlil_ifnet *dlifp1 = NULL;
2680 int ret = 0;
2681
2682 lck_mtx_lock(dlil_ifnet_mutex);
2683 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2684
2685 ifp1 = (struct ifnet *)dlifp1;
2686
2687 if (ifp1->if_family == family) {
2688
2689 /* same uniqueid length and contents (a zero-length id matches only entries with no id) */
2690 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2691 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2692
2693 /* check for matching interface in use */
2694 if (ifp1->if_eflags & IFEF_INUSE) {
2695 if (uniqueid_len) {
2696 ret = EBUSY;
2697 goto end;
2698 }
2699 }
2700 else {
2701 if (!ifp1->if_lock)
2702 panic("ifp's lock is gone\n");
2703 ifnet_lock_exclusive(ifp1);
2704 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2705 ifnet_lock_done(ifp1);
2706 *ifp = ifp1;
2707 goto end;
2708 }
2709 }
2710 }
2711 }
2712
2713 /* no interface found, allocate a new one */
2714 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2715 if (dlifp1 == 0) {
2716 ret = ENOMEM;
2717 goto end;
2718 }
2719
2720 bzero(dlifp1, sizeof(*dlifp1));
2721
2722 if (uniqueid_len) {
2723 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2724 if (dlifp1->if_uniqueid == 0) {
2725 FREE(dlifp1, M_NKE);
2726 ret = ENOMEM;
2727 goto end;
2728 }
2729 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2730 dlifp1->if_uniqueid_len = uniqueid_len;
2731 }
2732
2733 ifp1 = (struct ifnet *)dlifp1;
2734 ifp1->if_eflags |= IFEF_INUSE;
2735 ifp1->if_name = dlifp1->if_namestorage;
2736
2737 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2738
2739 *ifp = ifp1;
2740
2741 end:
2742 lck_mtx_unlock(dlil_ifnet_mutex);
2743
2744 return ret;
2745 }
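
/*
 * Lifecycle sketch (illustrative; MY_IF_FAMILY and my_unique are
 * hypothetical): passing a uniqueid lets a reloaded driver get back the
 * very ifnet it used before; EBUSY means that recycled ifnet is still
 * marked IFEF_INUSE.
 */
static int
my_create_interface(struct ifnet **ifpp, u_long my_unique)
{
	int err = dlil_if_acquire(MY_IF_FAMILY, &my_unique,
	    sizeof(my_unique), ifpp);
	if (err == 0) {
		/* fill in (*ifpp)->if_name, if_unit, if_family, ... then: */
		err = dlil_if_attach(*ifpp);
	}
	return err;
}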
2746
2747 void dlil_if_release(struct ifnet *ifp)
2748 {
2749 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2750
2751
2752 /* Interface does not have a lock until it is attached - radar 3713951 */
2753 if (ifp->if_lock)
2754 ifnet_lock_exclusive(ifp);
2755 ifp->if_eflags &= ~IFEF_INUSE;
2756 ifp->if_ioctl = dlil_recycle_ioctl;
2757 ifp->if_output = dlil_recycle_output;
2758 ifp->if_free = dlil_recycle_free;
2759 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2760
2761 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2762 ifp->if_name = dlifp->if_namestorage;
2763 if (ifp->if_lock)
2764 ifnet_lock_done(ifp);
2765
2766 }