1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1999 Apple Computer, Inc.
24 *
25 * Data Link Interface Layer
26 * Author: Ted Walker
27 */
28
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/malloc.h>
33 #include <sys/mbuf.h>
34 #include <sys/socket.h>
35 #include <sys/domain.h>
36 #include <sys/user.h>
37 #include <net/if_dl.h>
38 #include <net/if.h>
39 #include <net/route.h>
40 #include <net/if_var.h>
41 #include <net/dlil.h>
42 #include <net/if_arp.h>
43 #include <sys/kern_event.h>
44 #include <sys/kdebug.h>
45
46 #include <kern/assert.h>
47 #include <kern/task.h>
48 #include <kern/thread.h>
49 #include <kern/sched_prim.h>
50 #include <kern/locks.h>
51
52 #include <net/if_types.h>
53 #include <net/kpi_interfacefilter.h>
54
55 #include <libkern/OSAtomic.h>
56
57 #include <machine/machine_routines.h>
58
59 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
60 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
61 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
62 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
63 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
64
65
66 #define MAX_DL_TAGS 16
67 #define MAX_DLIL_FILTERS 16
68 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
69 #define MAX_LINKADDR 4 /* LONGWORDS */
70 #define M_NKE M_IFADDR
71
72 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
73 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
74
75 #if 0
76 #define DLIL_PRINTF printf
77 #else
78 #define DLIL_PRINTF kprintf
79 #endif
80
81 enum {
82 kProtoKPI_DLIL = 0,
83 kProtoKPI_v1 = 1
84 };
85
86 struct if_proto {
87 SLIST_ENTRY(if_proto) next_hash;
88 int refcount;
89 int detaching;
90 struct ifnet *ifp;
91 struct domain *dl_domain;
92 protocol_family_t protocol_family;
93 int proto_kpi;
94 union {
95 struct {
96 dl_input_func dl_input;
97 dl_pre_output_func dl_pre_output;
98 dl_event_func dl_event;
99 dl_offer_func dl_offer;
100 dl_ioctl_func dl_ioctl;
101 dl_detached_func dl_detached;
102 } dlil;
103 struct {
104 proto_media_input input;
105 proto_media_preout pre_output;
106 proto_media_event event;
107 proto_media_ioctl ioctl;
108 proto_media_detached detached;
109 proto_media_resolve_multi resolve_multi;
110 proto_media_send_arp send_arp;
111 } v1;
112 } kpi;
113 };
114
115 SLIST_HEAD(proto_hash_entry, if_proto);
116
117
118 struct dlil_ifnet {
119 /* ifnet and drvr_ext are used by the stack and drivers;
120 drvr_ext extends the public ifnet and must follow dl_if */
121 struct ifnet dl_if; /* public ifnet */
122
123 /* dlil private fields */
124 TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnets are linked together */
125 /* this is not the ifnet list */
126 void *if_uniqueid; /* unique id identifying the interface */
127 size_t if_uniqueid_len;/* length of the unique id */
128 char if_namestorage[IFNAMSIZ]; /* interface name storage */
129 };
130
131 struct ifnet_filter {
132 TAILQ_ENTRY(ifnet_filter) filt_next;
133 ifnet_t filt_ifp;
134 int filt_detaching;
135
136 const char *filt_name;
137 void *filt_cookie;
138 protocol_family_t filt_protocol;
139 iff_input_func filt_input;
140 iff_output_func filt_output;
141 iff_event_func filt_event;
142 iff_ioctl_func filt_ioctl;
143 iff_detached_func filt_detached;
144 };
145
146 struct if_family_str {
147 TAILQ_ENTRY(if_family_str) if_fam_next;
148 u_long if_family;
149 int refcnt;
150 int flags;
151
152 #define DLIL_SHUTDOWN 1
153
154 int (*add_if)(struct ifnet *ifp);
155 int (*del_if)(struct ifnet *ifp);
156 int (*init_if)(struct ifnet *ifp);
157 int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
158 ifnet_del_proto_func del_proto;
159 ifnet_ioctl_func ifmod_ioctl;
160 int (*shutdown)(void);
161 };
162
163 struct proto_family_str {
164 TAILQ_ENTRY(proto_family_str) proto_fam_next;
165 u_long proto_family;
166 u_long if_family;
167 int usecnt;
168
169 int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
170 int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
171 };
172
173 enum {
174 kIfNetUseCount_MayBeZero = 0,
175 kIfNetUseCount_MustNotBeZero = 1
176 };
177
178 static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
179 static TAILQ_HEAD(, if_family_str) if_family_head;
180 static TAILQ_HEAD(, proto_family_str) proto_family_head;
181 static lck_grp_t *dlil_lock_group;
182 static lck_grp_t *ifnet_lock_group;
183 static lck_grp_t *ifnet_head_lock_group;
184 static lck_attr_t *ifnet_lock_attr;
185 static lck_mtx_t *proto_family_mutex;
186 static lck_rw_t *ifnet_head_mutex;
187 static lck_mtx_t *dlil_ifnet_mutex;
188 static lck_mtx_t *dlil_mutex;
189 static unsigned long dlil_read_count = 0;
190 static unsigned long dlil_detach_waiting = 0;
191 extern u_int32_t ipv4_ll_arp_aware;
192
193 int dlil_initialized = 0;
194 lck_spin_t *dlil_input_lock;
195 __private_extern__ thread_t dlil_input_thread_ptr = 0;
196 int dlil_input_thread_wakeup = 0;
197 __private_extern__ int dlil_output_thread_wakeup = 0;
198 static struct mbuf *dlil_input_mbuf_head = NULL;
199 static struct mbuf *dlil_input_mbuf_tail = NULL;
200 #if NLOOP > 1
201 #error dlil_input() needs to be revised to support more than one loopback interface
202 #endif
203 static struct mbuf *dlil_input_loop_head = NULL;
204 static struct mbuf *dlil_input_loop_tail = NULL;
205
206 static void dlil_input_thread(void);
207 static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
208 struct ifnet *ifbyfamily(u_long family, short unit);
209 static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
210 static void dlil_call_delayed_detach_thread(void);
211
212 static void dlil_read_begin(void);
213 static void dlil_read_end(void);
214 static int dlil_write_begin(void);
215 static void dlil_write_end(void);
216
217 static int ifp_use(struct ifnet *ifp, int handle_zero);
218 static int ifp_unuse(struct ifnet *ifp);
219 static void ifp_use_reached_zero(struct ifnet *ifp);
220
221 extern void bpfdetach(struct ifnet*);
222 extern void proto_input_run(void); // new run_netisr
223
224
225 int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);
226
227 __private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);
228
229 int dlil_expand_mcl;
230
231 extern u_int32_t inject_buckets;
232
233 static const u_int32_t dlil_writer_waiting = 0x80000000;
234
235 static __inline__ void*
236 _cast_non_const(const void * ptr) {
237 union {
238 const void* cval;
239 void* val;
240 } ret;
241
242 ret.cval = ptr;
243 return (ret.val);
244 }
245
246 /* Should these be inline? */
247 static void
248 dlil_read_begin(void)
249 {
250 unsigned long new_value;
251 unsigned long old_value;
252 struct uthread *uth = get_bsdthread_info(current_thread());
253
254 if (uth->dlil_incremented_read == dlil_writer_waiting)
255 panic("dlil_read_begin - thread is already a writer");
256
257 do {
258 again:
259 old_value = dlil_read_count;
260
261 if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
262 {
263 tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
264 goto again;
265 }
266
267 new_value = old_value + 1;
268 } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));
269
270 uth->dlil_incremented_read++;
271 }
272
273 static void
274 dlil_read_end(void)
275 {
276 struct uthread *uth = get_bsdthread_info(current_thread());
277
278 OSDecrementAtomic((UInt32*)&dlil_read_count);
279 uth->dlil_incremented_read--;
280 if (dlil_read_count == dlil_writer_waiting)
281 wakeup(_cast_non_const(&dlil_writer_waiting));
282 }
283
284 static int
285 dlil_write_begin(void)
286 {
287 struct uthread *uth = get_bsdthread_info(current_thread());
288
289 if (uth->dlil_incremented_read != 0) {
290 return EDEADLK;
291 }
292 lck_mtx_lock(dlil_mutex);
293 OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
294 again:
295 if (dlil_read_count == dlil_writer_waiting) {
296 uth->dlil_incremented_read = dlil_writer_waiting;
297 return 0;
298 }
299 else {
300 tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
301 goto again;
302 }
303 }
304
305 static void
306 dlil_write_end(void)
307 {
308 struct uthread *uth = get_bsdthread_info(current_thread());
309
310 if (uth->dlil_incremented_read != dlil_writer_waiting)
311 panic("dlil_write_end - thread is not a writer");
312 OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
313 lck_mtx_unlock(dlil_mutex);
314 uth->dlil_incremented_read = 0;
315 wakeup(&dlil_read_count);
316 }
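/*
 * Illustrative sketch only (hypothetical helpers, not part of DLIL):
 * how the read/write primitives above are meant to be used.  Readers
 * bracket traversals with dlil_read_begin/dlil_read_end; writers must
 * handle dlil_write_begin returning EDEADLK when the calling thread
 * already holds the read lock, typically by deferring the work (see
 * the delayed detach logic later in this file).
 */
#if 0
static int
example_count_filters(struct ifnet *ifp)
{
	struct ifnet_filter *filter;
	int count = 0;

	dlil_read_begin();
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next)
		count++;
	dlil_read_end();

	return count;
}

static int
example_remove_filter(struct ifnet *ifp, struct ifnet_filter *filter)
{
	int retval;

	/* EDEADLK means this thread is a reader; defer instead of blocking */
	if ((retval = dlil_write_begin()) != 0)
		return retval;
	TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
	dlil_write_end();
	return 0;
}
#endif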
317
318 #define PROTO_HASH_SLOTS 0x5
319
320 /*
321 * Internal functions.
322 */
323
324 static int
325 proto_hash_value(u_long protocol_family)
326 {
327 switch(protocol_family) {
328 case PF_INET:
329 return 0;
330 case PF_INET6:
331 return 1;
332 case PF_APPLETALK:
333 return 2;
334 case PF_VLAN:
335 return 3;
336 default:
337 return 4;
338 }
339 }
340
341 static
342 struct if_family_str *find_family_module(u_long if_family)
343 {
344 struct if_family_str *mod = NULL;
345
346 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
347 if (mod->if_family == (if_family & 0xffff))
348 break;
349 }
350
351 return mod;
352 }
353
354 static
355 struct proto_family_str*
356 find_proto_module(u_long proto_family, u_long if_family)
357 {
358 struct proto_family_str *mod = NULL;
359
360 TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
361 if ((mod->proto_family == (proto_family & 0xffff))
362 && (mod->if_family == (if_family & 0xffff)))
363 break;
364 }
365
366 return mod;
367 }
368
369 static struct if_proto*
370 find_attached_proto(struct ifnet *ifp, u_long protocol_family)
371 {
372 struct if_proto *proto = NULL;
373 u_long i = proto_hash_value(protocol_family);
374 if (ifp->if_proto_hash) {
375 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
376 }
377
378 while(proto && proto->protocol_family != protocol_family) {
379 proto = SLIST_NEXT(proto, next_hash);
380 }
381
382 return proto;
383 }
384
385 static void
386 if_proto_ref(struct if_proto *proto)
387 {
388 OSAddAtomic(1, (UInt32*)&proto->refcount);
389 }
390
391 static void
392 if_proto_free(struct if_proto *proto)
393 {
394 int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);
395
396 if (oldval == 1) { /* This was the last reference */
397 FREE(proto, M_IFADDR);
398 }
399 }
400
401 __private_extern__ void
402 ifnet_lock_assert(
403 __unused struct ifnet *ifp,
404 __unused int what)
405 {
406 #if IFNET_RW_LOCK
407 /*
408 * Not implemented for rw locks.
409 *
410 * Function exists so when/if we use mutex we can
411 * enable this check.
412 */
413 #else
414 lck_mtx_assert(ifp->if_lock, what);
415 #endif
416 }
417
418 __private_extern__ void
419 ifnet_lock_shared(
420 struct ifnet *ifp)
421 {
422 #if IFNET_RW_LOCK
423 lck_rw_lock_shared(ifp->if_lock);
424 #else
425 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
426 lck_mtx_lock(ifp->if_lock);
427 #endif
428 }
429
430 __private_extern__ void
431 ifnet_lock_exclusive(
432 struct ifnet *ifp)
433 {
434 #if IFNET_RW_LOCK
435 lck_rw_lock_exclusive(ifp->if_lock);
436 #else
437 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
438 lck_mtx_lock(ifp->if_lock);
439 #endif
440 }
441
442 __private_extern__ void
443 ifnet_lock_done(
444 struct ifnet *ifp)
445 {
446 #if IFNET_RW_LOCK
447 lck_rw_done(ifp->if_lock);
448 #else
449 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
450 lck_mtx_unlock(ifp->if_lock);
451 #endif
452 }
453
454 __private_extern__ void
455 ifnet_head_lock_shared()
456 {
457 lck_rw_lock_shared(ifnet_head_mutex);
458 }
459
460 __private_extern__ void
461 ifnet_head_lock_exclusive()
462 {
463 lck_rw_lock_exclusive(ifnet_head_mutex);
464 }
465
466 __private_extern__ void
467 ifnet_head_done()
468 {
469 lck_rw_done(ifnet_head_mutex);
470 }
471
472 /*
473 * Public functions.
474 */
475 struct ifnet *ifbyfamily(u_long family, short unit)
476 {
477 struct ifnet *ifp;
478
479 ifnet_head_lock_shared();
480 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
481 if ((family == ifp->if_family) && (ifp->if_unit == unit))
482 break;
483 ifnet_head_done();
484
485 return ifp;
486 }
487
488 static int dlil_ifp_proto_count(struct ifnet * ifp)
489 {
490 int count = 0;
491 int i;
492
493 if (ifp->if_proto_hash != NULL) {
494 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
495 struct if_proto *proto;
496 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
497 count++;
498 }
499 }
500 }
501
502 return count;
503 }
504
505 __private_extern__ void
506 dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
507 struct net_event_data *event_data, u_long event_data_len)
508 {
509 struct net_event_data ev_data;
510 struct kev_msg ev_msg;
511
512 /*
513 * A net event always starts with a net_event_data structure,
514 * but the caller can generate a simple net event or
515 * provide a longer event structure to post.
516 */
517
518 ev_msg.vendor_code = KEV_VENDOR_APPLE;
519 ev_msg.kev_class = KEV_NETWORK_CLASS;
520 ev_msg.kev_subclass = event_subclass;
521 ev_msg.event_code = event_code;
522
523 if (event_data == 0) {
524 event_data = &ev_data;
525 event_data_len = sizeof(struct net_event_data);
526 }
527
528 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
529 event_data->if_family = ifp->if_family;
530 event_data->if_unit = (unsigned long) ifp->if_unit;
531
532 ev_msg.dv[0].data_length = event_data_len;
533 ev_msg.dv[0].data_ptr = event_data;
534 ev_msg.dv[1].data_length = 0;
535
536 dlil_event_internal(ifp, &ev_msg);
537 }
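/*
 * Caller sketch (hypothetical; assumes the KEV_DL_LINK_ON event code
 * from the DLIL headers): posting a simple link-up event.  With
 * event_data == NULL, dlil_post_msg fills in a bare net_event_data
 * carrying just the interface name, family and unit, as shown above.
 */
#if 0
static void
example_post_link_up(struct ifnet *ifp)
{
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
}
#endif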
538
539 void dlil_init(void);
540 void
541 dlil_init(void)
542 {
543 lck_grp_attr_t *grp_attributes = 0;
544 lck_attr_t *lck_attributes = 0;
545 lck_grp_t *input_lock_grp = 0;
546
547 TAILQ_INIT(&dlil_ifnet_head);
548 TAILQ_INIT(&if_family_head);
549 TAILQ_INIT(&proto_family_head);
550 TAILQ_INIT(&ifnet_head);
551
552 /* Setup the lock groups we will use */
553 grp_attributes = lck_grp_attr_alloc_init();
554 lck_grp_attr_setdefault(grp_attributes);
555
556 dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
557 #if IFNET_RW_LOCK
558 ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
559 #else
560 ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
561 #endif
562 ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
563 input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
564 lck_grp_attr_free(grp_attributes);
565 grp_attributes = 0;
566
567 /* Setup the lock attributes we will use */
568 lck_attributes = lck_attr_alloc_init();
569 lck_attr_setdefault(lck_attributes);
570
571 ifnet_lock_attr = lck_attr_alloc_init();
572 lck_attr_setdefault(ifnet_lock_attr);
573
574 dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
575 input_lock_grp = 0;
576
577 ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
578 proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
579 dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
580 dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
581
582 lck_attr_free(lck_attributes);
583 lck_attributes = 0;
584
585 /*
586 * Start up the dlil input thread once everything is initialized
587 */
588 (void) kernel_thread(kernel_task, dlil_input_thread);
589 (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
590 }
591
592 int
593 dlil_attach_filter(
594 struct ifnet *ifp,
595 const struct iff_filter *if_filter,
596 interface_filter_t *filter_ref)
597 {
598 int retval = 0;
599 struct ifnet_filter *filter;
600
601 MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
602 if (filter == NULL)
603 return ENOMEM;
604 bzero(filter, sizeof(*filter));
605
606
607 filter->filt_ifp = ifp;
608 filter->filt_cookie = if_filter->iff_cookie;
609 filter->filt_name = if_filter->iff_name;
610 filter->filt_protocol = if_filter->iff_protocol;
611 filter->filt_input = if_filter->iff_input;
612 filter->filt_output = if_filter->iff_output;
613 filter->filt_event = if_filter->iff_event;
614 filter->filt_ioctl = if_filter->iff_ioctl;
615 filter->filt_detached = if_filter->iff_detached;
616
617 if ((retval = dlil_write_begin()) != 0) {
618 /* Failed to acquire the write lock */
619 FREE(filter, M_NKE);
620 return retval;
621 }
622 TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
623 dlil_write_end();
624 *filter_ref = filter;
625 return retval;
626 }
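/*
 * Attachment sketch (hypothetical filter, for illustration only): fill
 * out a struct iff_filter and keep the returned interface_filter_t so
 * dlil_detach_filter can be called later.  An iff_protocol of 0 means
 * the filter sees all protocols.
 */
#if 0
static errno_t
example_filt_input(__unused void *cookie, __unused ifnet_t ifp,
    __unused protocol_family_t protocol, __unused mbuf_t *data,
    __unused char **frame_ptr)
{
	return 0;	/* pass the packet along unmodified */
}

static int
example_attach_filter(struct ifnet *ifp, interface_filter_t *filter_ref)
{
	struct iff_filter filt;

	bzero(&filt, sizeof(filt));
	filt.iff_cookie = NULL;
	filt.iff_name = "org.example.filter";	/* hypothetical name */
	filt.iff_protocol = 0;			/* all protocols */
	filt.iff_input = example_filt_input;

	return dlil_attach_filter(ifp, &filt, filter_ref);
}
#endif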
627
628 static int
629 dlil_detach_filter_internal(interface_filter_t filter, int detached)
630 {
631 int retval = 0;
632
633 if (detached == 0) {
634 ifnet_t ifp = NULL;
635 interface_filter_t entry = NULL;
636
637 /* Take the write lock */
638 retval = dlil_write_begin();
639 if (retval != 0 && retval != EDEADLK)
640 return retval;
641
642 /*
643 * At this point either we have the write lock (retval == 0)
644 * or we couldn't get it (retval == EDEADLK) because someone
645 * else up the stack is holding the read lock. It is safe to
646 * read, since either the read or the write lock is held. Verify the filter
647 * parameter before proceeding.
648 */
649 ifnet_head_lock_shared();
650 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
651 TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
652 if (entry == filter)
653 break;
654 }
655 if (entry == filter)
656 break;
657 }
658 ifnet_head_done();
659
660 if (entry != filter) {
661 /* filter parameter is not a valid filter ref */
662 if (retval == 0) {
663 dlil_write_end();
664 }
665 return EINVAL;
666 }
667
668 if (retval == EDEADLK) {
669 /* Perform a delayed detach */
670 filter->filt_detaching = 1;
671 dlil_detach_waiting = 1;
672 wakeup(&dlil_detach_waiting);
673 return 0;
674 }
675
676 /* Remove the filter from the list */
677 TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
678 dlil_write_end();
679 }
680
681 /* Call the detached function if there is one */
682 if (filter->filt_detached)
683 filter->filt_detached(filter->filt_cookie, filter->filt_ifp);
684
685 /* Free the filter */
686 FREE(filter, M_NKE);
687
688 return retval;
689 }
690
691 void
692 dlil_detach_filter(interface_filter_t filter)
693 {
694 if (filter == NULL)
695 return;
696 dlil_detach_filter_internal(filter, 0);
697 }
698
699 static void
700 dlil_input_thread_continue(
701 __unused void* foo,
702 __unused wait_result_t wait)
703 {
704 while (1) {
705 struct mbuf *m, *m_loop;
706
707 lck_spin_lock(dlil_input_lock);
708 m = dlil_input_mbuf_head;
709 dlil_input_mbuf_head = NULL;
710 dlil_input_mbuf_tail = NULL;
711 m_loop = dlil_input_loop_head;
712 dlil_input_loop_head = NULL;
713 dlil_input_loop_tail = NULL;
714 lck_spin_unlock(dlil_input_lock);
715
716 /*
717 * NOTE: we should think about adding thread starvation
718 * safeguards here if we have to deal with long chains of
719 * packets.
720 */
721 while (m) {
722 struct mbuf *m0 = m->m_nextpkt;
723 void *header = m->m_pkthdr.header;
724
725 m->m_nextpkt = NULL;
726 m->m_pkthdr.header = NULL;
727 (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
728 m = m0;
729 }
730 m = m_loop;
731 while (m) {
732 struct mbuf *m0 = m->m_nextpkt;
733 void *header = m->m_pkthdr.header;
734 struct ifnet *ifp = &loif[0];
735
736 m->m_nextpkt = NULL;
737 m->m_pkthdr.header = NULL;
738 (void) dlil_input_packet(ifp, m, header);
739 m = m0;
740 }
741
742 proto_input_run();
743
744 if (dlil_input_mbuf_head == NULL &&
745 dlil_input_loop_head == NULL && inject_buckets == 0) {
746 assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
747 (void) thread_block(dlil_input_thread_continue);
748 /* NOTREACHED */
749 }
750 }
751 }
752
753 void dlil_input_thread(void)
754 {
755 register thread_t self = current_thread();
756
757 ml_thread_policy(self, MACHINE_GROUP,
758 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
759
760 dlil_initialized = 1;
761 dlil_input_thread_ptr = current_thread();
762 dlil_input_thread_continue(NULL, THREAD_RESTART);
763 }
764
765 int
766 dlil_input_with_stats(
767 struct ifnet *ifp,
768 struct mbuf *m_head,
769 struct mbuf *m_tail,
770 const struct ifnet_stat_increment_param *stats)
771 {
772 /* WARNING
773 * Because of looped-back multicast we cannot stuff the ifp in
774 * the rcvif of the packet header: loopback has its own dlil
775 * input queue
776 */
777
778 lck_spin_lock(dlil_input_lock);
779 if (ifp->if_type != IFT_LOOP) {
780 if (dlil_input_mbuf_head == NULL)
781 dlil_input_mbuf_head = m_head;
782 else if (dlil_input_mbuf_tail != NULL)
783 dlil_input_mbuf_tail->m_nextpkt = m_head;
784 dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
785 } else {
786 if (dlil_input_loop_head == NULL)
787 dlil_input_loop_head = m_head;
788 else if (dlil_input_loop_tail != NULL)
789 dlil_input_loop_tail->m_nextpkt = m_head;
790 dlil_input_loop_tail = m_tail ? m_tail : m_head;
791 }
792 if (stats) {
793 ifp->if_data.ifi_ipackets += stats->packets_in;
794 ifp->if_data.ifi_ibytes += stats->bytes_in;
795 ifp->if_data.ifi_ierrors += stats->errors_in;
796
797 ifp->if_data.ifi_opackets += stats->packets_out;
798 ifp->if_data.ifi_obytes += stats->bytes_out;
799 ifp->if_data.ifi_oerrors += stats->errors_out;
800
801 ifp->if_data.ifi_collisions += stats->collisions;
802 ifp->if_data.ifi_iqdrops += stats->dropped;
803 }
804 lck_spin_unlock(dlil_input_lock);
805
806 wakeup((caddr_t)&dlil_input_thread_wakeup);
807
808 return 0;
809 }
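/*
 * Driver-side sketch (hypothetical): a receive path handing a chain of
 * packets to DLIL in one call while batching the statistics update.
 * The stats argument is optional; dlil_input() below is the form
 * without statistics.
 */
#if 0
static void
example_driver_rx(struct ifnet *ifp, struct mbuf *head, struct mbuf *tail,
    u_int32_t npackets, u_int32_t nbytes)
{
	struct ifnet_stat_increment_param stats;

	bzero(&stats, sizeof(stats));
	stats.packets_in = npackets;
	stats.bytes_in = nbytes;

	(void) dlil_input_with_stats(ifp, head, tail, &stats);
}
#endif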
810
811 int
812 dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
813 {
814 return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
815 }
816
817 int
818 dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
819 char *frame_header)
820 {
821 int retval;
822 struct if_proto *ifproto = 0;
823 protocol_family_t protocol_family;
824 struct ifnet_filter *filter;
825
826
827 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);
828
829 /*
830 * Lock the interface while we run through
831 * the filters and the demux. This lock
832 * protects the filter list and the demux list.
833 */
834 dlil_read_begin();
835
836 /*
837 * Call family demux module. If the demux module finds a match
838 * for the frame it will fill-in the ifproto pointer.
839 */
840
841 retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
842 if (retval != 0)
843 protocol_family = 0;
844 if (retval == EJUSTRETURN) {
845 dlil_read_end();
846 return 0;
847 }
848
849 /* DANGER!!! */
850 if (m->m_flags & (M_BCAST|M_MCAST))
851 ifp->if_imcasts++;
852
853 /*
854 * Run interface filters
855 */
856
857 /* Do not pass VLAN tagged packets to filters PR-3586856 */
858 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
859 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
860 int filter_result;
861 if (filter->filt_input && (filter->filt_protocol == 0 ||
862 filter->filt_protocol == protocol_family)) {
863 filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);
864
865 if (filter_result) {
866 dlil_read_end();
867 if (filter_result == EJUSTRETURN) {
868 filter_result = 0;
869 }
870 else {
871 m_freem(m);
872 }
873
874 return filter_result;
875 }
876 }
877 }
878 }
879
880 /* Demux is done, interface filters have been processed, unlock the mutex */
881 if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
882 dlil_read_end();
883 if (retval != EJUSTRETURN) {
884 m_freem(m);
885 return retval;
886 }
887 else
888 return 0;
889 }
890
891 ifproto = find_attached_proto(ifp, protocol_family);
892
893 if (ifproto == 0) {
894 dlil_read_end();
895 DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
896 m_freem(m);
897 return 0;
898 }
899
900 /*
901 * Hand the packet off to the protocol.
902 */
903
904 if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
905 lck_mtx_lock(ifproto->dl_domain->dom_mtx);
906 }
907
908 if (ifproto->proto_kpi == kProtoKPI_DLIL)
909 retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
910 ifp, ifproto->protocol_family,
911 TRUE);
912 else
913 retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);
914
915 if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
916 lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
917 }
918
919 dlil_read_end();
920
921 if (retval == EJUSTRETURN)
922 retval = 0;
923 else
924 if (retval)
925 m_freem(m);
926
927 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
928 return retval;
929 }
930
931 static int
932 dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
933 {
934 struct ifnet_filter *filter;
935
936 if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
937 dlil_read_begin();
938
939 /* Pass the event to the interface filters */
940 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
941 if (filter->filt_event)
942 filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
943 }
944
945 if (ifp->if_proto_hash) {
946 int i;
947
948 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
949 struct if_proto *proto;
950
951 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
952 /* Pass the event to the protocol */
953 if (proto->proto_kpi == kProtoKPI_DLIL) {
954 if (proto->kpi.dlil.dl_event)
955 proto->kpi.dlil.dl_event(ifp, event);
956 }
957 else {
958 if (proto->kpi.v1.event)
959 proto->kpi.v1.event(ifp, proto->protocol_family, event);
960 }
961 }
962 }
963 }
964
965 dlil_read_end();
966
967 /* Pass the event to the interface */
968 if (ifp->if_event)
969 ifp->if_event(ifp, event);
970
971 if (ifp_unuse(ifp))
972 ifp_use_reached_zero(ifp);
973 }
974
975 return kev_post_msg(event);
976 }
977
978 int
979 dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
980 {
981 int result = 0;
982
983 struct kev_msg kev_msg;
984
985 kev_msg.vendor_code = event->vendor_code;
986 kev_msg.kev_class = event->kev_class;
987 kev_msg.kev_subclass = event->kev_subclass;
988 kev_msg.event_code = event->event_code;
989 kev_msg.dv[0].data_ptr = &event->event_data[0];
990 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
991 kev_msg.dv[1].data_length = 0;
992
993
994 result = dlil_event_internal(ifp, &kev_msg);
995
996
997 return result;
998 }
999
1000 int
1001 dlil_output_list(
1002 struct ifnet* ifp,
1003 u_long proto_family,
1004 struct mbuf *packetlist,
1005 caddr_t route,
1006 const struct sockaddr *dest,
1007 int raw)
1008 {
1009 char *frame_type = 0;
1010 char *dst_linkaddr = 0;
1011 int error, retval = 0;
1012 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1013 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1014 struct ifnet_filter *filter;
1015 struct if_proto *proto = 0;
1016 struct mbuf *m;
1017
1018 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
1019 #if BRIDGE
1020 if ((raw != 0) || proto_family != PF_INET || do_bridge) {
1021 #else
1022 if ((raw != 0) || proto_family != PF_INET) {
1023 #endif
1024 while (packetlist) {
1025 m = packetlist;
1026 packetlist = packetlist->m_nextpkt;
1027 m->m_nextpkt = NULL;
1028 error = dlil_output(ifp, proto_family, m, route, dest, raw);
1029 if (error) {
1030 if (packetlist)
1031 m_freem_list(packetlist);
1032 return (error);
1033 }
1034 }
1035 return (0);
1036 }
1037
1038 dlil_read_begin();
1039
1040 frame_type = frame_type_buffer;
1041 dst_linkaddr = dst_linkaddr_buffer;
1042 m = packetlist;
1043 packetlist = packetlist->m_nextpkt;
1044 m->m_nextpkt = NULL;
1045
1046 proto = find_attached_proto(ifp, proto_family);
1047 if (proto == NULL) {
1048 retval = ENXIO;
1049 goto cleanup;
1050 }
1051
1052 retval = 0;
1053 if (proto->proto_kpi == kProtoKPI_DLIL) {
1054 if (proto->kpi.dlil.dl_pre_output)
1055 retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1056 }
1057 else {
1058 if (proto->kpi.v1.pre_output)
1059 retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1060 }
1061
1062 if (retval) {
1063 if (retval != EJUSTRETURN) {
1064 m_freem(m);
1065 }
1066 goto cleanup;
1067 }
1068
1069 do {
1070
1071
1072 if (ifp->if_framer) {
1073 retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
1074 if (retval) {
1075 if (retval != EJUSTRETURN) {
1076 m_freem(m);
1077 }
1078 goto cleanup;
1079 }
1080 }
1081
1082 /*
1083 * Let interface filters (if any) do their thing ...
1084 */
1085 /* Do not pass VLAN tagged packets to filters PR-3586856 */
1086 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
1087 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1088 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
1089 filter->filt_output) {
1090 retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
1091 if (retval) {
1092 printf("dlil_output_list %p filt_output %d\n", m, retval);
1093 if (retval != EJUSTRETURN)
1094 m_freem(m);
1095 goto next;
1096 }
1097 }
1098 }
1099 }
1100 /*
1101 * Finally, call the driver.
1102 */
1103
1104 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1105 retval = ifp->if_output(ifp, m);
1106 if (retval) {
1107 printf("dlil_output_list: output error retval = %x\n", retval);
1108 goto cleanup;
1109 }
1110 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1111 next:
1112 m = packetlist;
1113 if (m) {
1114 packetlist = packetlist->m_nextpkt;
1115 m->m_nextpkt = NULL;
1116 }
1117 } while (m);
1118
1119
1120 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1121
1122 cleanup:
1123 dlil_read_end();
1124 if (packetlist) /* if any packet left, clean up */
1125 m_freem_list(packetlist);
1126 if (retval == EJUSTRETURN)
1127 retval = 0;
1128 return retval;
1129 }
1130
1131 /*
1132 * dlil_output
1133 *
1134 * Caller should have a lock on the protocol domain if the protocol
1135 * doesn't support finer grained locking. In most cases, the lock
1136 * will be held from the socket layer and won't be released until
1137 * we return back to the socket layer.
1138 *
1139 * This does mean that we must take a protocol lock before we take
1140 * an interface lock if we're going to take both. This makes sense
1141 * because a protocol is likely to interact with an ifp while it
1142 * is under the protocol lock.
1143 */
1144 int
1145 dlil_output(
1146 struct ifnet* ifp,
1147 u_long proto_family,
1148 struct mbuf *m,
1149 caddr_t route,
1150 const struct sockaddr *dest,
1151 int raw)
1152 {
1153 char *frame_type = 0;
1154 char *dst_linkaddr = 0;
1155 int retval = 0;
1156 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1157 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1158 struct ifnet_filter *filter;
1159
1160 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
1161
1162 dlil_read_begin();
1163
1164 frame_type = frame_type_buffer;
1165 dst_linkaddr = dst_linkaddr_buffer;
1166
1167 if (raw == 0) {
1168 struct if_proto *proto = 0;
1169
1170 proto = find_attached_proto(ifp, proto_family);
1171 if (proto == NULL) {
1172 m_freem(m);
1173 retval = ENXIO;
1174 goto cleanup;
1175 }
1176
1177 retval = 0;
1178 if (proto->proto_kpi == kProtoKPI_DLIL) {
1179 if (proto->kpi.dlil.dl_pre_output)
1180 retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1181 }
1182 else {
1183 if (proto->kpi.v1.pre_output)
1184 retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1185 }
1186
1187 if (retval) {
1188 if (retval != EJUSTRETURN) {
1189 m_freem(m);
1190 }
1191 goto cleanup;
1192 }
1193 }
1194
1195 /*
1196 * Call framing module
1197 */
1198 if ((raw == 0) && (ifp->if_framer)) {
1199 retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
1200 if (retval) {
1201 if (retval != EJUSTRETURN) {
1202 m_freem(m);
1203 }
1204 goto cleanup;
1205 }
1206 }
1207
1208 #if BRIDGE
1209 /* !!!LOCKING!!!
1210 *
1211 * Need to consider how to handle this.
1212 */
1213 broken-locking
1214 if (do_bridge) {
1215 struct mbuf *m0 = m;
1216 struct ether_header *eh = mtod(m, struct ether_header *);
1217
1218 if (m->m_pkthdr.rcvif)
1219 m->m_pkthdr.rcvif = NULL;
1220 ifp = bridge_dst_lookup(eh);
1221 bdg_forward(&m0, ifp);
1222 if (m0)
1223 m_freem(m0);
1224
1225 return 0;
1226 }
1227 #endif
1228
1229
1230 /*
1231 * Let interface filters (if any) do their thing ...
1232 */
1233
1234 /* Do not pass VLAN tagged packets to filters PR-3586856 */
1235 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
1236 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1237 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
1238 filter->filt_output) {
1239 retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
1240 if (retval) {
1241 if (retval != EJUSTRETURN)
1242 m_freem(m);
1243 goto cleanup;
1244 }
1245 }
1246 }
1247 }
1248
1249 /*
1250 * Finally, call the driver.
1251 */
1252
1253 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1254 retval = ifp->if_output(ifp, m);
1255 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1256
1257 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1258
1259 cleanup:
1260 dlil_read_end();
1261 if (retval == EJUSTRETURN)
1262 retval = 0;
1263 return retval;
1264 }
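/*
 * Lock-ordering sketch (hypothetical caller): per the comment above
 * dlil_output, a protocol takes its domain lock before any interface
 * lock is taken on its behalf.
 */
#if 0
static int
example_proto_send(struct domain *dp, struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dest)
{
	int error;

	lck_mtx_lock(dp->dom_mtx);	/* protocol lock first ... */
	error = dlil_output(ifp, PF_INET, m, NULL, dest, 0);
	lck_mtx_unlock(dp->dom_mtx);	/* ... released after DLIL is done */
	return error;
}
#endif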
1265
1266 int
1267 dlil_ioctl(u_long proto_fam,
1268 struct ifnet *ifp,
1269 u_long ioctl_code,
1270 caddr_t ioctl_arg)
1271 {
1272 struct ifnet_filter *filter;
1273 int retval = EOPNOTSUPP;
1274 int result = 0;
1275 struct if_family_str *if_family;
1276 int holding_read = 0;
1277
1278 /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
1279 result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
1280 if (result != 0)
1281 return EOPNOTSUPP;
1282
1283 dlil_read_begin();
1284 holding_read = 1;
1285
1286 /* Run the interface filters first.
1287 * We want to run all filters before calling the protocol,
1288 * interface family, or interface.
1289 */
1290 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1291 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
1292 filter->filt_ioctl != NULL) {
1293 result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
1294 /* Only update retval if no one has handled the ioctl */
1295 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1296 if (result == ENOTSUP)
1297 result = EOPNOTSUPP;
1298 retval = result;
1299 if (retval && retval != EOPNOTSUPP) {
1300 goto cleanup;
1301 }
1302 }
1303 }
1304 }
1305
1306 /* Allow the protocol to handle the ioctl */
1307 if (proto_fam) {
1308 struct if_proto *proto = find_attached_proto(ifp, proto_fam);
1309
1310 if (proto != 0) {
1311 result = EOPNOTSUPP;
1312 if (proto->proto_kpi == kProtoKPI_DLIL) {
1313 if (proto->kpi.dlil.dl_ioctl)
1314 result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
1315 }
1316 else {
1317 if (proto->kpi.v1.ioctl)
1318 result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
1319 }
1320
1321 /* Only update retval if no one has handled the ioctl */
1322 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1323 if (result == ENOTSUP)
1324 result = EOPNOTSUPP;
1325 retval = result;
1326 if (retval && retval != EOPNOTSUPP) {
1327 goto cleanup;
1328 }
1329 }
1330 }
1331 }
1332
1333 /*
1334 * Since we have incremented the use count on the ifp, we are guaranteed
1335 * that the ifp will not go away (the function pointers may not be changed).
1336 * We release the dlil read lock so the interface ioctl may trigger a
1337 * protocol attach. This happens with vlan and may occur with other virtual
1338 * interfaces.
1339 */
1340 dlil_read_end();
1341 holding_read = 0;
1342
1343 /* retval is either 0 or EOPNOTSUPP */
1344
1345 /*
1346 * Let the family handle this ioctl.
1347 * If it returns something non-zero and not EOPNOTSUPP, we're done.
1348 * If it returns zero, the ioctl was handled, so set retval to zero.
1349 */
1350 if_family = find_family_module(ifp->if_family);
1351 if ((if_family) && (if_family->ifmod_ioctl)) {
1352 result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1353
1354 /* Only update retval if no one has handled the ioctl */
1355 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1356 if (result == ENOTSUP)
1357 result = EOPNOTSUPP;
1358 retval = result;
1359 if (retval && retval != EOPNOTSUPP) {
1360 goto cleanup;
1361 }
1362 }
1363 }
1364
1365 /*
1366 * Let the interface handle this ioctl.
1367 * If it returns EOPNOTSUPP, ignore that, we may have
1368 * already handled this in the protocol or family.
1369 */
1370 if (ifp->if_ioctl)
1371 result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1372
1373 /* Only update retval if no one has handled the ioctl */
1374 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1375 if (result == ENOTSUP)
1376 result = EOPNOTSUPP;
1377 retval = result;
1378 if (retval && retval != EOPNOTSUPP) {
1379 goto cleanup;
1380 }
1381 }
1382
1383 cleanup:
1384 if (holding_read)
1385 dlil_read_end();
1386 if (ifp_unuse(ifp))
1387 ifp_use_reached_zero(ifp);
1388
1389 if (retval == EJUSTRETURN)
1390 retval = 0;
1391 return retval;
1392 }
1393
1394 __private_extern__ errno_t
1395 dlil_set_bpf_tap(
1396 ifnet_t ifp,
1397 bpf_tap_mode mode,
1398 bpf_packet_func callback)
1399 {
1400 errno_t error = 0;
1401
1402 dlil_read_begin();
1403 if (ifp->if_set_bpf_tap)
1404 error = ifp->if_set_bpf_tap(ifp, mode, callback);
1405 dlil_read_end();
1406
1407 return error;
1408 }
1409
1410 __private_extern__ errno_t
1411 dlil_resolve_multi(
1412 struct ifnet *ifp,
1413 const struct sockaddr *proto_addr,
1414 struct sockaddr *ll_addr,
1415 size_t ll_len)
1416 {
1417 errno_t result = EOPNOTSUPP;
1418 struct if_proto *proto;
1419 const struct sockaddr *verify;
1420
1421 dlil_read_begin();
1422
1423 bzero(ll_addr, ll_len);
1424
1425 /* Call the protocol first */
1426 proto = find_attached_proto(ifp, proto_addr->sa_family);
1427 if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
1428 proto->kpi.v1.resolve_multi != NULL) {
1429 result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
1430 (struct sockaddr_dl*)ll_addr, ll_len);
1431 }
1432
1433 /* Let the interface verify the multicast address */
1434 if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
1435 if (result == 0)
1436 verify = ll_addr;
1437 else
1438 verify = proto_addr;
1439 result = ifp->if_check_multi(ifp, verify);
1440 }
1441
1442 dlil_read_end();
1443
1444 return result;
1445 }
1446
1447 __private_extern__ errno_t
1448 dlil_send_arp_internal(
1449 ifnet_t ifp,
1450 u_short arpop,
1451 const struct sockaddr_dl* sender_hw,
1452 const struct sockaddr* sender_proto,
1453 const struct sockaddr_dl* target_hw,
1454 const struct sockaddr* target_proto)
1455 {
1456 struct if_proto *proto;
1457 errno_t result = 0;
1458
1459 dlil_read_begin();
1460
1461 proto = find_attached_proto(ifp, target_proto->sa_family);
1462 if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
1463 proto->kpi.v1.send_arp == NULL) {
1464 result = ENOTSUP;
1465 }
1466 else {
1467 result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
1468 target_hw, target_proto);
1469 }
1470
1471 dlil_read_end();
1472
1473 return result;
1474 }
1475
1476 __private_extern__ errno_t
1477 dlil_send_arp(
1478 ifnet_t ifp,
1479 u_short arpop,
1480 const struct sockaddr_dl* sender_hw,
1481 const struct sockaddr* sender_proto,
1482 const struct sockaddr_dl* target_hw,
1483 const struct sockaddr* target_proto)
1484 {
1485 errno_t result = 0;
1486
1487 if (target_proto == NULL || (sender_proto &&
1488 sender_proto->sa_family != target_proto->sa_family))
1489 return EINVAL;
1490
1491 /*
1492 * If this is an ARP request and the target IP is IPv4LL,
1493 * send the request on all interfaces.
1494 */
1495 if (IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)
1496 && ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
1497 arpop == ARPOP_REQUEST) {
1498 ifnet_t *ifp_list;
1499 u_int32_t count;
1500 u_int32_t ifp_on;
1501
1502 result = ENOTSUP;
1503
1504 if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
1505 for (ifp_on = 0; ifp_on < count; ifp_on++) {
1506 errno_t new_result;
1507 ifaddr_t source_hw = NULL;
1508 ifaddr_t source_ip = NULL;
1509 struct sockaddr_in source_ip_copy;
1510
1511 /*
1512 * Only arp on interfaces marked for IPv4LL ARPing. This may
1513 * mean that we don't ARP on the interface the subnet route
1514 * points to.
1515 */
1516 if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
1517 continue;
1518 }
1519
1520 source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);
1521
1522 /* Find the source IP address */
1523 ifnet_lock_shared(ifp_list[ifp_on]);
1524 TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
1525 ifa_link) {
1526 if (source_ip->ifa_addr &&
1527 source_ip->ifa_addr->sa_family == AF_INET) {
1528 break;
1529 }
1530 }
1531
1532 /* No IP Source, don't arp */
1533 if (source_ip == NULL) {
1534 ifnet_lock_done(ifp_list[ifp_on]);
1535 continue;
1536 }
1537
1538 /* Copy the source IP address */
1539 source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;
1540
1541 ifnet_lock_done(ifp_list[ifp_on]);
1542
1543 /* Send the ARP */
1544 new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
1545 (struct sockaddr_dl*)source_hw->ifa_addr,
1546 (struct sockaddr*)&source_ip_copy, NULL,
1547 target_proto);
1548
1549 if (result == ENOTSUP) {
1550 result = new_result;
1551 }
1552 }
1553 }
1554
1555 ifnet_list_free(ifp_list);
1556 }
1557 else {
1558 result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
1559 target_hw, target_proto);
1560 }
1561
1562 return result;
1563 }
1564
1565 static int
1566 ifp_use(
1567 struct ifnet *ifp,
1568 int handle_zero)
1569 {
1570 int old_value;
1571 int retval = 0;
1572
1573 do {
1574 old_value = ifp->if_usecnt;
1575 if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
1576 retval = ENXIO; // ifp is invalid
1577 break;
1578 }
1579 } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));
1580
1581 return retval;
1582 }
1583
1584 /* ifp_unuse is broken into two pieces.
1585 *
1586 * ifp_use and ifp_unuse must be called between the caller's calls to
1587 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
1588 * operations after dlil_write_end has been called. For this reason,
1589 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
1590 * returns a non-zero value. The caller must call ifp_use_reached_zero
1591 * after the caller has called dlil_write_end.
1592 */
1593 static void
1594 ifp_use_reached_zero(
1595 struct ifnet *ifp)
1596 {
1597 struct if_family_str *if_family;
1598 ifnet_detached_func free_func;
1599
1600 dlil_read_begin();
1601
1602 if (ifp->if_usecnt != 0)
1603 panic("ifp_use_reached_zero: ifp->if_usecnt != 0");
1604
1605 /* Let BPF know we're detaching */
1606 bpfdetach(ifp);
1607
1608 ifnet_head_lock_exclusive();
1609 ifnet_lock_exclusive(ifp);
1610
1611 /* Remove ourselves from the list */
1612 TAILQ_REMOVE(&ifnet_head, ifp, if_link);
1613 ifnet_addrs[ifp->if_index - 1] = 0;
1614
1615 /* ifp has now been removed from the interface list */
1616 while (ifp->if_multiaddrs.lh_first) {
1617 struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;
1618
1619 /*
1620 * When the interface is gone, we will no longer
1621 * be listening on these multicasts. Various bits
1622 * of the stack may be referencing these multicasts,
1623 * release only our reference.
1624 */
1625 LIST_REMOVE(ifma, ifma_link);
1626 ifma->ifma_ifp = NULL;
1627 ifma_release(ifma);
1628 }
1629 ifnet_head_done();
1630
1631 ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
1632 ifnet_lock_done(ifp);
1633
1634 if_family = find_family_module(ifp->if_family);
1635 if (if_family && if_family->del_if)
1636 if_family->del_if(ifp);
1637 #if 0
1638 if (--if_family->if_usecnt == 0) {
1639 if (if_family->shutdown)
1640 (*if_family->shutdown)();
1641
1642 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1643 FREE(if_family, M_IFADDR);
1644 }
1645 #endif
1646
1647 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1648 free_func = ifp->if_free;
1649 dlil_read_end();
1650
1651 if (free_func)
1652 free_func(ifp);
1653 }
1654
1655 static int
1656 ifp_unuse(
1657 struct ifnet *ifp)
1658 {
1659 int oldval;
1660 oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
1661 if (oldval == 0)
1662 panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);
1663
1664 if (oldval > 1)
1665 return 0;
1666
1667 if ((ifp->if_eflags & IFEF_DETACHING) == 0)
1668 panic("ifp_unuse: use count reached zero but detaching flag is not set!");
1669
1670 return 1; /* caller must call ifp_use_reached_zero */
1671 }
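/*
 * Usage sketch (hypothetical) of the two-piece protocol described
 * above ifp_use_reached_zero: the use count is dropped while holding
 * the write lock, but the teardown runs only after dlil_write_end.
 */
#if 0
static void
example_drop_use(struct ifnet *ifp)
{
	int reached_zero;

	if (dlil_write_begin() != 0)
		return;		/* caller would defer, as elsewhere in this file */
	reached_zero = ifp_unuse(ifp);
	dlil_write_end();

	if (reached_zero)
		ifp_use_reached_zero(ifp);
}
#endif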
1672
1673 void
1674 ifp_reference(
1675 struct ifnet *ifp)
1676 {
1677 int oldval;
1678 oldval = OSIncrementAtomic(&ifp->if_refcnt);
1679 }
1680
1681 void
1682 ifp_release(
1683 struct ifnet *ifp)
1684 {
1685 int oldval;
1686 oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
1687 if (oldval == 0)
1688 panic("ifp_release - refcount decremented past zero!");
1689 }
1690
1691 extern lck_mtx_t *domain_proto_mtx;
1692
1693 static int
1694 dlil_attach_protocol_internal(
1695 struct if_proto *proto,
1696 const struct ddesc_head_str *demux,
1697 const struct ifnet_demux_desc *demux_list,
1698 u_int32_t demux_count)
1699 {
1700 struct ddesc_head_str temp_head;
1701 struct kev_dl_proto_data ev_pr_data;
1702 struct ifnet *ifp = proto->ifp;
1703 int retval = 0;
1704 u_long hash_value = proto_hash_value(proto->protocol_family);
1705 int if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
1706 void* free_me = NULL;
1707
1708 /* setup some of the common values */
1709
1710 {
1711 lck_mtx_lock(domain_proto_mtx);
1712 struct domain *dp = domains;
1713 while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
1714 dp = dp->dom_next;
1715 proto->dl_domain = dp;
1716 lck_mtx_unlock(domain_proto_mtx);
1717 }
1718
1719 /*
1720 * Convert the demux descriptors to a type the interface
1721 * will understand. Checking e_flags should be safe, this
1722 * flag won't change.
1723 */
1724 if (if_using_kpi && demux) {
1725 /* Convert the demux linked list to a demux_list */
1726 struct dlil_demux_desc *demux_entry;
1727 struct ifnet_demux_desc *temp_list = NULL;
1728 u_int32_t i = 0;
1729
1730 TAILQ_FOREACH(demux_entry, demux, next) {
1731 i++;
1732 }
1733
1734 temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
1735 free_me = temp_list;
1736
1737 if (temp_list == NULL)
1738 return ENOMEM;
1739
1740 i = 0;
1741 TAILQ_FOREACH(demux_entry, demux, next) {
1742 /* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
1743 if (demux_entry->type == 1 ||
1744 demux_entry->type == 2 ||
1745 demux_entry->type == 3) {
1746 FREE(free_me, M_TEMP);
1747 return ENOTSUP;
1748 }
1749
1750 temp_list[i].type = demux_entry->type;
1751 temp_list[i].data = demux_entry->native_type;
1752 temp_list[i].datalen = demux_entry->variants.native_type_length;
1753 i++;
1754 }
1755 demux_count = i;
1756 demux_list = temp_list;
1757 }
1758 else if (!if_using_kpi && demux_list != NULL) {
1759 struct dlil_demux_desc *demux_entry;
1760 u_int32_t i = 0;
1761
1762 demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
1763 free_me = demux_entry;
1764 if (demux_entry == NULL)
1765 return ENOMEM;
1766
1767 TAILQ_INIT(&temp_head);
1768
1769 for (i = 0; i < demux_count; i++) {
1770 demux_entry[i].type = demux_list[i].type;
1771 demux_entry[i].native_type = demux_list[i].data;
1772 demux_entry[i].variants.native_type_length = demux_list[i].datalen;
1773 TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
1774 }
1775 demux = &temp_head;
1776 }
1777
1778 /*
1779 * Take the write lock to protect readers and exclude other writers.
1780 */
1781 dlil_write_begin();
1782
1783 /* Check that the interface isn't currently detaching */
1784 ifnet_lock_shared(ifp);
1785 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
1786 ifnet_lock_done(ifp);
1787 dlil_write_end();
1788 if (free_me)
1789 FREE(free_me, M_TEMP);
1790 return ENXIO;
1791 }
1792 ifnet_lock_done(ifp);
1793
1794 if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
1795 dlil_write_end();
1796 if (free_me)
1797 FREE(free_me, M_TEMP);
1798 return EEXIST;
1799 }
1800
1801 /*
1802 * Call family module add_proto routine so it can refine the
1803 * demux descriptors as it wishes.
1804 */
1805 if (if_using_kpi)
1806 retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
1807 else {
1808 retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
1809 _cast_non_const(demux));
1810 }
1811 if (retval) {
1812 dlil_write_end();
1813 if (free_me)
1814 FREE(free_me, M_TEMP);
1815 return retval;
1816 }
1817
1818 /*
1819 * We can't fail from this point on.
1820 * Increment the number of uses (protocol attachments + interface attached).
1821 */
1822 ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
1823
1824 /*
1825 * Insert the protocol in the hash
1826 */
1827 {
1828 struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
1829 while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
1830 prev_proto = SLIST_NEXT(prev_proto, next_hash);
1831 if (prev_proto)
1832 SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
1833 else
1834 SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
1835 }
1836
1837 /*
1838 * Add to if_proto list for this interface
1839 */
1840 if_proto_ref(proto);
1841 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
1842 ifp->offercnt++;
1843 dlil_write_end();
1844
1845 /* the reserved field carries the number of protocols still attached (subject to change) */
1846 ev_pr_data.proto_family = proto->protocol_family;
1847 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1848 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
1849 (struct net_event_data *)&ev_pr_data,
1850 sizeof(struct kev_dl_proto_data));
1851
1852 DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
1853 ifp->if_name, ifp->if_unit, retval);
1854 if (free_me)
1855 FREE(free_me, M_TEMP);
1856 return retval;
1857 }
1858
1859 __private_extern__ int
1860 dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
1861 const struct ifnet_attach_proto_param *proto_details)
1862 {
1863 int retval = 0;
1864 struct if_proto *ifproto = NULL;
1865
1866 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1867 if (ifproto == 0) {
1868 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
1869 retval = ENOMEM;
1870 goto end;
1871 }
1872 bzero(ifproto, sizeof(*ifproto));
1873
1874 ifproto->ifp = ifp;
1875 ifproto->protocol_family = protocol;
1876 ifproto->proto_kpi = kProtoKPI_v1;
1877 ifproto->kpi.v1.input = proto_details->input;
1878 ifproto->kpi.v1.pre_output = proto_details->pre_output;
1879 ifproto->kpi.v1.event = proto_details->event;
1880 ifproto->kpi.v1.ioctl = proto_details->ioctl;
1881 ifproto->kpi.v1.detached = proto_details->detached;
1882 ifproto->kpi.v1.resolve_multi = proto_details->resolve;
1883 ifproto->kpi.v1.send_arp = proto_details->send_arp;
1884
1885 retval = dlil_attach_protocol_internal(ifproto, NULL,
1886 proto_details->demux_list, proto_details->demux_count);
1887
1888 end:
1889 if (retval && ifproto)
1890 FREE(ifproto, M_IFADDR);
1891 return retval;
1892 }
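/*
 * Registration sketch (hypothetical protocol): a minimal
 * ifnet_attach_proto_param for the v1 KPI.  A real protocol would
 * also supply demux_list/demux_count so if_demux can match its
 * frames; unused callbacks may be left NULL.
 */
#if 0
static errno_t
example_proto_input(__unused ifnet_t ifp,
    __unused protocol_family_t protocol, mbuf_t packet,
    __unused char *header)
{
	/* a real protocol would demux and deliver the packet here */
	m_freem(packet);
	return 0;
}

static int
example_attach_proto(ifnet_t ifp)
{
	struct ifnet_attach_proto_param param;

	bzero(&param, sizeof(param));
	param.input = example_proto_input;

	return dlil_attach_protocol_kpi(ifp, PF_INET, &param);
}
#endif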
1893
1894 int
1895 dlil_attach_protocol(struct dlil_proto_reg_str *proto)
1896 {
1897 struct ifnet *ifp = NULL;
1898 struct if_proto *ifproto = NULL;
1899 int retval = 0;
1900
1901 /*
1902 * Do everything we can before taking the write lock
1903 */
1904
1905 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1906 return EINVAL;
1907
1908 /*
1909 * Allocate and init a new if_proto structure
1910 */
1911 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1912 if (!ifproto) {
1913 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
1914 retval = ENOMEM;
1915 goto end;
1916 }
1917
1918
1919 /* ifbyfamily returns us an ifp with an incremented if_usecnt */
1920 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1921 if (!ifp) {
1922 DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
1923 proto->interface_family, proto->unit_number);
1924 retval = ENXIO;
1925 goto end;
1926 }
1927
1928 bzero(ifproto, sizeof(struct if_proto));
1929
1930 ifproto->ifp = ifp;
1931 ifproto->protocol_family = proto->protocol_family;
1932 ifproto->proto_kpi = kProtoKPI_DLIL;
1933 ifproto->kpi.dlil.dl_input = proto->input;
1934 ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
1935 ifproto->kpi.dlil.dl_event = proto->event;
1936 ifproto->kpi.dlil.dl_offer = proto->offer;
1937 ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
1938 ifproto->kpi.dlil.dl_detached = proto->detached;
1939
1940 retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);
1941
1942 end:
1943 if (retval && ifproto)
1944 FREE(ifproto, M_IFADDR);
1945 return retval;
1946 }
1947
1948 extern void if_rtproto_del(struct ifnet *ifp, int protocol);
1949
1950 static int
1951 dlil_detach_protocol_internal(
1952 struct if_proto *proto)
1953 {
1954 struct ifnet *ifp = proto->ifp;
1955 u_long proto_family = proto->protocol_family;
1956 struct kev_dl_proto_data ev_pr_data;
1957
1958 if (proto->proto_kpi == kProtoKPI_DLIL) {
1959 if (proto->kpi.dlil.dl_detached)
1960 proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
1961 }
1962 else {
1963 if (proto->kpi.v1.detached)
1964 proto->kpi.v1.detached(ifp, proto->protocol_family);
1965 }
1966 if_proto_free(proto);
1967
1968 /*
1969 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1970 */
1971
1972 if_rtproto_del(ifp, proto_family);
1973
1974 /* the reserved field carries the number of protocols still attached (subject to change) */
1975 ev_pr_data.proto_family = proto_family;
1976 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1977 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1978 (struct net_event_data *)&ev_pr_data,
1979 sizeof(struct kev_dl_proto_data));
1980 return 0;
1981 }
1982
1983 int
1984 dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
1985 {
1986 struct if_proto *proto = NULL;
1987 int retval = 0;
1988 int use_reached_zero = 0;
1989
1990
1991 if ((retval = dlil_write_begin()) != 0) {
1992 if (retval == EDEADLK) {
1993 retval = 0;
1994 dlil_read_begin();
1995 proto = find_attached_proto(ifp, proto_family);
1996 if (proto == 0) {
1997 retval = ENXIO;
1998 }
1999 else {
2000 proto->detaching = 1;
2001 dlil_detach_waiting = 1;
2002 wakeup(&dlil_detach_waiting);
2003 }
2004 dlil_read_end();
2005 }
2006 goto end;
2007 }
2008
2009 proto = find_attached_proto(ifp, proto_family);
2010
2011 if (proto == NULL) {
2012 retval = ENXIO;
2013 dlil_write_end();
2014 goto end;
2015 }
2016
2017 /*
2018 * Call family module del_proto
2019 */
2020
2021 if (ifp->if_del_proto)
2022 ifp->if_del_proto(ifp, proto->protocol_family);
2023
2024 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2025 ifp->offercnt--;
2026
2027 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);
2028
2029 /*
2030 * We can do the rest of the work outside of the write lock.
2031 */
2032 use_reached_zero = ifp_unuse(ifp);
2033 dlil_write_end();
2034
2035 dlil_detach_protocol_internal(proto);
2036
2037 /*
2038 * Only handle the case where the interface will go away after
2039 * we've sent the message. This way post message can send the
2040 * message to the interface safely.
2041 */
2042
2043 if (use_reached_zero)
2044 ifp_use_reached_zero(ifp);
2045
2046 end:
2047 return retval;
2048 }
2049
2050 /*
2051 * dlil_delayed_detach_thread is responsible for detaching
2052 * protocols, protocol filters, and interface filters after
2053 * an attempt was made to detach one of those items while
2054 * it was not safe to do so (i.e. while inside a dlil_read_begin section).
2055 *
2056 * This function will take the dlil write lock and walk
2057 * through each of the interfaces looking for items with
2058 * the detaching flag set. When an item is found, it is
2059 * detached from the interface and placed on a local list.
2060 * After all of the items have been collected, we drop the
2061 * write lock and perform the post detach. This is done
2062 * so we only have to take the write lock once.
2063 *
2064 * When detaching a protocol, if we find that we
2065 * have detached the very last protocol and we need to call
2066 * ifp_use_reached_zero, we have to break out of our work
2067 * to drop the write lock so we can call ifp_use_reached_zero.
2068 */
2069
2070 static void
2071 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2072 {
2073 thread_t self = current_thread();
2074 int asserted = 0;
2075
2076 ml_thread_policy(self, MACHINE_GROUP,
2077 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2078
2079
2080 while (1) {
2081 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2082 struct ifnet *ifp;
2083 struct proto_hash_entry detached_protos;
2084 struct ifnet_filter_head detached_filters;
2085 struct if_proto *proto;
2086 struct if_proto *next_proto;
2087 struct ifnet_filter *filt;
2088 struct ifnet_filter *next_filt;
2089 int reached_zero;
2090
2091 reached_zero = 0;
2092
2093 /* Clear the detach waiting flag */
2094 dlil_detach_waiting = 0;
2095 TAILQ_INIT(&detached_filters);
2096 SLIST_INIT(&detached_protos);
2097
2098 ifnet_head_lock_shared();
2099 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2100 int i;
2101
2102 // Look for protocols and protocol filters
2103 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2104 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2105 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2106
2107 // Detach this protocol
2108 if (proto->detaching) {
2109 if (ifp->if_del_proto)
2110 ifp->if_del_proto(ifp, proto->protocol_family);
2111 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2112 ifp->offercnt--;
2113 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2114 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2115 reached_zero = ifp_unuse(ifp);
2116 if (reached_zero) {
2117 break;
2118 }
2119 }
2120 else {
2121 // Update prev_nextptr to point to our next ptr
2122 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2123 }
2124 }
2125 }
2126
2127 // look for interface filters that need to be detached
2128 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2129 next_filt = TAILQ_NEXT(filt, filt_next);
2130 if (filt->filt_detaching != 0) {
2131 // take this interface filter off the interface filter list
2132 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2133
2134 // put this interface filter on the detached filters list
2135 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2136 }
2137 }
2138
2139 if (ifp->if_delayed_detach) {
2140 ifp->if_delayed_detach = 0;
2141 reached_zero = ifp_unuse(ifp);
2142 }
2143
2144 if (reached_zero)
2145 break;
2146 }
2147 ifnet_head_done();
2148 dlil_write_end();
2149
2150 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2151 next_filt = TAILQ_NEXT(filt, filt_next);
2152 /*
2153 * The second parameter tells dlil_detach_filter_internal
2154 * that the filter is already off the list, so it skips the
2155 * removal but still frees the item.
2156 */
2157 dlil_detach_filter_internal(filt, 1);
2158 }
2159
2160 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2161 next_proto = SLIST_NEXT(proto, next_hash);
2162 dlil_detach_protocol_internal(proto);
2163 }
2164
2165 if (reached_zero) {
2166 ifp_use_reached_zero(ifp);
2167 dlil_detach_waiting = 1; // we may have missed something
2168 }
2169 }
2170
2171 if (!asserted && dlil_detach_waiting == 0) {
2172 asserted = 1;
2173 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2174 }
2175
2176 if (dlil_detach_waiting == 0) {
2177 asserted = 0;
2178 thread_block(dlil_delayed_detach_thread);
2179 }
2180 }
2181 }
2182
2183 static void
2184 dlil_call_delayed_detach_thread(void) {
2185 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2186 }
2187
2188 extern int if_next_index(void);
2189
2190 __private_extern__ int
2191 dlil_if_attach_with_address(
2192 struct ifnet *ifp,
2193 const struct sockaddr_dl *ll_addr)
2194 {
2195 u_long interface_family = ifp->if_family;
2196 struct if_family_str *if_family = NULL;
2197 int stat;
2198 struct ifnet *tmp_if;
2199 struct proto_hash_entry *new_proto_list = NULL;
2200 int locked = 0;
2201
2202
2203 ifnet_head_lock_shared();
2204
2205 /* Verify we aren't already on the list */
2206 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2207 if (tmp_if == ifp) {
2208 ifnet_head_done();
2209 return EEXIST;
2210 }
2211 }
2212
2213 ifnet_head_done();
2214
2215 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2216 #if IFNET_RW_LOCK
2217 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2218 #else
2219 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2220 #endif
2221
2222 if (ifp->if_lock == 0) {
2223 return ENOMEM;
2224 }
2225
2226 // Only use family if this is not a KPI interface
2227 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2228 if_family = find_family_module(interface_family);
2229 }
2230
2231 /*
2232 * Allow interfaces without protocol families to attach
2233 * only if they have the necessary fields filled out.
2234 */
2235
2236 if ((if_family == 0) &&
2237 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2238 DLIL_PRINTF("Attempt to attach interface without family module - %lu\n",
2239 interface_family);
2240 return ENODEV;
2241 }
2242
2243 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2244 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2245 M_NKE, M_WAITOK);
2246
2247 if (new_proto_list == 0) {
2248 return ENOBUFS;
2249 }
2250 }
2251
2252 dlil_write_begin();
2253 locked = 1;
2254
2255 /*
2256 * Call the family module to fill in the appropriate fields in the
2257 * ifnet structure.
2258 */
2259
2260 if (if_family) {
2261 stat = if_family->add_if(ifp);
2262 if (stat) {
2263 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2264 dlil_write_end();
2265 return stat;
2266 }
2267 ifp->if_add_proto_u.original = if_family->add_proto;
2268 ifp->if_del_proto = if_family->del_proto;
2269 if_family->refcnt++;
2270 }
2271
2272 ifp->offercnt = 0;
2273 TAILQ_INIT(&ifp->if_flt_head);
2274
2275
2276 if (new_proto_list) {
2277 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2278 ifp->if_proto_hash = new_proto_list;
2279 new_proto_list = 0;
2280 }
2281
2282 /* old_if_attach */
2283 {
2284 struct ifaddr *ifa = 0;
2285
2286 if (ifp->if_snd.ifq_maxlen == 0)
2287 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2288 TAILQ_INIT(&ifp->if_prefixhead);
2289 LIST_INIT(&ifp->if_multiaddrs);
2290 ifnet_touch_lastchange(ifp);
2291
2292 /* usecount to track attachment to the ifnet list */
2293 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2294
2295 /* Lock the list of interfaces */
2296 ifnet_head_lock_exclusive();
2297 ifnet_lock_exclusive(ifp);
2298
2299 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2300 char workbuf[64];
2301 int namelen, masklen, socksize, ifasize;
2302
2303 ifp->if_index = if_next_index();
2304
2305 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2306 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2307 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2308 socksize = masklen + ifp->if_addrlen;
2309 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2310 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2311 socksize = sizeof(struct sockaddr_dl);
2312 socksize = ROUNDUP(socksize);
2313 ifasize = sizeof(struct ifaddr) + 2 * socksize;
2314 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2315 if (ifa) {
2316 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2317 ifnet_addrs[ifp->if_index - 1] = ifa;
2318 bzero(ifa, ifasize);
2319 sdl->sdl_len = socksize;
2320 sdl->sdl_family = AF_LINK;
2321 bcopy(workbuf, sdl->sdl_data, namelen);
2322 sdl->sdl_nlen = namelen;
2323 sdl->sdl_index = ifp->if_index;
2324 sdl->sdl_type = ifp->if_type;
2325 if (ll_addr) {
2326 sdl->sdl_alen = ll_addr->sdl_alen;
2327 if (ll_addr->sdl_alen != ifp->if_addrlen)
2328 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2329 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2330 }
2331 ifa->ifa_ifp = ifp;
2332 ifa->ifa_rtrequest = link_rtrequest;
2333 ifa->ifa_addr = (struct sockaddr*)sdl;
2334 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2335 ifa->ifa_netmask = (struct sockaddr*)sdl;
2336 sdl->sdl_len = masklen;
2337 while (namelen != 0)
2338 sdl->sdl_data[--namelen] = 0xff;
2339 }
2340 }
2341 else {
2342 /* preserve the first ifaddr */
2343 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2344 }
2345
2346
2347 TAILQ_INIT(&ifp->if_addrhead);
2348 ifa = ifnet_addrs[ifp->if_index - 1];
2349
2350 if (ifa) {
2351 /*
2352 * We don't use if_attach_ifa because we want
2353 * this address to be first on the list.
2354 */
2355 ifaref(ifa);
2356 ifa->ifa_debug |= IFA_ATTACHED;
2357 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2358 }
2359
2360 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2361 ifindex2ifnet[ifp->if_index] = ifp;
2362
2363 ifnet_head_done();
2364 }
2365 dlil_write_end();
2366
2367 if (if_family && if_family->init_if) {
2368 stat = if_family->init_if(ifp);
2369 if (stat) {
2370 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2371 }
2372 }
2373
2374 ifnet_lock_done(ifp);
2375
2376 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
2377
2378 return 0;
2379 }
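/*
 * Driver-side sketch: ll_addr, when supplied, must be a sockaddr_dl
 * whose sdl_alen equals ifp->if_addrlen (this function panics
 * otherwise). A hypothetical "foo" driver would do roughly:
 *
 *	struct sockaddr_dl *sdl = ...link-layer address of foo0...;
 *	int err = dlil_if_attach_with_address(ifp, sdl);
 *
 *	if (err == EEXIST)
 *		...ifp is already on the interface list...;
 */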
2380
2381 int
2382 dlil_if_attach(struct ifnet *ifp)
2383 {
2384 return dlil_if_attach_with_address(ifp, NULL);
2385 }
2386
2387
2388 int
2389 dlil_if_detach(struct ifnet *ifp)
2390 {
2391 struct ifnet_filter *filter;
2392 struct ifnet_filter *filter_next;
2393 int zeroed = 0;
2394 int retval = 0;
2395 struct ifnet_filter_head fhead;
2396
2397
2398 ifnet_lock_exclusive(ifp);
2399
2400 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2401 /* Interface has already been detached */
2402 ifnet_lock_done(ifp);
2403 return ENXIO;
2404 }
2405
2406 /*
2407 * Indicate this interface is being detached.
2408 *
2409 * This should prevent protocols from attaching
2410 * from this point on. The interface will remain on
2411 * the list until all of the protocols are detached.
2412 */
2413 ifp->if_eflags |= IFEF_DETACHING;
2414 ifnet_lock_done(ifp);
2415
2416 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
2417
2418 if ((retval = dlil_write_begin()) != 0) {
2419 if (retval == EDEADLK) {
2420 retval = DLIL_WAIT_FOR_FREE;
2421
2422 /* We need to perform a delayed detach */
2423 ifp->if_delayed_detach = 1;
2424 dlil_detach_waiting = 1;
2425 wakeup(&dlil_detach_waiting);
2426 }
2427 return retval;
2428 }
2429
2430 /* Steal the list of interface filters */
2431 fhead = ifp->if_flt_head;
2432 TAILQ_INIT(&ifp->if_flt_head);
2433
2434 /* unuse the interface */
2435 zeroed = ifp_unuse(ifp);
2436
2437 dlil_write_end();
2438
2439 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2440 filter_next = TAILQ_NEXT(filter, filt_next);
2441 dlil_detach_filter_internal(filter, 1);
2442 }
2443
2444 if (zeroed == 0) {
2445 retval = DLIL_WAIT_FOR_FREE;
2446 }
2447 else
2448 {
2449 ifp_use_reached_zero(ifp);
2450 }
2451
2452 return retval;
2453 }
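/*
 * Caller-side sketch: DLIL_WAIT_FOR_FREE is not a failure; it means
 * the detach finishes asynchronously once the last use is dropped:
 *
 *	int err = dlil_if_detach(ifp);
 *
 *	if (err == ENXIO)
 *		...detach already in progress...;
 *	else if (err == DLIL_WAIT_FOR_FREE)
 *		...wait for the ifnet to be recycled before tearing
 *	 	   down driver state...
 */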
2454
2455
2456 int
2457 dlil_reg_if_modules(u_long interface_family,
2458 struct dlil_ifmod_reg_str *ifmod)
2459 {
2460 struct if_family_str *if_family;
2461
2462
2463 if (find_family_module(interface_family)) {
2464 DLIL_PRINTF("Attempt to register dlil family module more than once - %lu\n",
2465 interface_family);
2466 return EEXIST;
2467 }
2468
2469 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2470 (!ifmod->add_proto) || (!ifmod->del_proto)) {
2471 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
2472 return EINVAL;
2473 }
2474
2475 /*
2476 * The following is a gross hack to keep from breaking
2477 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2478 * does not zero the reserved fields in dlil_ifmod_reg_str.
2479 * As a result, we have to zero any function that used to
2480 * be reserved fields at the time Vicomsoft built their
2481 * kext. Radar #2974305
2482 */
2483 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
2484 if (interface_family == 123) { /* Vicom */
2485 ifmod->init_if = 0;
2486 } else {
2487 return EINVAL;
2488 }
2489 }
2490
2491 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2492 if (!if_family) {
2493 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
2494 return ENOMEM;
2495 }
2496
2497 bzero(if_family, sizeof(struct if_family_str));
2498
2499 if_family->if_family = interface_family & 0xffff;
2500 if_family->shutdown = ifmod->shutdown;
2501 if_family->add_if = ifmod->add_if;
2502 if_family->del_if = ifmod->del_if;
2503 if_family->init_if = ifmod->init_if;
2504 if_family->add_proto = ifmod->add_proto;
2505 if_family->del_proto = ifmod->del_proto;
2506 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
2507 if_family->refcnt = 1;
2508 if_family->flags = 0;
2509
2510 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
2511 return 0;
2512 }
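/*
 * Family registration sketch ("foo_*" handlers and MY_IF_FAMILY are
 * hypothetical). Zeroing the struct matters: non-zero reserved fields
 * are rejected by the check above.
 *
 *	struct dlil_ifmod_reg_str mod;
 *
 *	bzero(&mod, sizeof(mod));
 *	mod.add_if = foo_add_if;	/* these four are mandatory */
 *	mod.del_if = foo_del_if;
 *	mod.add_proto = foo_add_proto;
 *	mod.del_proto = foo_del_proto;
 *	mod.init_if = foo_init_if;	/* optional */
 *	if (dlil_reg_if_modules(MY_IF_FAMILY, &mod) == EEXIST)
 *		...family already registered...;
 */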
2513
2514 int dlil_dereg_if_modules(u_long interface_family)
2515 {
2516 struct if_family_str *if_family;
2517 int ret = 0;
2518
2519
2520 if_family = find_family_module(interface_family);
2521 if (if_family == 0) {
2522 return ENXIO;
2523 }
2524
2525 if (--if_family->refcnt == 0) {
2526 if (if_family->shutdown)
2527 (*if_family->shutdown)();
2528
2529 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2530 FREE(if_family, M_IFADDR);
2531 }
2532 else {
2533 if_family->flags |= DLIL_SHUTDOWN;
2534 ret = DLIL_WAIT_FOR_FREE;
2535 }
2536
2537 return ret;
2538 }
2539
2540
2541
2542 int
2543 dlil_reg_proto_module(
2544 u_long protocol_family,
2545 u_long interface_family,
2546 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2547 int (*detach)(struct ifnet *ifp, u_long protocol_family))
2548 {
2549 struct proto_family_str *proto_family;
2550
2551 if (attach == NULL) return EINVAL;
2552
2553 lck_mtx_lock(proto_family_mutex);
2554
2555 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2556 if (proto_family->proto_family == protocol_family &&
2557 proto_family->if_family == interface_family) {
2558 lck_mtx_unlock(proto_family_mutex);
2559 return EEXIST;
2560 }
2561 }
2562
2563 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2564 if (!proto_family) {
2565 lck_mtx_unlock(proto_family_mutex);
2566 return ENOMEM;
2567 }
2568
2569 bzero(proto_family, sizeof(struct proto_family_str));
2570 proto_family->proto_family = protocol_family;
2571 proto_family->if_family = interface_family & 0xffff;
2572 proto_family->attach_proto = attach;
2573 proto_family->detach_proto = detach;
2574
2575 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
2576 lck_mtx_unlock(proto_family_mutex);
2577 return 0;
2578 }
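/*
 * Protocol module registration sketch; the ether/inet names mirror
 * how an IPv4-over-Ethernet plumber would use this (only the attach
 * function is mandatory, detach may be NULL):
 *
 *	int err = dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
 *	    ether_attach_inet, ether_detach_inet);
 */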
2579
2580 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2581 {
2582 struct proto_family_str *proto_family;
2583 int ret = 0;
2584
2585 lck_mtx_lock(proto_family_mutex);
2586
2587 proto_family = find_proto_module(protocol_family, interface_family);
2588 if (proto_family == 0) {
2589 lck_mtx_unlock(proto_family_mutex);
2590 return ENXIO;
2591 }
2592
2593 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2594 FREE(proto_family, M_IFADDR);
2595
2596 lck_mtx_unlock(proto_family_mutex);
2597 return ret;
2598 }
2599
2600 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
2601 {
2602 struct proto_family_str *proto_family;
2603 int ret = 0;
2604
2605 lck_mtx_lock(proto_family_mutex);
2606 proto_family = find_proto_module(protocol_family, ifp->if_family);
2607 if (proto_family == 0) {
2608 lck_mtx_unlock(proto_family_mutex);
2609 return ENXIO;
2610 }
2611
2612 ret = proto_family->attach_proto(ifp, protocol_family);
2613
2614 lck_mtx_unlock(proto_family_mutex);
2615 return ret;
2616 }
2617
2618
2619 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2620 {
2621 struct proto_family_str *proto_family;
2622 int ret = 0;
2623
2624 lck_mtx_lock(proto_family_mutex);
2625
2626 proto_family = find_proto_module(protocol_family, ifp->if_family);
2627 if (proto_family && proto_family->detach_proto)
2628 ret = proto_family->detach_proto(ifp, protocol_family);
2629 else
2630 ret = dlil_detach_protocol(ifp, protocol_family);
2631
2632 lck_mtx_unlock(proto_family_mutex);
2633 return ret;
2634 }
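/*
 * Plumbing sketch: once a proto module is registered for the
 * interface's family, a protocol is brought up or torn down on a
 * live interface like so:
 *
 *	if (dlil_plumb_protocol(PF_INET, ifp) == 0) {
 *		...ifp now demuxes IPv4...
 *		dlil_unplumb_protocol(PF_INET, ifp);
 *	}
 */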
2635
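/*
 * Recycle stubs: dlil_if_release below installs these on a detached
 * ifnet so that any straggling calls fail or drop safely until the
 * ifnet is reused.
 */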
2636 static errno_t
2637 dlil_recycle_ioctl(
2638 __unused ifnet_t ifnet_ptr,
2639 __unused u_int32_t ioctl_code,
2640 __unused void *ioctl_arg)
2641 {
2642 return EOPNOTSUPP;
2643 }
2644
2645 static int
2646 dlil_recycle_output(
2647 __unused struct ifnet *ifnet_ptr,
2648 struct mbuf *m)
2649 {
2650 m_freem(m);
2651 return 0;
2652 }
2653
2654 static void
2655 dlil_recycle_free(
2656 __unused ifnet_t ifnet_ptr)
2657 {
2658 }
2659
2660 static errno_t
2661 dlil_recycle_set_bpf_tap(
2662 __unused ifnet_t ifp,
2663 __unused bpf_tap_mode mode,
2664 __unused bpf_packet_func callback)
2665 {
2666 /* XXX not sure what to do here */
2667 return 0;
2668 }
2669
2670 int dlil_if_acquire(
2671 u_long family,
2672 const void *uniqueid,
2673 size_t uniqueid_len,
2674 struct ifnet **ifp)
2675 {
2676 struct ifnet *ifp1 = NULL;
2677 struct dlil_ifnet *dlifp1 = NULL;
2678 int ret = 0;
2679
2680 lck_mtx_lock(dlil_ifnet_mutex);
2681 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2682
2683 ifp1 = (struct ifnet *)dlifp1;
2684
2685 if (ifp1->if_family == family) {
2686
2687 /* same uniqueid and length, or neither specifies a uniqueid */
2688 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2689 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2690
2691 /* check for matching interface in use */
2692 if (ifp1->if_eflags & IFEF_INUSE) {
2693 if (uniqueid_len) {
2694 ret = EBUSY;
2695 goto end;
2696 }
2697 }
2698 else {
2699 if (!ifp1->if_lock)
2700 panic("ifp's lock is gone\n");
2701 ifnet_lock_exclusive(ifp1);
2702 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2703 ifnet_lock_done(ifp1);
2704 *ifp = ifp1;
2705 goto end;
2706 }
2707 }
2708 }
2709 }
2710
2711 /* no interface found, allocate a new one */
2712 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2713 if (dlifp1 == 0) {
2714 ret = ENOMEM;
2715 goto end;
2716 }
2717
2718 bzero(dlifp1, sizeof(*dlifp1));
2719
2720 if (uniqueid_len) {
2721 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2722 if (dlifp1->if_uniqueid == 0) {
2723 FREE(dlifp1, M_NKE);
2724 ret = ENOMEM;
2725 goto end;
2726 }
2727 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2728 dlifp1->if_uniqueid_len = uniqueid_len;
2729 }
2730
2731 ifp1 = (struct ifnet *)dlifp1;
2732 ifp1->if_eflags |= IFEF_INUSE;
2733 ifp1->if_name = dlifp1->if_namestorage;
2734
2735 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2736
2737 *ifp = ifp1;
2738
2739 end:
2740 lck_mtx_unlock(dlil_ifnet_mutex);
2741
2742 return ret;
2743 }
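/*
 * Acquire/release sketch (the "foo0" uniqueid and MY_IF_FAMILY are
 * hypothetical): a driver recovers its previous ifnet across reloads
 * by presenting the same uniqueid:
 *
 *	struct ifnet *ifp = NULL;
 *	int err = dlil_if_acquire(MY_IF_FAMILY, "foo0", 5, &ifp);
 *
 *	if (err == EBUSY)
 *		...an ifnet with this uniqueid is still in use...;
 *	else if (err == 0)
 *		...initialize ifp and call dlil_if_attach(ifp);
 *	 	   dlil_if_release(ifp) once detached...
 */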
2744
2745 void dlil_if_release(struct ifnet *ifp)
2746 {
2747 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2748
2749
2750 /* Interface does not have a lock until it is attached - radar 3713951 */
2751 if (ifp->if_lock)
2752 ifnet_lock_exclusive(ifp);
2753 ifp->if_eflags &= ~IFEF_INUSE;
2754 ifp->if_ioctl = dlil_recycle_ioctl;
2755 ifp->if_output = dlil_recycle_output;
2756 ifp->if_free = dlil_recycle_free;
2757 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2758
2759 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2760 ifp->if_name = dlifp->if_namestorage;
2761 if (ifp->if_lock)
2762 ifnet_lock_done(ifp);
2763
2764 }