/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG       DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END       DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT  DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT  DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))


#define MAX_DL_TAGS         16
#define MAX_DLIL_FILTERS    16
#define MAX_FRAME_TYPE_SIZE 4   /* LONGWORDS */
#define MAX_LINKADDR        4   /* LONGWORDS */
#define M_NKE               M_IFADDR

#define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF printf
#else
#define DLIL_PRINTF kprintf
#endif

enum {
    kProtoKPI_DLIL = 0,
    kProtoKPI_v1 = 1
};

struct if_proto {
    SLIST_ENTRY(if_proto) next_hash;
    int refcount;
    int detaching;
    struct ifnet *ifp;
    struct domain *dl_domain;
    protocol_family_t protocol_family;
    int proto_kpi;
    union {
        struct {
            dl_input_func dl_input;
            dl_pre_output_func dl_pre_output;
            dl_event_func dl_event;
            dl_offer_func dl_offer;
            dl_ioctl_func dl_ioctl;
            dl_detached_func dl_detached;
        } dlil;
        struct {
            proto_media_input input;
            proto_media_preout pre_output;
            proto_media_event event;
            proto_media_ioctl ioctl;
            proto_media_detached detached;
            proto_media_resolve_multi resolve_multi;
            proto_media_send_arp send_arp;
        } v1;
    } kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);


struct dlil_ifnet {
    /* ifnet and drvr_ext are used by the stack and drivers
       drvr_ext extends the public ifnet and must follow dl_if */
    struct ifnet dl_if;                 /* public ifnet */

    /* dlil private fields */
    TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnets are linked together; */
                                        /* this is not the ifnet list */
    void *if_uniqueid;                  /* unique id identifying the interface */
    size_t if_uniqueid_len;             /* length of the unique id */
    char if_namestorage[IFNAMSIZ];      /* interface name storage */
};

struct ifnet_filter {
    TAILQ_ENTRY(ifnet_filter) filt_next;
    ifnet_t filt_ifp;
    int filt_detaching;

    const char *filt_name;
    void *filt_cookie;
    protocol_family_t filt_protocol;
    iff_input_func filt_input;
    iff_output_func filt_output;
    iff_event_func filt_event;
    iff_ioctl_func filt_ioctl;
    iff_detached_func filt_detached;
};

struct if_family_str {
    TAILQ_ENTRY(if_family_str) if_fam_next;
    u_long if_family;
    int refcnt;
    int flags;

#define DLIL_SHUTDOWN 1

    int (*add_if)(struct ifnet *ifp);
    int (*del_if)(struct ifnet *ifp);
    int (*init_if)(struct ifnet *ifp);
    int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
    ifnet_del_proto_func del_proto;
    ifnet_ioctl_func ifmod_ioctl;
    int (*shutdown)(void);
};

struct proto_family_str {
    TAILQ_ENTRY(proto_family_str) proto_fam_next;
    u_long proto_family;
    u_long if_family;
    int usecnt;

    int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
    int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
    kIfNetUseCount_MayBeZero = 0,
    kIfNetUseCount_MustNotBeZero = 1
};

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str) if_family_head;
static TAILQ_HEAD(, proto_family_str) proto_family_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_mtx_t *proto_family_mutex;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t *dlil_input_lock;
__private_extern__ thread_t dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr


int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
    union {
        const void* cval;
        void* val;
    } ret;

    ret.cval = ptr;
    return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
    unsigned long new_value;
    unsigned long old_value;
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read == dlil_writer_waiting)
        panic("dlil_read_begin - thread is already a writer");

    do {
again:
        old_value = dlil_read_count;

        if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
        {
            tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
            goto again;
        }

        new_value = old_value + 1;
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

    uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    OSDecrementAtomic((UInt32*)&dlil_read_count);
    uth->dlil_incremented_read--;
    if (dlil_read_count == dlil_writer_waiting)
        wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != 0) {
        return EDEADLK;
    }
    lck_mtx_lock(dlil_mutex);
    OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
    if (dlil_read_count == dlil_writer_waiting) {
        uth->dlil_incremented_read = dlil_writer_waiting;
        return 0;
    }
    else {
        tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
        goto again;
    }
}

static void
dlil_write_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != dlil_writer_waiting)
        panic("dlil_write_end - thread is not a writer");
    OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
    lck_mtx_unlock(dlil_mutex);
    uth->dlil_incremented_read = 0;
    wakeup(&dlil_read_count);
}
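
/*
 * Minimal usage sketch for the reader/writer primitives above. This is
 * not part of the original source: example_read_section is a hypothetical
 * helper, compiled out via #if 0. A reader brackets list traversal with
 * dlil_read_begin/dlil_read_end; a thread already holding the read lock
 * must not call dlil_write_begin, which returns EDEADLK in that case.
 */
#if 0
static void
example_read_section(struct ifnet *ifp)
{
    struct ifnet_filter *filter;

    dlil_read_begin();          /* may sleep while a writer is pending */
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        /* safe to walk the dlil-protected lists here */
    }
    dlil_read_end();            /* wakes a waiting writer when readers drain */
}
#endif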

#define PROTO_HASH_SLOTS 0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
    switch (protocol_family) {
        case PF_INET:
            return 0;
        case PF_INET6:
            return 1;
        case PF_APPLETALK:
            return 2;
        case PF_VLAN:
            return 3;
        default:
            return 4;
    }
}

static struct if_family_str *
find_family_module(u_long if_family)
{
    struct if_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
        if (mod->if_family == (if_family & 0xffff))
            break;
    }

    return mod;
}

static struct proto_family_str *
find_proto_module(u_long proto_family, u_long if_family)
{
    struct proto_family_str *mod = NULL;

    TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
        if ((mod->proto_family == (proto_family & 0xffff))
            && (mod->if_family == (if_family & 0xffff)))
            break;
    }

    return mod;
}

static struct if_proto *
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
    struct if_proto *proto = NULL;
    u_long i = proto_hash_value(protocol_family);

    if (ifp->if_proto_hash) {
        proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
    }

    while (proto && proto->protocol_family != protocol_family) {
        proto = SLIST_NEXT(proto, next_hash);
    }

    return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
    OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
    int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

    if (oldval == 1) { /* This was the last reference */
        FREE(proto, M_IFADDR);
    }
}

__private_extern__ void
ifnet_lock_assert(
    __unused struct ifnet *ifp,
    __unused int what)
{
#if IFNET_RW_LOCK
    /*
     * Not implemented for rw locks.
     *
     * Function exists so when/if we use mutex we can
     * enable this check.
     */
#else
    lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_shared(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_exclusive(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_done(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared(void)
{
    lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive(void)
{
    lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done(void)
{
    lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *
ifbyfamily(u_long family, short unit)
{
    struct ifnet *ifp;

    ifnet_head_lock_shared();
    TAILQ_FOREACH(ifp, &ifnet_head, if_link)
        if ((family == ifp->if_family) && (ifp->if_unit == unit))
            break;
    ifnet_head_done();

    return ifp;
}

static int
dlil_ifp_proto_count(struct ifnet * ifp)
{
    int count = 0;
    int i;

    if (ifp->if_proto_hash != NULL) {
        for (i = 0; i < PROTO_HASH_SLOTS; i++) {
            struct if_proto *proto;
            SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                count++;
            }
        }
    }

    return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
    struct net_event_data *event_data, u_long event_data_len)
{
    struct net_event_data ev_data;
    struct kev_msg ev_msg;

    /*
     * A net event always starts with a net_event_data structure,
     * but the caller can generate a simple net event or
     * provide a longer event structure to post.
     */

    ev_msg.vendor_code = KEV_VENDOR_APPLE;
    ev_msg.kev_class = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = event_subclass;
    ev_msg.event_code = event_code;

    if (event_data == 0) {
        event_data = &ev_data;
        event_data_len = sizeof(struct net_event_data);
    }

    strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
    event_data->if_family = ifp->if_family;
    event_data->if_unit = (unsigned long) ifp->if_unit;

    ev_msg.dv[0].data_length = event_data_len;
    ev_msg.dv[0].data_ptr = event_data;
    ev_msg.dv[1].data_length = 0;

    dlil_event_internal(ifp, &ev_msg);
}
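
/*
 * Usage sketch (hypothetical call site, compiled out): posting a simple
 * event through dlil_post_msg. Passing a NULL event_data makes the
 * function fill in a bare net_event_data with the interface identity.
 * KEV_DL_IF_DETACHED is the same event code posted elsewhere in this file.
 */
#if 0
static void
example_post_simple_event(struct ifnet *ifp)
{
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, NULL, 0);
}
#endif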

void dlil_init(void);
void
dlil_init(void)
{
    lck_grp_attr_t *grp_attributes = 0;
    lck_attr_t *lck_attributes = 0;
    lck_grp_t *input_lock_grp = 0;

    TAILQ_INIT(&dlil_ifnet_head);
    TAILQ_INIT(&if_family_head);
    TAILQ_INIT(&proto_family_head);
    TAILQ_INIT(&ifnet_head);

    /* Setup the lock groups we will use */
    grp_attributes = lck_grp_attr_alloc_init();
    lck_grp_attr_setdefault(grp_attributes);

    dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
#if IFNET_RW_LOCK
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#else
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#endif
    ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
    input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
    lck_grp_attr_free(grp_attributes);
    grp_attributes = 0;

    /* Setup the lock attributes we will use */
    lck_attributes = lck_attr_alloc_init();
    lck_attr_setdefault(lck_attributes);

    ifnet_lock_attr = lck_attr_alloc_init();
    lck_attr_setdefault(ifnet_lock_attr);

    dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
    input_lock_grp = 0;

    ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
    proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
    dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

    lck_attr_free(lck_attributes);
    lck_attributes = 0;

    /*
     * Start up the dlil input thread once everything is initialized
     */
    (void) kernel_thread(kernel_task, dlil_input_thread);
    (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
    struct ifnet *ifp,
    const struct iff_filter *if_filter,
    interface_filter_t *filter_ref)
{
    int retval = 0;
    struct ifnet_filter *filter;

    MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
    if (filter == NULL)
        return ENOMEM;
    bzero(filter, sizeof(*filter));

    filter->filt_ifp = ifp;
    filter->filt_cookie = if_filter->iff_cookie;
    filter->filt_name = if_filter->iff_name;
    filter->filt_protocol = if_filter->iff_protocol;
    filter->filt_input = if_filter->iff_input;
    filter->filt_output = if_filter->iff_output;
    filter->filt_event = if_filter->iff_event;
    filter->filt_ioctl = if_filter->iff_ioctl;
    filter->filt_detached = if_filter->iff_detached;

    if ((retval = dlil_write_begin()) != 0) {
        /* Failed to acquire the write lock */
        FREE(filter, M_NKE);
        return retval;
    }
    TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
    dlil_write_end();
    *filter_ref = filter;
    return retval;
}
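
/*
 * Attachment sketch (compiled out; example_input and example_attach are
 * hypothetical): a caller fills in a struct iff_filter, whose fields
 * dlil_attach_filter copies above, and keeps the returned reference for
 * a later dlil_detach_filter.
 */
#if 0
static errno_t
example_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
    mbuf_t *data, char **frame_ptr)
{
    return 0;   /* 0 lets the packet continue up the stack */
}

static errno_t
example_attach(struct ifnet *ifp, interface_filter_t *ref)
{
    struct iff_filter desc;

    bzero(&desc, sizeof(desc));
    desc.iff_name = "com.example.filter";    /* hypothetical name */
    desc.iff_protocol = 0;                   /* 0 matches all protocols */
    desc.iff_input = example_input;          /* other callbacks left NULL */

    return dlil_attach_filter(ifp, &desc, ref);
}
#endif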

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
    int retval = 0;

    if (detached == 0) {
        ifnet_t ifp = NULL;
        interface_filter_t entry = NULL;

        /* Take the write lock */
        retval = dlil_write_begin();
        if (retval != 0 && retval != EDEADLK)
            return retval;

        /*
         * At this point either we have the write lock (retval == 0)
         * or we couldn't get it (retval == EDEADLK) because someone
         * else up the stack is holding the read lock. It is safe to
         * read, since either the read or write lock is held. Verify
         * the filter parameter before proceeding.
         */
        ifnet_head_lock_shared();
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
                if (entry == filter)
                    break;
            }
            if (entry == filter)
                break;
        }
        ifnet_head_done();

        if (entry != filter) {
            /* filter parameter is not a valid filter ref */
            if (retval == 0) {
                dlil_write_end();
            }
            return EINVAL;
        }

        if (retval == EDEADLK) {
            /* Perform a delayed detach */
            filter->filt_detaching = 1;
            dlil_detach_waiting = 1;
            wakeup(&dlil_detach_waiting);
            return 0;
        }

        /* Remove the filter from the list */
        TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
        dlil_write_end();
    }

    /* Call the detached function if there is one */
    if (filter->filt_detached)
        filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

    /* Free the filter */
    FREE(filter, M_NKE);

    return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
    if (filter == NULL)
        return;
    dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
    __unused void* foo,
    __unused wait_result_t wait)
{
    while (1) {
        struct mbuf *m, *m_loop;

        lck_spin_lock(dlil_input_lock);
        m = dlil_input_mbuf_head;
        dlil_input_mbuf_head = NULL;
        dlil_input_mbuf_tail = NULL;
        m_loop = dlil_input_loop_head;
        dlil_input_loop_head = NULL;
        dlil_input_loop_tail = NULL;
        lck_spin_unlock(dlil_input_lock);

        /*
         * NOTE: we should think about adding thread starvation
         * safeguards here if we have to deal with long chains
         * of packets.
         */
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
            m = m0;
        }
        m = m_loop;
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;
            struct ifnet *ifp = &loif[0];

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(ifp, m, header);
            m = m0;
        }

        proto_input_run();

        if (dlil_input_mbuf_head == NULL &&
            dlil_input_loop_head == NULL && inject_buckets == 0) {
            assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
            (void) thread_block(dlil_input_thread_continue);
            /* NOTREACHED */
        }
    }
}

void dlil_input_thread(void)
{
    register thread_t self = current_thread();

    ml_thread_policy(self, MACHINE_GROUP,
        (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

    dlil_initialized = 1;
    dlil_input_thread_ptr = current_thread();
    dlil_input_thread_continue(NULL, THREAD_RESTART);
}

int
dlil_input_with_stats(
    struct ifnet *ifp,
    struct mbuf *m_head,
    struct mbuf *m_tail,
    const struct ifnet_stat_increment_param *stats)
{
    /* WARNING
     * Because of looped-back multicast we cannot stuff the ifp in
     * the rcvif of the packet header: loopback has its own dlil
     * input queue
     */

    lck_spin_lock(dlil_input_lock);
    if (ifp->if_type != IFT_LOOP) {
        if (dlil_input_mbuf_head == NULL)
            dlil_input_mbuf_head = m_head;
        else if (dlil_input_mbuf_tail != NULL)
            dlil_input_mbuf_tail->m_nextpkt = m_head;
        dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
    } else {
        if (dlil_input_loop_head == NULL)
            dlil_input_loop_head = m_head;
        else if (dlil_input_loop_tail != NULL)
            dlil_input_loop_tail->m_nextpkt = m_head;
        dlil_input_loop_tail = m_tail ? m_tail : m_head;
    }
    if (stats) {
        ifp->if_data.ifi_ipackets += stats->packets_in;
        ifp->if_data.ifi_ibytes += stats->bytes_in;
        ifp->if_data.ifi_ierrors += stats->errors_in;

        ifp->if_data.ifi_opackets += stats->packets_out;
        ifp->if_data.ifi_obytes += stats->bytes_out;
        ifp->if_data.ifi_oerrors += stats->errors_out;

        ifp->if_data.ifi_collisions += stats->collisions;
        ifp->if_data.ifi_iqdrops += stats->dropped;
    }
    lck_spin_unlock(dlil_input_lock);

    wakeup((caddr_t)&dlil_input_thread_wakeup);

    return 0;
}

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
    return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}

int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
    char *frame_header)
{
    int retval;
    struct if_proto *ifproto = 0;
    protocol_family_t protocol_family;
    struct ifnet_filter *filter;


    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

    /*
     * Lock the interface while we run through
     * the filters and the demux. This lock
     * protects the filter list and the demux list.
     */
    dlil_read_begin();

    /*
     * Call family demux module. If the demux module finds a match
     * for the frame it will fill-in the ifproto pointer.
     */

    retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
    if (retval != 0)
        protocol_family = 0;
    if (retval == EJUSTRETURN) {
        dlil_read_end();
        return 0;
    }

    /* DANGER!!! */
    if (m->m_flags & (M_BCAST|M_MCAST))
        ifp->if_imcasts++;

    /*
     * Run interface filters
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            int filter_result;
            if (filter->filt_input && (filter->filt_protocol == 0 ||
                filter->filt_protocol == protocol_family)) {
                filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

                if (filter_result) {
                    dlil_read_end();
                    if (filter_result == EJUSTRETURN) {
                        filter_result = 0;
                    }
                    else {
                        m_freem(m);
                    }

                    return filter_result;
                }
            }
        }
    }

    /* Demux is done and the interface filters have run; release the read lock */
    if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
        dlil_read_end();
        if (retval != EJUSTRETURN) {
            m_freem(m);
            return retval;
        }
        else
            return 0;
    }

    ifproto = find_attached_proto(ifp, protocol_family);

    if (ifproto == 0) {
        dlil_read_end();
        DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
        m_freem(m);
        return 0;
    }

    /*
     * Hand the packet off to the protocol.
     */

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_lock(ifproto->dl_domain->dom_mtx);
    }

    if (ifproto->proto_kpi == kProtoKPI_DLIL)
        retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
            ifp, ifproto->protocol_family,
            TRUE);
    else
        retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

    if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
        lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
    }

    dlil_read_end();

    if (retval == EJUSTRETURN)
        retval = 0;
    else
        if (retval)
            m_freem(m);

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
    return retval;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
    struct ifnet_filter *filter;

    if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
        dlil_read_begin();

        /* Pass the event to the interface filters */
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if (filter->filt_event)
                filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
        }

        if (ifp->if_proto_hash) {
            int i;

            for (i = 0; i < PROTO_HASH_SLOTS; i++) {
                struct if_proto *proto;

                SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                    /* Pass the event to the protocol */
                    if (proto->proto_kpi == kProtoKPI_DLIL) {
                        if (proto->kpi.dlil.dl_event)
                            proto->kpi.dlil.dl_event(ifp, event);
                    }
                    else {
                        if (proto->kpi.v1.event)
                            proto->kpi.v1.event(ifp, proto->protocol_family, event);
                    }
                }
            }
        }

        dlil_read_end();

        /* Pass the event to the interface */
        if (ifp->if_event)
            ifp->if_event(ifp, event);

        if (ifp_unuse(ifp))
            ifp_use_reached_zero(ifp);
    }

    return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
    int result = 0;

    struct kev_msg kev_msg;

    kev_msg.vendor_code = event->vendor_code;
    kev_msg.kev_class = event->kev_class;
    kev_msg.kev_subclass = event->kev_subclass;
    kev_msg.event_code = event->event_code;
    kev_msg.dv[0].data_ptr = &event->event_data[0];
    kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
    kev_msg.dv[1].data_length = 0;

    result = dlil_event_internal(ifp, &kev_msg);

    return result;
}

int
dlil_output_list(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *packetlist,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int error, retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;
    struct if_proto *proto = 0;
    struct mbuf *m;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
#if BRIDGE
    if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
    if ((raw != 0) || proto_family != PF_INET) {
#endif
        while (packetlist) {
            m = packetlist;
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
            error = dlil_output(ifp, proto_family, m, route, dest, raw);
            if (error) {
                if (packetlist)
                    m_freem_list(packetlist);
                return (error);
            }
        }
        return (0);
    }

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;
    m = packetlist;
    packetlist = packetlist->m_nextpkt;
    m->m_nextpkt = NULL;

    proto = find_attached_proto(ifp, proto_family);
    if (proto == NULL) {
        retval = ENXIO;
        goto cleanup;
    }

    retval = 0;
    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_pre_output)
            retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }
    else {
        if (proto->kpi.v1.pre_output)
            retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
    }

    if (retval) {
        if (retval != EJUSTRETURN) {
            m_freem(m);
        }
        goto cleanup;
    }

    do {
        if (ifp->if_framer) {
            retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
            if (retval) {
                if (retval != EJUSTRETURN) {
                    m_freem(m);
                }
                goto cleanup;
            }
        }

        /*
         * Let interface filters (if any) do their thing ...
         */
        /* Do not pass VLAN tagged packets to filters PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
                if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                    filter->filt_output) {
                    retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                    if (retval) {
                        if (retval == EJUSTRETURN)
                            continue;
                        else {
                            m_freem(m);
                        }
                        goto cleanup;
                    }
                }
            }
        }

        /*
         * Finally, call the driver.
         */

        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
        retval = ifp->if_output(ifp, m);
        if (retval) {
            printf("dlil_output_list: output error retval = %x\n", retval);
            goto cleanup;
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

        m = packetlist;
        if (m) {
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
        }
    } while (m);


    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
    dlil_read_end();
    if (packetlist) /* if any packet left, clean up */
        m_freem_list(packetlist);
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
int
dlil_output(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf *m,
    caddr_t route,
    const struct sockaddr *dest,
    int raw)
{
    char *frame_type = 0;
    char *dst_linkaddr = 0;
    int retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    if (raw == 0) {
        struct if_proto *proto = 0;

        proto = find_attached_proto(ifp, proto_family);
        if (proto == NULL) {
            m_freem(m);
            retval = ENXIO;
            goto cleanup;
        }

        retval = 0;
        if (proto->proto_kpi == kProtoKPI_DLIL) {
            if (proto->kpi.dlil.dl_pre_output)
                retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }
        else {
            if (proto->kpi.v1.pre_output)
                retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
        }

        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

    /*
     * Call framing module
     */
    if ((raw == 0) && (ifp->if_framer)) {
        retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
        if (retval) {
            if (retval != EJUSTRETURN) {
                m_freem(m);
            }
            goto cleanup;
        }
    }

#if BRIDGE
    /* !!!LOCKING!!!
     *
     * Need to consider how to handle this.
     */
    broken-locking
    if (do_bridge) {
        struct mbuf *m0 = m;
        struct ether_header *eh = mtod(m, struct ether_header *);

        if (m->m_pkthdr.rcvif)
            m->m_pkthdr.rcvif = NULL;
        ifp = bridge_dst_lookup(eh);
        bdg_forward(&m0, ifp);
        if (m0)
            m_freem(m0);

        return 0;
    }
#endif


    /*
     * Let interface filters (if any) do their thing ...
     */

    /* Do not pass VLAN tagged packets to filters PR-3586856 */
    if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                filter->filt_output) {
                retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                if (retval) {
                    if (retval != EJUSTRETURN)
                        m_freem(m);
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Finally, call the driver.
     */

    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
    retval = ifp->if_output(ifp, m);
    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
    dlil_read_end();
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}
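
/*
 * Lock-ordering sketch for the rule documented above dlil_output: take
 * the protocol's domain lock before any interface lock, never the other
 * way around. (Hypothetical helper, compiled out; dom_mtx and the
 * ifnet_lock_* routines are the real primitives used in this file.)
 */
#if 0
static void
example_lock_order(struct domain *dp, struct ifnet *ifp)
{
    lck_mtx_lock(dp->dom_mtx);  /* 1: protocol/domain lock first */
    ifnet_lock_shared(ifp);     /* 2: then the interface lock */
    /* ... work that touches both protocol and interface state ... */
    ifnet_lock_done(ifp);
    lck_mtx_unlock(dp->dom_mtx);
}
#endif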

int
dlil_ioctl(u_long proto_fam,
    struct ifnet *ifp,
    u_long ioctl_code,
    caddr_t ioctl_arg)
{
    struct ifnet_filter *filter;
    int retval = EOPNOTSUPP;
    int result = 0;
    struct if_family_str *if_family;
    int holding_read = 0;

    /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
    result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
    if (result != 0)
        return EOPNOTSUPP;

    dlil_read_begin();
    holding_read = 1;

    /* Run the interface filters first.
     * We want to run all filters before calling the protocol,
     * interface family, or interface.
     */
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
            filter->filt_ioctl != NULL) {
            result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /* Allow the protocol to handle the ioctl */
    if (proto_fam) {
        struct if_proto *proto = find_attached_proto(ifp, proto_fam);

        if (proto != 0) {
            result = EOPNOTSUPP;
            if (proto->proto_kpi == kProtoKPI_DLIL) {
                if (proto->kpi.dlil.dl_ioctl)
                    result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
            }
            else {
                if (proto->kpi.v1.ioctl)
                    result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
            }

            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Since we have incremented the use count on the ifp, we are guaranteed
     * that the ifp will not go away (the function pointers may not be changed).
     * We release the dlil read lock so the interface ioctl may trigger a
     * protocol attach. This happens with vlan and may occur with other virtual
     * interfaces.
     */
    dlil_read_end();
    holding_read = 0;

    /* retval is either 0 or EOPNOTSUPP */

    /*
     * Let the family handle this ioctl.
     * If it returns something non-zero and not EOPNOTSUPP, we're done.
     * If it returns zero, the ioctl was handled, so set retval to zero.
     */
    if_family = find_family_module(ifp->if_family);
    if ((if_family) && (if_family->ifmod_ioctl)) {
        result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

        /* Only update retval if no one has handled the ioctl */
        if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
            if (result == ENOTSUP)
                result = EOPNOTSUPP;
            retval = result;
            if (retval && retval != EOPNOTSUPP) {
                goto cleanup;
            }
        }
    }

    /*
     * Let the interface handle this ioctl.
     * If it returns EOPNOTSUPP, ignore that, we may have
     * already handled this in the protocol or family.
     */
    if (ifp->if_ioctl)
        result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

    /* Only update retval if no one has handled the ioctl */
    if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
        if (result == ENOTSUP)
            result = EOPNOTSUPP;
        retval = result;
        if (retval && retval != EOPNOTSUPP) {
            goto cleanup;
        }
    }

cleanup:
    if (holding_read)
        dlil_read_end();
    if (ifp_unuse(ifp))
        ifp_use_reached_zero(ifp);

    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
    ifnet_t ifp,
    bpf_tap_mode mode,
    bpf_packet_func callback)
{
    errno_t error = 0;

    dlil_read_begin();
    if (ifp->if_set_bpf_tap)
        error = ifp->if_set_bpf_tap(ifp, mode, callback);
    dlil_read_end();

    return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
    struct ifnet *ifp,
    const struct sockaddr *proto_addr,
    struct sockaddr *ll_addr,
    size_t ll_len)
{
    errno_t result = EOPNOTSUPP;
    struct if_proto *proto;
    const struct sockaddr *verify;

    dlil_read_begin();

    bzero(ll_addr, ll_len);

    /* Call the protocol first */
    proto = find_attached_proto(ifp, proto_addr->sa_family);
    if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
        proto->kpi.v1.resolve_multi != NULL) {
        result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
            (struct sockaddr_dl*)ll_addr, ll_len);
    }

    /* Let the interface verify the multicast address */
    if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
        if (result == 0)
            verify = ll_addr;
        else
            verify = proto_addr;
        result = ifp->if_check_multi(ifp, verify);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    struct if_proto *proto;
    errno_t result = 0;

    dlil_read_begin();

    proto = find_attached_proto(ifp, target_proto->sa_family);
    if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
        proto->kpi.v1.send_arp == NULL) {
        result = ENOTSUP;
    }
    else {
        result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    errno_t result = 0;

    if (target_proto == NULL || (sender_proto &&
        sender_proto->sa_family != target_proto->sa_family))
        return EINVAL;

    /*
     * If this is an ARP request and the target IP is IPv4LL,
     * send the request on all interfaces. (Check the address
     * family before interpreting the sockaddr as a sockaddr_in.)
     */
    if (target_proto->sa_family == AF_INET && ipv4_ll_arp_aware != 0 &&
        IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr) &&
        arpop == ARPOP_REQUEST) {
        ifnet_t *ifp_list;
        u_int32_t count;
        u_int32_t ifp_on;

        result = ENOTSUP;

        if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
            for (ifp_on = 0; ifp_on < count; ifp_on++) {
                errno_t new_result;
                ifaddr_t source_hw = NULL;
                ifaddr_t source_ip = NULL;
                struct sockaddr_in source_ip_copy;

                /*
                 * Only arp on interfaces marked for IPv4LL ARPing. This may
                 * mean that we don't ARP on the interface the subnet route
                 * points to.
                 */
                if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
                    continue;
                }

                source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

                /* Find the source IP address */
                ifnet_lock_shared(ifp_list[ifp_on]);
                TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
                    ifa_link) {
                    if (source_ip->ifa_addr &&
                        source_ip->ifa_addr->sa_family == AF_INET) {
                        break;
                    }
                }

                /* No IP Source, don't arp */
                if (source_ip == NULL) {
                    ifnet_lock_done(ifp_list[ifp_on]);
                    continue;
                }

                /* Copy the source IP address */
                source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

                ifnet_lock_done(ifp_list[ifp_on]);

                /* Send the ARP */
                new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
                    (struct sockaddr_dl*)source_hw->ifa_addr,
                    (struct sockaddr*)&source_ip_copy, NULL,
                    target_proto);

                if (result == ENOTSUP) {
                    result = new_result;
                }
            }

            /* Only free the list if we actually got one */
            ifnet_list_free(ifp_list);
        }
    }
    else {
        result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
            target_hw, target_proto);
    }

    return result;
}

static int
ifp_use(
    struct ifnet *ifp,
    int handle_zero)
{
    int old_value;
    int retval = 0;

    do {
        old_value = ifp->if_usecnt;
        if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
            retval = ENXIO; // ifp is invalid
            break;
        }
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

    return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */
static void
ifp_use_reached_zero(
    struct ifnet *ifp)
{
    struct if_family_str *if_family;
    ifnet_detached_func free_func;

    dlil_read_begin();

    if (ifp->if_usecnt != 0)
        panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

    /* Let BPF know we're detaching */
    bpfdetach(ifp);

    ifnet_head_lock_exclusive();
    ifnet_lock_exclusive(ifp);

    /* Remove ourselves from the list */
    TAILQ_REMOVE(&ifnet_head, ifp, if_link);
    ifnet_addrs[ifp->if_index - 1] = 0;

    /* ifp should be removed from the interface list */
    while (ifp->if_multiaddrs.lh_first) {
        struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

        /*
         * When the interface is gone, we will no longer
         * be listening on these multicasts. Various bits
         * of the stack may be referencing these multicasts,
         * release only our reference.
         */
        LIST_REMOVE(ifma, ifma_link);
        ifma->ifma_ifp = NULL;
        ifma_release(ifma);
    }
    ifnet_head_done();

    ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
    ifnet_lock_done(ifp);

    if_family = find_family_module(ifp->if_family);
    if (if_family && if_family->del_if)
        if_family->del_if(ifp);
#if 0
    if (--if_family->if_usecnt == 0) {
        if (if_family->shutdown)
            (*if_family->shutdown)();

        TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
        FREE(if_family, M_IFADDR);
    }
#endif

    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
    free_func = ifp->if_free;
    dlil_read_end();

    if (free_func)
        free_func(ifp);
}

static int
ifp_unuse(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
    if (oldval == 0)
        panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

    if (oldval > 1)
        return 0;

    if ((ifp->if_eflags & IFEF_DETACHING) == 0)
        panic("ifp_unuse: use count reached zero but detaching flag is not set!");

    return 1; /* caller must call ifp_use_reached_zero */
}
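
/*
 * Contract sketch for ifp_use/ifp_unuse (hypothetical caller, compiled
 * out): the use count is dropped while holding the dlil write lock, but
 * ifp_use_reached_zero may only run after dlil_write_end, exactly as the
 * comment above ifp_use_reached_zero requires.
 */
#if 0
static void
example_unuse_pattern(struct ifnet *ifp)
{
    int reached_zero;

    if (dlil_write_begin() != 0)
        return;                         /* couldn't take the write lock */
    reached_zero = ifp_unuse(ifp);
    dlil_write_end();
    if (reached_zero)
        ifp_use_reached_zero(ifp);      /* only after dlil_write_end */
}
#endif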

void
ifp_reference(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
    if (oldval == 0)
        panic("ifp_release - refcount decremented past zero!");
}

extern lck_mtx_t *domain_proto_mtx;

static int
dlil_attach_protocol_internal(
    struct if_proto *proto,
    const struct ddesc_head_str *demux,
    const struct ifnet_demux_desc *demux_list,
    u_int32_t demux_count)
{
    struct ddesc_head_str temp_head;
    struct kev_dl_proto_data ev_pr_data;
    struct ifnet *ifp = proto->ifp;
    int retval = 0;
    u_long hash_value = proto_hash_value(proto->protocol_family);
    int if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
    void* free_me = NULL;

    /* setup some of the common values */

    {
        struct domain *dp;

        lck_mtx_lock(domain_proto_mtx);
        dp = domains;
        while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
            dp = dp->dom_next;
        proto->dl_domain = dp;
        lck_mtx_unlock(domain_proto_mtx);
    }

    /*
     * Convert the demux descriptors to a type the interface
     * will understand. Checking e_flags should be safe, this
     * flag won't change.
     */
    if (if_using_kpi && demux) {
        /* Convert the demux linked list to a demux_list */
        struct dlil_demux_desc *demux_entry;
        struct ifnet_demux_desc *temp_list = NULL;
        u_int32_t i = 0;

        TAILQ_FOREACH(demux_entry, demux, next) {
            i++;
        }

        temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
        free_me = temp_list;

        if (temp_list == NULL)
            return ENOMEM;

        i = 0;
        TAILQ_FOREACH(demux_entry, demux, next) {
            /* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
            if (demux_entry->type == 1 ||
                demux_entry->type == 2 ||
                demux_entry->type == 3) {
                FREE(free_me, M_TEMP);
                return ENOTSUP;
            }

            temp_list[i].type = demux_entry->type;
            temp_list[i].data = demux_entry->native_type;
            temp_list[i].datalen = demux_entry->variants.native_type_length;
            i++;
        }
        demux_count = i;
        demux_list = temp_list;
    }
    else if (!if_using_kpi && demux_list != NULL) {
        struct dlil_demux_desc *demux_entry;
        u_int32_t i = 0;

        demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
        free_me = demux_entry;
        if (demux_entry == NULL)
            return ENOMEM;

        TAILQ_INIT(&temp_head);

        for (i = 0; i < demux_count; i++) {
            demux_entry[i].type = demux_list[i].type;
            demux_entry[i].native_type = demux_list[i].data;
            demux_entry[i].variants.native_type_length = demux_list[i].datalen;
            TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
        }
        demux = &temp_head;
    }

    /*
     * Take the write lock to protect readers and exclude other writers.
     */
    dlil_write_begin();

    /* Check that the interface isn't currently detaching */
    ifnet_lock_shared(ifp);
    if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
        ifnet_lock_done(ifp);
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return ENXIO;
    }
    ifnet_lock_done(ifp);

    if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return EEXIST;
    }

    /*
     * Call family module add_proto routine so it can refine the
     * demux descriptors as it wishes.
     */
    if (if_using_kpi)
        retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
    else {
        retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
            _cast_non_const(demux));
    }
    if (retval) {
        dlil_write_end();
        if (free_me)
            FREE(free_me, M_TEMP);
        return retval;
    }

    /*
     * We can't fail from this point on.
     * Increment the number of uses (protocol attachments + interface attached).
     */
    ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

    /*
     * Insert the protocol in the hash
     */
    {
        struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
        while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
            prev_proto = SLIST_NEXT(prev_proto, next_hash);
        if (prev_proto)
            SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
        else
            SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
    }

    /*
     * Add to if_proto list for this interface
     */
    if_proto_ref(proto);
    if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
        ifp->offercnt++;
    dlil_write_end();

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto->protocol_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));

    DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
        ifp->if_name, ifp->if_unit, retval);
    if (free_me)
        FREE(free_me, M_TEMP);
    return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
    const struct ifnet_attach_proto_param *proto_details)
{
    int retval = 0;
    struct if_proto *ifproto = NULL;

    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (ifproto == 0) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }
    bzero(ifproto, sizeof(*ifproto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = protocol;
    ifproto->proto_kpi = kProtoKPI_v1;
    ifproto->kpi.v1.input = proto_details->input;
    ifproto->kpi.v1.pre_output = proto_details->pre_output;
    ifproto->kpi.v1.event = proto_details->event;
    ifproto->kpi.v1.ioctl = proto_details->ioctl;
    ifproto->kpi.v1.detached = proto_details->detached;
    ifproto->kpi.v1.resolve_multi = proto_details->resolve;
    ifproto->kpi.v1.send_arp = proto_details->send_arp;

    retval = dlil_attach_protocol_internal(ifproto, NULL,
        proto_details->demux_list, proto_details->demux_count);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}

int
dlil_attach_protocol(struct dlil_proto_reg_str *proto)
{
    struct ifnet *ifp = NULL;
    struct if_proto *ifproto = NULL;
    int retval = 0;

    /*
     * Do everything we can before taking the write lock
     */

    if ((proto->protocol_family == 0) || (proto->interface_family == 0))
        return EINVAL;

    /*
     * Allocate and init a new if_proto structure
     */
    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (!ifproto) {
        DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }


    /* Look the interface up by family and unit; note that ifbyfamily
       does not take a reference on the ifp it returns */
    ifp = ifbyfamily(proto->interface_family, proto->unit_number);
    if (!ifp) {
        DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
            proto->interface_family, proto->unit_number);
        retval = ENXIO;
        goto end;
    }

    bzero(ifproto, sizeof(struct if_proto));

    ifproto->ifp = ifp;
    ifproto->protocol_family = proto->protocol_family;
    ifproto->proto_kpi = kProtoKPI_DLIL;
    ifproto->kpi.dlil.dl_input = proto->input;
    ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
    ifproto->kpi.dlil.dl_event = proto->event;
    ifproto->kpi.dlil.dl_offer = proto->offer;
    ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
    ifproto->kpi.dlil.dl_detached = proto->detached;

    retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);

end:
    if (retval && ifproto)
        FREE(ifproto, M_IFADDR);
    return retval;
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

static int
dlil_detach_protocol_internal(
    struct if_proto *proto)
{
    struct ifnet *ifp = proto->ifp;
    u_long proto_family = proto->protocol_family;
    struct kev_dl_proto_data ev_pr_data;

    if (proto->proto_kpi == kProtoKPI_DLIL) {
        if (proto->kpi.dlil.dl_detached)
            proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
    }
    else {
        if (proto->kpi.v1.detached)
            proto->kpi.v1.detached(ifp, proto->protocol_family);
    }
    if_proto_free(proto);

    /*
     * Cleanup routes that may still be in the routing table for that interface/protocol pair.
     */

    if_rtproto_del(ifp, proto_family);

    /* the reserved field carries the number of protocols still attached (subject to change) */
    ev_pr_data.proto_family = proto_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
        (struct net_event_data *)&ev_pr_data,
        sizeof(struct kev_dl_proto_data));
    return 0;
}

int
dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
{
    struct if_proto *proto = NULL;
    int retval = 0;
    int use_reached_zero = 0;


    if ((retval = dlil_write_begin()) != 0) {
        if (retval == EDEADLK) {
            retval = 0;
            dlil_read_begin();
            proto = find_attached_proto(ifp, proto_family);
            if (proto == 0) {
                retval = ENXIO;
            }
            else {
                proto->detaching = 1;
                dlil_detach_waiting = 1;
                wakeup(&dlil_detach_waiting);
            }
            dlil_read_end();
        }
        goto end;
    }

    proto = find_attached_proto(ifp, proto_family);

    if (proto == NULL) {
        retval = ENXIO;
        dlil_write_end();
        goto end;
    }

    /*
     * Call family module del_proto
     */

    if (ifp->if_del_proto)
        ifp->if_del_proto(ifp, proto->protocol_family);

    if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
        ifp->offercnt--;

    SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);

    /*
     * We can do the rest of the work outside of the write lock.
     */
    use_reached_zero = ifp_unuse(ifp);
    dlil_write_end();

    dlil_detach_protocol_internal(proto);

    /*
     * Only handle the case where the interface will go away after
     * we've sent the message. This way post message can send the
     * message to the interface safely.
     */

    if (use_reached_zero)
        ifp_use_reached_zero(ifp);

end:
    return retval;
}

/*
 * dlil_delayed_detach_thread is responsible for detaching
 * protocols, protocol filters, and interface filters after
 * an attempt was made to detach one of those items while
 * it was not safe to do so (i.e. while the dlil read lock
 * was held via dlil_read_begin).
 *
 * This function will take the dlil write lock and walk
 * through each of the interfaces looking for items with
 * the detaching flag set. When an item is found, it is
 * detached from the interface and placed on a local list.
 * After all of the items have been collected, we drop the
 * write lock and perform the post-detach work. This is done
 * so we only have to take the write lock once.
 *
 * When detaching a protocol filter, if we find that we
 * have detached the very last protocol and we need to call
 * ifp_use_reached_zero, we have to break out of our work
 * to drop the write lock so we can call ifp_use_reached_zero.
 */

2072 static void
2073 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2074 {
2075 thread_t self = current_thread();
2076 int asserted = 0;
2077
2078 ml_thread_policy(self, MACHINE_GROUP,
2079 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2080
2081
2082 while (1) {
2083 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2084 struct ifnet *ifp;
2085 struct proto_hash_entry detached_protos;
2086 struct ifnet_filter_head detached_filters;
2087 struct if_proto *proto;
2088 struct if_proto *next_proto;
2089 struct ifnet_filter *filt;
2090 struct ifnet_filter *next_filt;
2091 int reached_zero;
2092
2093 reached_zero = 0;
2094
2095 /* Clear the detach waiting flag */
2096 dlil_detach_waiting = 0;
2097 TAILQ_INIT(&detached_filters);
2098 SLIST_INIT(&detached_protos);
2099
2100 ifnet_head_lock_shared();
2101 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2102 int i;
2103
2104 // Look for protocols that need to be detached
2105 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2106 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2107 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2108
2109 // Detach this protocol
2110 if (proto->detaching) {
2111 if (ifp->if_del_proto)
2112 ifp->if_del_proto(ifp, proto->protocol_family);
2113 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2114 ifp->offercnt--;
2115 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2116 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2117 reached_zero = ifp_unuse(ifp);
2118 if (reached_zero) {
2119 break;
2120 }
2121 }
2122 else {
2123 // Update prev_nextptr to point to our next ptr
2124 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2125 }
2126 }
2127 }
2128
2129 // Look for interface filters that need to be detached
2130 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2131 next_filt = TAILQ_NEXT(filt, filt_next);
2132 if (filt->filt_detaching != 0) {
2133 // take this interface filter off the interface filter list
2134 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2135
2136 // put this interface filter on the detached filters list
2137 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2138 }
2139 }
2140
2141 if (ifp->if_delayed_detach) {
2142 ifp->if_delayed_detach = 0;
2143 reached_zero = ifp_unuse(ifp);
2144 }
2145
2146 if (reached_zero)
2147 break;
2148 }
2149 ifnet_head_done();
2150 dlil_write_end();
2151
2152 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2153 next_filt = TAILQ_NEXT(filt, filt_next);
2154 /*
2155 * dlil_detach_filter_internal won't remove an item from
2156 * the list if it is already detached (second parameter).
2157 * The item will be freed though.
2158 */
2159 dlil_detach_filter_internal(filt, 1);
2160 }
2161
2162 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2163 next_proto = SLIST_NEXT(proto, next_hash);
2164 dlil_detach_protocol_internal(proto);
2165 }
2166
2167 if (reached_zero) {
2168 ifp_use_reached_zero(ifp);
2169 dlil_detach_waiting = 1; // we may have missed something
2170 }
2171 }
2172
2173 if (!asserted && dlil_detach_waiting == 0) {
2174 asserted = 1;
2175 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2176 }
2177
2178 if (dlil_detach_waiting == 0) {
2179 asserted = 0;
2180 thread_block(dlil_delayed_detach_thread);
2181 }
2182 }
2183 }
2184
2185 static void
2186 dlil_call_delayed_detach_thread(void) {
2187 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2188 }
2189
2190 extern int if_next_index(void);
2191
2192 __private_extern__ int
2193 dlil_if_attach_with_address(
2194 struct ifnet *ifp,
2195 const struct sockaddr_dl *ll_addr)
2196 {
2197 u_long interface_family = ifp->if_family;
2198 struct if_family_str *if_family = NULL;
2199 int stat;
2200 struct ifnet *tmp_if;
2201 struct proto_hash_entry *new_proto_list = NULL;
2202 int locked = 0;
2203
2204
2205 ifnet_head_lock_shared();
2206
2207 /* Verify we aren't already on the list */
2208 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2209 if (tmp_if == ifp) {
2210 ifnet_head_done();
2211 return EEXIST;
2212 }
2213 }
2214
2215 ifnet_head_done();
2216
2217 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2218 #if IFNET_RW_LOCK
2219 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2220 #else
2221 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2222 #endif
2223
2224 if (ifp->if_lock == 0) {
2225 return ENOMEM;
2226 }
2227
2228 // Only use family if this is not a KPI interface
2229 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2230 if_family = find_family_module(interface_family);
2231 }
2232
2233 /*
2234 * Allow interfaces without protocol families to attach
2235 * only if they have the necessary fields filled out.
2236 */
2237
2238 if ((if_family == 0) &&
2239 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2240 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
2241 interface_family);
2242 return ENODEV;
2243 }
2244
2245 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2246 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2247 M_NKE, M_WAITOK);
2248
2249 if (new_proto_list == 0) {
2250 return ENOBUFS;
2251 }
2252 }
2253
2254 dlil_write_begin();
2255 locked = 1;
2256
2257 /*
2258 * Call the family module to fill in the appropriate fields in the
2259 * ifnet structure.
2260 */
2261
2262 if (if_family) {
2263 stat = if_family->add_if(ifp);
2264 if (stat) {
2265 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2266 dlil_write_end();
2267 return stat;
2268 }
2269 ifp->if_add_proto_u.original = if_family->add_proto;
2270 ifp->if_del_proto = if_family->del_proto;
2271 if_family->refcnt++;
2272 }
2273
2274 ifp->offercnt = 0;
2275 TAILQ_INIT(&ifp->if_flt_head);
2276
2277
2278 if (new_proto_list) {
2279 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2280 ifp->if_proto_hash = new_proto_list;
2281 new_proto_list = 0;
2282 }
2283
2284 /* old_if_attach */
2285 {
2286 struct ifaddr *ifa = 0;
2287
2288 if (ifp->if_snd.ifq_maxlen == 0)
2289 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2290 TAILQ_INIT(&ifp->if_prefixhead);
2291 LIST_INIT(&ifp->if_multiaddrs);
2292 ifnet_touch_lastchange(ifp);
2293
2294 /* usecount to track attachment to the ifnet list */
2295 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2296
2297 /* Lock the list of interfaces */
2298 ifnet_head_lock_exclusive();
2299 ifnet_lock_exclusive(ifp);
2300
2301 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2302 char workbuf[64];
2303 int namelen, masklen, socksize, ifasize;
2304
2305 ifp->if_index = if_next_index();
2306
2307 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2308 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2309 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2310 socksize = masklen + ifp->if_addrlen;
2311 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2312 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2313 socksize = sizeof(struct sockaddr_dl);
2314 socksize = ROUNDUP(socksize);
2315 ifasize = sizeof(struct ifaddr) + 2 * socksize;
2316 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2317 if (ifa) {
2318 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2319 ifnet_addrs[ifp->if_index - 1] = ifa;
2320 bzero(ifa, ifasize);
2321 sdl->sdl_len = socksize;
2322 sdl->sdl_family = AF_LINK;
2323 bcopy(workbuf, sdl->sdl_data, namelen);
2324 sdl->sdl_nlen = namelen;
2325 sdl->sdl_index = ifp->if_index;
2326 sdl->sdl_type = ifp->if_type;
2327 if (ll_addr) {
2328 sdl->sdl_alen = ll_addr->sdl_alen;
2329 if (ll_addr->sdl_alen != ifp->if_addrlen)
2330 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2331 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2332 }
2333 ifa->ifa_ifp = ifp;
2334 ifa->ifa_rtrequest = link_rtrequest;
2335 ifa->ifa_addr = (struct sockaddr*)sdl;
2336 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2337 ifa->ifa_netmask = (struct sockaddr*)sdl;
2338 sdl->sdl_len = masklen;
2339 while (namelen != 0)
2340 sdl->sdl_data[--namelen] = 0xff;
2341 }
2342 }
2343 else {
2344 /* preserve the first ifaddr */
2345 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2346 }
2347
2348
2349 TAILQ_INIT(&ifp->if_addrhead);
2350 ifa = ifnet_addrs[ifp->if_index - 1];
2351
2352 if (ifa) {
2353 /*
2354 * We don't use if_attach_ifa because we want
2355 * this address to be first on the list.
2356 */
2357 ifaref(ifa);
2358 ifa->ifa_debug |= IFA_ATTACHED;
2359 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2360 }
2361
2362 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2363 ifindex2ifnet[ifp->if_index] = ifp;
2364
2365 ifnet_head_done();
2366 }
2367 dlil_write_end();
2368
2369 if (if_family && if_family->init_if) {
2370 stat = if_family->init_if(ifp);
2371 if (stat) {
2372 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2373 }
2374 }
2375
2376 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
2377 ifnet_lock_done(ifp);
2378
2379 return 0;
2380 }
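/*
 * Illustrative sketch (editor's addition): attaching an interface with a
 * 6-byte Ethernet-style link-layer address. Note the attach code above
 * panics unless ll_addr->sdl_alen equals ifp->if_addrlen, so if_addrlen
 * must already be 6 here.
 */
static int
example_attach_ether(struct ifnet *ifp, const u_char lladdr[6])
{
	struct sockaddr_dl sdl;

	bzero(&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_alen = 6;
	bcopy(lladdr, LLADDR(&sdl), 6);	/* address bytes live in sdl_data */

	return dlil_if_attach_with_address(ifp, &sdl);
}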
2381
2382 int
2383 dlil_if_attach(struct ifnet *ifp)
2384 {
2385 return dlil_if_attach_with_address(ifp, NULL);
2386 }
2387
2388
2389 int
2390 dlil_if_detach(struct ifnet *ifp)
2391 {
2392 struct ifnet_filter *filter;
2393 struct ifnet_filter *filter_next;
2394 int zeroed = 0;
2395 int retval = 0;
2396 struct ifnet_filter_head fhead;
2397
2398
2399 ifnet_lock_exclusive(ifp);
2400
2401 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2402 /* Interface has already been detached */
2403 ifnet_lock_done(ifp);
2404 return ENXIO;
2405 }
2406
2407 /*
2408 * Indicate this interface is being detached.
2409 *
2410 * This should prevent protocols from attaching
2411 * from this point on. Interface will remain on
2412 * the list until all of the protocols are detached.
2413 */
2414 ifp->if_eflags |= IFEF_DETACHING;
2415 ifnet_lock_done(ifp);
2416
2417 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
2418
2419 if ((retval = dlil_write_begin()) != 0) {
2420 if (retval == EDEADLK) {
2421 retval = DLIL_WAIT_FOR_FREE;
2422
2423 /* We need to perform a delayed detach */
2424 ifp->if_delayed_detach = 1;
2425 dlil_detach_waiting = 1;
2426 wakeup(&dlil_detach_waiting);
2427 }
2428 return retval;
2429 }
2430
2431 /* Steal the list of interface filters */
2432 fhead = ifp->if_flt_head;
2433 TAILQ_INIT(&ifp->if_flt_head);
2434
2435 /* unuse the interface */
2436 zeroed = ifp_unuse(ifp);
2437
2438 dlil_write_end();
2439
2440 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2441 filter_next = TAILQ_NEXT(filter, filt_next);
2442 dlil_detach_filter_internal(filter, 1);
2443 }
2444
2445 if (zeroed == 0) {
2446 retval = DLIL_WAIT_FOR_FREE;
2447 }
2448 else
2449 {
2450 ifp_use_reached_zero(ifp);
2451 }
2452
2453 return retval;
2454 }
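/*
 * Illustrative usage sketch (editor's addition): a driver tearing down its
 * interface must treat DLIL_WAIT_FOR_FREE as "detach still in progress"
 * and wait for its if_free callback before reclaiming the ifnet storage.
 */
static void
example_stop_interface(struct ifnet *ifp)
{
	int err = dlil_if_detach(ifp);

	if (err == DLIL_WAIT_FOR_FREE) {
		/* final teardown will arrive later via ifp->if_free */
	} else if (err == ENXIO) {
		/* someone else already started the detach */
	}
}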
2455
2456
2457 int
2458 dlil_reg_if_modules(u_long interface_family,
2459 struct dlil_ifmod_reg_str *ifmod)
2460 {
2461 struct if_family_str *if_family;
2462
2463
2464 if (find_family_module(interface_family)) {
2465 DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
2466 interface_family);
2467 return EEXIST;
2468 }
2469
2470 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2471 (!ifmod->add_proto) || (!ifmod->del_proto)) {
2472 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
2473 return EINVAL;
2474 }
2475
2476 /*
2477 * The following is a gross hack to keep from breaking
2478 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2479 * does not zero the reserved fields in dlil_ifmod_reg_str.
2480 * As a result, we have to zero any function that used to
2481 * be reserved fields at the time Vicomsoft built their
2482 * kext. Radar #2974305
2483 */
2484 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
2485 if (interface_family == 123) { /* Vicom */
2486 ifmod->init_if = 0;
2487 } else {
2488 return EINVAL;
2489 }
2490 }
2491
2492 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2493 if (!if_family) {
2494 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
2495 return ENOMEM;
2496 }
2497
2498 bzero(if_family, sizeof(struct if_family_str));
2499
2500 if_family->if_family = interface_family & 0xffff;
2501 if_family->shutdown = ifmod->shutdown;
2502 if_family->add_if = ifmod->add_if;
2503 if_family->del_if = ifmod->del_if;
2504 if_family->init_if = ifmod->init_if;
2505 if_family->add_proto = ifmod->add_proto;
2506 if_family->del_proto = ifmod->del_proto;
2507 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
2508 if_family->refcnt = 1;
2509 if_family->flags = 0;
2510
2511 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
2512 return 0;
2513 }
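/*
 * Illustrative sketch (editor's addition): registering an interface family
 * module. dlil_reg_if_modules above rejects the registration unless add_if,
 * del_if, add_proto and del_proto are all non-NULL, and (per the Radar
 * #2974305 workaround) expects the reserved fields to be zero. The family
 * number is hypothetical, and the add_proto signature is an assumption,
 * modeled on the demux descriptor list dlil_attach_protocol_internal takes.
 */
static int example_add_if(__unused struct ifnet *ifp) { return 0; }
static int example_del_if(__unused struct ifnet *ifp) { return 0; }
static int example_add_proto(__unused struct ifnet *ifp,
    __unused u_long protocol_family,
    __unused struct ddesc_head_str *demux_desc_head) { return 0; }
static int example_del_proto(__unused struct ifnet *ifp,
    __unused u_long protocol_family) { return 0; }

static int
example_register_family(void)
{
	struct dlil_ifmod_reg_str ifmod;

	bzero(&ifmod, sizeof(ifmod));	/* keeps reserved[] zeroed */
	ifmod.add_if = example_add_if;
	ifmod.del_if = example_del_if;
	ifmod.add_proto = example_add_proto;
	ifmod.del_proto = example_del_proto;

	return dlil_reg_if_modules(0xbeef /* hypothetical family */, &ifmod);
}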
2514
2515 int dlil_dereg_if_modules(u_long interface_family)
2516 {
2517 struct if_family_str *if_family;
2518 int ret = 0;
2519
2520
2521 if_family = find_family_module(interface_family);
2522 if (if_family == 0) {
2523 return ENXIO;
2524 }
2525
2526 if (--if_family->refcnt == 0) {
2527 if (if_family->shutdown)
2528 (*if_family->shutdown)();
2529
2530 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2531 FREE(if_family, M_IFADDR);
2532 }
2533 else {
2534 if_family->flags |= DLIL_SHUTDOWN;
2535 ret = DLIL_WAIT_FOR_FREE;
2536 }
2537
2538 return ret;
2539 }
2540
2541
2542
2543 int
2544 dlil_reg_proto_module(
2545 u_long protocol_family,
2546 u_long interface_family,
2547 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2548 int (*detach)(struct ifnet *ifp, u_long protocol_family))
2549 {
2550 struct proto_family_str *proto_family;
2551
2552 if (attach == NULL) return EINVAL;
2553
2554 lck_mtx_lock(proto_family_mutex);
2555
2556 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2557 if (proto_family->proto_family == protocol_family &&
2558 proto_family->if_family == interface_family) {
2559 lck_mtx_unlock(proto_family_mutex);
2560 return EEXIST;
2561 }
2562 }
2563
2564 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2565 if (!proto_family) {
2566 lck_mtx_unlock(proto_family_mutex);
2567 return ENOMEM;
2568 }
2569
2570 bzero(proto_family, sizeof(struct proto_family_str));
2571 proto_family->proto_family = protocol_family;
2572 proto_family->if_family = interface_family & 0xffff;
2573 proto_family->attach_proto = attach;
2574 proto_family->detach_proto = detach;
2575
2576 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
2577 lck_mtx_unlock(proto_family_mutex);
2578 return 0;
2579 }
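/*
 * Illustrative sketch (editor's addition): registering attach/detach
 * plumbers for a protocol/interface-family pair. The callback signatures
 * are the ones dlil_reg_proto_module takes above; APPLE_IF_FAM_ETHERNET is
 * assumed to be the Ethernet interface family id.
 */
static int
example_inet_plumb(__unused struct ifnet *ifp, __unused u_long protocol_family)
{
	/* a real plumber would build demux descriptors and attach here */
	return 0;
}

static int
example_inet_unplumb(struct ifnet *ifp, u_long protocol_family)
{
	return dlil_detach_protocol(ifp, protocol_family);
}

static int
example_register_plumbers(void)
{
	return dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
	    example_inet_plumb, example_inet_unplumb);
}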
2580
2581 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2582 {
2583 struct proto_family_str *proto_family;
2584 int ret = 0;
2585
2586 lck_mtx_lock(proto_family_mutex);
2587
2588 proto_family = find_proto_module(protocol_family, interface_family);
2589 if (proto_family == 0) {
2590 lck_mtx_unlock(proto_family_mutex);
2591 return ENXIO;
2592 }
2593
2594 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2595 FREE(proto_family, M_IFADDR);
2596
2597 lck_mtx_unlock(proto_family_mutex);
2598 return ret;
2599 }
2600
2601 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
2602 {
2603 struct proto_family_str *proto_family;
2604 int ret = 0;
2605
2606 lck_mtx_lock(proto_family_mutex);
2607 proto_family = find_proto_module(protocol_family, ifp->if_family);
2608 if (proto_family == 0) {
2609 lck_mtx_unlock(proto_family_mutex);
2610 return ENXIO;
2611 }
2612
2613 ret = proto_family->attach_proto(ifp, protocol_family);
2614
2615 lck_mtx_unlock(proto_family_mutex);
2616 return ret;
2617 }
2618
2619
2620 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2621 {
2622 struct proto_family_str *proto_family;
2623 int ret = 0;
2624
2625 lck_mtx_lock(proto_family_mutex);
2626
2627 proto_family = find_proto_module(protocol_family, ifp->if_family);
2628 if (proto_family && proto_family->detach_proto)
2629 ret = proto_family->detach_proto(ifp, protocol_family);
2630 else
2631 ret = dlil_detach_protocol(ifp, protocol_family);
2632
2633 lck_mtx_unlock(proto_family_mutex);
2634 return ret;
2635 }
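/*
 * Illustrative usage (editor's addition): plumbing PF_INET onto an attached
 * interface through its registered plumber, then unplumbing it again.
 */
static void
example_plumb_cycle(struct ifnet *ifp)
{
	if (dlil_plumb_protocol(PF_INET, ifp) == 0)
		(void)dlil_unplumb_protocol(PF_INET, ifp);
}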
2636
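/*
 * Recycle stubs: dlil_if_release() below points a released interface's
 * if_ioctl, if_output, if_free and if_set_bpf_tap at the following no-ops,
 * so any straggling calls into a recycled ifnet fail or do nothing
 * harmlessly until the ifnet is reused.
 */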
2637 static errno_t
2638 dlil_recycle_ioctl(
2639 __unused ifnet_t ifnet_ptr,
2640 __unused u_int32_t ioctl_code,
2641 __unused void *ioctl_arg)
2642 {
2643 return EOPNOTSUPP;
2644 }
2645
2646 static int
2647 dlil_recycle_output(
2648 __unused struct ifnet *ifnet_ptr,
2649 struct mbuf *m)
2650 {
2651 m_freem(m);
2652 return 0;
2653 }
2654
2655 static void
2656 dlil_recycle_free(
2657 __unused ifnet_t ifnet_ptr)
2658 {
2659 }
2660
2661 static errno_t
2662 dlil_recycle_set_bpf_tap(
2663 __unused ifnet_t ifp,
2664 __unused bpf_tap_mode mode,
2665 __unused bpf_packet_func callback)
2666 {
2667 /* XXX not sure what to do here */
2668 return 0;
2669 }
2670
2671 int dlil_if_acquire(
2672 u_long family,
2673 const void *uniqueid,
2674 size_t uniqueid_len,
2675 struct ifnet **ifp)
2676 {
2677 struct ifnet *ifp1 = NULL;
2678 struct dlil_ifnet *dlifp1 = NULL;
2679 int ret = 0;
2680
2681 lck_mtx_lock(dlil_ifnet_mutex);
2682 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2683
2684 ifp1 = (struct ifnet *)dlifp1;
2685
2686 if (ifp1->if_family == family) {
2687
2688 /* same uniqueid and same length, or no uniqueid specified */
2689 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2690 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2691
2692 /* check for matching interface in use */
2693 if (ifp1->if_eflags & IFEF_INUSE) {
2694 if (uniqueid_len) {
2695 ret = EBUSY;
2696 goto end;
2697 }
2698 }
2699 else {
2700 if (!ifp1->if_lock)
2701 panic("ifp's lock is gone\n");
2702 ifnet_lock_exclusive(ifp1);
2703 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2704 ifnet_lock_done(ifp1);
2705 *ifp = ifp1;
2706 goto end;
2707 }
2708 }
2709 }
2710 }
2711
2712 /* no interface found, allocate a new one */
2713 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2714 if (dlifp1 == 0) {
2715 ret = ENOMEM;
2716 goto end;
2717 }
2718
2719 bzero(dlifp1, sizeof(*dlifp1));
2720
2721 if (uniqueid_len) {
2722 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2723 if (dlifp1->if_uniqueid == 0) {
2724 FREE(dlifp1, M_NKE);
2725 ret = ENOMEM;
2726 goto end;
2727 }
2728 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2729 dlifp1->if_uniqueid_len = uniqueid_len;
2730 }
2731
2732 ifp1 = (struct ifnet *)dlifp1;
2733 ifp1->if_eflags |= IFEF_INUSE;
2734 ifp1->if_name = dlifp1->if_namestorage;
2735
2736 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2737
2738 *ifp = ifp1;
2739
2740 end:
2741 lck_mtx_unlock(dlil_ifnet_mutex);
2742
2743 return ret;
2744 }
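/*
 * Illustrative usage sketch (editor's addition): acquiring an ifnet keyed
 * on a driver-supplied unique id, so the same storage is recycled across
 * unload/reload (EBUSY means a live IFEF_INUSE interface already owns the
 * id). The id string and the APPLE_IF_FAM_ETHERNET constant are
 * assumptions; the matching teardown path ends in dlil_if_release() below.
 */
static int
example_acquire_ifnet(struct ifnet **ifp)
{
	static const char uniqueid[] = "com.example.driver.en0";

	return dlil_if_acquire(APPLE_IF_FAM_ETHERNET,
	    uniqueid, sizeof(uniqueid), ifp);
}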
2745
2746 void dlil_if_release(struct ifnet *ifp)
2747 {
2748 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2749
2750
2751 /* Interface does not have a lock until it is attached - radar 3713951 */
2752 if (ifp->if_lock)
2753 ifnet_lock_exclusive(ifp);
2754 ifp->if_eflags &= ~IFEF_INUSE;
2755 ifp->if_ioctl = dlil_recycle_ioctl;
2756 ifp->if_output = dlil_recycle_output;
2757 ifp->if_free = dlil_recycle_free;
2758 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2759
2760 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2761 ifp->if_name = dlifp->if_namestorage;
2762 if (ifp->if_lock)
2763 ifnet_lock_done(ifp);
2764
2765 }