1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Copyright (c) 1999 Apple Computer, Inc.
32 *
33 * Data Link Interface Layer
34 * Author: Ted Walker
35 */
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/socket.h>
43 #include <sys/domain.h>
44 #include <sys/user.h>
45 #include <net/if_dl.h>
46 #include <net/if.h>
47 #include <net/route.h>
48 #include <net/if_var.h>
49 #include <net/dlil.h>
50 #include <net/if_arp.h>
51 #include <sys/kern_event.h>
52 #include <sys/kdebug.h>
53
54 #include <kern/assert.h>
55 #include <kern/task.h>
56 #include <kern/thread.h>
57 #include <kern/sched_prim.h>
58 #include <kern/locks.h>
59
60 #include <net/if_types.h>
61 #include <net/kpi_interfacefilter.h>
62
63 #include <libkern/OSAtomic.h>
64
65 #include <machine/machine_routines.h>
66
67 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
68 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
69 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
70 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
71 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
72
73
74 #define MAX_DL_TAGS 16
75 #define MAX_DLIL_FILTERS 16
76 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
77 #define MAX_LINKADDR 4 /* LONGWORDS */
78 #define M_NKE M_IFADDR
79
80 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
81 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
82
83 #if 0
84 #define DLIL_PRINTF printf
85 #else
86 #define DLIL_PRINTF kprintf
87 #endif
88
89 enum {
90 kProtoKPI_DLIL = 0,
91 kProtoKPI_v1 = 1
92 };
93
94 struct if_proto {
95 SLIST_ENTRY(if_proto) next_hash;
96 int refcount;
97 int detaching;
98 struct ifnet *ifp;
99 struct domain *dl_domain;
100 protocol_family_t protocol_family;
101 int proto_kpi;
102 union {
103 struct {
104 dl_input_func dl_input;
105 dl_pre_output_func dl_pre_output;
106 dl_event_func dl_event;
107 dl_offer_func dl_offer;
108 dl_ioctl_func dl_ioctl;
109 dl_detached_func dl_detached;
110 } dlil;
111 struct {
112 proto_media_input input;
113 proto_media_preout pre_output;
114 proto_media_event event;
115 proto_media_ioctl ioctl;
116 proto_media_detached detached;
117 proto_media_resolve_multi resolve_multi;
118 proto_media_send_arp send_arp;
119 } v1;
120 } kpi;
121 };
122
123 SLIST_HEAD(proto_hash_entry, if_proto);
124
125
126 struct dlil_ifnet {
127 /* ifnet and drvr_ext are used by the stack and drivers
128 drvr_ext extends the public ifnet and must follow dl_if */
129 struct ifnet dl_if; /* public ifnet */
130
131 /* dlil private fields */
132 TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnets are linked together, */
133 /* but this is not the ifnet list */
134 void *if_uniqueid; /* unique id identifying the interface */
135 size_t if_uniqueid_len;/* length of the unique id */
136 char if_namestorage[IFNAMSIZ]; /* interface name storage */
137 };
138
139 struct ifnet_filter {
140 TAILQ_ENTRY(ifnet_filter) filt_next;
141 ifnet_t filt_ifp;
142 int filt_detaching;
143
144 const char *filt_name;
145 void *filt_cookie;
146 protocol_family_t filt_protocol;
147 iff_input_func filt_input;
148 iff_output_func filt_output;
149 iff_event_func filt_event;
150 iff_ioctl_func filt_ioctl;
151 iff_detached_func filt_detached;
152 };
153
154 struct if_family_str {
155 TAILQ_ENTRY(if_family_str) if_fam_next;
156 u_long if_family;
157 int refcnt;
158 int flags;
159
160 #define DLIL_SHUTDOWN 1
161
162 int (*add_if)(struct ifnet *ifp);
163 int (*del_if)(struct ifnet *ifp);
164 int (*init_if)(struct ifnet *ifp);
165 int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
166 ifnet_del_proto_func del_proto;
167 ifnet_ioctl_func ifmod_ioctl;
168 int (*shutdown)(void);
169 };
170
171 struct proto_family_str {
172 TAILQ_ENTRY(proto_family_str) proto_fam_next;
173 u_long proto_family;
174 u_long if_family;
175 int usecnt;
176
177 int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
178 int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
179 };
180
181 enum {
182 kIfNetUseCount_MayBeZero = 0,
183 kIfNetUseCount_MustNotBeZero = 1
184 };
185
186 static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
187 static TAILQ_HEAD(, if_family_str) if_family_head;
188 static TAILQ_HEAD(, proto_family_str) proto_family_head;
189 static lck_grp_t *dlil_lock_group;
190 static lck_grp_t *ifnet_lock_group;
191 static lck_grp_t *ifnet_head_lock_group;
192 static lck_attr_t *ifnet_lock_attr;
193 static lck_mtx_t *proto_family_mutex;
194 static lck_rw_t *ifnet_head_mutex;
195 static lck_mtx_t *dlil_ifnet_mutex;
196 static lck_mtx_t *dlil_mutex;
197 static unsigned long dlil_read_count = 0;
198 static unsigned long dlil_detach_waiting = 0;
199 extern u_int32_t ipv4_ll_arp_aware;
200
201 int dlil_initialized = 0;
202 lck_spin_t *dlil_input_lock;
203 __private_extern__ thread_t dlil_input_thread_ptr = 0;
204 int dlil_input_thread_wakeup = 0;
205 __private_extern__ int dlil_output_thread_wakeup = 0;
206 static struct mbuf *dlil_input_mbuf_head = NULL;
207 static struct mbuf *dlil_input_mbuf_tail = NULL;
208 #if NLOOP > 1
209 #error dlil_input() needs to be revised to support more than one loopback interface
210 #endif
211 static struct mbuf *dlil_input_loop_head = NULL;
212 static struct mbuf *dlil_input_loop_tail = NULL;
213
214 static void dlil_input_thread(void);
215 static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
216 struct ifnet *ifbyfamily(u_long family, short unit);
217 static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
218 static void dlil_call_delayed_detach_thread(void);
219
220 static void dlil_read_begin(void);
221 static void dlil_read_end(void);
222 static int dlil_write_begin(void);
223 static void dlil_write_end(void);
224
225 static int ifp_use(struct ifnet *ifp, int handle_zero);
226 static int ifp_unuse(struct ifnet *ifp);
227 static void ifp_use_reached_zero(struct ifnet *ifp);
228
229 extern void bpfdetach(struct ifnet*);
230 extern void proto_input_run(void); // new run_netisr
231
232
233 int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);
234
235 __private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);
236
237 int dlil_expand_mcl;
238
239 extern u_int32_t inject_buckets;
240
241 static const u_int32_t dlil_writer_waiting = 0x80000000;
242
243 static __inline__ void*
244 _cast_non_const(const void * ptr) {
245 union {
246 const void* cval;
247 void* val;
248 } ret;
249
250 ret.cval = ptr;
251 return (ret.val);
252 }
253
254 /* Should these be inline? */
255 static void
256 dlil_read_begin(void)
257 {
258 unsigned long new_value;
259 unsigned long old_value;
260 struct uthread *uth = get_bsdthread_info(current_thread());
261
262 if (uth->dlil_incremented_read == dlil_writer_waiting)
263 panic("dlil_read_begin - thread is already a writer");
264
265 do {
266 again:
267 old_value = dlil_read_count;
268
269 if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
270 {
271 tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
272 goto again;
273 }
274
275 new_value = old_value + 1;
276 } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));
277
278 uth->dlil_incremented_read++;
279 }
280
281 static void
282 dlil_read_end(void)
283 {
284 struct uthread *uth = get_bsdthread_info(current_thread());
285
286 OSDecrementAtomic((UInt32*)&dlil_read_count);
287 uth->dlil_incremented_read--;
288 if (dlil_read_count == dlil_writer_waiting)
289 wakeup(_cast_non_const(&dlil_writer_waiting));
290 }
291
292 static int
293 dlil_write_begin(void)
294 {
295 struct uthread *uth = get_bsdthread_info(current_thread());
296
297 if (uth->dlil_incremented_read != 0) {
298 return EDEADLK;
299 }
300 lck_mtx_lock(dlil_mutex);
301 OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
302 again:
303 if (dlil_read_count == dlil_writer_waiting) {
304 uth->dlil_incremented_read = dlil_writer_waiting;
305 return 0;
306 }
307 else {
308 tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
309 goto again;
310 }
311 }
312
313 static void
314 dlil_write_end(void)
315 {
316 struct uthread *uth = get_bsdthread_info(current_thread());
317
318 if (uth->dlil_incremented_read != dlil_writer_waiting)
319 panic("dlil_write_end - thread is not a writer");
320 OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
321 lck_mtx_unlock(dlil_mutex);
322 uth->dlil_incremented_read = 0;
323 wakeup(&dlil_read_count);
324 }
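/*
 * A minimal usage sketch (hypothetical caller, for illustration only)
 * of the read/write protection above.  Readers may nest; a thread that
 * already holds the read "lock" gets EDEADLK back from
 * dlil_write_begin() and must fall back to a delayed detach instead
 * of blocking on itself.
 */
static void
dlil_rw_usage_sketch(void)
{
	dlil_read_begin();
	/* ... safe to walk the filter and protocol lists here ... */
	dlil_read_end();

	if (dlil_write_begin() == 0) {
		/* ... exclusive access: safe to modify the lists ... */
		dlil_write_end();
	} else {
		/* EDEADLK: flag the work for dlil_delayed_detach_thread */
		dlil_detach_waiting = 1;
		wakeup(&dlil_detach_waiting);
	}
}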
325
326 #define PROTO_HASH_SLOTS 0x5
327
328 /*
329 * Internal functions.
330 */
331
332 static int
333 proto_hash_value(u_long protocol_family)
334 {
335 switch(protocol_family) {
336 case PF_INET:
337 return 0;
338 case PF_INET6:
339 return 1;
340 case PF_APPLETALK:
341 return 2;
342 case PF_VLAN:
343 return 3;
344 default:
345 return 4;
346 }
347 }
348
349 static
350 struct if_family_str *find_family_module(u_long if_family)
351 {
352 struct if_family_str *mod = NULL;
353
354 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
355 if (mod->if_family == (if_family & 0xffff))
356 break;
357 }
358
359 return mod;
360 }
361
362 static
363 struct proto_family_str*
364 find_proto_module(u_long proto_family, u_long if_family)
365 {
366 struct proto_family_str *mod = NULL;
367
368 TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
369 if ((mod->proto_family == (proto_family & 0xffff))
370 && (mod->if_family == (if_family & 0xffff)))
371 break;
372 }
373
374 return mod;
375 }
376
377 static struct if_proto*
378 find_attached_proto(struct ifnet *ifp, u_long protocol_family)
379 {
380 struct if_proto *proto = NULL;
381 u_long i = proto_hash_value(protocol_family);
382 if (ifp->if_proto_hash) {
383 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
384 }
385
386 while(proto && proto->protocol_family != protocol_family) {
387 proto = SLIST_NEXT(proto, next_hash);
388 }
389
390 return proto;
391 }
392
393 static void
394 if_proto_ref(struct if_proto *proto)
395 {
396 OSAddAtomic(1, (UInt32*)&proto->refcount);
397 }
398
399 static void
400 if_proto_free(struct if_proto *proto)
401 {
402 int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);
403
404 if (oldval == 1) { /* This was the last reference */
405 FREE(proto, M_IFADDR);
406 }
407 }
408
409 __private_extern__ void
410 ifnet_lock_assert(
411 __unused struct ifnet *ifp,
412 __unused int what)
413 {
414 #if IFNET_RW_LOCK
415 /*
416 * Not implemented for rw locks.
417 *
418 * Function exists so when/if we use mutex we can
419 * enable this check.
420 */
421 #else
422 lck_mtx_assert(ifp->if_lock, what);
423 #endif
424 }
425
426 __private_extern__ void
427 ifnet_lock_shared(
428 struct ifnet *ifp)
429 {
430 #if IFNET_RW_LOCK
431 lck_rw_lock_shared(ifp->if_lock);
432 #else
433 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
434 lck_mtx_lock(ifp->if_lock);
435 #endif
436 }
437
438 __private_extern__ void
439 ifnet_lock_exclusive(
440 struct ifnet *ifp)
441 {
442 #if IFNET_RW_LOCK
443 lck_rw_lock_exclusive(ifp->if_lock);
444 #else
445 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
446 lck_mtx_lock(ifp->if_lock);
447 #endif
448 }
449
450 __private_extern__ void
451 ifnet_lock_done(
452 struct ifnet *ifp)
453 {
454 #if IFNET_RW_LOCK
455 lck_rw_done(ifp->if_lock);
456 #else
457 lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
458 lck_mtx_unlock(ifp->if_lock);
459 #endif
460 }
461
462 __private_extern__ void
463 ifnet_head_lock_shared()
464 {
465 lck_rw_lock_shared(ifnet_head_mutex);
466 }
467
468 __private_extern__ void
469 ifnet_head_lock_exclusive()
470 {
471 lck_rw_lock_exclusive(ifnet_head_mutex);
472 }
473
474 __private_extern__ void
475 ifnet_head_done()
476 {
477 lck_rw_done(ifnet_head_mutex);
478 }
479
480 /*
481 * Public functions.
482 */
483 struct ifnet *ifbyfamily(u_long family, short unit)
484 {
485 struct ifnet *ifp;
486
487 ifnet_head_lock_shared();
488 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
489 if ((family == ifp->if_family) && (ifp->if_unit == unit))
490 break;
491 ifnet_head_done();
492
493 return ifp;
494 }
495
496 static int dlil_ifp_proto_count(struct ifnet * ifp)
497 {
498 int count = 0;
499 int i;
500
501 if (ifp->if_proto_hash != NULL) {
502 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
503 struct if_proto *proto;
504 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
505 count++;
506 }
507 }
508 }
509
510 return count;
511 }
512
513 __private_extern__ void
514 dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
515 struct net_event_data *event_data, u_long event_data_len)
516 {
517 struct net_event_data ev_data;
518 struct kev_msg ev_msg;
519
520 /*
521 * a net event always starts with a net_event_data structure
522 * but the caller can generate a simple net event or
523 * provide a longer event structure to post
524 */
525
526 ev_msg.vendor_code = KEV_VENDOR_APPLE;
527 ev_msg.kev_class = KEV_NETWORK_CLASS;
528 ev_msg.kev_subclass = event_subclass;
529 ev_msg.event_code = event_code;
530
531 if (event_data == 0) {
532 event_data = &ev_data;
533 event_data_len = sizeof(struct net_event_data);
534 }
535
536 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
537 event_data->if_family = ifp->if_family;
538 event_data->if_unit = (unsigned long) ifp->if_unit;
539
540 ev_msg.dv[0].data_length = event_data_len;
541 ev_msg.dv[0].data_ptr = event_data;
542 ev_msg.dv[1].data_length = 0;
543
544 dlil_event_internal(ifp, &ev_msg);
545 }
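/*
 * Example (hypothetical caller): posting a simple link-up event.
 * Passing a NULL event_data makes dlil_post_msg use its local
 * net_event_data and fill in the name, family and unit itself.
 */
static void
dlil_post_link_up_sketch(struct ifnet *ifp)
{
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
}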
546
547 void dlil_init(void);
548 void
549 dlil_init(void)
550 {
551 lck_grp_attr_t *grp_attributes = 0;
552 lck_attr_t *lck_attributes = 0;
553 lck_grp_t *input_lock_grp = 0;
554
555 TAILQ_INIT(&dlil_ifnet_head);
556 TAILQ_INIT(&if_family_head);
557 TAILQ_INIT(&proto_family_head);
558 TAILQ_INIT(&ifnet_head);
559
560 /* Setup the lock groups we will use */
561 grp_attributes = lck_grp_attr_alloc_init();
562
563 dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
564 #if IFNET_RW_LOCK
565 ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
566 #else
567 ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
568 #endif
569 ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
570 input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
571 lck_grp_attr_free(grp_attributes);
572 grp_attributes = 0;
573
574 /* Setup the lock attributes we will use */
575 lck_attributes = lck_attr_alloc_init();
576
577 ifnet_lock_attr = lck_attr_alloc_init();
578
579 dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
580 input_lock_grp = 0;
581
582 ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
583 proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
584 dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
585 dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
586
587 lck_attr_free(lck_attributes);
588 lck_attributes = 0;
589
590 /*
591 * Start up the dlil input thread once everything is initialized
592 */
593 (void) kernel_thread(kernel_task, dlil_input_thread);
594 (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
595 }
596
597 int
598 dlil_attach_filter(
599 struct ifnet *ifp,
600 const struct iff_filter *if_filter,
601 interface_filter_t *filter_ref)
602 {
603 int retval = 0;
604 struct ifnet_filter *filter;
605
606 MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
607 if (filter == NULL)
608 return ENOMEM;
609 bzero(filter, sizeof(*filter));
610
611
612 filter->filt_ifp = ifp;
613 filter->filt_cookie = if_filter->iff_cookie;
614 filter->filt_name = if_filter->iff_name;
615 filter->filt_protocol = if_filter->iff_protocol;
616 filter->filt_input = if_filter->iff_input;
617 filter->filt_output = if_filter->iff_output;
618 filter->filt_event = if_filter->iff_event;
619 filter->filt_ioctl = if_filter->iff_ioctl;
620 filter->filt_detached = if_filter->iff_detached;
621
622 if ((retval = dlil_write_begin()) != 0) {
623 /* Failed to acquire the write lock */
624 FREE(filter, M_NKE);
625 return retval;
626 }
627 TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
628 dlil_write_end();
629 *filter_ref = filter;
630 return retval;
631 }
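/*
 * Sketch of a hypothetical client of dlil_attach_filter.  The iff_*
 * fields mirror the filt_* fields copied above; a zero iff_protocol
 * means the filter sees traffic for every protocol on the interface.
 * The filter name below is an assumption purely for illustration.
 */
static errno_t
example_filt_input(__unused void *cookie, __unused ifnet_t ifp,
    __unused protocol_family_t protocol, __unused mbuf_t *data,
    __unused char **frame_ptr)
{
	return 0;	/* 0 == let the packet continue up the stack */
}

static int
example_attach_filter(struct ifnet *ifp, interface_filter_t *ref)
{
	struct iff_filter filt;

	bzero(&filt, sizeof(filt));
	filt.iff_name = "com.example.filter";	/* hypothetical name */
	filt.iff_protocol = 0;			/* all protocols */
	filt.iff_input = example_filt_input;
	return dlil_attach_filter(ifp, &filt, ref);
}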
632
633 static int
634 dlil_detach_filter_internal(interface_filter_t filter, int detached)
635 {
636 int retval = 0;
637
638 if (detached == 0) {
639 ifnet_t ifp = NULL;
640 interface_filter_t entry = NULL;
641
642 /* Take the write lock */
643 retval = dlil_write_begin();
644 if (retval != 0 && retval != EDEADLK)
645 return retval;
646
647 /*
648 * At this point either we have the write lock (retval == 0)
649 * or we couldn't get it (retval == EDEADLK) because someone
650 * else up the stack is holding the read lock. It is safe to
651 * read, either the read or write is held. Verify the filter
652 * parameter before proceeding.
653 */
654 ifnet_head_lock_shared();
655 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
656 TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
657 if (entry == filter)
658 break;
659 }
660 if (entry == filter)
661 break;
662 }
663 ifnet_head_done();
664
665 if (entry != filter) {
666 /* filter parameter is not a valid filter ref */
667 if (retval == 0) {
668 dlil_write_end();
669 }
670 return EINVAL;
671 }
672
673 if (retval == EDEADLK) {
674 /* Perform a delayed detach */
675 filter->filt_detaching = 1;
676 dlil_detach_waiting = 1;
677 wakeup(&dlil_detach_waiting);
678 return 0;
679 }
680
681 /* Remove the filter from the list */
682 TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
683 dlil_write_end();
684 }
685
686 /* Call the detached function if there is one */
687 if (filter->filt_detached)
688 filter->filt_detached(filter->filt_cookie, filter->filt_ifp);
689
690 /* Free the filter */
691 FREE(filter, M_NKE);
692
693 return retval;
694 }
695
696 void
697 dlil_detach_filter(interface_filter_t filter)
698 {
699 if (filter == NULL)
700 return;
701 dlil_detach_filter_internal(filter, 0);
702 }
703
704 static void
705 dlil_input_thread_continue(
706 __unused void* foo,
707 __unused wait_result_t wait)
708 {
709 while (1) {
710 struct mbuf *m, *m_loop;
711
712 lck_spin_lock(dlil_input_lock);
713 m = dlil_input_mbuf_head;
714 dlil_input_mbuf_head = NULL;
715 dlil_input_mbuf_tail = NULL;
716 m_loop = dlil_input_loop_head;
717 dlil_input_loop_head = NULL;
718 dlil_input_loop_tail = NULL;
719 lck_spin_unlock(dlil_input_lock);
720
721 /*
722 * NOTE warning %%% attention !!!!
723 * We should think about putting in some thread starvation safeguards
724 * in case we deal with long chains of packets.
725 */
726 while (m) {
727 struct mbuf *m0 = m->m_nextpkt;
728 void *header = m->m_pkthdr.header;
729
730 m->m_nextpkt = NULL;
731 m->m_pkthdr.header = NULL;
732 (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
733 m = m0;
734 }
735 m = m_loop;
736 while (m) {
737 struct mbuf *m0 = m->m_nextpkt;
738 void *header = m->m_pkthdr.header;
739 struct ifnet *ifp = &loif[0];
740
741 m->m_nextpkt = NULL;
742 m->m_pkthdr.header = NULL;
743 (void) dlil_input_packet(ifp, m, header);
744 m = m0;
745 }
746
747 proto_input_run();
748
749 if (dlil_input_mbuf_head == NULL &&
750 dlil_input_loop_head == NULL && inject_buckets == 0) {
751 assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
752 (void) thread_block(dlil_input_thread_continue);
753 /* NOTREACHED */
754 }
755 }
756 }
757
758 void dlil_input_thread(void)
759 {
760 register thread_t self = current_thread();
761
762 ml_thread_policy(self, MACHINE_GROUP,
763 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
764
765 dlil_initialized = 1;
766 dlil_input_thread_ptr = current_thread();
767 dlil_input_thread_continue(NULL, THREAD_RESTART);
768 }
769
770 int
771 dlil_input_with_stats(
772 struct ifnet *ifp,
773 struct mbuf *m_head,
774 struct mbuf *m_tail,
775 const struct ifnet_stat_increment_param *stats)
776 {
777 /* WARNING
778 * Because of looped-back multicast we cannot stuff the ifp in
779 * the rcvif of the packet header: loopback has its own dlil
780 * input queue
781 */
782
783 lck_spin_lock(dlil_input_lock);
784 if (ifp->if_type != IFT_LOOP) {
785 if (dlil_input_mbuf_head == NULL)
786 dlil_input_mbuf_head = m_head;
787 else if (dlil_input_mbuf_tail != NULL)
788 dlil_input_mbuf_tail->m_nextpkt = m_head;
789 dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
790 } else {
791 if (dlil_input_loop_head == NULL)
792 dlil_input_loop_head = m_head;
793 else if (dlil_input_loop_tail != NULL)
794 dlil_input_loop_tail->m_nextpkt = m_head;
795 dlil_input_loop_tail = m_tail ? m_tail : m_head;
796 }
797 if (stats) {
798 ifp->if_data.ifi_ipackets += stats->packets_in;
799 ifp->if_data.ifi_ibytes += stats->bytes_in;
800 ifp->if_data.ifi_ierrors += stats->errors_in;
801
802 ifp->if_data.ifi_opackets += stats->packets_out;
803 ifp->if_data.ifi_obytes += stats->bytes_out;
804 ifp->if_data.ifi_oerrors += stats->errors_out;
805
806 ifp->if_data.ifi_collisions += stats->collisions;
807 ifp->if_data.ifi_iqdrops += stats->dropped;
808 }
809 lck_spin_unlock(dlil_input_lock);
810
811 wakeup((caddr_t)&dlil_input_thread_wakeup);
812
813 return 0;
814 }
815
816 int
817 dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
818 {
819 return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
820 }
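/*
 * Sketch of a hypothetical driver handing a received chain to dlil.
 * Packets are linked through m_nextpkt with m_pkthdr.rcvif and
 * m_pkthdr.header already set; the increments are folded into the
 * ifnet counters under dlil_input_lock above.
 */
static void
example_driver_input(struct ifnet *ifp, struct mbuf *head,
    struct mbuf *tail, u_int32_t npackets, u_int32_t nbytes)
{
	struct ifnet_stat_increment_param stats;

	bzero(&stats, sizeof(stats));
	stats.packets_in = npackets;
	stats.bytes_in = nbytes;
	(void) dlil_input_with_stats(ifp, head, tail, &stats);
}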
821
822 int
823 dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
824 char *frame_header)
825 {
826 int retval;
827 struct if_proto *ifproto = 0;
828 protocol_family_t protocol_family;
829 struct ifnet_filter *filter;
830
831
832 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);
833
834 /*
835 * Lock the interface while we run through
836 * the filters and the demux. This lock
837 * protects the filter list and the demux list.
838 */
839 dlil_read_begin();
840
841 /*
842 * Call family demux module. If the demux module finds a match
843 * for the frame it will fill-in the ifproto pointer.
844 */
845
846 retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
847 if (retval != 0)
848 protocol_family = 0;
849 if (retval == EJUSTRETURN) {
850 dlil_read_end();
851 return 0;
852 }
853
854 /* DANGER!!! */
855 if (m->m_flags & (M_BCAST|M_MCAST))
856 ifp->if_imcasts++;
857
858 /*
859 * Run interface filters
860 */
861
862 /* Do not pass VLAN tagged packets to filters PR-3586856 */
863 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
864 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
865 int filter_result;
866 if (filter->filt_input && (filter->filt_protocol == 0 ||
867 filter->filt_protocol == protocol_family)) {
868 filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);
869
870 if (filter_result) {
871 dlil_read_end();
872 if (filter_result == EJUSTRETURN) {
873 filter_result = 0;
874 }
875 else {
876 m_freem(m);
877 }
878
879 return filter_result;
880 }
881 }
882 }
883 }
884
885 /* Demux is done, interface filters have been processed, unlock the mutex */
886 if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
887 dlil_read_end();
888 if (retval != EJUSTRETURN) {
889 m_freem(m);
890 return retval;
891 }
892 else
893 return 0;
894 }
895
896 ifproto = find_attached_proto(ifp, protocol_family);
897
898 if (ifproto == 0) {
899 dlil_read_end();
900 DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
901 m_freem(m);
902 return 0;
903 }
904
905 /*
906 * Hand the packet off to the protocol.
907 */
908
909 if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
910 lck_mtx_lock(ifproto->dl_domain->dom_mtx);
911 }
912
913 if (ifproto->proto_kpi == kProtoKPI_DLIL)
914 retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
915 ifp, ifproto->protocol_family,
916 TRUE);
917 else
918 retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);
919
920 if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
921 lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
922 }
923
924 dlil_read_end();
925
926 if (retval == EJUSTRETURN)
927 retval = 0;
928 else
929 if (retval)
930 m_freem(m);
931
932 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
933 return retval;
934 }
935
936 static int
937 dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
938 {
939 struct ifnet_filter *filter;
940
941 if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
942 dlil_read_begin();
943
944 /* Pass the event to the interface filters */
945 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
946 if (filter->filt_event)
947 filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
948 }
949
950 if (ifp->if_proto_hash) {
951 int i;
952
953 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
954 struct if_proto *proto;
955
956 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
957 /* Pass the event to the protocol */
958 if (proto->proto_kpi == kProtoKPI_DLIL) {
959 if (proto->kpi.dlil.dl_event)
960 proto->kpi.dlil.dl_event(ifp, event);
961 }
962 else {
963 if (proto->kpi.v1.event)
964 proto->kpi.v1.event(ifp, proto->protocol_family, event);
965 }
966 }
967 }
968 }
969
970 dlil_read_end();
971
972 /* Pass the event to the interface */
973 if (ifp->if_event)
974 ifp->if_event(ifp, event);
975
976 if (ifp_unuse(ifp))
977 ifp_use_reached_zero(ifp);
978 }
979
980 return kev_post_msg(event);
981 }
982
983 int
984 dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
985 {
986 int result = 0;
987
988 struct kev_msg kev_msg;
989
990 kev_msg.vendor_code = event->vendor_code;
991 kev_msg.kev_class = event->kev_class;
992 kev_msg.kev_subclass = event->kev_subclass;
993 kev_msg.event_code = event->event_code;
994 kev_msg.dv[0].data_ptr = &event->event_data[0];
995 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
996 kev_msg.dv[1].data_length = 0;
997
998
999 result = dlil_event_internal(ifp, &kev_msg);
1000
1001
1002 return result;
1003 }
1004
1005 int
1006 dlil_output_list(
1007 struct ifnet* ifp,
1008 u_long proto_family,
1009 struct mbuf *packetlist,
1010 caddr_t route,
1011 const struct sockaddr *dest,
1012 int raw)
1013 {
1014 char *frame_type = 0;
1015 char *dst_linkaddr = 0;
1016 int error, retval = 0;
1017 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1018 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1019 struct ifnet_filter *filter;
1020 struct if_proto *proto = 0;
1021 struct mbuf *m;
1022
1023 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
1024 #if BRIDGE
1025 if ((raw != 0) || proto_family != PF_INET || do_bridge) {
1026 #else
1027 if ((raw != 0) || proto_family != PF_INET) {
1028 #endif
1029 while (packetlist) {
1030 m = packetlist;
1031 packetlist = packetlist->m_nextpkt;
1032 m->m_nextpkt = NULL;
1033 error = dlil_output(ifp, proto_family, m, route, dest, raw);
1034 if (error) {
1035 if (packetlist)
1036 m_freem_list(packetlist);
1037 return (error);
1038 }
1039 }
1040 return (0);
1041 }
1042
1043 dlil_read_begin();
1044
1045 frame_type = frame_type_buffer;
1046 dst_linkaddr = dst_linkaddr_buffer;
1047 m = packetlist;
1048 packetlist = packetlist->m_nextpkt;
1049 m->m_nextpkt = NULL;
1050
1051 proto = find_attached_proto(ifp, proto_family);
1052 if (proto == NULL) {
1053 retval = ENXIO;
1054 goto cleanup;
1055 }
1056
1057 retval = 0;
1058 if (proto->proto_kpi == kProtoKPI_DLIL) {
1059 if (proto->kpi.dlil.dl_pre_output)
1060 retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1061 }
1062 else {
1063 if (proto->kpi.v1.pre_output)
1064 retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1065 }
1066
1067 if (retval) {
1068 if (retval != EJUSTRETURN) {
1069 m_freem(m);
1070 }
1071 goto cleanup;
1072 }
1073
1074 do {
1075
1076
1077 if (ifp->if_framer) {
1078 retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
1079 if (retval) {
1080 if (retval != EJUSTRETURN) {
1081 m_freem(m);
1082 }
1083 goto cleanup;
1084 }
1085 }
1086
1087 /*
1088 * Let interface filters (if any) do their thing ...
1089 */
1090 /* Do not pass VLAN tagged packets to filters PR-3586856 */
1091 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
1092 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1093 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
1094 filter->filt_output) {
1095 retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
1096 if (retval) {
1097 if (retval == EJUSTRETURN)
1098 continue;
1099 else {
1100 m_freem(m);
1101 }
1102 goto cleanup;
1103 }
1104 }
1105 }
1106 }
1107 /*
1108 * Finally, call the driver.
1109 */
1110
1111 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1112 retval = ifp->if_output(ifp, m);
1113 if (retval) {
1114 printf("dlil_output_list: output error retval = %x\n", retval);
1115 goto cleanup;
1116 }
1117 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1118
1119 m = packetlist;
1120 if (m) {
1121 packetlist = packetlist->m_nextpkt;
1122 m->m_nextpkt = NULL;
1123 }
1124 } while (m);
1125
1126
1127 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1128
1129 cleanup:
1130 dlil_read_end();
1131 if (packetlist) /* if any packet left, clean up */
1132 m_freem_list(packetlist);
1133 if (retval == EJUSTRETURN)
1134 retval = 0;
1135 return retval;
1136 }
1137
1138 /*
1139 * dlil_output
1140 *
1141 * Caller should have a lock on the protocol domain if the protocol
1142 * doesn't support finer grained locking. In most cases, the lock
1143 * will be held from the socket layer and won't be released until
1144 * we return back to the socket layer.
1145 *
1146 * This does mean that we must take a protocol lock before we take
1147 * an interface lock if we're going to take both. This makes sense
1148 * because a protocol is likely to interact with an ifp while it
1149 * is under the protocol lock.
1150 */
1151 int
1152 dlil_output(
1153 struct ifnet* ifp,
1154 u_long proto_family,
1155 struct mbuf *m,
1156 caddr_t route,
1157 const struct sockaddr *dest,
1158 int raw)
1159 {
1160 char *frame_type = 0;
1161 char *dst_linkaddr = 0;
1162 int retval = 0;
1163 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1164 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1165 struct ifnet_filter *filter;
1166
1167 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
1168
1169 dlil_read_begin();
1170
1171 frame_type = frame_type_buffer;
1172 dst_linkaddr = dst_linkaddr_buffer;
1173
1174 if (raw == 0) {
1175 struct if_proto *proto = 0;
1176
1177 proto = find_attached_proto(ifp, proto_family);
1178 if (proto == NULL) {
1179 m_freem(m);
1180 retval = ENXIO;
1181 goto cleanup;
1182 }
1183
1184 retval = 0;
1185 if (proto->proto_kpi == kProtoKPI_DLIL) {
1186 if (proto->kpi.dlil.dl_pre_output)
1187 retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1188 }
1189 else {
1190 if (proto->kpi.v1.pre_output)
1191 retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
1192 }
1193
1194 if (retval) {
1195 if (retval != EJUSTRETURN) {
1196 m_freem(m);
1197 }
1198 goto cleanup;
1199 }
1200 }
1201
1202 /*
1203 * Call framing module
1204 */
1205 if ((raw == 0) && (ifp->if_framer)) {
1206 retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
1207 if (retval) {
1208 if (retval != EJUSTRETURN) {
1209 m_freem(m);
1210 }
1211 goto cleanup;
1212 }
1213 }
1214
1215 #if BRIDGE
1216 /* !!!LOCKING!!!
1217 *
1218 * Need to consider how to handle this.
1219 */
1220 broken-locking
1221 if (do_bridge) {
1222 struct mbuf *m0 = m;
1223 struct ether_header *eh = mtod(m, struct ether_header *);
1224
1225 if (m->m_pkthdr.rcvif)
1226 m->m_pkthdr.rcvif = NULL;
1227 ifp = bridge_dst_lookup(eh);
1228 bdg_forward(&m0, ifp);
1229 if (m0)
1230 m_freem(m0);
1231
1232 return 0;
1233 }
1234 #endif
1235
1236
1237 /*
1238 * Let interface filters (if any) do their thing ...
1239 */
1240
1241 /* Do not pass VLAN tagged packets to filters PR-3586856 */
1242 if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
1243 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1244 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
1245 filter->filt_output) {
1246 retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
1247 if (retval) {
1248 if (retval != EJUSTRETURN)
1249 m_freem(m);
1250 goto cleanup;
1251 }
1252 }
1253 }
1254 }
1255
1256 /*
1257 * Finally, call the driver.
1258 */
1259
1260 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1261 retval = ifp->if_output(ifp, m);
1262 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1263
1264 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1265
1266 cleanup:
1267 dlil_read_end();
1268 if (retval == EJUSTRETURN)
1269 retval = 0;
1270 return retval;
1271 }
1272
1273 int
1274 dlil_ioctl(u_long proto_fam,
1275 struct ifnet *ifp,
1276 u_long ioctl_code,
1277 caddr_t ioctl_arg)
1278 {
1279 struct ifnet_filter *filter;
1280 int retval = EOPNOTSUPP;
1281 int result = 0;
1282 struct if_family_str *if_family;
1283 int holding_read = 0;
1284
1285 /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
1286 result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
1287 if (result != 0)
1288 return EOPNOTSUPP;
1289
1290 dlil_read_begin();
1291 holding_read = 1;
1292
1293 /* Run the interface filters first.
1294 * We want to run all filters before calling the protocol,
1295 * interface family, or interface.
1296 */
1297 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1298 if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
1299 filter->filt_ioctl != NULL) {
1300 result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
1301 /* Only update retval if no one has handled the ioctl */
1302 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1303 if (result == ENOTSUP)
1304 result = EOPNOTSUPP;
1305 retval = result;
1306 if (retval && retval != EOPNOTSUPP) {
1307 goto cleanup;
1308 }
1309 }
1310 }
1311 }
1312
1313 /* Allow the protocol to handle the ioctl */
1314 if (proto_fam) {
1315 struct if_proto *proto = find_attached_proto(ifp, proto_fam);
1316
1317 if (proto != 0) {
1318 result = EOPNOTSUPP;
1319 if (proto->proto_kpi == kProtoKPI_DLIL) {
1320 if (proto->kpi.dlil.dl_ioctl)
1321 result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
1322 }
1323 else {
1324 if (proto->kpi.v1.ioctl)
1325 result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
1326 }
1327
1328 /* Only update retval if no one has handled the ioctl */
1329 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1330 if (result == ENOTSUP)
1331 result = EOPNOTSUPP;
1332 retval = result;
1333 if (retval && retval != EOPNOTSUPP) {
1334 goto cleanup;
1335 }
1336 }
1337 }
1338 }
1339
1340 /*
1341 * Since we have incremented the use count on the ifp, we are guaranteed
1342 * that the ifp will not go away (the function pointers may not be changed).
1343 * We release the dlil read lock so the interface ioctl may trigger a
1344 * protocol attach. This happens with vlan and may occur with other virtual
1345 * interfaces.
1346 */
1347 dlil_read_end();
1348 holding_read = 0;
1349
1350 /* retval is either 0 or EOPNOTSUPP */
1351
1352 /*
1353 * Let the family handle this ioctl.
1354 * If it returns something non-zero and not EOPNOTSUPP, we're done.
1355 * If it returns zero, the ioctl was handled, so set retval to zero.
1356 */
1357 if_family = find_family_module(ifp->if_family);
1358 if ((if_family) && (if_family->ifmod_ioctl)) {
1359 result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1360
1361 /* Only update retval if no one has handled the ioctl */
1362 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1363 if (result == ENOTSUP)
1364 result = EOPNOTSUPP;
1365 retval = result;
1366 if (retval && retval != EOPNOTSUPP) {
1367 goto cleanup;
1368 }
1369 }
1370 }
1371
1372 /*
1373 * Let the interface handle this ioctl.
1374 * If it returns EOPNOTSUPP, ignore that, we may have
1375 * already handled this in the protocol or family.
1376 */
1377 if (ifp->if_ioctl)
1378 result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1379
1380 /* Only update retval if no one has handled the ioctl */
1381 if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
1382 if (result == ENOTSUP)
1383 result = EOPNOTSUPP;
1384 retval = result;
1385 if (retval && retval != EOPNOTSUPP) {
1386 goto cleanup;
1387 }
1388 }
1389
1390 cleanup:
1391 if (holding_read)
1392 dlil_read_end();
1393 if (ifp_unuse(ifp))
1394 ifp_use_reached_zero(ifp);
1395
1396 if (retval == EJUSTRETURN)
1397 retval = 0;
1398 return retval;
1399 }
1400
1401 __private_extern__ errno_t
1402 dlil_set_bpf_tap(
1403 ifnet_t ifp,
1404 bpf_tap_mode mode,
1405 bpf_packet_func callback)
1406 {
1407 errno_t error = 0;
1408
1409 dlil_read_begin();
1410 if (ifp->if_set_bpf_tap)
1411 error = ifp->if_set_bpf_tap(ifp, mode, callback);
1412 dlil_read_end();
1413
1414 return error;
1415 }
1416
1417 __private_extern__ errno_t
1418 dlil_resolve_multi(
1419 struct ifnet *ifp,
1420 const struct sockaddr *proto_addr,
1421 struct sockaddr *ll_addr,
1422 size_t ll_len)
1423 {
1424 errno_t result = EOPNOTSUPP;
1425 struct if_proto *proto;
1426 const struct sockaddr *verify;
1427
1428 dlil_read_begin();
1429
1430 bzero(ll_addr, ll_len);
1431
1432 /* Call the protocol first */
1433 proto = find_attached_proto(ifp, proto_addr->sa_family);
1434 if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
1435 proto->kpi.v1.resolve_multi != NULL) {
1436 result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
1437 (struct sockaddr_dl*)ll_addr, ll_len);
1438 }
1439
1440 /* Let the interface verify the multicast address */
1441 if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
1442 if (result == 0)
1443 verify = ll_addr;
1444 else
1445 verify = proto_addr;
1446 result = ifp->if_check_multi(ifp, verify);
1447 }
1448
1449 dlil_read_end();
1450
1451 return result;
1452 }
1453
1454 __private_extern__ errno_t
1455 dlil_send_arp_internal(
1456 ifnet_t ifp,
1457 u_short arpop,
1458 const struct sockaddr_dl* sender_hw,
1459 const struct sockaddr* sender_proto,
1460 const struct sockaddr_dl* target_hw,
1461 const struct sockaddr* target_proto)
1462 {
1463 struct if_proto *proto;
1464 errno_t result = 0;
1465
1466 dlil_read_begin();
1467
1468 proto = find_attached_proto(ifp, target_proto->sa_family);
1469 if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
1470 proto->kpi.v1.send_arp == NULL) {
1471 result = ENOTSUP;
1472 }
1473 else {
1474 result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
1475 target_hw, target_proto);
1476 }
1477
1478 dlil_read_end();
1479
1480 return result;
1481 }
1482
1483 __private_extern__ errno_t
1484 dlil_send_arp(
1485 ifnet_t ifp,
1486 u_short arpop,
1487 const struct sockaddr_dl* sender_hw,
1488 const struct sockaddr* sender_proto,
1489 const struct sockaddr_dl* target_hw,
1490 const struct sockaddr* target_proto)
1491 {
1492 errno_t result = 0;
1493
1494 if (target_proto == NULL || (sender_proto &&
1495 sender_proto->sa_family != target_proto->sa_family))
1496 return EINVAL;
1497
1498 /*
1499 * If this is an ARP request and the target IP is IPv4LL,
1500 * send the request on all interfaces.
1501 */
1502 if (target_proto->sa_family == AF_INET && ipv4_ll_arp_aware != 0 &&
1503 arpop == ARPOP_REQUEST &&
1504 IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)) {
1505 ifnet_t *ifp_list;
1506 u_int32_t count;
1507 u_int32_t ifp_on;
1508
1509 result = ENOTSUP;
1510
1511 if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
1512 for (ifp_on = 0; ifp_on < count; ifp_on++) {
1513 errno_t new_result;
1514 ifaddr_t source_hw = NULL;
1515 ifaddr_t source_ip = NULL;
1516 struct sockaddr_in source_ip_copy;
1517
1518 /*
1519 * Only arp on interfaces marked for IPv4LL ARPing. This may
1520 * mean that we don't ARP on the interface the subnet route
1521 * points to.
1522 */
1523 if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
1524 continue;
1525 }
1526
1527 source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);
1528
1529 /* Find the source IP address */
1530 ifnet_lock_shared(ifp_list[ifp_on]);
1531 TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
1532 ifa_link) {
1533 if (source_ip->ifa_addr &&
1534 source_ip->ifa_addr->sa_family == AF_INET) {
1535 break;
1536 }
1537 }
1538
1539 /* No IP Source, don't arp */
1540 if (source_ip == NULL) {
1541 ifnet_lock_done(ifp_list[ifp_on]);
1542 continue;
1543 }
1544
1545 /* Copy the source IP address */
1546 source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;
1547
1548 ifnet_lock_done(ifp_list[ifp_on]);
1549
1550 /* Send the ARP */
1551 new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
1552 (struct sockaddr_dl*)source_hw->ifa_addr,
1553 (struct sockaddr*)&source_ip_copy, NULL,
1554 target_proto);
1555
1556 if (result == ENOTSUP) {
1557 result = new_result;
1558 }
1559 }
1560 }
1561
1562 ifnet_list_free(ifp_list);
1563 }
1564 else {
1565 result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
1566 target_hw, target_proto);
1567 }
1568
1569 return result;
1570 }
1571
1572 static int
1573 ifp_use(
1574 struct ifnet *ifp,
1575 int handle_zero)
1576 {
1577 int old_value;
1578 int retval = 0;
1579
1580 do {
1581 old_value = ifp->if_usecnt;
1582 if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
1583 retval = ENXIO; // ifp is invalid
1584 break;
1585 }
1586 } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));
1587
1588 return retval;
1589 }
1590
1591 /* ifp_unuse is broken into two pieces.
1592 *
1593 * ifp_use and ifp_unuse must be called between the caller's calls to
1594 * dlil_write_begin and dlil_write_end. Some of the cleanup needs to
1595 * happen after dlil_write_end has been called. For this reason,
1596 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
1597 * returns a non-zero value, and must do so only after the caller
1598 * has called dlil_write_end.
1599 */
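/*
 * A sketch of the two-piece pattern described above (hypothetical
 * caller): the non-zero return from ifp_unuse defers the teardown in
 * ifp_use_reached_zero until after dlil_write_end has been called.
 */
static void
example_detach_last_use(struct ifnet *ifp)
{
	int reached_zero;

	if (dlil_write_begin() != 0)
		return;		/* would need the delayed detach path */
	/* ... detach protocols or filters from ifp here ... */
	reached_zero = ifp_unuse(ifp);
	dlil_write_end();
	if (reached_zero)
		ifp_use_reached_zero(ifp);
}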
1600 static void
1601 ifp_use_reached_zero(
1602 struct ifnet *ifp)
1603 {
1604 struct if_family_str *if_family;
1605 ifnet_detached_func free_func;
1606
1607 dlil_read_begin();
1608
1609 if (ifp->if_usecnt != 0)
1610 panic("ifp_use_reached_zero: ifp->if_usecnt != 0");
1611
1612 /* Let BPF know we're detaching */
1613 bpfdetach(ifp);
1614
1615 ifnet_head_lock_exclusive();
1616 ifnet_lock_exclusive(ifp);
1617
1618 /* Remove ourselves from the list */
1619 TAILQ_REMOVE(&ifnet_head, ifp, if_link);
1620 ifnet_addrs[ifp->if_index - 1] = 0;
1621
1622 /* ifp should be removed from the interface list */
1623 while (ifp->if_multiaddrs.lh_first) {
1624 struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;
1625
1626 /*
1627 * When the interface is gone, we will no longer
1628 * be listening on these multicasts. Various bits
1629 * of the stack may be referencing these multicasts,
1630 * release only our reference.
1631 */
1632 LIST_REMOVE(ifma, ifma_link);
1633 ifma->ifma_ifp = NULL;
1634 ifma_release(ifma);
1635 }
1636 ifnet_head_done();
1637
1638 ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
1639 ifnet_lock_done(ifp);
1640
1641 if_family = find_family_module(ifp->if_family);
1642 if (if_family && if_family->del_if)
1643 if_family->del_if(ifp);
1644 #if 0
1645 if (--if_family->if_usecnt == 0) {
1646 if (if_family->shutdown)
1647 (*if_family->shutdown)();
1648
1649 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1650 FREE(if_family, M_IFADDR);
1651 }
1652 #endif
1653
1654 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1655 free_func = ifp->if_free;
1656 dlil_read_end();
1657
1658 if (free_func)
1659 free_func(ifp);
1660 }
1661
1662 static int
1663 ifp_unuse(
1664 struct ifnet *ifp)
1665 {
1666 int oldval;
1667 oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
1668 if (oldval == 0)
1669 panic("ifp_unuse: ifp(%s%n)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);
1670
1671 if (oldval > 1)
1672 return 0;
1673
1674 if ((ifp->if_eflags & IFEF_DETACHING) == 0)
1675 panic("ifp_unuse: use count reached zero but detching flag is not set!");
1676
1677 return 1; /* caller must call ifp_use_reached_zero */
1678 }
1679
1680 void
1681 ifp_reference(
1682 struct ifnet *ifp)
1683 {
1684 int oldval;
1685 oldval = OSIncrementAtomic(&ifp->if_refcnt);
1686 }
1687
1688 void
1689 ifp_release(
1690 struct ifnet *ifp)
1691 {
1692 int oldval;
1693 oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
1694 if (oldval == 0)
1695 panic("dlil_if_reference - refcount decremented past zero!");
1696 }
1697
1698 extern lck_mtx_t *domain_proto_mtx;
1699
1700 static int
1701 dlil_attach_protocol_internal(
1702 struct if_proto *proto,
1703 const struct ddesc_head_str *demux,
1704 const struct ifnet_demux_desc *demux_list,
1705 u_int32_t demux_count)
1706 {
1707 struct ddesc_head_str temp_head;
1708 struct kev_dl_proto_data ev_pr_data;
1709 struct ifnet *ifp = proto->ifp;
1710 int retval = 0;
1711 u_long hash_value = proto_hash_value(proto->protocol_family);
1712 int if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
1713 void* free_me = NULL;
1714
1715 /* setup some of the common values */
1716
1717 {
1718 lck_mtx_lock(domain_proto_mtx);
1719 struct domain *dp = domains;
1720 while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
1721 dp = dp->dom_next;
1722 proto->dl_domain = dp;
1723 lck_mtx_unlock(domain_proto_mtx);
1724 }
1725
1726 /*
1727 * Convert the demux descriptors to a type the interface
1728 * will understand. Checking e_flags should be safe, this
1729 * flag won't change.
1730 */
1731 if (if_using_kpi && demux) {
1732 /* Convert the demux linked list to a demux_list */
1733 struct dlil_demux_desc *demux_entry;
1734 struct ifnet_demux_desc *temp_list = NULL;
1735 u_int32_t i = 0;
1736
1737 TAILQ_FOREACH(demux_entry, demux, next) {
1738 i++;
1739 }
1740
1741 temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
1742 free_me = temp_list;
1743
1744 if (temp_list == NULL)
1745 return ENOMEM;
1746
1747 i = 0;
1748 TAILQ_FOREACH(demux_entry, demux, next) {
1749 /* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
1750 if (demux_entry->type == 1 ||
1751 demux_entry->type == 2 ||
1752 demux_entry->type == 3) {
1753 FREE(free_me, M_TEMP);
1754 return ENOTSUP;
1755 }
1756
1757 temp_list[i].type = demux_entry->type;
1758 temp_list[i].data = demux_entry->native_type;
1759 temp_list[i].datalen = demux_entry->variants.native_type_length;
1760 i++;
1761 }
1762 demux_count = i;
1763 demux_list = temp_list;
1764 }
1765 else if (!if_using_kpi && demux_list != NULL) {
1766 struct dlil_demux_desc *demux_entry;
1767 u_int32_t i = 0;
1768
1769 demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
1770 free_me = demux_entry;
1771 if (demux_entry == NULL)
1772 return ENOMEM;
1773
1774 TAILQ_INIT(&temp_head);
1775
1776 for (i = 0; i < demux_count; i++) {
1777 demux_entry[i].type = demux_list[i].type;
1778 demux_entry[i].native_type = demux_list[i].data;
1779 demux_entry[i].variants.native_type_length = demux_list[i].datalen;
1780 TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
1781 }
1782 demux = &temp_head;
1783 }
1784
1785 /*
1786 * Take the write lock to protect readers and exclude other writers.
1787 */
1788 dlil_write_begin();
1789
1790 /* Check that the interface isn't currently detaching */
1791 ifnet_lock_shared(ifp);
1792 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
1793 ifnet_lock_done(ifp);
1794 dlil_write_end();
1795 if (free_me)
1796 FREE(free_me, M_TEMP);
1797 return ENXIO;
1798 }
1799 ifnet_lock_done(ifp);
1800
1801 if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
1802 dlil_write_end();
1803 if (free_me)
1804 FREE(free_me, M_TEMP);
1805 return EEXIST;
1806 }
1807
1808 /*
1809 * Call family module add_proto routine so it can refine the
1810 * demux descriptors as it wishes.
1811 */
1812 if (if_using_kpi)
1813 retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
1814 else {
1815 retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
1816 _cast_non_const(demux));
1817 }
1818 if (retval) {
1819 dlil_write_end();
1820 if (free_me)
1821 FREE(free_me, M_TEMP);
1822 return retval;
1823 }
1824
1825 /*
1826 * We can't fail from this point on.
1827 * Increment the number of uses (protocol attachments + interface attached).
1828 */
1829 ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
1830
1831 /*
1832 * Insert the protocol in the hash
1833 */
1834 {
1835 struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
1836 while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
1837 prev_proto = SLIST_NEXT(prev_proto, next_hash);
1838 if (prev_proto)
1839 SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
1840 else
1841 SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
1842 }
1843
1844 /*
1845 * Add to if_proto list for this interface
1846 */
1847 if_proto_ref(proto);
1848 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
1849 ifp->offercnt++;
1850 dlil_write_end();
1851
1852 /* the reserved field carries the number of protocols still attached (subject to change) */
1853 ev_pr_data.proto_family = proto->protocol_family;
1854 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1855 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
1856 (struct net_event_data *)&ev_pr_data,
1857 sizeof(struct kev_dl_proto_data));
1858
1859 DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
1860 ifp->if_name, ifp->if_unit, retval);
1861 if (free_me)
1862 FREE(free_me, M_TEMP);
1863 return retval;
1864 }
1865
1866 __private_extern__ int
1867 dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
1868 const struct ifnet_attach_proto_param *proto_details)
1869 {
1870 int retval = 0;
1871 struct if_proto *ifproto = NULL;
1872
1873 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1874 if (ifproto == 0) {
1875 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
1876 retval = ENOMEM;
1877 goto end;
1878 }
1879 bzero(ifproto, sizeof(*ifproto));
1880
1881 ifproto->ifp = ifp;
1882 ifproto->protocol_family = protocol;
1883 ifproto->proto_kpi = kProtoKPI_v1;
1884 ifproto->kpi.v1.input = proto_details->input;
1885 ifproto->kpi.v1.pre_output = proto_details->pre_output;
1886 ifproto->kpi.v1.event = proto_details->event;
1887 ifproto->kpi.v1.ioctl = proto_details->ioctl;
1888 ifproto->kpi.v1.detached = proto_details->detached;
1889 ifproto->kpi.v1.resolve_multi = proto_details->resolve;
1890 ifproto->kpi.v1.send_arp = proto_details->send_arp;
1891
1892 retval = dlil_attach_protocol_internal(ifproto, NULL,
1893 proto_details->demux_list, proto_details->demux_count);
1894
1895 end:
1896 if (retval && ifproto)
1897 FREE(ifproto, M_IFADDR);
1898 return retval;
1899 }
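/*
 * Sketch of a hypothetical KPI client.  Only the callbacks the
 * protocol needs are filled in; the rest stay NULL.  The demux
 * descriptor, EtherType and handler below are assumptions purely
 * for illustration.
 */
static errno_t
example_proto_input(__unused ifnet_t ifp,
    __unused protocol_family_t protocol, mbuf_t packet,
    __unused char *header)
{
	mbuf_freem(packet);	/* placeholder handler: drop the packet */
	return 0;
}

static int
example_attach_proto(ifnet_t ifp, protocol_family_t family)
{
	struct ifnet_attach_proto_param param;
	struct ifnet_demux_desc desc;
	u_int16_t etype = htons(0x888e);	/* hypothetical EtherType */

	bzero(&param, sizeof(param));
	bzero(&desc, sizeof(desc));
	desc.type = DLIL_DESC_ETYPE2;
	desc.data = &etype;
	desc.datalen = sizeof(etype);
	param.demux_list = &desc;
	param.demux_count = 1;
	param.input = example_proto_input;
	return dlil_attach_protocol_kpi(ifp, family, &param);
}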
1900
1901 int
1902 dlil_attach_protocol(struct dlil_proto_reg_str *proto)
1903 {
1904 struct ifnet *ifp = NULL;
1905 struct if_proto *ifproto = NULL;
1906 int retval = 0;
1907
1908 /*
1909 * Do everything we can before taking the write lock
1910 */
1911
1912 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1913 return EINVAL;
1914
1915 /*
1916 * Allocate and init a new if_proto structure
1917 */
1918 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1919 if (!ifproto) {
1920 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
1921 retval = ENOMEM;
1922 goto end;
1923 }
1924
1925
1926 /* ifbyfamily returns us an ifp with an incremented if_usecnt */
1927 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1928 if (!ifp) {
1929 DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
1930 proto->interface_family, proto->unit_number);
1931 retval = ENXIO;
1932 goto end;
1933 }
1934
1935 bzero(ifproto, sizeof(struct if_proto));
1936
1937 ifproto->ifp = ifp;
1938 ifproto->protocol_family = proto->protocol_family;
1939 ifproto->proto_kpi = kProtoKPI_DLIL;
1940 ifproto->kpi.dlil.dl_input = proto->input;
1941 ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
1942 ifproto->kpi.dlil.dl_event = proto->event;
1943 ifproto->kpi.dlil.dl_offer = proto->offer;
1944 ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
1945 ifproto->kpi.dlil.dl_detached = proto->detached;
1946
1947 retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);
1948
1949 end:
1950 if (retval && ifproto)
1951 FREE(ifproto, M_IFADDR);
1952 return retval;
1953 }
1954
1955 extern void if_rtproto_del(struct ifnet *ifp, int protocol);
1956
1957 static int
1958 dlil_detach_protocol_internal(
1959 struct if_proto *proto)
1960 {
1961 struct ifnet *ifp = proto->ifp;
1962 u_long proto_family = proto->protocol_family;
1963 struct kev_dl_proto_data ev_pr_data;
1964
1965 if (proto->proto_kpi == kProtoKPI_DLIL) {
1966 if (proto->kpi.dlil.dl_detached)
1967 proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
1968 }
1969 else {
1970 if (proto->kpi.v1.detached)
1971 proto->kpi.v1.detached(ifp, proto->protocol_family);
1972 }
1973 if_proto_free(proto);
1974
1975 /*
1976 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1977 */
1978
1979 if_rtproto_del(ifp, proto_family);
1980
1981 /* the reserved field carries the number of protocols still attached (subject to change) */
1982 ev_pr_data.proto_family = proto_family;
1983 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1984 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1985 (struct net_event_data *)&ev_pr_data,
1986 sizeof(struct kev_dl_proto_data));
1987 return 0;
1988 }

int
dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
{
	struct if_proto *proto = NULL;
	int retval = 0;
	int use_reached_zero = 0;

	if ((retval = dlil_write_begin()) != 0) {
		if (retval == EDEADLK) {
			/*
			 * We can't take the write lock from this context;
			 * mark the protocol for the delayed detach thread
			 * and report success to the caller.
			 */
			retval = 0;
			dlil_read_begin();
			proto = find_attached_proto(ifp, proto_family);
			if (proto == 0) {
				retval = ENXIO;
			}
			else {
				proto->detaching = 1;
				dlil_detach_waiting = 1;
				wakeup(&dlil_detach_waiting);
			}
			dlil_read_end();
		}
		goto end;
	}

	proto = find_attached_proto(ifp, proto_family);

	if (proto == NULL) {
		retval = ENXIO;
		dlil_write_end();
		goto end;
	}

	/*
	 * Call the family module's del_proto.
	 */
	if (ifp->if_del_proto)
		ifp->if_del_proto(ifp, proto->protocol_family);

	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt--;

	SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);

	/*
	 * We can do the rest of the work outside of the write lock.
	 */
	use_reached_zero = ifp_unuse(ifp);
	dlil_write_end();

	dlil_detach_protocol_internal(proto);

	/*
	 * Only handle the case where the interface will go away after
	 * we've sent the message. This way dlil_post_msg can deliver the
	 * message to the interface safely.
	 */
	if (use_reached_zero)
		ifp_use_reached_zero(ifp);

end:
	return retval;
}

/*
 * dlil_delayed_detach_thread is responsible for detaching
 * protocols, protocol filters, and interface filters after
 * an attempt was made to detach one of those items while
 * it was not safe to do so (i.e. while dlil_read_begin was
 * in effect).
 *
 * This function takes the dlil write lock and walks
 * through each of the interfaces looking for items with
 * the detaching flag set. When an item is found, it is
 * detached from the interface and placed on a local list.
 * After all of the items have been collected, we drop the
 * write lock and perform the post-detach work. This is done
 * so we only have to take the write lock once.
 *
 * When detaching a protocol, if we find that we have
 * detached the very last protocol and we need to call
 * ifp_use_reached_zero, we have to break out of our work
 * to drop the write lock so we can call ifp_use_reached_zero.
 */

static void
dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
{
	thread_t self = current_thread();
	int asserted = 0;

	ml_thread_policy(self, MACHINE_GROUP,
			 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

	while (1) {
		if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
			struct ifnet *ifp;
			struct proto_hash_entry detached_protos;
			struct ifnet_filter_head detached_filters;
			struct if_proto *proto;
			struct if_proto *next_proto;
			struct ifnet_filter *filt;
			struct ifnet_filter *next_filt;
			int reached_zero;

			reached_zero = 0;

			/* Clear the detach waiting flag */
			dlil_detach_waiting = 0;
			TAILQ_INIT(&detached_filters);
			SLIST_INIT(&detached_protos);

			ifnet_head_lock_shared();
			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
				int i;

				// Look for protocols and protocol filters
				for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
					struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
					for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {

						// Detach this protocol
						if (proto->detaching) {
							if (ifp->if_del_proto)
								ifp->if_del_proto(ifp, proto->protocol_family);
							if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
								ifp->offercnt--;
							*prev_nextptr = SLIST_NEXT(proto, next_hash);
							SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
							reached_zero = ifp_unuse(ifp);
							if (reached_zero) {
								break;
							}
						}
						else {
							// Update prev_nextptr to point to our next ptr
							prev_nextptr = &SLIST_NEXT(proto, next_hash);
						}
					}
				}

				// Look for interface filters that need to be detached
				for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
					next_filt = TAILQ_NEXT(filt, filt_next);
					if (filt->filt_detaching != 0) {
						// Take this interface filter off the interface filter list
						TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);

						// Put this interface filter on the detached filters list
						TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
					}
				}

				if (ifp->if_delayed_detach) {
					ifp->if_delayed_detach = 0;
					reached_zero = ifp_unuse(ifp);
				}

				if (reached_zero)
					break;
			}
			ifnet_head_done();
			dlil_write_end();

			for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
				next_filt = TAILQ_NEXT(filt, filt_next);
				/*
				 * dlil_detach_filter_internal won't remove an item from
				 * the list if it is already detached (second parameter).
				 * The item will be freed though.
				 */
				dlil_detach_filter_internal(filt, 1);
			}

			for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
				next_proto = SLIST_NEXT(proto, next_hash);
				dlil_detach_protocol_internal(proto);
			}

			if (reached_zero) {
				ifp_use_reached_zero(ifp);
				dlil_detach_waiting = 1; // we may have missed something
			}
		}

		if (!asserted && dlil_detach_waiting == 0) {
			asserted = 1;
			assert_wait(&dlil_detach_waiting, THREAD_UNINT);
		}

		if (dlil_detach_waiting == 0) {
			asserted = 0;
			thread_block(dlil_delayed_detach_thread);
		}
	}
}
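
/*
 * The loop above follows a common pattern for lock-scoped teardown:
 * unlink items onto a local list while the write lock is held, then
 * drop the lock and run the (potentially blocking) per-item cleanup.
 * A minimal userland sketch of the same pattern, for illustration
 * only — the names queue_head, item, and destroy_item are hypothetical
 * and not part of this file:
 *
 *	#include <pthread.h>
 *	#include <sys/queue.h>
 *
 *	struct item {
 *		TAILQ_ENTRY(item) link;
 *		int detaching;
 *	};
 *	TAILQ_HEAD(item_head, item);
 *
 *	static struct item_head queue_head = TAILQ_HEAD_INITIALIZER(queue_head);
 *	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	extern void destroy_item(struct item *ip);	// may block; must not hold queue_lock
 *
 *	static void
 *	drain_detaching(void)
 *	{
 *		struct item_head local = TAILQ_HEAD_INITIALIZER(local);
 *		struct item *ip, *next;
 *
 *		// Phase 1: collect under the lock; no blocking work here.
 *		pthread_mutex_lock(&queue_lock);
 *		for (ip = TAILQ_FIRST(&queue_head); ip; ip = next) {
 *			next = TAILQ_NEXT(ip, link);
 *			if (ip->detaching) {
 *				TAILQ_REMOVE(&queue_head, ip, link);
 *				TAILQ_INSERT_TAIL(&local, ip, link);
 *			}
 *		}
 *		pthread_mutex_unlock(&queue_lock);
 *
 *		// Phase 2: clean up outside the lock, one item at a time.
 *		for (ip = TAILQ_FIRST(&local); ip; ip = next) {
 *			next = TAILQ_NEXT(ip, link);
 *			destroy_item(ip);
 *		}
 *	}
 */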

static void
dlil_call_delayed_detach_thread(void)
{
	dlil_delayed_detach_thread(NULL, THREAD_RESTART);
}

extern int if_next_index(void);

__private_extern__ int
dlil_if_attach_with_address(
	struct ifnet *ifp,
	const struct sockaddr_dl *ll_addr)
{
	u_long interface_family = ifp->if_family;
	struct if_family_str *if_family = NULL;
	int stat;
	struct ifnet *tmp_if;
	struct proto_hash_entry *new_proto_list = NULL;
	int locked = 0;

	ifnet_head_lock_shared();

	/* Verify we aren't already on the list */
	TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
		if (tmp_if == ifp) {
			ifnet_head_done();
			return EEXIST;
		}
	}

	ifnet_head_done();

	if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
#if IFNET_RW_LOCK
		ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
#else
		ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
#endif

	if (ifp->if_lock == 0) {
		return ENOMEM;
	}

	// Only use a family module if this is not a KPI interface
	if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
		if_family = find_family_module(interface_family);
	}

	/*
	 * Allow interfaces without protocol families to attach
	 * only if they have the necessary fields filled out.
	 */
	if ((if_family == 0) &&
	    (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
		DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
			    interface_family);
		return ENODEV;
	}

	if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
		MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
		       M_NKE, M_WAITOK);

		if (new_proto_list == 0) {
			return ENOBUFS;
		}
	}

	dlil_write_begin();
	locked = 1;

	/*
	 * Call the family module to fill in the appropriate fields in the
	 * ifnet structure.
	 */
	if (if_family) {
		stat = if_family->add_if(ifp);
		if (stat) {
			DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
			dlil_write_end();
			return stat;
		}
		ifp->if_add_proto_u.original = if_family->add_proto;
		ifp->if_del_proto = if_family->del_proto;
		if_family->refcnt++;
	}

	ifp->offercnt = 0;
	TAILQ_INIT(&ifp->if_flt_head);

	if (new_proto_list) {
		bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
		ifp->if_proto_hash = new_proto_list;
		new_proto_list = 0;
	}

	/* old_if_attach */
	{
		struct ifaddr *ifa = 0;

		if (ifp->if_snd.ifq_maxlen == 0)
			ifp->if_snd.ifq_maxlen = ifqmaxlen;
		TAILQ_INIT(&ifp->if_prefixhead);
		LIST_INIT(&ifp->if_multiaddrs);
		ifnet_touch_lastchange(ifp);

		/* usecount to track attachment to the ifnet list */
		ifp_use(ifp, kIfNetUseCount_MayBeZero);

		/* Lock the list of interfaces */
		ifnet_head_lock_exclusive();
		ifnet_lock_exclusive(ifp);

		if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
			char workbuf[64];
			int namelen, masklen, socksize, ifasize;

			ifp->if_index = if_next_index();

			namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
			masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
			socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
			if ((u_long)socksize < sizeof(struct sockaddr_dl))
				socksize = sizeof(struct sockaddr_dl);
			socksize = ROUNDUP(socksize);
			ifasize = sizeof(struct ifaddr) + 2 * socksize;
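			/*
			 * Worked example of the sizing above (illustrative;
			 * the concrete numbers assume a 32-bit long and the
			 * historical 12-byte sdl_data in struct sockaddr_dl):
			 * for "en0" with a 6-byte link address, namelen = 3,
			 * masklen = 8 + 3 = 11, socksize = 11 + 6 = 17, which
			 * is below sizeof(struct sockaddr_dl) (20), so it is
			 * raised to 20; ROUNDUP(20) stays 20, and ifasize is
			 * sizeof(struct ifaddr) + 2 * 20 — room for both the
			 * address and the netmask sockaddr_dl that follow the
			 * ifaddr in a single allocation.
			 */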
			ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
			if (ifa) {
				struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
				ifnet_addrs[ifp->if_index - 1] = ifa;
				bzero(ifa, ifasize);
				sdl->sdl_len = socksize;
				sdl->sdl_family = AF_LINK;
				bcopy(workbuf, sdl->sdl_data, namelen);
				sdl->sdl_nlen = namelen;
				sdl->sdl_index = ifp->if_index;
				sdl->sdl_type = ifp->if_type;
				if (ll_addr) {
					sdl->sdl_alen = ll_addr->sdl_alen;
					if (ll_addr->sdl_alen != ifp->if_addrlen)
						panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
					bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
				}
				ifa->ifa_ifp = ifp;
				ifa->ifa_rtrequest = link_rtrequest;
				ifa->ifa_addr = (struct sockaddr*)sdl;
				sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
				ifa->ifa_netmask = (struct sockaddr*)sdl;
				sdl->sdl_len = masklen;
				while (namelen != 0)
					sdl->sdl_data[--namelen] = 0xff;
			}
		}
		else {
			/* preserve the first ifaddr */
			ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
		}

		TAILQ_INIT(&ifp->if_addrhead);
		ifa = ifnet_addrs[ifp->if_index - 1];

		if (ifa) {
			/*
			 * We don't use if_attach_ifa because we want
			 * this address to be first on the list.
			 */
			ifaref(ifa);
			ifa->ifa_debug |= IFA_ATTACHED;
			TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
		}

		TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
		ifindex2ifnet[ifp->if_index] = ifp;

		ifnet_head_done();
	}
	dlil_write_end();

	if (if_family && if_family->init_if) {
		stat = if_family->init_if(ifp);
		if (stat) {
			DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
		}
	}

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
	ifnet_lock_done(ifp);

	return 0;
}

int
dlil_if_attach(struct ifnet *ifp)
{
	return dlil_if_attach_with_address(ifp, NULL);
}


int
dlil_if_detach(struct ifnet *ifp)
{
	struct ifnet_filter *filter;
	struct ifnet_filter *filter_next;
	int zeroed = 0;
	int retval = 0;
	struct ifnet_filter_head fhead;

	ifnet_lock_exclusive(ifp);

	if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
		/* Interface has already been detached */
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	/*
	 * Indicate this interface is being detached.
	 *
	 * This should prevent protocols from attaching
	 * from this point on. The interface will remain on
	 * the list until all of the protocols are detached.
	 */
	ifp->if_eflags |= IFEF_DETACHING;
	ifnet_lock_done(ifp);

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);

	if ((retval = dlil_write_begin()) != 0) {
		if (retval == EDEADLK) {
			retval = DLIL_WAIT_FOR_FREE;

			/* We need to perform a delayed detach */
			ifp->if_delayed_detach = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
		}
		return retval;
	}

	/* Steal the list of interface filters */
	fhead = ifp->if_flt_head;
	TAILQ_INIT(&ifp->if_flt_head);

	/* unuse the interface */
	zeroed = ifp_unuse(ifp);

	dlil_write_end();

	for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
		filter_next = TAILQ_NEXT(filter, filt_next);
		dlil_detach_filter_internal(filter, 1);
	}

	if (zeroed == 0) {
		retval = DLIL_WAIT_FOR_FREE;
	}
	else {
		ifp_use_reached_zero(ifp);
	}

	return retval;
}
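
/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * calls dlil_if_detach must not free its ifnet until DLIL is done
 * with it. A return of 0 here means the detach completed immediately;
 * DLIL_WAIT_FOR_FREE means outstanding references remain and the
 * driver should wait for its if_free callback before reclaiming
 * storage. The names my_softc and my_detach_done are hypothetical.
 *
 *	int err = dlil_if_detach(ifp);
 *	if (err == DLIL_WAIT_FOR_FREE) {
 *		// if_free will fire later; defer cleanup until then
 *		my_softc->detaching = 1;
 *	} else if (err == 0) {
 *		my_detach_done(my_softc);	// safe to reclaim now
 *	}
 */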


int
dlil_reg_if_modules(u_long interface_family,
		    struct dlil_ifmod_reg_str *ifmod)
{
	struct if_family_str *if_family;

	if (find_family_module(interface_family)) {
		DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
			    interface_family);
		return EEXIST;
	}

	if ((!ifmod->add_if) || (!ifmod->del_if) ||
	    (!ifmod->add_proto) || (!ifmod->del_proto)) {
		DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
		return EINVAL;
	}

	/*
	 * The following is a gross hack to keep from breaking
	 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
	 * does not zero the reserved fields in dlil_ifmod_reg_str.
	 * As a result, we have to zero any function that used to
	 * be reserved fields at the time Vicomsoft built their
	 * kext. Radar #2974305
	 */
	if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
		if (interface_family == 123) { /* Vicom */
			ifmod->init_if = 0;
		} else {
			return EINVAL;
		}
	}

	if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
	if (!if_family) {
		DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
		return ENOMEM;
	}

	bzero(if_family, sizeof(struct if_family_str));

	if_family->if_family = interface_family & 0xffff;
	if_family->shutdown = ifmod->shutdown;
	if_family->add_if = ifmod->add_if;
	if_family->del_if = ifmod->del_if;
	if_family->init_if = ifmod->init_if;
	if_family->add_proto = ifmod->add_proto;
	if_family->del_proto = ifmod->del_proto;
	if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
	if_family->refcnt = 1;
	if_family->flags = 0;

	TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
	return 0;
}
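
/*
 * Registration sketch (illustrative; the handler names and MY_IF_FAMILY
 * are hypothetical, and the handler prototypes are assumed from how the
 * fields are invoked in this file — add_if/del_if/init_if take the
 * ifnet, del_proto takes the ifnet and protocol family). A family
 * module fills in dlil_ifmod_reg_str with the four mandatory
 * callbacks, zeroing everything else, before registering:
 *
 *	struct dlil_ifmod_reg_str mod;
 *
 *	bzero(&mod, sizeof(mod));	// reserved fields must be zero
 *	mod.add_if = my_add_if;		// required
 *	mod.del_if = my_del_if;		// required
 *	mod.add_proto = my_add_proto;	// required
 *	mod.del_proto = my_del_proto;	// required
 *	mod.init_if = my_init_if;	// optional
 *	if (dlil_reg_if_modules(MY_IF_FAMILY, &mod) != 0)
 *		printf("family registration failed\n");	// EEXIST, EINVAL, or ENOMEM per the checks above
 */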

int
dlil_dereg_if_modules(u_long interface_family)
{
	struct if_family_str *if_family;
	int ret = 0;

	if_family = find_family_module(interface_family);
	if (if_family == 0) {
		return ENXIO;
	}

	if (--if_family->refcnt == 0) {
		if (if_family->shutdown)
			(*if_family->shutdown)();

		TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
		FREE(if_family, M_IFADDR);
	}
	else {
		if_family->flags |= DLIL_SHUTDOWN;
		ret = DLIL_WAIT_FOR_FREE;
	}

	return ret;
}


int
dlil_reg_proto_module(
	u_long protocol_family,
	u_long interface_family,
	int (*attach)(struct ifnet *ifp, u_long protocol_family),
	int (*detach)(struct ifnet *ifp, u_long protocol_family))
{
	struct proto_family_str *proto_family;

	if (attach == NULL)
		return EINVAL;

	lck_mtx_lock(proto_family_mutex);

	TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
		if (proto_family->proto_family == protocol_family &&
		    proto_family->if_family == interface_family) {
			lck_mtx_unlock(proto_family_mutex);
			return EEXIST;
		}
	}

	proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
	if (!proto_family) {
		lck_mtx_unlock(proto_family_mutex);
		return ENOMEM;
	}

	bzero(proto_family, sizeof(struct proto_family_str));
	proto_family->proto_family = protocol_family;
	proto_family->if_family = interface_family & 0xffff;
	proto_family->attach_proto = attach;
	proto_family->detach_proto = detach;

	TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
	lck_mtx_unlock(proto_family_mutex);
	return 0;
}
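
/*
 * Registration sketch (illustrative, not part of this file): a
 * protocol module supplies attach/detach handlers matching the
 * prototypes above. The names my_inet_attach/my_inet_detach and the
 * pairing of PF_INET with APPLE_IF_FAM_ETHERNET are hypothetical:
 *
 *	static int
 *	my_inet_attach(struct ifnet *ifp, u_long protocol_family)
 *	{
 *		// attach PF_INET to this ethernet interface,
 *		// typically via dlil_attach_protocol
 *		return 0;
 *	}
 *
 *	static int
 *	my_inet_detach(struct ifnet *ifp, u_long protocol_family)
 *	{
 *		return dlil_detach_protocol(ifp, protocol_family);
 *	}
 *
 *	err = dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
 *				    my_inet_attach, my_inet_detach);
 */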

int
dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);

	proto_family = find_proto_module(protocol_family, interface_family);
	if (proto_family == 0) {
		lck_mtx_unlock(proto_family_mutex);
		return ENXIO;
	}

	TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
	FREE(proto_family, M_IFADDR);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}

int
dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);
	proto_family = find_proto_module(protocol_family, ifp->if_family);
	if (proto_family == 0) {
		lck_mtx_unlock(proto_family_mutex);
		return ENXIO;
	}

	ret = proto_family->attach_proto(ifp, protocol_family);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}


int
dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);

	proto_family = find_proto_module(protocol_family, ifp->if_family);
	if (proto_family && proto_family->detach_proto)
		ret = proto_family->detach_proto(ifp, protocol_family);
	else
		ret = dlil_detach_protocol(ifp, protocol_family);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}
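
/*
 * Plumbing sketch (illustrative): once a protocol module is
 * registered as in the sketch after dlil_reg_proto_module, attaching
 * the protocol to a live interface is a single call that dispatches
 * to the registered attach handler; detach falls back to
 * dlil_detach_protocol when no detach handler was supplied:
 *
 *	err = dlil_plumb_protocol(PF_INET, ifp);	// dispatches to my_inet_attach
 *	...
 *	err = dlil_unplumb_protocol(PF_INET, ifp);	// dispatches to my_inet_detach
 */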

/*
 * Stub callbacks installed on a recycled ifnet (see dlil_if_release
 * below) so that stray calls into a released interface fail or no-op
 * safely instead of calling into driver code that may be gone.
 */
static errno_t
dlil_recycle_ioctl(
	__unused ifnet_t ifnet_ptr,
	__unused u_int32_t ioctl_code,
	__unused void *ioctl_arg)
{
	return EOPNOTSUPP;
}

static int
dlil_recycle_output(
	__unused struct ifnet *ifnet_ptr,
	struct mbuf *m)
{
	m_freem(m);
	return 0;
}

static void
dlil_recycle_free(
	__unused ifnet_t ifnet_ptr)
{
}

static errno_t
dlil_recycle_set_bpf_tap(
	__unused ifnet_t ifp,
	__unused bpf_tap_mode mode,
	__unused bpf_packet_func callback)
{
	/* XXX not sure what to do here */
	return 0;
}

int
dlil_if_acquire(
	u_long family,
	const void *uniqueid,
	size_t uniqueid_len,
	struct ifnet **ifp)
{
	struct ifnet *ifp1 = NULL;
	struct dlil_ifnet *dlifp1 = NULL;
	int ret = 0;

	lck_mtx_lock(dlil_ifnet_mutex);
	TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {

		ifp1 = (struct ifnet *)dlifp1;

		if (ifp1->if_family == family) {

			/* same length uniqueid with matching bytes (both zero-length when no uniqueid was specified) */
			if ((uniqueid_len == dlifp1->if_uniqueid_len)
			    && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {

				/* check for matching interface in use */
				if (ifp1->if_eflags & IFEF_INUSE) {
					if (uniqueid_len) {
						ret = EBUSY;
						goto end;
					}
				}
				else {
					if (!ifp1->if_lock)
						panic("ifp's lock is gone\n");
					ifnet_lock_exclusive(ifp1);
					ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
					ifnet_lock_done(ifp1);
					*ifp = ifp1;
					goto end;
				}
			}
		}
	}

	/* no interface found, allocate a new one */
	MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
	if (dlifp1 == 0) {
		ret = ENOMEM;
		goto end;
	}

	bzero(dlifp1, sizeof(*dlifp1));

	if (uniqueid_len) {
		MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
		if (dlifp1->if_uniqueid == 0) {
			FREE(dlifp1, M_NKE);
			ret = ENOMEM;
			goto end;
		}
		bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
		dlifp1->if_uniqueid_len = uniqueid_len;
	}

	ifp1 = (struct ifnet *)dlifp1;
	ifp1->if_eflags |= IFEF_INUSE;
	ifp1->if_name = dlifp1->if_namestorage;

	TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);

	*ifp = ifp1;

end:
	lck_mtx_unlock(dlil_ifnet_mutex);

	return ret;
}
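
/*
 * Lifecycle sketch (illustrative, not part of this file): a driver
 * that wants its unit to survive unload/reload acquires its ifnet
 * through this recycling pool before attaching. The uniqueid here
 * (a MAC address) and the setup details are hypothetical:
 *
 *	struct ifnet *ifp = NULL;
 *	u_char mac[6] = { 0x00, 0x0a, 0x95, 0x01, 0x02, 0x03 };
 *
 *	if (dlil_if_acquire(APPLE_IF_FAM_ETHERNET, mac, sizeof(mac), &ifp) == 0) {
 *		// fill in if_name, if_unit, callbacks, etc., then attach;
 *		// IFEF_REUSE is set on a recycled ifnet, so dlil_if_attach
 *		// preserves its index and first ifaddr
 *		dlil_if_attach(ifp);
 *	}
 *	// EBUSY means an interface with this uniqueid is already in use
 */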

void
dlil_if_release(struct ifnet *ifp)
{
	struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;

	/* Interface does not have a lock until it is attached - radar 3713951 */
	if (ifp->if_lock)
		ifnet_lock_exclusive(ifp);
	ifp->if_eflags &= ~IFEF_INUSE;

	/* Point the callbacks at harmless stubs while the ifnet sits in the recycling pool */
	ifp->if_ioctl = dlil_recycle_ioctl;
	ifp->if_output = dlil_recycle_output;
	ifp->if_free = dlil_recycle_free;
	ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;

	strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
	ifp->if_name = dlifp->if_namestorage;
	if (ifp->if_lock)
		ifnet_lock_done(ifp);
}