/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG		DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END		DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT	DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))


#define MAX_DL_TAGS		16
#define MAX_DLIL_FILTERS	16
#define MAX_FRAME_TYPE_SIZE	4 /* LONGWORDS */
#define MAX_LINKADDR		4 /* LONGWORDS */
#define M_NKE			M_IFADDR

#define PFILT(x)	((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x)	((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF	printf
#else
#define DLIL_PRINTF	kprintf
#endif

enum {
	kProtoKPI_DLIL	= 0,
	kProtoKPI_v1	= 1
};

struct if_proto {
	SLIST_ENTRY(if_proto)	next_hash;
	int			refcount;
	int			detaching;
	struct ifnet		*ifp;
	struct domain		*dl_domain;
	protocol_family_t	protocol_family;
	int			proto_kpi;
	union {
		struct {
			dl_input_func		dl_input;
			dl_pre_output_func	dl_pre_output;
			dl_event_func		dl_event;
			dl_offer_func		dl_offer;
			dl_ioctl_func		dl_ioctl;
			dl_detached_func	dl_detached;
		} dlil;
		struct {
			proto_media_input	input;
			proto_media_preout	pre_output;
			proto_media_event	event;
			proto_media_ioctl	ioctl;
			proto_media_detached	detached;
			proto_media_resolve_multi resolve_multi;
			proto_media_send_arp	send_arp;
		} v1;
	} kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);


struct dlil_ifnet {
	/* ifnet and drvr_ext are used by the stack and drivers;
	   drvr_ext extends the public ifnet and must follow dl_if */
	struct ifnet	dl_if;			/* public ifnet */

	/* dlil private fields */
	TAILQ_ENTRY(dlil_ifnet) dl_if_link;	/* dlil_ifnets are linked together; */
						/* this is not the ifnet list */
	void		*if_uniqueid;		/* unique id identifying the interface */
	size_t		if_uniqueid_len;	/* length of the unique id */
	char		if_namestorage[IFNAMSIZ]; /* interface name storage */
};

struct ifnet_filter {
	TAILQ_ENTRY(ifnet_filter)	filt_next;
	ifnet_t				filt_ifp;
	int				filt_detaching;

	const char			*filt_name;
	void				*filt_cookie;
	protocol_family_t		filt_protocol;
	iff_input_func			filt_input;
	iff_output_func			filt_output;
	iff_event_func			filt_event;
	iff_ioctl_func			filt_ioctl;
	iff_detached_func		filt_detached;
};

struct if_family_str {
	TAILQ_ENTRY(if_family_str)	if_fam_next;
	u_long				if_family;
	int				refcnt;
	int				flags;

#define DLIL_SHUTDOWN 1

	int (*add_if)(struct ifnet *ifp);
	int (*del_if)(struct ifnet *ifp);
	int (*init_if)(struct ifnet *ifp);
	int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
	ifnet_del_proto_func		del_proto;
	ifnet_ioctl_func		ifmod_ioctl;
	int (*shutdown)(void);
};

struct proto_family_str {
	TAILQ_ENTRY(proto_family_str)	proto_fam_next;
	u_long				proto_family;
	u_long				if_family;
	int				usecnt;

	int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
	int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
	kIfNetUseCount_MayBeZero = 0,
	kIfNetUseCount_MustNotBeZero = 1
};

static TAILQ_HEAD(, dlil_ifnet)		dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str)	if_family_head;
static TAILQ_HEAD(, proto_family_str)	proto_family_head;
static lck_grp_t	*dlil_lock_group;
static lck_grp_t	*ifnet_lock_group;
static lck_grp_t	*ifnet_head_lock_group;
static lck_attr_t	*ifnet_lock_attr;
static lck_mtx_t	*proto_family_mutex;
static lck_rw_t		*ifnet_head_mutex;
static lck_mtx_t	*dlil_ifnet_mutex;
static lck_mtx_t	*dlil_mutex;
static unsigned long	dlil_read_count = 0;
static unsigned long	dlil_detach_waiting = 0;
extern u_int32_t	ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t		*dlil_input_lock;
__private_extern__ thread_t	dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void	dlil_read_begin(void);
static void	dlil_read_end(void);
static int	dlil_write_begin(void);
static void	dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr


int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t	inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
	union {
		const void*	cval;
		void*		val;
	} ret;

	ret.cval = ptr;
	return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
	unsigned long new_value;
	unsigned long old_value;
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read == dlil_writer_waiting)
		panic("dlil_read_begin - thread is already a writer");

	do {
again:
		old_value = dlil_read_count;

		if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
		{
			tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
			goto again;
		}

		new_value = old_value + 1;
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

	uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	OSDecrementAtomic((UInt32*)&dlil_read_count);
	uth->dlil_incremented_read--;
	if (dlil_read_count == dlil_writer_waiting)
		wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != 0) {
		return EDEADLK;
	}
	lck_mtx_lock(dlil_mutex);
	OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
	if (dlil_read_count == dlil_writer_waiting) {
		uth->dlil_incremented_read = dlil_writer_waiting;
		return 0;
	}
	else {
		tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
		goto again;
	}
}

static void
dlil_write_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != dlil_writer_waiting)
		panic("dlil_write_end - thread is not a writer");
	OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
	lck_mtx_unlock(dlil_mutex);
	uth->dlil_incremented_read = 0;
	wakeup(&dlil_read_count);
}
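
/*
 * Illustrative sketch (not compiled): the expected pairing of the
 * primitives above.  Readers bracket list traversals with
 * dlil_read_begin()/dlil_read_end(); a writer must not already hold
 * the read lock and must cope with EDEADLK by deferring its work
 * (see dlil_detach_filter_internal below).  The helpers
 * reader_work() and writer_work() are hypothetical placeholders.
 */
#if 0
static void
example_reader(void)
{
	dlil_read_begin();
	reader_work();		/* may walk if_flt_head, if_proto_hash, ... */
	dlil_read_end();
}

static int
example_writer(void)
{
	int error = dlil_write_begin();
	if (error == EDEADLK) {
		/* this thread already holds the read lock; defer the work */
		return error;
	}
	writer_work();		/* may modify the filter and protocol lists */
	dlil_write_end();
	return 0;
}
#endif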

#define PROTO_HASH_SLOTS	0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
	switch(protocol_family) {
		case PF_INET:
			return 0;
		case PF_INET6:
			return 1;
		case PF_APPLETALK:
			return 2;
		case PF_VLAN:
			return 3;
		default:
			return 4;
	}
}

static
struct if_family_str *find_family_module(u_long if_family)
{
	struct if_family_str *mod = NULL;

	TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
		if (mod->if_family == (if_family & 0xffff))
			break;
	}

	return mod;
}

static
struct proto_family_str*
find_proto_module(u_long proto_family, u_long if_family)
{
	struct proto_family_str *mod = NULL;

	TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
		if ((mod->proto_family == (proto_family & 0xffff))
			&& (mod->if_family == (if_family & 0xffff)))
			break;
	}

	return mod;
}

static struct if_proto*
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
	struct if_proto *proto = NULL;
	u_long i = proto_hash_value(protocol_family);
	if (ifp->if_proto_hash) {
		proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
	}

	while(proto && proto->protocol_family != protocol_family) {
		proto = SLIST_NEXT(proto, next_hash);
	}

	return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
	OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
	int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

	if (oldval == 1) { /* This was the last reference */
		FREE(proto, M_IFADDR);
	}
}

__private_extern__ void
ifnet_lock_assert(
	__unused struct ifnet *ifp,
	__unused int what)
{
#if IFNET_RW_LOCK
	/*
	 * Not implemented for rw locks.
	 *
	 * Function exists so when/if we use mutex we can
	 * enable this check.
	 */
#else
	lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_shared(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_exclusive(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_done(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared()
{
	lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive()
{
	lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done()
{
	lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *ifbyfamily(u_long family, short unit)
{
	struct ifnet *ifp;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
		if ((family == ifp->if_family) && (ifp->if_unit == unit))
			break;
	ifnet_head_done();

	return ifp;
}

static int dlil_ifp_proto_count(struct ifnet * ifp)
{
	int count = 0;
	int i;

	if (ifp->if_proto_hash != NULL) {
		for (i = 0; i < PROTO_HASH_SLOTS; i++) {
			struct if_proto *proto;
			SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
				count++;
			}
		}
	}

	return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
	   struct net_event_data *event_data, u_long event_data_len)
{
	struct net_event_data	ev_data;
	struct kev_msg		ev_msg;

	/*
	 * A net event always starts with a net_event_data structure,
	 * but the caller can generate a simple net event or
	 * provide a longer event structure to post.
	 */

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = event_subclass;
	ev_msg.event_code = event_code;

	if (event_data == 0) {
		event_data = &ev_data;
		event_data_len = sizeof(struct net_event_data);
	}

	strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
	event_data->if_family = ifp->if_family;
	event_data->if_unit = (unsigned long) ifp->if_unit;

	ev_msg.dv[0].data_length = event_data_len;
	ev_msg.dv[0].data_ptr = event_data;
	ev_msg.dv[1].data_length = 0;

	dlil_event_internal(ifp, &ev_msg);
}
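
/*
 * Illustrative sketch (not compiled): a minimal caller of
 * dlil_post_msg().  Passing a NULL event_data posts a bare
 * net_event_data describing the interface, exactly as the detach
 * path below does; KEV_DL_LINK_ON is assumed to be the desired
 * KEV_DL_SUBCLASS event code.
 */
#if 0
static void
example_post_link_on(struct ifnet *ifp)
{
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, 0, 0);
}
#endif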

void dlil_init(void);
void
dlil_init(void)
{
	lck_grp_attr_t	*grp_attributes = 0;
	lck_attr_t	*lck_attributes = 0;
	lck_grp_t	*input_lock_grp = 0;

	TAILQ_INIT(&dlil_ifnet_head);
	TAILQ_INIT(&if_family_head);
	TAILQ_INIT(&proto_family_head);
	TAILQ_INIT(&ifnet_head);

	/* Setup the lock groups we will use */
	grp_attributes = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attributes);

	dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
#if IFNET_RW_LOCK
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#else
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#endif
	ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
	input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
	lck_grp_attr_free(grp_attributes);
	grp_attributes = 0;

	/* Setup the lock attributes we will use */
	lck_attributes = lck_attr_alloc_init();
	lck_attr_setdefault(lck_attributes);

	ifnet_lock_attr = lck_attr_alloc_init();
	lck_attr_setdefault(ifnet_lock_attr);

	dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
	input_lock_grp = 0;

	ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
	proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

	lck_attr_free(lck_attributes);
	lck_attributes = 0;

	/*
	 * Start up the dlil input thread once everything is initialized
	 */
	(void) kernel_thread(kernel_task, dlil_input_thread);
	(void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
	struct ifnet		*ifp,
	const struct iff_filter	*if_filter,
	interface_filter_t	*filter_ref)
{
	int retval = 0;
	struct ifnet_filter	*filter;

	MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
	if (filter == NULL)
		return ENOMEM;
	bzero(filter, sizeof(*filter));

	filter->filt_ifp = ifp;
	filter->filt_cookie = if_filter->iff_cookie;
	filter->filt_name = if_filter->iff_name;
	filter->filt_protocol = if_filter->iff_protocol;
	filter->filt_input = if_filter->iff_input;
	filter->filt_output = if_filter->iff_output;
	filter->filt_event = if_filter->iff_event;
	filter->filt_ioctl = if_filter->iff_ioctl;
	filter->filt_detached = if_filter->iff_detached;

	if ((retval = dlil_write_begin()) != 0) {
		/* Failed to acquire the write lock */
		FREE(filter, M_NKE);
		return retval;
	}
	TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
	dlil_write_end();
	*filter_ref = filter;
	return retval;
}
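
/*
 * Illustrative sketch (not compiled): filling out an iff_filter and
 * attaching it.  my_cookie, my_input() and my_detached() are
 * hypothetical; callbacks that are not needed may be left NULL, and
 * iff_protocol == 0 requests packets for every protocol.
 */
#if 0
static interface_filter_t example_filter_ref;

static int
example_attach(struct ifnet *ifp)
{
	struct iff_filter filt;

	bzero(&filt, sizeof(filt));
	filt.iff_cookie = my_cookie;
	filt.iff_name = "com.example.filter";
	filt.iff_protocol = 0;
	filt.iff_input = my_input;
	filt.iff_detached = my_detached;
	return dlil_attach_filter(ifp, &filt, &example_filter_ref);
}
#endif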

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
	int retval = 0;

	if (detached == 0) {
		ifnet_t			ifp = NULL;
		interface_filter_t	entry = NULL;

		/* Take the write lock */
		retval = dlil_write_begin();
		if (retval != 0 && retval != EDEADLK)
			return retval;

		/*
		 * At this point either we have the write lock (retval == 0)
		 * or we couldn't get it (retval == EDEADLK) because someone
		 * else up the stack is holding the read lock. It is safe to
		 * read because either the read or the write lock is held.
		 * Verify the filter parameter before proceeding.
		 */
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
				if (entry == filter)
					break;
			}
			if (entry == filter)
				break;
		}
		ifnet_head_done();

		if (entry != filter) {
			/* filter parameter is not a valid filter ref */
			if (retval == 0) {
				dlil_write_end();
			}
			return EINVAL;
		}

		if (retval == EDEADLK) {
			/* Perform a delayed detach */
			filter->filt_detaching = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
			return 0;
		}

		/* Remove the filter from the list */
		TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
		dlil_write_end();
	}

	/* Call the detached function if there is one */
	if (filter->filt_detached)
		filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

	/* Free the filter */
	FREE(filter, M_NKE);

	return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
	if (filter == NULL)
		return;
	dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
	__unused void*		foo,
	__unused wait_result_t	wait)
{
	while (1) {
		struct mbuf *m, *m_loop;

		lck_spin_lock(dlil_input_lock);
		m = dlil_input_mbuf_head;
		dlil_input_mbuf_head = NULL;
		dlil_input_mbuf_tail = NULL;
		m_loop = dlil_input_loop_head;
		dlil_input_loop_head = NULL;
		dlil_input_loop_tail = NULL;
		lck_spin_unlock(dlil_input_lock);

		/*
		 * NOTE warning %%% attention !!!!
		 * We should think about putting in some thread-starvation
		 * safeguards if we deal with long chains of packets.
		 */
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
			m = m0;
		}
		m = m_loop;
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;
			struct ifnet *ifp = &loif[0];

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(ifp, m, header);
			m = m0;
		}

		proto_input_run();

		if (dlil_input_mbuf_head == NULL &&
			dlil_input_loop_head == NULL && inject_buckets == 0) {
			assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
			(void) thread_block(dlil_input_thread_continue);
			/* NOTREACHED */
		}
	}
}

void dlil_input_thread(void)
{
	register thread_t self = current_thread();

	ml_thread_policy(self, MACHINE_GROUP,
			 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

	dlil_initialized = 1;
	dlil_input_thread_ptr = current_thread();
	dlil_input_thread_continue(NULL, THREAD_RESTART);
}
int
dlil_input_with_stats(
	struct ifnet *ifp,
	struct mbuf *m_head,
	struct mbuf *m_tail,
	const struct ifnet_stat_increment_param *stats)
{
	/* WARNING
	 * Because of looped-back multicast we cannot stuff the ifp in
	 * the rcvif of the packet header: loopback has its own dlil
	 * input queue
	 */

	lck_spin_lock(dlil_input_lock);
	if (ifp->if_type != IFT_LOOP) {
		if (dlil_input_mbuf_head == NULL)
			dlil_input_mbuf_head = m_head;
		else if (dlil_input_mbuf_tail != NULL)
			dlil_input_mbuf_tail->m_nextpkt = m_head;
		dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
	} else {
		if (dlil_input_loop_head == NULL)
			dlil_input_loop_head = m_head;
		else if (dlil_input_loop_tail != NULL)
			dlil_input_loop_tail->m_nextpkt = m_head;
		dlil_input_loop_tail = m_tail ? m_tail : m_head;
	}
	if (stats) {
		ifp->if_data.ifi_ipackets += stats->packets_in;
		ifp->if_data.ifi_ibytes += stats->bytes_in;
		ifp->if_data.ifi_ierrors += stats->errors_in;

		ifp->if_data.ifi_opackets += stats->packets_out;
		ifp->if_data.ifi_obytes += stats->bytes_out;
		ifp->if_data.ifi_oerrors += stats->errors_out;

		ifp->if_data.ifi_collisions += stats->collisions;
		ifp->if_data.ifi_iqdrops += stats->dropped;
	}
	lck_spin_unlock(dlil_input_lock);

	wakeup((caddr_t)&dlil_input_thread_wakeup);

	return 0;
}

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
	return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}
int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
		  char *frame_header)
{
	int retval;
	struct if_proto *ifproto = 0;
	protocol_family_t protocol_family;
	struct ifnet_filter *filter;

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

	/*
	 * Lock the interface while we run through
	 * the filters and the demux. This lock
	 * protects the filter list and the demux list.
	 */
	dlil_read_begin();

	/*
	 * Call family demux module. If the demux module finds a match
	 * for the frame it will fill-in the ifproto pointer.
	 */

	retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
	if (retval != 0)
		protocol_family = 0;
	if (retval == EJUSTRETURN) {
		dlil_read_end();
		return 0;
	}

	/* DANGER!!! */
	if (m->m_flags & (M_BCAST|M_MCAST))
		ifp->if_imcasts++;

	/*
	 * Run interface filters
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			int filter_result;
			if (filter->filt_input && (filter->filt_protocol == 0 ||
				filter->filt_protocol == protocol_family)) {
				filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

				if (filter_result) {
					dlil_read_end();
					if (filter_result == EJUSTRETURN) {
						filter_result = 0;
					}
					else {
						m_freem(m);
					}

					return filter_result;
				}
			}
		}
	}

	/* Demux is done and the interface filters have run; drop the read lock */
	if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
		dlil_read_end();
		if (retval != EJUSTRETURN) {
			m_freem(m);
			return retval;
		}
		else
			return 0;
	}

	ifproto = find_attached_proto(ifp, protocol_family);

	if (ifproto == 0) {
		dlil_read_end();
		DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
		m_freem(m);
		return 0;
	}

	/*
	 * Hand the packet off to the protocol.
	 */

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_lock(ifproto->dl_domain->dom_mtx);
	}

	if (ifproto->proto_kpi == kProtoKPI_DLIL)
		retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
						       ifp, ifproto->protocol_family,
						       TRUE);
	else
		retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
	}

	dlil_read_end();

	if (retval == EJUSTRETURN)
		retval = 0;
	else
		if (retval)
			m_freem(m);

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
	return retval;
}
static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
	struct ifnet_filter *filter;

	if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
		dlil_read_begin();

		/* Pass the event to the interface filters */
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if (filter->filt_event)
				filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
		}

		if (ifp->if_proto_hash) {
			int i;

			for (i = 0; i < PROTO_HASH_SLOTS; i++) {
				struct if_proto *proto;

				SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
					/* Pass the event to the protocol */
					if (proto->proto_kpi == kProtoKPI_DLIL) {
						if (proto->kpi.dlil.dl_event)
							proto->kpi.dlil.dl_event(ifp, event);
					}
					else {
						if (proto->kpi.v1.event)
							proto->kpi.v1.event(ifp, proto->protocol_family, event);
					}
				}
			}
		}

		dlil_read_end();

		/* Pass the event to the interface */
		if (ifp->if_event)
			ifp->if_event(ifp, event);

		if (ifp_unuse(ifp))
			ifp_use_reached_zero(ifp);
	}

	return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
	int result = 0;

	struct kev_msg kev_msg;

	kev_msg.vendor_code = event->vendor_code;
	kev_msg.kev_class = event->kev_class;
	kev_msg.kev_subclass = event->kev_subclass;
	kev_msg.event_code = event->event_code;
	kev_msg.dv[0].data_ptr = &event->event_data[0];
	kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
	kev_msg.dv[1].data_length = 0;

	result = dlil_event_internal(ifp, &kev_msg);

	return result;
}
int
dlil_output_list(
	struct ifnet*		ifp,
	u_long			proto_family,
	struct mbuf		*packetlist,
	caddr_t			route,
	const struct sockaddr	*dest,
	int			raw)
{
	char			*frame_type = 0;
	char			*dst_linkaddr = 0;
	int			error, retval = 0;
	char			frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char			dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter	*filter;
	struct if_proto		*proto = 0;
	struct mbuf		*m;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
#if BRIDGE
	if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
	if ((raw != 0) || proto_family != PF_INET) {
#endif
		while (packetlist) {
			m = packetlist;
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
			error = dlil_output(ifp, proto_family, m, route, dest, raw);
			if (error) {
				if (packetlist)
					m_freem_list(packetlist);
				return (error);
			}
		}
		return (0);
	}

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;
	m = packetlist;
	packetlist = packetlist->m_nextpkt;
	m->m_nextpkt = NULL;

	proto = find_attached_proto(ifp, proto_family);
	if (proto == NULL) {
		retval = ENXIO;
		goto cleanup;
	}

	retval = 0;
	if (proto->proto_kpi == kProtoKPI_DLIL) {
		if (proto->kpi.dlil.dl_pre_output)
			retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}
	else {
		if (proto->kpi.v1.pre_output)
			retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}

	if (retval) {
		if (retval != EJUSTRETURN) {
			m_freem(m);
		}
		goto cleanup;
	}

	do {

		if (ifp->if_framer) {
			retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
			if (retval) {
				if (retval != EJUSTRETURN) {
					m_freem(m);
				}
				goto cleanup;
			}
		}

		/*
		 * Let interface filters (if any) do their thing ...
		 */
		/* Do not pass VLAN tagged packets to filters PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
				if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
					filter->filt_output) {
					retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
					if (retval) {
						if (retval == EJUSTRETURN)
							continue;
						else {
							m_freem(m);
						}
						goto cleanup;
					}
				}
			}
		}
		/*
		 * Finally, call the driver.
		 */

		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
		retval = ifp->if_output(ifp, m);
		if (retval) {
			printf("dlil_output_list: output error retval = %x\n", retval);
			goto cleanup;
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

		m = packetlist;
		if (m) {
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
		}
	} while (m);

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (packetlist) /* if any packets are left, clean up */
		m_freem_list(packetlist);
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}
/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
int
dlil_output(
	struct ifnet*		ifp,
	u_long			proto_family,
	struct mbuf		*m,
	caddr_t			route,
	const struct sockaddr	*dest,
	int			raw)
{
	char			*frame_type = 0;
	char			*dst_linkaddr = 0;
	int			retval = 0;
	char			frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char			dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter	*filter;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;

	if (raw == 0) {
		struct if_proto	*proto = 0;

		proto = find_attached_proto(ifp, proto_family);
		if (proto == NULL) {
			m_freem(m);
			retval = ENXIO;
			goto cleanup;
		}

		retval = 0;
		if (proto->proto_kpi == kProtoKPI_DLIL) {
			if (proto->kpi.dlil.dl_pre_output)
				retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}
		else {
			if (proto->kpi.v1.pre_output)
				retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}

		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

	/*
	 * Call framing module
	 */
	if ((raw == 0) && (ifp->if_framer)) {
		retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

#if BRIDGE
	/* !!!LOCKING!!!
	 *
	 * Need to consider how to handle this.
	 */
	broken-locking
	if (do_bridge) {
		struct mbuf *m0 = m;
		struct ether_header *eh = mtod(m, struct ether_header *);

		if (m->m_pkthdr.rcvif)
			m->m_pkthdr.rcvif = NULL;
		ifp = bridge_dst_lookup(eh);
		bdg_forward(&m0, ifp);
		if (m0)
			m_freem(m0);

		return 0;
	}
#endif

	/*
	 * Let interface filters (if any) do their thing ...
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
				filter->filt_output) {
				retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
				if (retval) {
					if (retval != EJUSTRETURN)
						m_freem(m);
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Finally, call the driver.
	 */

	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
	retval = ifp->if_output(ifp, m);
	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}
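
/*
 * Illustrative sketch (not compiled): the lock ordering described in
 * the comment above dlil_output().  A protocol typically enters from
 * the socket layer with its domain mutex (dom_mtx) already held and
 * keeps it held across the call; my_proto_output() and its arguments
 * are hypothetical placeholders.
 */
#if 0
static int
my_proto_output(struct ifnet *ifp, struct mbuf *m,
		struct rtentry *rt, const struct sockaddr *dest)
{
	/* dom_mtx held on entry: protocol lock first, then dlil takes
	   the read lock and any interface locks inside dlil_output */
	return dlil_output(ifp, PF_INET, m, (caddr_t)rt, dest, 0);
}
#endif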

int
dlil_ioctl(u_long	proto_fam,
	   struct ifnet	*ifp,
	   u_long	ioctl_code,
	   caddr_t	ioctl_arg)
{
	struct ifnet_filter	*filter;
	int			retval = EOPNOTSUPP;
	int			result = 0;
	struct if_family_str	*if_family;
	int			holding_read = 0;

	/* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
	result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
	if (result != 0)
		return EOPNOTSUPP;

	dlil_read_begin();
	holding_read = 1;

	/* Run the interface filters first.
	 * We want to run all filters before calling the protocol,
	 * interface family, or interface.
	 */
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
			filter->filt_ioctl != NULL) {
			result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/* Allow the protocol to handle the ioctl */
	if (proto_fam) {
		struct if_proto	*proto = find_attached_proto(ifp, proto_fam);

		if (proto != 0) {
			result = EOPNOTSUPP;
			if (proto->proto_kpi == kProtoKPI_DLIL) {
				if (proto->kpi.dlil.dl_ioctl)
					result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
			}
			else {
				if (proto->kpi.v1.ioctl)
					result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
			}

			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Since we have incremented the use count on the ifp, we are guaranteed
	 * that the ifp will not go away (the function pointers may not be changed).
	 * We release the dlil read lock so the interface ioctl may trigger a
	 * protocol attach. This happens with vlan and may occur with other virtual
	 * interfaces.
	 */
	dlil_read_end();
	holding_read = 0;

	/* retval is either 0 or EOPNOTSUPP */

	/*
	 * Let the family handle this ioctl.
	 * If it returns something non-zero and not EOPNOTSUPP, we're done.
	 * If it returns zero, the ioctl was handled, so set retval to zero.
	 */
	if_family = find_family_module(ifp->if_family);
	if ((if_family) && (if_family->ifmod_ioctl)) {
		result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

		/* Only update retval if no one has handled the ioctl */
		if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
			if (result == ENOTSUP)
				result = EOPNOTSUPP;
			retval = result;
			if (retval && retval != EOPNOTSUPP) {
				goto cleanup;
			}
		}
	}

	/*
	 * Let the interface handle this ioctl.
	 * If it returns EOPNOTSUPP, ignore that, we may have
	 * already handled this in the protocol or family.
	 */
	if (ifp->if_ioctl)
		result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

	/* Only update retval if no one has handled the ioctl */
	if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
		if (result == ENOTSUP)
			result = EOPNOTSUPP;
		retval = result;
		if (retval && retval != EOPNOTSUPP) {
			goto cleanup;
		}
	}

cleanup:
	if (holding_read)
		dlil_read_end();
	if (ifp_unuse(ifp))
		ifp_use_reached_zero(ifp);

	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
	ifnet_t		ifp,
	bpf_tap_mode	mode,
	bpf_packet_func	callback)
{
	errno_t	error = 0;

	dlil_read_begin();
	if (ifp->if_set_bpf_tap)
		error = ifp->if_set_bpf_tap(ifp, mode, callback);
	dlil_read_end();

	return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
	struct ifnet *ifp,
	const struct sockaddr *proto_addr,
	struct sockaddr *ll_addr,
	size_t ll_len)
{
	errno_t	result = EOPNOTSUPP;
	struct if_proto *proto;
	const struct sockaddr *verify;

	dlil_read_begin();

	bzero(ll_addr, ll_len);

	/* Call the protocol first */
	proto = find_attached_proto(ifp, proto_addr->sa_family);
	if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
		proto->kpi.v1.resolve_multi != NULL) {
		result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
			(struct sockaddr_dl*)ll_addr, ll_len);
	}

	/* Let the interface verify the multicast address */
	if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
		if (result == 0)
			verify = ll_addr;
		else
			verify = proto_addr;
		result = ifp->if_check_multi(ifp, verify);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	struct if_proto *proto;
	errno_t	result = 0;

	dlil_read_begin();

	proto = find_attached_proto(ifp, target_proto->sa_family);
	if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
		proto->kpi.v1.send_arp == NULL) {
		result = ENOTSUP;
	}
	else {
		result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
			target_hw, target_proto);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	errno_t	result = 0;

	if (target_proto == NULL || (sender_proto &&
		sender_proto->sa_family != target_proto->sa_family))
		return EINVAL;

	/*
	 * If this is an ARP request and the target IP is IPv4LL,
	 * send the request on all interfaces. Check the address
	 * family before casting to sockaddr_in.
	 */
	if (ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
		arpop == ARPOP_REQUEST &&
		IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)) {
		ifnet_t		*ifp_list;
		u_int32_t	count;
		u_int32_t	ifp_on;

		result = ENOTSUP;

		if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
			for (ifp_on = 0; ifp_on < count; ifp_on++) {
				errno_t			new_result;
				ifaddr_t		source_hw = NULL;
				ifaddr_t		source_ip = NULL;
				struct sockaddr_in	source_ip_copy;

				/*
				 * Only arp on interfaces marked for IPv4LL ARPing. This may
				 * mean that we don't ARP on the interface the subnet route
				 * points to.
				 */
				if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
					continue;
				}

				source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

				/* Find the source IP address */
				ifnet_lock_shared(ifp_list[ifp_on]);
				TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
							  ifa_link) {
					if (source_ip->ifa_addr &&
						source_ip->ifa_addr->sa_family == AF_INET) {
						break;
					}
				}

				/* No IP source, don't arp */
				if (source_ip == NULL) {
					ifnet_lock_done(ifp_list[ifp_on]);
					continue;
				}

				/* Copy the source IP address */
				source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

				ifnet_lock_done(ifp_list[ifp_on]);

				/* Send the ARP */
				new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
									(struct sockaddr_dl*)source_hw->ifa_addr,
									(struct sockaddr*)&source_ip_copy, NULL,
									target_proto);

				if (result == ENOTSUP) {
					result = new_result;
				}
			}

			/* only free the list if we actually got one */
			ifnet_list_free(ifp_list);
		}
	}
	else {
		result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
			target_hw, target_proto);
	}

	return result;
}
static int
ifp_use(
	struct ifnet *ifp,
	int	handle_zero)
{
	int old_value;
	int retval = 0;

	do {
		old_value = ifp->if_usecnt;
		if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
			retval = ENXIO; // ifp is invalid
			break;
		}
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

	return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */
static void
ifp_use_reached_zero(
	struct ifnet *ifp)
{
	struct if_family_str *if_family;
	ifnet_detached_func	free_func;

	dlil_read_begin();

	if (ifp->if_usecnt != 0)
		panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

	/* Let BPF know we're detaching */
	bpfdetach(ifp);

	ifnet_head_lock_exclusive();
	ifnet_lock_exclusive(ifp);

	/* Remove ourselves from the list */
	TAILQ_REMOVE(&ifnet_head, ifp, if_link);
	ifnet_addrs[ifp->if_index - 1] = 0;

	/* ifp should be removed from the interface list */
	while (ifp->if_multiaddrs.lh_first) {
		struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

		/*
		 * When the interface is gone, we will no longer
		 * be listening on these multicasts. Various bits
		 * of the stack may be referencing these multicasts,
		 * release only our reference.
		 */
		LIST_REMOVE(ifma, ifma_link);
		ifma->ifma_ifp = NULL;
		ifma_release(ifma);
	}
	ifnet_head_done();

	ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
	ifnet_lock_done(ifp);

	if_family = find_family_module(ifp->if_family);
	if (if_family && if_family->del_if)
		if_family->del_if(ifp);
#if 0
	if (--if_family->if_usecnt == 0) {
		if (if_family->shutdown)
			(*if_family->shutdown)();

		TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
		FREE(if_family, M_IFADDR);
	}
#endif

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
	free_func = ifp->if_free;
	dlil_read_end();

	if (free_func)
		free_func(ifp);
}
static int
ifp_unuse(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
	if (oldval == 0)
		panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

	if (oldval > 1)
		return 0;

	if ((ifp->if_eflags & IFEF_DETACHING) == 0)
		panic("ifp_unuse: use count reached zero but detaching flag is not set!");

	return 1; /* caller must call ifp_use_reached_zero */
}
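
/*
 * Illustrative sketch (not compiled): the calling convention from the
 * comment above, mirroring what dlil_detach_protocol() does below.
 * ifp_unuse() is called under the dlil write lock; if it reports that
 * the use count hit zero, ifp_use_reached_zero() must be called only
 * after dlil_write_end().  modify_ifp() is a hypothetical placeholder.
 */
#if 0
static int
example_unuse_pattern(struct ifnet *ifp)
{
	int reached_zero;

	if (dlil_write_begin() != 0)
		return EDEADLK;
	modify_ifp(ifp);		/* mutate lists hanging off the ifp */
	reached_zero = ifp_unuse(ifp);
	dlil_write_end();
	if (reached_zero)
		ifp_use_reached_zero(ifp);
	return 0;
}
#endif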

void
ifp_reference(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
	if (oldval == 0)
		panic("ifp_release - refcount decremented past zero!");
}

extern lck_mtx_t	*domain_proto_mtx;

static int
dlil_attach_protocol_internal(
	struct if_proto	*proto,
	const struct ddesc_head_str *demux,
	const struct ifnet_demux_desc *demux_list,
	u_int32_t	demux_count)
{
	struct ddesc_head_str	temp_head;
	struct kev_dl_proto_data	ev_pr_data;
	struct ifnet *ifp = proto->ifp;
	int retval = 0;
	u_long hash_value = proto_hash_value(proto->protocol_family);
	int	if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
	void*	free_me = NULL;

	/* setup some of the common values */

	{
		struct domain *dp;
		lck_mtx_lock(domain_proto_mtx);
		dp = domains;
		while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
			dp = dp->dom_next;
		proto->dl_domain = dp;
		lck_mtx_unlock(domain_proto_mtx);
	}

	/*
	 * Convert the demux descriptors to a type the interface
	 * will understand. Checking if_eflags should be safe, this
	 * flag won't change.
	 */
	if (if_using_kpi && demux) {
		/* Convert the demux linked list to a demux_list */
		struct dlil_demux_desc	*demux_entry;
		struct ifnet_demux_desc *temp_list = NULL;
		u_int32_t i = 0;

		TAILQ_FOREACH(demux_entry, demux, next) {
			i++;
		}

		temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
		free_me = temp_list;

		if (temp_list == NULL)
			return ENOMEM;

		i = 0;
		TAILQ_FOREACH(demux_entry, demux, next) {
			/* dlil_demux_desc types 1, 2, and 3 are obsolete and cannot be translated */
			if (demux_entry->type == 1 ||
				demux_entry->type == 2 ||
				demux_entry->type == 3) {
				FREE(free_me, M_TEMP);
				return ENOTSUP;
			}

			temp_list[i].type = demux_entry->type;
			temp_list[i].data = demux_entry->native_type;
			temp_list[i].datalen = demux_entry->variants.native_type_length;
			i++;
		}
		demux_count = i;
		demux_list = temp_list;
	}
	else if (!if_using_kpi && demux_list != NULL) {
		struct dlil_demux_desc	*demux_entry;
		u_int32_t i = 0;

		demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
		free_me = demux_entry;
		if (demux_entry == NULL)
			return ENOMEM;

		TAILQ_INIT(&temp_head);

		for (i = 0; i < demux_count; i++) {
			demux_entry[i].type = demux_list[i].type;
			demux_entry[i].native_type = demux_list[i].data;
			demux_entry[i].variants.native_type_length = demux_list[i].datalen;
			TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
		}
		demux = &temp_head;
	}

	/*
	 * Take the write lock to protect readers and exclude other writers.
	 */
	dlil_write_begin();

	/* Check that the interface isn't currently detaching */
	ifnet_lock_shared(ifp);
	if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
		ifnet_lock_done(ifp);
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return ENXIO;
	}
	ifnet_lock_done(ifp);

	if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return EEXIST;
	}

	/*
	 * Call family module add_proto routine so it can refine the
	 * demux descriptors as it wishes.
	 */
	if (if_using_kpi)
		retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
	else {
		retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
			_cast_non_const(demux));
	}
	if (retval) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return retval;
	}

	/*
	 * We can't fail from this point on.
	 * Increment the number of uses (protocol attachments + interface attached).
	 */
	ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

	/*
	 * Insert the protocol in the hash
	 */
	{
		struct if_proto*	prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
		while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
			prev_proto = SLIST_NEXT(prev_proto, next_hash);
		if (prev_proto)
			SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
		else
			SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
	}

	/*
	 * Add to if_proto list for this interface
	 */
	if_proto_ref(proto);
	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt++;
	dlil_write_end();

	/* the reserved field carries the number of protocols still attached (subject to change) */
	ev_pr_data.proto_family = proto->protocol_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
				  (struct net_event_data *)&ev_pr_data,
				  sizeof(struct kev_dl_proto_data));

	DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
				ifp->if_name, ifp->if_unit, retval);
	if (free_me)
		FREE(free_me, M_TEMP);
	return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
	const struct ifnet_attach_proto_param *proto_details)
{
	int retval = 0;
	struct if_proto	*ifproto = NULL;

	ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
	if (ifproto == 0) {
		DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
		retval = ENOMEM;
		goto end;
	}
	bzero(ifproto, sizeof(*ifproto));

	ifproto->ifp = ifp;
	ifproto->protocol_family = protocol;
	ifproto->proto_kpi = kProtoKPI_v1;
	ifproto->kpi.v1.input = proto_details->input;
	ifproto->kpi.v1.pre_output = proto_details->pre_output;
	ifproto->kpi.v1.event = proto_details->event;
	ifproto->kpi.v1.ioctl = proto_details->ioctl;
	ifproto->kpi.v1.detached = proto_details->detached;
	ifproto->kpi.v1.resolve_multi = proto_details->resolve;
	ifproto->kpi.v1.send_arp = proto_details->send_arp;

	retval = dlil_attach_protocol_internal(ifproto, NULL,
				proto_details->demux_list, proto_details->demux_count);

end:
	if (retval && ifproto)
		FREE(ifproto, M_IFADDR);
	return retval;
}

int
dlil_attach_protocol(struct dlil_proto_reg_str	*proto)
{
	struct ifnet	 *ifp = NULL;
	struct if_proto  *ifproto = NULL;
	int	retval = 0;

	/*
	 * Do everything we can before taking the write lock
	 */

	if ((proto->protocol_family == 0) || (proto->interface_family == 0))
		return EINVAL;

	/*
	 * Allocate and init a new if_proto structure
	 */
	ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
	if (!ifproto) {
		DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
		retval = ENOMEM;
		goto end;
	}

	/* Look up the interface by family and unit number */
	ifp = ifbyfamily(proto->interface_family, proto->unit_number);
	if (!ifp) {
		DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
			proto->interface_family, proto->unit_number);
		retval = ENXIO;
		goto end;
	}

	bzero(ifproto, sizeof(struct if_proto));

	ifproto->ifp = ifp;
	ifproto->protocol_family = proto->protocol_family;
	ifproto->proto_kpi = kProtoKPI_DLIL;
	ifproto->kpi.dlil.dl_input = proto->input;
	ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
	ifproto->kpi.dlil.dl_event = proto->event;
	ifproto->kpi.dlil.dl_offer = proto->offer;
	ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
	ifproto->kpi.dlil.dl_detached = proto->detached;

	retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);

end:
	if (retval && ifproto)
		FREE(ifproto, M_IFADDR);
	return retval;
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

static int
dlil_detach_protocol_internal(
	struct if_proto *proto)
{
	struct ifnet *ifp = proto->ifp;
	u_long proto_family = proto->protocol_family;
	struct kev_dl_proto_data	ev_pr_data;

	if (proto->proto_kpi == kProtoKPI_DLIL) {
		if (proto->kpi.dlil.dl_detached)
			proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
	}
	else {
		if (proto->kpi.v1.detached)
			proto->kpi.v1.detached(ifp, proto->protocol_family);
	}
	if_proto_free(proto);

	/*
	 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
	 */

	if_rtproto_del(ifp, proto_family);

	/* the reserved field carries the number of protocols still attached (subject to change) */
	ev_pr_data.proto_family = proto_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
				  (struct net_event_data *)&ev_pr_data,
				  sizeof(struct kev_dl_proto_data));
	return 0;
}

int
dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
{
	struct if_proto *proto = NULL;
	int	retval = 0;
	int	use_reached_zero = 0;

	if ((retval = dlil_write_begin()) != 0) {
		if (retval == EDEADLK) {
			retval = 0;
			dlil_read_begin();
			proto = find_attached_proto(ifp, proto_family);
			if (proto == 0) {
				retval = ENXIO;
			}
			else {
				proto->detaching = 1;
				dlil_detach_waiting = 1;
				wakeup(&dlil_detach_waiting);
			}
			dlil_read_end();
		}
		goto end;
	}

	proto = find_attached_proto(ifp, proto_family);

	if (proto == NULL) {
		retval = ENXIO;
		dlil_write_end();
		goto end;
	}

	/*
	 * Call family module del_proto
	 */

	if (ifp->if_del_proto)
		ifp->if_del_proto(ifp, proto->protocol_family);

	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt--;

	SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);

	/*
	 * We can do the rest of the work outside of the write lock.
	 */
	use_reached_zero = ifp_unuse(ifp);
	dlil_write_end();

	dlil_detach_protocol_internal(proto);

	/*
	 * Only handle the case where the interface will go away after
	 * we've sent the message. This way post message can send the
	 * message to the interface safely.
	 */

	if (use_reached_zero)
		ifp_use_reached_zero(ifp);

end:
	return retval;
}
2059
2060 /*
2061 * dlil_delayed_detach_thread is responsible for detaching
2062 * protocols, protocol filters, and interface filters after
2063 * an attempt was made to detach one of those items while
2064 * it was not safe to do so (i.e. while inside a dlil_read_begin section).
2065 *
2066 * This function will take the dlil write lock and walk
2067 * through each of the interfaces looking for items with
2068 * the detaching flag set. When an item is found, it is
2069 * detached from the interface and placed on a local list.
2070 * After all of the items have been collected, we drop the
2071 * write lock and perform the post-detach work. This is done
2072 * so we only have to take the write lock once.
2073 *
2074 * When detaching a protocol, if we find that we have
2075 * dropped the last use of the interface and must call
2076 * ifp_use_reached_zero, we break out of the loop and drop
2077 * the write lock first (see the sketch after this function).
2078 */
2079
2080 static void
2081 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2082 {
2083 thread_t self = current_thread();
2084 int asserted = 0;
2085
2086 ml_thread_policy(self, MACHINE_GROUP,
2087 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2088
2089
2090 while (1) {
2091 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2092 struct ifnet *ifp;
2093 struct proto_hash_entry detached_protos;
2094 struct ifnet_filter_head detached_filters;
2095 struct if_proto *proto;
2096 struct if_proto *next_proto;
2097 struct ifnet_filter *filt;
2098 struct ifnet_filter *next_filt;
2099 int reached_zero;
2100
2101 reached_zero = 0;
2102
2103 /* Clear the detach waiting flag */
2104 dlil_detach_waiting = 0;
2105 TAILQ_INIT(&detached_filters);
2106 SLIST_INIT(&detached_protos);
2107
2108 ifnet_head_lock_shared();
2109 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2110 int i;
2111
2112 // Look for protocols and protocol filters
2113 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2114 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2115 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2116
2117 // Detach this protocol
2118 if (proto->detaching) {
2119 if (ifp->if_del_proto)
2120 ifp->if_del_proto(ifp, proto->protocol_family);
2121 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2122 ifp->offercnt--;
2123 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2124 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2125 reached_zero = ifp_unuse(ifp);
2126 if (reached_zero) {
2127 break;
2128 }
2129 }
2130 else {
2131 // Update prev_nextptr to point to our next ptr
2132 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2133 }
2134 }
2135 }
2136
2137 // look for interface filters that need to be detached
2138 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2139 next_filt = TAILQ_NEXT(filt, filt_next);
2140 if (filt->filt_detaching != 0) {
2141 // take this interface filter off the interface filter list
2142 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2143
2144 // put this interface filter on the detached filters list
2145 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2146 }
2147 }
2148
2149 if (ifp->if_delayed_detach) {
2150 ifp->if_delayed_detach = 0;
2151 reached_zero = ifp_unuse(ifp);
2152 }
2153
2154 if (reached_zero)
2155 break;
2156 }
2157 ifnet_head_done();
2158 dlil_write_end();
2159
2160 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2161 next_filt = TAILQ_NEXT(filt, filt_next);
2162 /*
2163 * Passing 1 as the second parameter tells
2164 * dlil_detach_filter_internal not to remove the item from
2165 * the list (it is already off it); the item is still freed.
2166 */
2167 dlil_detach_filter_internal(filt, 1);
2168 }
2169
2170 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2171 next_proto = SLIST_NEXT(proto, next_hash);
2172 dlil_detach_protocol_internal(proto);
2173 }
2174
2175 if (reached_zero) {
2176 ifp_use_reached_zero(ifp);
2177 dlil_detach_waiting = 1; // we may have missed something
2178 }
2179 }
2180
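/*
 * Sleep until more detach work arrives: assert the wait first, then
 * block only if no new request raced in while we were unlocked.
 */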
2181 if (!asserted && dlil_detach_waiting == 0) {
2182 asserted = 1;
2183 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2184 }
2185
2186 if (dlil_detach_waiting == 0) {
2187 asserted = 0;
2188 thread_block(dlil_delayed_detach_thread);
2189 }
2190 }
2191 }
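
/*
 * Sketch of the deferral handshake referenced above (compiled out;
 * illustrative only). A caller that cannot take the write lock marks
 * the item and wakes dlil_delayed_detach_thread, mirroring the EDEADLK
 * path in dlil_detach_protocol.
 */
#if 0
static void
example_request_delayed_detach(struct ifnet *ifp, u_long proto_family)
{
struct if_proto *proto;

dlil_read_begin();
proto = find_attached_proto(ifp, proto_family);
if (proto) {
proto->detaching = 1; /* flag the protocol for the detach thread */
dlil_detach_waiting = 1; /* note that work is pending */
wakeup(&dlil_detach_waiting); /* rouse dlil_delayed_detach_thread */
}
dlil_read_end();
}
#endif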
2192
2193 static void
2194 dlil_call_delayed_detach_thread(void) {
2195 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2196 }
2197
2198 extern int if_next_index(void);
2199
2200 __private_extern__ int
2201 dlil_if_attach_with_address(
2202 struct ifnet *ifp,
2203 const struct sockaddr_dl *ll_addr)
2204 {
2205 u_long interface_family = ifp->if_family;
2206 struct if_family_str *if_family = NULL;
2207 int stat;
2208 struct ifnet *tmp_if;
2209 struct proto_hash_entry *new_proto_list = NULL;
2210 int locked = 0;
2211
2212
2213 ifnet_head_lock_shared();
2214
2215 /* Verify we aren't already on the list */
2216 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2217 if (tmp_if == ifp) {
2218 ifnet_head_done();
2219 return EEXIST;
2220 }
2221 }
2222
2223 ifnet_head_done();
2224
2225 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2226 #if IFNET_RW_LOCK
2227 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2228 #else
2229 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2230 #endif
2231
2232 if (ifp->if_lock == 0) {
2233 return ENOMEM;
2234 }
2235
2236 // Only use family if this is not a KPI interface
2237 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2238 if_family = find_family_module(interface_family);
2239 }
2240
2241 /*
2242 * Allow interfaces without protocol families to attach
2243 * only if they have the necessary fields filled out.
2244 */
2245
2246 if ((if_family == 0) &&
2247 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2248 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
2249 interface_family);
2250 return ENODEV;
2251 }
2252
2253 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2254 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2255 M_NKE, M_WAITOK);
2256
2257 if (new_proto_list == 0) {
2258 return ENOBUFS;
2259 }
2260 }
2261
2262 dlil_write_begin();
2263 locked = 1;
2264
2265 /*
2266 * Call the family module to fill in the appropriate fields in the
2267 * ifnet structure.
2268 */
2269
2270 if (if_family) {
2271 stat = if_family->add_if(ifp);
2272 if (stat) {
2273 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2274 dlil_write_end();
2275 return stat;
2276 }
2277 ifp->if_add_proto_u.original = if_family->add_proto;
2278 ifp->if_del_proto = if_family->del_proto;
2279 if_family->refcnt++;
2280 }
2281
2282 ifp->offercnt = 0;
2283 TAILQ_INIT(&ifp->if_flt_head);
2284
2285
2286 if (new_proto_list) {
2287 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2288 ifp->if_proto_hash = new_proto_list;
2289 new_proto_list = 0;
2290 }
2291
2292 /* old_if_attach */
2293 {
2294 struct ifaddr *ifa = 0;
2295
2296 if (ifp->if_snd.ifq_maxlen == 0)
2297 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2298 TAILQ_INIT(&ifp->if_prefixhead);
2299 LIST_INIT(&ifp->if_multiaddrs);
2300 ifnet_touch_lastchange(ifp);
2301
2302 /* usecount to track attachment to the ifnet list */
2303 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2304
2305 /* Lock the list of interfaces */
2306 ifnet_head_lock_exclusive();
2307 ifnet_lock_exclusive(ifp);
2308
2309 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2310 char workbuf[64];
2311 int namelen, masklen, socksize, ifasize;
2312
2313 ifp->if_index = if_next_index();
2314
2315 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2316 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2317 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2318 socksize = masklen + ifp->if_addrlen;
2319 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2320 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2321 socksize = sizeof(struct sockaddr_dl);
2322 socksize = ROUNDUP(socksize);
2323 ifasize = sizeof(struct ifaddr) + 2 * socksize;
2324 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2325 if (ifa) {
2326 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2327 ifnet_addrs[ifp->if_index - 1] = ifa;
2328 bzero(ifa, ifasize);
2329 sdl->sdl_len = socksize;
2330 sdl->sdl_family = AF_LINK;
2331 bcopy(workbuf, sdl->sdl_data, namelen);
2332 sdl->sdl_nlen = namelen;
2333 sdl->sdl_index = ifp->if_index;
2334 sdl->sdl_type = ifp->if_type;
2335 if (ll_addr) {
2336 sdl->sdl_alen = ll_addr->sdl_alen;
2337 if (ll_addr->sdl_alen != ifp->if_addrlen)
2338 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2339 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2340 }
2341 ifa->ifa_ifp = ifp;
2342 ifa->ifa_rtrequest = link_rtrequest;
2343 ifa->ifa_addr = (struct sockaddr*)sdl;
2344 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2345 ifa->ifa_netmask = (struct sockaddr*)sdl;
2346 sdl->sdl_len = masklen;
2347 while (namelen != 0)
2348 sdl->sdl_data[--namelen] = 0xff;
2349 }
2350 }
2351 else {
2352 /* preserve the first ifaddr */
2353 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2354 }
2355
2356
2357 TAILQ_INIT(&ifp->if_addrhead);
2358 ifa = ifnet_addrs[ifp->if_index - 1];
2359
2360 if (ifa) {
2361 /*
2362 * We don't use if_attach_ifa because we want
2363 * this address to be first on the list.
2364 */
2365 ifaref(ifa);
2366 ifa->ifa_debug |= IFA_ATTACHED;
2367 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2368 }
2369
2370 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2371 ifindex2ifnet[ifp->if_index] = ifp;
2372
2373 ifnet_head_done();
2374 }
2375 dlil_write_end();
2376
2377 if (if_family && if_family->init_if) {
2378 stat = if_family->init_if(ifp);
2379 if (stat) {
2380 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2381 }
2382 }
2383
2384 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
2385 ifnet_lock_done(ifp);
2386
2387 return 0;
2388 }
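
/*
 * Sketch (compiled out): how a driver might build the link-layer
 * address argument. The 6-byte length is an assumption for an
 * Ethernet-style interface; sdl_alen must equal ifp->if_addrlen or
 * the attach panics above.
 */
#if 0
static int
example_attach_with_lladdr(struct ifnet *ifp, const u_char *mac)
{
struct sockaddr_dl sdl;

bzero(&sdl, sizeof(sdl));
sdl.sdl_len = sizeof(sdl);
sdl.sdl_family = AF_LINK;
sdl.sdl_alen = 6; /* must match ifp->if_addrlen */
bcopy(mac, LLADDR(&sdl), 6);

return dlil_if_attach_with_address(ifp, &sdl);
}
#endif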
2389
2390 int
2391 dlil_if_attach(struct ifnet *ifp)
2392 {
2393 return dlil_if_attach_with_address(ifp, NULL);
2394 }
2395
2396
2397 int
2398 dlil_if_detach(struct ifnet *ifp)
2399 {
2400 struct ifnet_filter *filter;
2401 struct ifnet_filter *filter_next;
2402 int zeroed = 0;
2403 int retval = 0;
2404 struct ifnet_filter_head fhead;
2405
2406
2407 ifnet_lock_exclusive(ifp);
2408
2409 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2410 /* Interface has already been detached */
2411 ifnet_lock_done(ifp);
2412 return ENXIO;
2413 }
2414
2415 /*
2416 * Indicate this interface is being detached.
2417 *
2418 * This should prevent protocols from attaching
2419 * from this point on. Interface will remain on
2420 * the list until all of the protocols are detached.
2421 */
2422 ifp->if_eflags |= IFEF_DETACHING;
2423 ifnet_lock_done(ifp);
2424
2425 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
2426
2427 if ((retval = dlil_write_begin()) != 0) {
2428 if (retval == EDEADLK) {
2429 retval = DLIL_WAIT_FOR_FREE;
2430
2431 /* We need to perform a delayed detach */
2432 ifp->if_delayed_detach = 1;
2433 dlil_detach_waiting = 1;
2434 wakeup(&dlil_detach_waiting);
2435 }
2436 return retval;
2437 }
2438
2439 /* Steal the list of interface filters */
2440 fhead = ifp->if_flt_head;
2441 TAILQ_INIT(&ifp->if_flt_head);
2442
2443 /* unuse the interface */
2444 zeroed = ifp_unuse(ifp);
2445
2446 dlil_write_end();
2447
2448 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2449 filter_next = TAILQ_NEXT(filter, filt_next);
2450 dlil_detach_filter_internal(filter, 1);
2451 }
2452
2453 if (zeroed == 0) {
2454 retval = DLIL_WAIT_FOR_FREE;
2455 }
2456 else
2457 {
2458 ifp_use_reached_zero(ifp);
2459 }
2460
2461 return retval;
2462 }
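
/*
 * Sketch (compiled out): interpreting dlil_if_detach's return value.
 * example_wait_for_if_free is a hypothetical driver-side wait for the
 * if_free callback.
 */
#if 0
static void
example_teardown(struct ifnet *ifp)
{
switch (dlil_if_detach(ifp)) {
case 0:
/* fully detached; safe to reclaim driver resources */
break;
case DLIL_WAIT_FOR_FREE:
example_wait_for_if_free(ifp); /* hypothetical: wait for if_free */
break;
case ENXIO:
/* a detach was already in progress */
break;
}
}
#endif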
2463
2464
2465 int
2466 dlil_reg_if_modules(u_long interface_family,
2467 struct dlil_ifmod_reg_str *ifmod)
2468 {
2469 struct if_family_str *if_family;
2470
2471
2472 if (find_family_module(interface_family)) {
2473 DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
2474 interface_family);
2475 return EEXIST;
2476 }
2477
2478 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2479 (!ifmod->add_proto) || (!ifmod->del_proto)) {
2480 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
2481 return EINVAL;
2482 }
2483
2484 /*
2485 * The following is a gross hack to keep from breaking
2486 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2487 * does not zero the reserved fields in dlil_ifmod_reg_str.
2488 * As a result, we have to zero any function that used to
2489 * be reserved fields at the time Vicomsoft built their
2490 * kext. Radar #2974305
2491 */
2492 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
2493 if (interface_family == 123) { /* Vicom */
2494 ifmod->init_if = 0;
2495 } else {
2496 return EINVAL;
2497 }
2498 }
2499
2500 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2501 if (!if_family) {
2502 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
2503 return ENOMEM;
2504 }
2505
2506 bzero(if_family, sizeof(struct if_family_str));
2507
2508 if_family->if_family = interface_family & 0xffff;
2509 if_family->shutdown = ifmod->shutdown;
2510 if_family->add_if = ifmod->add_if;
2511 if_family->del_if = ifmod->del_if;
2512 if_family->init_if = ifmod->init_if;
2513 if_family->add_proto = ifmod->add_proto;
2514 if_family->del_proto = ifmod->del_proto;
2515 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
2516 if_family->refcnt = 1;
2517 if_family->flags = 0;
2518
2519 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
2520 return 0;
2521 }
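
/*
 * Sketch (compiled out): registering a family module. EXAMPLE_IF_FAMILY
 * and the example_* callbacks are hypothetical; note the bzero, which
 * keeps the reserved fields zero as the check above expects.
 */
#if 0
static int
example_register_family(void)
{
struct dlil_ifmod_reg_str ifmod;

bzero(&ifmod, sizeof(ifmod)); /* reserved fields must stay zero */
ifmod.add_if = example_add_if;
ifmod.del_if = example_del_if;
ifmod.add_proto = example_add_proto;
ifmod.del_proto = example_del_proto;

return dlil_reg_if_modules(EXAMPLE_IF_FAMILY, &ifmod);
}
#endif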
2522
2523 int dlil_dereg_if_modules(u_long interface_family)
2524 {
2525 struct if_family_str *if_family;
2526 int ret = 0;
2527
2528
2529 if_family = find_family_module(interface_family);
2530 if (if_family == 0) {
2531 return ENXIO;
2532 }
2533
2534 if (--if_family->refcnt == 0) {
2535 if (if_family->shutdown)
2536 (*if_family->shutdown)();
2537
2538 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2539 FREE(if_family, M_IFADDR);
2540 }
2541 else {
2542 if_family->flags |= DLIL_SHUTDOWN;
2543 ret = DLIL_WAIT_FOR_FREE;
2544 }
2545
2546 return ret;
2547 }
2548
2549
2550
2551 int
2552 dlil_reg_proto_module(
2553 u_long protocol_family,
2554 u_long interface_family,
2555 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2556 int (*detach)(struct ifnet *ifp, u_long protocol_family))
2557 {
2558 struct proto_family_str *proto_family;
2559
2560 if (attach == NULL) return EINVAL;
2561
2562 lck_mtx_lock(proto_family_mutex);
2563
2564 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2565 if (proto_family->proto_family == protocol_family &&
2566 proto_family->if_family == interface_family) {
2567 lck_mtx_unlock(proto_family_mutex);
2568 return EEXIST;
2569 }
2570 }
2571
2572 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2573 if (!proto_family) {
2574 lck_mtx_unlock(proto_family_mutex);
2575 return ENOMEM;
2576 }
2577
2578 bzero(proto_family, sizeof(struct proto_family_str));
2579 proto_family->proto_family = protocol_family;
2580 proto_family->if_family = interface_family & 0xffff;
2581 proto_family->attach_proto = attach;
2582 proto_family->detach_proto = detach;
2583
2584 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
2585 lck_mtx_unlock(proto_family_mutex);
2586 return 0;
2587 }
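
/*
 * Sketch (compiled out): registering a protocol module so that
 * dlil_plumb_protocol below can plumb PF_INET over an Ethernet-family
 * interface. The example_inet_* callbacks are hypothetical.
 */
#if 0
static int
example_register_inet(void)
{
return dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
example_inet_attach, example_inet_detach);
}
#endif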
2588
2589 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2590 {
2591 struct proto_family_str *proto_family;
2592 int ret = 0;
2593
2594 lck_mtx_lock(proto_family_mutex);
2595
2596 proto_family = find_proto_module(protocol_family, interface_family);
2597 if (proto_family == 0) {
2598 lck_mtx_unlock(proto_family_mutex);
2599 return ENXIO;
2600 }
2601
2602 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2603 FREE(proto_family, M_IFADDR);
2604
2605 lck_mtx_unlock(proto_family_mutex);
2606 return ret;
2607 }
2608
2609 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
2610 {
2611 struct proto_family_str *proto_family;
2612 int ret = 0;
2613
2614 lck_mtx_lock(proto_family_mutex);
2615 proto_family = find_proto_module(protocol_family, ifp->if_family);
2616 if (proto_family == 0) {
2617 lck_mtx_unlock(proto_family_mutex);
2618 return ENXIO;
2619 }
2620
2621 ret = proto_family->attach_proto(ifp, protocol_family);
2622
2623 lck_mtx_unlock(proto_family_mutex);
2624 return ret;
2625 }
2626
2627
2628 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2629 {
2630 struct proto_family_str *proto_family;
2631 int ret = 0;
2632
2633 lck_mtx_lock(proto_family_mutex);
2634
2635 proto_family = find_proto_module(protocol_family, ifp->if_family);
2636 if (proto_family && proto_family->detach_proto)
2637 ret = proto_family->detach_proto(ifp, protocol_family);
2638 else
2639 ret = dlil_detach_protocol(ifp, protocol_family);
2640
2641 lck_mtx_unlock(proto_family_mutex);
2642 return ret;
2643 }
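
/*
 * Sketch (compiled out): plumbing a protocol onto an interface and
 * tearing it down again. When no protocol module is registered for
 * the pair, dlil_unplumb_protocol falls back to dlil_detach_protocol.
 */
#if 0
static void
example_plumb_cycle(struct ifnet *ifp)
{
if (dlil_plumb_protocol(PF_INET, ifp) == 0) {
/* ... interface now carries PF_INET traffic ... */
(void)dlil_unplumb_protocol(PF_INET, ifp);
}
}
#endif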
2644
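/*
 * The dlil_recycle_* handlers below are installed by dlil_if_release
 * so that a recycled ifnet harmlessly absorbs late calls: ioctls fail
 * with EOPNOTSUPP, output frees the mbuf, and free is a no-op.
 */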
2645 static errno_t
2646 dlil_recycle_ioctl(
2647 __unused ifnet_t ifnet_ptr,
2648 __unused u_int32_t ioctl_code,
2649 __unused void *ioctl_arg)
2650 {
2651 return EOPNOTSUPP;
2652 }
2653
2654 static int
2655 dlil_recycle_output(
2656 __unused struct ifnet *ifnet_ptr,
2657 struct mbuf *m)
2658 {
2659 m_freem(m);
2660 return 0;
2661 }
2662
2663 static void
2664 dlil_recycle_free(
2665 __unused ifnet_t ifnet_ptr)
2666 {
2667 }
2668
2669 static errno_t
2670 dlil_recycle_set_bpf_tap(
2671 __unused ifnet_t ifp,
2672 __unused bpf_tap_mode mode,
2673 __unused bpf_packet_func callback)
2674 {
2675 /* XXX not sure what to do here */
2676 return 0;
2677 }
2678
2679 int dlil_if_acquire(
2680 u_long family,
2681 const void *uniqueid,
2682 size_t uniqueid_len,
2683 struct ifnet **ifp)
2684 {
2685 struct ifnet *ifp1 = NULL;
2686 struct dlil_ifnet *dlifp1 = NULL;
2687 int ret = 0;
2688
2689 lck_mtx_lock(dlil_ifnet_mutex);
2690 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2691
2692 ifp1 = (struct ifnet *)dlifp1;
2693
2694 if (ifp1->if_family == family) {
2695
2696 /* same uniqueid (length and bytes match), or neither has one */
2697 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2698 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2699
2700 /* check for matching interface in use */
2701 if (ifp1->if_eflags & IFEF_INUSE) {
2702 if (uniqueid_len) {
2703 ret = EBUSY;
2704 goto end;
2705 }
2706 }
2707 else {
2708 if (!ifp1->if_lock)
2709 panic("ifp's lock is gone\n");
2710 ifnet_lock_exclusive(ifp1);
2711 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2712 ifnet_lock_done(ifp1);
2713 *ifp = ifp1;
2714 goto end;
2715 }
2716 }
2717 }
2718 }
2719
2720 /* no interface found, allocate a new one */
2721 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2722 if (dlifp1 == 0) {
2723 ret = ENOMEM;
2724 goto end;
2725 }
2726
2727 bzero(dlifp1, sizeof(*dlifp1));
2728
2729 if (uniqueid_len) {
2730 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2731 if (dlifp1->if_uniqueid == 0) {
2732 FREE(dlifp1, M_NKE);
2733 ret = ENOMEM;
2734 goto end;
2735 }
2736 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2737 dlifp1->if_uniqueid_len = uniqueid_len;
2738 }
2739
2740 ifp1 = (struct ifnet *)dlifp1;
2741 ifp1->if_eflags |= IFEF_INUSE;
2742 ifp1->if_name = dlifp1->if_namestorage;
2743
2744 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2745
2746 *ifp = ifp1;
2747
2748 end:
2749 lck_mtx_unlock(dlil_ifnet_mutex);
2750
2751 return ret;
2752 }
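
/*
 * Sketch (compiled out): the acquire/attach sequence a driver might
 * use so that re-creating an interface with the same uniqueid recycles
 * the old ifnet (and keeps its unit number). The 8-byte EUI-64 id is
 * an assumption for illustration.
 */
#if 0
static int
example_acquire_and_attach(const u_char *eui64, struct ifnet **ifpp)
{
struct ifnet *ifp = NULL;
int ret;

ret = dlil_if_acquire(APPLE_IF_FAM_ETHERNET, eui64, 8, &ifp);
if (ret == EBUSY)
return ret; /* an in-use interface has this uniqueid */
if (ret == 0) {
/* fill in if_unit, if_name, callbacks, etc., then attach */
ret = dlil_if_attach(ifp);
if (ret == 0)
*ifpp = ifp;
}
return ret;
}
#endif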
2753
2754 void dlil_if_release(struct ifnet *ifp)
2755 {
2756 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2757
2758
2759 /* Interface does not have a lock until it is attached - radar 3713951 */
2760 if (ifp->if_lock)
2761 ifnet_lock_exclusive(ifp);
2762 ifp->if_eflags &= ~IFEF_INUSE;
2763 ifp->if_ioctl = dlil_recycle_ioctl;
2764 ifp->if_output = dlil_recycle_output;
2765 ifp->if_free = dlil_recycle_free;
2766 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2767
2768 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2769 ifp->if_name = dlifp->if_namestorage;
2770 if (ifp->if_lock)
2771 ifnet_lock_done(ifp);
2772
2773 }