/*
 * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Data Link Interface Layer
 * Author: Ted Walker
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <sys/random.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>
#include <net/kpi_protocol.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#include <mach/thread_act.h>

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#define DBG_LAYER_BEG           DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END           DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT      DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT     DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT      DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))


#define MAX_FRAME_TYPE_SIZE     4 /* LONGWORDS */
#define MAX_LINKADDR            4 /* LONGWORDS */
#define M_NKE                   M_IFADDR

#if 1
#define DLIL_PRINTF     printf
#else
#define DLIL_PRINTF     kprintf
#endif


enum {
    kProtoKPI_v1 = 1,
    kProtoKPI_v2 = 2
};

struct if_proto {
    SLIST_ENTRY(if_proto)   next_hash;
    int                     refcount;
    int                     detaching;
    struct ifnet            *ifp;
    struct domain           *dl_domain;
    protocol_family_t       protocol_family;
    int                     proto_kpi;
    union {
        struct {
            proto_media_input           input;
            proto_media_preout          pre_output;
            proto_media_event           event;
            proto_media_ioctl           ioctl;
            proto_media_detached        detached;
            proto_media_resolve_multi   resolve_multi;
            proto_media_send_arp        send_arp;
        } v1;
        struct {
            proto_media_input_v2        input;
            proto_media_preout          pre_output;
            proto_media_event           event;
            proto_media_ioctl           ioctl;
            proto_media_detached        detached;
            proto_media_resolve_multi   resolve_multi;
            proto_media_send_arp        send_arp;
        } v2;
    } kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);

struct dlil_ifnet {
    /* ifnet and drvr_ext are used by the stack and drivers;
       drvr_ext extends the public ifnet and must follow dl_if */
    struct ifnet    dl_if;                  /* public ifnet */

    /* dlil private fields */
    TAILQ_ENTRY(dlil_ifnet) dl_if_link;     /* dlil_ifnets are linked together;
                                               this is not the ifnet list */
    void            *if_uniqueid;           /* unique id identifying the interface */
    size_t          if_uniqueid_len;        /* length of the unique id */
    char            if_namestorage[IFNAMSIZ]; /* interface name storage */
};

struct ifnet_filter {
    TAILQ_ENTRY(ifnet_filter)   filt_next;
    ifnet_t                     filt_ifp;
    int                         filt_detaching;

    const char                  *filt_name;
    void                        *filt_cookie;
    protocol_family_t           filt_protocol;
    iff_input_func              filt_input;
    iff_output_func             filt_output;
    iff_event_func              filt_event;
    iff_ioctl_func              filt_ioctl;
    iff_detached_func           filt_detached;
};

struct proto_input_entry;

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

static struct dlil_threading_info dlil_lo_thread;
__private_extern__ struct dlil_threading_info *dlil_lo_thread_ptr = &dlil_lo_thread;

static struct mbuf *dlil_lo_input_mbuf_head = NULL;
static struct mbuf *dlil_lo_input_mbuf_tail = NULL;

#if IFNET_INPUT_SANITY_CHK
static int dlil_lo_input_mbuf_count = 0;
int dlil_input_sanity_check = 0;    /* sanity checking of input packet lists received */
#endif
int dlil_multithreaded_input = 1;
static int cur_dlil_input_threads = 0;

static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static __inline__ void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

unsigned int net_affinity = 1;
static kern_return_t dlil_affinity_set(struct thread *, u_int32_t);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr

void dlil_input_packet_list(struct ifnet *ifp, struct mbuf *m);
static void dlil_input_thread_func(struct dlil_threading_info *inpthread);
__private_extern__ int dlil_create_input_thread(
    ifnet_t, struct dlil_threading_info *);
__private_extern__ void dlil_terminate_input_thread(
    struct dlil_threading_info *);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;
static lck_grp_attr_t *dlil_grp_attributes = NULL;
static lck_attr_t *dlil_lck_attributes = NULL;
static lck_grp_t *dlil_input_lock_grp = NULL;

static inline void*
_cast_non_const(const void * ptr) {
    union {
        const void* cval;
        void*       val;
    } ret;

    ret.cval = ptr;
    return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
    unsigned long new_value;
    unsigned long old_value;
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read == dlil_writer_waiting)
        panic("dlil_read_begin - thread is already a writer");

    do {
again:
        old_value = dlil_read_count;

        if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
        {
            tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
            goto again;
        }

        new_value = old_value + 1;
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

    uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    OSDecrementAtomic((SInt32*)&dlil_read_count);
    uth->dlil_incremented_read--;
    if (dlil_read_count == dlil_writer_waiting)
        wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != 0) {
        return EDEADLK;
    }
    lck_mtx_lock(dlil_mutex);
    OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
    if (dlil_read_count == dlil_writer_waiting) {
        uth->dlil_incremented_read = dlil_writer_waiting;
        return 0;
    }
    else {
        tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
        goto again;
    }
}

static void
dlil_write_end(void)
{
    struct uthread *uth = get_bsdthread_info(current_thread());

    if (uth->dlil_incremented_read != dlil_writer_waiting)
        panic("dlil_write_end - thread is not a writer");
    OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
    lck_mtx_unlock(dlil_mutex);
    uth->dlil_incremented_read = 0;
    wakeup(&dlil_read_count);
}
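
/*
 * Illustrative sketch (not compiled): the bracket discipline the
 * primitives above expect.  Readers wrap demux/filter walks in
 * dlil_read_begin()/dlil_read_end(); a writer must not already hold
 * the read side, or dlil_write_begin() returns EDEADLK and the caller
 * falls back to a delayed operation.  The function and ifnet names
 * here are placeholders, not real kernel objects.
 */
#if 0
static void
example_reader_and_writer(struct ifnet *example_ifp)
{
    struct ifnet_filter *f;

    /* Reader: safe traversal of the interface filter list */
    dlil_read_begin();
    TAILQ_FOREACH(f, &example_ifp->if_flt_head, filt_next) {
        /* inspect f here */
    }
    dlil_read_end();

    /* Writer: mutate the list, handling the EDEADLK case */
    if (dlil_write_begin() == 0) {
        /* modify example_ifp->if_flt_head here */
        dlil_write_end();
    } else {
        /* already a reader up the stack: defer the mutation,
           as dlil_detach_filter_internal() does */
    }
}
#endif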

#define PROTO_HASH_SLOTS    0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
    /*
     * dlil_proto_unplumb_all() depends on the mapping between
     * the hash bucket index and the protocol family defined
     * here; future changes must be applied there as well.
     */
    switch(protocol_family) {
        case PF_INET:
            return 0;
        case PF_INET6:
            return 1;
        case PF_APPLETALK:
            return 2;
        case PF_VLAN:
            return 3;
        default:
            return 4;
    }
}

static struct if_proto*
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
    struct if_proto *proto = NULL;
    u_long i = proto_hash_value(protocol_family);
    if (ifp->if_proto_hash) {
        proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
    }

    while(proto && proto->protocol_family != protocol_family) {
        proto = SLIST_NEXT(proto, next_hash);
    }

    return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
    OSAddAtomic(1, (SInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
    int oldval = OSAddAtomic(-1, (SInt32*)&proto->refcount);

    if (oldval == 1) { /* This was the last reference */
        FREE(proto, M_IFADDR);
    }
}

__private_extern__ void
ifnet_lock_assert(
    __unused struct ifnet *ifp,
    __unused int what)
{
#if IFNET_RW_LOCK
    /*
     * Not implemented for rw locks.
     *
     * Function exists so when/if we use mutex we can
     * enable this check.
     */
#else
    lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_shared(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_lock_exclusive(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
    lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
    struct ifnet *ifp)
{
#if IFNET_RW_LOCK
    lck_rw_done(ifp->if_lock);
#else
    lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
    lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared(void)
{
    lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive(void)
{
    lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done(void)
{
    lck_rw_done(ifnet_head_mutex);
}

static int dlil_ifp_proto_count(struct ifnet * ifp)
{
    int count = 0;
    int i;

    if (ifp->if_proto_hash != NULL) {
        for (i = 0; i < PROTO_HASH_SLOTS; i++) {
            struct if_proto *proto;
            SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                count++;
            }
        }
    }

    return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
              struct net_event_data *event_data, u_long event_data_len)
{
    struct net_event_data   ev_data;
    struct kev_msg          ev_msg;

    /*
     * A net event always starts with a net_event_data structure.
     * The caller can either post a simple net event or provide
     * a longer event structure that embeds one.
     */

    ev_msg.vendor_code  = KEV_VENDOR_APPLE;
    ev_msg.kev_class    = KEV_NETWORK_CLASS;
    ev_msg.kev_subclass = event_subclass;
    ev_msg.event_code   = event_code;

    if (event_data == 0) {
        event_data = &ev_data;
        event_data_len = sizeof(struct net_event_data);
    }

    strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
    event_data->if_family = ifp->if_family;
    event_data->if_unit   = (unsigned long) ifp->if_unit;

    ev_msg.dv[0].data_length = event_data_len;
    ev_msg.dv[0].data_ptr    = event_data;
    ev_msg.dv[1].data_length = 0;

    dlil_event_internal(ifp, &ev_msg);
}
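
/*
 * Illustrative sketch (not compiled): posting a simple net event for an
 * interface.  Passing NULL event_data makes dlil_post_msg() synthesize
 * the minimal net_event_data itself.  KEV_DL_LINK_ON is assumed to be
 * the standard KEV_DL_SUBCLASS link-up code; the surrounding function
 * is hypothetical.
 */
#if 0
static void
example_post_link_up(struct ifnet *example_ifp)
{
    dlil_post_msg(example_ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, NULL, 0);
}
#endif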

__private_extern__ int
dlil_create_input_thread(
    ifnet_t ifp, struct dlil_threading_info *inputthread)
{
    int error;

    bzero(inputthread, sizeof(*inputthread));
    // loopback ifp may not be configured at dlil_init time.
    if (ifp == lo_ifp)
        strlcat(inputthread->input_name, "dlil_input_main_thread_mtx", 32);
    else
        snprintf(inputthread->input_name, 32, "dlil_input_%s%d_mtx", ifp->if_name, ifp->if_unit);

    inputthread->lck_grp = lck_grp_alloc_init(inputthread->input_name, dlil_grp_attributes);
    inputthread->input_lck = lck_mtx_alloc_init(inputthread->lck_grp, dlil_lck_attributes);

    error = kernel_thread_start((thread_continue_t)dlil_input_thread_func, inputthread, &inputthread->input_thread);
    if (error == 0) {
        ml_thread_policy(inputthread->input_thread, MACHINE_GROUP,
                         (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
        /*
         * Except for the loopback dlil input thread, we create
         * an affinity set so that the matching workloop thread
         * can be scheduled on the same processor set.
         */
        if (net_affinity && inputthread != dlil_lo_thread_ptr) {
            struct thread *tp = inputthread->input_thread;
            u_int32_t tag;
            /*
             * Randomize to reduce the probability
             * of affinity tag namespace collision.
             */
            read_random(&tag, sizeof (tag));
            if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) {
                thread_reference(tp);
                inputthread->tag = tag;
                inputthread->net_affinity = TRUE;
            }
        }
    } else {
        panic("dlil_create_input_thread: couldn't create thread\n");
    }
    OSAddAtomic(1, (SInt32*)&cur_dlil_input_threads);
#if DLIL_DEBUG
    printf("dlil_create_input_thread: threadinfo: %p input_thread=%p threads: cur=%d max=%d\n",
           inputthread, inputthread->input_thread, dlil_multithreaded_input, cur_dlil_input_threads);
#endif
    return error;
}
__private_extern__ void
dlil_terminate_input_thread(
    struct dlil_threading_info *inputthread)
{
    OSAddAtomic(-1, (SInt32*)&cur_dlil_input_threads);

    lck_mtx_unlock(inputthread->input_lck);
    lck_mtx_free(inputthread->input_lck, inputthread->lck_grp);
    lck_grp_free(inputthread->lck_grp);

    FREE(inputthread, M_NKE);

    /* For the extra reference count from kernel_thread_start() */
    thread_deallocate(current_thread());

    thread_terminate(current_thread());
}

static kern_return_t
dlil_affinity_set(struct thread *tp, u_int32_t tag)
{
    thread_affinity_policy_data_t policy;

    bzero(&policy, sizeof (policy));
    policy.affinity_tag = tag;
    return (thread_policy_set(tp, THREAD_AFFINITY_POLICY,
                (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT));
}

void
dlil_init(void)
{
    PE_parse_boot_argn("net_affinity", &net_affinity, sizeof (net_affinity));

    TAILQ_INIT(&dlil_ifnet_head);
    TAILQ_INIT(&ifnet_head);

    /* Setup the lock groups we will use */
    dlil_grp_attributes = lck_grp_attr_alloc_init();

    dlil_lock_group = lck_grp_alloc_init("dlil internal locks", dlil_grp_attributes);
    ifnet_lock_group = lck_grp_alloc_init("ifnet locks", dlil_grp_attributes);
    ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", dlil_grp_attributes);
    dlil_input_lock_grp = lck_grp_alloc_init("dlil input lock", dlil_grp_attributes);

    /* Setup the lock attributes we will use */
    dlil_lck_attributes = lck_attr_alloc_init();

    ifnet_lock_attr = lck_attr_alloc_init();


    ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, dlil_lck_attributes);
    dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, dlil_lck_attributes);
    dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, dlil_lck_attributes);

    lck_attr_free(dlil_lck_attributes);
    dlil_lck_attributes = NULL;

    /*
     * Create and start up the first dlil input thread once everything is initialized
     */
    dlil_create_input_thread(0, dlil_lo_thread_ptr);

    (void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

__private_extern__ int
dlil_attach_filter(
    struct ifnet            *ifp,
    const struct iff_filter *if_filter,
    interface_filter_t      *filter_ref)
{
    int retval = 0;
    struct ifnet_filter *filter;

    MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
    if (filter == NULL)
        return ENOMEM;
    bzero(filter, sizeof(*filter));


    filter->filt_ifp = ifp;
    filter->filt_cookie = if_filter->iff_cookie;
    filter->filt_name = if_filter->iff_name;
    filter->filt_protocol = if_filter->iff_protocol;
    filter->filt_input = if_filter->iff_input;
    filter->filt_output = if_filter->iff_output;
    filter->filt_event = if_filter->iff_event;
    filter->filt_ioctl = if_filter->iff_ioctl;
    filter->filt_detached = if_filter->iff_detached;

    if ((retval = dlil_write_begin()) != 0) {
        /* Failed to acquire the write lock */
        FREE(filter, M_NKE);
        return retval;
    }
    TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
    dlil_write_end();
    *filter_ref = filter;
    return retval;
}
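
/*
 * Illustrative sketch (not compiled): a minimal interface filter.
 * Kexts normally reach this code through the public iflt_attach() KPI,
 * which forwards here.  The counter and callbacks below are
 * hypothetical; the iff_filter fields are exactly the ones copied
 * above.
 */
#if 0
static int example_pkt_count;   /* hypothetical private state */

static errno_t
example_filt_input(void *cookie, ifnet_t ifp, protocol_family_t proto,
                   mbuf_t *data, char **frame_ptr)
{
    example_pkt_count++;        /* observe, then let the packet continue */
    return 0;                   /* non-zero would swallow the packet */
}

static errno_t
example_attach(struct ifnet *example_ifp, interface_filter_t *ref)
{
    struct iff_filter f;

    bzero(&f, sizeof(f));
    f.iff_name = "com.example.filter";
    f.iff_protocol = 0;         /* 0 = see packets for all protocol families */
    f.iff_input = example_filt_input;
    return dlil_attach_filter(example_ifp, &f, ref);
}
#endif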

static int
dlil_detach_filter_internal(
    interface_filter_t  filter,
    int                 detached)
{
    int retval = 0;

    if (detached == 0) {
        ifnet_t             ifp = NULL;
        interface_filter_t  entry = NULL;

        /* Take the write lock */
        retval = dlil_write_begin();
        if (retval != 0 && retval != EDEADLK)
            return retval;

        /*
         * At this point either we have the write lock (retval == 0)
         * or we couldn't get it (retval == EDEADLK) because someone
         * else up the stack is holding the read lock. It is safe to
         * read, either the read or write is held. Verify the filter
         * parameter before proceeding.
         */
        ifnet_head_lock_shared();
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
                if (entry == filter)
                    break;
            }
            if (entry == filter)
                break;
        }
        ifnet_head_done();

        if (entry != filter) {
            /* filter parameter is not a valid filter ref */
            if (retval == 0) {
                dlil_write_end();
            }
            return EINVAL;
        }

        if (retval == EDEADLK) {
            /* Perform a delayed detach */
            filter->filt_detaching = 1;
            dlil_detach_waiting = 1;
            wakeup(&dlil_detach_waiting);
            return 0;
        }

        /* Remove the filter from the list */
        TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
        dlil_write_end();
    }

    /* Call the detached function if there is one */
    if (filter->filt_detached)
        filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

    /* Free the filter */
    FREE(filter, M_NKE);

    return retval;
}

__private_extern__ void
dlil_detach_filter(interface_filter_t filter)
{
    if (filter == NULL)
        return;
    dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_func(
    struct dlil_threading_info *inputthread)
{
    while (1) {
        struct mbuf *m = NULL, *m_loop = NULL;
#if IFNET_INPUT_SANITY_CHK
        int         loop_cnt = 0, mbuf_cnt;
        int         count;
        struct mbuf *m1;
#endif /* IFNET_INPUT_SANITY_CHK */

        lck_mtx_lock(inputthread->input_lck);

        /* Wait until there is work to be done */
        while ((inputthread->input_waiting & ~DLIL_INPUT_RUNNING) == 0) {
            inputthread->input_waiting &= ~DLIL_INPUT_RUNNING;
            msleep(&inputthread->input_waiting, inputthread->input_lck, 0, inputthread->input_name, 0);
        }


        lck_mtx_assert(inputthread->input_lck, LCK_MTX_ASSERT_OWNED);

        m = inputthread->mbuf_head;
        inputthread->mbuf_head = NULL;
        inputthread->mbuf_tail = NULL;

        if (inputthread->input_waiting & DLIL_INPUT_TERMINATE) {
            if (m)
                mbuf_freem_list(m);
            /* this is the end */
            dlil_terminate_input_thread(inputthread);
            return;
        }

        inputthread->input_waiting |= DLIL_INPUT_RUNNING;
        inputthread->input_waiting &= ~DLIL_INPUT_WAITING;

        if (inputthread == dlil_lo_thread_ptr) {
            m_loop = dlil_lo_input_mbuf_head;
            dlil_lo_input_mbuf_head = NULL;
            dlil_lo_input_mbuf_tail = NULL;
        }

#if IFNET_INPUT_SANITY_CHK
        if (dlil_input_sanity_check != 0) {
            mbuf_cnt = inputthread->mbuf_count;
            inputthread->mbuf_count = 0;
            if (inputthread == dlil_lo_thread_ptr) {
                loop_cnt = dlil_lo_input_mbuf_count;
                dlil_lo_input_mbuf_count = 0;
            }

            lck_mtx_unlock(inputthread->input_lck);

            for (m1 = m, count = 0; m1; m1 = mbuf_nextpkt(m1)) {
                count++;
            }
            if (count != mbuf_cnt) {
                panic("dlil_input_func - thread=%p reg. loop queue has %d packets, should have %d\n",
                      inputthread, count, mbuf_cnt);
            }

            if (inputthread == dlil_lo_thread_ptr) {
                for (m1 = m_loop, count = 0; m1; m1 = mbuf_nextpkt(m1)) {
                    count++;
                }
                if (count != loop_cnt) {
                    panic("dlil_input_func - thread=%p loop queue has %d packets, should have %d\n",
                          inputthread, count, loop_cnt);
                }
            }
        } else
#endif /* IFNET_INPUT_SANITY_CHK */
        {
            lck_mtx_unlock(inputthread->input_lck);
        }


        /*
         * NOTE warning %%% attention !!!!
         * We should think about putting some thread starvation safeguards if
         * we deal with long chains of packets.
         */
        if (m_loop) {
            if (inputthread == dlil_lo_thread_ptr)
                dlil_input_packet_list(lo_ifp, m_loop);
#if IFNET_INPUT_SANITY_CHK
            else
                panic("dlil_input_func - thread=%p loop queue has %d packets, should have none!\n",
                      inputthread, loop_cnt);
#endif /* IFNET_INPUT_SANITY_CHK */
        }


        if (m)
            dlil_input_packet_list(0, m);


        lck_mtx_lock(inputthread->input_lck);

        if ((inputthread->input_waiting & (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER)) != 0) {
            lck_mtx_unlock(inputthread->input_lck);
            proto_input_run();
        }
        else
            lck_mtx_unlock(inputthread->input_lck);
    }
}

errno_t
ifnet_input(
    ifnet_t ifp,
    mbuf_t m_head,
    const struct ifnet_stat_increment_param *stats)
{
    struct thread *tp = current_thread();
    mbuf_t m_tail;
    struct dlil_threading_info *inp;
#if IFNET_INPUT_SANITY_CHK
    u_int32_t pkt_count = 0;
#endif /* IFNET_INPUT_SANITY_CHK */

    if (ifp == NULL || m_head == NULL) {
        if (m_head)
            mbuf_freem_list(m_head);
        return EINVAL;
    }

    m_tail = m_head;
    while (1) {
#if IFNET_INPUT_SANITY_CHK
        if (dlil_input_sanity_check != 0) {
            ifnet_t rcvif;

            rcvif = mbuf_pkthdr_rcvif(m_tail);
            pkt_count++;

            if (rcvif == NULL ||
                (ifp->if_type != IFT_LOOP && rcvif != ifp) ||
                (mbuf_flags(m_head) & MBUF_PKTHDR) == 0) {
                panic("ifnet_input - invalid mbuf %p\n", m_tail);
            }
        }
#endif /* IFNET_INPUT_SANITY_CHK */
        if (mbuf_nextpkt(m_tail) == NULL)
            break;
        m_tail = mbuf_nextpkt(m_tail);
    }

    inp = ifp->if_input_thread;

    if (dlil_multithreaded_input == 0 || inp == NULL)
        inp = dlil_lo_thread_ptr;

    /*
     * If there is a matching dlil input thread associated with an
     * affinity set, associate this workloop thread with the same set.
     * We will only do this once.
     */
    lck_mtx_lock(inp->input_lck);
    if (inp->net_affinity && inp->workloop_thread == NULL) {
        u_int32_t tag = inp->tag;
        inp->workloop_thread = tp;
        lck_mtx_unlock(inp->input_lck);

        /* Associate the current thread with the new affinity tag */
        (void) dlil_affinity_set(tp, tag);

        /*
         * Take a reference on the workloop (current) thread; during
         * detach, we will need to refer to it in order to tear down
         * its affinity.
         */
        thread_reference(tp);
        lck_mtx_lock(inp->input_lck);
    }

    /* WARNING
     * Because of loopbacked multicast we cannot stuff the ifp in
     * the rcvif of the packet header: loopback has its own dlil
     * input queue
     */

    if (inp == dlil_lo_thread_ptr && ifp->if_type == IFT_LOOP) {
        if (dlil_lo_input_mbuf_head == NULL)
            dlil_lo_input_mbuf_head = m_head;
        else if (dlil_lo_input_mbuf_tail != NULL)
            dlil_lo_input_mbuf_tail->m_nextpkt = m_head;
        dlil_lo_input_mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
        if (dlil_input_sanity_check != 0) {
            dlil_lo_input_mbuf_count += pkt_count;
            inp->input_mbuf_cnt += pkt_count;
            inp->input_wake_cnt++;

            lck_mtx_assert(inp->input_lck, LCK_MTX_ASSERT_OWNED);
        }
#endif
    }
    else {
        if (inp->mbuf_head == NULL)
            inp->mbuf_head = m_head;
        else if (inp->mbuf_tail != NULL)
            inp->mbuf_tail->m_nextpkt = m_head;
        inp->mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
        if (dlil_input_sanity_check != 0) {
            inp->mbuf_count += pkt_count;
            inp->input_mbuf_cnt += pkt_count;
            inp->input_wake_cnt++;

            lck_mtx_assert(inp->input_lck, LCK_MTX_ASSERT_OWNED);
        }
#endif
    }


    inp->input_waiting |= DLIL_INPUT_WAITING;
    if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) {
        wakeup((caddr_t)&inp->input_waiting);
    }
    if (stats) {
        ifp->if_data.ifi_ipackets += stats->packets_in;
        ifp->if_data.ifi_ibytes += stats->bytes_in;
        ifp->if_data.ifi_ierrors += stats->errors_in;

        ifp->if_data.ifi_opackets += stats->packets_out;
        ifp->if_data.ifi_obytes += stats->bytes_out;
        ifp->if_data.ifi_oerrors += stats->errors_out;

        ifp->if_data.ifi_collisions += stats->collisions;
        ifp->if_data.ifi_iqdrops += stats->dropped;
    }

    lck_mtx_unlock(inp->input_lck);

    return 0;
}
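
/*
 * Illustrative sketch (not compiled): how a driver typically hands a
 * received chain to the stack.  ifnet_input() takes ownership of the
 * whole m_nextpkt chain (it frees it itself on error); each packet
 * must carry a valid rcvif, and the stat increments describe the
 * entire chain.  The names below are placeholders for driver state.
 */
#if 0
static void
example_driver_rx(ifnet_t example_ifp, mbuf_t chain,
                  u_int32_t npkts, u_int32_t nbytes)
{
    struct ifnet_stat_increment_param stats;

    bzero(&stats, sizeof(stats));
    stats.packets_in = npkts;
    stats.bytes_in = nbytes;

    (void) ifnet_input(example_ifp, chain, &stats);
}
#endif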

static int
dlil_interface_filters_input(struct ifnet * ifp, struct mbuf * * m_p,
                             char * * frame_header_p,
                             protocol_family_t protocol_family)
{
    struct ifnet_filter * filter;

    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        int result;

        if (filter->filt_input
            && (filter->filt_protocol == 0
                || filter->filt_protocol == protocol_family)) {
            result = (*filter->filt_input)(filter->filt_cookie,
                                           ifp, protocol_family,
                                           m_p, frame_header_p);
            if (result != 0) {
                return (result);
            }
        }
    }
    return (0);
}

static void
dlil_ifproto_input(struct if_proto * ifproto, mbuf_t m)
{
    int error;

    if (ifproto->proto_kpi == kProtoKPI_v1) {
        /* Version 1 protocols get one packet at a time */
        while (m != NULL) {
            char *  frame_header;
            mbuf_t  next_packet;

            next_packet = m->m_nextpkt;
            m->m_nextpkt = NULL;
            frame_header = m->m_pkthdr.header;
            m->m_pkthdr.header = NULL;
            error = (*ifproto->kpi.v1.input)(ifproto->ifp,
                                             ifproto->protocol_family,
                                             m, frame_header);
            if (error != 0 && error != EJUSTRETURN)
                m_freem(m);
            m = next_packet;
        }
    }
    else if (ifproto->proto_kpi == kProtoKPI_v2) {
        /* Version 2 protocols support packet lists */
        error = (*ifproto->kpi.v2.input)(ifproto->ifp,
                                         ifproto->protocol_family,
                                         m);
        if (error != 0 && error != EJUSTRETURN)
            m_freem_list(m);
    }
    return;
}

__private_extern__ void
dlil_input_packet_list(struct ifnet * ifp_param, struct mbuf *m)
{
    int error = 0;
    int locked = 0;
    protocol_family_t protocol_family;
    mbuf_t next_packet;
    ifnet_t ifp = ifp_param;
    char * frame_header;
    struct if_proto * last_ifproto = NULL;
    mbuf_t pkt_first = NULL;
    mbuf_t * pkt_next = NULL;

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

    while (m != NULL) {
        struct if_proto * ifproto = NULL;

        next_packet = m->m_nextpkt;
        m->m_nextpkt = NULL;
        if (ifp_param == NULL)
            ifp = m->m_pkthdr.rcvif;
        frame_header = m->m_pkthdr.header;
        m->m_pkthdr.header = NULL;

        if (locked == 0) {
            /* dlil lock protects the demux and interface filters */
            locked = 1;
            dlil_read_begin();
        }
        /* find which protocol family this packet is for */
        error = (*ifp->if_demux)(ifp, m, frame_header,
                                 &protocol_family);
        if (error != 0) {
            if (error == EJUSTRETURN) {
                goto next;
            }
            protocol_family = 0;
        }

        /* DANGER!!! */
        if (m->m_flags & (M_BCAST|M_MCAST))
            ifp->if_imcasts++;

        /* run interface filters, exclude VLAN packets PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            int filter_result;

            filter_result = dlil_interface_filters_input(ifp, &m,
                                                         &frame_header,
                                                         protocol_family);
            if (filter_result != 0) {
                if (filter_result != EJUSTRETURN) {
                    m_freem(m);
                }
                goto next;
            }
        }
        if (error != 0 || ((m->m_flags & M_PROMISC) != 0) ) {
            m_freem(m);
            goto next;
        }

        /* Lookup the protocol attachment to this interface */
        if (protocol_family == 0) {
            ifproto = NULL;
        }
        else if (last_ifproto != NULL
                 && last_ifproto->ifp == ifp
                 && (last_ifproto->protocol_family
                     == protocol_family)) {
            ifproto = last_ifproto;
        }
        else {
            ifproto = find_attached_proto(ifp, protocol_family);
        }
        if (ifproto == NULL) {
            /* no protocol for this packet, discard */
            m_freem(m);
            goto next;
        }
        if (ifproto != last_ifproto) {
            /* make sure ifproto can't go away during input */
            if_proto_ref(ifproto);
            if (last_ifproto != NULL) {
                /* pass up the list for the previous protocol */
                dlil_read_end();

                dlil_ifproto_input(last_ifproto, pkt_first);
                pkt_first = NULL;
                if_proto_free(last_ifproto);
                dlil_read_begin();
            }
            last_ifproto = ifproto;
        }
        /* extend the list */
        m->m_pkthdr.header = frame_header;
        if (pkt_first == NULL) {
            pkt_first = m;
        } else {
            *pkt_next = m;
        }
        pkt_next = &m->m_nextpkt;

    next:
        if (next_packet == NULL && last_ifproto != NULL) {
            /* pass up the last list of packets */
            dlil_read_end();

            dlil_ifproto_input(last_ifproto, pkt_first);
            if_proto_free(last_ifproto);
            locked = 0;
        }
        m = next_packet;

    }
    if (locked != 0) {
        dlil_read_end();
    }
    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
    return;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
    struct ifnet_filter *filter;

    if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
        dlil_read_begin();

        /* Pass the event to the interface filters */
        TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
            if (filter->filt_event)
                filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
        }

        if (ifp->if_proto_hash) {
            int i;

            for (i = 0; i < PROTO_HASH_SLOTS; i++) {
                struct if_proto *proto;

                SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
                    proto_media_event eventp = proto->proto_kpi == kProtoKPI_v1
                        ? proto->kpi.v1.event : proto->kpi.v2.event;

                    if (eventp)
                        eventp(ifp, proto->protocol_family, event);
                }
            }
        }

        dlil_read_end();

        /* Pass the event to the interface */
        if (ifp->if_event)
            ifp->if_event(ifp, event);

        if (ifp_unuse(ifp))
            ifp_use_reached_zero(ifp);
    }

    return kev_post_msg(event);
}

errno_t
ifnet_event(
    ifnet_t ifp,
    struct kern_event_msg *event)
{
    struct kev_msg kev_msg;
    int result = 0;

    if (ifp == NULL || event == NULL) return EINVAL;

    kev_msg.vendor_code = event->vendor_code;
    kev_msg.kev_class = event->kev_class;
    kev_msg.kev_subclass = event->kev_subclass;
    kev_msg.event_code = event->event_code;
    kev_msg.dv[0].data_ptr = &event->event_data[0];
    kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
    kev_msg.dv[1].data_length = 0;

    result = dlil_event_internal(ifp, &kev_msg);

    return result;
}

#if CONFIG_MACF_NET
#include <netinet/ip6.h>
#include <netinet/ip.h>
static int dlil_get_socket_type(struct mbuf **mp, int family, int raw)
{
    struct mbuf *m;
    struct ip *ip;
    struct ip6_hdr *ip6;
    int type = SOCK_RAW;

    if (!raw) {
        switch (family) {
        case PF_INET:
            m = m_pullup(*mp, sizeof(struct ip));
            if (m == NULL)
                break;
            *mp = m;
            ip = mtod(m, struct ip *);
            if (ip->ip_p == IPPROTO_TCP)
                type = SOCK_STREAM;
            else if (ip->ip_p == IPPROTO_UDP)
                type = SOCK_DGRAM;
            break;
        case PF_INET6:
            m = m_pullup(*mp, sizeof(struct ip6_hdr));
            if (m == NULL)
                break;
            *mp = m;
            ip6 = mtod(m, struct ip6_hdr *);
            if (ip6->ip6_nxt == IPPROTO_TCP)
                type = SOCK_STREAM;
            else if (ip6->ip6_nxt == IPPROTO_UDP)
                type = SOCK_DGRAM;
            break;
        }
    }

    return (type);
}
#endif

#if 0
int
dlil_output_list(
    struct ifnet* ifp,
    u_long proto_family,
    struct mbuf     *packetlist,
    caddr_t         route,
    const struct sockaddr   *dest,
    int             raw)
{
    char *frame_type = NULL;
    char *dst_linkaddr = NULL;
    int retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;
    struct if_proto *proto = 0;
    mbuf_t  m;
    mbuf_t  send_head = NULL;
    mbuf_t  *send_tail = &send_head;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    if (raw == 0) {
        proto = find_attached_proto(ifp, proto_family);
        if (proto == NULL) {
            retval = ENXIO;
            goto cleanup;
        }
    }

preout_again:
    if (packetlist == NULL)
        goto cleanup;
    m = packetlist;
    packetlist = packetlist->m_nextpkt;
    m->m_nextpkt = NULL;

    if (raw == 0) {
        proto_media_preout preoutp = proto->proto_kpi == kProtoKPI_v1
            ? proto->kpi.v1.pre_output : proto->kpi.v2.pre_output;
        retval = 0;
        if (preoutp)
            retval = preoutp(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);

        if (retval) {
            if (retval == EJUSTRETURN) {
                goto preout_again;
            }

            m_freem(m);
            goto cleanup;
        }
    }

    do {
#if CONFIG_MACF_NET
        retval = mac_ifnet_check_transmit(ifp, m, proto_family,
            dlil_get_socket_type(&m, proto_family, raw));
        if (retval) {
            m_freem(m);
            goto cleanup;
        }
#endif

        if (raw == 0 && ifp->if_framer) {
            retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
            if (retval) {
                if (retval != EJUSTRETURN) {
                    m_freem(m);
                }
                goto next;
            }
        }

#if BRIDGE
        /* !!!LOCKING!!!
         *
         * Need to consider how to handle this.
         * Also note that return should be a goto cleanup
         */
        broken-locking
        if (do_bridge) {
            struct mbuf *m0 = m;
            struct ether_header *eh = mtod(m, struct ether_header *);

            if (m->m_pkthdr.rcvif)
                m->m_pkthdr.rcvif = NULL;
            ifp = bridge_dst_lookup(eh);
            bdg_forward(&m0, ifp);
            if (m0)
                m_freem(m0);

            return 0 - should be goto cleanup?
        }
#endif

        /*
         * Let interface filters (if any) do their thing ...
         */
        /* Do not pass VLAN tagged packets to filters PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
                if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                    filter->filt_output) {
                    retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                    if (retval) {
                        if (retval != EJUSTRETURN)
                            m_freem(m);
                        goto next;
                    }
                }
            }
        }

        /*
         * Finally, call the driver.
         */

        if ((ifp->if_eflags & IFEF_SENDLIST) != 0) {
            *send_tail = m;
            send_tail = &m->m_nextpkt;
        }
        else {
            KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
            retval = ifp->if_output(ifp, m);
            if (retval) {
                printf("dlil_output: output error retval = %x\n", retval);
            }
            KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

next:
        m = packetlist;
        if (m) {
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
        }
    } while (m);

    if (send_head) {
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
        retval = ifp->if_output(ifp, send_head);
        if (retval) {
            printf("dlil_output: output error retval = %x\n", retval);
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
    }

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
    dlil_read_end();
    if (packetlist) /* if any packet left, clean up */
        mbuf_freem_list(packetlist);
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}
#endif

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
__private_extern__ errno_t
dlil_output(
    ifnet_t                 ifp,
    protocol_family_t       proto_family,
    mbuf_t                  packetlist,
    void                    *route,
    const struct sockaddr   *dest,
    int                     raw)
{
    char *frame_type = NULL;
    char *dst_linkaddr = NULL;
    int retval = 0;
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct ifnet_filter *filter;
    struct if_proto *proto = 0;
    mbuf_t  m;
    mbuf_t  send_head = NULL;
    mbuf_t  *send_tail = &send_head;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

    dlil_read_begin();

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    if (raw == 0) {
        proto = find_attached_proto(ifp, proto_family);
        if (proto == NULL) {
            retval = ENXIO;
            goto cleanup;
        }
    }

preout_again:
    if (packetlist == NULL)
        goto cleanup;
    m = packetlist;
    packetlist = packetlist->m_nextpkt;
    m->m_nextpkt = NULL;

    if (raw == 0) {
        proto_media_preout preoutp = proto->proto_kpi == kProtoKPI_v1
            ? proto->kpi.v1.pre_output : proto->kpi.v2.pre_output;
        retval = 0;
        if (preoutp)
            retval = preoutp(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);

        if (retval) {
            if (retval == EJUSTRETURN) {
                goto preout_again;
            }

            m_freem(m);
            goto cleanup;
        }
    }

#if CONFIG_MACF_NET
    retval = mac_ifnet_check_transmit(ifp, m, proto_family,
        dlil_get_socket_type(&m, proto_family, raw));
    if (retval) {
        m_freem(m);
        goto cleanup;
    }
#endif

    do {
        if (raw == 0 && ifp->if_framer) {
            retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
            if (retval) {
                if (retval != EJUSTRETURN) {
                    m_freem(m);
                }
                goto next;
            }
        }

#if BRIDGE
        /* !!!LOCKING!!!
         *
         * Need to consider how to handle this.
         * Also note that return should be a goto cleanup
         */
        broken-locking
        if (do_bridge) {
            struct mbuf *m0 = m;
            struct ether_header *eh = mtod(m, struct ether_header *);

            if (m->m_pkthdr.rcvif)
                m->m_pkthdr.rcvif = NULL;
            ifp = bridge_dst_lookup(eh);
            bdg_forward(&m0, ifp);
            if (m0)
                m_freem(m0);

            return 0 - should be goto cleanup?
        }
#endif

        /*
         * Let interface filters (if any) do their thing ...
         */
        /* Do not pass VLAN tagged packets to filters PR-3586856 */
        if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
            TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
                if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
                    filter->filt_output) {
                    retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
                    if (retval) {
                        if (retval != EJUSTRETURN)
                            m_freem(m);
                        goto next;
                    }
                }
            }
        }

        /*
         * If the underlying interface is not capable of handling a
         * packet whose data portion spans across physically disjoint
         * pages, we need to "normalize" the packet so that we pass
         * down a chain of mbufs where each mbuf points to a span that
         * resides in the system page boundary. If the packet does
         * not cross page(s), the following is a no-op.
         */
        if (!(ifp->if_hwassist & IFNET_MULTIPAGES)) {
            if ((m = m_normalize(m)) == NULL)
                goto next;
        }

        /*
         * Finally, call the driver.
         */

        if ((ifp->if_eflags & IFEF_SENDLIST) != 0) {
            *send_tail = m;
            send_tail = &m->m_nextpkt;
        }
        else {
            KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
            retval = ifp->if_output(ifp, m);
            if (retval) {
                printf("dlil_output: output error retval = %x\n", retval);
            }
            KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

next:
        m = packetlist;
        if (m) {
            packetlist = packetlist->m_nextpkt;
            m->m_nextpkt = NULL;
        }
    } while (m);

    if (send_head) {
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
        retval = ifp->if_output(ifp, send_head);
        if (retval) {
            printf("dlil_output: output error retval = %x\n", retval);
        }
        KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
    }

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
    dlil_read_end();
    if (packetlist) /* if any packet left, clean up */
        mbuf_freem_list(packetlist);
    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

errno_t
ifnet_ioctl(
    ifnet_t ifp,
    protocol_family_t proto_fam,
    u_int32_t ioctl_code,
    void *ioctl_arg)
{
    struct ifnet_filter *filter;
    int retval = EOPNOTSUPP;
    int result = 0;
    int holding_read = 0;

    if (ifp == NULL || ioctl_code == 0)
        return EINVAL;

    /* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
    result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
    if (result != 0)
        return EOPNOTSUPP;

    dlil_read_begin();
    holding_read = 1;

    /* Run the interface filters first.
     * We want to run all filters before calling the protocol,
     * interface family, or interface.
     */
    TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
        if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
            filter->filt_ioctl != NULL) {
            result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /* Allow the protocol to handle the ioctl */
    if (proto_fam) {
        struct if_proto *proto = find_attached_proto(ifp, proto_fam);

        if (proto != 0) {
            proto_media_ioctl ioctlp = proto->proto_kpi == kProtoKPI_v1
                ? proto->kpi.v1.ioctl : proto->kpi.v2.ioctl;
            result = EOPNOTSUPP;
            if (ioctlp)
                result = ioctlp(ifp, proto_fam, ioctl_code, ioctl_arg);

            /* Only update retval if no one has handled the ioctl */
            if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
                if (result == ENOTSUP)
                    result = EOPNOTSUPP;
                retval = result;
                if (retval && retval != EOPNOTSUPP) {
                    goto cleanup;
                }
            }
        }
    }

    /*
     * Since we have incremented the use count on the ifp, we are guaranteed
     * that the ifp will not go away (the function pointers may not be changed).
     * We release the dlil read lock so the interface ioctl may trigger a
     * protocol attach. This happens with vlan and may occur with other virtual
     * interfaces.
     */
    dlil_read_end();
    holding_read = 0;

    /* retval is either 0 or EOPNOTSUPP */

    /*
     * Let the interface handle this ioctl.
     * If it returns EOPNOTSUPP, ignore that, we may have
     * already handled this in the protocol or family.
     */
    if (ifp->if_ioctl)
        result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

    /* Only update retval if no one has handled the ioctl */
    if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
        if (result == ENOTSUP)
            result = EOPNOTSUPP;
        retval = result;
        if (retval && retval != EOPNOTSUPP) {
            goto cleanup;
        }
    }

cleanup:
    if (holding_read)
        dlil_read_end();
    if (ifp_unuse(ifp))
        ifp_use_reached_zero(ifp);

    if (retval == EJUSTRETURN)
        retval = 0;
    return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
    ifnet_t ifp,
    bpf_tap_mode mode,
    bpf_packet_func callback)
{
    errno_t error = 0;

    dlil_read_begin();
    if (ifp->if_set_bpf_tap)
        error = ifp->if_set_bpf_tap(ifp, mode, callback);
    dlil_read_end();

    return error;
}

errno_t
dlil_resolve_multi(
    struct ifnet *ifp,
    const struct sockaddr *proto_addr,
    struct sockaddr *ll_addr,
    size_t ll_len)
{
    errno_t result = EOPNOTSUPP;
    struct if_proto *proto;
    const struct sockaddr *verify;
    proto_media_resolve_multi resolvep;

    dlil_read_begin();

    bzero(ll_addr, ll_len);

    /* Call the protocol first */
    proto = find_attached_proto(ifp, proto_addr->sa_family);
    if (proto != NULL) {
        resolvep = proto->proto_kpi == kProtoKPI_v1
            ? proto->kpi.v1.resolve_multi : proto->kpi.v2.resolve_multi;
        if (resolvep != NULL)
            result = resolvep(ifp, proto_addr,(struct sockaddr_dl*)ll_addr,
                              ll_len);
    }

    /* Let the interface verify the multicast address */
    if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
        if (result == 0)
            verify = ll_addr;
        else
            verify = proto_addr;
        result = ifp->if_check_multi(ifp, verify);
    }

    dlil_read_end();

    return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    struct if_proto *proto;
    errno_t result = 0;

    dlil_read_begin();

    proto = find_attached_proto(ifp, target_proto->sa_family);
    if (proto == NULL) {
        result = ENOTSUP;
    }
    else {
        proto_media_send_arp arpp;
        arpp = proto->proto_kpi == kProtoKPI_v1
            ? proto->kpi.v1.send_arp : proto->kpi.v2.send_arp;
        if (arpp == NULL)
            result = ENOTSUP;
        else
            result = arpp(ifp, arpop, sender_hw, sender_proto, target_hw,
                          target_proto);
    }

    dlil_read_end();

    return result;
}

static __inline__ int
_is_announcement(const struct sockaddr_in * sender_sin,
                 const struct sockaddr_in * target_sin)
{
    if (sender_sin == NULL) {
        return FALSE;
    }
    return (sender_sin->sin_addr.s_addr == target_sin->sin_addr.s_addr);
}

__private_extern__ errno_t
dlil_send_arp(
    ifnet_t ifp,
    u_short arpop,
    const struct sockaddr_dl* sender_hw,
    const struct sockaddr* sender_proto,
    const struct sockaddr_dl* target_hw,
    const struct sockaddr* target_proto)
{
    errno_t result = 0;
    const struct sockaddr_in * sender_sin;
    const struct sockaddr_in * target_sin;

    if (target_proto == NULL || (sender_proto &&
        sender_proto->sa_family != target_proto->sa_family))
        return EINVAL;

    /*
     * If this is an ARP request and the target IP is IPv4LL,
     * send the request on all interfaces. The exception is
     * an announcement, which must only appear on the specific
     * interface.
     */
    sender_sin = (const struct sockaddr_in *)sender_proto;
    target_sin = (const struct sockaddr_in *)target_proto;
    if (target_proto->sa_family == AF_INET
        && IN_LINKLOCAL(ntohl(target_sin->sin_addr.s_addr))
        && ipv4_ll_arp_aware != 0
        && arpop == ARPOP_REQUEST
        && !_is_announcement(target_sin, sender_sin)) {
        ifnet_t     *ifp_list;
        u_int32_t   count;
        u_int32_t   ifp_on;

        result = ENOTSUP;

        if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
            for (ifp_on = 0; ifp_on < count; ifp_on++) {
                errno_t  new_result;
                ifaddr_t source_hw = NULL;
                ifaddr_t source_ip = NULL;
                struct sockaddr_in source_ip_copy;

                /*
                 * Only arp on interfaces marked for IPv4LL ARPing. This may
                 * mean that we don't ARP on the interface the subnet route
                 * points to.
                 */
                if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
                    continue;
                }

                source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

                /* Find the source IP address */
                ifnet_lock_shared(ifp_list[ifp_on]);
                TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
                              ifa_link) {
                    if (source_ip->ifa_addr &&
                        source_ip->ifa_addr->sa_family == AF_INET) {
                        break;
                    }
                }

                /* No IP Source, don't arp */
                if (source_ip == NULL) {
                    ifnet_lock_done(ifp_list[ifp_on]);
                    continue;
                }

                /* Copy the source IP address */
                source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

                ifnet_lock_done(ifp_list[ifp_on]);

                /* Send the ARP */
                new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
                                (struct sockaddr_dl*)source_hw->ifa_addr,
                                (struct sockaddr*)&source_ip_copy, NULL,
                                target_proto);

                if (result == ENOTSUP) {
                    result = new_result;
                }
            }
        }

        ifnet_list_free(ifp_list);
    }
    else {
        result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
                                        target_hw, target_proto);
    }

    return result;
}

__private_extern__ int
ifp_use(
    struct ifnet *ifp,
    int handle_zero)
{
    int old_value;
    int retval = 0;

    do {
        old_value = ifp->if_usecnt;
        if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
            retval = ENXIO; // ifp is invalid
            break;
        }
    } while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

    return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */

__private_extern__ void
ifp_use_reached_zero(
    struct ifnet *ifp)
{
    ifnet_detached_func free_func;

    dlil_read_begin();

    if (ifp->if_usecnt != 0)
        panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

    ifnet_head_lock_exclusive();
    ifnet_lock_exclusive(ifp);

    /* Remove ourselves from the list */
    TAILQ_REMOVE(&ifnet_head, ifp, if_link);
    ifnet_addrs[ifp->if_index - 1] = NULL;

    /* ifp should be removed from the interface list */
    while (ifp->if_multiaddrs.lh_first) {
        struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

        /*
         * When the interface is gone, we will no longer
         * be listening on these multicasts. Various bits
         * of the stack may be referencing these multicasts,
         * release only our reference.
         */
        LIST_REMOVE(ifma, ifma_link);
        ifma->ifma_ifp = NULL;
        ifma_release(ifma);
    }
    ifnet_head_done();

    ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
    ifnet_lock_done(ifp);

    free_func = ifp->if_free;
    dlil_read_end();
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, NULL, 0);

    if (free_func)
        free_func(ifp);
}

__private_extern__ int
ifp_unuse(
    struct ifnet *ifp)
{
    int oldval;
    oldval = OSDecrementAtomic((SInt32*)&ifp->if_usecnt);
    if (oldval == 0)
        panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

    if (oldval > 1)
        return 0;

    if ((ifp->if_eflags & IFEF_DETACHING) == 0)
        panic("ifp_unuse: use count reached zero but detaching flag is not set!");

    return 1; /* caller must call ifp_use_reached_zero */
}

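/*
 * Illustrative sketch (not compiled): the two-piece unuse protocol the
 * comment above ifp_use_reached_zero describes.  ifp_use_reached_zero()
 * must run only after dlil_write_end(), so the caller latches
 * ifp_unuse()'s result.  The function name is hypothetical.
 */
#if 0
static void
example_release_use(struct ifnet *example_ifp)
{
    int reached_zero = 0;

    if (dlil_write_begin() == 0) {
        reached_zero = ifp_unuse(example_ifp);
        dlil_write_end();
    }
    if (reached_zero)
        ifp_use_reached_zero(example_ifp);
}
#endif
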
1991 extern lck_mtx_t *domain_proto_mtx;
1992
1993 static errno_t
1994 dlil_attach_protocol_internal(
1995 struct if_proto *proto,
1996 const struct ifnet_demux_desc *demux_list,
1997 u_int32_t demux_count)
1998 {
1999 struct kev_dl_proto_data ev_pr_data;
2000 struct ifnet *ifp = proto->ifp;
2001 int retval = 0;
2002 u_long hash_value = proto_hash_value(proto->protocol_family);
2003
2004 /* setup some of the common values */
2005 {
2006 struct domain *dp;
2007 lck_mtx_lock(domain_proto_mtx);
2008 dp = domains;
2009 while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
2010 dp = dp->dom_next;
2011 proto->dl_domain = dp;
2012 lck_mtx_unlock(domain_proto_mtx);
2013 }
2014
2015 /*
2016 * Take the write lock to protect readers and exclude other writers.
2017 */
2018 if ((retval = dlil_write_begin()) != 0) {
2019 printf("dlil_attach_protocol_internal - dlil_write_begin returned %d\n", retval);
2020 return retval;
2021 }
2022
2023 /* Check that the interface isn't currently detaching */
2024 ifnet_lock_shared(ifp);
2025 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2026 ifnet_lock_done(ifp);
2027 dlil_write_end();
2028 return ENXIO;
2029 }
2030 ifnet_lock_done(ifp);
2031
2032 if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
2033 dlil_write_end();
2034 return EEXIST;
2035 }
2036
2037 /*
2038 * Call family module add_proto routine so it can refine the
2039 * demux descriptors as it wishes.
2040 */
2041 retval = ifp->if_add_proto(ifp, proto->protocol_family, demux_list, demux_count);
2042 if (retval) {
2043 dlil_write_end();
2044 return retval;
2045 }
2046
2047 /*
2048 * We can't fail from this point on.
2049 * Increment the number of uses (protocol attachments + interface attached).
2050 */
2051 ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
2052
2053 /*
2054 * Insert the protocol at the tail of its hash bucket (preserving attachment order)
2055 */
2056 {
2057 struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
2058 while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
2059 prev_proto = SLIST_NEXT(prev_proto, next_hash);
2060 if (prev_proto)
2061 SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
2062 else
2063 SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
2064 }
2065
2066 /*
2067 * Add to if_proto list for this interface
2068 */
2069 if_proto_ref(proto);
2070 dlil_write_end();
2071
2072 /* the reserved field carries the number of protocols still attached (subject to change) */
2073 ev_pr_data.proto_family = proto->protocol_family;
2074 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
2075 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
2076 (struct net_event_data *)&ev_pr_data,
2077 sizeof(struct kev_dl_proto_data));
2078 #if 0
2079 DLIL_PRINTF("dlil. Attached protocol %d to %s%d - %d\n", proto->protocol_family,
2080 ifp->if_name, ifp->if_unit, retval);
2081 #endif
2082 return retval;
2083 }
2084
2085 errno_t
2086 ifnet_attach_protocol(ifnet_t ifp, protocol_family_t protocol,
2087 const struct ifnet_attach_proto_param *proto_details)
2088 {
2089 int retval = 0;
2090 struct if_proto *ifproto = NULL;
2091
2092 if (ifp == NULL || protocol == 0 || proto_details == NULL)
2093 return EINVAL;
2094
2095 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
2096 if (ifproto == 0) {
2097 DLIL_PRINTF("ERROR - dlil failed if_proto allocation\n");
2098 retval = ENOMEM;
2099 goto end;
2100 }
2101 bzero(ifproto, sizeof(*ifproto));
2102
2103 ifproto->ifp = ifp;
2104 ifproto->protocol_family = protocol;
2105 ifproto->proto_kpi = kProtoKPI_v1;
2106 ifproto->kpi.v1.input = proto_details->input;
2107 ifproto->kpi.v1.pre_output = proto_details->pre_output;
2108 ifproto->kpi.v1.event = proto_details->event;
2109 ifproto->kpi.v1.ioctl = proto_details->ioctl;
2110 ifproto->kpi.v1.detached = proto_details->detached;
2111 ifproto->kpi.v1.resolve_multi = proto_details->resolve;
2112 ifproto->kpi.v1.send_arp = proto_details->send_arp;
2113
2114 retval = dlil_attach_protocol_internal(ifproto,
2115 proto_details->demux_list, proto_details->demux_count);
2116
2117 end:
2118 if (retval && ifproto)
2119 FREE(ifproto, M_IFADDR);
2120 return retval;
2121 }
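/*
 * Hedged caller sketch for the v1 KPI above; PF_FOO, foo_input and the
 * ethertype value are hypothetical and not defined anywhere in xnu:
 *
 *	struct ifnet_demux_desc demux;
 *	struct ifnet_attach_proto_param pr;
 *	u_int16_t etype = htons(0x8999);	(hypothetical ethertype)
 *
 *	demux.type = DLIL_DESC_ETYPE2;
 *	demux.data = &etype;
 *	demux.datalen = sizeof(etype);
 *	bzero(&pr, sizeof(pr));
 *	pr.demux_list = &demux;
 *	pr.demux_count = 1;
 *	pr.input = foo_input;
 *	errno_t err = ifnet_attach_protocol(ifp, PF_FOO, &pr);
 */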
2122
2123 errno_t
2124 ifnet_attach_protocol_v2(ifnet_t ifp, protocol_family_t protocol,
2125 const struct ifnet_attach_proto_param_v2 *proto_details)
2126 {
2127 int retval = 0;
2128 struct if_proto *ifproto = NULL;
2129
2130 if (ifp == NULL || protocol == 0 || proto_details == NULL)
2131 return EINVAL;
2132
2133 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
2134 if (ifproto == 0) {
2135 DLIL_PRINTF("ERROR - dlil failed if_proto allocation\n");
2136 retval = ENOMEM;
2137 goto end;
2138 }
2139 bzero(ifproto, sizeof(*ifproto));
2140
2141 ifproto->ifp = ifp;
2142 ifproto->protocol_family = protocol;
2143 ifproto->proto_kpi = kProtoKPI_v2;
2144 ifproto->kpi.v2.input = proto_details->input;
2145 ifproto->kpi.v2.pre_output = proto_details->pre_output;
2146 ifproto->kpi.v2.event = proto_details->event;
2147 ifproto->kpi.v2.ioctl = proto_details->ioctl;
2148 ifproto->kpi.v2.detached = proto_details->detached;
2149 ifproto->kpi.v2.resolve_multi = proto_details->resolve;
2150 ifproto->kpi.v2.send_arp = proto_details->send_arp;
2151
2152 retval = dlil_attach_protocol_internal(ifproto,
2153 proto_details->demux_list, proto_details->demux_count);
2154
2155 end:
2156 if (retval && ifproto)
2157 FREE(ifproto, M_IFADDR);
2158 return retval;
2159 }
2160
2161 extern void if_rtproto_del(struct ifnet *ifp, int protocol);
2162
2163 static int
2164 dlil_detach_protocol_internal(
2165 struct if_proto *proto)
2166 {
2167 struct ifnet *ifp = proto->ifp;
2168 u_long proto_family = proto->protocol_family;
2169 struct kev_dl_proto_data ev_pr_data;
2170
2171 if (proto->proto_kpi == kProtoKPI_v1) {
2172 if (proto->kpi.v1.detached)
2173 proto->kpi.v1.detached(ifp, proto->protocol_family);
2174 }
2175 if (proto->proto_kpi == kProtoKPI_v2) {
2176 if (proto->kpi.v2.detached)
2177 proto->kpi.v2.detached(ifp, proto->protocol_family);
2178 }
2179 if_proto_free(proto);
2180
2181 /*
2182 * Clean up routes that may still be in the routing table for this interface/protocol pair.
2183 */
2184
2185 if_rtproto_del(ifp, proto_family);
2186
2187 /* the reserved field carries the number of protocols still attached (subject to change) */
2188 ev_pr_data.proto_family = proto_family;
2189 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
2190 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
2191 (struct net_event_data *)&ev_pr_data,
2192 sizeof(struct kev_dl_proto_data));
2193 return 0;
2194 }
2195
2196 errno_t
2197 ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family)
2198 {
2199 struct if_proto *proto = NULL;
2200 int retval = 0;
2201 int use_reached_zero = 0;
2202
2203 if (ifp == NULL || proto_family == 0) return EINVAL;
2204
2205 if ((retval = dlil_write_begin()) != 0) {
2206 if (retval == EDEADLK) {
2207 retval = 0;
2208 dlil_read_begin();
2209 proto = find_attached_proto(ifp, proto_family);
2210 if (proto == 0) {
2211 retval = ENXIO;
2212 }
2213 else {
2214 proto->detaching = 1;
2215 dlil_detach_waiting = 1;
2216 wakeup(&dlil_detach_waiting);
2217 }
2218 dlil_read_end();
2219 }
2220 goto end;
2221 }
2222
2223 proto = find_attached_proto(ifp, proto_family);
2224
2225 if (proto == NULL) {
2226 retval = ENXIO;
2227 dlil_write_end();
2228 goto end;
2229 }
2230
2231 /*
2232 * Call family module del_proto
2233 */
2234
2235 if (ifp->if_del_proto)
2236 ifp->if_del_proto(ifp, proto->protocol_family);
2237
2238 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);
2239
2240 /*
2241 * We can do the rest of the work outside of the write lock.
2242 */
2243 use_reached_zero = ifp_unuse(ifp);
2244 dlil_write_end();
2245
2246 dlil_detach_protocol_internal(proto);
2247
2248 /*
2249 * Defer the final teardown until after the detach message has
2250 * been posted, so that dlil_post_msg can still reference the
2251 * interface safely.
2252 */
2253
2254 if (use_reached_zero)
2255 ifp_use_reached_zero(ifp);
2256
2257 end:
2258 return retval;
2259 }
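/*
 * The corresponding teardown for the attach sketch above would be, e.g.:
 *
 *	(void) ifnet_detach_protocol(ifp, PF_FOO);
 *
 * Note that an EDEADLK from dlil_write_begin is absorbed here: the
 * protocol is flagged as detaching and handed off to
 * dlil_delayed_detach_thread instead of failing the call.
 */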
2260
2261 /*
2262 * dlil_delayed_detach_thread is responsible for detaching
2263 * protocols, protocol filters, and interface filters after
2264 * an attempt was made to detach one of those items while
2265 * it was not safe to do so (i.e., dlil_read_begin had been called).
2266 *
2267 * This function will take the dlil write lock and walk
2268 * through each of the interfaces looking for items with
2269 * the detaching flag set. When an item is found, it is
2270 * detached from the interface and placed on a local list.
2271 * After all of the items have been collected, we drop the
2272 * write lock and perform the post-detach work. This is done
2273 * so we only have to take the write lock once.
2274 *
2275 * When detaching a protocol, if we find that we have
2276 * detached the very last protocol and need to call
2277 * ifp_use_reached_zero, we have to break out of our work
2278 * and drop the write lock before making that call.
2279 */
2280
2281 static void
2282 dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2283 {
2284 thread_t self = current_thread();
2285 int asserted = 0;
2286
2287 ml_thread_policy(self, MACHINE_GROUP,
2288 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
2289
2290
2291 while (1) {
2292 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2293 struct ifnet *ifp;
2294 struct proto_hash_entry detached_protos;
2295 struct ifnet_filter_head detached_filters;
2296 struct if_proto *proto;
2297 struct if_proto *next_proto;
2298 struct ifnet_filter *filt;
2299 struct ifnet_filter *next_filt;
2300 int reached_zero;
2301
2302 reached_zero = 0;
2303
2304 /* Clear the detach waiting flag */
2305 dlil_detach_waiting = 0;
2306 TAILQ_INIT(&detached_filters);
2307 SLIST_INIT(&detached_protos);
2308
2309 ifnet_head_lock_shared();
2310 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2311 int i;
2312
2313 // Look for protocols and protocol filters
2314 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2315 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2316 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2317
2318 // Detach this protocol
2319 if (proto->detaching) {
2320 if (ifp->if_del_proto)
2321 ifp->if_del_proto(ifp, proto->protocol_family);
2322 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2323 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2324 reached_zero = ifp_unuse(ifp);
2325 if (reached_zero) {
2326 break;
2327 }
2328 }
2329 else {
2330 // Update prev_nextptr to point to our next ptr
2331 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2332 }
2333 }
2334 }
2335
2336 // look for interface filters that need to be detached
2337 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2338 next_filt = TAILQ_NEXT(filt, filt_next);
2339 if (filt->filt_detaching != 0) {
2340 // take this interface filter off the interface filter list
2341 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2342
2343 // put this interface filter on the detached filters list
2344 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2345 }
2346 }
2347
2348 if (ifp->if_delayed_detach) {
2349 ifp->if_delayed_detach = 0;
2350 reached_zero = ifp_unuse(ifp);
2351 }
2352
2353 if (reached_zero)
2354 break;
2355 }
2356 ifnet_head_done();
2357 dlil_write_end();
2358
2359 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2360 next_filt = TAILQ_NEXT(filt, filt_next);
2361 /*
2362 * dlil_detach_filter_internal won't remove an item from
2363 * the list if it is already detached (second parameter).
2364 * The item will be freed though.
2365 */
2366 dlil_detach_filter_internal(filt, 1);
2367 }
2368
2369 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2370 next_proto = SLIST_NEXT(proto, next_hash);
2371 dlil_detach_protocol_internal(proto);
2372 }
2373
2374 if (reached_zero) {
2375 ifp_use_reached_zero(ifp);
2376 dlil_detach_waiting = 1; // we may have missed something
2377 }
2378 }
2379
2380 if (!asserted && dlil_detach_waiting == 0) {
2381 asserted = 1;
2382 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2383 }
2384
2385 if (dlil_detach_waiting == 0) {
2386 asserted = 0;
2387 thread_block(dlil_delayed_detach_thread);
2388 }
2389 }
2390 }
2391
2392 static void
2393 dlil_call_delayed_detach_thread(void) {
2394 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2395 }
2396
2397 extern int if_next_index(void);
2398
2399 errno_t
2400 ifnet_attach(
2401 ifnet_t ifp,
2402 const struct sockaddr_dl *ll_addr)
2403 {
2404 u_long interface_family;
2405 struct ifnet *tmp_if;
2406 struct proto_hash_entry *new_proto_list = NULL;
2407 int locked = 0;
2408
2409 if (ifp == NULL) return EINVAL;
2410 if (ll_addr && ifp->if_addrlen == 0) {
2411 ifp->if_addrlen = ll_addr->sdl_alen;
2412 }
2413 else if (ll_addr && ll_addr->sdl_alen != ifp->if_addrlen) {
2414 return EINVAL;
2415 }
2416
2417 interface_family = ifp->if_family;
2418
2419 ifnet_head_lock_shared();
2420
2421 /* Verify we aren't already on the list */
2422 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2423 if (tmp_if == ifp) {
2424 ifnet_head_done();
2425 return EEXIST;
2426 }
2427 }
2428
2429 ifnet_head_done();
2430
2431 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2432 #if IFNET_RW_LOCK
2433 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2434 #else
2435 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2436 #endif
2437
2438 if (ifp->if_lock == 0) {
2439 return ENOMEM;
2440 }
2441
2442 /*
2443 * Allow interfaces without protocol families to attach
2444 * only if they have the necessary fields filled out.
2445 */
2446
2447 if (ifp->if_add_proto == 0 || ifp->if_del_proto == 0) {
2448 DLIL_PRINTF("dlil Attempt to attach interface without family module - %ld\n",
2449 interface_family);
2450 return ENODEV;
2451 }
2452
2453 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2454 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2455 M_NKE, M_WAITOK);
2456
2457 if (new_proto_list == 0) {
2458 return ENOBUFS;
2459 }
2460 }
2461
2462 dlil_write_begin();
2463 locked = 1;
2464
2465 TAILQ_INIT(&ifp->if_flt_head);
2466
2467
2468 if (new_proto_list) {
2469 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2470 ifp->if_proto_hash = new_proto_list;
2471 new_proto_list = NULL;
2472 }
2473
2474 /* old_if_attach */
2475 {
2476 char workbuf[64];
2477 int namelen, masklen, socksize, ifasize;
2478 struct ifaddr *ifa = NULL;
2479
2480 if (ifp->if_snd.ifq_maxlen == 0)
2481 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2482 TAILQ_INIT(&ifp->if_prefixhead);
2483 LIST_INIT(&ifp->if_multiaddrs);
2484 ifnet_touch_lastchange(ifp);
2485
2486 /* usecount to track attachment to the ifnet list */
2487 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2488
2489 /* Lock the list of interfaces */
2490 ifnet_head_lock_exclusive();
2491 ifnet_lock_exclusive(ifp);
2492
2493 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0)
2494 ifp->if_index = if_next_index();
2495 else
2496 ifa = TAILQ_FIRST(&ifp->if_addrhead);
2497
2498 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2499 #define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2500 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2501 socksize = masklen + ifp->if_addrlen;
2502 #define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2503 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2504 socksize = sizeof(struct sockaddr_dl);
2505 socksize = ROUNDUP(socksize);
2506 ifasize = sizeof(struct ifaddr) + 2 * socksize;
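/*
 * Worked example (illustrative; the exact offset is platform
 * dependent): for "en0", namelen is 3, so masklen is the offset of
 * sdl_data plus 3; with a 6-byte Ethernet address, socksize becomes
 * masklen + 6, rounded up to a multiple of sizeof(long); ifasize is
 * then sizeof(struct ifaddr) plus room for both the address and the
 * netmask sockaddrs.
 */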
2507
2508 /*
2509 * Allocate a new ifa if we don't have one
2510 * or the old one is too small.
2511 */
2512 if (ifa == NULL || socksize > ifa->ifa_addr->sa_len) {
2513 if (ifa)
2514 if_detach_ifa(ifp, ifa);
2515 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2516 }
2517
2518 if (ifa) {
2519 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2520 ifnet_addrs[ifp->if_index - 1] = ifa;
2521 bzero(ifa, ifasize);
2522 sdl->sdl_len = socksize;
2523 sdl->sdl_family = AF_LINK;
2524 bcopy(workbuf, sdl->sdl_data, namelen);
2525 sdl->sdl_nlen = namelen;
2526 sdl->sdl_index = ifp->if_index;
2527 sdl->sdl_type = ifp->if_type;
2528 if (ll_addr) {
2529 sdl->sdl_alen = ll_addr->sdl_alen;
2530 if (ll_addr->sdl_alen != ifp->if_addrlen)
2531 panic("ifnet_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2532 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2533 }
2534 ifa->ifa_ifp = ifp;
2535 ifa->ifa_rtrequest = link_rtrequest;
2536 ifa->ifa_addr = (struct sockaddr*)sdl;
2537 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2538 ifa->ifa_netmask = (struct sockaddr*)sdl;
2539 sdl->sdl_len = masklen;
2540 while (namelen != 0)
2541 sdl->sdl_data[--namelen] = 0xff;
2542 }
2543
2544 TAILQ_INIT(&ifp->if_addrhead);
2545 ifa = ifnet_addrs[ifp->if_index - 1];
2546
2547 if (ifa) {
2548 /*
2549 * We don't use if_attach_ifa because we want
2550 * this address to be first on the list.
2551 */
2552 ifaref(ifa);
2553 ifa->ifa_debug |= IFA_ATTACHED;
2554 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
2555 }
2556 #if CONFIG_MACF_NET
2557 mac_ifnet_label_associate(ifp);
2558 #endif
2559
2560 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2561 ifindex2ifnet[ifp->if_index] = ifp;
2562
2563 ifnet_head_done();
2564 }
2565
2566 /*
2567 * A dedicated dlil input thread is created for each Ethernet interface;
2568 * pseudo interfaces and other interface types use the main ("loopback") thread.
2569 * If the sysctl "net.link.generic.system.multi_threaded_input" is set to zero,
2570 * all packets are handled by the main loopback thread, reverting to the
2571 * 10.4.x behaviour.
2572 */
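/*
 * For example (user space, illustrative):
 *
 *	sysctl -w net.link.generic.system.multi_threaded_input=0
 */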
2573
2574 if (ifp->if_type == IFT_ETHER) {
2575 int err;
2576
2577 if (dlil_multithreaded_input > 0) {
2578 ifp->if_input_thread = _MALLOC(sizeof(struct dlil_threading_info), M_NKE, M_WAITOK);
2579 if (ifp->if_input_thread == NULL)
2580 panic("ifnet_attach ifp=%p couldn't alloc threading\n", ifp);
2581 if ((err = dlil_create_input_thread(ifp, ifp->if_input_thread)) != 0)
2582 panic("ifnet_attach ifp=%p couldn't get a thread. err=%x\n", ifp, err);
2583 #ifdef DLIL_DEBUG
2584 printf("ifnet_attach: dlil thread for ifp=%p if_index=%x\n", ifp, ifp->if_index);
2585 #endif
2586 }
2587 }
2588 dlil_write_end();
2589 ifnet_lock_done(ifp);
2590
2591 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, NULL, 0);
2592
2593 return 0;
2594 }
2595
2596 errno_t
2597 ifnet_detach(
2598 ifnet_t ifp)
2599 {
2600 struct ifnet_filter *filter;
2601 struct ifnet_filter *filter_next;
2602 int zeroed = 0;
2603 int retval = 0;
2604 struct ifnet_filter_head fhead;
2605 struct dlil_threading_info *inputthread;
2606
2607 if (ifp == NULL) return EINVAL;
2608
2609 ifnet_lock_exclusive(ifp);
2610
2611 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2612 /* Interface has already been detached */
2613 ifnet_lock_done(ifp);
2614 return ENXIO;
2615 }
2616
2617 /*
2618 * Indicate this interface is being detached.
2619 *
2620 * This should prevent protocols from attaching
2621 * from this point on. Interface will remain on
2622 * the list until all of the protocols are detached.
2623 */
2624 ifp->if_eflags |= IFEF_DETACHING;
2625 ifnet_lock_done(ifp);
2626
2627 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, NULL, 0);
2628
2629 /* Let BPF know we're detaching */
2630 bpfdetach(ifp);
2631
2632 if ((retval = dlil_write_begin()) != 0) {
2633 if (retval == EDEADLK) {
2634 retval = 0;
2635
2636 /* We need to perform a delayed detach */
2637 ifp->if_delayed_detach = 1;
2638 dlil_detach_waiting = 1;
2639 wakeup(&dlil_detach_waiting);
2640 }
2641 return retval;
2642 }
2643
2644 /* Steal the list of interface filters */
2645 fhead = ifp->if_flt_head;
2646 TAILQ_INIT(&ifp->if_flt_head);
2647
2648 /* unuse the interface */
2649 zeroed = ifp_unuse(ifp);
2650
2651 /*
2652 * If thread affinity was set for the workloop thread, we will need
2653 * to tear down the affinity and release the extra reference count
2654 * taken at attach time.
2655 */
2656 if ((inputthread = ifp->if_input_thread) != NULL) {
2657 if (inputthread->net_affinity) {
2658 struct thread *tp;
2659
2660 if (inputthread == dlil_lo_thread_ptr)
2661 panic("Thread affinity should not be enabled "
2662 "on the loopback dlil input thread\n");
2663
2664 lck_mtx_lock(inputthread->input_lck);
2665 tp = inputthread->workloop_thread;
2666 inputthread->workloop_thread = NULL;
2667 inputthread->tag = 0;
2668 inputthread->net_affinity = FALSE;
2669 lck_mtx_unlock(inputthread->input_lck);
2670
2671 /* Tear down workloop thread affinity */
2672 if (tp != NULL) {
2673 (void) dlil_affinity_set(tp,
2674 THREAD_AFFINITY_TAG_NULL);
2675 thread_deallocate(tp);
2676 }
2677
2678 /* Tear down dlil input thread affinity */
2679 tp = inputthread->input_thread;
2680 (void) dlil_affinity_set(tp, THREAD_AFFINITY_TAG_NULL);
2681 thread_deallocate(tp);
2682 }
2683
2684 /* cleanup ifp dlil input thread, if any */
2685 ifp->if_input_thread = NULL;
2686
2687 if (inputthread != dlil_lo_thread_ptr) {
2688 #ifdef DLIL_DEBUG
2689 printf("ifnet_detach: wakeup thread threadinfo: %p "
2690 "input_thread=%p threads: cur=%d max=%d\n",
2691 inputthread, inputthread->input_thread,
2692 dlil_multithreaded_input, cur_dlil_input_threads);
2693 #endif
2694 lck_mtx_lock(inputthread->input_lck);
2695
2696 inputthread->input_waiting |= DLIL_INPUT_TERMINATE;
2697 if ((inputthread->input_waiting & DLIL_INPUT_RUNNING) == 0) {
2698 wakeup((caddr_t)&inputthread->input_waiting);
2699 }
2700 lck_mtx_unlock(inputthread->input_lck);
2701 }
2702 }
2703 dlil_write_end();
2704
2705 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2706 filter_next = TAILQ_NEXT(filter, filt_next);
2707 dlil_detach_filter_internal(filter, 1);
2708 }
2709
2710 if (zeroed != 0) {
2711 ifp_use_reached_zero(ifp);
2712 }
2713
2714 return retval;
2715 }
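/*
 * Rough teardown order as implemented above (a sketch, not normative):
 * the DETACHING flag blocks new protocol attachments, the interface
 * filters are detached, and once the use count reaches zero
 * ifp_use_reached_zero posts KEV_DL_IF_DETACHED and invokes the
 * driver's if_free callback; a driver would typically call
 * dlil_if_release (below) only after that point.
 */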
2716
2717 static errno_t
2718 dlil_recycle_ioctl(
2719 __unused ifnet_t ifnet_ptr,
2720 __unused u_int32_t ioctl_code,
2721 __unused void *ioctl_arg)
2722 {
2723 return EOPNOTSUPP;
2724 }
2725
2726 static int
2727 dlil_recycle_output(
2728 __unused struct ifnet *ifnet_ptr,
2729 struct mbuf *m)
2730 {
2731 m_freem(m);
2732 return 0;
2733 }
2734
2735 static void
2736 dlil_recycle_free(
2737 __unused ifnet_t ifnet_ptr)
2738 {
2739 }
2740
2741 static errno_t
2742 dlil_recycle_set_bpf_tap(
2743 __unused ifnet_t ifp,
2744 __unused bpf_tap_mode mode,
2745 __unused bpf_packet_func callback)
2746 {
2747 /* XXX not sure what to do here */
2748 return 0;
2749 }
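/*
 * The dlil_recycle_* stubs above are installed by dlil_if_release
 * (below) so that anything still holding a pointer to a recycled
 * ifnet fails gracefully: ioctls return EOPNOTSUPP and outbound
 * packets are silently freed.
 */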
2750
2751 __private_extern__
2752 int dlil_if_acquire(
2753 u_long family,
2754 const void *uniqueid,
2755 size_t uniqueid_len,
2756 struct ifnet **ifp)
2757 {
2758 struct ifnet *ifp1 = NULL;
2759 struct dlil_ifnet *dlifp1 = NULL;
2760 int ret = 0;
2761
2762 lck_mtx_lock(dlil_ifnet_mutex);
2763 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2764
2765 ifp1 = (struct ifnet *)dlifp1;
2766
2767 if (ifp1->if_family == family) {
2768
2769 /* same uniqueid of the same length, or no uniqueid specified */
2770 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2771 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2772
2773 /* check for matching interface in use */
2774 if (ifp1->if_eflags & IFEF_INUSE) {
2775 if (uniqueid_len) {
2776 ret = EBUSY;
2777 goto end;
2778 }
2779 }
2780 else {
2781 if (!ifp1->if_lock)
2782 panic("ifp's lock is gone\n");
2783 ifnet_lock_exclusive(ifp1);
2784 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2785 ifnet_lock_done(ifp1);
2786 *ifp = ifp1;
2787 goto end;
2788 }
2789 }
2790 }
2791 }
2792
2793 /* no interface found, allocate a new one */
2794 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2795 if (dlifp1 == 0) {
2796 ret = ENOMEM;
2797 goto end;
2798 }
2799
2800 bzero(dlifp1, sizeof(*dlifp1));
2801
2802 if (uniqueid_len) {
2803 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2804 if (dlifp1->if_uniqueid == 0) {
2805 FREE(dlifp1, M_NKE);
2806 ret = ENOMEM;
2807 goto end;
2808 }
2809 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2810 dlifp1->if_uniqueid_len = uniqueid_len;
2811 }
2812
2813 ifp1 = (struct ifnet *)dlifp1;
2814 ifp1->if_eflags |= IFEF_INUSE;
2815 ifp1->if_name = dlifp1->if_namestorage;
2816 #if CONFIG_MACF_NET
2817 mac_ifnet_label_init(ifp1);
2818 #endif
2819
2820 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2821
2822 *ifp = ifp1;
2823
2824 end:
2825 lck_mtx_unlock(dlil_ifnet_mutex);
2826
2827 return ret;
2828 }
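/*
 * Hedged pairing sketch: a family module might allocate or recycle an
 * ifnet like this ('my_family' and 'my_unique' are hypothetical):
 *
 *	struct ifnet *ifp;
 *	if (dlil_if_acquire(my_family, &my_unique, sizeof(my_unique), &ifp) == 0) {
 *		...fill in ifp fields, then ifnet_attach(ifp, lladdr)...
 *	}
 *
 * dlil_if_release (below) returns the ifnet to this recyclable pool.
 */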
2829
2830 __private_extern__ void
2831 dlil_if_release(
2832 ifnet_t ifp)
2833 {
2834 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2835
2836 /* Interface does not have a lock until it is attached - radar 3713951 */
2837 if (ifp->if_lock)
2838 ifnet_lock_exclusive(ifp);
2839 ifp->if_eflags &= ~IFEF_INUSE;
2840 ifp->if_ioctl = dlil_recycle_ioctl;
2841 ifp->if_output = dlil_recycle_output;
2842 ifp->if_free = dlil_recycle_free;
2843 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2844
2845 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2846 ifp->if_name = dlifp->if_namestorage;
2847 #if CONFIG_MACF_NET
2848 /*
2849 * We can either recycle the MAC label here or in dlil_if_acquire().
2850 * It seems logical to do it here but this means that anything that
2851 * still has a handle on ifp will now see it as unlabeled.
2852 * Since the interface is "dead" that may be OK. Revisit later.
2853 */
2854 mac_ifnet_label_recycle(ifp);
2855 #endif
2856 if (ifp->if_lock)
2857 ifnet_lock_done(ifp);
2858
2859 }
2860
2861 __private_extern__ void
2862 dlil_proto_unplumb_all(struct ifnet *ifp)
2863 {
2864 /*
2865 * if_proto_hash[0-3] are for PF_INET, PF_INET6, PF_APPLETALK
2866 * and PF_VLAN, where each bucket contains exactly one entry;
2867 * PF_VLAN does not need an explicit unplumb.
2868 *
2869 * if_proto_hash[4] is for other protocols; we expect anything
2870 * in this bucket to respond to the DETACHING event (which would
2871 * have happened by now) and do the unplumb then.
2872 */
2873 (void) proto_unplumb(PF_INET, ifp);
2874 #if INET6
2875 (void) proto_unplumb(PF_INET6, ifp);
2876 #endif /* INET6 */
2877 #if NETAT
2878 (void) proto_unplumb(PF_APPLETALK, ifp);
2879 #endif /* NETAT */
2880 }