]> git.saurik.com Git - apple/xnu.git/blame - bsd/net/dlil.c
xnu-1504.7.4.tar.gz
[apple/xnu.git] / bsd / net / dlil.c
CommitLineData
1c79356b 1/*
b7266188 2 * Copyright (c) 1999-2009 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
1c79356b
A
 * Data Link Interface Layer
30 * Author: Ted Walker
31 */
2d21ac55
A
32/*
33 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
34 * support for mandatory and extensible security protections. This notice
35 * is included in support of clause 2.2 (b) of the Apple Public License,
36 * Version 2.0.
37 */
1c79356b 38
1c79356b
A
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/kernel.h>
42#include <sys/malloc.h>
43#include <sys/mbuf.h>
44#include <sys/socket.h>
91447636
A
45#include <sys/domain.h>
46#include <sys/user.h>
2d21ac55 47#include <sys/random.h>
1c79356b
A
48#include <net/if_dl.h>
49#include <net/if.h>
91447636 50#include <net/route.h>
1c79356b
A
51#include <net/if_var.h>
52#include <net/dlil.h>
91447636 53#include <net/if_arp.h>
1c79356b
A
54#include <sys/kern_event.h>
55#include <sys/kdebug.h>
1c79356b 56
91447636 57#include <kern/assert.h>
1c79356b 58#include <kern/task.h>
9bccf70c
A
59#include <kern/thread.h>
60#include <kern/sched_prim.h>
91447636 61#include <kern/locks.h>
2d21ac55 62#include <net/kpi_protocol.h>
9bccf70c 63
1c79356b 64#include <net/if_types.h>
91447636
A
65#include <net/kpi_interfacefilter.h>
66
67#include <libkern/OSAtomic.h>
1c79356b 68
d52fe63f 69#include <machine/machine_routines.h>
1c79356b 70
2d21ac55
A
71#include <mach/thread_act.h>
72
73#if CONFIG_MACF_NET
74#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */
76
b0d623f7
A
77#if PF
78#include <net/pfvar.h>
79#endif /* PF */
80
2d21ac55
A
81#define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
82#define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
1c79356b
A
83#define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
84#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
85#define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
86
87
1c79356b
A
88#define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
89#define MAX_LINKADDR 4 /* LONGWORDS */
90#define M_NKE M_IFADDR
91
2d21ac55 92#if 1
91447636
A
93#define DLIL_PRINTF printf
94#else
95#define DLIL_PRINTF kprintf
96#endif
97
d1ecb069
A
98#define atomic_add_32(a, n) \
99 ((void) OSAddAtomic(n, (volatile SInt32 *)a))
100
101#if PKT_PRIORITY
102#define _CASSERT(x) \
103 switch (0) { case 0: case (x): ; }
104
105#define IF_DATA_REQUIRE_ALIGNED_32(f) \
106 _CASSERT(!(offsetof(struct if_data_internal, f) % sizeof (u_int32_t)))
107
108#define IFNET_IF_DATA_REQUIRE_ALIGNED_32(f) \
109 _CASSERT(!(offsetof(struct ifnet, if_data.f) % sizeof (u_int32_t)))
110#endif /* PKT_PRIORITY */
2d21ac55 111
91447636 112enum {
2d21ac55
A
113 kProtoKPI_v1 = 1,
114 kProtoKPI_v2 = 2
91447636
A
115};
116
117struct if_proto {
118 SLIST_ENTRY(if_proto) next_hash;
119 int refcount;
120 int detaching;
121 struct ifnet *ifp;
122 struct domain *dl_domain;
123 protocol_family_t protocol_family;
124 int proto_kpi;
125 union {
91447636
A
126 struct {
127 proto_media_input input;
128 proto_media_preout pre_output;
129 proto_media_event event;
130 proto_media_ioctl ioctl;
131 proto_media_detached detached;
132 proto_media_resolve_multi resolve_multi;
133 proto_media_send_arp send_arp;
134 } v1;
2d21ac55
A
135 struct {
136 proto_media_input_v2 input;
137 proto_media_preout pre_output;
138 proto_media_event event;
139 proto_media_ioctl ioctl;
140 proto_media_detached detached;
141 proto_media_resolve_multi resolve_multi;
142 proto_media_send_arp send_arp;
143 } v2;
91447636 144 } kpi;
1c79356b
A
145};
146
91447636
A
147SLIST_HEAD(proto_hash_entry, if_proto);
148
1c79356b 149
9bccf70c
A
150struct dlil_ifnet {
151 /* ifnet and drvr_ext are used by the stack and drivers
152 drvr_ext extends the public ifnet and must follow dl_if */
153 struct ifnet dl_if; /* public ifnet */
9bccf70c
A
154
155 /* dlil private fields */
156 TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnet are link together */
157 /* it is not the ifnet list */
158 void *if_uniqueid; /* unique id identifying the interface */
159 size_t if_uniqueid_len;/* length of the unique id */
91447636 160 char if_namestorage[IFNAMSIZ]; /* interface name storage */
1c79356b
A
161};
162
91447636
A
163struct ifnet_filter {
164 TAILQ_ENTRY(ifnet_filter) filt_next;
165 ifnet_t filt_ifp;
166 int filt_detaching;
167
168 const char *filt_name;
169 void *filt_cookie;
170 protocol_family_t filt_protocol;
171 iff_input_func filt_input;
172 iff_output_func filt_output;
173 iff_event_func filt_event;
174 iff_ioctl_func filt_ioctl;
175 iff_detached_func filt_detached;
1c79356b
A
176};
177
2d21ac55 178struct proto_input_entry;
55e303ae 179
91447636 180static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
91447636
A
181static lck_grp_t *dlil_lock_group;
182static lck_grp_t *ifnet_lock_group;
183static lck_grp_t *ifnet_head_lock_group;
184static lck_attr_t *ifnet_lock_attr;
91447636
A
185static lck_rw_t *ifnet_head_mutex;
186static lck_mtx_t *dlil_ifnet_mutex;
187static lck_mtx_t *dlil_mutex;
b0d623f7
A
188static u_int32_t dlil_read_count = 0;
189static u_int32_t dlil_detach_waiting = 0;
190u_int32_t dlil_filter_count = 0;
91447636 191extern u_int32_t ipv4_ll_arp_aware;
1c79356b 192
d1ecb069
A
193#if IFNET_ROUTE_REFCNT
194/*
195 * Updating this variable should be done by first acquiring the global
196 * radix node head (rnh_lock), in tandem with settting/clearing the
197 * PR_AGGDRAIN for routedomain.
198 */
199u_int32_t ifnet_aggressive_drainers;
200static u_int32_t net_rtref;
201#endif /* IFNET_ROUTE_REFCNT */
202
2d21ac55
A
203static struct dlil_threading_info dlil_lo_thread;
204__private_extern__ struct dlil_threading_info *dlil_lo_thread_ptr = &dlil_lo_thread;
205
206static struct mbuf *dlil_lo_input_mbuf_head = NULL;
207static struct mbuf *dlil_lo_input_mbuf_tail = NULL;
208
209#if IFNET_INPUT_SANITY_CHK
210static int dlil_lo_input_mbuf_count = 0;
211int dlil_input_sanity_check = 0; /* sanity checking of input packet lists received */
9bccf70c 212#endif
2d21ac55
A
213int dlil_multithreaded_input = 1;
214static int cur_dlil_input_threads = 0;
1c79356b 215
91447636 216static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
91447636
A
217static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
218static void dlil_call_delayed_detach_thread(void);
219
220static void dlil_read_begin(void);
2d21ac55 221static __inline__ void dlil_read_end(void);
91447636
A
222static int dlil_write_begin(void);
223static void dlil_write_end(void);
224
b0d623f7
A
225#if DEBUG
226__private_extern__ int dlil_verbose = 1;
227#else
228__private_extern__ int dlil_verbose = 0;
229#endif /* DEBUG */
230
2d21ac55
A
231unsigned int net_affinity = 1;
232static kern_return_t dlil_affinity_set(struct thread *, u_int32_t);
91447636 233
9bccf70c 234extern void bpfdetach(struct ifnet*);
91447636
A
235extern void proto_input_run(void); // new run_netisr
236
2d21ac55
A
237void dlil_input_packet_list(struct ifnet *ifp, struct mbuf *m);
238static void dlil_input_thread_func(struct dlil_threading_info *inpthread);
239__private_extern__ int dlil_create_input_thread(
240 ifnet_t, struct dlil_threading_info *);
241__private_extern__ void dlil_terminate_input_thread(
242 struct dlil_threading_info *);
91447636
A
243
244__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);
1c79356b 245
55e303ae 246int dlil_expand_mcl;
1c79356b 247
b36670ce
A
248extern u_int32_t inject_buckets;
249
91447636 250static const u_int32_t dlil_writer_waiting = 0x80000000;
2d21ac55
A
251static lck_grp_attr_t *dlil_grp_attributes = NULL;
252static lck_attr_t *dlil_lck_attributes = NULL;
253static lck_grp_t *dlil_input_lock_grp = NULL;
91447636 254
/*
 * Strip the const qualifier from a pointer without a direct cast
 * (avoids compiler warnings); the union round-trip is well defined
 * since both members have identical representation.
 */
static inline void *
_cast_non_const(const void *ptr) {
	union {
		const void *cval;
		void *val;
	} u;

	u.cval = ptr;
	return (u.val);
}
265
266/* Should these be inline? */
/*
 * Enter the read side of DLIL's homegrown read/write protocol.
 * Readers atomically increment dlil_read_count; if a writer has
 * advertised itself (dlil_writer_waiting bit set) and this thread
 * holds no read reference yet, back off with a 1-tick sleep so the
 * writer can drain.  Recursive reads by a thread that already holds
 * a reference are allowed through.  The per-thread reference count
 * lives in the uthread so dlil_read_end() can balance it.
 */
static void
dlil_read_begin(void)
{
	u_int32_t new_value;
	u_int32_t old_value;
	struct uthread *uth = get_bsdthread_info(current_thread());

	/* A thread marked as the writer must not also take a read ref */
	if (uth->dlil_incremented_read == dlil_writer_waiting)
		panic("dlil_read_begin - thread is already a writer");

	do {
again:
		old_value = dlil_read_count;

		/* Writer pending and we hold no read ref: yield and retry */
		if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
		{
			tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
			goto again;
		}

		new_value = old_value + 1;
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

	uth->dlil_incremented_read++;
}
292
/*
 * Drop one DLIL read reference.  If the count falls back to exactly
 * the writer-waiting bit (i.e. no readers remain and a writer is
 * parked), wake the writer blocked in dlil_write_begin().
 */
static void
dlil_read_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	OSDecrementAtomic(&dlil_read_count);
	uth->dlil_incremented_read--;
	if (dlil_read_count == dlil_writer_waiting)
		wakeup(_cast_non_const(&dlil_writer_waiting));
}
303
/*
 * Acquire exclusive (writer) access to DLIL state.
 *
 * Returns EDEADLK if the calling thread already holds a read
 * reference -- callers must handle that case themselves (see
 * dlil_detach_filter_internal).  Otherwise serializes writers on
 * dlil_mutex, advertises the writer by setting the
 * dlil_writer_waiting bit in dlil_read_count, and sleeps until all
 * readers have drained.  Returns 0 with write access held; release
 * with dlil_write_end().
 */
static int
dlil_write_begin(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != 0) {
		return EDEADLK;
	}
	lck_mtx_lock(dlil_mutex);
	OSBitOrAtomic((UInt32)dlil_writer_waiting, &dlil_read_count);
again:
	if (dlil_read_count == dlil_writer_waiting) {
		/* All readers gone; mark this thread as the writer */
		uth->dlil_incremented_read = dlil_writer_waiting;
		return 0;
	}
	else {
		tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
		goto again;
	}
}
324
/*
 * Release writer access taken with dlil_write_begin(): clear the
 * writer-waiting bit, drop dlil_mutex, reset the per-thread writer
 * marker, and wake any readers sleeping in dlil_read_begin().
 */
static void
dlil_write_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != dlil_writer_waiting)
		panic("dlil_write_end - thread is not a writer");
	OSBitAndAtomic((UInt32)~dlil_writer_waiting, &dlil_read_count);
	lck_mtx_unlock(dlil_mutex);
	uth->dlil_incremented_read = 0;
	wakeup(&dlil_read_count);
}
337
338#define PROTO_HASH_SLOTS 0x5
339
1c79356b
A
340/*
341 * Internal functions.
342 */
343
91447636 344static int
b0d623f7 345proto_hash_value(u_int32_t protocol_family)
91447636 346{
4a3eedf9
A
347 /*
348 * dlil_proto_unplumb_all() depends on the mapping between
349 * the hash bucket index and the protocol family defined
350 * here; future changes must be applied there as well.
351 */
91447636
A
352 switch(protocol_family) {
353 case PF_INET:
354 return 0;
355 case PF_INET6:
356 return 1;
357 case PF_APPLETALK:
358 return 2;
359 case PF_VLAN:
360 return 3;
361 default:
362 return 4;
363 }
364}
365
91447636 366static struct if_proto*
b0d623f7 367find_attached_proto(struct ifnet *ifp, u_int32_t protocol_family)
1c79356b 368{
91447636 369 struct if_proto *proto = NULL;
b0d623f7 370 u_int32_t i = proto_hash_value(protocol_family);
91447636
A
371 if (ifp->if_proto_hash) {
372 proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
373 }
374
375 while(proto && proto->protocol_family != protocol_family) {
376 proto = SLIST_NEXT(proto, next_hash);
377 }
378
379 return proto;
1c79356b
A
380}
381
91447636
A
/* Take an additional reference on an if_proto (atomic increment). */
static void
if_proto_ref(struct if_proto *proto)
{
	OSAddAtomic(1, &proto->refcount);
}
387
91447636
A
/*
 * Release one reference on an if_proto.  OSAddAtomic returns the
 * value *before* the decrement, so oldval == 1 means this call
 * dropped the final reference and the structure can be freed.
 */
static void
if_proto_free(struct if_proto *proto)
{
	int oldval = OSAddAtomic(-1, &proto->refcount);

	if (oldval == 1) { /* This was the last reference */
		FREE(proto, M_IFADDR);
	}
}
397
91447636
A
/*
 * Assert the ownership state of ifp->if_lock (LCK_MTX_ASSERT_*
 * semantics).  A no-op when the per-ifnet lock is built as a rw lock,
 * since the rw primitives offer no equivalent assertion.
 */
__private_extern__ void
ifnet_lock_assert(
	__unused struct ifnet *ifp,
	__unused int what)
{
#if IFNET_RW_LOCK
	/*
	 * Not implemented for rw locks.
	 *
	 * Function exists so when/if we use mutex we can
	 * enable this check.
	 */
#else
	lck_mtx_assert(ifp->if_lock, what);
#endif
}
414
91447636
A
/*
 * Take ifp->if_lock for reading: shared when built as a rw lock,
 * plain (exclusive) mutex acquisition otherwise.  Not recursive in
 * the mutex configuration (asserted NOTOWNED first).
 */
__private_extern__ void
ifnet_lock_shared(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_shared(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}
426
91447636
A
/*
 * Take ifp->if_lock for writing: exclusive when built as a rw lock,
 * plain mutex acquisition otherwise.  Not recursive in the mutex
 * configuration (asserted NOTOWNED first).
 */
__private_extern__ void
ifnet_lock_exclusive(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_exclusive(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}
438
91447636
A
/*
 * Release ifp->if_lock taken via ifnet_lock_shared() or
 * ifnet_lock_exclusive() (lck_rw_done handles either rw mode).
 */
__private_extern__ void
ifnet_lock_done(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_done(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(ifp->if_lock);
#endif
}
450
/* Take the global interface-list lock for reading. */
__private_extern__ void
ifnet_head_lock_shared(void)
{
	lck_rw_lock_shared(ifnet_head_mutex);
}
456
/* Take the global interface-list lock for writing. */
__private_extern__ void
ifnet_head_lock_exclusive(void)
{
	lck_rw_lock_exclusive(ifnet_head_mutex);
}
1c79356b 462
/* Release the global interface-list lock (either mode). */
__private_extern__ void
ifnet_head_done(void)
{
	lck_rw_done(ifnet_head_mutex);
}
1c79356b 468
91447636
A
469static int dlil_ifp_proto_count(struct ifnet * ifp)
470{
471 int count = 0;
472 int i;
473
474 if (ifp->if_proto_hash != NULL) {
475 for (i = 0; i < PROTO_HASH_SLOTS; i++) {
476 struct if_proto *proto;
477 SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
478 count++;
479 }
480 }
481 }
482
483 return count;
484}
1c79356b 485
91447636 486__private_extern__ void
b0d623f7
A
487dlil_post_msg(struct ifnet *ifp, u_int32_t event_subclass, u_int32_t event_code,
488 struct net_event_data *event_data, u_int32_t event_data_len)
91447636
A
489{
490 struct net_event_data ev_data;
491 struct kev_msg ev_msg;
492
493 /*
2d21ac55 494 * a net event always starts with a net_event_data structure
91447636
A
495 * but the caller can generate a simple net event or
496 * provide a longer event structure to post
497 */
498
499 ev_msg.vendor_code = KEV_VENDOR_APPLE;
500 ev_msg.kev_class = KEV_NETWORK_CLASS;
501 ev_msg.kev_subclass = event_subclass;
502 ev_msg.event_code = event_code;
503
504 if (event_data == 0) {
505 event_data = &ev_data;
506 event_data_len = sizeof(struct net_event_data);
507 }
508
509 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
510 event_data->if_family = ifp->if_family;
b0d623f7 511 event_data->if_unit = (u_int32_t) ifp->if_unit;
91447636
A
512
513 ev_msg.dv[0].data_length = event_data_len;
514 ev_msg.dv[0].data_ptr = event_data;
515 ev_msg.dv[1].data_length = 0;
516
517 dlil_event_internal(ifp, &ev_msg);
1c79356b
A
518}
519
2d21ac55
A
/*
 * Allocate the lock infrastructure and spawn the kernel thread
 * backing a DLIL input queue (struct dlil_threading_info).
 *
 * ifp may be NULL or lo_ifp for the main/loopback input thread; any
 * other interface gets a thread named after it.  For non-loopback
 * threads (when net_affinity is enabled) a random affinity tag is
 * assigned so the matching workloop thread can later be co-scheduled
 * (see ifnet_input).  Returns the kernel_thread_start() result
 * (0 on success); panics if the thread cannot be created.
 */
__private_extern__ int
dlil_create_input_thread(
	ifnet_t ifp, struct dlil_threading_info *inputthread)
{
	int error;

	bzero(inputthread, sizeof(*inputthread));
	// loopback ifp may not be configured at dlil_init time.
	if (ifp == lo_ifp)
		strlcat(inputthread->input_name, "dlil_input_main_thread_mtx", 32);
	else
		snprintf(inputthread->input_name, 32, "dlil_input_%s%d_mtx", ifp->if_name, ifp->if_unit);

	/* Per-thread lock group and mutex protecting the input queue */
	inputthread->lck_grp = lck_grp_alloc_init(inputthread->input_name, dlil_grp_attributes);
	inputthread->input_lck = lck_mtx_alloc_init(inputthread->lck_grp, dlil_lck_attributes);

	error= kernel_thread_start((thread_continue_t)dlil_input_thread_func, inputthread, &inputthread->input_thread);
	if (error == 0) {
		ml_thread_policy(inputthread->input_thread, MACHINE_GROUP,
			(MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
		/*
		 * Except for the loopback dlil input thread, we create
		 * an affinity set so that the matching workloop thread
		 * can be scheduled on the same processor set.
		 */
		if (net_affinity && inputthread != dlil_lo_thread_ptr) {
			struct thread *tp = inputthread->input_thread;
			u_int32_t tag;
			/*
			 * Randomize to reduce the probability
			 * of affinity tag namespace collision.
			 */
			read_random(&tag, sizeof (tag));
			if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) {
				thread_reference(tp);
				inputthread->tag = tag;
				inputthread->net_affinity = TRUE;
			}
		}
	} else {
		panic("dlil_create_input_thread: couldn't create thread\n");
	}
	/* error != 0 panics above, so the count only grows on success */
	OSAddAtomic(1, &cur_dlil_input_threads);
#if DLIL_DEBUG
	printf("dlil_create_input_thread: threadinfo: %p input_thread=%p threads: cur=%d max=%d\n",
		inputthread, inputthread->input_thread, dlil_multithreaded_input, cur_dlil_input_threads);
#endif
	return error;
}
/*
 * Tear down a DLIL input thread; runs on the thread itself (invoked
 * from dlil_input_thread_func on DLIL_INPUT_TERMINATE, with
 * input_lck held).  Drops the global thread count, releases the
 * thread's lock and lock group, frees the threading info, and
 * terminates the current thread.  Does not return.
 *
 * NOTE(review): appears to assume it is never called for
 * dlil_lo_thread_ptr, which is statically allocated -- FREE() would
 * be invalid for it; confirm against callers.
 */
__private_extern__ void
dlil_terminate_input_thread(
	struct dlil_threading_info *inputthread)
{
	OSAddAtomic(-1, &cur_dlil_input_threads);

	lck_mtx_unlock(inputthread->input_lck);
	lck_mtx_free(inputthread->input_lck, inputthread->lck_grp);
	lck_grp_free(inputthread->lck_grp);

	FREE(inputthread, M_NKE);

	/* For the extra reference count from kernel_thread_start() */
	thread_deallocate(current_thread());

	thread_terminate(current_thread());
}
586
587static kern_return_t
588dlil_affinity_set(struct thread *tp, u_int32_t tag)
589{
590 thread_affinity_policy_data_t policy;
591
592 bzero(&policy, sizeof (policy));
593 policy.affinity_tag = tag;
594 return (thread_policy_set(tp, THREAD_AFFINITY_POLICY,
595 (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT));
596}
597
91447636
A
/*
 * One-time DLIL bootstrap, called during network stack startup:
 * verifies atomic-field alignment, reads boot-args, initializes the
 * interface lists, allocates all DLIL lock groups/attributes/locks,
 * starts the first (loopback) input thread and the delayed-detach
 * thread, and initializes the packet filter when PF is configured.
 */
void
dlil_init(void)
{
	thread_t thread = THREAD_NULL;

#if PKT_PRIORITY
	/*
	 * The following fields must be 32-bit aligned for atomic operations.
	 */
	IF_DATA_REQUIRE_ALIGNED_32(ifi_obgpackets);
	IF_DATA_REQUIRE_ALIGNED_32(ifi_obgbytes)

	IFNET_IF_DATA_REQUIRE_ALIGNED_32(ifi_obgpackets);
	IFNET_IF_DATA_REQUIRE_ALIGNED_32(ifi_obgbytes)
#endif /* PKT_PRIORITY */

	/* Boot-args can override affinity / route-refcnt behavior */
	PE_parse_boot_argn("net_affinity", &net_affinity, sizeof (net_affinity));
#if IFNET_ROUTE_REFCNT
	PE_parse_boot_argn("net_rtref", &net_rtref, sizeof (net_rtref));
#endif /* IFNET_ROUTE_REFCNT */

	TAILQ_INIT(&dlil_ifnet_head);
	TAILQ_INIT(&ifnet_head);

	/* Setup the lock groups we will use */
	dlil_grp_attributes = lck_grp_attr_alloc_init();

	dlil_lock_group = lck_grp_alloc_init("dlil internal locks", dlil_grp_attributes);
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", dlil_grp_attributes);
	ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", dlil_grp_attributes);
	dlil_input_lock_grp = lck_grp_alloc_init("dlil input lock", dlil_grp_attributes);

	/* Setup the lock attributes we will use */
	dlil_lck_attributes = lck_attr_alloc_init();

	ifnet_lock_attr = lck_attr_alloc_init();


	ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, dlil_lck_attributes);
	dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, dlil_lck_attributes);
	dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, dlil_lck_attributes);

	/* Attributes are copied by the lock inits; release ours */
	lck_attr_free(dlil_lck_attributes);
	dlil_lck_attributes = NULL;

	/*
	 * Create and start up the first dlil input thread once everything is initialized
	 */
	dlil_create_input_thread(0, dlil_lo_thread_ptr);

	(void) kernel_thread_start((thread_continue_t)dlil_call_delayed_detach_thread, NULL, &thread);
	thread_deallocate(thread);
#if PF
	/* Initialize the packet filter */
	pfinit();
#endif /* PF */
}
1c79356b 655
/*
 * Attach an interface filter to ifp.  Copies the caller's iff_filter
 * callbacks into a freshly allocated ifnet_filter, links it onto the
 * interface's filter list under the DLIL write lock, and returns the
 * opaque reference through *filter_ref.  Returns ENOMEM on allocation
 * failure or the dlil_write_begin() error code.
 */
__private_extern__ int
dlil_attach_filter(
	struct ifnet *ifp,
	const struct iff_filter *if_filter,
	interface_filter_t *filter_ref)
{
	int retval = 0;
	struct ifnet_filter *filter;

	MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
	if (filter == NULL)
		return ENOMEM;
	bzero(filter, sizeof(*filter));


	/* Snapshot the caller's callbacks and cookie */
	filter->filt_ifp = ifp;
	filter->filt_cookie = if_filter->iff_cookie;
	filter->filt_name = if_filter->iff_name;
	filter->filt_protocol = if_filter->iff_protocol;
	filter->filt_input = if_filter->iff_input;
	filter->filt_output = if_filter->iff_output;
	filter->filt_event = if_filter->iff_event;
	filter->filt_ioctl = if_filter->iff_ioctl;
	filter->filt_detached = if_filter->iff_detached;

	if ((retval = dlil_write_begin()) != 0) {
		/* Failed to acquire the write lock */
		FREE(filter, M_NKE);
		return retval;
	}
	TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
	dlil_write_end();
	*filter_ref = filter;

	/*
	 * Bump filter count and route_generation ID to let TCP
	 * know it shouldn't do TSO on this connection
	 */
	OSAddAtomic(1, &dlil_filter_count);
	if (use_routegenid)
		routegenid_update();

	return retval;
}
700
/*
 * Detach an interface filter.
 *
 * With detached == 0 the filter is expected to still be linked on an
 * interface: validate the reference against every attached interface,
 * then unlink it under the write lock.  If the caller already holds
 * the DLIL read lock (dlil_write_begin returns EDEADLK), flag the
 * filter for the delayed-detach thread instead and return 0.  With
 * detached != 0 the filter has already been unlinked (e.g. during
 * interface detach) and only the detached callback and free run.
 * Returns 0 on success, EINVAL for a bogus filter reference, or a
 * dlil_write_begin() error.
 */
static int
dlil_detach_filter_internal(
	interface_filter_t filter,
	int detached)
{
	int retval = 0;

	if (detached == 0) {
		ifnet_t ifp = NULL;
		interface_filter_t entry = NULL;

		/* Take the write lock */
		retval = dlil_write_begin();
		if (retval != 0 && retval != EDEADLK)
			return retval;

		/*
		 * At this point either we have the write lock (retval == 0)
		 * or we couldn't get it (retval == EDEADLK) because someone
		 * else up the stack is holding the read lock. It is safe to
		 * read, either the read or write is held. Verify the filter
		 * parameter before proceeding.
		 */
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
				if (entry == filter)
					break;
			}
			if (entry == filter)
				break;
		}
		ifnet_head_done();

		if (entry != filter) {
			/* filter parameter is not a valid filter ref */
			if (retval == 0) {
				dlil_write_end();
			}
			return EINVAL;
		}

		if (retval == EDEADLK) {
			/* Perform a delayed detach */
			filter->filt_detaching = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
			return 0;
		}

		/* Remove the filter from the list */
		TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
		dlil_write_end();
	}

	/* Call the detached function if there is one */
	if (filter->filt_detached)
		filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

	/* Free the filter */
	FREE(filter, M_NKE);

	/*
	 * Decrease filter count and route_generation ID to let TCP
	 * know it should reevaluate doing TSO or not
	 */
	OSAddAtomic(-1, &dlil_filter_count);
	if (use_routegenid)
		routegenid_update();

	return retval;
}
773
2d21ac55 774__private_extern__ void
91447636
A
775dlil_detach_filter(interface_filter_t filter)
776{
3a60a9f5
A
777 if (filter == NULL)
778 return;
91447636
A
779 dlil_detach_filter_internal(filter, 0);
780}
1c79356b 781
/*
 * Main loop of a DLIL input thread.  Sleeps until ifnet_input()
 * posts work via input_waiting, then detaches the thread's pending
 * mbuf chain (and, for the loopback thread, the global loopback
 * queue) under input_lck and feeds the chains to
 * dlil_input_packet_list().  DLIL_INPUT_TERMINATE tears the thread
 * down via dlil_terminate_input_thread(); otherwise the loop never
 * exits.  When IFNET_INPUT_SANITY_CHK is enabled, queued-packet
 * accounting is cross-checked against the actual chain lengths.
 */
static void
dlil_input_thread_func(
	struct dlil_threading_info *inputthread)
{
	while (1) {
		struct mbuf *m = NULL, *m_loop = NULL;
#if IFNET_INPUT_SANITY_CHK
		int loop_cnt = 0, mbuf_cnt;
		int count;
		struct mbuf *m1;
#endif /* IFNET_INPUT_SANITY_CHK */

		lck_mtx_lock(inputthread->input_lck);

		/* Wait until there is work to be done */
		while ((inputthread->input_waiting & ~DLIL_INPUT_RUNNING) == 0) {
			inputthread->input_waiting &= ~DLIL_INPUT_RUNNING;
			msleep(&inputthread->input_waiting, inputthread->input_lck, 0, inputthread->input_name, 0);
		}


		lck_mtx_assert(inputthread->input_lck, LCK_MTX_ASSERT_OWNED);

		/* Detach the entire pending chain while the lock is held */
		m = inputthread->mbuf_head;
		inputthread->mbuf_head = NULL;
		inputthread->mbuf_tail = NULL;

		if (inputthread->input_waiting & DLIL_INPUT_TERMINATE) {
			if (m)
				mbuf_freem_list(m);
			/* this is the end */
			dlil_terminate_input_thread(inputthread);
			return;
		}

		inputthread->input_waiting |= DLIL_INPUT_RUNNING;
		inputthread->input_waiting &= ~DLIL_INPUT_WAITING;

		/* The loopback thread also owns the global loopback queue */
		if (inputthread == dlil_lo_thread_ptr) {
			m_loop = dlil_lo_input_mbuf_head;
			dlil_lo_input_mbuf_head = NULL;
			dlil_lo_input_mbuf_tail = NULL;
		}

#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			mbuf_cnt = inputthread->mbuf_count;
			inputthread->mbuf_count = 0;
			if (inputthread == dlil_lo_thread_ptr) {
				loop_cnt = dlil_lo_input_mbuf_count;
				dlil_lo_input_mbuf_count = 0;
			}

			lck_mtx_unlock(inputthread->input_lck);

			/* Verify accounting matches the detached chain length */
			for (m1 = m, count = 0; m1; m1 = mbuf_nextpkt(m1)) {
				count++;
			}
			if (count != mbuf_cnt) {
				panic("dlil_input_func - thread=%p reg. loop queue has %d packets, should have %d\n",
					  inputthread, count, mbuf_cnt);
			}

			if (inputthread == dlil_lo_thread_ptr) {
				for (m1 = m_loop, count = 0; m1; m1 = mbuf_nextpkt(m1)) {
					count++;
				}
				if (count != loop_cnt) {
					panic("dlil_input_func - thread=%p loop queue has %d packets, should have %d\n",
						  inputthread, count, loop_cnt);
				}
			}
		} else
#endif /* IFNET_INPUT_SANITY_CHK */
		{
			lck_mtx_unlock(inputthread->input_lck);
		}


		/*
		 * NOTE warning %%% attention !!!!
		 * We should think about putting some thread starvation safeguards if
		 * we deal with long chains of packets.
		 */
		if (m_loop) {
			/* Only the loopback thread should ever see loopback traffic */
			if (inputthread == dlil_lo_thread_ptr)
				dlil_input_packet_list(lo_ifp, m_loop);
#if IFNET_INPUT_SANITY_CHK
			else
				panic("dlil_input_func - thread=%p loop queue has %d packets, should have none!\n",
					  inputthread, loop_cnt);
#endif /* IFNET_INPUT_SANITY_CHK */
		}


		if (m)
			dlil_input_packet_list(0, m);


		lck_mtx_lock(inputthread->input_lck);

		/* Run any pending protocol-registration work */
		if ((inputthread->input_waiting & (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER)) != 0) {
			lck_mtx_unlock(inputthread->input_lck);
			proto_input_run();
		}
		else
			lck_mtx_unlock(inputthread->input_lck);
	}
}
891
/*
 * Hand a chain of received packets (m_head, linked via m_nextpkt) to
 * the interface's DLIL input thread and wake it.  Loopback traffic
 * destined for the main thread goes on the dedicated global loopback
 * queue; everything else on the thread's own queue.  On the first
 * call per thread pair, binds the calling workloop thread to the
 * input thread's affinity tag.  Caller-supplied stats are folded into
 * ifp's if_data counters under the input lock.  Returns EINVAL on a
 * NULL ifp or chain (freeing the chain when only ifp is NULL),
 * otherwise 0.
 */
errno_t
ifnet_input(
	ifnet_t ifp,
	mbuf_t m_head,
	const struct ifnet_stat_increment_param *stats)
{
	struct thread *tp = current_thread();
	mbuf_t m_tail;
	struct dlil_threading_info *inp;
#if IFNET_INPUT_SANITY_CHK
	u_int32_t pkt_count = 0;
#endif /* IFNET_INPUT_SANITY_CHK */

	if (ifp == NULL || m_head == NULL) {
		if (m_head)
			mbuf_freem_list(m_head);
		return EINVAL;
	}

	/* Find the chain tail (validating each packet when enabled) */
	m_tail = m_head;
	while (1) {
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			ifnet_t rcvif;

			rcvif = mbuf_pkthdr_rcvif(m_tail);
			pkt_count++;

			if (rcvif == NULL ||
				(ifp->if_type != IFT_LOOP && rcvif != ifp) ||
				(mbuf_flags(m_head) & MBUF_PKTHDR) == 0) {
				panic("ifnet_input - invalid mbuf %p\n", m_tail);
			}
		}
#endif /* IFNET_INPUT_SANITY_CHK */
		if (mbuf_nextpkt(m_tail) == NULL)
			break;
		m_tail = mbuf_nextpkt(m_tail);
	}

	inp = ifp->if_input_thread;

	/* Fall back to the main input thread when not multithreaded */
	if (dlil_multithreaded_input == 0 || inp == NULL)
		inp = dlil_lo_thread_ptr;

	/*
	 * If there is a matching dlil input thread associated with an
	 * affinity set, associate this workloop thread with the same set.
	 * We will only do this once.
	 */
	lck_mtx_lock(inp->input_lck);
	if (inp->net_affinity && inp->workloop_thread == NULL) {
		u_int32_t tag = inp->tag;
		inp->workloop_thread = tp;
		lck_mtx_unlock(inp->input_lck);

		/* Associate the current thread with the new affinity tag */
		(void) dlil_affinity_set(tp, tag);

		/*
		 * Take a reference on the workloop (current) thread; during
		 * detach, we will need to refer to it in order to tear down
		 * its affinity.
		 */
		thread_reference(tp);
		lck_mtx_lock(inp->input_lck);
	}

	/* WARNING
	 * Because of loopbacked multicast we cannot stuff the ifp in
	 * the rcvif of the packet header: loopback has its own dlil
	 * input queue
	 */

	if (inp == dlil_lo_thread_ptr && ifp->if_type == IFT_LOOP) {
		/* Append to the global loopback queue */
		if (dlil_lo_input_mbuf_head == NULL)
			dlil_lo_input_mbuf_head = m_head;
		else if (dlil_lo_input_mbuf_tail != NULL)
			dlil_lo_input_mbuf_tail->m_nextpkt = m_head;
		dlil_lo_input_mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			dlil_lo_input_mbuf_count += pkt_count;
			inp->input_mbuf_cnt += pkt_count;
			inp->input_wake_cnt++;

			lck_mtx_assert(inp->input_lck, LCK_MTX_ASSERT_OWNED);
		}
#endif
	}
	else {
		/* Append to the input thread's own queue */
		if (inp->mbuf_head == NULL)
			inp->mbuf_head = m_head;
		else if (inp->mbuf_tail != NULL)
			inp->mbuf_tail->m_nextpkt = m_head;
		inp->mbuf_tail = m_tail;
#if IFNET_INPUT_SANITY_CHK
		if (dlil_input_sanity_check != 0) {
			inp->mbuf_count += pkt_count;
			inp->input_mbuf_cnt += pkt_count;
			inp->input_wake_cnt++;

			lck_mtx_assert(inp->input_lck, LCK_MTX_ASSERT_OWNED);
		}
#endif
	}


	/* Signal the input thread unless it is already running */
	inp->input_waiting |= DLIL_INPUT_WAITING;
	if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) {
		wakeup((caddr_t)&inp->input_waiting);
	}
	if (stats) {
		ifp->if_data.ifi_ipackets += stats->packets_in;
		ifp->if_data.ifi_ibytes += stats->bytes_in;
		ifp->if_data.ifi_ierrors += stats->errors_in;

		ifp->if_data.ifi_opackets += stats->packets_out;
		ifp->if_data.ifi_obytes += stats->bytes_out;
		ifp->if_data.ifi_oerrors += stats->errors_out;

		ifp->if_data.ifi_collisions += stats->collisions;
		ifp->if_data.ifi_iqdrops += stats->dropped;
	}

	lck_mtx_unlock(inp->input_lck);

	return 0;
}
1021
2d21ac55
A
1022static int
1023dlil_interface_filters_input(struct ifnet * ifp, struct mbuf * * m_p,
1024 char * * frame_header_p,
1025 protocol_family_t protocol_family)
91447636 1026{
2d21ac55
A
1027 struct ifnet_filter * filter;
1028
1029 TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
1030 int result;
1031
1032 if (filter->filt_input
1033 && (filter->filt_protocol == 0
1034 || filter->filt_protocol == protocol_family)) {
1035 result = (*filter->filt_input)(filter->filt_cookie,
1036 ifp, protocol_family,
1037 m_p, frame_header_p);
1038 if (result != 0) {
1039 return (result);
1040 }
1041 }
1042 }
b7266188
A
1043
1044 /*
1045 * Strip away M_PROTO1 bit prior to sending packet up the stack as
1046 * it is meant to be local to a subsystem -- if_bridge for M_PROTO1
1047 */
1048 if (*m_p != NULL)
1049 (*m_p)->m_flags &= ~M_PROTO1;
1050
2d21ac55 1051 return (0);
1c79356b
A
1052}
1053
2d21ac55
A
1054static void
1055dlil_ifproto_input(struct if_proto * ifproto, mbuf_t m)
1c79356b 1056{
2d21ac55 1057 int error;
1c79356b 1058
2d21ac55
A
1059 if (ifproto->proto_kpi == kProtoKPI_v1) {
1060 /* Version 1 protocols get one packet at a time */
1061 while (m != NULL) {
1062 char * frame_header;
1063 mbuf_t next_packet;
1064
1065 next_packet = m->m_nextpkt;
1066 m->m_nextpkt = NULL;
1067 frame_header = m->m_pkthdr.header;
1068 m->m_pkthdr.header = NULL;
1069 error = (*ifproto->kpi.v1.input)(ifproto->ifp,
1070 ifproto->protocol_family,
1071 m, frame_header);
1072 if (error != 0 && error != EJUSTRETURN)
1073 m_freem(m);
1074 m = next_packet;
1075 }
1076 }
1077 else if (ifproto->proto_kpi == kProtoKPI_v2) {
1078 /* Version 2 protocols support packet lists */
1079 error = (*ifproto->kpi.v2.input)(ifproto->ifp,
1080 ifproto->protocol_family,
1081 m);
1082 if (error != 0 && error != EJUSTRETURN)
1083 m_freem_list(m);
91447636 1084 }
2d21ac55
A
1085 return;
1086}
1c79356b 1087
2d21ac55
A
/*
 * dlil_input_packet_list - deliver a chain of inbound packets (linked
 * via m_nextpkt) to their attached protocols.
 *
 * If ifp_param is NULL, each packet's rcvif is used as its input
 * interface, so one call may span several interfaces.  Consecutive
 * packets destined for the same protocol attachment are batched into a
 * sub-list and handed up together through dlil_ifproto_input(); the
 * dlil read lock is dropped around each hand-off and re-taken before
 * demuxing continues.
 */
__private_extern__ void
dlil_input_packet_list(struct ifnet * ifp_param, struct mbuf *m)
{
	int error = 0;
	int locked = 0;		/* non-zero while we hold the dlil read lock */
	protocol_family_t protocol_family;
	mbuf_t next_packet;
	ifnet_t ifp = ifp_param;
	char * frame_header;
	struct if_proto * last_ifproto = NULL;	/* batch target; holds a ref */
	mbuf_t pkt_first = NULL;	/* head of the batch for last_ifproto */
	mbuf_t * pkt_next = NULL;	/* where to append the next batch entry */

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

	while (m != NULL) {
		struct if_proto * ifproto = NULL;

		/* detach this packet from the chain before processing it */
		next_packet = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ifp_param == NULL)
			ifp = m->m_pkthdr.rcvif;
		frame_header = m->m_pkthdr.header;
		m->m_pkthdr.header = NULL;

		if (locked == 0) {
			/* dlil lock protects the demux and interface filters */
			locked = 1;
			dlil_read_begin();
		}
		/* find which protocol family this packet is for */
		error = (*ifp->if_demux)(ifp, m, frame_header,
					 &protocol_family);
		if (error != 0) {
			if (error == EJUSTRETURN) {
				/* demux consumed the packet */
				goto next;
			}
			/* demux failed: family 0 => drop after filters run */
			protocol_family = 0;
		}

		/* DANGER!!! */
		/* count broadcast/multicast receives */
		if (m->m_flags & (M_BCAST|M_MCAST))
			ifp->if_imcasts++;

		/* run interface filters, exclude VLAN packets PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			int filter_result;

			filter_result = dlil_interface_filters_input(ifp, &m,
						  &frame_header,
						  protocol_family);
			if (filter_result != 0) {
				if (filter_result != EJUSTRETURN) {
					m_freem(m);
				}
				goto next;
			}
		}
		/* drop undemuxable or promiscuously-received packets */
		if (error != 0 || ((m->m_flags & M_PROMISC) != 0) ) {
			m_freem(m);
			goto next;
		}

		/* Lookup the protocol attachment to this interface */
		if (protocol_family == 0) {
			ifproto = NULL;
		}
		else if (last_ifproto != NULL
			 && last_ifproto->ifp == ifp
			 && (last_ifproto->protocol_family
			     == protocol_family)) {
			/* fast path: same attachment as the previous packet */
			ifproto = last_ifproto;
		}
		else {
			ifproto = find_attached_proto(ifp, protocol_family);
		}
		if (ifproto == NULL) {
			/* no protocol for this packet, discard */
			m_freem(m);
			goto next;
		}
		if (ifproto != last_ifproto) {
			/* make sure ifproto can't go away during input */
			if_proto_ref(ifproto);
			if (last_ifproto != NULL) {
				/* pass up the list for the previous protocol */
				dlil_read_end();

				dlil_ifproto_input(last_ifproto, pkt_first);
				pkt_first = NULL;
				if_proto_free(last_ifproto);
				dlil_read_begin();
			}
			last_ifproto = ifproto;
		}
		/* extend the list */
		m->m_pkthdr.header = frame_header;
		if (pkt_first == NULL) {
			pkt_first = m;
		} else {
			*pkt_next = m;
		}
		pkt_next = &m->m_nextpkt;

	next:
		if (next_packet == NULL && last_ifproto != NULL) {
			/* pass up the last list of packets */
			dlil_read_end();

			dlil_ifproto_input(last_ifproto, pkt_first);
			if_proto_free(last_ifproto);
			locked = 0;
		}
		m = next_packet;

	}
	if (locked != 0) {
		dlil_read_end();
	}
	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
	return;
}
1210
91447636
A
/*
 * dlil_event_internal - fan an interface event out to every consumer:
 * interface filters first, then every attached protocol's event
 * handler (v1 or v2 KPI), then the interface's own if_event callback,
 * and finally the kernel event subsystem via kev_post_msg().
 *
 * The ifp is pinned with ifp_use() so it cannot be torn down while we
 * deliver; if the use count is already zero (interface detached), only
 * kev_post_msg() runs.
 */
static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
	struct ifnet_filter *filter;

	if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
		dlil_read_begin();

		/* Pass the event to the interface filters */
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if (filter->filt_event)
				filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
		}

		if (ifp->if_proto_hash) {
			int i;

			/* walk every hash bucket of attached protocols */
			for (i = 0; i < PROTO_HASH_SLOTS; i++) {
				struct if_proto *proto;

				SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
					/* pick the event hook matching the attachment's KPI version */
					proto_media_event eventp = proto->proto_kpi == kProtoKPI_v1
						? proto->kpi.v1.event : proto->kpi.v2.event;

					if (eventp)
						eventp(ifp, proto->protocol_family, event);
				}
			}
		}

		dlil_read_end();

		/* Pass the event to the interface */
		if (ifp->if_event)
			ifp->if_event(ifp, event);

		/* drop our pin; finish the detach if we held the last ref */
		if (ifp_unuse(ifp))
			ifp_use_reached_zero(ifp);
	}

	return kev_post_msg(event);
}
1253
2d21ac55
A
1254errno_t
1255ifnet_event(
1256 ifnet_t ifp,
1257 struct kern_event_msg *event)
1c79356b 1258{
91447636 1259 struct kev_msg kev_msg;
2d21ac55
A
1260 int result = 0;
1261
1262 if (ifp == NULL || event == NULL) return EINVAL;
1c79356b 1263
91447636
A
1264 kev_msg.vendor_code = event->vendor_code;
1265 kev_msg.kev_class = event->kev_class;
1266 kev_msg.kev_subclass = event->kev_subclass;
1267 kev_msg.event_code = event->event_code;
1268 kev_msg.dv[0].data_ptr = &event->event_data[0];
1269 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
1270 kev_msg.dv[1].data_length = 0;
1271
91447636 1272 result = dlil_event_internal(ifp, &kev_msg);
1c79356b 1273
91447636
A
1274 return result;
1275}
1c79356b 1276
2d21ac55
A
1277#if CONFIG_MACF_NET
1278#include <netinet/ip6.h>
1279#include <netinet/ip.h>
1280static int dlil_get_socket_type(struct mbuf **mp, int family, int raw)
1281{
1282 struct mbuf *m;
1283 struct ip *ip;
1284 struct ip6_hdr *ip6;
1285 int type = SOCK_RAW;
1286
1287 if (!raw) {
1288 switch (family) {
1289 case PF_INET:
1290 m = m_pullup(*mp, sizeof(struct ip));
1291 if (m == NULL)
1292 break;
1293 *mp = m;
1294 ip = mtod(m, struct ip *);
1295 if (ip->ip_p == IPPROTO_TCP)
1296 type = SOCK_STREAM;
1297 else if (ip->ip_p == IPPROTO_UDP)
1298 type = SOCK_DGRAM;
1299 break;
1300 case PF_INET6:
1301 m = m_pullup(*mp, sizeof(struct ip6_hdr));
1302 if (m == NULL)
1303 break;
1304 *mp = m;
1305 ip6 = mtod(m, struct ip6_hdr *);
1306 if (ip6->ip6_nxt == IPPROTO_TCP)
1307 type = SOCK_STREAM;
1308 else if (ip6->ip6_nxt == IPPROTO_UDP)
1309 type = SOCK_DGRAM;
1310 break;
1311 }
1312 }
1313
1314 return (type);
1315}
1316#endif
1317
#if 0
/*
 * dlil_output_list - historical list-based output path, compiled out
 * with #if 0 and kept for reference only.  dlil_output() below is the
 * live implementation of the same flow: protocol pre-output, optional
 * MAC transmit check, framing, interface output filters, then the
 * driver (batched via IFEF_SENDLIST when the driver supports it).
 */
int
dlil_output_list(
	struct ifnet* ifp,
	u_long proto_family,
	struct mbuf *packetlist,
	caddr_t route,
	const struct sockaddr *dest,
	int raw)
{
	char *frame_type = NULL;
	char *dst_linkaddr = NULL;
	int retval = 0;
	char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter *filter;
	struct if_proto *proto = 0;
	mbuf_t m;
	mbuf_t send_head = NULL;	/* accumulates packets for IFEF_SENDLIST drivers */
	mbuf_t *send_tail = &send_head;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;

	if (raw == 0) {
		proto = find_attached_proto(ifp, proto_family);
		if (proto == NULL) {
			retval = ENXIO;
			goto cleanup;
		}
	}

preout_again:
	if (packetlist == NULL)
		goto cleanup;
	m = packetlist;
	packetlist = packetlist->m_nextpkt;
	m->m_nextpkt = NULL;

	if (raw == 0) {
		/* let the protocol fill in frame type and link address */
		proto_media_preout preoutp = proto->proto_kpi == kProtoKPI_v1
			? proto->kpi.v1.pre_output : proto->kpi.v2.pre_output;
		retval = 0;
		if (preoutp)
			retval = preoutp(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);

		if (retval) {
			if (retval == EJUSTRETURN) {
				/* pre-output consumed this packet; try the next */
				goto preout_again;
			}

			m_freem(m);
			goto cleanup;
		}
	}

	do {
#if CONFIG_MACF_NET
		retval = mac_ifnet_check_transmit(ifp, m, proto_family,
		    dlil_get_socket_type(&m, proto_family, raw));
		if (retval) {
			m_freem(m);
			goto cleanup;
		}
#endif

		if (raw == 0 && ifp->if_framer) {
			retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
			if (retval) {
				if (retval != EJUSTRETURN) {
					m_freem(m);
				}
				goto next;
			}
		}

		/*
		 * Let interface filters (if any) do their thing ...
		 */
		/* Do not pass VLAN tagged packets to filters PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
				if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
					filter->filt_output) {
					retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
					if (retval) {
						if (retval != EJUSTRETURN)
							m_freem(m);
						goto next;
					}
				}
			}
		}
		/*
		 * Strip away M_PROTO1 bit prior to sending packet to the driver
		 * as this field may be used by the driver
		 */
		m->m_flags &= ~M_PROTO1;

		/*
		 * Finally, call the driver.
		 */

		if ((ifp->if_eflags & IFEF_SENDLIST) != 0) {
			/* driver takes lists: defer to one if_output call below */
			*send_tail = m;
			send_tail = &m->m_nextpkt;
		}
		else {
			KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
			retval = ifp->if_output(ifp, m);
			if (retval && dlil_verbose) {
				printf("dlil_output: output error on %s%d retval = %d\n",
					ifp->if_name, ifp->if_unit, retval);
			}
			KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

next:
		m = packetlist;
		if (m) {
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
		}
	} while (m);

	if (send_head) {
		/* flush the accumulated list to an IFEF_SENDLIST driver */
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
		retval = ifp->if_output(ifp, send_head);
		if (retval && dlil_verbose) {
			printf("dlil_output: output error on %s%d retval = %d\n",
				ifp->if_name, ifp->if_unit, retval);
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
	}

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (packetlist) /* if any packet left, clean up */
		mbuf_freem_list(packetlist);
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}
#endif
1c79356b 1469
1c79356b 1470/*
91447636
A
1471 * dlil_output
1472 *
1473 * Caller should have a lock on the protocol domain if the protocol
1474 * doesn't support finer grained locking. In most cases, the lock
1475 * will be held from the socket layer and won't be released until
1476 * we return back to the socket layer.
1477 *
1478 * This does mean that we must take a protocol lock before we take
1479 * an interface lock if we're going to take both. This makes sense
1480 * because a protocol is likely to interact with an ifp while it
1481 * is under the protocol lock.
1c79356b 1482 */
/*
 * Send a chain of packets (linked via m_nextpkt) for proto_family out
 * through ifp.  Per packet: protocol pre-output (unless raw), MAC
 * transmit check, link-layer framing, interface output filters,
 * M_PROTO1 strip, page normalization, TSO capability check, then the
 * driver — either one if_output per packet, or one call with the whole
 * list for IFEF_SENDLIST drivers.  Any undelivered remainder of the
 * chain is freed on error.
 */
__private_extern__ errno_t
dlil_output(
	ifnet_t ifp,
	protocol_family_t proto_family,
	mbuf_t packetlist,
	void *route,
	const struct sockaddr *dest,
	int raw)
{
	char *frame_type = NULL;
	char *dst_linkaddr = NULL;
	int retval = 0;
	char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter *filter;
	struct if_proto *proto = 0;
	mbuf_t m;
	mbuf_t send_head = NULL;	/* list accumulated for IFEF_SENDLIST drivers */
	mbuf_t *send_tail = &send_head;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;

	if (raw == 0) {
		proto = find_attached_proto(ifp, proto_family);
		if (proto == NULL) {
			retval = ENXIO;
			goto cleanup;
		}
	}

preout_again:
	if (packetlist == NULL)
		goto cleanup;
	m = packetlist;
	packetlist = packetlist->m_nextpkt;
	m->m_nextpkt = NULL;

	if (raw == 0) {
		/* protocol fills in the frame type and destination link address */
		proto_media_preout preoutp = proto->proto_kpi == kProtoKPI_v1
			? proto->kpi.v1.pre_output : proto->kpi.v2.pre_output;
		retval = 0;
		if (preoutp)
			retval = preoutp(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);

		if (retval) {
			if (retval == EJUSTRETURN) {
				/* pre-output consumed the packet; pick up the next one */
				goto preout_again;
			}

			m_freem(m);
			goto cleanup;
		}
	}

#if CONFIG_MACF_NET
	retval = mac_ifnet_check_transmit(ifp, m, proto_family,
	    dlil_get_socket_type(&m, proto_family, raw));
	if (retval) {
		m_freem(m);
		goto cleanup;
	}
#endif

	do {
		if (raw == 0 && ifp->if_framer) {
			int rcvif_set = 0;

			/*
			 * If this is a broadcast packet that needs to be
			 * looped back into the system, set the inbound ifp
			 * to that of the outbound ifp. This will allow
			 * us to determine that it is a legitimate packet
			 * for the system. Only set the ifp if it's not
			 * already set, just to be safe.
			 */
			if ((m->m_flags & (M_BCAST | M_LOOP)) &&
			    m->m_pkthdr.rcvif == NULL) {
				m->m_pkthdr.rcvif = ifp;
				rcvif_set = 1;
			}

			retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
			if (retval) {
				if (retval != EJUSTRETURN) {
					m_freem(m);
				}
				goto next;
			}

			/*
			 * Clear the ifp if it was set above, and to be
			 * safe, only if it is still the same as the
			 * outbound ifp we have in context. If it was
			 * looped back, then a copy of it was sent to the
			 * loopback interface with the rcvif set, and we
			 * are clearing the one that will go down to the
			 * layer below.
			 */
			if (rcvif_set && m->m_pkthdr.rcvif == ifp)
				m->m_pkthdr.rcvif = NULL;
		}

		/*
		 * Let interface filters (if any) do their thing ...
		 */
		/* Do not pass VLAN tagged packets to filters PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
				if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
					filter->filt_output) {
					retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
					if (retval) {
						if (retval != EJUSTRETURN)
							m_freem(m);
						goto next;
					}
				}
			}
		}

		/*
		 * Strip away M_PROTO1 bit prior to sending packet to the driver
		 * as this field may be used by the driver
		 */
		m->m_flags &= ~M_PROTO1;

		/*
		 * If the underlying interface is not capable of handling a
		 * packet whose data portion spans across physically disjoint
		 * pages, we need to "normalize" the packet so that we pass
		 * down a chain of mbufs where each mbuf points to a span that
		 * resides in the system page boundary. If the packet does
		 * not cross page(s), the following is a no-op.
		 */
		if (!(ifp->if_hwassist & IFNET_MULTIPAGES)) {
			if ((m = m_normalize(m)) == NULL)
				goto next;
		}

		/*
		 * If this is a TSO packet, make sure the interface still advertise TSO capability
		 */

		if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) && !(ifp->if_hwassist & IFNET_TSO_IPV4)) {
			retval = EMSGSIZE;
			m_freem(m);
			goto cleanup;
		}

		if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV6) && !(ifp->if_hwassist & IFNET_TSO_IPV6)) {
			retval = EMSGSIZE;
			m_freem(m);
			goto cleanup;
		}
		/*
		 * Finally, call the driver.
		 */

		if ((ifp->if_eflags & IFEF_SENDLIST) != 0) {
			/* driver accepts packet lists: batch into send_head */
			*send_tail = m;
			send_tail = &m->m_nextpkt;
		}
		else {
			KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
#if PKT_PRIORITY
			/* account background-priority traffic separately */
			if (mbuf_get_priority(m) == MBUF_PRIORITY_BACKGROUND) {
				atomic_add_32(&ifp->if_obgpackets, 1);
				atomic_add_32(&ifp->if_obgbytes,
				    m->m_pkthdr.len);
			}
#endif /* PKT_PRIORITY */
			retval = ifp->if_output(ifp, m);
			if (retval && dlil_verbose) {
				printf("dlil_output: output error on %s%d retval = %d\n",
					ifp->if_name, ifp->if_unit, retval);
			}
			KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

next:
		m = packetlist;
		if (m) {
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
		}
	} while (m);

	if (send_head) {
		/* flush the batched list to the IFEF_SENDLIST driver */
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
#if PKT_PRIORITY
		if (mbuf_get_priority(send_head) == MBUF_PRIORITY_BACKGROUND) {
			atomic_add_32(&ifp->if_obgpackets, 1);
			atomic_add_32(&ifp->if_obgbytes,
			    send_head->m_pkthdr.len);
		}
#endif /* PKT_PRIORITY */
		retval = ifp->if_output(ifp, send_head);
		if (retval && dlil_verbose) {
			printf("dlil_output: output error on %s%d retval = %d\n",
				ifp->if_name, ifp->if_unit, retval);
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
	}

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (packetlist) /* if any packet left, clean up */
		mbuf_freem_list(packetlist);
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}
1703
2d21ac55
A
/*
 * ifnet_ioctl - dispatch an ioctl to, in order: every matching
 * interface filter, the protocol attachment (if proto_fam is given),
 * and finally the interface itself.
 *
 * retval starts as EOPNOTSUPP ("nobody handled it") and is only
 * overwritten while it still holds EOPNOTSUPP, or when a stage returns
 * EJUSTRETURN — so the first real answer wins and a later EOPNOTSUPP
 * cannot clobber it.  EJUSTRETURN anywhere is mapped to 0 on return.
 */
errno_t
ifnet_ioctl(
	ifnet_t ifp,
	protocol_family_t proto_fam,
	u_long ioctl_code,
	void *ioctl_arg)
{
	struct ifnet_filter *filter;
	int retval = EOPNOTSUPP;
	int result = 0;
	int holding_read = 0;	/* tracks the dlil read lock for cleanup */

	if (ifp == NULL || ioctl_code == 0)
		return EINVAL;

	/* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
	result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
	if (result != 0)
		return EOPNOTSUPP;

	dlil_read_begin();
	holding_read = 1;

	/* Run the interface filters first.
	 * We want to run all filters before calling the protocol,
	 * interface family, or interface.
	 */
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
			filter->filt_ioctl != NULL) {
			result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					/* hard error: stop dispatching */
					goto cleanup;
				}
			}
		}
	}

	/* Allow the protocol to handle the ioctl */
	if (proto_fam) {
		struct if_proto *proto = find_attached_proto(ifp, proto_fam);

		if (proto != 0) {
			/* pick the handler matching the attachment's KPI version */
			proto_media_ioctl ioctlp = proto->proto_kpi == kProtoKPI_v1
				? proto->kpi.v1.ioctl : proto->kpi.v2.ioctl;
			result = EOPNOTSUPP;
			if (ioctlp)
				result = ioctlp(ifp, proto_fam, ioctl_code, ioctl_arg);

			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Since we have incremented the use count on the ifp, we are guaranteed
	 * that the ifp will not go away (the function pointers may not be changed).
	 * We release the dlil read lock so the interface ioctl may trigger a
	 * protocol attach. This happens with vlan and may occur with other virtual
	 * interfaces.
	 */
	dlil_read_end();
	holding_read = 0;

	/* retval is either 0 or EOPNOTSUPP */

	/*
	 * Let the interface handle this ioctl.
	 * If it returns EOPNOTSUPP, ignore that, we may have
	 * already handled this in the protocol or family.
	 */
	if (ifp->if_ioctl)
		result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

	/* Only update retval if no one has handled the ioctl */
	if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
		if (result == ENOTSUP)
			result = EOPNOTSUPP;
		retval = result;
		if (retval && retval != EOPNOTSUPP) {
			goto cleanup;
		}
	}

cleanup:
	if (holding_read)
		dlil_read_end();
	/* drop our pin; finish the detach if we held the last reference */
	if (ifp_unuse(ifp))
		ifp_use_reached_zero(ifp);

	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}
1c79356b 1810
91447636
A
1811__private_extern__ errno_t
1812dlil_set_bpf_tap(
1813 ifnet_t ifp,
1814 bpf_tap_mode mode,
1815 bpf_packet_func callback)
1816{
1817 errno_t error = 0;
1c79356b 1818
91447636
A
1819 dlil_read_begin();
1820 if (ifp->if_set_bpf_tap)
1821 error = ifp->if_set_bpf_tap(ifp, mode, callback);
1822 dlil_read_end();
1823
1824 return error;
1c79356b
A
1825}
1826
/*
 * dlil_resolve_multi - translate a protocol multicast address into a
 * link-layer address.  The protocol attachment's resolve_multi hook
 * gets the first shot; the interface's if_check_multi hook then
 * verifies either the resolved link-layer address (on success) or the
 * original protocol address (when no resolver handled it).
 * ll_addr is zeroed before resolution.
 */
errno_t
dlil_resolve_multi(
	struct ifnet *ifp,
	const struct sockaddr *proto_addr,
	struct sockaddr *ll_addr,
	size_t ll_len)
{
	errno_t	result = EOPNOTSUPP;
	struct if_proto *proto;
	const struct sockaddr *verify;
	proto_media_resolve_multi resolvep;

	dlil_read_begin();

	bzero(ll_addr, ll_len);

	/* Call the protocol first */
	proto = find_attached_proto(ifp, proto_addr->sa_family);
	if (proto != NULL) {
		/* pick the resolver matching the attachment's KPI version */
		resolvep = proto->proto_kpi == kProtoKPI_v1
			? proto->kpi.v1.resolve_multi : proto->kpi.v2.resolve_multi;
		if (resolvep != NULL)
			result = resolvep(ifp, proto_addr,(struct sockaddr_dl*)ll_addr,
							  ll_len);
	}

	/* Let the interface verify the multicast address */
	if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
		if (result == 0)
			verify = ll_addr;
		else
			verify = proto_addr;
		result = ifp->if_check_multi(ifp, verify);
	}

	dlil_read_end();

	return result;
}
1c79356b 1866
91447636
A
1867__private_extern__ errno_t
1868dlil_send_arp_internal(
1869 ifnet_t ifp,
1870 u_short arpop,
1871 const struct sockaddr_dl* sender_hw,
1872 const struct sockaddr* sender_proto,
1873 const struct sockaddr_dl* target_hw,
1874 const struct sockaddr* target_proto)
1875{
1876 struct if_proto *proto;
1877 errno_t result = 0;
1878
1879 dlil_read_begin();
1880
1881 proto = find_attached_proto(ifp, target_proto->sa_family);
2d21ac55 1882 if (proto == NULL) {
91447636
A
1883 result = ENOTSUP;
1884 }
1885 else {
2d21ac55
A
1886 proto_media_send_arp arpp;
1887 arpp = proto->proto_kpi == kProtoKPI_v1
1888 ? proto->kpi.v1.send_arp : proto->kpi.v2.send_arp;
1889 if (arpp == NULL)
1890 result = ENOTSUP;
1891 else
1892 result = arpp(ifp, arpop, sender_hw, sender_proto, target_hw,
1893 target_proto);
91447636
A
1894 }
1895
1896 dlil_read_end();
1897
1898 return result;
1899}
1c79356b 1900
2d21ac55
A
1901static __inline__ int
1902_is_announcement(const struct sockaddr_in * sender_sin,
1903 const struct sockaddr_in * target_sin)
1904{
1905 if (sender_sin == NULL) {
1906 return FALSE;
1907 }
1908 return (sender_sin->sin_addr.s_addr == target_sin->sin_addr.s_addr);
1909}
1910
91447636
A
1911__private_extern__ errno_t
1912dlil_send_arp(
1913 ifnet_t ifp,
1914 u_short arpop,
1915 const struct sockaddr_dl* sender_hw,
1916 const struct sockaddr* sender_proto,
1917 const struct sockaddr_dl* target_hw,
1918 const struct sockaddr* target_proto)
1919{
1920 errno_t result = 0;
2d21ac55
A
1921 const struct sockaddr_in * sender_sin;
1922 const struct sockaddr_in * target_sin;
91447636
A
1923
1924 if (target_proto == NULL || (sender_proto &&
1925 sender_proto->sa_family != target_proto->sa_family))
1926 return EINVAL;
1927
1928 /*
1929 * If this is an ARP request and the target IP is IPv4LL,
2d21ac55
A
1930 * send the request on all interfaces. The exception is
1931 * an announcement, which must only appear on the specific
1932 * interface.
91447636 1933 */
2d21ac55
A
1934 sender_sin = (const struct sockaddr_in *)sender_proto;
1935 target_sin = (const struct sockaddr_in *)target_proto;
1936 if (target_proto->sa_family == AF_INET
1937 && IN_LINKLOCAL(ntohl(target_sin->sin_addr.s_addr))
1938 && ipv4_ll_arp_aware != 0
1939 && arpop == ARPOP_REQUEST
1940 && !_is_announcement(target_sin, sender_sin)) {
91447636
A
1941 ifnet_t *ifp_list;
1942 u_int32_t count;
1943 u_int32_t ifp_on;
1944
1945 result = ENOTSUP;
1946
1947 if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
1948 for (ifp_on = 0; ifp_on < count; ifp_on++) {
1949 errno_t new_result;
1950 ifaddr_t source_hw = NULL;
1951 ifaddr_t source_ip = NULL;
1952 struct sockaddr_in source_ip_copy;
1953
1954 /*
1955 * Only arp on interfaces marked for IPv4LL ARPing. This may
1956 * mean that we don't ARP on the interface the subnet route
1957 * points to.
1958 */
1959 if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
1960 continue;
1961 }
b0d623f7 1962
91447636
A
1963 /* Find the source IP address */
1964 ifnet_lock_shared(ifp_list[ifp_on]);
b0d623f7 1965 source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);
91447636
A
1966 TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
1967 ifa_link) {
1968 if (source_ip->ifa_addr &&
1969 source_ip->ifa_addr->sa_family == AF_INET) {
1970 break;
1971 }
1972 }
1973
1974 /* No IP Source, don't arp */
1975 if (source_ip == NULL) {
1976 ifnet_lock_done(ifp_list[ifp_on]);
1977 continue;
1978 }
1979
1980 /* Copy the source IP address */
1981 source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;
b0d623f7 1982 ifaref(source_hw);
91447636
A
1983 ifnet_lock_done(ifp_list[ifp_on]);
1984
1985 /* Send the ARP */
1986 new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
1987 (struct sockaddr_dl*)source_hw->ifa_addr,
1988 (struct sockaddr*)&source_ip_copy, NULL,
1989 target_proto);
b0d623f7
A
1990
1991 ifafree(source_hw);
91447636
A
1992 if (result == ENOTSUP) {
1993 result = new_result;
1994 }
1995 }
1996 }
1997
1998 ifnet_list_free(ifp_list);
1999 }
2000 else {
2001 result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
2002 target_hw, target_proto);
2003 }
2004
2005 return result;
2006}
1c79356b 2007
/*
 * ifp_use - take one reference on the interface use count.
 *
 * With handle_zero == kIfNetUseCount_MustNotBeZero, a current count of
 * zero means the ifp is already dead: return ENXIO without touching
 * the count.  Otherwise increment unconditionally.  The increment uses
 * a compare-and-swap retry loop so concurrent callers cannot lose
 * updates.  Returns 0 on success.
 */
__private_extern__ int
ifp_use(
	struct ifnet *ifp,
	int handle_zero)
{
	int old_value;
	int retval = 0;

	do {
		old_value = ifp->if_usecnt;
		if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
			retval = ENXIO; // ifp is invalid
			break;
		}
		/* retry until the swap from old_value to old_value+1 lands */
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

	return retval;
}
1c79356b 2026
91447636
A
2027/* ifp_unuse is broken into two pieces.
2028 *
2029 * ifp_use and ifp_unuse must be called between when the caller calls
2030 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
2031 * operations after dlil_write_end has been called. For this reason,
2032 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
2033 * returns a non-zero value. The caller must call ifp_use_reached_zero
2034 * after the caller has called dlil_write_end.
2035 */
/*
 * ifp_use_reached_zero - final interface teardown, run once the use
 * count has dropped to zero (see the contract in the comment above).
 * Unlinks the ifp from the global interface list, drops our reference
 * on each multicast membership, clears IFEF_DETACHING, posts
 * KEV_DL_IF_DETACHED, and finally hands the ifp to the driver's
 * detach callback (if_free), if any.
 */
__private_extern__ void
ifp_use_reached_zero(
	struct ifnet *ifp)
{
	ifnet_detached_func free_func;

	dlil_read_begin();

	/* the caller's contract: only invoked at use count zero */
	if (ifp->if_usecnt != 0)
		panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

	ifnet_head_lock_exclusive();
	ifnet_lock_exclusive(ifp);

	/* Remove ourselves from the list */
	TAILQ_REMOVE(&ifnet_head, ifp, if_link);
	ifnet_addrs[ifp->if_index - 1] = NULL;

	/* ifp should be removed from the interface list */
	while (ifp->if_multiaddrs.lh_first) {
		struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

		/*
		 * When the interface is gone, we will no longer
		 * be listening on these multicasts. Various bits
		 * of the stack may be referencing these multicasts,
		 * release only our reference.
		 */
		LIST_REMOVE(ifma, ifma_link);
		ifma->ifma_ifp = NULL;
		ifma_release(ifma);
	}

	ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
	ifnet_lock_done(ifp);
	ifnet_head_done();

	/* capture the callback before releasing the dlil read lock */
	free_func = ifp->if_free;
	dlil_read_end();
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, NULL, 0);

	if (free_func)
		free_func(ifp);
}
1c79356b 2080
2d21ac55 2081__private_extern__ int
91447636
A
2082ifp_unuse(
2083 struct ifnet *ifp)
2084{
2085 int oldval;
b0d623f7 2086 oldval = OSDecrementAtomic(&ifp->if_usecnt);
91447636 2087 if (oldval == 0)
2d21ac55 2088 panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);
91447636
A
2089
2090 if (oldval > 1)
2091 return 0;
2092
2093 if ((ifp->if_eflags & IFEF_DETACHING) == 0)
2094 panic("ifp_unuse: use count reached zero but detching flag is not set!");
2095
2096 return 1; /* caller must call ifp_use_reached_zero */
2097}
1c79356b 2098
91447636 2099extern lck_mtx_t *domain_proto_mtx;
1c79356b 2100
2d21ac55 2101static errno_t
91447636
A
2102dlil_attach_protocol_internal(
2103 struct if_proto *proto,
91447636
A
2104 const struct ifnet_demux_desc *demux_list,
2105 u_int32_t demux_count)
2106{
91447636
A
2107 struct kev_dl_proto_data ev_pr_data;
2108 struct ifnet *ifp = proto->ifp;
2109 int retval = 0;
b0d623f7 2110 u_int32_t hash_value = proto_hash_value(proto->protocol_family);
91447636
A
2111
2112 /* setup some of the common values */
91447636 2113 {
2d21ac55 2114 struct domain *dp;
91447636 2115 lck_mtx_lock(domain_proto_mtx);
2d21ac55 2116 dp = domains;
91447636
A
2117 while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
2118 dp = dp->dom_next;
2119 proto->dl_domain = dp;
2120 lck_mtx_unlock(domain_proto_mtx);
2121 }
2122
91447636
A
2123 /*
2124 * Take the write lock to protect readers and exclude other writers.
2125 */
2d21ac55
A
2126 if ((retval = dlil_write_begin()) != 0) {
2127 printf("dlil_attach_protocol_internal - dlil_write_begin returned %d\n", retval);
2128 return retval;
2129 }
91447636
A
2130
2131 /* Check that the interface isn't currently detaching */
2132 ifnet_lock_shared(ifp);
2133 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2134 ifnet_lock_done(ifp);
2135 dlil_write_end();
91447636
A
2136 return ENXIO;
2137 }
2138 ifnet_lock_done(ifp);
2139
2140 if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
2141 dlil_write_end();
91447636
A
2142 return EEXIST;
2143 }
2144
2145 /*
2146 * Call family module add_proto routine so it can refine the
2147 * demux descriptors as it wishes.
2148 */
2d21ac55 2149 retval = ifp->if_add_proto(ifp, proto->protocol_family, demux_list, demux_count);
91447636
A
2150 if (retval) {
2151 dlil_write_end();
91447636
A
2152 return retval;
2153 }
2154
2155 /*
2156 * We can't fail from this point on.
2157 * Increment the number of uses (protocol attachments + interface attached).
2158 */
2159 ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
2160
2161 /*
2162 * Insert the protocol in the hash
2163 */
2164 {
2165 struct if_proto* prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
2166 while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
2167 prev_proto = SLIST_NEXT(prev_proto, next_hash);
2168 if (prev_proto)
2169 SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
2170 else
2171 SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
2172 }
1c79356b 2173
91447636
A
2174 /*
2175 * Add to if_proto list for this interface
2176 */
2177 if_proto_ref(proto);
91447636
A
2178 dlil_write_end();
2179
2180 /* the reserved field carries the number of protocol still attached (subject to change) */
2181 ev_pr_data.proto_family = proto->protocol_family;
2182 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
2183 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
2184 (struct net_event_data *)&ev_pr_data,
2185 sizeof(struct kev_dl_proto_data));
2d21ac55
A
2186#if 0
2187 DLIL_PRINTF("dlil. Attached protocol %d to %s%d - %d\n", proto->protocol_family,
91447636 2188 ifp->if_name, ifp->if_unit, retval);
2d21ac55 2189#endif
91447636
A
2190 return retval;
2191}
0b4e3aa0 2192
2d21ac55
A
2193errno_t
2194ifnet_attach_protocol(ifnet_t ifp, protocol_family_t protocol,
91447636
A
2195 const struct ifnet_attach_proto_param *proto_details)
2196{
2197 int retval = 0;
2198 struct if_proto *ifproto = NULL;
2199
2d21ac55
A
2200 if (ifp == NULL || protocol == 0 || proto_details == NULL)
2201 return EINVAL;
2202
91447636
A
2203 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
2204 if (ifproto == 0) {
2d21ac55 2205 DLIL_PRINTF("ERROR - dlil failed if_proto allocation\n");
91447636
A
2206 retval = ENOMEM;
2207 goto end;
2208 }
2209 bzero(ifproto, sizeof(*ifproto));
2210
2211 ifproto->ifp = ifp;
2212 ifproto->protocol_family = protocol;
2213 ifproto->proto_kpi = kProtoKPI_v1;
2214 ifproto->kpi.v1.input = proto_details->input;
2215 ifproto->kpi.v1.pre_output = proto_details->pre_output;
2216 ifproto->kpi.v1.event = proto_details->event;
2217 ifproto->kpi.v1.ioctl = proto_details->ioctl;
2218 ifproto->kpi.v1.detached = proto_details->detached;
2219 ifproto->kpi.v1.resolve_multi = proto_details->resolve;
2220 ifproto->kpi.v1.send_arp = proto_details->send_arp;
2221
2d21ac55 2222 retval = dlil_attach_protocol_internal(ifproto,
91447636
A
2223 proto_details->demux_list, proto_details->demux_count);
2224
9bccf70c 2225end:
91447636
A
2226 if (retval && ifproto)
2227 FREE(ifproto, M_IFADDR);
2228 return retval;
1c79356b
A
2229}
2230
2d21ac55
A
2231errno_t
2232ifnet_attach_protocol_v2(ifnet_t ifp, protocol_family_t protocol,
2233 const struct ifnet_attach_proto_param_v2 *proto_details)
91447636 2234{
2d21ac55 2235 int retval = 0;
91447636 2236 struct if_proto *ifproto = NULL;
91447636 2237
2d21ac55 2238 if (ifp == NULL || protocol == 0 || proto_details == NULL)
91447636 2239 return EINVAL;
2d21ac55 2240
91447636 2241 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
2d21ac55
A
2242 if (ifproto == 0) {
2243 DLIL_PRINTF("ERROR - dlil failed if_proto allocation\n");
91447636
A
2244 retval = ENOMEM;
2245 goto end;
2246 }
2d21ac55 2247 bzero(ifproto, sizeof(*ifproto));
91447636 2248
2d21ac55
A
2249 ifproto->ifp = ifp;
2250 ifproto->protocol_family = protocol;
2251 ifproto->proto_kpi = kProtoKPI_v2;
2252 ifproto->kpi.v2.input = proto_details->input;
2253 ifproto->kpi.v2.pre_output = proto_details->pre_output;
2254 ifproto->kpi.v2.event = proto_details->event;
2255 ifproto->kpi.v2.ioctl = proto_details->ioctl;
2256 ifproto->kpi.v2.detached = proto_details->detached;
2257 ifproto->kpi.v2.resolve_multi = proto_details->resolve;
2258 ifproto->kpi.v2.send_arp = proto_details->send_arp;
2259
2260 retval = dlil_attach_protocol_internal(ifproto,
2261 proto_details->demux_list, proto_details->demux_count);
91447636
A
2262
2263end:
2264 if (retval && ifproto)
2265 FREE(ifproto, M_IFADDR);
2266 return retval;
2267}
1c79356b 2268
91447636 2269extern void if_rtproto_del(struct ifnet *ifp, int protocol);
1c79356b 2270
91447636
A
2271static int
2272dlil_detach_protocol_internal(
2273 struct if_proto *proto)
2274{
2275 struct ifnet *ifp = proto->ifp;
b0d623f7 2276 u_int32_t proto_family = proto->protocol_family;
91447636
A
2277 struct kev_dl_proto_data ev_pr_data;
2278
2d21ac55 2279 if (proto->proto_kpi == kProtoKPI_v1) {
91447636
A
2280 if (proto->kpi.v1.detached)
2281 proto->kpi.v1.detached(ifp, proto->protocol_family);
2282 }
2d21ac55
A
2283 if (proto->proto_kpi == kProtoKPI_v2) {
2284 if (proto->kpi.v2.detached)
2285 proto->kpi.v2.detached(ifp, proto->protocol_family);
2286 }
91447636
A
2287 if_proto_free(proto);
2288
2289 /*
2290 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
2291 */
2292
2293 if_rtproto_del(ifp, proto_family);
2294
2295 /* the reserved field carries the number of protocol still attached (subject to change) */
2296 ev_pr_data.proto_family = proto_family;
2297 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
2298 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
2299 (struct net_event_data *)&ev_pr_data,
2300 sizeof(struct kev_dl_proto_data));
2301 return 0;
2302}
1c79356b 2303
2d21ac55
A
2304errno_t
2305ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family)
91447636
A
2306{
2307 struct if_proto *proto = NULL;
2308 int retval = 0;
2309 int use_reached_zero = 0;
2310
2d21ac55 2311 if (ifp == NULL || proto_family == 0) return EINVAL;
1c79356b 2312
91447636 2313 if ((retval = dlil_write_begin()) != 0) {
91447636
A
2314 if (retval == EDEADLK) {
2315 retval = 0;
2316 dlil_read_begin();
2317 proto = find_attached_proto(ifp, proto_family);
2318 if (proto == 0) {
2319 retval = ENXIO;
2320 }
2321 else {
2322 proto->detaching = 1;
2323 dlil_detach_waiting = 1;
2324 wakeup(&dlil_detach_waiting);
2325 }
2326 dlil_read_end();
2327 }
2328 goto end;
2329 }
2330
2331 proto = find_attached_proto(ifp, proto_family);
2332
2333 if (proto == NULL) {
2334 retval = ENXIO;
2335 dlil_write_end();
2336 goto end;
2337 }
2338
2339 /*
2340 * Call family module del_proto
2341 */
2342
2343 if (ifp->if_del_proto)
2344 ifp->if_del_proto(ifp, proto->protocol_family);
1c79356b 2345
91447636
A
2346 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);
2347
2348 /*
2349 * We can do the rest of the work outside of the write lock.
2350 */
2351 use_reached_zero = ifp_unuse(ifp);
2352 dlil_write_end();
2353
2354 dlil_detach_protocol_internal(proto);
2355
2356 /*
2357 * Only handle the case where the interface will go away after
2358 * we've sent the message. This way post message can send the
2359 * message to the interface safely.
2360 */
2361
2362 if (use_reached_zero)
2363 ifp_use_reached_zero(ifp);
2364
2365end:
2366 return retval;
2367}
1c79356b 2368
91447636
A
2369/*
2370 * dlil_delayed_detach_thread is responsible for detaching
2371 * protocols, protocol filters, and interface filters after
2372 * an attempt was made to detach one of those items while
2373 * it was not safe to do so (i.e. called dlil_read_begin).
2374 *
2375 * This function will take the dlil write lock and walk
2376 * through each of the interfaces looking for items with
2377 * the detaching flag set. When an item is found, it is
2378 * detached from the interface and placed on a local list.
2379 * After all of the items have been collected, we drop the
2380 * write lock and performed the post detach. This is done
2381 * so we only have to take the write lock once.
2382 *
2383 * When detaching a protocol filter, if we find that we
2384 * have detached the very last protocol and we need to call
2385 * ifp_use_reached_zero, we have to break out of our work
2386 * to drop the write lock so we can call ifp_use_reached_zero.
2387 */
2388
2389static void
2390dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2391{
2392 thread_t self = current_thread();
2393 int asserted = 0;
0b4e3aa0 2394
91447636
A
2395 ml_thread_policy(self, MACHINE_GROUP,
2396 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
9bccf70c 2397
91447636
A
2398
2399 while (1) {
2400 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2401 struct ifnet *ifp;
2402 struct proto_hash_entry detached_protos;
2403 struct ifnet_filter_head detached_filters;
2404 struct if_proto *proto;
2405 struct if_proto *next_proto;
2406 struct ifnet_filter *filt;
2407 struct ifnet_filter *next_filt;
2408 int reached_zero;
2409
2410 reached_zero = 0;
2411
2412 /* Clear the detach waiting flag */
2413 dlil_detach_waiting = 0;
2414 TAILQ_INIT(&detached_filters);
2415 SLIST_INIT(&detached_protos);
2416
2417 ifnet_head_lock_shared();
2418 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2419 int i;
2420
2421 // Look for protocols and protocol filters
2422 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2423 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2424 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2425
2426 // Detach this protocol
2427 if (proto->detaching) {
2428 if (ifp->if_del_proto)
2429 ifp->if_del_proto(ifp, proto->protocol_family);
91447636
A
2430 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2431 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2432 reached_zero = ifp_unuse(ifp);
2433 if (reached_zero) {
2434 break;
2435 }
2436 }
2437 else {
2438 // Update prev_nextptr to point to our next ptr
2439 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2440 }
2441 }
2442 }
2443
2444 // look for interface filters that need to be detached
2445 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2446 next_filt = TAILQ_NEXT(filt, filt_next);
2447 if (filt->filt_detaching != 0) {
2448 // take this interface filter off the interface filter list
2449 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2450
2451 // put this interface filter on the detached filters list
2452 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2453 }
2454 }
2455
2456 if (ifp->if_delayed_detach) {
2457 ifp->if_delayed_detach = 0;
2458 reached_zero = ifp_unuse(ifp);
2459 }
2460
2461 if (reached_zero)
2462 break;
2463 }
2464 ifnet_head_done();
2465 dlil_write_end();
2466
2467 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2468 next_filt = TAILQ_NEXT(filt, filt_next);
2469 /*
2470 * dlil_detach_filter_internal won't remove an item from
2471 * the list if it is already detached (second parameter).
2472 * The item will be freed though.
2473 */
2474 dlil_detach_filter_internal(filt, 1);
2475 }
2476
2477 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2478 next_proto = SLIST_NEXT(proto, next_hash);
2479 dlil_detach_protocol_internal(proto);
2480 }
2481
2482 if (reached_zero) {
2483 ifp_use_reached_zero(ifp);
2484 dlil_detach_waiting = 1; // we may have missed something
2485 }
2486 }
2487
2488 if (!asserted && dlil_detach_waiting == 0) {
2489 asserted = 1;
2490 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2491 }
2492
2493 if (dlil_detach_waiting == 0) {
2494 asserted = 0;
2495 thread_block(dlil_delayed_detach_thread);
2496 }
2497 }
2498}
9bccf70c 2499
91447636
A
2500static void
2501dlil_call_delayed_detach_thread(void) {
2502 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2503}
9bccf70c 2504
91447636
A
2505extern int if_next_index(void);
2506
2d21ac55
A
2507errno_t
2508ifnet_attach(
2509 ifnet_t ifp,
91447636
A
2510 const struct sockaddr_dl *ll_addr)
2511{
b0d623f7 2512 u_int32_t interface_family;
91447636
A
2513 struct ifnet *tmp_if;
2514 struct proto_hash_entry *new_proto_list = NULL;
2515 int locked = 0;
2516
2d21ac55
A
2517 if (ifp == NULL) return EINVAL;
2518 if (ll_addr && ifp->if_addrlen == 0) {
2519 ifp->if_addrlen = ll_addr->sdl_alen;
2520 }
2521 else if (ll_addr && ll_addr->sdl_alen != ifp->if_addrlen) {
2522 return EINVAL;
2523 }
2524
2525 interface_family = ifp->if_family;
91447636
A
2526
2527 ifnet_head_lock_shared();
1c79356b 2528
91447636
A
2529 /* Verify we aren't already on the list */
2530 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2531 if (tmp_if == ifp) {
2532 ifnet_head_done();
2533 return EEXIST;
2534 }
2535 }
2536
2537 ifnet_head_done();
2538
2539 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2540#if IFNET_RW_LOCK
2541 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2542#else
2543 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2544#endif
0b4e3aa0 2545
91447636
A
2546 if (ifp->if_lock == 0) {
2547 return ENOMEM;
2548 }
1c79356b 2549
b0d623f7
A
2550 if (!(ifp->if_eflags & IFEF_REUSE) || ifp->if_fwd_route_lock == NULL) {
2551 if (ifp->if_fwd_route_lock == NULL)
2552 ifp->if_fwd_route_lock = lck_mtx_alloc_init(
2553 ifnet_lock_group, ifnet_lock_attr);
2554
2555 if (ifp->if_fwd_route_lock == NULL) {
2556#if IFNET_RW_LOCK
2557 lck_rw_free(ifp->if_lock, ifnet_lock_group);
2558#else
2559 lck_mtx_free(ifp->if_lock, ifnet_lock_group);
2560#endif
2561 ifp->if_lock = NULL;
2562 return (ENOMEM);
2563 }
2564 }
2565
91447636 2566 /*
b0d623f7 2567 * Allow interfaces without protocol families to attach
91447636
A
2568 * only if they have the necessary fields filled out.
2569 */
2570
2d21ac55 2571 if (ifp->if_add_proto == 0 || ifp->if_del_proto == 0) {
b0d623f7 2572 DLIL_PRINTF("dlil Attempt to attach interface without family module - %d\n",
91447636
A
2573 interface_family);
2574 return ENODEV;
2575 }
2576
2577 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2578 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2579 M_NKE, M_WAITOK);
1c79356b 2580
91447636
A
2581 if (new_proto_list == 0) {
2582 return ENOBUFS;
2583 }
1c79356b
A
2584 }
2585
91447636
A
2586 dlil_write_begin();
2587 locked = 1;
2588
91447636
A
2589 TAILQ_INIT(&ifp->if_flt_head);
2590
2591
2592 if (new_proto_list) {
2593 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2594 ifp->if_proto_hash = new_proto_list;
2d21ac55 2595 new_proto_list = NULL;
91447636
A
2596 }
2597
2598 /* old_if_attach */
2599 {
2d21ac55
A
2600 char workbuf[64];
2601 int namelen, masklen, socksize, ifasize;
2602 struct ifaddr *ifa = NULL;
91447636
A
2603
2604 if (ifp->if_snd.ifq_maxlen == 0)
2605 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2606 TAILQ_INIT(&ifp->if_prefixhead);
2607 LIST_INIT(&ifp->if_multiaddrs);
2608 ifnet_touch_lastchange(ifp);
2609
2610 /* usecount to track attachment to the ifnet list */
2611 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2612
2613 /* Lock the list of interfaces */
2614 ifnet_head_lock_exclusive();
2615 ifnet_lock_exclusive(ifp);
2616
b0d623f7
A
2617 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2618 int idx = if_next_index();
2619
2620 if (idx == -1) {
2621 ifnet_lock_done(ifp);
2622 ifnet_head_done();
2623 ifp_unuse(ifp);
2624 dlil_write_end();
2625
2626 return ENOBUFS;
2627 }
2628 ifp->if_index = idx;
2629 } else {
2d21ac55 2630 ifa = TAILQ_FIRST(&ifp->if_addrhead);
b0d623f7 2631 }
2d21ac55 2632 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
b0d623f7 2633#define _offsetof(t, m) ((uintptr_t)((caddr_t)&((t *)0)->m))
2d21ac55
A
2634 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2635 socksize = masklen + ifp->if_addrlen;
7e4a7d39 2636#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(u_int32_t) - 1)))
b0d623f7 2637 if ((u_int32_t)socksize < sizeof(struct sockaddr_dl))
2d21ac55
A
2638 socksize = sizeof(struct sockaddr_dl);
2639 socksize = ROUNDUP(socksize);
2640 ifasize = sizeof(struct ifaddr) + 2 * socksize;
2641
2642 /*
2643 * Allocate a new ifa if we don't have one
2644 * or the old one is too small.
2645 */
2646 if (ifa == NULL || socksize > ifa->ifa_addr->sa_len) {
2647 if (ifa)
2648 if_detach_ifa(ifp, ifa);
91447636 2649 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
91447636
A
2650 }
2651
2d21ac55
A
2652 if (ifa) {
2653 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2654 ifnet_addrs[ifp->if_index - 1] = ifa;
2655 bzero(ifa, ifasize);
b0d623f7 2656 ifa->ifa_debug |= IFD_ALLOC;
2d21ac55
A
2657 sdl->sdl_len = socksize;
2658 sdl->sdl_family = AF_LINK;
2659 bcopy(workbuf, sdl->sdl_data, namelen);
2660 sdl->sdl_nlen = namelen;
2661 sdl->sdl_index = ifp->if_index;
2662 sdl->sdl_type = ifp->if_type;
2663 if (ll_addr) {
2664 sdl->sdl_alen = ll_addr->sdl_alen;
2665 if (ll_addr->sdl_alen != ifp->if_addrlen)
2666 panic("ifnet_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2667 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2668 }
2669 ifa->ifa_ifp = ifp;
2670 ifa->ifa_rtrequest = link_rtrequest;
2671 ifa->ifa_addr = (struct sockaddr*)sdl;
2672 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2673 ifa->ifa_netmask = (struct sockaddr*)sdl;
2674 sdl->sdl_len = masklen;
2675 while (namelen != 0)
2676 sdl->sdl_data[--namelen] = 0xff;
2677 }
1c79356b 2678
91447636
A
2679 TAILQ_INIT(&ifp->if_addrhead);
2680 ifa = ifnet_addrs[ifp->if_index - 1];
2681
2682 if (ifa) {
2683 /*
2684 * We don't use if_attach_ifa because we want
2685 * this address to be first on the list.
2686 */
2687 ifaref(ifa);
b0d623f7 2688 ifa->ifa_debug |= IFD_ATTACHED;
91447636 2689 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
1c79356b 2690 }
2d21ac55
A
2691#if CONFIG_MACF_NET
2692 mac_ifnet_label_associate(ifp);
2693#endif
91447636
A
2694
2695 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2696 ifindex2ifnet[ifp->if_index] = ifp;
1c79356b 2697 }
2d21ac55
A
2698
2699 /*
b0d623f7 2700 * A specific dlil input thread is created per Ethernet/PDP interface.
2d21ac55
A
2701 * pseudo interfaces or other types of interfaces use the main ("loopback") thread.
2702 * If the sysctl "net.link.generic.system.multi_threaded_input" is set to zero, all packets will
2703 * be handled by the main loopback thread, reverting to 10.4.x behaviour.
2704 *
2705 */
2706
b0d623f7 2707 if (ifp->if_type == IFT_ETHER || ifp->if_type == IFT_PDP) {
2d21ac55
A
2708 int err;
2709
2710 if (dlil_multithreaded_input > 0) {
2711 ifp->if_input_thread = _MALLOC(sizeof(struct dlil_threading_info), M_NKE, M_WAITOK);
2712 if (ifp->if_input_thread == NULL)
2713 panic("ifnet_attach ifp=%p couldn't alloc threading\n", ifp);
2714 if ((err = dlil_create_input_thread(ifp, ifp->if_input_thread)) != 0)
b0d623f7 2715 panic("ifnet_attach ifp=%p couldn't get a thread. err=%d\n", ifp, err);
2d21ac55 2716#ifdef DLIL_DEBUG
b0d623f7 2717 printf("ifnet_attach: dlil thread for ifp=%p if_index=%d\n", ifp, ifp->if_index);
2d21ac55 2718#endif
91447636
A
2719 }
2720 }
0c530ab8 2721 ifnet_lock_done(ifp);
b0d623f7
A
2722 ifnet_head_done();
2723#if PF
2724 /*
2725 * Attach packet filter to this interface, if enaled.
2726 */
2727 pf_ifnet_hook(ifp, 1);
2728#endif /* PF */
2729 dlil_write_end();
2730
d1ecb069
A
2731#if IFNET_ROUTE_REFCNT
2732 if (net_rtref) {
2733 (void) ifnet_set_idle_flags(ifp, IFRF_IDLE_NOTIFY,
2734 IFRF_IDLE_NOTIFY);
2735 }
2736#endif /* IFNET_ROUTE_REFCNT */
2737
2d21ac55 2738 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, NULL, 0);
1c79356b 2739
91447636 2740 return 0;
1c79356b
A
2741}
2742
2d21ac55
A
2743errno_t
2744ifnet_detach(
2745 ifnet_t ifp)
1c79356b 2746{
91447636
A
2747 struct ifnet_filter *filter;
2748 struct ifnet_filter *filter_next;
2749 int zeroed = 0;
2750 int retval = 0;
2751 struct ifnet_filter_head fhead;
2d21ac55 2752 struct dlil_threading_info *inputthread;
55e303ae 2753
2d21ac55 2754 if (ifp == NULL) return EINVAL;
55e303ae 2755
91447636 2756 ifnet_lock_exclusive(ifp);
55e303ae 2757
91447636
A
2758 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2759 /* Interface has already been detached */
2760 ifnet_lock_done(ifp);
2761 return ENXIO;
55e303ae
A
2762 }
2763
91447636
A
2764 /*
2765 * Indicate this interface is being detached.
2766 *
2767 * This should prevent protocols from attaching
2768 * from this point on. Interface will remain on
2769 * the list until all of the protocols are detached.
2770 */
2771 ifp->if_eflags |= IFEF_DETACHING;
2772 ifnet_lock_done(ifp);
55e303ae 2773
2d21ac55
A
2774 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, NULL, 0);
2775
2776 /* Let BPF know we're detaching */
2777 bpfdetach(ifp);
55e303ae 2778
d1ecb069
A
2779#if IFNET_ROUTE_REFCNT
2780 /*
2781 * Check to see if this interface has previously triggered
2782 * aggressive protocol draining; if so, decrement the global
2783 * refcnt and clear PR_AGGDRAIN on the route domain if
2784 * there are no more of such an interface around.
2785 */
2786 if (ifp->if_want_aggressive_drain != 0)
2787 (void) ifnet_set_idle_flags(ifp, 0, ~0);
2788#endif /* IFNET_ROUTE_REFCNT */
2789
91447636
A
2790 if ((retval = dlil_write_begin()) != 0) {
2791 if (retval == EDEADLK) {
2d21ac55 2792 retval = 0;
91447636
A
2793
2794 /* We need to perform a delayed detach */
2795 ifp->if_delayed_detach = 1;
2796 dlil_detach_waiting = 1;
2797 wakeup(&dlil_detach_waiting);
2798 }
2799 return retval;
55e303ae 2800 }
b0d623f7
A
2801
2802#if PF
2803 /*
2804 * Detach this interface from packet filter, if enabled.
2805 */
2806 pf_ifnet_hook(ifp, 0);
2807#endif /* PF */
2808
91447636
A
2809 /* Steal the list of interface filters */
2810 fhead = ifp->if_flt_head;
2811 TAILQ_INIT(&ifp->if_flt_head);
2d21ac55 2812
91447636
A
2813 /* unuse the interface */
2814 zeroed = ifp_unuse(ifp);
1c79356b 2815
2d21ac55
A
2816 /*
2817 * If thread affinity was set for the workloop thread, we will need
2818 * to tear down the affinity and release the extra reference count
2819 * taken at attach time;
2820 */
2821 if ((inputthread = ifp->if_input_thread) != NULL) {
2822 if (inputthread->net_affinity) {
2823 struct thread *tp;
2824
2825 if (inputthread == dlil_lo_thread_ptr)
2826 panic("Thread affinity should not be enabled "
2827 "on the loopback dlil input thread\n");
2828
2829 lck_mtx_lock(inputthread->input_lck);
2830 tp = inputthread->workloop_thread;
2831 inputthread->workloop_thread = NULL;
2832 inputthread->tag = 0;
2833 inputthread->net_affinity = FALSE;
2834 lck_mtx_unlock(inputthread->input_lck);
2835
2836 /* Tear down workloop thread affinity */
2837 if (tp != NULL) {
2838 (void) dlil_affinity_set(tp,
2839 THREAD_AFFINITY_TAG_NULL);
2840 thread_deallocate(tp);
2841 }
1c79356b 2842
2d21ac55
A
2843 /* Tear down dlil input thread affinity */
2844 tp = inputthread->input_thread;
2845 (void) dlil_affinity_set(tp, THREAD_AFFINITY_TAG_NULL);
2846 thread_deallocate(tp);
9bccf70c 2847 }
1c79356b 2848
2d21ac55
A
2849 /* cleanup ifp dlil input thread, if any */
2850 ifp->if_input_thread = NULL;
55e303ae 2851
2d21ac55
A
2852 if (inputthread != dlil_lo_thread_ptr) {
2853#ifdef DLIL_DEBUG
2854 printf("ifnet_detach: wakeup thread threadinfo: %p "
2855 "input_thread=%p threads: cur=%d max=%d\n",
2856 inputthread, inputthread->input_thread,
2857 dlil_multithreaded_input, cur_dlil_input_threads);
2858#endif
2859 lck_mtx_lock(inputthread->input_lck);
55e303ae 2860
2d21ac55
A
2861 inputthread->input_waiting |= DLIL_INPUT_TERMINATE;
2862 if ((inputthread->input_waiting & DLIL_INPUT_RUNNING) == 0) {
2863 wakeup((caddr_t)&inputthread->input_waiting);
2864 }
2865 lck_mtx_unlock(inputthread->input_lck);
91447636 2866 }
55e303ae 2867 }
b0d623f7
A
2868 /* last chance to clean up IPv4 forwarding cached route */
2869 lck_mtx_lock(ifp->if_fwd_route_lock);
2870 if (ifp->if_fwd_route.ro_rt != NULL) {
2871 rtfree(ifp->if_fwd_route.ro_rt);
2872 ifp->if_fwd_route.ro_rt = NULL;
2873 }
2874 lck_mtx_unlock(ifp->if_fwd_route_lock);
2d21ac55
A
2875 dlil_write_end();
2876
2877 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2878 filter_next = TAILQ_NEXT(filter, filt_next);
2879 dlil_detach_filter_internal(filter, 1);
55e303ae 2880 }
91447636 2881
2d21ac55
A
2882 if (zeroed != 0) {
2883 ifp_use_reached_zero(ifp);
55e303ae 2884 }
2d21ac55
A
2885
2886 return retval;
1c79356b 2887}
9bccf70c 2888
91447636
A
2889static errno_t
2890dlil_recycle_ioctl(
2891 __unused ifnet_t ifnet_ptr,
b0d623f7 2892 __unused u_long ioctl_code,
91447636 2893 __unused void *ioctl_arg)
9bccf70c 2894{
9bccf70c
A
2895 return EOPNOTSUPP;
2896}
2897
91447636
A
2898static int
2899dlil_recycle_output(
2900 __unused struct ifnet *ifnet_ptr,
2901 struct mbuf *m)
9bccf70c 2902{
9bccf70c
A
2903 m_freem(m);
2904 return 0;
2905}
2906
91447636
A
2907static void
2908dlil_recycle_free(
2909 __unused ifnet_t ifnet_ptr)
9bccf70c 2910{
9bccf70c
A
2911}
2912
91447636
A
2913static errno_t
2914dlil_recycle_set_bpf_tap(
2915 __unused ifnet_t ifp,
2916 __unused bpf_tap_mode mode,
2917 __unused bpf_packet_func callback)
9bccf70c
A
2918{
2919 /* XXX not sure what to do here */
2920 return 0;
2921}
2922
2d21ac55 2923__private_extern__
91447636 2924int dlil_if_acquire(
b0d623f7 2925 u_int32_t family,
91447636
A
2926 const void *uniqueid,
2927 size_t uniqueid_len,
2928 struct ifnet **ifp)
9bccf70c
A
2929{
2930 struct ifnet *ifp1 = NULL;
2931 struct dlil_ifnet *dlifp1 = NULL;
91447636 2932 int ret = 0;
9bccf70c 2933
91447636 2934 lck_mtx_lock(dlil_ifnet_mutex);
9bccf70c
A
2935 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2936
2937 ifp1 = (struct ifnet *)dlifp1;
2938
2939 if (ifp1->if_family == family) {
2940
2941 /* same uniqueid and same len or no unique id specified */
2942 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2943 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2944
2945 /* check for matching interface in use */
2946 if (ifp1->if_eflags & IFEF_INUSE) {
2947 if (uniqueid_len) {
2948 ret = EBUSY;
2949 goto end;
2950 }
2951 }
2952 else {
91447636
A
2953 if (!ifp1->if_lock)
2954 panic("ifp's lock is gone\n");
2955 ifnet_lock_exclusive(ifp1);
2956 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2957 ifnet_lock_done(ifp1);
9bccf70c
A
2958 *ifp = ifp1;
2959 goto end;
2960 }
2961 }
2962 }
2963 }
2964
2965 /* no interface found, allocate a new one */
2966 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2967 if (dlifp1 == 0) {
2968 ret = ENOMEM;
2969 goto end;
2970 }
2971
2972 bzero(dlifp1, sizeof(*dlifp1));
2973
2974 if (uniqueid_len) {
2975 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2976 if (dlifp1->if_uniqueid == 0) {
2977 FREE(dlifp1, M_NKE);
2978 ret = ENOMEM;
2979 goto end;
2980 }
2981 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2982 dlifp1->if_uniqueid_len = uniqueid_len;
2983 }
2984
2985 ifp1 = (struct ifnet *)dlifp1;
2986 ifp1->if_eflags |= IFEF_INUSE;
91447636 2987 ifp1->if_name = dlifp1->if_namestorage;
2d21ac55
A
2988#if CONFIG_MACF_NET
2989 mac_ifnet_label_init(ifp1);
2990#endif
9bccf70c
A
2991
2992 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2993
2994 *ifp = ifp1;
2995
2996end:
91447636 2997 lck_mtx_unlock(dlil_ifnet_mutex);
9bccf70c 2998
9bccf70c
A
2999 return ret;
3000}
3001
2d21ac55
A
3002__private_extern__ void
3003dlil_if_release(
3004 ifnet_t ifp)
9bccf70c
A
3005{
3006 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
9bccf70c 3007
91447636
A
3008 /* Interface does not have a lock until it is attached - radar 3713951 */
3009 if (ifp->if_lock)
3010 ifnet_lock_exclusive(ifp);
9bccf70c
A
3011 ifp->if_eflags &= ~IFEF_INUSE;
3012 ifp->if_ioctl = dlil_recycle_ioctl;
3013 ifp->if_output = dlil_recycle_output;
3014 ifp->if_free = dlil_recycle_free;
3015 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
3016
3017 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
3018 ifp->if_name = dlifp->if_namestorage;
2d21ac55
A
3019#if CONFIG_MACF_NET
3020 /*
3021 * We can either recycle the MAC label here or in dlil_if_acquire().
3022 * It seems logical to do it here but this means that anything that
3023 * still has a handle on ifp will now see it as unlabeled.
3024 * Since the interface is "dead" that may be OK. Revisit later.
3025 */
3026 mac_ifnet_label_recycle(ifp);
3027#endif
91447636
A
3028 if (ifp->if_lock)
3029 ifnet_lock_done(ifp);
9bccf70c 3030
9bccf70c 3031}
4a3eedf9
A
3032
3033__private_extern__ void
3034dlil_proto_unplumb_all(struct ifnet *ifp)
3035{
3036 /*
3037 * if_proto_hash[0-3] are for PF_INET, PF_INET6, PF_APPLETALK
3038 * and PF_VLAN, where each bucket contains exactly one entry;
3039 * PF_VLAN does not need an explicit unplumb.
3040 *
3041 * if_proto_hash[4] is for other protocols; we expect anything
3042 * in this bucket to respond to the DETACHING event (which would
3043 * have happened by now) and do the unplumb then.
3044 */
3045 (void) proto_unplumb(PF_INET, ifp);
3046#if INET6
3047 (void) proto_unplumb(PF_INET6, ifp);
3048#endif /* INET6 */
3049#if NETAT
3050 (void) proto_unplumb(PF_APPLETALK, ifp);
3051#endif /* NETAT */
3052}