/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG		DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END		DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT	DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))


#define MAX_DL_TAGS		16
#define MAX_DLIL_FILTERS	16
#define MAX_FRAME_TYPE_SIZE	4 /* LONGWORDS */
#define MAX_LINKADDR		4 /* LONGWORDS */
#define M_NKE			M_IFADDR

#define PFILT(x)	((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x)	((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF	printf
#else
#define DLIL_PRINTF	kprintf
#endif

enum {
	kProtoKPI_DLIL	= 0,
	kProtoKPI_v1	= 1
};

struct if_proto {
	SLIST_ENTRY(if_proto)	next_hash;
	int			refcount;
	int			detaching;
	struct ifnet		*ifp;
	struct domain		*dl_domain;
	protocol_family_t	protocol_family;
	int			proto_kpi;
	union {
		struct {
			dl_input_func		dl_input;
			dl_pre_output_func	dl_pre_output;
			dl_event_func		dl_event;
			dl_offer_func		dl_offer;
			dl_ioctl_func		dl_ioctl;
			dl_detached_func	dl_detached;
		} dlil;
		struct {
			proto_media_input	input;
			proto_media_preout	pre_output;
			proto_media_event	event;
			proto_media_ioctl	ioctl;
			proto_media_detached	detached;
			proto_media_resolve_multi resolve_multi;
			proto_media_send_arp	send_arp;
		} v1;
	} kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);


struct dlil_ifnet {
	/* ifnet and drvr_ext are used by the stack and drivers;
	   drvr_ext extends the public ifnet and must follow dl_if */
	struct ifnet	dl_if;			/* public ifnet */

	/* dlil private fields */
	TAILQ_ENTRY(dlil_ifnet) dl_if_link;	/* dlil_ifnets are linked together;
						   this is not the ifnet list */
	void		*if_uniqueid;		/* unique id identifying the interface */
	size_t		if_uniqueid_len;	/* length of the unique id */
	char		if_namestorage[IFNAMSIZ]; /* interface name storage */
};

struct ifnet_filter {
	TAILQ_ENTRY(ifnet_filter)	filt_next;
	ifnet_t				filt_ifp;
	int				filt_detaching;

	const char			*filt_name;
	void				*filt_cookie;
	protocol_family_t		filt_protocol;
	iff_input_func			filt_input;
	iff_output_func			filt_output;
	iff_event_func			filt_event;
	iff_ioctl_func			filt_ioctl;
	iff_detached_func		filt_detached;
};

struct if_family_str {
	TAILQ_ENTRY(if_family_str)	if_fam_next;
	u_long				if_family;
	int				refcnt;
	int				flags;

#define DLIL_SHUTDOWN 1

	int (*add_if)(struct ifnet *ifp);
	int (*del_if)(struct ifnet *ifp);
	int (*init_if)(struct ifnet *ifp);
	int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
	ifnet_del_proto_func		del_proto;
	ifnet_ioctl_func		ifmod_ioctl;
	int (*shutdown)(void);
};

struct proto_family_str {
	TAILQ_ENTRY(proto_family_str)	proto_fam_next;
	u_long				proto_family;
	u_long				if_family;
	int				usecnt;

	int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
	int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
	kIfNetUseCount_MayBeZero	= 0,
	kIfNetUseCount_MustNotBeZero	= 1
};

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str) if_family_head;
static TAILQ_HEAD(, proto_family_str) proto_family_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_mtx_t *proto_family_mutex;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t *dlil_input_lock;
__private_extern__ thread_t dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr

int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
	union {
		const void*	cval;
		void*		val;
	} ret;

	ret.cval = ptr;
	return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
	unsigned long new_value;
	unsigned long old_value;
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read == dlil_writer_waiting)
		panic("dlil_read_begin - thread is already a writer");

	do {
again:
		old_value = dlil_read_count;

		if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
		{
			tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
			goto again;
		}

		new_value = old_value + 1;
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

	uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	OSDecrementAtomic((UInt32*)&dlil_read_count);
	uth->dlil_incremented_read--;
	if (dlil_read_count == dlil_writer_waiting)
		wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != 0) {
		return EDEADLK;
	}
	lck_mtx_lock(dlil_mutex);
	OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
	if (dlil_read_count == dlil_writer_waiting) {
		uth->dlil_incremented_read = dlil_writer_waiting;
		return 0;
	}
	else {
		tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
		goto again;
	}
}

static void
dlil_write_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != dlil_writer_waiting)
		panic("dlil_write_end - thread is not a writer");
	OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
	lck_mtx_unlock(dlil_mutex);
	uth->dlil_incremented_read = 0;
	wakeup(&dlil_read_count);
}

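/*
 * Example (illustrative sketch only, not part of the original file):
 * how a read-side path is expected to bracket its work with
 * dlil_read_begin/dlil_read_end.  Readers may nest, but a thread
 * already holding the read marker must not call dlil_write_begin
 * (it returns EDEADLK, and callers fall back to a delayed detach).
 * The dlil_example_* name below is a hypothetical placeholder.
 */
#if 0
static int
dlil_example_count_filters(struct ifnet *ifp)
{
	struct ifnet_filter *filter;
	int count = 0;

	dlil_read_begin();	/* filter list cannot change while held */
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next)
		count++;
	dlil_read_end();
	return count;
}
#endif
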
#define PROTO_HASH_SLOTS	0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
	switch(protocol_family) {
		case PF_INET:
			return 0;
		case PF_INET6:
			return 1;
		case PF_APPLETALK:
			return 2;
		case PF_VLAN:
			return 3;
		default:
			return 4;
	}
}

static
struct if_family_str *find_family_module(u_long if_family)
{
	struct if_family_str *mod = NULL;

	TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
		if (mod->if_family == (if_family & 0xffff))
			break;
	}

	return mod;
}

static
struct proto_family_str*
find_proto_module(u_long proto_family, u_long if_family)
{
	struct proto_family_str  *mod = NULL;

	TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
		if ((mod->proto_family == (proto_family & 0xffff))
			&& (mod->if_family == (if_family & 0xffff)))
			break;
	}

	return mod;
}

static struct if_proto*
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
	struct if_proto *proto = NULL;
	u_long i = proto_hash_value(protocol_family);
	if (ifp->if_proto_hash) {
		proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
	}

	while(proto && proto->protocol_family != protocol_family) {
		proto = SLIST_NEXT(proto, next_hash);
	}

	return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
	OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
	int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

	if (oldval == 1) { /* This was the last reference */
		FREE(proto, M_IFADDR);
	}
}

__private_extern__ void
ifnet_lock_assert(
	__unused struct ifnet *ifp,
	__unused int what)
{
#if IFNET_RW_LOCK
	/*
	 * Not implemented for rw locks.
	 *
	 * Function exists so when/if we use mutex we can
	 * enable this check.
	 */
#else
	lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_shared(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_exclusive(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_done(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared()
{
	lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive()
{
	lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done()
{
	lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *ifbyfamily(u_long family, short unit)
{
	struct ifnet *ifp;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
		if ((family == ifp->if_family) && (ifp->if_unit == unit))
			break;
	ifnet_head_done();

	return ifp;
}

static int dlil_ifp_proto_count(struct ifnet * ifp)
{
	int count = 0;
	int i;

	if (ifp->if_proto_hash != NULL) {
		for (i = 0; i < PROTO_HASH_SLOTS; i++) {
			struct if_proto *proto;
			SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
				count++;
			}
		}
	}

	return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
		   struct net_event_data *event_data, u_long event_data_len)
{
	struct net_event_data	ev_data;
	struct kev_msg		ev_msg;

	/*
	 * A net event always starts with a net_event_data structure,
	 * but the caller can generate a simple net event or
	 * provide a longer event structure to post.
	 */

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = event_subclass;
	ev_msg.event_code = event_code;

	if (event_data == 0) {
		event_data = &ev_data;
		event_data_len = sizeof(struct net_event_data);
	}

	strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
	event_data->if_family = ifp->if_family;
	event_data->if_unit = (unsigned long) ifp->if_unit;

	ev_msg.dv[0].data_length = event_data_len;
	ev_msg.dv[0].data_ptr = event_data;
	ev_msg.dv[1].data_length = 0;

	dlil_event_internal(ifp, &ev_msg);
}

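/*
 * Example (illustrative sketch only): posting a simple event through
 * dlil_post_msg.  Passing 0 for event_data makes dlil_post_msg fill
 * in a minimal net_event_data itself, as described above.  The
 * dlil_example_* name is a hypothetical placeholder.
 */
#if 0
static void
dlil_example_post_link_on(struct ifnet *ifp)
{
	/* event_data == 0: name/family/unit are filled in for us */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, 0, 0);
}
#endif
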
void dlil_init(void);
void
dlil_init(void)
{
	lck_grp_attr_t	*grp_attributes = 0;
	lck_attr_t	*lck_attributes = 0;
	lck_grp_t	*input_lock_grp = 0;

	TAILQ_INIT(&dlil_ifnet_head);
	TAILQ_INIT(&if_family_head);
	TAILQ_INIT(&proto_family_head);
	TAILQ_INIT(&ifnet_head);

	/* Setup the lock groups we will use */
	grp_attributes = lck_grp_attr_alloc_init();
	lck_grp_attr_setdefault(grp_attributes);

	dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
#if IFNET_RW_LOCK
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#else
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#endif
	ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
	input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
	lck_grp_attr_free(grp_attributes);
	grp_attributes = 0;

	/* Setup the lock attributes we will use */
	lck_attributes = lck_attr_alloc_init();
	lck_attr_setdefault(lck_attributes);

	ifnet_lock_attr = lck_attr_alloc_init();
	lck_attr_setdefault(ifnet_lock_attr);

	dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
	input_lock_grp = 0;

	ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
	proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

	lck_attr_free(lck_attributes);
	lck_attributes = 0;

	/*
	 * Start up the dlil input thread once everything is initialized
	 */
	(void) kernel_thread(kernel_task, dlil_input_thread);
	(void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
	struct ifnet		*ifp,
	const struct iff_filter	*if_filter,
	interface_filter_t	*filter_ref)
{
	int retval = 0;
	struct ifnet_filter	*filter;

	MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
	if (filter == NULL)
		return ENOMEM;
	bzero(filter, sizeof(*filter));

	filter->filt_ifp = ifp;
	filter->filt_cookie = if_filter->iff_cookie;
	filter->filt_name = if_filter->iff_name;
	filter->filt_protocol = if_filter->iff_protocol;
	filter->filt_input = if_filter->iff_input;
	filter->filt_output = if_filter->iff_output;
	filter->filt_event = if_filter->iff_event;
	filter->filt_ioctl = if_filter->iff_ioctl;
	filter->filt_detached = if_filter->iff_detached;

	if ((retval = dlil_write_begin()) != 0) {
		/* Failed to acquire the write lock */
		FREE(filter, M_NKE);
		return retval;
	}
	TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
	dlil_write_end();
	*filter_ref = filter;
	return retval;
}

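/*
 * Example (illustrative sketch only): attaching an interface filter
 * through dlil_attach_filter.  Only the callbacks a filter cares
 * about need to be set; unused ones stay NULL.  The example_* names
 * are hypothetical placeholders.
 */
#if 0
static errno_t
example_filt_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
		   mbuf_t *data, char **frame_ptr)
{
	return 0;	/* 0 == let the packet continue up the stack */
}

static int
dlil_example_attach_filter(struct ifnet *ifp, interface_filter_t *ref)
{
	struct iff_filter filt;

	bzero(&filt, sizeof(filt));
	filt.iff_name = "com.example.filter";
	filt.iff_protocol = 0;			/* 0 == all protocols */
	filt.iff_input = example_filt_input;
	return dlil_attach_filter(ifp, &filt, ref);
}
#endif
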
static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
	int retval = 0;

	if (detached == 0) {
		ifnet_t ifp = NULL;
		interface_filter_t	entry = NULL;

		/* Take the write lock */
		retval = dlil_write_begin();
		if (retval != 0 && retval != EDEADLK)
			return retval;

		/*
		 * At this point either we have the write lock (retval == 0)
		 * or we couldn't get it (retval == EDEADLK) because someone
		 * else up the stack is holding the read lock. It is safe to
		 * read, either the read or write is held. Verify the filter
		 * parameter before proceeding.
		 */
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
				if (entry == filter)
					break;
			}
			if (entry == filter)
				break;
		}
		ifnet_head_done();

		if (entry != filter) {
			/* filter parameter is not a valid filter ref */
			if (retval == 0) {
				dlil_write_end();
			}
			return EINVAL;
		}

		if (retval == EDEADLK) {
			/* Perform a delayed detach */
			filter->filt_detaching = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
			return 0;
		}

		/* Remove the filter from the list */
		TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
		dlil_write_end();
	}

	/* Call the detached function if there is one */
	if (filter->filt_detached)
		filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

	/* Free the filter */
	FREE(filter, M_NKE);

	return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
	if (filter == NULL)
		return;
	dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
	__unused void*		foo,
	__unused wait_result_t	wait)
{
	while (1) {
		struct mbuf *m, *m_loop;

		lck_spin_lock(dlil_input_lock);
		m = dlil_input_mbuf_head;
		dlil_input_mbuf_head = NULL;
		dlil_input_mbuf_tail = NULL;
		m_loop = dlil_input_loop_head;
		dlil_input_loop_head = NULL;
		dlil_input_loop_tail = NULL;
		lck_spin_unlock(dlil_input_lock);

		/*
		 * NOTE warning %%% attention !!!!
		 * We should think about putting some thread starvation
		 * safeguards in place if we deal with long chains of packets.
		 */
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
			m = m0;
		}
		m = m_loop;
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;
			struct ifnet *ifp = &loif[0];

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(ifp, m, header);
			m = m0;
		}

		proto_input_run();

		if (dlil_input_mbuf_head == NULL &&
			dlil_input_loop_head == NULL && inject_buckets == 0) {
			assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
			(void) thread_block(dlil_input_thread_continue);
			/* NOTREACHED */
		}
	}
}

void dlil_input_thread(void)
{
	register thread_t self = current_thread();

	ml_thread_policy(self, MACHINE_GROUP,
			 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

	dlil_initialized = 1;
	dlil_input_thread_ptr = current_thread();
	dlil_input_thread_continue(NULL, THREAD_RESTART);
}

int
dlil_input_with_stats(
	struct ifnet *ifp,
	struct mbuf *m_head,
	struct mbuf *m_tail,
	const struct ifnet_stat_increment_param *stats)
{
	/* WARNING
	 * Because of loopbacked multicast we cannot stuff the ifp in
	 * the rcvif of the packet header: loopback has its own dlil
	 * input queue
	 */

	lck_spin_lock(dlil_input_lock);
	if (ifp->if_type != IFT_LOOP) {
		if (dlil_input_mbuf_head == NULL)
			dlil_input_mbuf_head = m_head;
		else if (dlil_input_mbuf_tail != NULL)
			dlil_input_mbuf_tail->m_nextpkt = m_head;
		dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
	} else {
		if (dlil_input_loop_head == NULL)
			dlil_input_loop_head = m_head;
		else if (dlil_input_loop_tail != NULL)
			dlil_input_loop_tail->m_nextpkt = m_head;
		dlil_input_loop_tail = m_tail ? m_tail : m_head;
	}
	if (stats) {
		ifp->if_data.ifi_ipackets += stats->packets_in;
		ifp->if_data.ifi_ibytes += stats->bytes_in;
		ifp->if_data.ifi_ierrors += stats->errors_in;

		ifp->if_data.ifi_opackets += stats->packets_out;
		ifp->if_data.ifi_obytes += stats->bytes_out;
		ifp->if_data.ifi_oerrors += stats->errors_out;

		ifp->if_data.ifi_collisions += stats->collisions;
		ifp->if_data.ifi_iqdrops += stats->dropped;
	}
	lck_spin_unlock(dlil_input_lock);

	wakeup((caddr_t)&dlil_input_thread_wakeup);

	return 0;
}

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
	return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}

int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
		  char *frame_header)
{
	int retval;
	struct if_proto *ifproto = 0;
	protocol_family_t protocol_family;
	struct ifnet_filter *filter;


	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

	/*
	 * Lock the interface while we run through
	 * the filters and the demux. This lock
	 * protects the filter list and the demux list.
	 */
	dlil_read_begin();

	/*
	 * Call family demux module. If the demux module finds a match
	 * for the frame it will fill-in the ifproto pointer.
	 */

	retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
	if (retval != 0)
		protocol_family = 0;
	if (retval == EJUSTRETURN) {
		dlil_read_end();
		return 0;
	}

	/* DANGER!!! */
	if (m->m_flags & (M_BCAST|M_MCAST))
		ifp->if_imcasts++;

	/*
	 * Run interface filters
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			int filter_result;
			if (filter->filt_input && (filter->filt_protocol == 0 ||
				filter->filt_protocol == protocol_family)) {
				filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

				if (filter_result) {
					dlil_read_end();
					if (filter_result == EJUSTRETURN) {
						filter_result = 0;
					}
					else {
						m_freem(m);
					}

					return filter_result;
				}
			}
		}
	}

	/* Demux is done, interface filters have been processed, unlock the mutex */
	if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
		dlil_read_end();
		if (retval != EJUSTRETURN) {
			m_freem(m);
			return retval;
		}
		else
			return 0;
	}

	ifproto = find_attached_proto(ifp, protocol_family);

	if (ifproto == 0) {
		dlil_read_end();
		DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
		m_freem(m);
		return 0;
	}

	/*
	 * Hand the packet off to the protocol.
	 */

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_lock(ifproto->dl_domain->dom_mtx);
	}

	if (ifproto->proto_kpi == kProtoKPI_DLIL)
		retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
						       ifp, ifproto->protocol_family,
						       TRUE);
	else
		retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
	}

	dlil_read_end();

	if (retval == EJUSTRETURN)
		retval = 0;
	else
		if (retval)
			m_freem(m);

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
	return retval;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
	struct ifnet_filter *filter;

	if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
		dlil_read_begin();

		/* Pass the event to the interface filters */
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if (filter->filt_event)
				filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
		}

		if (ifp->if_proto_hash) {
			int i;

			for (i = 0; i < PROTO_HASH_SLOTS; i++) {
				struct if_proto *proto;

				SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
					/* Pass the event to the protocol */
					if (proto->proto_kpi == kProtoKPI_DLIL) {
						if (proto->kpi.dlil.dl_event)
							proto->kpi.dlil.dl_event(ifp, event);
					}
					else {
						if (proto->kpi.v1.event)
							proto->kpi.v1.event(ifp, proto->protocol_family, event);
					}
				}
			}
		}

		dlil_read_end();

		/* Pass the event to the interface */
		if (ifp->if_event)
			ifp->if_event(ifp, event);

		if (ifp_unuse(ifp))
			ifp_use_reached_zero(ifp);
	}

	return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
	int result = 0;
	struct kev_msg kev_msg;

	kev_msg.vendor_code = event->vendor_code;
	kev_msg.kev_class = event->kev_class;
	kev_msg.kev_subclass = event->kev_subclass;
	kev_msg.event_code = event->event_code;
	kev_msg.dv[0].data_ptr = &event->event_data[0];
	kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
	kev_msg.dv[1].data_length = 0;

	result = dlil_event_internal(ifp, &kev_msg);

	return result;
}

int
dlil_output_list(
	struct ifnet* ifp,
	u_long proto_family,
	struct mbuf *packetlist,
	caddr_t route,
	const struct sockaddr *dest,
	int raw)
{
	char *frame_type = 0;
	char *dst_linkaddr = 0;
	int error, retval = 0;
	char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter *filter;
	struct if_proto	*proto = 0;
	struct mbuf *m;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
#if BRIDGE
	if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
	if ((raw != 0) || proto_family != PF_INET) {
#endif
		while (packetlist) {
			m = packetlist;
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
			error = dlil_output(ifp, proto_family, m, route, dest, raw);
			if (error) {
				if (packetlist)
					m_freem_list(packetlist);
				return (error);
			}
		}
		return (0);
	}

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;
	m = packetlist;
	packetlist = packetlist->m_nextpkt;
	m->m_nextpkt = NULL;

	proto = find_attached_proto(ifp, proto_family);
	if (proto == NULL) {
		retval = ENXIO;
		goto cleanup;
	}

	retval = 0;
	if (proto->proto_kpi == kProtoKPI_DLIL) {
		if (proto->kpi.dlil.dl_pre_output)
			retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}
	else {
		if (proto->kpi.v1.pre_output)
			retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}

	if (retval) {
		if (retval != EJUSTRETURN) {
			m_freem(m);
		}
		goto cleanup;
	}

	do {

		if (ifp->if_framer) {
			retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
			if (retval) {
				if (retval != EJUSTRETURN) {
					m_freem(m);
				}
				goto cleanup;
			}
		}

		/*
		 * Let interface filters (if any) do their thing ...
		 */
		/* Do not pass VLAN tagged packets to filters PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
				if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
					filter->filt_output) {
					retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
					if (retval) {
						if (retval == EJUSTRETURN)
							continue;
						else {
							m_freem(m);
						}
						goto cleanup;
					}
				}
			}
		}
		/*
		 * Finally, call the driver.
		 */

		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
		retval = ifp->if_output(ifp, m);
		if (retval) {
			printf("dlil_output_list: output error retval = %x\n", retval);
			goto cleanup;
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

		m = packetlist;
		if (m) {
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
		}
	} while (m);


	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (packetlist) /* if any packet left, clean up */
		m_freem_list(packetlist);
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
int
dlil_output(
	struct ifnet* ifp,
	u_long proto_family,
	struct mbuf *m,
	caddr_t route,
	const struct sockaddr *dest,
	int raw)
{
	char *frame_type = 0;
	char *dst_linkaddr = 0;
	int retval = 0;
	char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter *filter;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;

	if (raw == 0) {
		struct if_proto	*proto = 0;

		proto = find_attached_proto(ifp, proto_family);
		if (proto == NULL) {
			m_freem(m);
			retval = ENXIO;
			goto cleanup;
		}

		retval = 0;
		if (proto->proto_kpi == kProtoKPI_DLIL) {
			if (proto->kpi.dlil.dl_pre_output)
				retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}
		else {
			if (proto->kpi.v1.pre_output)
				retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}

		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

	/*
	 * Call framing module
	 */
	if ((raw == 0) && (ifp->if_framer)) {
		retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

#if BRIDGE
	/* !!!LOCKING!!!
	 *
	 * Need to consider how to handle this.
	 */
	broken-locking
	if (do_bridge) {
		struct mbuf *m0 = m;
		struct ether_header *eh = mtod(m, struct ether_header *);

		if (m->m_pkthdr.rcvif)
			m->m_pkthdr.rcvif = NULL;
		ifp = bridge_dst_lookup(eh);
		bdg_forward(&m0, ifp);
		if (m0)
			m_freem(m0);

		return 0;
	}
#endif


	/*
	 * Let interface filters (if any) do their thing ...
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
				filter->filt_output) {
				retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
				if (retval) {
					if (retval != EJUSTRETURN)
						m_freem(m);
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Finally, call the driver.
	 */

	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
	retval = ifp->if_output(ifp, m);
	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

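/*
 * Example (illustrative sketch only): the lock ordering rule from the
 * dlil_output comment above -- take the protocol (domain) lock before
 * any interface lock, never the other way around.  Hypothetical
 * placeholder code, not part of DLIL.
 */
#if 0
static void
dlil_example_lock_order(struct domain *dp, struct ifnet *ifp)
{
	lck_mtx_lock(dp->dom_mtx);	/* protocol (domain) lock first */
	ifnet_lock_shared(ifp);		/* then the interface lock */
	/* ... work that touches both the protocol and the ifp ... */
	ifnet_lock_done(ifp);
	lck_mtx_unlock(dp->dom_mtx);
}
#endif
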
int
dlil_ioctl(u_long	proto_fam,
	   struct ifnet	*ifp,
	   u_long	ioctl_code,
	   caddr_t	ioctl_arg)
{
	struct ifnet_filter *filter;
	int retval = EOPNOTSUPP;
	int result = 0;
	struct if_family_str *if_family;
	int holding_read = 0;

	/* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
	result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
	if (result != 0)
		return EOPNOTSUPP;

	dlil_read_begin();
	holding_read = 1;

	/* Run the interface filters first.
	 * We want to run all filters before calling the protocol,
	 * interface family, or interface.
	 */
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
			filter->filt_ioctl != NULL) {
			result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/* Allow the protocol to handle the ioctl */
	if (proto_fam) {
		struct if_proto	*proto = find_attached_proto(ifp, proto_fam);

		if (proto != 0) {
			result = EOPNOTSUPP;
			if (proto->proto_kpi == kProtoKPI_DLIL) {
				if (proto->kpi.dlil.dl_ioctl)
					result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
			}
			else {
				if (proto->kpi.v1.ioctl)
					result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
			}

			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Since we have incremented the use count on the ifp, we are guaranteed
	 * that the ifp will not go away (the function pointers may not be changed).
	 * We release the dlil read lock so the interface ioctl may trigger a
	 * protocol attach. This happens with vlan and may occur with other virtual
	 * interfaces.
	 */
	dlil_read_end();
	holding_read = 0;

	/* retval is either 0 or EOPNOTSUPP */

	/*
	 * Let the family handle this ioctl.
	 * If it returns something non-zero and not EOPNOTSUPP, we're done.
	 * If it returns zero, the ioctl was handled, so set retval to zero.
	 */
	if_family = find_family_module(ifp->if_family);
	if ((if_family) && (if_family->ifmod_ioctl)) {
		result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

		/* Only update retval if no one has handled the ioctl */
		if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
			if (result == ENOTSUP)
				result = EOPNOTSUPP;
			retval = result;
			if (retval && retval != EOPNOTSUPP) {
				goto cleanup;
			}
		}
	}

	/*
	 * Let the interface handle this ioctl.
	 * If it returns EOPNOTSUPP, ignore that, we may have
	 * already handled this in the protocol or family.
	 */
	if (ifp->if_ioctl)
		result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

	/* Only update retval if no one has handled the ioctl */
	if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
		if (result == ENOTSUP)
			result = EOPNOTSUPP;
		retval = result;
		if (retval && retval != EOPNOTSUPP) {
			goto cleanup;
		}
	}

cleanup:
	if (holding_read)
		dlil_read_end();
	if (ifp_unuse(ifp))
		ifp_use_reached_zero(ifp);

	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
	ifnet_t		ifp,
	bpf_tap_mode	mode,
	bpf_packet_func	callback)
{
	errno_t	error = 0;

	dlil_read_begin();
	if (ifp->if_set_bpf_tap)
		error = ifp->if_set_bpf_tap(ifp, mode, callback);
	dlil_read_end();

	return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
	struct ifnet *ifp,
	const struct sockaddr *proto_addr,
	struct sockaddr *ll_addr,
	size_t ll_len)
{
	errno_t	result = EOPNOTSUPP;
	struct if_proto *proto;
	const struct sockaddr *verify;

	dlil_read_begin();

	bzero(ll_addr, ll_len);

	/* Call the protocol first */
	proto = find_attached_proto(ifp, proto_addr->sa_family);
	if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
		proto->kpi.v1.resolve_multi != NULL) {
		result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
						     (struct sockaddr_dl*)ll_addr, ll_len);
	}

	/* Let the interface verify the multicast address */
	if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
		if (result == 0)
			verify = ll_addr;
		else
			verify = proto_addr;
		result = ifp->if_check_multi(ifp, verify);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	struct if_proto *proto;
	errno_t	result = 0;

	dlil_read_begin();

	proto = find_attached_proto(ifp, target_proto->sa_family);
	if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
		proto->kpi.v1.send_arp == NULL) {
		result = ENOTSUP;
	}
	else {
		result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
						target_hw, target_proto);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	errno_t	result = 0;

	if (target_proto == NULL || (sender_proto &&
		sender_proto->sa_family != target_proto->sa_family))
		return EINVAL;

	/*
	 * If this is an ARP request and the target IP is IPv4LL,
	 * send the request on all interfaces.
	 */
	if (IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)
		&& ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
		arpop == ARPOP_REQUEST) {
		ifnet_t		*ifp_list;
		u_int32_t	count;
		u_int32_t	ifp_on;

		result = ENOTSUP;

		if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
			for (ifp_on = 0; ifp_on < count; ifp_on++) {
				errno_t			new_result;
				ifaddr_t		source_hw = NULL;
				ifaddr_t		source_ip = NULL;
				struct sockaddr_in	source_ip_copy;

				/*
				 * Only arp on interfaces marked for IPv4LL ARPing. This may
				 * mean that we don't ARP on the interface the subnet route
				 * points to.
				 */
				if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
					continue;
				}

				source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

				/* Find the source IP address */
				ifnet_lock_shared(ifp_list[ifp_on]);
				TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
					      ifa_link) {
					if (source_ip->ifa_addr &&
						source_ip->ifa_addr->sa_family == AF_INET) {
						break;
					}
				}

				/* No IP Source, don't arp */
				if (source_ip == NULL) {
					ifnet_lock_done(ifp_list[ifp_on]);
					continue;
				}

				/* Copy the source IP address */
				source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

				ifnet_lock_done(ifp_list[ifp_on]);

				/* Send the ARP */
				new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
							(struct sockaddr_dl*)source_hw->ifa_addr,
							(struct sockaddr*)&source_ip_copy, NULL,
							target_proto);

				if (result == ENOTSUP) {
					result = new_result;
				}
			}
		}

		ifnet_list_free(ifp_list);
	}
	else {
		result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
						target_hw, target_proto);
	}

	return result;
}

static int
ifp_use(
	struct ifnet *ifp,
	int	handle_zero)
{
	int old_value;
	int retval = 0;

	do {
		old_value = ifp->if_usecnt;
		if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
			retval = ENXIO; // ifp is invalid
			break;
		}
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

	return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */
static void
ifp_use_reached_zero(
	struct ifnet *ifp)
{
	struct if_family_str *if_family;
	ifnet_detached_func free_func;

	dlil_read_begin();

	if (ifp->if_usecnt != 0)
		panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

	/* Let BPF know we're detaching */
	bpfdetach(ifp);

	ifnet_head_lock_exclusive();
	ifnet_lock_exclusive(ifp);

	/* Remove ourselves from the list */
	TAILQ_REMOVE(&ifnet_head, ifp, if_link);
	ifnet_addrs[ifp->if_index - 1] = 0;

	/* ifp should be removed from the interface list */
	while (ifp->if_multiaddrs.lh_first) {
		struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

		/*
		 * When the interface is gone, we will no longer
		 * be listening on these multicasts. Various bits
		 * of the stack may be referencing these multicasts,
		 * release only our reference.
		 */
		LIST_REMOVE(ifma, ifma_link);
		ifma->ifma_ifp = NULL;
		ifma_release(ifma);
	}
	ifnet_head_done();

	ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
	ifnet_lock_done(ifp);

	if_family = find_family_module(ifp->if_family);
	if (if_family && if_family->del_if)
		if_family->del_if(ifp);
#if 0
	if (--if_family->if_usecnt == 0) {
		if (if_family->shutdown)
			(*if_family->shutdown)();

		TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
		FREE(if_family, M_IFADDR);
	}
#endif

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
	free_func = ifp->if_free;
	dlil_read_end();

	if (free_func)
		free_func(ifp);
}

static int
ifp_unuse(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
	if (oldval == 0)
		panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

	if (oldval > 1)
		return 0;

	if ((ifp->if_eflags & IFEF_DETACHING) == 0)
		panic("ifp_unuse: use count reached zero but detaching flag is not set!");

	return 1; /* caller must call ifp_use_reached_zero */
}

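/*
 * Example (illustrative sketch only): the two-piece ifp_unuse contract
 * described above.  ifp_use_reached_zero may only run after
 * dlil_write_end, so the result of ifp_unuse is carried across the
 * unlock.  The dlil_example_* name is a hypothetical placeholder.
 */
#if 0
static int
dlil_example_drop_use(struct ifnet *ifp)
{
	int use_reached_zero;
	int retval;

	if ((retval = dlil_write_begin()) != 0)
		return retval;
	/* ... detach work that requires the write lock ... */
	use_reached_zero = ifp_unuse(ifp);
	dlil_write_end();
	if (use_reached_zero)
		ifp_use_reached_zero(ifp);	/* only after dlil_write_end */
	return 0;
}
#endif
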
void
ifp_reference(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
	if (oldval == 0)
		panic("dlil_if_reference - refcount decremented past zero!");
}

extern lck_mtx_t	*domain_proto_mtx;

static int
dlil_attach_protocol_internal(
	struct if_proto	*proto,
	const struct ddesc_head_str *demux,
	const struct ifnet_demux_desc *demux_list,
	u_int32_t	demux_count)
{
	struct ddesc_head_str temp_head;
	struct kev_dl_proto_data	ev_pr_data;
	struct ifnet *ifp = proto->ifp;
	int retval = 0;
	u_long hash_value = proto_hash_value(proto->protocol_family);
	int	if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
	void* free_me = NULL;

	/* setup some of the common values */

	{
		lck_mtx_lock(domain_proto_mtx);
		struct domain *dp = domains;
		while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
			dp = dp->dom_next;
		proto->dl_domain = dp;
		lck_mtx_unlock(domain_proto_mtx);
	}

	/*
	 * Convert the demux descriptors to a type the interface
	 * will understand. Checking e_flags should be safe, this
	 * flag won't change.
	 */
	if (if_using_kpi && demux) {
		/* Convert the demux linked list to a demux_list */
		struct dlil_demux_desc	*demux_entry;
		struct ifnet_demux_desc *temp_list = NULL;
		u_int32_t i = 0;

		TAILQ_FOREACH(demux_entry, demux, next) {
			i++;
		}

		temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
		free_me = temp_list;

		if (temp_list == NULL)
			return ENOMEM;

		i = 0;
		TAILQ_FOREACH(demux_entry, demux, next) {
			/* dlil_demux_desc types 1, 2, and 3 are obsolete and can not be translated */
			if (demux_entry->type == 1 ||
				demux_entry->type == 2 ||
				demux_entry->type == 3) {
				FREE(free_me, M_TEMP);
				return ENOTSUP;
			}

			temp_list[i].type = demux_entry->type;
			temp_list[i].data = demux_entry->native_type;
			temp_list[i].datalen = demux_entry->variants.native_type_length;
			i++;
		}
		demux_count = i;
		demux_list = temp_list;
	}
	else if (!if_using_kpi && demux_list != NULL) {
		struct dlil_demux_desc	*demux_entry;
		u_int32_t i = 0;

		demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
		free_me = demux_entry;
		if (demux_entry == NULL)
			return ENOMEM;

		TAILQ_INIT(&temp_head);

		for (i = 0; i < demux_count; i++) {
			demux_entry[i].type = demux_list[i].type;
			demux_entry[i].native_type = demux_list[i].data;
			demux_entry[i].variants.native_type_length = demux_list[i].datalen;
			TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
		}
		demux = &temp_head;
	}

	/*
	 * Take the write lock to protect readers and exclude other writers.
	 */
	dlil_write_begin();

	/* Check that the interface isn't currently detaching */
	ifnet_lock_shared(ifp);
	if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
		ifnet_lock_done(ifp);
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return ENXIO;
	}
	ifnet_lock_done(ifp);

	if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return EEXIST;
	}

	/*
	 * Call family module add_proto routine so it can refine the
	 * demux descriptors as it wishes.
	 */
	if (if_using_kpi)
		retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
	else {
		retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
						      _cast_non_const(demux));
	}
	if (retval) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return retval;
	}

	/*
	 * We can't fail from this point on.
	 * Increment the number of uses (protocol attachments + interface attached).
	 */
	ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

	/*
	 * Insert the protocol in the hash
	 */
	{
		struct if_proto*	prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
		while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
			prev_proto = SLIST_NEXT(prev_proto, next_hash);
		if (prev_proto)
			SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
		else
			SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
	}

	/*
	 * Add to if_proto list for this interface
	 */
	if_proto_ref(proto);
	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt++;
	dlil_write_end();

	/* the reserved field carries the number of protocols still attached (subject to change) */
	ev_pr_data.proto_family = proto->protocol_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
		      (struct net_event_data *)&ev_pr_data,
		      sizeof(struct kev_dl_proto_data));

	DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
		    ifp->if_name, ifp->if_unit, retval);
	if (free_me)
		FREE(free_me, M_TEMP);
	return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
	const struct ifnet_attach_proto_param *proto_details)
{
	int retval = 0;
	struct if_proto	*ifproto = NULL;

	ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
	if (ifproto == 0) {
		DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
		retval = ENOMEM;
		goto end;
	}
	bzero(ifproto, sizeof(*ifproto));

	ifproto->ifp = ifp;
	ifproto->protocol_family = protocol;
	ifproto->proto_kpi = kProtoKPI_v1;
	ifproto->kpi.v1.input = proto_details->input;
	ifproto->kpi.v1.pre_output = proto_details->pre_output;
	ifproto->kpi.v1.event = proto_details->event;
	ifproto->kpi.v1.ioctl = proto_details->ioctl;
	ifproto->kpi.v1.detached = proto_details->detached;
	ifproto->kpi.v1.resolve_multi = proto_details->resolve;
	ifproto->kpi.v1.send_arp = proto_details->send_arp;

	retval = dlil_attach_protocol_internal(ifproto, NULL,
			proto_details->demux_list, proto_details->demux_count);

end:
	if (retval && ifproto)
		FREE(ifproto, M_IFADDR);
	return retval;
}

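/*
 * Example (illustrative sketch only): filling out an
 * ifnet_attach_proto_param and attaching a protocol through
 * dlil_attach_protocol_kpi.  The example_* names are hypothetical
 * placeholders; unused callbacks stay NULL.
 */
#if 0
static errno_t
example_proto_input(ifnet_t ifp, protocol_family_t protocol,
		    mbuf_t packet, char *header)
{
	mbuf_freem(packet);	/* a real input function would hand off the chain */
	return 0;
}

static int
dlil_example_attach_proto(ifnet_t ifp)
{
	struct ifnet_attach_proto_param param;

	bzero(&param, sizeof(param));
	param.input = example_proto_input;
	return dlil_attach_protocol_kpi(ifp, PF_INET, &param);
}
#endif
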
91447636
A
1902int
1903dlil_attach_protocol(struct dlil_proto_reg_str *proto)
1904{
1905 struct ifnet *ifp = NULL;
1906 struct if_proto *ifproto = NULL;
1907 int retval = 0;
1c79356b 1908
91447636
A
1909 /*
1910 * Do everything we can before taking the write lock
1911 */
1912
1913 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1914 return EINVAL;
1c79356b 1915
91447636
A
1916 /*
1917 * Allocate and init a new if_proto structure
1918 */
1919 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1920 if (!ifproto) {
1921 DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
1922 retval = ENOMEM;
1923 goto end;
1924 }
1925
1c79356b 1926
91447636
A
1927 /* ifbyfamily returns us an ifp with an incremented if_usecnt */
1928 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1929 if (!ifp) {
1930 DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
1931 proto->interface_family, proto->unit_number);
1932 retval = ENXIO;
1933 goto end;
1934 }
1c79356b 1935
91447636
A
1936 bzero(ifproto, sizeof(struct if_proto));
1937
1938 ifproto->ifp = ifp;
1939 ifproto->protocol_family = proto->protocol_family;
1940 ifproto->proto_kpi = kProtoKPI_DLIL;
1941 ifproto->kpi.dlil.dl_input = proto->input;
1942 ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
1943 ifproto->kpi.dlil.dl_event = proto->event;
1944 ifproto->kpi.dlil.dl_offer = proto->offer;
1945 ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
1946 ifproto->kpi.dlil.dl_detached = proto->detached;
1947
1948 retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);
1949
1950end:
1951 if (retval && ifproto)
1952 FREE(ifproto, M_IFADDR);
1953 return retval;
1954}
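/*
 * Illustrative sketch (editor's addition): attaching through the
 * legacy dlil_proto_reg_str path above.  PF_FOO and
 * example_legacy_input() are hypothetical, and the handler is assumed
 * to use this version's dl_input_func signature.
 */
#if 0	/* example only */
extern int example_legacy_input(struct mbuf *m, char *frame_header,
	struct ifnet *ifp, u_long protocol_family, int sync_ok);

static int
example_legacy_attach(void)
{
	struct dlil_proto_reg_str reg;

	bzero(&reg, sizeof(reg));
	TAILQ_INIT(&reg.demux_desc_head);	/* no demux descriptors */
	reg.interface_family = APPLE_IF_FAM_ETHERNET;
	reg.unit_number = 0;			/* en0 */
	reg.protocol_family = PF_FOO;		/* hypothetical family */
	reg.input = example_legacy_input;

	/* EINVAL if either family is 0, ENXIO if en0 does not exist */
	return dlil_attach_protocol(&reg);
}
#endif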
1c79356b 1955
91447636 1956extern void if_rtproto_del(struct ifnet *ifp, int protocol);
1c79356b 1957
91447636
A
1958static int
1959dlil_detach_protocol_internal(
1960 struct if_proto *proto)
1961{
1962 struct ifnet *ifp = proto->ifp;
1963 u_long proto_family = proto->protocol_family;
1964 struct kev_dl_proto_data ev_pr_data;
1965
1966 if (proto->proto_kpi == kProtoKPI_DLIL) {
1967 if (proto->kpi.dlil.dl_detached)
1968 proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
1969 }
1970 else {
1971 if (proto->kpi.v1.detached)
1972 proto->kpi.v1.detached(ifp, proto->protocol_family);
1973 }
1974 if_proto_free(proto);
1975
1976 /*
1977 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1978 */
1979
1980 if_rtproto_del(ifp, proto_family);
1981
 1982 /* the reserved field carries the number of protocols still attached (subject to change) */
1983 ev_pr_data.proto_family = proto_family;
1984 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1985 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1986 (struct net_event_data *)&ev_pr_data,
1987 sizeof(struct kev_dl_proto_data));
1988 return 0;
1989}
1c79356b 1990
91447636
A
1991int
1992dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
1993{
1994 struct if_proto *proto = NULL;
1995 int retval = 0;
1996 int use_reached_zero = 0;
1997
1c79356b 1998
91447636 1999 if ((retval = dlil_write_begin()) != 0) {
91447636
A
2000 if (retval == EDEADLK) {
2001 retval = 0;
2002 dlil_read_begin();
2003 proto = find_attached_proto(ifp, proto_family);
2004 if (proto == 0) {
2005 retval = ENXIO;
2006 }
2007 else {
2008 proto->detaching = 1;
2009 dlil_detach_waiting = 1;
2010 wakeup(&dlil_detach_waiting);
2011 }
2012 dlil_read_end();
2013 }
2014 goto end;
2015 }
2016
2017 proto = find_attached_proto(ifp, proto_family);
2018
2019 if (proto == NULL) {
2020 retval = ENXIO;
2021 dlil_write_end();
2022 goto end;
2023 }
2024
2025 /*
2026 * Call family module del_proto
2027 */
2028
2029 if (ifp->if_del_proto)
2030 ifp->if_del_proto(ifp, proto->protocol_family);
1c79356b 2031
91447636
A
2032 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2033 ifp->offercnt--;
1c79356b 2034
91447636
A
2035 SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);
2036
2037 /*
2038 * We can do the rest of the work outside of the write lock.
2039 */
2040 use_reached_zero = ifp_unuse(ifp);
2041 dlil_write_end();
2042
2043 dlil_detach_protocol_internal(proto);
2044
2045 /*
 2046 * Only handle the case where the interface will go away after
 2047 * we've sent the message. This way dlil_post_msg can still
 2048 * deliver the message to the interface safely.
2049 */
2050
2051 if (use_reached_zero)
2052 ifp_use_reached_zero(ifp);
2053
2054end:
2055 return retval;
2056}
1c79356b 2057
91447636
A
2058/*
2059 * dlil_delayed_detach_thread is responsible for detaching
2060 * protocols, protocol filters, and interface filters after
 2061 * an attempt was made to detach one of those items while
 2062 * it was not safe to do so (i.e. a read lock was held via dlil_read_begin).
2063 *
2064 * This function will take the dlil write lock and walk
2065 * through each of the interfaces looking for items with
2066 * the detaching flag set. When an item is found, it is
2067 * detached from the interface and placed on a local list.
 2068 * After all of the items have been collected, we drop the
 2069 * write lock and perform the post detach. This is done
2070 * so we only have to take the write lock once.
2071 *
 2072 * When detaching a protocol, if we find that we
 2073 * have detached the very last protocol and we need to call
2074 * ifp_use_reached_zero, we have to break out of our work
2075 * to drop the write lock so we can call ifp_use_reached_zero.
2076 */
2077
2078static void
2079dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
2080{
2081 thread_t self = current_thread();
2082 int asserted = 0;
0b4e3aa0 2083
91447636
A
2084 ml_thread_policy(self, MACHINE_GROUP,
2085 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
9bccf70c 2086
91447636
A
2087
2088 while (1) {
2089 if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
2090 struct ifnet *ifp;
2091 struct proto_hash_entry detached_protos;
2092 struct ifnet_filter_head detached_filters;
2093 struct if_proto *proto;
2094 struct if_proto *next_proto;
2095 struct ifnet_filter *filt;
2096 struct ifnet_filter *next_filt;
2097 int reached_zero;
2098
2099 reached_zero = 0;
2100
2101 /* Clear the detach waiting flag */
2102 dlil_detach_waiting = 0;
2103 TAILQ_INIT(&detached_filters);
2104 SLIST_INIT(&detached_protos);
2105
2106 ifnet_head_lock_shared();
2107 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2108 int i;
2109
2110 // Look for protocols and protocol filters
2111 for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
2112 struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
2113 for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {
2114
2115 // Detach this protocol
2116 if (proto->detaching) {
2117 if (ifp->if_del_proto)
2118 ifp->if_del_proto(ifp, proto->protocol_family);
2119 if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
2120 ifp->offercnt--;
2121 *prev_nextptr = SLIST_NEXT(proto, next_hash);
2122 SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
2123 reached_zero = ifp_unuse(ifp);
2124 if (reached_zero) {
2125 break;
2126 }
2127 }
2128 else {
2129 // Update prev_nextptr to point to our next ptr
2130 prev_nextptr = &SLIST_NEXT(proto, next_hash);
2131 }
2132 }
2133 }
2134
2135 // look for interface filters that need to be detached
2136 for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
2137 next_filt = TAILQ_NEXT(filt, filt_next);
2138 if (filt->filt_detaching != 0) {
2139 // take this interface filter off the interface filter list
2140 TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);
2141
2142 // put this interface filter on the detached filters list
2143 TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
2144 }
2145 }
2146
2147 if (ifp->if_delayed_detach) {
2148 ifp->if_delayed_detach = 0;
2149 reached_zero = ifp_unuse(ifp);
2150 }
2151
2152 if (reached_zero)
2153 break;
2154 }
2155 ifnet_head_done();
2156 dlil_write_end();
2157
2158 for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
2159 next_filt = TAILQ_NEXT(filt, filt_next);
2160 /*
2161 * dlil_detach_filter_internal won't remove an item from
2162 * the list if it is already detached (second parameter).
2163 * The item will be freed though.
2164 */
2165 dlil_detach_filter_internal(filt, 1);
2166 }
2167
2168 for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
2169 next_proto = SLIST_NEXT(proto, next_hash);
2170 dlil_detach_protocol_internal(proto);
2171 }
2172
2173 if (reached_zero) {
2174 ifp_use_reached_zero(ifp);
2175 dlil_detach_waiting = 1; // we may have missed something
2176 }
2177 }
2178
2179 if (!asserted && dlil_detach_waiting == 0) {
2180 asserted = 1;
2181 assert_wait(&dlil_detach_waiting, THREAD_UNINT);
2182 }
2183
2184 if (dlil_detach_waiting == 0) {
2185 asserted = 0;
2186 thread_block(dlil_delayed_detach_thread);
2187 }
2188 }
2189}
9bccf70c 2190
91447636
A
2191static void
2192dlil_call_delayed_detach_thread(void) {
2193 dlil_delayed_detach_thread(NULL, THREAD_RESTART);
2194}
9bccf70c 2195
91447636
A
2196extern int if_next_index(void);
2197
2198__private_extern__ int
2199dlil_if_attach_with_address(
2200 struct ifnet *ifp,
2201 const struct sockaddr_dl *ll_addr)
2202{
2203 u_long interface_family = ifp->if_family;
2204 struct if_family_str *if_family = NULL;
2205 int stat;
2206 struct ifnet *tmp_if;
2207 struct proto_hash_entry *new_proto_list = NULL;
2208 int locked = 0;
2209
2210
2211 ifnet_head_lock_shared();
1c79356b 2212
91447636
A
2213 /* Verify we aren't already on the list */
2214 TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
2215 if (tmp_if == ifp) {
2216 ifnet_head_done();
2217 return EEXIST;
2218 }
2219 }
2220
2221 ifnet_head_done();
2222
2223 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0)
2224#if IFNET_RW_LOCK
2225 ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2226#else
2227 ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
2228#endif
0b4e3aa0 2229
91447636
A
2230 if (ifp->if_lock == 0) {
2231 return ENOMEM;
2232 }
1c79356b 2233
91447636
A
2234 // Only use family if this is not a KPI interface
2235 if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
2236 if_family = find_family_module(interface_family);
2237 }
1c79356b 2238
91447636
A
2239 /*
 2240 * Allow interfaces without protocol families to attach
2241 * only if they have the necessary fields filled out.
2242 */
2243
2244 if ((if_family == 0) &&
2245 (ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
2246 DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
2247 interface_family);
2248 return ENODEV;
2249 }
2250
2251 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
2252 MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
2253 M_NKE, M_WAITOK);
1c79356b 2254
91447636
A
2255 if (new_proto_list == 0) {
2256 return ENOBUFS;
2257 }
1c79356b
A
2258 }
2259
91447636
A
2260 dlil_write_begin();
2261 locked = 1;
2262
2263 /*
2264 * Call the family module to fill in the appropriate fields in the
2265 * ifnet structure.
2266 */
2267
2268 if (if_family) {
2269 stat = if_family->add_if(ifp);
2270 if (stat) {
2271 DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
2272 dlil_write_end();
2273 return stat;
2274 }
2275 ifp->if_add_proto_u.original = if_family->add_proto;
2276 ifp->if_del_proto = if_family->del_proto;
2277 if_family->refcnt++;
2278 }
2279
2280 ifp->offercnt = 0;
2281 TAILQ_INIT(&ifp->if_flt_head);
2282
2283
2284 if (new_proto_list) {
2285 bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
2286 ifp->if_proto_hash = new_proto_list;
2287 new_proto_list = 0;
2288 }
2289
2290 /* old_if_attach */
2291 {
2292 struct ifaddr *ifa = 0;
2293
2294 if (ifp->if_snd.ifq_maxlen == 0)
2295 ifp->if_snd.ifq_maxlen = ifqmaxlen;
2296 TAILQ_INIT(&ifp->if_prefixhead);
2297 LIST_INIT(&ifp->if_multiaddrs);
2298 ifnet_touch_lastchange(ifp);
2299
2300 /* usecount to track attachment to the ifnet list */
2301 ifp_use(ifp, kIfNetUseCount_MayBeZero);
2302
2303 /* Lock the list of interfaces */
2304 ifnet_head_lock_exclusive();
2305 ifnet_lock_exclusive(ifp);
2306
2307 if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
2308 char workbuf[64];
2309 int namelen, masklen, socksize, ifasize;
2310
2311 ifp->if_index = if_next_index();
2312
2313 namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
2314#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
2315 masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
2316 socksize = masklen + ifp->if_addrlen;
2317#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
2318 if ((u_long)socksize < sizeof(struct sockaddr_dl))
2319 socksize = sizeof(struct sockaddr_dl);
2320 socksize = ROUNDUP(socksize);
2321 ifasize = sizeof(struct ifaddr) + 2 * socksize;
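	/*
	 * Worked example (editor's addition), assuming a 32-bit build
	 * with sizeof(long) == 4 and sizeof(struct sockaddr_dl) == 20:
	 * for "en0" with a 6 byte link-layer address, namelen = 3 and
	 * masklen = 8 + 3 = 11, so socksize = 11 + 6 = 17, which is
	 * bumped to the 20 byte sockaddr_dl minimum and stays 20 after
	 * ROUNDUP; the single allocation then holds the ifaddr plus two
	 * such sockaddr_dls (address and netmask).
	 */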
2322 ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
2323 if (ifa) {
2324 struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
2325 ifnet_addrs[ifp->if_index - 1] = ifa;
2326 bzero(ifa, ifasize);
2327 sdl->sdl_len = socksize;
2328 sdl->sdl_family = AF_LINK;
2329 bcopy(workbuf, sdl->sdl_data, namelen);
2330 sdl->sdl_nlen = namelen;
2331 sdl->sdl_index = ifp->if_index;
2332 sdl->sdl_type = ifp->if_type;
2333 if (ll_addr) {
2334 sdl->sdl_alen = ll_addr->sdl_alen;
2335 if (ll_addr->sdl_alen != ifp->if_addrlen)
2336 panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
2337 bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
2338 }
2339 ifa->ifa_ifp = ifp;
2340 ifa->ifa_rtrequest = link_rtrequest;
2341 ifa->ifa_addr = (struct sockaddr*)sdl;
2342 sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
2343 ifa->ifa_netmask = (struct sockaddr*)sdl;
2344 sdl->sdl_len = masklen;
2345 while (namelen != 0)
2346 sdl->sdl_data[--namelen] = 0xff;
2347 }
2348 }
2349 else {
2350 /* preserve the first ifaddr */
2351 ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
2352 }
2353
1c79356b 2354
91447636
A
2355 TAILQ_INIT(&ifp->if_addrhead);
2356 ifa = ifnet_addrs[ifp->if_index - 1];
2357
2358 if (ifa) {
2359 /*
2360 * We don't use if_attach_ifa because we want
2361 * this address to be first on the list.
2362 */
2363 ifaref(ifa);
2364 ifa->ifa_debug |= IFA_ATTACHED;
2365 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
1c79356b 2366 }
91447636
A
2367
2368 TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
2369 ifindex2ifnet[ifp->if_index] = ifp;
2370
2371 ifnet_head_done();
1c79356b 2372 }
91447636
A
2373 dlil_write_end();
2374
2375 if (if_family && if_family->init_if) {
2376 stat = if_family->init_if(ifp);
2377 if (stat) {
2378 DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
2379 }
2380 }
21362eb3 2381
89b3af67 2382 ifnet_lock_done(ifp);
21362eb3 2383
91447636 2384 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
1c79356b 2385
91447636 2386 return 0;
1c79356b
A
2387}
2388
1c79356b
A
2389int
2390dlil_if_attach(struct ifnet *ifp)
2391{
91447636 2392 return dlil_if_attach_with_address(ifp, NULL);
1c79356b
A
2393}
2394
2395
2396int
2397dlil_if_detach(struct ifnet *ifp)
2398{
91447636
A
2399 struct ifnet_filter *filter;
2400 struct ifnet_filter *filter_next;
2401 int zeroed = 0;
2402 int retval = 0;
2403 struct ifnet_filter_head fhead;
55e303ae 2404
55e303ae 2405
91447636 2406 ifnet_lock_exclusive(ifp);
55e303ae 2407
91447636
A
2408 if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
2409 /* Interface has already been detached */
2410 ifnet_lock_done(ifp);
2411 return ENXIO;
55e303ae
A
2412 }
2413
91447636
A
2414 /*
2415 * Indicate this interface is being detached.
2416 *
2417 * This should prevent protocols from attaching
2418 * from this point on. Interface will remain on
2419 * the list until all of the protocols are detached.
2420 */
2421 ifp->if_eflags |= IFEF_DETACHING;
2422 ifnet_lock_done(ifp);
55e303ae 2423
91447636 2424 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
55e303ae 2425
91447636
A
2426 if ((retval = dlil_write_begin()) != 0) {
2427 if (retval == EDEADLK) {
2428 retval = DLIL_WAIT_FOR_FREE;
2429
2430 /* We need to perform a delayed detach */
2431 ifp->if_delayed_detach = 1;
2432 dlil_detach_waiting = 1;
2433 wakeup(&dlil_detach_waiting);
2434 }
2435 return retval;
55e303ae
A
2436 }
2437
91447636
A
2438 /* Steal the list of interface filters */
2439 fhead = ifp->if_flt_head;
2440 TAILQ_INIT(&ifp->if_flt_head);
55e303ae 2441
91447636
A
2442 /* unuse the interface */
2443 zeroed = ifp_unuse(ifp);
1c79356b 2444
91447636 2445 dlil_write_end();
55e303ae 2446
91447636
A
2447 for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
2448 filter_next = TAILQ_NEXT(filter, filt_next);
2449 dlil_detach_filter_internal(filter, 1);
1c79356b 2450 }
55e303ae 2451
91447636
A
2452 if (zeroed == 0) {
2453 retval = DLIL_WAIT_FOR_FREE;
2454 }
2455 else
2456 {
2457 ifp_use_reached_zero(ifp);
2458 }
2459
2460 return retval;
1c79356b
A
2461}
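/*
 * Illustrative sketch (editor's addition): driving teardown from a
 * hypothetical family module.  A DLIL_WAIT_FOR_FREE return means the
 * ifnet is still referenced and will be torn down asynchronously (the
 * delayed detach thread calls ifp_use_reached_zero later), so the
 * caller must not reclaim storage the ifnet still uses.
 */
#if 0	/* example only */
static int
example_if_teardown(struct ifnet *ifp)
{
	int err = dlil_if_detach(ifp);

	if (err == DLIL_WAIT_FOR_FREE)
		return 0;	/* completion arrives via the if_free path */
	return err;		/* 0 on success, ENXIO if already detaching */
}
#endif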
2462
2463
2464int
2465dlil_reg_if_modules(u_long interface_family,
2466 struct dlil_ifmod_reg_str *ifmod)
2467{
2468 struct if_family_str *if_family;
1c79356b
A
2469
2470
1c79356b 2471 if (find_family_module(interface_family)) {
91447636 2472 DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
1c79356b 2473 interface_family);
1c79356b
A
2474 return EEXIST;
2475 }
2476
2477 if ((!ifmod->add_if) || (!ifmod->del_if) ||
2478 (!ifmod->add_proto) || (!ifmod->del_proto)) {
91447636 2479 DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
1c79356b
A
2480 return EINVAL;
2481 }
9bccf70c
A
2482
2483 /*
2484 * The following is a gross hack to keep from breaking
2485 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
2486 * does not zero the reserved fields in dlil_ifmod_reg_str.
2487 * As a result, we have to zero any function that used to
2488 * be reserved fields at the time Vicomsoft built their
2489 * kext. Radar #2974305
2490 */
2491 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2]) {
2492 if (interface_family == 123) { /* Vicom */
2493 ifmod->init_if = 0;
2494 } else {
9bccf70c
A
2495 return EINVAL;
2496 }
2497 }
1c79356b
A
2498
2499 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
2500 if (!if_family) {
91447636 2501 DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
1c79356b
A
2502 return ENOMEM;
2503 }
2504
2505 bzero(if_family, sizeof(struct if_family_str));
2506
2507 if_family->if_family = interface_family & 0xffff;
2508 if_family->shutdown = ifmod->shutdown;
2509 if_family->add_if = ifmod->add_if;
2510 if_family->del_if = ifmod->del_if;
9bccf70c 2511 if_family->init_if = ifmod->init_if;
1c79356b
A
2512 if_family->add_proto = ifmod->add_proto;
2513 if_family->del_proto = ifmod->del_proto;
91447636 2514 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
1c79356b
A
2515 if_family->refcnt = 1;
2516 if_family->flags = 0;
2517
2518 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
1c79356b
A
2519 return 0;
2520}
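/*
 * Illustrative sketch (editor's addition): registering an interface
 * family module.  Every example_* handler is hypothetical, with
 * signatures assumed from how this file invokes them; add_if, del_if,
 * add_proto and del_proto are mandatory (checked above), the rest may
 * stay NULL.  0x1234 is a made-up family number -- only its low 16
 * bits are kept.
 */
#if 0	/* example only */
extern int example_add_if(struct ifnet *ifp);
extern int example_del_if(struct ifnet *ifp);
extern int example_add_proto(struct ifnet *ifp, u_long protocol_family,
	struct ddesc_head_str *demux_head);
extern int example_del_proto(struct ifnet *ifp, u_long protocol_family);

static int
example_register_family(void)
{
	struct dlil_ifmod_reg_str ifmod;

	bzero(&ifmod, sizeof(ifmod));	/* keeps the reserved fields zero */
	ifmod.add_if = example_add_if;
	ifmod.del_if = example_del_if;
	ifmod.add_proto = example_add_proto;
	ifmod.del_proto = example_del_proto;
	return dlil_reg_if_modules(0x1234, &ifmod);
}
#endif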
2521
2522int dlil_dereg_if_modules(u_long interface_family)
2523{
2524 struct if_family_str *if_family;
91447636
A
2525 int ret = 0;
2526
1c79356b 2527
1c79356b
A
2528 if_family = find_family_module(interface_family);
2529 if (if_family == 0) {
91447636 2530 return ENXIO;
1c79356b
A
2531 }
2532
2533 if (--if_family->refcnt == 0) {
2534 if (if_family->shutdown)
2535 (*if_family->shutdown)();
2536
2537 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
2538 FREE(if_family, M_IFADDR);
2539 }
9bccf70c 2540 else {
1c79356b 2541 if_family->flags |= DLIL_SHUTDOWN;
9bccf70c
A
2542 ret = DLIL_WAIT_FOR_FREE;
2543 }
1c79356b 2544
9bccf70c 2545 return ret;
1c79356b
A
2546}
2547
2548
2549
55e303ae 2550int
91447636
A
2551dlil_reg_proto_module(
2552 u_long protocol_family,
2553 u_long interface_family,
2554 int (*attach)(struct ifnet *ifp, u_long protocol_family),
2555 int (*detach)(struct ifnet *ifp, u_long protocol_family))
55e303ae
A
2556{
2557 struct proto_family_str *proto_family;
55e303ae 2558
91447636 2559 if (attach == NULL) return EINVAL;
55e303ae 2560
91447636
A
2561 lck_mtx_lock(proto_family_mutex);
2562
2563 TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
2564 if (proto_family->proto_family == protocol_family &&
2565 proto_family->if_family == interface_family) {
2566 lck_mtx_unlock(proto_family_mutex);
2567 return EEXIST;
2568 }
55e303ae
A
2569 }
2570
2571 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
2572 if (!proto_family) {
91447636 2573 lck_mtx_unlock(proto_family_mutex);
55e303ae
A
2574 return ENOMEM;
2575 }
2576
2577 bzero(proto_family, sizeof(struct proto_family_str));
2578 proto_family->proto_family = protocol_family;
2579 proto_family->if_family = interface_family & 0xffff;
91447636
A
2580 proto_family->attach_proto = attach;
2581 proto_family->detach_proto = detach;
55e303ae
A
2582
2583 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
91447636 2584 lck_mtx_unlock(proto_family_mutex);
55e303ae
A
2585 return 0;
2586}
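/*
 * Illustrative sketch (editor's addition): registering a protocol
 * plumber and plumbing it onto an interface.  PF_FOO and the
 * example_* handlers are hypothetical.
 */
#if 0	/* example only */
static int
example_proto_attach(__unused struct ifnet *ifp,
	__unused u_long protocol_family)
{
	/* a real plumber would build demux descriptors and call
	   dlil_attach_protocol() / ifnet_attach_protocol() here */
	return 0;
}

static int
example_proto_detach(struct ifnet *ifp, u_long protocol_family)
{
	return dlil_detach_protocol(ifp, protocol_family);
}

static int
example_plumb(struct ifnet *ifp)
{
	int err;

	err = dlil_reg_proto_module(PF_FOO, APPLE_IF_FAM_ETHERNET,
	    example_proto_attach, example_proto_detach);
	if (err != 0 && err != EEXIST)
		return err;

	/* dispatches to example_proto_attach via the registry above */
	return dlil_plumb_protocol(PF_FOO, ifp);
}
#endif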
2587
2588int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
2589{
2590 struct proto_family_str *proto_family;
91447636
A
2591 int ret = 0;
2592
2593 lck_mtx_lock(proto_family_mutex);
55e303ae 2594
55e303ae
A
2595 proto_family = find_proto_module(protocol_family, interface_family);
2596 if (proto_family == 0) {
91447636
A
2597 lck_mtx_unlock(proto_family_mutex);
2598 return ENXIO;
55e303ae
A
2599 }
2600
2601 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
2602 FREE(proto_family, M_IFADDR);
91447636
A
2603
2604 lck_mtx_unlock(proto_family_mutex);
55e303ae
A
2605 return ret;
2606}
2607
91447636 2608int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
55e303ae
A
2609{
2610 struct proto_family_str *proto_family;
91447636 2611 int ret = 0;
55e303ae 2612
91447636 2613 lck_mtx_lock(proto_family_mutex);
55e303ae
A
2614 proto_family = find_proto_module(protocol_family, ifp->if_family);
2615 if (proto_family == 0) {
91447636
A
2616 lck_mtx_unlock(proto_family_mutex);
2617 return ENXIO;
55e303ae
A
2618 }
2619
91447636 2620 ret = proto_family->attach_proto(ifp, protocol_family);
55e303ae 2621
91447636 2622 lck_mtx_unlock(proto_family_mutex);
55e303ae
A
2623 return ret;
2624}
2625
2626
2627int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
2628{
2629 struct proto_family_str *proto_family;
91447636 2630 int ret = 0;
1c79356b 2631
91447636 2632 lck_mtx_lock(proto_family_mutex);
1c79356b 2633
91447636
A
2634 proto_family = find_proto_module(protocol_family, ifp->if_family);
2635 if (proto_family && proto_family->detach_proto)
2636 ret = proto_family->detach_proto(ifp, protocol_family);
1c79356b 2637 else
91447636 2638 ret = dlil_detach_protocol(ifp, protocol_family);
1c79356b 2639
91447636
A
2640 lck_mtx_unlock(proto_family_mutex);
2641 return ret;
1c79356b 2642}
9bccf70c 2643
91447636
A
2644static errno_t
2645dlil_recycle_ioctl(
2646 __unused ifnet_t ifnet_ptr,
2647 __unused u_int32_t ioctl_code,
2648 __unused void *ioctl_arg)
9bccf70c 2649{
9bccf70c
A
2650 return EOPNOTSUPP;
2651}
2652
91447636
A
2653static int
2654dlil_recycle_output(
2655 __unused struct ifnet *ifnet_ptr,
2656 struct mbuf *m)
9bccf70c 2657{
9bccf70c
A
2658 m_freem(m);
2659 return 0;
2660}
2661
91447636
A
2662static void
2663dlil_recycle_free(
2664 __unused ifnet_t ifnet_ptr)
9bccf70c 2665{
9bccf70c
A
2666}
2667
91447636
A
2668static errno_t
2669dlil_recycle_set_bpf_tap(
2670 __unused ifnet_t ifp,
2671 __unused bpf_tap_mode mode,
2672 __unused bpf_packet_func callback)
9bccf70c
A
2673{
 2674 /* XXX no-op: BPF tap requests are silently ignored on a recycled interface */
2675 return 0;
2676}
2677
91447636
A
2678int dlil_if_acquire(
2679 u_long family,
2680 const void *uniqueid,
2681 size_t uniqueid_len,
2682 struct ifnet **ifp)
9bccf70c
A
2683{
2684 struct ifnet *ifp1 = NULL;
2685 struct dlil_ifnet *dlifp1 = NULL;
91447636 2686 int ret = 0;
9bccf70c 2687
91447636 2688 lck_mtx_lock(dlil_ifnet_mutex);
9bccf70c
A
2689 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2690
2691 ifp1 = (struct ifnet *)dlifp1;
2692
2693 if (ifp1->if_family == family) {
2694
 2695 /* same uniqueid and same len, or no uniqueid specified */
2696 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2697 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2698
2699 /* check for matching interface in use */
2700 if (ifp1->if_eflags & IFEF_INUSE) {
2701 if (uniqueid_len) {
2702 ret = EBUSY;
2703 goto end;
2704 }
2705 }
2706 else {
91447636
A
2707 if (!ifp1->if_lock)
2708 panic("ifp's lock is gone\n");
2709 ifnet_lock_exclusive(ifp1);
2710 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2711 ifnet_lock_done(ifp1);
9bccf70c
A
2712 *ifp = ifp1;
2713 goto end;
2714 }
2715 }
2716 }
2717 }
2718
2719 /* no interface found, allocate a new one */
2720 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2721 if (dlifp1 == 0) {
2722 ret = ENOMEM;
2723 goto end;
2724 }
2725
2726 bzero(dlifp1, sizeof(*dlifp1));
2727
2728 if (uniqueid_len) {
2729 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2730 if (dlifp1->if_uniqueid == 0) {
2731 FREE(dlifp1, M_NKE);
2732 ret = ENOMEM;
2733 goto end;
2734 }
2735 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2736 dlifp1->if_uniqueid_len = uniqueid_len;
2737 }
2738
2739 ifp1 = (struct ifnet *)dlifp1;
2740 ifp1->if_eflags |= IFEF_INUSE;
91447636 2741 ifp1->if_name = dlifp1->if_namestorage;
9bccf70c
A
2742
2743 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2744
2745 *ifp = ifp1;
2746
2747end:
91447636 2748 lck_mtx_unlock(dlil_ifnet_mutex);
9bccf70c 2749
9bccf70c
A
2750 return ret;
2751}
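/*
 * Illustrative sketch (editor's addition): a driver keeping a stable
 * unit across unload/reload by using its MAC address as the uniqueid.
 * The family number and six byte address are hypothetical.
 */
#if 0	/* example only */
static int
example_acquire(u_long family, const u_int8_t ea[6], struct ifnet **ifp)
{
	/* reuses a matching recycled ifnet (setting IFEF_REUSE) or
	 * allocates a fresh one; EBUSY means an ifnet with this
	 * uniqueid is still marked IFEF_INUSE */
	return dlil_if_acquire(family, ea, 6, ifp);
}
#endif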
2752
2753void dlil_if_release(struct ifnet *ifp)
2754{
2755 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
9bccf70c 2756
9bccf70c 2757
91447636
A
2758 /* Interface does not have a lock until it is attached - radar 3713951 */
2759 if (ifp->if_lock)
2760 ifnet_lock_exclusive(ifp);
9bccf70c
A
2761 ifp->if_eflags &= ~IFEF_INUSE;
2762 ifp->if_ioctl = dlil_recycle_ioctl;
2763 ifp->if_output = dlil_recycle_output;
2764 ifp->if_free = dlil_recycle_free;
2765 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2766
2767 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2768 ifp->if_name = dlifp->if_namestorage;
91447636
A
2769 if (ifp->if_lock)
2770 ifnet_lock_done(ifp);
9bccf70c 2771
9bccf70c 2772}