/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc.
 *
 * Data Link Interface Layer
 * Author: Ted Walker
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/domain.h>
#include <sys/user.h>
#include <net/if_dl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/if_var.h>
#include <net/dlil.h>
#include <net/if_arp.h>
#include <sys/kern_event.h>
#include <sys/kdebug.h>

#include <kern/assert.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/locks.h>

#include <net/if_types.h>
#include <net/kpi_interfacefilter.h>

#include <libkern/OSAtomic.h>

#include <machine/machine_routines.h>

#define DBG_LAYER_BEG		DLILDBG_CODE(DBG_DLIL_STATIC, 0)
#define DBG_LAYER_END		DLILDBG_CODE(DBG_DLIL_STATIC, 2)
#define DBG_FNC_DLIL_INPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
#define DBG_FNC_DLIL_OUTPUT	DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
#define DBG_FNC_DLIL_IFOUT	DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))

#define MAX_DL_TAGS		16
#define MAX_DLIL_FILTERS	16
#define MAX_FRAME_TYPE_SIZE	4 /* LONGWORDS */
#define MAX_LINKADDR		4 /* LONGWORDS */
#define M_NKE			M_IFADDR

#define PFILT(x)	((struct dlil_filterq_entry *) (x))->variants.pr_filter
#define IFILT(x)	((struct dlil_filterq_entry *) (x))->variants.if_filter

#if 0
#define DLIL_PRINTF	printf
#else
#define DLIL_PRINTF	kprintf
#endif

enum {
	kProtoKPI_DLIL	= 0,
	kProtoKPI_v1	= 1
};

struct if_proto {
	SLIST_ENTRY(if_proto)	next_hash;
	int			refcount;
	int			detaching;
	struct ifnet		*ifp;
	struct domain		*dl_domain;
	protocol_family_t	protocol_family;
	int			proto_kpi;
	union {
		struct {
			dl_input_func		dl_input;
			dl_pre_output_func	dl_pre_output;
			dl_event_func		dl_event;
			dl_offer_func		dl_offer;
			dl_ioctl_func		dl_ioctl;
			dl_detached_func	dl_detached;
		} dlil;
		struct {
			proto_media_input		input;
			proto_media_preout		pre_output;
			proto_media_event		event;
			proto_media_ioctl		ioctl;
			proto_media_detached		detached;
			proto_media_resolve_multi	resolve_multi;
			proto_media_send_arp		send_arp;
		} v1;
	} kpi;
};

SLIST_HEAD(proto_hash_entry, if_proto);

struct dlil_ifnet {
	/* ifnet and drvr_ext are used by the stack and drivers
	   drvr_ext extends the public ifnet and must follow dl_if */
	struct ifnet	dl_if;			/* public ifnet */

	/* dlil private fields */
	TAILQ_ENTRY(dlil_ifnet) dl_if_link;	/* dlil_ifnet entries are linked together */
						/* it is not the ifnet list */
	void		*if_uniqueid;		/* unique id identifying the interface */
	size_t		if_uniqueid_len;	/* length of the unique id */
	char		if_namestorage[IFNAMSIZ]; /* interface name storage */
};

struct ifnet_filter {
	TAILQ_ENTRY(ifnet_filter)	filt_next;
	ifnet_t				filt_ifp;
	int				filt_detaching;

	const char			*filt_name;
	void				*filt_cookie;
	protocol_family_t		filt_protocol;
	iff_input_func			filt_input;
	iff_output_func			filt_output;
	iff_event_func			filt_event;
	iff_ioctl_func			filt_ioctl;
	iff_detached_func		filt_detached;
};

struct if_family_str {
	TAILQ_ENTRY(if_family_str)	if_fam_next;
	u_long	if_family;
	int	refcnt;
	int	flags;

#define DLIL_SHUTDOWN 1

	int (*add_if)(struct ifnet *ifp);
	int (*del_if)(struct ifnet *ifp);
	int (*init_if)(struct ifnet *ifp);
	int (*add_proto)(struct ifnet *ifp, u_long protocol_family, struct ddesc_head_str *demux_desc_head);
	ifnet_del_proto_func	del_proto;
	ifnet_ioctl_func	ifmod_ioctl;
	int (*shutdown)(void);
};

struct proto_family_str {
	TAILQ_ENTRY(proto_family_str)	proto_fam_next;
	u_long	proto_family;
	u_long	if_family;
	int	usecnt;

	int (*attach_proto)(struct ifnet *ifp, u_long protocol_family);
	int (*detach_proto)(struct ifnet *ifp, u_long protocol_family);
};

enum {
	kIfNetUseCount_MayBeZero = 0,
	kIfNetUseCount_MustNotBeZero = 1
};

static TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
static TAILQ_HEAD(, if_family_str) if_family_head;
static TAILQ_HEAD(, proto_family_str) proto_family_head;
static lck_grp_t *dlil_lock_group;
static lck_grp_t *ifnet_lock_group;
static lck_grp_t *ifnet_head_lock_group;
static lck_attr_t *ifnet_lock_attr;
static lck_mtx_t *proto_family_mutex;
static lck_rw_t *ifnet_head_mutex;
static lck_mtx_t *dlil_ifnet_mutex;
static lck_mtx_t *dlil_mutex;
static unsigned long dlil_read_count = 0;
static unsigned long dlil_detach_waiting = 0;
extern u_int32_t ipv4_ll_arp_aware;

int dlil_initialized = 0;
lck_spin_t *dlil_input_lock;
__private_extern__ thread_t dlil_input_thread_ptr = 0;
int dlil_input_thread_wakeup = 0;
__private_extern__ int dlil_output_thread_wakeup = 0;
static struct mbuf *dlil_input_mbuf_head = NULL;
static struct mbuf *dlil_input_mbuf_tail = NULL;
#if NLOOP > 1
#error dlil_input() needs to be revised to support more than one loopback interface
#endif
static struct mbuf *dlil_input_loop_head = NULL;
static struct mbuf *dlil_input_loop_tail = NULL;

static void dlil_input_thread(void);
static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *msg);
struct ifnet *ifbyfamily(u_long family, short unit);
static int dlil_detach_filter_internal(interface_filter_t filter, int detached);
static void dlil_call_delayed_detach_thread(void);

static void dlil_read_begin(void);
static void dlil_read_end(void);
static int dlil_write_begin(void);
static void dlil_write_end(void);

static int ifp_use(struct ifnet *ifp, int handle_zero);
static int ifp_unuse(struct ifnet *ifp);
static void ifp_use_reached_zero(struct ifnet *ifp);

extern void bpfdetach(struct ifnet*);
extern void proto_input_run(void); // new run_netisr

int dlil_input_packet(struct ifnet *ifp, struct mbuf *m, char *frame_header);

__private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *);

int dlil_expand_mcl;

extern u_int32_t inject_buckets;

static const u_int32_t dlil_writer_waiting = 0x80000000;

static __inline__ void*
_cast_non_const(const void * ptr) {
	union {
		const void*	cval;
		void*		val;
	} ret;

	ret.cval = ptr;
	return (ret.val);
}

/* Should these be inline? */
static void
dlil_read_begin(void)
{
	unsigned long new_value;
	unsigned long old_value;
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read == dlil_writer_waiting)
		panic("dlil_read_begin - thread is already a writer");

	do {
again:
		old_value = dlil_read_count;

		if ((old_value & dlil_writer_waiting) != 0 && uth->dlil_incremented_read == 0)
		{
			tsleep(&dlil_read_count, PRIBIO, "dlil_read_count", 1);
			goto again;
		}

		new_value = old_value + 1;
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)new_value, (UInt32*)&dlil_read_count));

	uth->dlil_incremented_read++;
}

static void
dlil_read_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	OSDecrementAtomic((UInt32*)&dlil_read_count);
	uth->dlil_incremented_read--;
	if (dlil_read_count == dlil_writer_waiting)
		wakeup(_cast_non_const(&dlil_writer_waiting));
}

static int
dlil_write_begin(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != 0) {
		return EDEADLK;
	}
	lck_mtx_lock(dlil_mutex);
	OSBitOrAtomic((UInt32)dlil_writer_waiting, (UInt32*)&dlil_read_count);
again:
	if (dlil_read_count == dlil_writer_waiting) {
		uth->dlil_incremented_read = dlil_writer_waiting;
		return 0;
	}
	else {
		tsleep(_cast_non_const(&dlil_writer_waiting), PRIBIO, "dlil_writer_waiting", 1);
		goto again;
	}
}

static void
dlil_write_end(void)
{
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (uth->dlil_incremented_read != dlil_writer_waiting)
		panic("dlil_write_end - thread is not a writer");
	OSBitAndAtomic((UInt32)~dlil_writer_waiting, (UInt32*)&dlil_read_count);
	lck_mtx_unlock(dlil_mutex);
	uth->dlil_incremented_read = 0;
	wakeup(&dlil_read_count);
}
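
/*
 * Usage sketch (illustrative, not part of the original source): readers
 * bracket list traversals with dlil_read_begin()/dlil_read_end(); a writer
 * must expect EDEADLK when its own thread already holds the read side, in
 * which case the work is deferred (see dlil_detach_filter_internal and
 * dlil_delayed_detach_thread below):
 *
 *	int retval = dlil_write_begin();
 *	if (retval == EDEADLK) {
 *		// mark the item detaching and wake the delayed detach thread
 *	} else if (retval == 0) {
 *		// ... mutate the filter/protocol lists ...
 *		dlil_write_end();
 *	}
 */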

#define PROTO_HASH_SLOTS	0x5

/*
 * Internal functions.
 */

static int
proto_hash_value(u_long protocol_family)
{
	switch(protocol_family) {
		case PF_INET:
			return 0;
		case PF_INET6:
			return 1;
		case PF_APPLETALK:
			return 2;
		case PF_VLAN:
			return 3;
		default:
			return 4;
	}
}

static
struct if_family_str *find_family_module(u_long if_family)
{
	struct if_family_str	*mod = NULL;

	TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
		if (mod->if_family == (if_family & 0xffff))
			break;
	}

	return mod;
}

static
struct proto_family_str*
find_proto_module(u_long proto_family, u_long if_family)
{
	struct proto_family_str	 *mod = NULL;

	TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
		if ((mod->proto_family == (proto_family & 0xffff))
			&& (mod->if_family == (if_family & 0xffff)))
			break;
	}

	return mod;
}

static struct if_proto*
find_attached_proto(struct ifnet *ifp, u_long protocol_family)
{
	struct if_proto *proto = NULL;
	u_long i = proto_hash_value(protocol_family);
	if (ifp->if_proto_hash) {
		proto = SLIST_FIRST(&ifp->if_proto_hash[i]);
	}

	while(proto && proto->protocol_family != protocol_family) {
		proto = SLIST_NEXT(proto, next_hash);
	}

	return proto;
}

static void
if_proto_ref(struct if_proto *proto)
{
	OSAddAtomic(1, (UInt32*)&proto->refcount);
}

static void
if_proto_free(struct if_proto *proto)
{
	int oldval = OSAddAtomic(-1, (UInt32*)&proto->refcount);

	if (oldval == 1) { /* This was the last reference */
		FREE(proto, M_IFADDR);
	}
}

__private_extern__ void
ifnet_lock_assert(
	__unused struct ifnet *ifp,
	__unused int what)
{
#if IFNET_RW_LOCK
	/*
	 * Not implemented for rw locks.
	 *
	 * Function exists so when/if we use mutex we can
	 * enable this check.
	 */
#else
	lck_mtx_assert(ifp->if_lock, what);
#endif
}

__private_extern__ void
ifnet_lock_shared(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_shared(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_exclusive(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_lock_exclusive(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_NOTOWNED);
	lck_mtx_lock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_lock_done(
	struct ifnet *ifp)
{
#if IFNET_RW_LOCK
	lck_rw_done(ifp->if_lock);
#else
	lck_mtx_assert(ifp->if_lock, LCK_MTX_ASSERT_OWNED);
	lck_mtx_unlock(ifp->if_lock);
#endif
}

__private_extern__ void
ifnet_head_lock_shared()
{
	lck_rw_lock_shared(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_lock_exclusive()
{
	lck_rw_lock_exclusive(ifnet_head_mutex);
}

__private_extern__ void
ifnet_head_done()
{
	lck_rw_done(ifnet_head_mutex);
}

/*
 * Public functions.
 */
struct ifnet *ifbyfamily(u_long family, short unit)
{
	struct ifnet *ifp;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
		if ((family == ifp->if_family) && (ifp->if_unit == unit))
			break;
	ifnet_head_done();

	return ifp;
}

static int dlil_ifp_proto_count(struct ifnet * ifp)
{
	int	count = 0;
	int	i;

	if (ifp->if_proto_hash != NULL) {
		for (i = 0; i < PROTO_HASH_SLOTS; i++) {
			struct if_proto *proto;
			SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
				count++;
			}
		}
	}

	return count;
}

__private_extern__ void
dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
		struct net_event_data *event_data, u_long event_data_len)
{
	struct net_event_data	ev_data;
	struct kev_msg		ev_msg;

	/*
	 * A net event always starts with a net_event_data structure,
	 * but the caller can generate a simple net event or
	 * provide a longer event structure to post.
	 */

	ev_msg.vendor_code    = KEV_VENDOR_APPLE;
	ev_msg.kev_class      = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass   = event_subclass;
	ev_msg.event_code     = event_code;

	if (event_data == 0) {
		event_data = &ev_data;
		event_data_len = sizeof(struct net_event_data);
	}

	strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
	event_data->if_family = ifp->if_family;
	event_data->if_unit   = (unsigned long) ifp->if_unit;

	ev_msg.dv[0].data_length = event_data_len;
	ev_msg.dv[0].data_ptr    = event_data;
	ev_msg.dv[1].data_length = 0;

	dlil_event_internal(ifp, &ev_msg);
}
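
/*
 * Sketch (illustrative): posting a simple interface event with no extra
 * payload; passing a NULL event_data makes dlil_post_msg fill in a bare
 * net_event_data on the caller's behalf (KEV_DL_LINK_ON is one such
 * event code):
 *
 *	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_ON, 0, 0);
 */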

void dlil_init(void);
void
dlil_init(void)
{
	lck_grp_attr_t	*grp_attributes = 0;
	lck_attr_t	*lck_attributes = 0;
	lck_grp_t	*input_lock_grp = 0;

	TAILQ_INIT(&dlil_ifnet_head);
	TAILQ_INIT(&if_family_head);
	TAILQ_INIT(&proto_family_head);
	TAILQ_INIT(&ifnet_head);

	/* Setup the lock groups we will use */
	grp_attributes = lck_grp_attr_alloc_init();

	dlil_lock_group = lck_grp_alloc_init("dlil internal locks", grp_attributes);
#if IFNET_RW_LOCK
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#else
	ifnet_lock_group = lck_grp_alloc_init("ifnet locks", grp_attributes);
#endif
	ifnet_head_lock_group = lck_grp_alloc_init("ifnet head lock", grp_attributes);
	input_lock_grp = lck_grp_alloc_init("dlil input lock", grp_attributes);
	lck_grp_attr_free(grp_attributes);
	grp_attributes = 0;

	/* Setup the lock attributes we will use */
	lck_attributes = lck_attr_alloc_init();

	ifnet_lock_attr = lck_attr_alloc_init();

	dlil_input_lock = lck_spin_alloc_init(input_lock_grp, lck_attributes);
	input_lock_grp = 0;

	ifnet_head_mutex = lck_rw_alloc_init(ifnet_head_lock_group, lck_attributes);
	proto_family_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_ifnet_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);
	dlil_mutex = lck_mtx_alloc_init(dlil_lock_group, lck_attributes);

	lck_attr_free(lck_attributes);
	lck_attributes = 0;

	/*
	 * Start up the dlil input thread once everything is initialized
	 */
	(void) kernel_thread(kernel_task, dlil_input_thread);
	(void) kernel_thread(kernel_task, dlil_call_delayed_detach_thread);
}

int
dlil_attach_filter(
	struct ifnet		*ifp,
	const struct iff_filter	*if_filter,
	interface_filter_t	*filter_ref)
{
	int retval = 0;
	struct ifnet_filter	*filter;

	MALLOC(filter, struct ifnet_filter *, sizeof(*filter), M_NKE, M_WAITOK);
	if (filter == NULL)
		return ENOMEM;
	bzero(filter, sizeof(*filter));

	filter->filt_ifp = ifp;
	filter->filt_cookie = if_filter->iff_cookie;
	filter->filt_name = if_filter->iff_name;
	filter->filt_protocol = if_filter->iff_protocol;
	filter->filt_input = if_filter->iff_input;
	filter->filt_output = if_filter->iff_output;
	filter->filt_event = if_filter->iff_event;
	filter->filt_ioctl = if_filter->iff_ioctl;
	filter->filt_detached = if_filter->iff_detached;

	if ((retval = dlil_write_begin()) != 0) {
		/* Failed to acquire the write lock */
		FREE(filter, M_NKE);
		return retval;
	}
	TAILQ_INSERT_TAIL(&ifp->if_flt_head, filter, filt_next);
	dlil_write_end();
	*filter_ref = filter;
	return retval;
}
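
/*
 * Caller's sketch (hypothetical names, illustrative only): fill in a
 * struct iff_filter and keep the returned reference for a later
 * dlil_detach_filter(). A filter callback returning 0 lets the packet
 * continue up the stack; EJUSTRETURN claims it.
 *
 *	static errno_t
 *	my_filt_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
 *				  mbuf_t *data, char **frame_ptr)
 *	{
 *		return 0;
 *	}
 *
 *	struct iff_filter my_filt;
 *	interface_filter_t my_filt_ref;
 *
 *	bzero(&my_filt, sizeof(my_filt));
 *	my_filt.iff_name = "com.example.testfilter";
 *	my_filt.iff_input = my_filt_input;
 *	retval = dlil_attach_filter(ifp, &my_filt, &my_filt_ref);
 */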

static int
dlil_detach_filter_internal(interface_filter_t filter, int detached)
{
	int retval = 0;

	if (detached == 0) {
		ifnet_t			ifp = NULL;
		interface_filter_t	entry = NULL;

		/* Take the write lock */
		retval = dlil_write_begin();
		if (retval != 0 && retval != EDEADLK)
			return retval;

		/*
		 * At this point either we have the write lock (retval == 0)
		 * or we couldn't get it (retval == EDEADLK) because someone
		 * else up the stack is holding the read lock. It is safe to
		 * read, either the read or write is held. Verify the filter
		 * parameter before proceeding.
		 */
		ifnet_head_lock_shared();
		TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
			TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) {
				if (entry == filter)
					break;
			}
			if (entry == filter)
				break;
		}
		ifnet_head_done();

		if (entry != filter) {
			/* filter parameter is not a valid filter ref */
			if (retval == 0) {
				dlil_write_end();
			}
			return EINVAL;
		}

		if (retval == EDEADLK) {
			/* Perform a delayed detach */
			filter->filt_detaching = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
			return 0;
		}

		/* Remove the filter from the list */
		TAILQ_REMOVE(&ifp->if_flt_head, filter, filt_next);
		dlil_write_end();
	}

	/* Call the detached function if there is one */
	if (filter->filt_detached)
		filter->filt_detached(filter->filt_cookie, filter->filt_ifp);

	/* Free the filter */
	FREE(filter, M_NKE);

	return retval;
}

void
dlil_detach_filter(interface_filter_t filter)
{
	if (filter == NULL)
		return;
	dlil_detach_filter_internal(filter, 0);
}

static void
dlil_input_thread_continue(
	__unused void*		foo,
	__unused wait_result_t	wait)
{
	while (1) {
		struct mbuf *m, *m_loop;

		lck_spin_lock(dlil_input_lock);
		m = dlil_input_mbuf_head;
		dlil_input_mbuf_head = NULL;
		dlil_input_mbuf_tail = NULL;
		m_loop = dlil_input_loop_head;
		dlil_input_loop_head = NULL;
		dlil_input_loop_tail = NULL;
		lck_spin_unlock(dlil_input_lock);

		/*
		 * NOTE warning %%% attention !!!!
		 * We should think about putting some thread starvation safeguards if
		 * we deal with long chains of packets.
		 */
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
			m = m0;
		}
		m = m_loop;
		while (m) {
			struct mbuf *m0 = m->m_nextpkt;
			void *header = m->m_pkthdr.header;
			struct ifnet *ifp = &loif[0];

			m->m_nextpkt = NULL;
			m->m_pkthdr.header = NULL;
			(void) dlil_input_packet(ifp, m, header);
			m = m0;
		}

		proto_input_run();

		if (dlil_input_mbuf_head == NULL &&
			dlil_input_loop_head == NULL && inject_buckets == 0) {
			assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
			(void) thread_block(dlil_input_thread_continue);
			/* NOTREACHED */
		}
	}
}

void dlil_input_thread(void)
{
	register thread_t self = current_thread();

	ml_thread_policy(self, MACHINE_GROUP,
			 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

	dlil_initialized = 1;
	dlil_input_thread_ptr = current_thread();
	dlil_input_thread_continue(NULL, THREAD_RESTART);
}

int
dlil_input_with_stats(
	struct ifnet *ifp,
	struct mbuf *m_head,
	struct mbuf *m_tail,
	const struct ifnet_stat_increment_param *stats)
{
	/* WARNING
	 * Because of loopbacked multicast we cannot stuff the ifp in
	 * the rcvif of the packet header: loopback has its own dlil
	 * input queue
	 */

	lck_spin_lock(dlil_input_lock);
	if (ifp->if_type != IFT_LOOP) {
		if (dlil_input_mbuf_head == NULL)
			dlil_input_mbuf_head = m_head;
		else if (dlil_input_mbuf_tail != NULL)
			dlil_input_mbuf_tail->m_nextpkt = m_head;
		dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
	} else {
		if (dlil_input_loop_head == NULL)
			dlil_input_loop_head = m_head;
		else if (dlil_input_loop_tail != NULL)
			dlil_input_loop_tail->m_nextpkt = m_head;
		dlil_input_loop_tail = m_tail ? m_tail : m_head;
	}
	if (stats) {
		ifp->if_data.ifi_ipackets += stats->packets_in;
		ifp->if_data.ifi_ibytes += stats->bytes_in;
		ifp->if_data.ifi_ierrors += stats->errors_in;

		ifp->if_data.ifi_opackets += stats->packets_out;
		ifp->if_data.ifi_obytes += stats->bytes_out;
		ifp->if_data.ifi_oerrors += stats->errors_out;

		ifp->if_data.ifi_collisions += stats->collisions;
		ifp->if_data.ifi_iqdrops += stats->dropped;
	}
	lck_spin_unlock(dlil_input_lock);

	wakeup((caddr_t)&dlil_input_thread_wakeup);

	return 0;
}
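
/*
 * Driver-side sketch (illustrative): hand a whole receive chain to DLIL in
 * one call and account for it at the same time; the input thread performs
 * the demux later. rx_count and rx_bytes are hypothetical driver counters.
 *
 *	struct ifnet_stat_increment_param stats;
 *
 *	bzero(&stats, sizeof(stats));
 *	stats.packets_in = rx_count;	// hypothetical
 *	stats.bytes_in = rx_bytes;	// hypothetical
 *	dlil_input_with_stats(ifp, m_head, m_tail, &stats);
 */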

int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
	return dlil_input_with_stats(ifp, m_head, m_tail, NULL);
}

int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
		  char *frame_header)
{
	int retval;
	struct if_proto *ifproto = 0;
	protocol_family_t protocol_family;
	struct ifnet_filter *filter;


	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

	/*
	 * Lock the interface while we run through
	 * the filters and the demux. This lock
	 * protects the filter list and the demux list.
	 */
	dlil_read_begin();

	/*
	 * Call family demux module. If the demux module finds a match
	 * for the frame it will fill-in the ifproto pointer.
	 */

	retval = ifp->if_demux(ifp, m, frame_header, &protocol_family);
	if (retval != 0)
		protocol_family = 0;
	if (retval == EJUSTRETURN) {
		dlil_read_end();
		return 0;
	}

	/* DANGER!!! */
	if (m->m_flags & (M_BCAST|M_MCAST))
		ifp->if_imcasts++;

	/*
	 * Run interface filters
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			int filter_result;
			if (filter->filt_input && (filter->filt_protocol == 0 ||
				filter->filt_protocol == protocol_family)) {
				filter_result = filter->filt_input(filter->filt_cookie, ifp, protocol_family, &m, &frame_header);

				if (filter_result) {
					dlil_read_end();
					if (filter_result == EJUSTRETURN) {
						filter_result = 0;
					}
					else {
						m_freem(m);
					}

					return filter_result;
				}
			}
		}
	}

	/* Demux is done, interface filters have been processed, unlock the mutex */
	if (retval || ((m->m_flags & M_PROMISC) != 0) ) {
		dlil_read_end();
		if (retval != EJUSTRETURN) {
			m_freem(m);
			return retval;
		}
		else
			return 0;
	}

	ifproto = find_attached_proto(ifp, protocol_family);

	if (ifproto == 0) {
		dlil_read_end();
		DLIL_PRINTF("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
		m_freem(m);
		return 0;
	}

	/*
	 * Hand the packet off to the protocol.
	 */

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_lock(ifproto->dl_domain->dom_mtx);
	}

	if (ifproto->proto_kpi == kProtoKPI_DLIL)
		retval = (*ifproto->kpi.dlil.dl_input)(m, frame_header,
						       ifp, ifproto->protocol_family,
						       TRUE);
	else
		retval = ifproto->kpi.v1.input(ifp, ifproto->protocol_family, m, frame_header);

	if (ifproto->dl_domain && (ifproto->dl_domain->dom_flags & DOM_REENTRANT) == 0) {
		lck_mtx_unlock(ifproto->dl_domain->dom_mtx);
	}

	dlil_read_end();

	if (retval == EJUSTRETURN)
		retval = 0;
	else
		if (retval)
			m_freem(m);

	KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
	return retval;
}

static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
	struct ifnet_filter *filter;

	if (ifp_use(ifp, kIfNetUseCount_MustNotBeZero) == 0) {
		dlil_read_begin();

		/* Pass the event to the interface filters */
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if (filter->filt_event)
				filter->filt_event(filter->filt_cookie, ifp, filter->filt_protocol, event);
		}

		if (ifp->if_proto_hash) {
			int i;

			for (i = 0; i < PROTO_HASH_SLOTS; i++) {
				struct if_proto *proto;

				SLIST_FOREACH(proto, &ifp->if_proto_hash[i], next_hash) {
					/* Pass the event to the protocol */
					if (proto->proto_kpi == kProtoKPI_DLIL) {
						if (proto->kpi.dlil.dl_event)
							proto->kpi.dlil.dl_event(ifp, event);
					}
					else {
						if (proto->kpi.v1.event)
							proto->kpi.v1.event(ifp, proto->protocol_family, event);
					}
				}
			}
		}

		dlil_read_end();

		/* Pass the event to the interface */
		if (ifp->if_event)
			ifp->if_event(ifp, event);

		if (ifp_unuse(ifp))
			ifp_use_reached_zero(ifp);
	}

	return kev_post_msg(event);
}

int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
	int result = 0;

	struct kev_msg kev_msg;

	kev_msg.vendor_code    = event->vendor_code;
	kev_msg.kev_class      = event->kev_class;
	kev_msg.kev_subclass   = event->kev_subclass;
	kev_msg.event_code     = event->event_code;
	kev_msg.dv[0].data_ptr = &event->event_data[0];
	kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
	kev_msg.dv[1].data_length = 0;

	result = dlil_event_internal(ifp, &kev_msg);

	return result;
}

int
dlil_output_list(
	struct ifnet* ifp,
	u_long proto_family,
	struct mbuf		*packetlist,
	caddr_t			route,
	const struct sockaddr	*dest,
	int			raw)
{
	char			*frame_type = 0;
	char			*dst_linkaddr = 0;
	int			error, retval = 0;
	char			frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char			dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter	*filter;
	struct if_proto		*proto = 0;
	struct mbuf		*m;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
#if BRIDGE
	if ((raw != 0) || proto_family != PF_INET || do_bridge) {
#else
	if ((raw != 0) || proto_family != PF_INET) {
#endif
		while (packetlist) {
			m = packetlist;
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
			error = dlil_output(ifp, proto_family, m, route, dest, raw);
			if (error) {
				if (packetlist)
					m_freem_list(packetlist);
				return (error);
			}
		}
		return (0);
	}

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;
	m = packetlist;
	packetlist = packetlist->m_nextpkt;
	m->m_nextpkt = NULL;

	proto = find_attached_proto(ifp, proto_family);
	if (proto == NULL) {
		retval = ENXIO;
		goto cleanup;
	}

	retval = 0;
	if (proto->proto_kpi == kProtoKPI_DLIL) {
		if (proto->kpi.dlil.dl_pre_output)
			retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}
	else {
		if (proto->kpi.v1.pre_output)
			retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
	}

	if (retval) {
		if (retval != EJUSTRETURN) {
			m_freem(m);
		}
		goto cleanup;
	}

	do {

		if (ifp->if_framer) {
			retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
			if (retval) {
				if (retval != EJUSTRETURN) {
					m_freem(m);
				}
				goto cleanup;
			}
		}

		/*
		 * Let interface filters (if any) do their thing ...
		 */
		/* Do not pass VLAN tagged packets to filters PR-3586856 */
		if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
			TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
				if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
					filter->filt_output) {
					retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
					if (retval) {
						if (retval == EJUSTRETURN)
							continue;
						else {
							m_freem(m);
						}
						goto cleanup;
					}
				}
			}
		}
		/*
		 * Finally, call the driver.
		 */

		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
		retval = ifp->if_output(ifp, m);
		if (retval) {
			printf("dlil_output_list: output error retval = %x\n", retval);
			goto cleanup;
		}
		KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

		m = packetlist;
		if (m) {
			packetlist = packetlist->m_nextpkt;
			m->m_nextpkt = NULL;
		}
	} while (m);


	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (packetlist) /* if any packet left, clean up */
		m_freem_list(packetlist);
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

/*
 * dlil_output
 *
 * Caller should have a lock on the protocol domain if the protocol
 * doesn't support finer grained locking. In most cases, the lock
 * will be held from the socket layer and won't be released until
 * we return back to the socket layer.
 *
 * This does mean that we must take a protocol lock before we take
 * an interface lock if we're going to take both. This makes sense
 * because a protocol is likely to interact with an ifp while it
 * is under the protocol lock.
 */
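/*
 * Lock-ordering sketch implied by the note above (illustrative only):
 *
 *	lck_mtx_lock(proto->dl_domain->dom_mtx);	// protocol lock first
 *	ifnet_lock_shared(ifp);				// then the interface lock
 *	...
 *	ifnet_lock_done(ifp);
 *	lck_mtx_unlock(proto->dl_domain->dom_mtx);
 */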
int
dlil_output(
	struct ifnet* ifp,
	u_long proto_family,
	struct mbuf		*m,
	caddr_t			route,
	const struct sockaddr	*dest,
	int			raw)
{
	char			*frame_type = 0;
	char			*dst_linkaddr = 0;
	int			retval = 0;
	char			frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
	char			dst_linkaddr_buffer[MAX_LINKADDR * 4];
	struct ifnet_filter	*filter;

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

	dlil_read_begin();

	frame_type = frame_type_buffer;
	dst_linkaddr = dst_linkaddr_buffer;

	if (raw == 0) {
		struct if_proto	*proto = 0;

		proto = find_attached_proto(ifp, proto_family);
		if (proto == NULL) {
			m_freem(m);
			retval = ENXIO;
			goto cleanup;
		}

		retval = 0;
		if (proto->proto_kpi == kProtoKPI_DLIL) {
			if (proto->kpi.dlil.dl_pre_output)
				retval = proto->kpi.dlil.dl_pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}
		else {
			if (proto->kpi.v1.pre_output)
				retval = proto->kpi.v1.pre_output(ifp, proto_family, &m, dest, route, frame_type, dst_linkaddr);
		}

		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

	/*
	 * Call framing module
	 */
	if ((raw == 0) && (ifp->if_framer)) {
		retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
		if (retval) {
			if (retval != EJUSTRETURN) {
				m_freem(m);
			}
			goto cleanup;
		}
	}

#if BRIDGE
	/* !!!LOCKING!!!
	 *
	 * Need to consider how to handle this.
	 */
	broken-locking
	if (do_bridge) {
		struct mbuf *m0 = m;
		struct ether_header *eh = mtod(m, struct ether_header *);

		if (m->m_pkthdr.rcvif)
			m->m_pkthdr.rcvif = NULL;
		ifp = bridge_dst_lookup(eh);
		bdg_forward(&m0, ifp);
		if (m0)
			m_freem(m0);

		return 0;
	}
#endif


	/*
	 * Let interface filters (if any) do their thing ...
	 */

	/* Do not pass VLAN tagged packets to filters PR-3586856 */
	if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
			if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_family)) &&
				filter->filt_output) {
				retval = filter->filt_output(filter->filt_cookie, ifp, proto_family, &m);
				if (retval) {
					if (retval != EJUSTRETURN)
						m_freem(m);
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Finally, call the driver.
	 */

	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
	retval = ifp->if_output(ifp, m);
	KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

	KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

cleanup:
	dlil_read_end();
	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

int
dlil_ioctl(u_long	proto_fam,
	   struct ifnet	*ifp,
	   u_long	ioctl_code,
	   caddr_t	ioctl_arg)
{
	struct ifnet_filter	*filter;
	int			retval = EOPNOTSUPP;
	int			result = 0;
	struct if_family_str	*if_family;
	int			holding_read = 0;

	/* Attempt to increment the use count. If it's zero, bail out, the ifp is invalid */
	result = ifp_use(ifp, kIfNetUseCount_MustNotBeZero);
	if (result != 0)
		return EOPNOTSUPP;

	dlil_read_begin();
	holding_read = 1;

	/* Run the interface filters first.
	 * We want to run all filters before calling the protocol,
	 * interface family, or interface.
	 */
	TAILQ_FOREACH(filter, &ifp->if_flt_head, filt_next) {
		if ((filter->filt_protocol == 0 || (filter->filt_protocol == proto_fam)) &&
			filter->filt_ioctl != NULL) {
			result = filter->filt_ioctl(filter->filt_cookie, ifp, proto_fam, ioctl_code, ioctl_arg);
			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/* Allow the protocol to handle the ioctl */
	if (proto_fam) {
		struct if_proto	*proto = find_attached_proto(ifp, proto_fam);

		if (proto != 0) {
			result = EOPNOTSUPP;
			if (proto->proto_kpi == kProtoKPI_DLIL) {
				if (proto->kpi.dlil.dl_ioctl)
					result = proto->kpi.dlil.dl_ioctl(proto_fam, ifp, ioctl_code, ioctl_arg);
			}
			else {
				if (proto->kpi.v1.ioctl)
					result = proto->kpi.v1.ioctl(ifp, proto_fam, ioctl_code, ioctl_arg);
			}

			/* Only update retval if no one has handled the ioctl */
			if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
				if (result == ENOTSUP)
					result = EOPNOTSUPP;
				retval = result;
				if (retval && retval != EOPNOTSUPP) {
					goto cleanup;
				}
			}
		}
	}

	/*
	 * Since we have incremented the use count on the ifp, we are guaranteed
	 * that the ifp will not go away (the function pointers may not be changed).
	 * We release the dlil read lock so the interface ioctl may trigger a
	 * protocol attach. This happens with vlan and may occur with other virtual
	 * interfaces.
	 */
	dlil_read_end();
	holding_read = 0;

	/* retval is either 0 or EOPNOTSUPP */

	/*
	 * Let the family handle this ioctl.
	 * If it returns something non-zero and not EOPNOTSUPP, we're done.
	 * If it returns zero, the ioctl was handled, so set retval to zero.
	 */
	if_family = find_family_module(ifp->if_family);
	if ((if_family) && (if_family->ifmod_ioctl)) {
		result = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);

		/* Only update retval if no one has handled the ioctl */
		if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
			if (result == ENOTSUP)
				result = EOPNOTSUPP;
			retval = result;
			if (retval && retval != EOPNOTSUPP) {
				goto cleanup;
			}
		}
	}

	/*
	 * Let the interface handle this ioctl.
	 * If it returns EOPNOTSUPP, ignore that, we may have
	 * already handled this in the protocol or family.
	 */
	if (ifp->if_ioctl)
		result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);

	/* Only update retval if no one has handled the ioctl */
	if (retval == EOPNOTSUPP || result == EJUSTRETURN) {
		if (result == ENOTSUP)
			result = EOPNOTSUPP;
		retval = result;
		if (retval && retval != EOPNOTSUPP) {
			goto cleanup;
		}
	}

cleanup:
	if (holding_read)
		dlil_read_end();
	if (ifp_unuse(ifp))
		ifp_use_reached_zero(ifp);

	if (retval == EJUSTRETURN)
		retval = 0;
	return retval;
}

__private_extern__ errno_t
dlil_set_bpf_tap(
	ifnet_t		ifp,
	bpf_tap_mode	mode,
	bpf_packet_func	callback)
{
	errno_t	error = 0;

	dlil_read_begin();
	if (ifp->if_set_bpf_tap)
		error = ifp->if_set_bpf_tap(ifp, mode, callback);
	dlil_read_end();

	return error;
}

__private_extern__ errno_t
dlil_resolve_multi(
	struct ifnet *ifp,
	const struct sockaddr *proto_addr,
	struct sockaddr *ll_addr,
	size_t ll_len)
{
	errno_t	result = EOPNOTSUPP;
	struct if_proto *proto;
	const struct sockaddr *verify;

	dlil_read_begin();

	bzero(ll_addr, ll_len);

	/* Call the protocol first */
	proto = find_attached_proto(ifp, proto_addr->sa_family);
	if (proto != NULL && proto->proto_kpi != kProtoKPI_DLIL &&
		proto->kpi.v1.resolve_multi != NULL) {
		result = proto->kpi.v1.resolve_multi(ifp, proto_addr,
						     (struct sockaddr_dl*)ll_addr, ll_len);
	}

	/* Let the interface verify the multicast address */
	if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) {
		if (result == 0)
			verify = ll_addr;
		else
			verify = proto_addr;
		result = ifp->if_check_multi(ifp, verify);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp_internal(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	struct if_proto *proto;
	errno_t	result = 0;

	dlil_read_begin();

	proto = find_attached_proto(ifp, target_proto->sa_family);
	if (proto == NULL || proto->proto_kpi == kProtoKPI_DLIL ||
		proto->kpi.v1.send_arp == NULL) {
		result = ENOTSUP;
	}
	else {
		result = proto->kpi.v1.send_arp(ifp, arpop, sender_hw, sender_proto,
						target_hw, target_proto);
	}

	dlil_read_end();

	return result;
}

__private_extern__ errno_t
dlil_send_arp(
	ifnet_t	ifp,
	u_short arpop,
	const struct sockaddr_dl* sender_hw,
	const struct sockaddr* sender_proto,
	const struct sockaddr_dl* target_hw,
	const struct sockaddr* target_proto)
{
	errno_t	result = 0;

	if (target_proto == NULL || (sender_proto &&
		sender_proto->sa_family != target_proto->sa_family))
		return EINVAL;

	/*
	 * If this is an ARP request and the target IP is IPv4LL,
	 * send the request on all interfaces.
	 */
	if (IN_LINKLOCAL(((const struct sockaddr_in*)target_proto)->sin_addr.s_addr)
		&& ipv4_ll_arp_aware != 0 && target_proto->sa_family == AF_INET &&
		arpop == ARPOP_REQUEST) {
		ifnet_t		*ifp_list;
		u_int32_t	count;
		u_int32_t	ifp_on;

		result = ENOTSUP;

		if (ifnet_list_get(IFNET_FAMILY_ANY, &ifp_list, &count) == 0) {
			for (ifp_on = 0; ifp_on < count; ifp_on++) {
				errno_t			new_result;
				ifaddr_t		source_hw = NULL;
				ifaddr_t		source_ip = NULL;
				struct sockaddr_in	source_ip_copy;

				/*
				 * Only arp on interfaces marked for IPv4LL ARPing. This may
				 * mean that we don't ARP on the interface the subnet route
				 * points to.
				 */
				if ((ifp_list[ifp_on]->if_eflags & IFEF_ARPLL) == 0) {
					continue;
				}

				source_hw = TAILQ_FIRST(&ifp_list[ifp_on]->if_addrhead);

				/* Find the source IP address */
				ifnet_lock_shared(ifp_list[ifp_on]);
				TAILQ_FOREACH(source_ip, &ifp_list[ifp_on]->if_addrhead,
					      ifa_link) {
					if (source_ip->ifa_addr &&
						source_ip->ifa_addr->sa_family == AF_INET) {
						break;
					}
				}

				/* No IP Source, don't arp */
				if (source_ip == NULL) {
					ifnet_lock_done(ifp_list[ifp_on]);
					continue;
				}

				/* Copy the source IP address */
				source_ip_copy = *(struct sockaddr_in*)source_ip->ifa_addr;

				ifnet_lock_done(ifp_list[ifp_on]);

				/* Send the ARP */
				new_result = dlil_send_arp_internal(ifp_list[ifp_on], arpop,
								(struct sockaddr_dl*)source_hw->ifa_addr,
								(struct sockaddr*)&source_ip_copy, NULL,
								target_proto);

				if (result == ENOTSUP) {
					result = new_result;
				}
			}
		}

		ifnet_list_free(ifp_list);
	}
	else {
		result = dlil_send_arp_internal(ifp, arpop, sender_hw, sender_proto,
						target_hw, target_proto);
	}

	return result;
}

static int
ifp_use(
	struct ifnet *ifp,
	int	handle_zero)
{
	int old_value;
	int retval = 0;

	do {
		old_value = ifp->if_usecnt;
		if (old_value == 0 && handle_zero == kIfNetUseCount_MustNotBeZero) {
			retval = ENXIO; // ifp is invalid
			break;
		}
	} while (!OSCompareAndSwap((UInt32)old_value, (UInt32)old_value + 1, (UInt32*)&ifp->if_usecnt));

	return retval;
}

/* ifp_unuse is broken into two pieces.
 *
 * ifp_use and ifp_unuse must be called between when the caller calls
 * dlil_write_begin and dlil_write_end. ifp_unuse needs to perform some
 * operations after dlil_write_end has been called. For this reason,
 * anyone calling ifp_unuse must call ifp_use_reached_zero if ifp_unuse
 * returns a non-zero value. The caller must call ifp_use_reached_zero
 * after the caller has called dlil_write_end.
 */
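/*
 * Sketch of that contract, as followed by dlil_detach_protocol below:
 *
 *	dlil_write_begin();
 *	...
 *	use_reached_zero = ifp_unuse(ifp);
 *	dlil_write_end();
 *	if (use_reached_zero)
 *		ifp_use_reached_zero(ifp);
 */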
static void
ifp_use_reached_zero(
	struct ifnet *ifp)
{
	struct if_family_str *if_family;
	ifnet_detached_func free_func;

	dlil_read_begin();

	if (ifp->if_usecnt != 0)
		panic("ifp_use_reached_zero: ifp->if_usecnt != 0");

	/* Let BPF know we're detaching */
	bpfdetach(ifp);

	ifnet_head_lock_exclusive();
	ifnet_lock_exclusive(ifp);

	/* Remove ourselves from the list */
	TAILQ_REMOVE(&ifnet_head, ifp, if_link);
	ifnet_addrs[ifp->if_index - 1] = 0;

	/* ifp should be removed from the interface list */
	while (ifp->if_multiaddrs.lh_first) {
		struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;

		/*
		 * When the interface is gone, we will no longer
		 * be listening on these multicasts. Various bits
		 * of the stack may be referencing these multicasts,
		 * release only our reference.
		 */
		LIST_REMOVE(ifma, ifma_link);
		ifma->ifma_ifp = NULL;
		ifma_release(ifma);
	}
	ifnet_head_done();

	ifp->if_eflags &= ~IFEF_DETACHING; // clear the detaching flag
	ifnet_lock_done(ifp);

	if_family = find_family_module(ifp->if_family);
	if (if_family && if_family->del_if)
		if_family->del_if(ifp);
#if 0
	if (--if_family->if_usecnt == 0) {
		if (if_family->shutdown)
			(*if_family->shutdown)();

		TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
		FREE(if_family, M_IFADDR);
	}
#endif

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
	free_func = ifp->if_free;
	dlil_read_end();

	if (free_func)
		free_func(ifp);
}

static int
ifp_unuse(
	struct ifnet *ifp)
{
	int	oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_usecnt);
	if (oldval == 0)
		panic("ifp_unuse: ifp(%s%d)->if_usecnt was zero\n", ifp->if_name, ifp->if_unit);

	if (oldval > 1)
		return 0;

	if ((ifp->if_eflags & IFEF_DETACHING) == 0)
		panic("ifp_unuse: use count reached zero but detaching flag is not set!");

	return 1; /* caller must call ifp_use_reached_zero */
}

void
ifp_reference(
	struct ifnet *ifp)
{
	int oldval;
	oldval = OSIncrementAtomic(&ifp->if_refcnt);
}

void
ifp_release(
	struct ifnet *ifp)
{
	int oldval;
	oldval = OSDecrementAtomic((UInt32*)&ifp->if_refcnt);
	if (oldval == 0)
		panic("dlil_if_reference - refcount decremented past zero!");
}

extern lck_mtx_t *domain_proto_mtx;

static int
dlil_attach_protocol_internal(
	struct if_proto	*proto,
	const struct ddesc_head_str *demux,
	const struct ifnet_demux_desc *demux_list,
	u_int32_t	demux_count)
{
	struct ddesc_head_str temp_head;
	struct kev_dl_proto_data	ev_pr_data;
	struct ifnet *ifp = proto->ifp;
	int retval = 0;
	u_long hash_value = proto_hash_value(proto->protocol_family);
	int	if_using_kpi = (ifp->if_eflags & IFEF_USEKPI) != 0;
	void* free_me = NULL;

	/* setup some of the common values */

	{
		lck_mtx_lock(domain_proto_mtx);
		struct domain *dp = domains;
		while (dp && (protocol_family_t)dp->dom_family != proto->protocol_family)
			dp = dp->dom_next;
		proto->dl_domain = dp;
		lck_mtx_unlock(domain_proto_mtx);
	}

	/*
	 * Convert the demux descriptors to a type the interface
	 * will understand. Checking e_flags should be safe, this
	 * flag won't change.
	 */
	if (if_using_kpi && demux) {
		/* Convert the demux linked list to a demux_list */
		struct dlil_demux_desc	*demux_entry;
		struct ifnet_demux_desc *temp_list = NULL;
		u_int32_t i = 0;

		TAILQ_FOREACH(demux_entry, demux, next) {
			i++;
		}

		temp_list = _MALLOC(sizeof(struct ifnet_demux_desc) * i, M_TEMP, M_WAITOK);
		free_me = temp_list;

		if (temp_list == NULL)
			return ENOMEM;

		i = 0;
		TAILQ_FOREACH(demux_entry, demux, next) {
			/* dlil_demux_desc types 1, 2, and 3 are obsolete and can not be translated */
			if (demux_entry->type == 1 ||
				demux_entry->type == 2 ||
				demux_entry->type == 3) {
				FREE(free_me, M_TEMP);
				return ENOTSUP;
			}

			temp_list[i].type = demux_entry->type;
			temp_list[i].data = demux_entry->native_type;
			temp_list[i].datalen = demux_entry->variants.native_type_length;
			i++;
		}
		demux_count = i;
		demux_list = temp_list;
	}
	else if (!if_using_kpi && demux_list != NULL) {
		struct dlil_demux_desc	*demux_entry;
		u_int32_t i = 0;

		demux_entry = _MALLOC(sizeof(struct dlil_demux_desc) * demux_count, M_TEMP, M_WAITOK);
		free_me = demux_entry;
		if (demux_entry == NULL)
			return ENOMEM;

		TAILQ_INIT(&temp_head);

		for (i = 0; i < demux_count; i++) {
			demux_entry[i].type = demux_list[i].type;
			demux_entry[i].native_type = demux_list[i].data;
			demux_entry[i].variants.native_type_length = demux_list[i].datalen;
			TAILQ_INSERT_TAIL(&temp_head, &demux_entry[i], next);
		}
		demux = &temp_head;
	}

	/*
	 * Take the write lock to protect readers and exclude other writers.
	 */
	dlil_write_begin();

	/* Check that the interface isn't currently detaching */
	ifnet_lock_shared(ifp);
	if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
		ifnet_lock_done(ifp);
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return ENXIO;
	}
	ifnet_lock_done(ifp);

	if (find_attached_proto(ifp, proto->protocol_family) != NULL) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return EEXIST;
	}

	/*
	 * Call family module add_proto routine so it can refine the
	 * demux descriptors as it wishes.
	 */
	if (if_using_kpi)
		retval = ifp->if_add_proto_u.kpi(ifp, proto->protocol_family, demux_list, demux_count);
	else {
		retval = ifp->if_add_proto_u.original(ifp, proto->protocol_family,
						      _cast_non_const(demux));
	}
	if (retval) {
		dlil_write_end();
		if (free_me)
			FREE(free_me, M_TEMP);
		return retval;
	}

	/*
	 * We can't fail from this point on.
	 * Increment the number of uses (protocol attachments + interface attached).
	 */
	ifp_use(ifp, kIfNetUseCount_MustNotBeZero);

	/*
	 * Insert the protocol in the hash
	 */
	{
		struct if_proto*	prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]);
		while (prev_proto && SLIST_NEXT(prev_proto, next_hash) != NULL)
			prev_proto = SLIST_NEXT(prev_proto, next_hash);
		if (prev_proto)
			SLIST_INSERT_AFTER(prev_proto, proto, next_hash);
		else
			SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash);
	}

	/*
	 * Add to if_proto list for this interface
	 */
	if_proto_ref(proto);
	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt++;
	dlil_write_end();

	/* the reserved field carries the number of protocols still attached (subject to change) */
	ev_pr_data.proto_family = proto->protocol_family;
	ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
		      (struct net_event_data *)&ev_pr_data,
		      sizeof(struct kev_dl_proto_data));

	DLIL_PRINTF("Attached protocol %d to %s%d - %d\n", proto->protocol_family,
		 ifp->if_name, ifp->if_unit, retval);
	if (free_me)
		FREE(free_me, M_TEMP);
	return retval;
}

__private_extern__ int
dlil_attach_protocol_kpi(ifnet_t ifp, protocol_family_t protocol,
	const struct ifnet_attach_proto_param *proto_details)
{
	int retval = 0;
	struct if_proto	 *ifproto = NULL;

	ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
	if (ifproto == 0) {
		DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
		retval = ENOMEM;
		goto end;
	}
	bzero(ifproto, sizeof(*ifproto));

	ifproto->ifp = ifp;
	ifproto->protocol_family = protocol;
	ifproto->proto_kpi = kProtoKPI_v1;
	ifproto->kpi.v1.input = proto_details->input;
	ifproto->kpi.v1.pre_output = proto_details->pre_output;
	ifproto->kpi.v1.event = proto_details->event;
	ifproto->kpi.v1.ioctl = proto_details->ioctl;
	ifproto->kpi.v1.detached = proto_details->detached;
	ifproto->kpi.v1.resolve_multi = proto_details->resolve;
	ifproto->kpi.v1.send_arp = proto_details->send_arp;

	retval = dlil_attach_protocol_internal(ifproto, NULL,
				proto_details->demux_list, proto_details->demux_count);

end:
	if (retval && ifproto)
		FREE(ifproto, M_IFADDR);
	return retval;
}
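
/*
 * Caller's sketch (hypothetical handler names, illustrative only): a
 * protocol stack attaches itself to an interface through the KPI path
 * roughly like this:
 *
 *	struct ifnet_attach_proto_param proto;
 *
 *	bzero(&proto, sizeof(proto));
 *	proto.input = my_proto_input;		// hypothetical
 *	proto.pre_output = my_proto_pre_output;	// hypothetical
 *	retval = dlil_attach_protocol_kpi(ifp, PF_INET, &proto);
 */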

int
dlil_attach_protocol(struct dlil_proto_reg_str	*proto)
{
	struct ifnet	 *ifp = NULL;
	struct if_proto	 *ifproto = NULL;
	int	retval = 0;

	/*
	 * Do everything we can before taking the write lock
	 */

	if ((proto->protocol_family == 0) || (proto->interface_family == 0))
		return EINVAL;

	/*
	 * Allocate and init a new if_proto structure
	 */
	ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
	if (!ifproto) {
		DLIL_PRINTF("ERROR - DLIL failed if_proto allocation\n");
		retval = ENOMEM;
		goto end;
	}


	/* ifbyfamily returns us an ifp with an incremented if_usecnt */
	ifp = ifbyfamily(proto->interface_family, proto->unit_number);
	if (!ifp) {
		DLIL_PRINTF("dlil_attach_protocol -- no such interface %d unit %d\n",
			proto->interface_family, proto->unit_number);
		retval = ENXIO;
		goto end;
	}

	bzero(ifproto, sizeof(struct if_proto));

	ifproto->ifp = ifp;
	ifproto->protocol_family = proto->protocol_family;
	ifproto->proto_kpi = kProtoKPI_DLIL;
	ifproto->kpi.dlil.dl_input = proto->input;
	ifproto->kpi.dlil.dl_pre_output = proto->pre_output;
	ifproto->kpi.dlil.dl_event = proto->event;
	ifproto->kpi.dlil.dl_offer = proto->offer;
	ifproto->kpi.dlil.dl_ioctl = proto->ioctl;
	ifproto->kpi.dlil.dl_detached = proto->detached;

	retval = dlil_attach_protocol_internal(ifproto, &proto->demux_desc_head, NULL, 0);

end:
	if (retval && ifproto)
		FREE(ifproto, M_IFADDR);
	return retval;
}

extern void if_rtproto_del(struct ifnet *ifp, int protocol);

91447636
A
1949static int
1950dlil_detach_protocol_internal(
1951 struct if_proto *proto)
1952{
1953 struct ifnet *ifp = proto->ifp;
1954 u_long proto_family = proto->protocol_family;
1955 struct kev_dl_proto_data ev_pr_data;
1956
1957 if (proto->proto_kpi == kProtoKPI_DLIL) {
1958 if (proto->kpi.dlil.dl_detached)
1959 proto->kpi.dlil.dl_detached(proto->protocol_family, ifp);
1960 }
1961 else {
1962 if (proto->kpi.v1.detached)
1963 proto->kpi.v1.detached(ifp, proto->protocol_family);
1964 }
1965 if_proto_free(proto);
1966
1967 /*
1968 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1969 */
1970
1971 if_rtproto_del(ifp, proto_family);
1972
1973 /* the reserved field carries the number of protocol still attached (subject to change) */
1974 ev_pr_data.proto_family = proto_family;
1975 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1976 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1977 (struct net_event_data *)&ev_pr_data,
1978 sizeof(struct kev_dl_proto_data));
1979 return 0;
1980}

int
dlil_detach_protocol(struct ifnet *ifp, u_long proto_family)
{
	struct if_proto *proto = NULL;
	int retval = 0;
	int use_reached_zero = 0;

	if ((retval = dlil_write_begin()) != 0) {
		if (retval == EDEADLK) {
			retval = 0;
			dlil_read_begin();
			proto = find_attached_proto(ifp, proto_family);
			if (proto == 0) {
				retval = ENXIO;
			}
			else {
				proto->detaching = 1;
				dlil_detach_waiting = 1;
				wakeup(&dlil_detach_waiting);
			}
			dlil_read_end();
		}
		goto end;
	}

	proto = find_attached_proto(ifp, proto_family);

	if (proto == NULL) {
		retval = ENXIO;
		dlil_write_end();
		goto end;
	}

	/*
	 * Call family module del_proto
	 */

	if (ifp->if_del_proto)
		ifp->if_del_proto(ifp, proto->protocol_family);

	if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
		ifp->offercnt--;

	SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash);

	/*
	 * We can do the rest of the work outside of the write lock.
	 */
	use_reached_zero = ifp_unuse(ifp);
	dlil_write_end();

	dlil_detach_protocol_internal(proto);

	/*
	 * Only handle the case where the interface will go away after
	 * we've sent the message. This way post message can send the
	 * message to the interface safely.
	 */

	if (use_reached_zero)
		ifp_use_reached_zero(ifp);

end:
	return retval;
}

/*
 * dlil_delayed_detach_thread is responsible for detaching
 * protocols, protocol filters, and interface filters after
 * an attempt was made to detach one of those items while
 * it was not safe to do so (i.e. the thread was already
 * inside a dlil_read_begin section, so taking the write
 * lock would have deadlocked).
 *
 * This function will take the dlil write lock and walk
 * through each of the interfaces looking for items with
 * the detaching flag set. When an item is found, it is
 * detached from the interface and placed on a local list.
 * After all of the items have been collected, we drop the
 * write lock and perform the post detach. This is done
 * so we only have to take the write lock once.
 *
 * When detaching a protocol filter, if we find that we
 * have detached the very last protocol and we need to call
 * ifp_use_reached_zero, we have to break out of our work
 * to drop the write lock so we can call ifp_use_reached_zero.
 */

static void
dlil_delayed_detach_thread(__unused void* foo, __unused wait_result_t wait)
{
	thread_t self = current_thread();
	int asserted = 0;

	ml_thread_policy(self, MACHINE_GROUP,
			(MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

	while (1) {
		if (dlil_detach_waiting != 0 && dlil_write_begin() == 0) {
			struct ifnet *ifp;
			struct proto_hash_entry detached_protos;
			struct ifnet_filter_head detached_filters;
			struct if_proto *proto;
			struct if_proto *next_proto;
			struct ifnet_filter *filt;
			struct ifnet_filter *next_filt;
			int reached_zero;

			reached_zero = 0;

			/* Clear the detach waiting flag */
			dlil_detach_waiting = 0;
			TAILQ_INIT(&detached_filters);
			SLIST_INIT(&detached_protos);

			ifnet_head_lock_shared();
			TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
				int i;

				// Look for protocols and protocol filters
				for (i = 0; i < PROTO_HASH_SLOTS && !reached_zero; i++) {
					struct if_proto **prev_nextptr = &SLIST_FIRST(&ifp->if_proto_hash[i]);
					for (proto = *prev_nextptr; proto; proto = *prev_nextptr) {

						// Detach this protocol
						if (proto->detaching) {
							if (ifp->if_del_proto)
								ifp->if_del_proto(ifp, proto->protocol_family);
							if (proto->proto_kpi == kProtoKPI_DLIL && proto->kpi.dlil.dl_offer)
								ifp->offercnt--;
							*prev_nextptr = SLIST_NEXT(proto, next_hash);
							SLIST_INSERT_HEAD(&detached_protos, proto, next_hash);
							reached_zero = ifp_unuse(ifp);
							if (reached_zero) {
								break;
							}
						}
						else {
							// Update prev_nextptr to point to our next ptr
							prev_nextptr = &SLIST_NEXT(proto, next_hash);
						}
					}
				}

				// look for interface filters that need to be detached
				for (filt = TAILQ_FIRST(&ifp->if_flt_head); filt; filt = next_filt) {
					next_filt = TAILQ_NEXT(filt, filt_next);
					if (filt->filt_detaching != 0) {
						// take this interface filter off the interface filter list
						TAILQ_REMOVE(&ifp->if_flt_head, filt, filt_next);

						// put this interface filter on the detached filters list
						TAILQ_INSERT_TAIL(&detached_filters, filt, filt_next);
					}
				}

				if (ifp->if_delayed_detach) {
					ifp->if_delayed_detach = 0;
					reached_zero = ifp_unuse(ifp);
				}

				if (reached_zero)
					break;
			}
			ifnet_head_done();
			dlil_write_end();

			for (filt = TAILQ_FIRST(&detached_filters); filt; filt = next_filt) {
				next_filt = TAILQ_NEXT(filt, filt_next);
				/*
				 * dlil_detach_filter_internal won't remove an item from
				 * the list if it is already detached (second parameter).
				 * The item will be freed though.
				 */
				dlil_detach_filter_internal(filt, 1);
			}

			for (proto = SLIST_FIRST(&detached_protos); proto; proto = next_proto) {
				next_proto = SLIST_NEXT(proto, next_hash);
				dlil_detach_protocol_internal(proto);
			}

			if (reached_zero) {
				ifp_use_reached_zero(ifp);
				dlil_detach_waiting = 1; // we may have missed something
			}
		}

		if (!asserted && dlil_detach_waiting == 0) {
			asserted = 1;
			assert_wait(&dlil_detach_waiting, THREAD_UNINT);
		}

		if (dlil_detach_waiting == 0) {
			asserted = 0;
			thread_block(dlil_delayed_detach_thread);
		}
	}
}

static void
dlil_call_delayed_detach_thread(void)
{
	dlil_delayed_detach_thread(NULL, THREAD_RESTART);
}

extern int if_next_index(void);

__private_extern__ int
dlil_if_attach_with_address(
	struct ifnet *ifp,
	const struct sockaddr_dl *ll_addr)
{
	u_long interface_family = ifp->if_family;
	struct if_family_str *if_family = NULL;
	int stat;
	struct ifnet *tmp_if;
	struct proto_hash_entry *new_proto_list = NULL;

	ifnet_head_lock_shared();

	/* Verify we aren't already on the list */
	TAILQ_FOREACH(tmp_if, &ifnet_head, if_link) {
		if (tmp_if == ifp) {
			ifnet_head_done();
			return EEXIST;
		}
	}

	ifnet_head_done();

	if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_lock == 0) {
#if IFNET_RW_LOCK
		ifp->if_lock = lck_rw_alloc_init(ifnet_lock_group, ifnet_lock_attr);
#else
		ifp->if_lock = lck_mtx_alloc_init(ifnet_lock_group, ifnet_lock_attr);
#endif
	}

	if (ifp->if_lock == 0) {
		return ENOMEM;
	}

	// Only use family if this is not a KPI interface
	if ((ifp->if_eflags & IFEF_USEKPI) == 0) {
		if_family = find_family_module(interface_family);
	}

	/*
	 * Allow interfaces without protocol families to attach
	 * only if they have the necessary fields filled out.
	 */

	if ((if_family == 0) &&
		(ifp->if_add_proto == 0 || ifp->if_del_proto == 0)) {
		DLIL_PRINTF("Attempt to attach interface without family module - %d\n",
			interface_family);
		return ENODEV;
	}

	if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_proto_hash == NULL) {
		MALLOC(new_proto_list, struct proto_hash_entry*, sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS,
			   M_NKE, M_WAITOK);

		if (new_proto_list == 0) {
			return ENOBUFS;
		}
	}

	dlil_write_begin();

	/*
	 * Call the family module to fill in the appropriate fields in the
	 * ifnet structure.
	 */

	if (if_family) {
		stat = if_family->add_if(ifp);
		if (stat) {
			DLIL_PRINTF("dlil_if_attach -- add_if failed with %d\n", stat);
			dlil_write_end();
			if (new_proto_list)
				FREE(new_proto_list, M_NKE); /* don't leak the hash table we just allocated */
			return stat;
		}
		ifp->if_add_proto_u.original = if_family->add_proto;
		ifp->if_del_proto = if_family->del_proto;
		if_family->refcnt++;
	}

	ifp->offercnt = 0;
	TAILQ_INIT(&ifp->if_flt_head);

	if (new_proto_list) {
		bzero(new_proto_list, (PROTO_HASH_SLOTS * sizeof(struct proto_hash_entry)));
		ifp->if_proto_hash = new_proto_list;
		new_proto_list = 0;
	}

	/* old_if_attach */
	{
		struct ifaddr *ifa = 0;

		if (ifp->if_snd.ifq_maxlen == 0)
			ifp->if_snd.ifq_maxlen = ifqmaxlen;
		TAILQ_INIT(&ifp->if_prefixhead);
		LIST_INIT(&ifp->if_multiaddrs);
		ifnet_touch_lastchange(ifp);

		/* usecount to track attachment to the ifnet list */
		ifp_use(ifp, kIfNetUseCount_MayBeZero);

		/* Lock the list of interfaces */
		ifnet_head_lock_exclusive();
		ifnet_lock_exclusive(ifp);

		if ((ifp->if_eflags & IFEF_REUSE) == 0 || ifp->if_index == 0) {
			char workbuf[64];
			int namelen, masklen, socksize, ifasize;

			ifp->if_index = if_next_index();

			namelen = snprintf(workbuf, sizeof(workbuf), "%s%d", ifp->if_name, ifp->if_unit);
#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m))
			masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
			socksize = masklen + ifp->if_addrlen;
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
			if ((u_long)socksize < sizeof(struct sockaddr_dl))
				socksize = sizeof(struct sockaddr_dl);
			socksize = ROUNDUP(socksize);
			ifasize = sizeof(struct ifaddr) + 2 * socksize;
			ifa = (struct ifaddr*)_MALLOC(ifasize, M_IFADDR, M_WAITOK);
			if (ifa) {
				struct sockaddr_dl *sdl = (struct sockaddr_dl *)(ifa + 1);
				ifnet_addrs[ifp->if_index - 1] = ifa;
				bzero(ifa, ifasize);
				sdl->sdl_len = socksize;
				sdl->sdl_family = AF_LINK;
				bcopy(workbuf, sdl->sdl_data, namelen);
				sdl->sdl_nlen = namelen;
				sdl->sdl_index = ifp->if_index;
				sdl->sdl_type = ifp->if_type;
				if (ll_addr) {
					sdl->sdl_alen = ll_addr->sdl_alen;
					if (ll_addr->sdl_alen != ifp->if_addrlen)
						panic("dlil_if_attach - ll_addr->sdl_alen != ifp->if_addrlen");
					bcopy(CONST_LLADDR(ll_addr), LLADDR(sdl), sdl->sdl_alen);
				}
				ifa->ifa_ifp = ifp;
				ifa->ifa_rtrequest = link_rtrequest;
				ifa->ifa_addr = (struct sockaddr*)sdl;
				sdl = (struct sockaddr_dl*)(socksize + (caddr_t)sdl);
				ifa->ifa_netmask = (struct sockaddr*)sdl;
				sdl->sdl_len = masklen;
				while (namelen != 0)
					sdl->sdl_data[--namelen] = 0xff;
			}
		}
		else {
			/* preserve the first ifaddr */
			ifnet_addrs[ifp->if_index - 1] = TAILQ_FIRST(&ifp->if_addrhead);
		}

		TAILQ_INIT(&ifp->if_addrhead);
		ifa = ifnet_addrs[ifp->if_index - 1];

		if (ifa) {
			/*
			 * We don't use if_attach_ifa because we want
			 * this address to be first on the list.
			 */
			ifaref(ifa);
			ifa->ifa_debug |= IFA_ATTACHED;
			TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
		}

		TAILQ_INSERT_TAIL(&ifnet_head, ifp, if_link);
		ifindex2ifnet[ifp->if_index] = ifp;

		ifnet_head_done();
	}
	dlil_write_end();

	if (if_family && if_family->init_if) {
		stat = if_family->init_if(ifp);
		if (stat) {
			DLIL_PRINTF("dlil_if_attach -- init_if failed with %d\n", stat);
		}
	}

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
	ifnet_lock_done(ifp);

	return 0;
}

int
dlil_if_attach(struct ifnet *ifp)
{
	return dlil_if_attach_with_address(ifp, NULL);
}
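
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * attach with an explicit link-layer address instead of the NULL that
 * dlil_if_attach() passes.  example_* is hypothetical; note the panic in
 * dlil_if_attach_with_address() when sdl_alen != ifp->if_addrlen, so an
 * Ethernet-style ifp here must have if_addrlen == 6.
 */
static int
example_attach_with_lladdr(struct ifnet *ifp, const u_char mac[6])
{
	struct sockaddr_dl sdl;

	bzero(&sdl, sizeof(sdl));
	sdl.sdl_len = sizeof(sdl);
	sdl.sdl_family = AF_LINK;
	sdl.sdl_alen = 6;			/* must equal ifp->if_addrlen */
	bcopy(mac, LLADDR(&sdl), 6);		/* sdl_nlen is 0, so this lands at sdl_data[0] */

	return dlil_if_attach_with_address(ifp, &sdl);
}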


int
dlil_if_detach(struct ifnet *ifp)
{
	struct ifnet_filter *filter;
	struct ifnet_filter *filter_next;
	int zeroed = 0;
	int retval = 0;
	struct ifnet_filter_head fhead;

	ifnet_lock_exclusive(ifp);

	if ((ifp->if_eflags & IFEF_DETACHING) != 0) {
		/* Interface has already been detached */
		ifnet_lock_done(ifp);
		return ENXIO;
	}

	/*
	 * Indicate this interface is being detached.
	 *
	 * This should prevent protocols from attaching
	 * from this point on. Interface will remain on
	 * the list until all of the protocols are detached.
	 */
	ifp->if_eflags |= IFEF_DETACHING;
	ifnet_lock_done(ifp);

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);

	if ((retval = dlil_write_begin()) != 0) {
		if (retval == EDEADLK) {
			retval = DLIL_WAIT_FOR_FREE;

			/* We need to perform a delayed detach */
			ifp->if_delayed_detach = 1;
			dlil_detach_waiting = 1;
			wakeup(&dlil_detach_waiting);
		}
		return retval;
	}

	/* Steal the list of interface filters */
	fhead = ifp->if_flt_head;
	TAILQ_INIT(&ifp->if_flt_head);

	/* unuse the interface */
	zeroed = ifp_unuse(ifp);

	dlil_write_end();

	for (filter = TAILQ_FIRST(&fhead); filter; filter = filter_next) {
		filter_next = TAILQ_NEXT(filter, filt_next);
		dlil_detach_filter_internal(filter, 1);
	}

	if (zeroed == 0) {
		retval = DLIL_WAIT_FOR_FREE;
	}
	else {
		ifp_use_reached_zero(ifp);
	}

	return retval;
}
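
/*
 * Illustrative sketch (not part of the original file): a caller of
 * dlil_if_detach() must be prepared for DLIL_WAIT_FOR_FREE, which means
 * the ifnet is still referenced (or the write lock could not be taken)
 * and teardown completes asynchronously via the delayed detach thread and
 * ifp_use_reached_zero().  example_* is hypothetical.
 */
static void
example_detach_interface(struct ifnet *ifp)
{
	switch (dlil_if_detach(ifp)) {
	case 0:
		/* detached synchronously; driver may continue teardown */
		break;
	case DLIL_WAIT_FOR_FREE:
		/* detach in flight; wait for the interface's free callback
		   before releasing driver resources */
		break;
	case ENXIO:
		/* interface was already being detached */
		break;
	}
}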


int
dlil_reg_if_modules(u_long interface_family,
	struct dlil_ifmod_reg_str *ifmod)
{
	struct if_family_str *if_family;

	if (find_family_module(interface_family)) {
		DLIL_PRINTF("Attempt to register dlil family module more than once - %d\n",
			interface_family);
		return EEXIST;
	}

	if ((!ifmod->add_if) || (!ifmod->del_if) ||
		(!ifmod->add_proto) || (!ifmod->del_proto)) {
		DLIL_PRINTF("dlil_reg_if_modules passed at least one null pointer\n");
		return EINVAL;
	}

	/*
	 * The following is a gross hack to keep from breaking
	 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
	 * does not zero the reserved fields in dlil_ifmod_reg_str.
	 * As a result, we have to zero any function that used to
	 * be reserved fields at the time Vicomsoft built their
	 * kext. Radar #2974305
	 */
	if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2] != 0) {
		if (interface_family == 123) {		/* Vicom */
			ifmod->init_if = 0;
		} else {
			return EINVAL;
		}
	}

	if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
	if (!if_family) {
		DLIL_PRINTF("dlil_reg_if_modules failed allocation\n");
		return ENOMEM;
	}

	bzero(if_family, sizeof(struct if_family_str));

	if_family->if_family = interface_family & 0xffff;
	if_family->shutdown = ifmod->shutdown;
	if_family->add_if = ifmod->add_if;
	if_family->del_if = ifmod->del_if;
	if_family->init_if = ifmod->init_if;
	if_family->add_proto = ifmod->add_proto;
	if_family->del_proto = ifmod->del_proto;
	if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
	if_family->refcnt = 1;
	if_family->flags = 0;

	TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
	return 0;
}
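
/*
 * Illustrative sketch (not part of the original file): registering an
 * interface family module.  dlil_reg_if_modules() requires all four of
 * add_if/del_if/add_proto/del_proto to be non-NULL.  The example_* names
 * are hypothetical; del_proto's signature follows how this file invokes
 * if_del_proto, while add_proto's ddesc_head_str form is an assumption
 * based on the legacy dlil.h and may need adjusting.
 */
static int
example_fam_add_if(__unused struct ifnet *ifp)
{
	return 0;	/* fill in family-specific ifnet fields here */
}

static int
example_fam_del_if(__unused struct ifnet *ifp)
{
	return 0;
}

static int
example_fam_add_proto(__unused struct ddesc_head_str *demux_desc_head,
	__unused struct ifnet *ifp, __unused u_long protocol_family)
{
	return 0;	/* install demux descriptors for this protocol */
}

static int
example_fam_del_proto(__unused struct ifnet *ifp, __unused u_long protocol_family)
{
	return 0;
}

static int
example_register_family(void)
{
	struct dlil_ifmod_reg_str ifmod;

	bzero(&ifmod, sizeof(ifmod));	/* keeps reserved[] zeroed, avoiding the Vicom check */
	ifmod.add_if = example_fam_add_if;
	ifmod.del_if = example_fam_del_if;
	ifmod.add_proto = example_fam_add_proto;
	ifmod.del_proto = example_fam_del_proto;

	return dlil_reg_if_modules(APPLE_IF_FAM_ETHERNET, &ifmod);	/* family constant assumed */
}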

int dlil_dereg_if_modules(u_long interface_family)
{
	struct if_family_str *if_family;
	int ret = 0;

	if_family = find_family_module(interface_family);
	if (if_family == 0) {
		return ENXIO;
	}

	if (--if_family->refcnt == 0) {
		if (if_family->shutdown)
			(*if_family->shutdown)();

		TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
		FREE(if_family, M_IFADDR);
	}
	else {
		if_family->flags |= DLIL_SHUTDOWN;
		ret = DLIL_WAIT_FOR_FREE;
	}

	return ret;
}



int
dlil_reg_proto_module(
	u_long protocol_family,
	u_long interface_family,
	int (*attach)(struct ifnet *ifp, u_long protocol_family),
	int (*detach)(struct ifnet *ifp, u_long protocol_family))
{
	struct proto_family_str *proto_family;

	if (attach == NULL) return EINVAL;

	lck_mtx_lock(proto_family_mutex);

	TAILQ_FOREACH(proto_family, &proto_family_head, proto_fam_next) {
		if (proto_family->proto_family == protocol_family &&
			proto_family->if_family == interface_family) {
			lck_mtx_unlock(proto_family_mutex);
			return EEXIST;
		}
	}

	proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
	if (!proto_family) {
		lck_mtx_unlock(proto_family_mutex);
		return ENOMEM;
	}

	bzero(proto_family, sizeof(struct proto_family_str));
	proto_family->proto_family = protocol_family;
	proto_family->if_family = interface_family & 0xffff;
	proto_family->attach_proto = attach;
	proto_family->detach_proto = detach;

	TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
	lck_mtx_unlock(proto_family_mutex);
	return 0;
}
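
/*
 * Illustrative sketch (not part of the original file): registering a
 * protocol plumber so that dlil_plumb_protocol() below can attach the
 * protocol on demand.  The handler signatures are exactly the ones
 * dlil_reg_proto_module() takes above; the example_* names and the
 * (PF_INET, APPLE_IF_FAM_ETHERNET) pairing are illustrative assumptions.
 */
static int
example_inet_attach(__unused struct ifnet *ifp, __unused u_long protocol_family)
{
	/* a real plumber would build an attach request and call one of the
	   dlil_attach_protocol* paths above */
	return 0;
}

static int
example_inet_detach(struct ifnet *ifp, u_long protocol_family)
{
	return dlil_detach_protocol(ifp, protocol_family);
}

static int
example_register_inet_plumber(void)
{
	/* attach is mandatory (EINVAL otherwise); detach may be NULL, in which
	   case dlil_unplumb_protocol() falls back to dlil_detach_protocol() */
	return dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET,
		example_inet_attach, example_inet_detach);
}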

int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);

	proto_family = find_proto_module(protocol_family, interface_family);
	if (proto_family == 0) {
		lck_mtx_unlock(proto_family_mutex);
		return ENXIO;
	}

	TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
	FREE(proto_family, M_IFADDR);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}

int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);
	proto_family = find_proto_module(protocol_family, ifp->if_family);
	if (proto_family == 0) {
		lck_mtx_unlock(proto_family_mutex);
		return ENXIO;
	}

	ret = proto_family->attach_proto(ifp, protocol_family);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}


int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
{
	struct proto_family_str *proto_family;
	int ret = 0;

	lck_mtx_lock(proto_family_mutex);

	proto_family = find_proto_module(protocol_family, ifp->if_family);
	if (proto_family && proto_family->detach_proto)
		ret = proto_family->detach_proto(ifp, protocol_family);
	else
		ret = dlil_detach_protocol(ifp, protocol_family);

	lck_mtx_unlock(proto_family_mutex);
	return ret;
}
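
/*
 * Illustrative sketch (not part of the original file): with a plumber
 * registered as in the sketch above, a caller brings a protocol up or
 * down on one interface; the module is looked up by the pair
 * (protocol_family, ifp->if_family).  example_* is hypothetical.
 */
static int
example_plumb_then_unplumb(struct ifnet *ifp)
{
	int err;

	err = dlil_plumb_protocol(PF_INET, ifp);	/* ENXIO if no module is registered */
	if (err == 0)
		err = dlil_unplumb_protocol(PF_INET, ifp);
	return err;
}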

static errno_t
dlil_recycle_ioctl(
	__unused ifnet_t ifnet_ptr,
	__unused u_int32_t ioctl_code,
	__unused void *ioctl_arg)
{
	return EOPNOTSUPP;
}

static int
dlil_recycle_output(
	__unused struct ifnet *ifnet_ptr,
	struct mbuf *m)
{
	m_freem(m);
	return 0;
}

static void
dlil_recycle_free(
	__unused ifnet_t ifnet_ptr)
{
}

static errno_t
dlil_recycle_set_bpf_tap(
	__unused ifnet_t ifp,
	__unused bpf_tap_mode mode,
	__unused bpf_packet_func callback)
{
	/* XXX not sure what to do here */
	return 0;
}

int dlil_if_acquire(
	u_long family,
	const void *uniqueid,
	size_t uniqueid_len,
	struct ifnet **ifp)
{
	struct ifnet *ifp1 = NULL;
	struct dlil_ifnet *dlifp1 = NULL;
	int ret = 0;

	lck_mtx_lock(dlil_ifnet_mutex);
	TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {

		ifp1 = (struct ifnet *)dlifp1;

		if (ifp1->if_family == family) {

			/* same uniqueid and same len or no unique id specified */
			if ((uniqueid_len == dlifp1->if_uniqueid_len)
				&& !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {

				/* check for matching interface in use */
				if (ifp1->if_eflags & IFEF_INUSE) {
					if (uniqueid_len) {
						ret = EBUSY;
						goto end;
					}
				}
				else {
					if (!ifp1->if_lock)
						panic("ifp's lock is gone\n");
					ifnet_lock_exclusive(ifp1);
					ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
					ifnet_lock_done(ifp1);
					*ifp = ifp1;
					goto end;
				}
			}
		}
	}

	/* no interface found, allocate a new one */
	MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
	if (dlifp1 == 0) {
		ret = ENOMEM;
		goto end;
	}

	bzero(dlifp1, sizeof(*dlifp1));

	if (uniqueid_len) {
		MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
		if (dlifp1->if_uniqueid == 0) {
			FREE(dlifp1, M_NKE);
			ret = ENOMEM;
			goto end;
		}
		bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
		dlifp1->if_uniqueid_len = uniqueid_len;
	}

	ifp1 = (struct ifnet *)dlifp1;
	ifp1->if_eflags |= IFEF_INUSE;
	ifp1->if_name = dlifp1->if_namestorage;

	TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);

	*ifp = ifp1;

end:
	lck_mtx_unlock(dlil_ifnet_mutex);

	return ret;
}
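
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants its unit recycled across unload/reload passes a stable uniqueid,
 * and EBUSY means that unit is still marked IFEF_INUSE.  example_* is
 * hypothetical and the family constant is an assumption.
 */
static int
example_acquire_unit(struct ifnet **ifp)
{
	static const char unique[] = "com.example.driver,unit0";
	int err;

	err = dlil_if_acquire(APPLE_IF_FAM_ETHERNET, unique, sizeof(unique), ifp);

	/* on success, *ifp is either a recycled ifnet (IFEF_REUSE set) or a
	   freshly zeroed one; the caller fills it in and calls dlil_if_attach() */
	return err;
}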

void dlil_if_release(struct ifnet *ifp)
{
	struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;

	/* Interface does not have a lock until it is attached - radar 3713951 */
	if (ifp->if_lock)
		ifnet_lock_exclusive(ifp);
	ifp->if_eflags &= ~IFEF_INUSE;
	ifp->if_ioctl = dlil_recycle_ioctl;
	ifp->if_output = dlil_recycle_output;
	ifp->if_free = dlil_recycle_free;
	ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;

	strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
	ifp->if_name = dlifp->if_namestorage;
	if (ifp->if_lock)
		ifnet_lock_done(ifp);
}