/*
 * apple/xnu (xnu-517.12.7) -- bsd/net/dlil.c
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1999 Apple Computer, Inc.
24 *
25 * Data Link Inteface Layer
26 * Author: Ted Walker
27 */
28
29
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/mbuf.h>
36 #include <sys/socket.h>
37 #include <net/if_dl.h>
38 #include <net/if.h>
39 #include <net/if_var.h>
40 #include <net/dlil.h>
41 #include <sys/kern_event.h>
42 #include <sys/kdebug.h>
43 #include <string.h>
44
45 #include <kern/task.h>
46 #include <kern/thread.h>
47 #include <kern/sched_prim.h>
48
49 #include <net/netisr.h>
50 #include <net/if_types.h>
51
52 #include <machine/machine_routines.h>
53
54 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
55 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
56 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
57 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
58 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
59
60
61 #define MAX_DL_TAGS 16
62 #define MAX_DLIL_FILTERS 16
63 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
64 #define MAX_LINKADDR 4 /* LONGWORDS */
65 #define M_NKE M_IFADDR
66
67 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
68 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
69
/*
 * Per-dl_tag binding of an interface to an attached protocol.  A dl_tag is
 * an index into dl_tag_array; a slot is in use when its ifp is non-zero
 * (see dlil_attach_protocol / dlil_detach_protocol).
 */
struct dl_tag_str {
    struct ifnet *ifp;                      /* bound interface; 0 => slot free */
    struct if_proto *proto;                 /* attached protocol instance */
    struct dlil_filterq_head *pr_flt_head;  /* protocol filter queue of that attachment */
};
75
76
/*
 * DLIL's private wrapper around the public struct ifnet.  The public part
 * must stay at the front so an ifnet pointer can be used for either view.
 */
struct dlil_ifnet {
    /* ifnet and drvr_ext are used by the stack and drivers
       drvr_ext extends the public ifnet and must follow dl_if */
    struct ifnet dl_if;            /* public ifnet */
    void *drvr_ext[4];             /* driver reserved (e.g arpcom extension for enet) */

    /* dlil private fields */
    TAILQ_ENTRY(dlil_ifnet) dl_if_link;  /* dlil_ifnet are link together */
                                         /* it is not the ifnet list */
    void *if_uniqueid;             /* unique id identifying the interface */
    size_t if_uniqueid_len;        /* length of the unique id */
    char if_namestorage[IFNAMSIZ]; /* interface name storage for detached interfaces */
};
90
/*
 * Counters for the dlil inject entry points (protocol/interface,
 * input/output).  NOTE(review): the inject functions themselves are not in
 * this chunk -- the exact meaning of the 1/2 suffixes is presumably the two
 * stages of each inject path; confirm against the inject implementations.
 */
struct dlil_stats_str {
    int inject_pr_in1;
    int inject_pr_in2;
    int inject_pr_out1;
    int inject_pr_out2;
    int inject_if_in1;
    int inject_if_in2;
    int inject_if_out1;
    int inject_if_out2;
};
101
102
/*
 * One slot of the global dlil_filters table, keyed by filter id.
 * type == 0 marks a free slot (see get_new_filter_id / dlil_detach_filter);
 * otherwise type is DLIL_IF_FILTER or DLIL_PR_FILTER.
 */
struct dlil_filter_id_str {
    int type;                            /* 0 = free, else DLIL_IF_FILTER / DLIL_PR_FILTER */
    struct dlil_filterq_head *head;      /* queue the filter entry is linked on */
    struct dlil_filterq_entry *filter_ptr; /* the queued filter entry itself */
    struct ifnet *ifp;                   /* interface the filter is attached to */
    struct if_proto *proto;              /* protocol (DLIL_PR_FILTER only) */
};
110
111
112
/*
 * Registered interface family module (e.g. ethernet).  Linked on
 * if_family_head and looked up by find_family_module(); supplies the
 * family-level hooks invoked by the dlil attach/detach/ioctl paths.
 */
struct if_family_str {
    TAILQ_ENTRY(if_family_str) if_fam_next;
    u_long if_family;
    int refcnt;
    int flags;

#define DLIL_SHUTDOWN 1   /* family is shutting down; refuse new attaches */

    int (*add_if)(struct ifnet *ifp);
    int (*del_if)(struct ifnet *ifp);
    int (*init_if)(struct ifnet *ifp);
    int (*add_proto)(struct ddesc_head_str *demux_desc_head,
                     struct if_proto *proto, u_long dl_tag);
    int (*del_proto)(struct if_proto *proto, u_long dl_tag);
    int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data);
    int (*shutdown)();
};
130
131
/*
 * Registered protocol family module, keyed by (protocol family, interface
 * family) pair.  Linked on proto_family_head and looked up by
 * find_proto_module(); provides the hooks to attach/detach the protocol
 * to/from a given interface.
 */
struct proto_family_str {
    TAILQ_ENTRY(proto_family_str) proto_fam_next;
    u_long proto_family;
    u_long if_family;

    int (*attach_proto)(struct ifnet *ifp, u_long *dl_tag);
    int (*detach_proto)(struct ifnet *ifp, u_long dl_tag);
};
140
141
142
143 struct dlil_stats_str dlil_stats;
144
145 static
146 struct dlil_filter_id_str *dlil_filters;
147
148 static
149 struct dl_tag_str *dl_tag_array;
150
151 static
152 TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
153
154 static
155 TAILQ_HEAD(, if_family_str) if_family_head;
156
157 static
158 TAILQ_HEAD(, proto_family_str) proto_family_head;
159
160 static ifnet_inited = 0;
161 static u_long dl_tag_nb = 0;
162 static u_long dlil_filters_nb = 0;
163
164 int dlil_initialized = 0;
165 decl_simple_lock_data(, dlil_input_lock)
166 int dlil_input_thread_wakeup = 0;
167 static struct mbuf *dlil_input_mbuf_head = NULL;
168 static struct mbuf *dlil_input_mbuf_tail = NULL;
169 #if NLOOP > 1
170 #error dlil_input() needs to be revised to support more than on loopback interface
171 #endif
172 static struct mbuf *dlil_input_loop_head = NULL;
173 static struct mbuf *dlil_input_loop_tail = NULL;
174 extern struct ifmultihead ifma_lostlist;
175
176 static void dlil_input_thread(void);
177 extern void run_netisr(void);
178 extern void bpfdetach(struct ifnet*);
179
180 int dlil_expand_mcl;
181
182 /*
183 * Internal functions.
184 */
185
186 static
187 struct if_family_str *find_family_module(u_long if_family)
188 {
189 struct if_family_str *mod = NULL;
190
191 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
192 if (mod->if_family == (if_family & 0xffff))
193 break;
194 }
195
196 return mod;
197 }
198
199 static
200 struct proto_family_str *find_proto_module(u_long proto_family, u_long if_family)
201 {
202 struct proto_family_str *mod = NULL;
203
204 TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) {
205 if ((mod->proto_family == (proto_family & 0xffff))
206 && (mod->if_family == (if_family & 0xffff)))
207 break;
208 }
209
210 return mod;
211 }
212
213
214 /*
215 * Public functions.
216 */
217
218 struct ifnet *ifbyfamily(u_long family, short unit)
219 {
220 struct ifnet *ifp;
221
222 TAILQ_FOREACH(ifp, &ifnet, if_link)
223 if ((family == ifp->if_family) &&
224 (ifp->if_unit == unit))
225 return ifp;
226
227 return 0;
228 }
229
230 struct if_proto *dlttoproto(u_long dl_tag)
231 {
232 if (dl_tag < dl_tag_nb && dl_tag_array[dl_tag].ifp)
233 return dl_tag_array[dl_tag].proto;
234 return 0;
235 }
236
237
238 static int dlil_ifp_proto_count(struct ifnet * ifp)
239 {
240 int count = 0;
241 struct if_proto * proto;
242 struct dlil_proto_head * tmp;
243
244 tmp = (struct dlil_proto_head *) &ifp->proto_head;
245
246 TAILQ_FOREACH(proto, tmp, next)
247 count++;
248
249 return count;
250 }
251
252 u_long ifptodlt(struct ifnet *ifp, u_long proto_family)
253 {
254 struct if_proto *proto;
255 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
256
257
258 TAILQ_FOREACH(proto, tmp, next)
259 if (proto->protocol_family == proto_family)
260 return proto->dl_tag;
261
262 return 0;
263 }
264
265
266 int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag)
267 {
268 struct ifnet *ifp;
269
270 ifp = ifbyfamily(if_family, unit);
271 if (!ifp)
272 return ENOENT;
273
274 *dl_tag = ifptodlt(ifp, proto_family);
275 if (*dl_tag == 0)
276 return EPROTONOSUPPORT;
277 else
278 return 0;
279 }
280
281
282 void dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
283 struct net_event_data *event_data, u_long event_data_len)
284 {
285 struct net_event_data ev_data;
286 struct kev_msg ev_msg;
287
288 /*
289 * a net event always start with a net_event_data structure
290 * but the caller can generate a simple net event or
291 * provide a longer event structure to post
292 */
293
294 ev_msg.vendor_code = KEV_VENDOR_APPLE;
295 ev_msg.kev_class = KEV_NETWORK_CLASS;
296 ev_msg.kev_subclass = event_subclass;
297 ev_msg.event_code = event_code;
298
299 if (event_data == 0) {
300 event_data = &ev_data;
301 event_data_len = sizeof(struct net_event_data);
302 }
303
304 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
305 event_data->if_family = ifp->if_family;
306 event_data->if_unit = (unsigned long) ifp->if_unit;
307
308 ev_msg.dv[0].data_length = event_data_len;
309 ev_msg.dv[0].data_ptr = event_data;
310 ev_msg.dv[1].data_length = 0;
311
312 kev_post_msg(&ev_msg);
313 }
314
315
316
/*
 * One-time initialization of the data link interface layer: allocates the
 * initial dl_tag and filter-id tables (MAX_DL_TAGS / MAX_DLIL_FILTERS
 * entries; both tables can grow later), initializes the module lists and
 * the input-queue lock, then spawns the dlil input thread.
 */
void
dlil_init()
{
    int i;   /* NOTE(review): unused */

    TAILQ_INIT(&dlil_ifnet_head);
    TAILQ_INIT(&if_family_head);
    TAILQ_INIT(&proto_family_head);

    // create the dl tag array
    MALLOC(dl_tag_array, void *, sizeof(struct dl_tag_str) * MAX_DL_TAGS, M_NKE, M_WAITOK);
    if (dl_tag_array == 0) {
        printf("dlil_init tags array allocation failed\n");
        return; //very bad
    }
    bzero(dl_tag_array, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
    dl_tag_nb = MAX_DL_TAGS;

    // create the dl filters array
    MALLOC(dlil_filters, void *, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS, M_NKE, M_WAITOK);
    if (dlil_filters == 0) {
        printf("dlil_init filters array allocation failed\n");
        return; //very bad
    }
    bzero(dlil_filters, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS);
    dlil_filters_nb = MAX_DLIL_FILTERS;

    bzero(&dlil_stats, sizeof(dlil_stats));

    simple_lock_init(&dlil_input_lock);

    /*
     * Start up the dlil input thread once everything is initialized
     */
    (void) kernel_thread(kernel_task, dlil_input_thread);
}
353
354 u_long get_new_filter_id()
355 {
356 u_long i;
357 u_char *p;
358
359 for (i=1; i < dlil_filters_nb; i++)
360 if (dlil_filters[i].type == 0)
361 break;
362
363 if (i == dlil_filters_nb) {
364 // expand the filters array by MAX_DLIL_FILTERS
365 MALLOC(p, u_char *, sizeof(struct dlil_filter_id_str) * (dlil_filters_nb + MAX_DLIL_FILTERS), M_NKE, M_WAITOK);
366 if (p == 0)
367 return 0;
368
369 bcopy(dlil_filters, p, sizeof(struct dlil_filter_id_str) * dlil_filters_nb);
370 bzero(p + sizeof(struct dlil_filter_id_str) * dlil_filters_nb, sizeof(struct dlil_filter_id_str) * MAX_DL_TAGS);
371 dlil_filters_nb += MAX_DLIL_FILTERS;
372 FREE(dlil_filters, M_NKE);
373 dlil_filters = (struct dlil_filter_id_str *)p;
374 }
375
376 return i;
377 }
378
379
380 int dlil_attach_interface_filter(struct ifnet *ifp,
381 struct dlil_if_flt_str *if_filter,
382 u_long *filter_id,
383 int insertion_point)
384 {
385 int s;
386 int retval = 0;
387 struct dlil_filterq_entry *tmp_ptr;
388 struct dlil_filterq_entry *if_filt;
389 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
390 boolean_t funnel_state;
391
392 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
393 if (tmp_ptr == NULL)
394 return (ENOBUFS);
395
396 bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter,
397 sizeof(struct dlil_if_flt_str));
398
399 funnel_state = thread_funnel_set(network_flock, TRUE);
400 s = splnet();
401
402 *filter_id = get_new_filter_id();
403 if (*filter_id == 0) {
404 FREE(tmp_ptr, M_NKE);
405 retval = ENOMEM;
406 goto end;
407 }
408
409 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
410 dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head;
411 dlil_filters[*filter_id].type = DLIL_IF_FILTER;
412 dlil_filters[*filter_id].ifp = ifp;
413 tmp_ptr->filter_id = *filter_id;
414 tmp_ptr->type = DLIL_IF_FILTER;
415
416 if (insertion_point != DLIL_LAST_FILTER) {
417 TAILQ_FOREACH(if_filt, fhead, que)
418 if (insertion_point == if_filt->filter_id) {
419 TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que);
420 break;
421 }
422 }
423 else
424 TAILQ_INSERT_TAIL(fhead, tmp_ptr, que);
425
426 end:
427 splx(s);
428 thread_funnel_set(network_flock, funnel_state);
429 return retval;
430 }
431
432
433 int dlil_attach_protocol_filter(u_long dl_tag,
434 struct dlil_pr_flt_str *pr_filter,
435 u_long *filter_id,
436 int insertion_point)
437 {
438 struct dlil_filterq_entry *tmp_ptr, *pr_filt;
439 int s;
440 int retval = 0;
441 boolean_t funnel_state;
442
443 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0)
444 return (ENOENT);
445
446 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
447 if (tmp_ptr == NULL)
448 return (ENOBUFS);
449
450 bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter,
451 sizeof(struct dlil_pr_flt_str));
452
453 funnel_state = thread_funnel_set(network_flock, TRUE);
454 s = splnet();
455
456 *filter_id = get_new_filter_id();
457 if (*filter_id == 0) {
458 FREE(tmp_ptr, M_NKE);
459 retval = ENOMEM;
460 goto end;
461 }
462
463 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
464 dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head;
465 dlil_filters[*filter_id].type = DLIL_PR_FILTER;
466 dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto;
467 dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp;
468 tmp_ptr->filter_id = *filter_id;
469 tmp_ptr->type = DLIL_PR_FILTER;
470
471 if (insertion_point != DLIL_LAST_FILTER) {
472 TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que)
473 if (insertion_point == pr_filt->filter_id) {
474 TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que);
475 break;
476 }
477 }
478 else
479 TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que);
480
481 end:
482 splx(s);
483 thread_funnel_set(network_flock, funnel_state);
484 return retval;
485 }
486
487
/*
 * Detach and free the filter identified by filter_id, invoking its
 * filter_detach callback (if any) first.  Returns ENOENT when the id is
 * out of range or the slot is already free.  Runs under the network
 * funnel at splnet.
 */
int
dlil_detach_filter(u_long filter_id)
{
    struct dlil_filter_id_str *flt;
    int s, retval = 0;
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    s = splnet();

    /* type == 0 marks a free slot in the filter-id table. */
    if (filter_id >= dlil_filters_nb || dlil_filters[filter_id].type == 0) {
        retval = ENOENT;
        goto end;
    }

    flt = &dlil_filters[filter_id];

    /* Give the owner a last chance to clean up via its detach callback. */
    if (flt->type == DLIL_IF_FILTER) {
        if (IFILT(flt->filter_ptr).filter_detach)
            (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie);
    }
    else {
        if (flt->type == DLIL_PR_FILTER) {
            if (PFILT(flt->filter_ptr).filter_detach)
                (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie);
        }
    }

    /*
     * NOTE(review): this assumes filter_ptr is actually linked on
     * flt->head; see the attach paths for how an unmatched insertion
     * point could leave it unlinked -- confirm.
     */
    TAILQ_REMOVE(flt->head, flt->filter_ptr, que);
    FREE(flt->filter_ptr, M_NKE);
    flt->type = 0;   /* mark the id slot free for reuse */

end:
    splx(s);
    thread_funnel_set(network_flock, funnel_state);
    return retval;
}
525
/*
 * Body of the dlil input thread.  Loops forever: atomically detaches the
 * pending non-loopback and loopback packet chains under dlil_input_lock,
 * feeds each packet to dlil_input_packet(), runs any pending netisr work,
 * and blocks (with itself as the thread_block continuation) when no work
 * remains.  Never returns.
 */
void
dlil_input_thread_continue(void)
{
    while (1) {
        struct mbuf *m, *m_loop;

        /* Detach both queues in one critical section. */
        usimple_lock(&dlil_input_lock);
        m = dlil_input_mbuf_head;
        dlil_input_mbuf_head = NULL;
        dlil_input_mbuf_tail = NULL;
        m_loop = dlil_input_loop_head;
        dlil_input_loop_head = NULL;
        dlil_input_loop_tail = NULL;
        usimple_unlock(&dlil_input_lock);

        /*
         * NOTE warning %%% attention !!!!
         * We should think about putting some thread starvation safeguards if
         * we deal with long chains of packets.
         */
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;

            /* Unchain before handing off; rcvif identifies the interface. */
            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
            m = m0;
        }
        m = m_loop;
        while (m) {
            struct mbuf *m0 = m->m_nextpkt;
            void *header = m->m_pkthdr.header;
            /* Loopback packets cannot use rcvif (see dlil_input()); there
               is a single loopback interface (enforced by the NLOOP check). */
            struct ifnet *ifp = &loif[0];

            m->m_nextpkt = NULL;
            m->m_pkthdr.header = NULL;
            (void) dlil_input_packet(ifp, m, header);
            m = m0;
        }

        if (netisr != 0)
            run_netisr();

        /*
         * NOTE(review): these heads are re-checked without the lock; the
         * wakeup() in dlil_input() is presumably what closes the window --
         * confirm the assert_wait/wakeup ordering makes this race benign.
         */
        if (dlil_input_mbuf_head == NULL &&
            dlil_input_loop_head == NULL &&
            netisr == 0) {
            assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
            (void) thread_block(dlil_input_thread_continue);
            /* NOTREACHED */
        }
    }
}
579
/*
 * Entry point of the dlil input thread (spawned from dlil_init()).
 * Applies the networking scheduling policy, takes the network funnel for
 * the life of the thread, marks dlil as initialized, and enters the
 * processing loop (which never returns).
 */
void dlil_input_thread(void)
{
    register thread_t self = current_act();

    ml_thread_policy(self, MACHINE_GROUP,
                     (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));

    /* The dlil thread is always funneled */
    thread_funnel_set(network_flock, TRUE);
    dlil_initialized = 1;
    dlil_input_thread_continue();
}
592
/*
 * Enqueue a chain of received packets (m_head .. m_tail; m_tail may be
 * NULL for a single packet) for the dlil input thread and wake it.
 * Loopback traffic goes on its own queue because rcvif cannot be trusted
 * for loopbacked multicast (see WARNING below).  Always returns 0.
 */
int
dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
{
    /* WARNING
     * Because of loopbacked multicast we cannot stuff the ifp in
     * the rcvif of the packet header: loopback has its own dlil
     * input queue
     */

    usimple_lock(&dlil_input_lock);
    if (ifp->if_type != IFT_LOOP) {
        /* Append the chain to the regular input queue. */
        if (dlil_input_mbuf_head == NULL)
            dlil_input_mbuf_head = m_head;
        else if (dlil_input_mbuf_tail != NULL)
            dlil_input_mbuf_tail->m_nextpkt = m_head;
        dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
    } else {
        /* Append the chain to the loopback input queue. */
        if (dlil_input_loop_head == NULL)
            dlil_input_loop_head = m_head;
        else if (dlil_input_loop_tail != NULL)
            dlil_input_loop_tail->m_nextpkt = m_head;
        dlil_input_loop_tail = m_tail ? m_tail : m_head;
    }
    usimple_unlock(&dlil_input_lock);

    /* Kick the input thread (see assert_wait in dlil_input_thread_continue). */
    wakeup((caddr_t)&dlil_input_thread_wakeup);

    return 0;
}
622
/*
 * Deliver one received packet up the stack: run interface filters, let the
 * interface family's demux pick the protocol (falling back to protocol
 * "offer" hooks), run protocol filters, then hand the packet to the
 * protocol's dl_input.  On any non-EJUSTRETURN filter error the mbuf is
 * freed and the error returned; EJUSTRETURN means a filter consumed the
 * packet and 0 is returned.
 */
int
dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
                  char *frame_header)
{
    struct ifnet *orig_ifp = 0;
    struct dlil_filterq_entry *tmp;
    int retval;
    struct if_proto *ifproto = 0;
    struct if_proto *proto;
    struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;


    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);

    /*
     * Run interface filters
     */

    /* A filter may retarget the packet to another interface (it rewrites
       *&ifp); when that happens the outer loop restarts the filter walk.
       NOTE(review): fhead is not recomputed for the new ifp -- confirm
       whether that is intentional. */
    while (orig_ifp != ifp) {
        orig_ifp = ifp;

        TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
            if (IFILT(tmp).filter_if_input) {
                retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
                                                       &ifp,
                                                       &m,
                                                       &frame_header);
                if (retval) {
                    if (retval == EJUSTRETURN)
                        return 0;           /* filter consumed the packet */
                    else {
                        m_freem(m);
                        return retval;
                    }
                }
            }

            if (ifp != orig_ifp)
                break;                      /* restart walk on the new ifp */
        }
    }

    ifp->if_lastchange = time;

    /*
     * Call family demux module. If the demux module finds a match
     * for the frame it will fill-in the ifproto pointer.
     */

    retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );

    if (m->m_flags & (M_BCAST|M_MCAST))
        ifp->if_imcasts++;

    if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) {
        /*
         * No match was found, look for any offers.
         */
        struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
        TAILQ_FOREACH(proto, tmp, next) {
            if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
                ifproto = proto;
                retval = 0;
                break;
            }
        }
    }

    if (retval) {
        if (retval != EJUSTRETURN) {
            m_freem(m);
            return retval;
        }
        else
            return 0;                       /* demux consumed the packet */
    }
    else
        if (ifproto == 0) {
            /* Demux claimed success but gave us no protocol: drop. */
            printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
            m_freem(m);
            return 0;
        }

    /*
     * Call any attached protocol filters.
     */

    TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
        if (PFILT(tmp).filter_dl_input) {
            retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
                                                   &m,
                                                   &frame_header,
                                                   &ifp);

            if (retval) {
                if (retval == EJUSTRETURN)
                    return 0;               /* filter consumed the packet */
                else {
                    m_freem(m);
                    return retval;
                }
            }
        }
    }



    /* Final delivery to the protocol. */
    retval = (*ifproto->dl_input)(m, frame_header,
                                  ifp, ifproto->dl_tag,
                                  TRUE);

    if (retval == EJUSTRETURN)
        retval = 0;
    else
        if (retval)
            m_freem(m);

    KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
    return retval;
}
743
744
745
746 void ether_input(ifp, eh, m)
747 struct ifnet *ifp;
748 struct ether_header *eh;
749 struct mbuf *m;
750
751 {
752 kprintf("Someone is calling ether_input!!\n");
753
754 dlil_input(ifp, m, NULL);
755 }
756
757
/*
 * Distribute an interface event: run interface filters, the interface's
 * own if_event hook, then for every attached protocol its protocol
 * filters and dl_event hook; finally post the event to the kernel event
 * queue.  Any hook returning non-zero stops distribution (EJUSTRETURN is
 * reported as success).  Takes and restores the network funnel.
 */
int
dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
{
    struct dlil_filterq_entry *filt;
    int retval = 0;
    struct ifnet *orig_ifp = 0;
    struct if_proto *proto;
    struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
    struct kev_msg kev_msg;
    struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
    boolean_t funnel_state;


    funnel_state = thread_funnel_set(network_flock, TRUE);

    /* A filter may retarget the event to another interface; restart the
       filter walk when that happens (same pattern as dlil_input_packet). */
    while (orig_ifp != ifp) {
        orig_ifp = ifp;

        TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) {
            if (IFILT(filt).filter_if_event) {
                retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie,
                                                        &ifp,
                                                        &event);

                if (retval) {
                    (void) thread_funnel_set(network_flock, funnel_state);
                    if (retval == EJUSTRETURN)
                        return 0;
                    else
                        return retval;
                }
            }

            if (ifp != orig_ifp)
                break;
        }
    }


    /*
     * Call Interface Module event hook, if any.
     */

    if (ifp->if_event) {
        retval = ifp->if_event(ifp, (caddr_t) event);

        if (retval) {
            (void) thread_funnel_set(network_flock, funnel_state);

            if (retval == EJUSTRETURN)
                return 0;
            else
                return retval;
        }
    }

    /*
     * Call dl_event entry point for all protocols attached to this interface
     */

    TAILQ_FOREACH(proto, tmp, next) {
        /*
         * Call any attached protocol filters.
         */

        TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) {
            if (PFILT(filt).filter_dl_event) {
                retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie,
                                                        event);

                if (retval) {
                    (void) thread_funnel_set(network_flock, funnel_state);
                    if (retval == EJUSTRETURN)
                        return 0;
                    else
                        return retval;
                }
            }
        }


        /*
         * Finally, call the dl_event entry point (if any)
         */

        if (proto->dl_event)
            retval = (*proto->dl_event)(event, proto->dl_tag);

        /* NOTE(review): when dl_event is NULL this tests the value left in
           retval by an earlier hook -- presumably benign because non-zero
           non-EJUSTRETURN values returned above, but confirm. */
        if (retval == EJUSTRETURN) {
            (void) thread_funnel_set(network_flock, funnel_state);
            return 0;
        }
    }


    /*
     * Now, post this event to the Kernel Event message queue
     */

    kev_msg.vendor_code = event->vendor_code;
    kev_msg.kev_class = event->kev_class;
    kev_msg.kev_subclass = event->kev_subclass;
    kev_msg.event_code = event->event_code;
    kev_msg.dv[0].data_ptr = &event->event_data[0];
    kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
    kev_msg.dv[1].data_length = 0;

    kev_post_msg(&kev_msg);

    (void) thread_funnel_set(network_flock, funnel_state);
    return 0;
}
870
871
872
/*
 * Send a packet through the protocol identified by dl_tag: run the
 * protocol's pre-output hook (unless raw), protocol filters, the
 * interface framing hook (unless raw), optionally the bridge, interface
 * filters, and finally the driver's if_output.  EJUSTRETURN from any hook
 * means the packet was consumed and 0 is returned; any other non-zero
 * value frees the mbuf and is returned.  ENOENT is returned (and the mbuf
 * freed) for a stale/invalid dl_tag.
 */
int
dlil_output(u_long dl_tag,
            struct mbuf *m,
            caddr_t route,
            struct sockaddr *dest,
            int raw
            )
{
    char *frame_type;
    char *dst_linkaddr;
    struct ifnet *orig_ifp = 0;
    struct ifnet *ifp;
    struct if_proto *proto;
    struct dlil_filterq_entry *tmp;
    int retval = 0;
    /* Scratch space filled in by dl_pre_output and consumed by if_framer. */
    char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
    char dst_linkaddr_buffer[MAX_LINKADDR * 4];
    struct dlil_filterq_head *fhead;

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);

    /* A dl_tag slot is live only while its ifp is set. */
    if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
        m_freem(m);
        return ENOENT;
    }

    ifp = dl_tag_array[dl_tag].ifp;
    proto = dl_tag_array[dl_tag].proto;

    frame_type = frame_type_buffer;
    dst_linkaddr = dst_linkaddr_buffer;

    fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;

    /* Let the protocol resolve the link-level destination and frame type. */
    if ((raw == 0) && (proto->dl_pre_output)) {
        retval = (*proto->dl_pre_output)(ifp, &m, dest, route,
                                         frame_type, dst_linkaddr, dl_tag);
        if (retval) {
            if (retval == EJUSTRETURN)
                return 0;
            else {
                m_freem(m);
                return retval;
            }
        }
    }

    /*
     * Run any attached protocol filters.
     */

    if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
        TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
            if (PFILT(tmp).filter_dl_output) {
                retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
                                                        &m, &ifp, &dest, dst_linkaddr, frame_type);
                if (retval) {
                    if (retval == EJUSTRETURN)
                        return 0;
                    else {
                        m_freem(m);
                        return retval;
                    }
                }
            }
        }
    }


    /*
     * Call framing module
     */
    if ((raw == 0) && (ifp->if_framer)) {
        retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
        if (retval) {
            if (retval == EJUSTRETURN)
                return 0;
            else
            {
                m_freem(m);
                return retval;
            }
        }
    }

#if BRIDGE
    /* When bridging, hand the framed packet to the bridge and stop here. */
    if (do_bridge) {
        struct mbuf *m0 = m ;
        struct ether_header *eh = mtod(m, struct ether_header *);

        if (m->m_pkthdr.rcvif)
            m->m_pkthdr.rcvif = NULL ;
        ifp = bridge_dst_lookup(eh);
        bdg_forward(&m0, ifp);
        if (m0)
            m_freem(m0);

        return 0;
    }
#endif


    /*
     * Let interface filters (if any) do their thing ...
     */

    /* Recomputed: a protocol filter may have switched ifp above. */
    fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
    if (TAILQ_EMPTY(fhead) == 0) {
        /* Restart the walk when a filter retargets the packet to another
           interface (same pattern as the input path). */
        while (orig_ifp != ifp) {
            orig_ifp = ifp;
            TAILQ_FOREACH(tmp, fhead, que) {
                if (IFILT(tmp).filter_if_output) {
                    retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
                                                            &ifp,
                                                            &m);
                    if (retval) {
                        if (retval == EJUSTRETURN)
                            return 0;
                        else {
                            m_freem(m);
                            return retval;
                        }
                    }

                }

                if (ifp != orig_ifp)
                    break;
            }
        }
    }

    /*
     * Finally, call the driver.
     */

    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
    retval = (*ifp->if_output)(ifp, m);
    KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);

    KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);

    if ((retval == 0) || (retval == EJUSTRETURN))
        return 0;
    else
        return retval;
}
1020
1021
1022 int
1023 dlil_ioctl(u_long proto_fam,
1024 struct ifnet *ifp,
1025 u_long ioctl_code,
1026 caddr_t ioctl_arg)
1027 {
1028 struct dlil_filterq_entry *tmp;
1029 struct dlil_filterq_head *fhead;
1030 int retval = EOPNOTSUPP;
1031 int retval2 = EOPNOTSUPP;
1032 u_long dl_tag;
1033 struct if_family_str *if_family;
1034
1035
1036 if (proto_fam) {
1037 if (dlil_find_dltag(ifp->if_family, ifp->if_unit,
1038 proto_fam, &dl_tag) == 0) {
1039 if (dl_tag_array[dl_tag].ifp != ifp)
1040 return ENOENT;
1041
1042 /*
1043 * Run any attached protocol filters.
1044 */
1045 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1046 if (PFILT(tmp).filter_dl_ioctl) {
1047 retval =
1048 (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie,
1049 dl_tag_array[dl_tag].ifp,
1050 ioctl_code,
1051 ioctl_arg);
1052
1053 if (retval) {
1054 if (retval == EJUSTRETURN)
1055 return 0;
1056 else
1057 return retval;
1058 }
1059 }
1060 }
1061
1062 if (dl_tag_array[dl_tag].proto->dl_ioctl)
1063 retval =
1064 (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag,
1065 dl_tag_array[dl_tag].ifp,
1066 ioctl_code,
1067 ioctl_arg);
1068 else
1069 retval = EOPNOTSUPP;
1070 }
1071 }
1072
1073 if ((retval) && (retval != EOPNOTSUPP)) {
1074 if (retval == EJUSTRETURN)
1075 return 0;
1076 else
1077 return retval;
1078 }
1079
1080
1081 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1082 TAILQ_FOREACH(tmp, fhead, que) {
1083 if (IFILT(tmp).filter_if_ioctl) {
1084 retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp,
1085 ioctl_code, ioctl_arg);
1086 if (retval2) {
1087 if (retval2 == EJUSTRETURN)
1088 return 0;
1089 else
1090 return retval2;
1091 }
1092 }
1093 }
1094
1095
1096 if_family = find_family_module(ifp->if_family);
1097 if ((if_family) && (if_family->ifmod_ioctl)) {
1098 retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1099
1100 if ((retval2) && (retval2 != EOPNOTSUPP)) {
1101 if (retval2 == EJUSTRETURN)
1102 return 0;
1103 else
1104 return retval;
1105 }
1106
1107 if (retval == EOPNOTSUPP)
1108 retval = retval2;
1109 }
1110
1111 if (ifp->if_ioctl)
1112 retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1113
1114 if (retval == EOPNOTSUPP)
1115 return retval2;
1116 else {
1117 if (retval2 == EOPNOTSUPP)
1118 return 0;
1119 else
1120 return retval2;
1121 }
1122 }
1123
1124
/*
 * Attach a protocol to an interface.  Validates the (interface family,
 * unit, protocol family) triple, allocates a dl_tag slot (growing
 * dl_tag_array by MAX_DL_TAGS when full), builds the if_proto from the
 * caller's registration structure, lets the interface family module
 * refine the demux descriptors, links the protocol on the interface, and
 * posts a KEV_DL_PROTO_ATTACHED event.  Returns 0 with *dl_tag set, or
 * EINVAL/ENOENT/EEXIST/ENOBUFS/ENOMEM.  Runs under the network funnel at
 * splnet.
 */
int
dlil_attach_protocol(struct dlil_proto_reg_str *proto,
                     u_long *dl_tag)
{
    struct ifnet *ifp;
    struct if_proto *ifproto;
    u_long i;
    struct if_family_str *if_family;
    struct dlil_proto_head *tmp;
    struct kev_dl_proto_data ev_pr_data;
    int s, retval = 0;
    boolean_t funnel_state;
    u_char *p;

    if ((proto->protocol_family == 0) || (proto->interface_family == 0))
        return EINVAL;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    s = splnet();
    /* The interface family must be registered and not shutting down. */
    if_family = find_family_module(proto->interface_family);
    if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
        kprintf("dlil_attach_protocol -- no interface family module %d",
                proto->interface_family);
        retval = ENOENT;
        goto end;
    }

    ifp = ifbyfamily(proto->interface_family, proto->unit_number);
    if (!ifp) {
        kprintf("dlil_attach_protocol -- no such interface %d unit %d\n",
                proto->interface_family, proto->unit_number);
        retval = ENOENT;
        goto end;
    }

    /* Refuse a second attachment of the same protocol family. */
    if (dlil_find_dltag(proto->interface_family, proto->unit_number,
                        proto->protocol_family, &i) == 0) {
        retval = EEXIST;
        goto end;
    }

    /* Find a free dl_tag slot (slot 0 is never used; ifp == 0 is free). */
    for (i=1; i < dl_tag_nb; i++)
        if (dl_tag_array[i].ifp == 0)
            break;

    if (i == dl_tag_nb) {
        // expand the tag array by MAX_DL_TAGS
        MALLOC(p, u_char *, sizeof(struct dl_tag_str) * (dl_tag_nb + MAX_DL_TAGS), M_NKE, M_WAITOK);
        if (p == 0) {
            retval = ENOBUFS;
            goto end;
        }
        bcopy(dl_tag_array, p, sizeof(struct dl_tag_str) * dl_tag_nb);
        bzero(p + sizeof(struct dl_tag_str) * dl_tag_nb, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
        dl_tag_nb += MAX_DL_TAGS;
        FREE(dl_tag_array, M_NKE);
        dl_tag_array = (struct dl_tag_str *)p;
        /* i now indexes the first slot of the newly added region. */
    }

    /*
     * Allocate and init a new if_proto structure
     */

    ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
    if (!ifproto) {
        printf("ERROR - DLIL failed if_proto allocation\n");
        retval = ENOMEM;
        goto end;
    }

    bzero(ifproto, sizeof(struct if_proto));

    /* Claim the slot and publish the new tag. */
    dl_tag_array[i].ifp = ifp;
    dl_tag_array[i].proto = ifproto;
    dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head;
    ifproto->dl_tag = i;
    *dl_tag = i;

    if (proto->default_proto) {
        if (ifp->if_data.default_proto == 0)
            ifp->if_data.default_proto = i;
        else
            printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n");
    }

    /* Copy the caller's hooks into the attachment. */
    ifproto->protocol_family = proto->protocol_family;
    ifproto->dl_input = proto->input;
    ifproto->dl_pre_output = proto->pre_output;
    ifproto->dl_event = proto->event;
    ifproto->dl_offer = proto->offer;
    ifproto->dl_ioctl = proto->ioctl;
    ifproto->ifp = ifp;
    TAILQ_INIT(&ifproto->pr_flt_head);

    /*
     * Call family module add_proto routine so it can refine the
     * demux descriptors as it wishes.
     */
    retval = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag);
    if (retval) {
        /* Roll back: release the slot and the if_proto. */
        dl_tag_array[i].ifp = 0;
        FREE(ifproto, M_IFADDR);
        goto end;
    }

    /*
     * Add to if_proto list for this interface
     */

    tmp = (struct dlil_proto_head *) &ifp->proto_head;
    TAILQ_INSERT_TAIL(tmp, ifproto, next);
    ifp->refcnt++;
    if (ifproto->dl_offer)
        ifp->offercnt++;

    /* the reserved field carries the number of protocol still attached (subject to change) */
    ev_pr_data.proto_family = proto->protocol_family;
    ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
    dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
                  (struct net_event_data *)&ev_pr_data,
                  sizeof(struct kev_dl_proto_data));

end:
    splx(s);
    thread_funnel_set(network_flock, funnel_state);
    return retval;
}
1252
1253
1254
1255 int
1256 dlil_detach_protocol(u_long dl_tag)
1257 {
1258 struct ifnet *ifp;
1259 struct ifnet *orig_ifp=0;
1260 struct if_proto *proto;
1261 struct dlil_proto_head *tmp;
1262 struct if_family_str *if_family;
1263 struct dlil_filterq_entry *filter;
1264 int s, retval = 0;
1265 struct dlil_filterq_head *fhead;
1266 struct kev_dl_proto_data ev_pr_data;
1267 boolean_t funnel_state;
1268
1269 funnel_state = thread_funnel_set(network_flock, TRUE);
1270 s = splnet();
1271
1272 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
1273 retval = ENOENT;
1274 goto end;
1275 }
1276
1277 ifp = dl_tag_array[dl_tag].ifp;
1278 proto = dl_tag_array[dl_tag].proto;
1279
1280 if_family = find_family_module(ifp->if_family);
1281 if (if_family == NULL) {
1282 retval = ENOENT;
1283 goto end;
1284 }
1285
1286 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1287
1288 /*
1289 * Call family module del_proto
1290 */
1291
1292 (*if_family->del_proto)(proto, dl_tag);
1293
1294
1295 /*
1296 * Remove and deallocate any attached protocol filters
1297 */
1298
1299 while (filter = TAILQ_FIRST(&proto->pr_flt_head))
1300 dlil_detach_filter(filter->filter_id);
1301
1302 if (proto->dl_offer)
1303 ifp->offercnt--;
1304
1305 if (ifp->if_data.default_proto == dl_tag)
1306 ifp->if_data.default_proto = 0;
1307 dl_tag_array[dl_tag].ifp = 0;
1308
1309 /* the reserved field carries the number of protocol still attached (subject to change) */
1310 ev_pr_data.proto_family = proto->protocol_family;
1311
1312 /*
1313 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1314 */
1315
1316 if_rtproto_del(ifp, proto->protocol_family);
1317
1318 TAILQ_REMOVE(tmp, proto, next);
1319 FREE(proto, M_IFADDR);
1320
1321 ifp->refcnt--;
1322 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1323 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1324 (struct net_event_data *)&ev_pr_data,
1325 sizeof(struct kev_dl_proto_data));
1326
1327 if (ifp->refcnt == 0) {
1328
1329 TAILQ_REMOVE(&ifnet, ifp, if_link);
1330
1331 (*if_family->del_if)(ifp);
1332
1333 if (--if_family->refcnt == 0) {
1334 if (if_family->shutdown)
1335 (*if_family->shutdown)();
1336
1337 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1338 FREE(if_family, M_IFADDR);
1339 }
1340
1341 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1342 while (orig_ifp != ifp) {
1343 orig_ifp = ifp;
1344
1345 TAILQ_FOREACH(filter, fhead, que) {
1346 if (IFILT(filter).filter_if_free) {
1347 retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp);
1348 if (retval) {
1349 splx(s);
1350 thread_funnel_set(network_flock, funnel_state);
1351 return 0;
1352 }
1353 }
1354 if (ifp != orig_ifp)
1355 break;
1356 }
1357 }
1358
1359 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1360
1361 (*ifp->if_free)(ifp);
1362 }
1363
1364 end:
1365 splx(s);
1366 thread_funnel_set(network_flock, funnel_state);
1367 return retval;
1368 }
1369
1370
1371
1372
1373
1374 int
1375 dlil_if_attach(struct ifnet *ifp)
1376 {
1377 u_long interface_family = ifp->if_family;
1378 struct if_family_str *if_family;
1379 struct dlil_proto_head *tmp;
1380 int stat;
1381 int s;
1382 boolean_t funnel_state;
1383
1384 funnel_state = thread_funnel_set(network_flock, TRUE);
1385 s = splnet();
1386 if (ifnet_inited == 0) {
1387 TAILQ_INIT(&ifnet);
1388 ifnet_inited = 1;
1389 }
1390
1391 if_family = find_family_module(interface_family);
1392
1393 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1394 splx(s);
1395 kprintf("Attempt to attach interface without family module - %d\n",
1396 interface_family);
1397 thread_funnel_set(network_flock, funnel_state);
1398 return ENODEV;
1399 }
1400
1401 if (ifp->refcnt == 0) {
1402 /*
1403 * Call the family module to fill in the appropriate fields in the
1404 * ifnet structure.
1405 */
1406
1407 stat = (*if_family->add_if)(ifp);
1408 if (stat) {
1409 splx(s);
1410 kprintf("dlil_if_attach -- add_if failed with %d\n", stat);
1411 thread_funnel_set(network_flock, funnel_state);
1412 return stat;
1413 }
1414 if_family->refcnt++;
1415
1416 /*
1417 * Add the ifp to the interface list.
1418 */
1419
1420 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1421 TAILQ_INIT(tmp);
1422
1423 ifp->if_data.default_proto = 0;
1424 ifp->offercnt = 0;
1425 TAILQ_INIT(&ifp->if_flt_head);
1426 old_if_attach(ifp);
1427
1428 if (if_family->init_if) {
1429 stat = (*if_family->init_if)(ifp);
1430 if (stat) {
1431 kprintf("dlil_if_attach -- init_if failed with %d\n", stat);
1432 }
1433 }
1434 }
1435
1436 ifp->refcnt++;
1437
1438 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
1439
1440 splx(s);
1441 thread_funnel_set(network_flock, funnel_state);
1442 return 0;
1443 }
1444
1445
1446 int
1447 dlil_if_detach(struct ifnet *ifp)
1448 {
1449 struct if_proto *proto;
1450 struct dlil_filterq_entry *if_filter;
1451 struct if_family_str *if_family;
1452 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1453 struct kev_msg ev_msg;
1454 boolean_t funnel_state;
1455
1456 funnel_state = thread_funnel_set(network_flock, TRUE);
1457
1458 if_family = find_family_module(ifp->if_family);
1459
1460 if (!if_family) {
1461 kprintf("Attempt to detach interface without family module - %s\n",
1462 ifp->if_name);
1463 thread_funnel_set(network_flock, funnel_state);
1464 return ENODEV;
1465 }
1466
1467 while (if_filter = TAILQ_FIRST(fhead))
1468 dlil_detach_filter(if_filter->filter_id);
1469
1470 ifp->refcnt--;
1471
1472 if (ifp->refcnt > 0) {
1473 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
1474 thread_funnel_set(network_flock, funnel_state);
1475 return DLIL_WAIT_FOR_FREE;
1476 }
1477
1478 while (ifp->if_multiaddrs.lh_first) {
1479 struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first;
1480
1481 /*
1482 * When the interface is gone, we will no
1483 * longer be listening on these multicasts.
1484 * Various bits of the stack may be referencing
1485 * these multicasts, so we can't just free them.
1486 * We place them on a list so they may be cleaned
1487 * up later as the other bits of the stack release
1488 * them.
1489 */
1490 LIST_REMOVE(ifma, ifma_link);
1491 ifma->ifma_ifp = NULL;
1492 LIST_INSERT_HEAD(&ifma_lostlist, ifma, ifma_link);
1493 }
1494
1495 /* Let BPF know the interface is detaching. */
1496 bpfdetach(ifp);
1497 TAILQ_REMOVE(&ifnet, ifp, if_link);
1498
1499 (*if_family->del_if)(ifp);
1500
1501 if (--if_family->refcnt == 0) {
1502 if (if_family->shutdown)
1503 (*if_family->shutdown)();
1504
1505 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1506 FREE(if_family, M_IFADDR);
1507 }
1508
1509 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1510 thread_funnel_set(network_flock, funnel_state);
1511 return 0;
1512 }
1513
1514
1515 int
1516 dlil_reg_if_modules(u_long interface_family,
1517 struct dlil_ifmod_reg_str *ifmod)
1518 {
1519 struct if_family_str *if_family;
1520 int s;
1521 boolean_t funnel_state;
1522
1523
1524 funnel_state = thread_funnel_set(network_flock, TRUE);
1525 s = splnet();
1526 if (find_family_module(interface_family)) {
1527 kprintf("Attempt to register dlil family module more than once - %d\n",
1528 interface_family);
1529 splx(s);
1530 thread_funnel_set(network_flock, funnel_state);
1531 return EEXIST;
1532 }
1533
1534 if ((!ifmod->add_if) || (!ifmod->del_if) ||
1535 (!ifmod->add_proto) || (!ifmod->del_proto)) {
1536 kprintf("dlil_reg_if_modules passed at least one null pointer\n");
1537 splx(s);
1538 thread_funnel_set(network_flock, funnel_state);
1539 return EINVAL;
1540 }
1541
1542 /*
1543 * The following is a gross hack to keep from breaking
1544 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
1545 * does not zero the reserved fields in dlil_ifmod_reg_str.
1546 * As a result, we have to zero any function that used to
1547 * be reserved fields at the time Vicomsoft built their
1548 * kext. Radar #2974305
1549 */
1550 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2]) {
1551 if (interface_family == 123) { /* Vicom */
1552 ifmod->init_if = 0;
1553 } else {
1554 splx(s);
1555 thread_funnel_set(network_flock, funnel_state);
1556 return EINVAL;
1557 }
1558 }
1559
1560 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
1561 if (!if_family) {
1562 kprintf("dlil_reg_if_modules failed allocation\n");
1563 splx(s);
1564 thread_funnel_set(network_flock, funnel_state);
1565 return ENOMEM;
1566 }
1567
1568 bzero(if_family, sizeof(struct if_family_str));
1569
1570 if_family->if_family = interface_family & 0xffff;
1571 if_family->shutdown = ifmod->shutdown;
1572 if_family->add_if = ifmod->add_if;
1573 if_family->del_if = ifmod->del_if;
1574 if_family->init_if = ifmod->init_if;
1575 if_family->add_proto = ifmod->add_proto;
1576 if_family->del_proto = ifmod->del_proto;
1577 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
1578 if_family->refcnt = 1;
1579 if_family->flags = 0;
1580
1581 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
1582 splx(s);
1583 thread_funnel_set(network_flock, funnel_state);
1584 return 0;
1585 }
1586
1587 int dlil_dereg_if_modules(u_long interface_family)
1588 {
1589 struct if_family_str *if_family;
1590 int s, ret = 0;
1591 boolean_t funnel_state;
1592
1593 funnel_state = thread_funnel_set(network_flock, TRUE);
1594 s = splnet();
1595 if_family = find_family_module(interface_family);
1596 if (if_family == 0) {
1597 splx(s);
1598 thread_funnel_set(network_flock, funnel_state);
1599 return ENOENT;
1600 }
1601
1602 if (--if_family->refcnt == 0) {
1603 if (if_family->shutdown)
1604 (*if_family->shutdown)();
1605
1606 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1607 FREE(if_family, M_IFADDR);
1608 }
1609 else {
1610 if_family->flags |= DLIL_SHUTDOWN;
1611 ret = DLIL_WAIT_FOR_FREE;
1612 }
1613
1614 splx(s);
1615 thread_funnel_set(network_flock, funnel_state);
1616 return ret;
1617 }
1618
1619
1620
1621 int
1622 dlil_reg_proto_module(u_long protocol_family, u_long interface_family,
1623 struct dlil_protomod_reg_str *protomod_reg)
1624 {
1625 struct proto_family_str *proto_family;
1626 int s;
1627 boolean_t funnel_state;
1628
1629
1630 funnel_state = thread_funnel_set(network_flock, TRUE);
1631 s = splnet();
1632 if (find_proto_module(protocol_family, interface_family)) {
1633 splx(s);
1634 thread_funnel_set(network_flock, funnel_state);
1635 return EEXIST;
1636 }
1637
1638 if (protomod_reg->reserved[0] != 0 || protomod_reg->reserved[1] != 0
1639 || protomod_reg->reserved[2] != 0 || protomod_reg->reserved[3] !=0) {
1640 splx(s);
1641 thread_funnel_set(network_flock, funnel_state);
1642 return EINVAL;
1643 }
1644
1645 if (protomod_reg->attach_proto == NULL) {
1646 splx(s);
1647 thread_funnel_set(network_flock, funnel_state);
1648 return EINVAL;
1649 }
1650
1651 proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK);
1652 if (!proto_family) {
1653 splx(s);
1654 thread_funnel_set(network_flock, funnel_state);
1655 return ENOMEM;
1656 }
1657
1658 bzero(proto_family, sizeof(struct proto_family_str));
1659 proto_family->proto_family = protocol_family;
1660 proto_family->if_family = interface_family & 0xffff;
1661 proto_family->attach_proto = protomod_reg->attach_proto;
1662 proto_family->detach_proto = protomod_reg->detach_proto;
1663
1664 TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next);
1665 splx(s);
1666 thread_funnel_set(network_flock, funnel_state);
1667 return 0;
1668 }
1669
1670 int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family)
1671 {
1672 struct proto_family_str *proto_family;
1673 int s, ret = 0;
1674 boolean_t funnel_state;
1675
1676 funnel_state = thread_funnel_set(network_flock, TRUE);
1677 s = splnet();
1678 proto_family = find_proto_module(protocol_family, interface_family);
1679 if (proto_family == 0) {
1680 splx(s);
1681 thread_funnel_set(network_flock, funnel_state);
1682 return ENOENT;
1683 }
1684
1685 TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next);
1686 FREE(proto_family, M_IFADDR);
1687
1688 splx(s);
1689 thread_funnel_set(network_flock, funnel_state);
1690 return ret;
1691 }
1692
1693 int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp, u_long *dl_tag)
1694 {
1695 struct proto_family_str *proto_family;
1696 int s, ret = 0;
1697 boolean_t funnel_state;
1698
1699 funnel_state = thread_funnel_set(network_flock, TRUE);
1700 s = splnet();
1701 proto_family = find_proto_module(protocol_family, ifp->if_family);
1702 if (proto_family == 0) {
1703 splx(s);
1704 thread_funnel_set(network_flock, funnel_state);
1705 return ENOENT;
1706 }
1707
1708 ret = (*proto_family->attach_proto)(ifp, dl_tag);
1709
1710 splx(s);
1711 thread_funnel_set(network_flock, funnel_state);
1712 return ret;
1713 }
1714
1715
1716 int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp)
1717 {
1718 struct proto_family_str *proto_family;
1719 int s, ret = 0;
1720 u_long tag;
1721 boolean_t funnel_state;
1722
1723 funnel_state = thread_funnel_set(network_flock, TRUE);
1724 s = splnet();
1725
1726 ret = dlil_find_dltag(ifp->if_family, ifp->if_unit, protocol_family, &tag);
1727
1728 if (ret == 0) {
1729 proto_family = find_proto_module(protocol_family, ifp->if_family);
1730 if (proto_family && proto_family->detach_proto)
1731 ret = (*proto_family->detach_proto)(ifp, tag);
1732 else
1733 ret = dlil_detach_protocol(tag);
1734 }
1735
1736 splx(s);
1737 thread_funnel_set(network_flock, funnel_state);
1738 return ret;
1739 }
1740
1741
1742
1743 /*
1744 * Old if_attach no-op'ed function defined here for temporary backwards compatibility
1745 */
1746
/*
 * Old if_attach no-op'ed function kept for temporary backwards
 * compatibility: forwards to dlil_if_attach() and discards the result.
 * (Converted from a K&R-style definition to an ANSI prototype.)
 */
void
if_attach(struct ifnet *ifp)
{
    (void) dlil_if_attach(ifp);
}
1752
1753
1754
1755 int
1756 dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id)
1757 {
1758 struct ifnet *orig_ifp = 0;
1759 struct ifnet *ifp;
1760 struct if_proto *ifproto;
1761 struct if_proto *proto;
1762 struct dlil_filterq_entry *tmp;
1763 int retval = 0;
1764 struct dlil_filterq_head *fhead;
1765 int match_found;
1766
1767 dlil_stats.inject_if_in1++;
1768
1769 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
1770 return ENOENT;
1771
1772 ifp = dlil_filters[from_id].ifp;
1773
1774 /*
1775 * Let interface filters (if any) do their thing ...
1776 */
1777
1778 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1779 match_found = 0;
1780
1781 if (TAILQ_EMPTY(fhead) == 0) {
1782 while (orig_ifp != ifp) {
1783 orig_ifp = ifp;
1784 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
1785 if ((match_found) && (IFILT(tmp).filter_if_input)) {
1786 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
1787 &ifp,
1788 &m,
1789 &frame_header);
1790 if (retval) {
1791 if (retval == EJUSTRETURN)
1792 return 0;
1793 else {
1794 m_freem(m);
1795 return retval;
1796 }
1797 }
1798
1799 }
1800
1801 if (ifp != orig_ifp)
1802 break;
1803
1804 if (from_id == tmp->filter_id)
1805 match_found = 1;
1806 }
1807 }
1808 }
1809
1810 ifp->if_lastchange = time;
1811
1812 /*
1813 * Call family demux module. If the demux module finds a match
1814 * for the frame it will fill-in the ifproto pointer.
1815 */
1816
1817 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
1818
1819 if (m->m_flags & (M_BCAST|M_MCAST))
1820 ifp->if_imcasts++;
1821
1822 if ((retval) && (ifp->offercnt)) {
1823 /*
1824 * No match was found, look for any offers.
1825 */
1826 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
1827 TAILQ_FOREACH(proto, tmp, next) {
1828 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
1829 ifproto = proto;
1830 retval = 0;
1831 break;
1832 }
1833 }
1834 }
1835
1836 if (retval) {
1837 if (retval != EJUSTRETURN) {
1838 m_freem(m);
1839 return retval;
1840 }
1841 else
1842 return 0;
1843 }
1844 else
1845 if (ifproto == 0) {
1846 printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n");
1847 m_freem(m);
1848 return 0;
1849 }
1850
1851 /*
1852 * Call any attached protocol filters.
1853 */
1854 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1855 if (PFILT(tmp).filter_dl_input) {
1856 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1857 &m,
1858 &frame_header,
1859 &ifp);
1860
1861 if (retval) {
1862 if (retval == EJUSTRETURN)
1863 return 0;
1864 else {
1865 m_freem(m);
1866 return retval;
1867 }
1868 }
1869 }
1870 }
1871
1872
1873
1874 retval = (*ifproto->dl_input)(m, frame_header,
1875 ifp, ifproto->dl_tag,
1876 FALSE);
1877
1878 dlil_stats.inject_if_in2++;
1879 if (retval == EJUSTRETURN)
1880 retval = 0;
1881 else
1882 if (retval)
1883 m_freem(m);
1884
1885 return retval;
1886
1887 }
1888
1889
1890
1891
1892
1893 int
1894 dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id)
1895 {
1896 struct ifnet *orig_ifp = 0;
1897 struct dlil_filterq_entry *tmp;
1898 int retval;
1899 struct if_proto *ifproto = 0;
1900 int match_found;
1901 struct ifnet *ifp;
1902
1903 dlil_stats.inject_pr_in1++;
1904 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1905 return ENOENT;
1906
1907 ifproto = dlil_filters[from_id].proto;
1908 ifp = dlil_filters[from_id].ifp;
1909
1910 /*
1911 * Call any attached protocol filters.
1912 */
1913
1914 match_found = 0;
1915 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1916 if ((match_found) && (PFILT(tmp).filter_dl_input)) {
1917 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1918 &m,
1919 &frame_header,
1920 &ifp);
1921
1922 if (retval) {
1923 if (retval == EJUSTRETURN)
1924 return 0;
1925 else {
1926 m_freem(m);
1927 return retval;
1928 }
1929 }
1930 }
1931
1932 if (tmp->filter_id == from_id)
1933 match_found = 1;
1934 }
1935
1936
1937 retval = (*ifproto->dl_input)(m, frame_header,
1938 ifp, ifproto->dl_tag,
1939 FALSE);
1940
1941 if (retval == EJUSTRETURN)
1942 retval = 0;
1943 else
1944 if (retval)
1945 m_freem(m);
1946
1947 dlil_stats.inject_pr_in2++;
1948 return retval;
1949 }
1950
1951
1952
1953 int
1954 dlil_inject_pr_output(struct mbuf *m,
1955 struct sockaddr *dest,
1956 int raw,
1957 char *frame_type,
1958 char *dst_linkaddr,
1959 u_long from_id)
1960 {
1961 struct ifnet *orig_ifp = 0;
1962 struct ifnet *ifp;
1963 struct dlil_filterq_entry *tmp;
1964 int retval = 0;
1965 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1966 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1967 struct dlil_filterq_head *fhead;
1968 int match_found;
1969 u_long dl_tag;
1970
1971 dlil_stats.inject_pr_out1++;
1972 if (raw == 0) {
1973 if (frame_type)
1974 bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4);
1975 else
1976 return EINVAL;
1977
1978 if (dst_linkaddr)
1979 bcopy(dst_linkaddr, &dst_linkaddr_buffer, MAX_LINKADDR * 4);
1980 else
1981 return EINVAL;
1982 }
1983
1984 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1985 return ENOENT;
1986
1987 ifp = dlil_filters[from_id].ifp;
1988 dl_tag = dlil_filters[from_id].proto->dl_tag;
1989
1990 frame_type = frame_type_buffer;
1991 dst_linkaddr = dst_linkaddr_buffer;
1992
1993 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1994
1995 /*
1996 * Run any attached protocol filters.
1997 */
1998 match_found = 0;
1999
2000 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
2001 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
2002 if ((match_found) && (PFILT(tmp).filter_dl_output)) {
2003 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
2004 &m, &ifp, &dest, dst_linkaddr, frame_type);
2005 if (retval) {
2006 if (retval == EJUSTRETURN)
2007 return 0;
2008 else {
2009 m_freem(m);
2010 return retval;
2011 }
2012 }
2013 }
2014
2015 if (tmp->filter_id == from_id)
2016 match_found = 1;
2017 }
2018 }
2019
2020
2021 /*
2022 * Call framing module
2023 */
2024 if ((raw == 0) && (ifp->if_framer)) {
2025 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
2026 if (retval) {
2027 if (retval == EJUSTRETURN)
2028 return 0;
2029 else
2030 {
2031 m_freem(m);
2032 return retval;
2033 }
2034 }
2035 }
2036
2037
2038 #if BRIDGE
2039 if (do_bridge) {
2040 struct mbuf *m0 = m ;
2041 struct ether_header *eh = mtod(m, struct ether_header *);
2042
2043 if (m->m_pkthdr.rcvif)
2044 m->m_pkthdr.rcvif = NULL ;
2045 ifp = bridge_dst_lookup(eh);
2046 bdg_forward(&m0, ifp);
2047 if (m0)
2048 m_freem(m0);
2049
2050 return 0;
2051 }
2052 #endif
2053
2054
2055 /*
2056 * Let interface filters (if any) do their thing ...
2057 */
2058
2059 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
2060 if (TAILQ_EMPTY(fhead) == 0) {
2061 while (orig_ifp != ifp) {
2062 orig_ifp = ifp;
2063 TAILQ_FOREACH(tmp, fhead, que) {
2064 if (IFILT(tmp).filter_if_output) {
2065 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
2066 &ifp,
2067 &m);
2068 if (retval) {
2069 if (retval == EJUSTRETURN)
2070 return 0;
2071 else {
2072 m_freem(m);
2073 return retval;
2074 }
2075 }
2076
2077 }
2078
2079 if (ifp != orig_ifp)
2080 break;
2081 }
2082 }
2083 }
2084
2085 /*
2086 * Finally, call the driver.
2087 */
2088
2089 retval = (*ifp->if_output)(ifp, m);
2090 dlil_stats.inject_pr_out2++;
2091 if ((retval == 0) || (retval == EJUSTRETURN))
2092 return 0;
2093 else
2094 return retval;
2095 }
2096
2097
2098 int
2099 dlil_inject_if_output(struct mbuf *m, u_long from_id)
2100 {
2101 struct ifnet *orig_ifp = 0;
2102 struct ifnet *ifp;
2103 struct dlil_filterq_entry *tmp;
2104 int retval = 0;
2105 struct dlil_filterq_head *fhead;
2106 int match_found;
2107
2108 dlil_stats.inject_if_out1++;
2109 if (from_id > dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
2110 return ENOENT;
2111
2112 ifp = dlil_filters[from_id].ifp;
2113
2114 /*
2115 * Let interface filters (if any) do their thing ...
2116 */
2117
2118 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
2119 match_found = 0;
2120
2121 if (TAILQ_EMPTY(fhead) == 0) {
2122 while (orig_ifp != ifp) {
2123 orig_ifp = ifp;
2124 TAILQ_FOREACH(tmp, fhead, que) {
2125 if ((match_found) && (IFILT(tmp).filter_if_output)) {
2126 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
2127 &ifp,
2128 &m);
2129 if (retval) {
2130 if (retval == EJUSTRETURN)
2131 return 0;
2132 else {
2133 m_freem(m);
2134 return retval;
2135 }
2136 }
2137
2138 }
2139
2140 if (ifp != orig_ifp)
2141 break;
2142
2143 if (from_id == tmp->filter_id)
2144 match_found = 1;
2145 }
2146 }
2147 }
2148
2149 /*
2150 * Finally, call the driver.
2151 */
2152
2153 retval = (*ifp->if_output)(ifp, m);
2154 dlil_stats.inject_if_out2++;
2155 if ((retval == 0) || (retval == EJUSTRETURN))
2156 return 0;
2157 else
2158 return retval;
2159 }
2160
2161 static
2162 int dlil_recycle_ioctl(struct ifnet *ifnet_ptr, u_long ioctl_code, void *ioctl_arg)
2163 {
2164
2165 return EOPNOTSUPP;
2166 }
2167
/*
 * Output handler installed on recycled (released) interfaces: any
 * packet queued to such an interface is silently dropped.
 */
static int
dlil_recycle_output(struct ifnet *ifp, struct mbuf *pkt)
{
    m_freem(pkt);
    return 0;
}
2175
/*
 * Free handler installed on recycled (released) interfaces: nothing
 * needs releasing, so this is a no-op.
 */
static int
dlil_recycle_free(struct ifnet *ifp)
{
    return 0;
}
2181
/*
 * BPF-tap handler installed on recycled (released) interfaces.
 * XXX not sure what to do here -- accepted and ignored.
 */
static int
dlil_recycle_set_bpf_tap(struct ifnet *ifp, int mode,
                         int (*bpf_callback)(struct ifnet *, struct mbuf *))
{
    return 0;
}
2189
2190 int dlil_if_acquire(u_long family, void *uniqueid, size_t uniqueid_len,
2191 struct ifnet **ifp)
2192 {
2193 struct ifnet *ifp1 = NULL;
2194 struct dlil_ifnet *dlifp1 = NULL;
2195 int s, ret = 0;
2196 boolean_t funnel_state;
2197
2198 funnel_state = thread_funnel_set(network_flock, TRUE);
2199 s = splnet();
2200
2201 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2202
2203 ifp1 = (struct ifnet *)dlifp1;
2204
2205 if (ifp1->if_family == family) {
2206
2207 /* same uniqueid and same len or no unique id specified */
2208 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2209 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2210
2211 /* check for matching interface in use */
2212 if (ifp1->if_eflags & IFEF_INUSE) {
2213 if (uniqueid_len) {
2214 ret = EBUSY;
2215 goto end;
2216 }
2217 }
2218 else {
2219
2220 ifp1->if_eflags |= (IFEF_INUSE + IFEF_REUSE);
2221 *ifp = ifp1;
2222 goto end;
2223 }
2224 }
2225 }
2226 }
2227
2228 /* no interface found, allocate a new one */
2229 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2230 if (dlifp1 == 0) {
2231 ret = ENOMEM;
2232 goto end;
2233 }
2234
2235 bzero(dlifp1, sizeof(*dlifp1));
2236
2237 if (uniqueid_len) {
2238 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2239 if (dlifp1->if_uniqueid == 0) {
2240 FREE(dlifp1, M_NKE);
2241 ret = ENOMEM;
2242 goto end;
2243 }
2244 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2245 dlifp1->if_uniqueid_len = uniqueid_len;
2246 }
2247
2248 ifp1 = (struct ifnet *)dlifp1;
2249 ifp1->if_eflags |= IFEF_INUSE;
2250
2251 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2252
2253 *ifp = ifp1;
2254
2255 end:
2256
2257 splx(s);
2258 thread_funnel_set(network_flock, funnel_state);
2259 return ret;
2260 }
2261
2262 void dlil_if_release(struct ifnet *ifp)
2263 {
2264 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2265 int s;
2266 boolean_t funnel_state;
2267
2268 funnel_state = thread_funnel_set(network_flock, TRUE);
2269 s = splnet();
2270
2271 ifp->if_eflags &= ~IFEF_INUSE;
2272 ifp->if_ioctl = dlil_recycle_ioctl;
2273 ifp->if_output = dlil_recycle_output;
2274 ifp->if_free = dlil_recycle_free;
2275 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2276
2277 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2278 ifp->if_name = dlifp->if_namestorage;
2279
2280 splx(s);
2281 thread_funnel_set(network_flock, funnel_state);
2282 }
2283