bsd/net/dlil.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1999 Apple Computer, Inc.
24 *
25 * Data Link Interface Layer
26 * Author: Ted Walker
27 */
28
29
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/mbuf.h>
36 #include <sys/socket.h>
37 #include <net/if_dl.h>
38 #include <net/if.h>
39 #include <net/if_var.h>
40 #include <net/dlil.h>
41 #include <sys/kern_event.h>
42 #include <sys/kdebug.h>
43 #include <string.h>
44
45 #include <kern/thread.h>
46 #include <kern/task.h>
47 #include <net/netisr.h>
48 #include <net/if_types.h>
49
50
51 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
52 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
53 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
54 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
55 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
56
57
58 #define MAX_DL_TAGS 50
59 #define MAX_DLIL_FILTERS 50
60 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
61 #define MAX_LINKADDR 4 /* LONGWORDS */
62 #define M_NKE M_IFADDR
63
64 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
65 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
66
67 struct dl_tag_str {
68 struct ifnet *ifp;
69 struct if_proto *proto;
70 struct dlil_filterq_head *pr_flt_head;
71 };
72
73
74 struct dlil_stats_str {
75 int inject_pr_in1;
76 int inject_pr_in2;
77 int inject_pr_out1;
78 int inject_pr_out2;
79 int inject_if_in1;
80 int inject_if_in2;
81 int inject_if_out1;
82 int inject_if_out2;
83 };
84
85
86 struct dlil_filter_id_str {
87 int type;
88 struct dlil_filterq_head *head;
89 struct dlil_filterq_entry *filter_ptr;
90 struct ifnet *ifp;
91 struct if_proto *proto;
92 };
93
94
95
96 struct if_family_str {
97 TAILQ_ENTRY(if_family_str) if_fam_next;
98 u_long if_family;
99 int refcnt;
100 int flags;
101
102 #define DLIL_SHUTDOWN 1
103
104 int (*add_if)(struct ifnet *ifp);
105 int (*del_if)(struct ifnet *ifp);
106 int (*add_proto)(struct ddesc_head_str *demux_desc_head,
107 struct if_proto *proto, u_long dl_tag);
108 int (*del_proto)(struct if_proto *proto, u_long dl_tag);
109 int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data);
110 int (*shutdown)();
111 };
112
113
114
115 struct dlil_stats_str dlil_stats;
116
117 static
118 struct dlil_filter_id_str dlil_filters[MAX_DLIL_FILTERS+1];
119
120 static
121 struct dl_tag_str dl_tag_array[MAX_DL_TAGS+1];
122
123 static
124 TAILQ_HEAD(, if_family_str) if_family_head;
125
126 static int ifnet_inited = 0;
127
128 int dlil_initialized = 0;
129 decl_simple_lock_data(, dlil_input_lock)
130 int dlil_input_thread_wakeup = 0;
131 int dlil_expand_mcl;
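/*
 * Packet input is deferred to dlil_input_thread.  The four pointers
 * below hold two packet chains (linked through m_nextpkt): one for
 * packets arriving on regular interfaces and a separate one for
 * loopback traffic, which cannot rely on m_pkthdr.rcvif (see the
 * comment in dlil_input()).  Producers append to these chains while
 * holding dlil_input_lock and then wake the thread through
 * dlil_input_thread_wakeup.
 */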
132 static struct mbuf *dlil_input_mbuf_head = NULL;
133 static struct mbuf *dlil_input_mbuf_tail = NULL;
134 static struct mbuf *dlil_input_loop_head = NULL;
135 static struct mbuf *dlil_input_loop_tail = NULL;
136
137 static void dlil_input_thread(void);
138 extern void run_netisr(void);
139
140
141 /*
142 * Internal functions.
143 */
144
145 static
146 struct if_family_str *find_family_module(u_long if_family)
147 {
148 struct if_family_str *mod = NULL;
149
150 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
151 if (mod->if_family == (if_family & 0xffff))
152 break;
153 }
154
155 return mod;
156 }
157
158
159 /*
160 * Public functions.
161 */
162
163 struct ifnet *ifbyfamily(u_long family, short unit)
164 {
165 struct ifnet *ifp;
166
167 TAILQ_FOREACH(ifp, &ifnet, if_link)
168 if ((family == ifp->if_family) &&
169 (ifp->if_unit == unit))
170 return ifp;
171
172 return 0;
173 }
174
175 struct if_proto *dlttoproto(dl_tag)
176 u_long dl_tag;
177 {
178 return dl_tag_array[dl_tag].proto;
179 }
180
181
182 u_long ifptodlt(struct ifnet *ifp, u_long proto_family)
183 {
184 struct if_proto *proto;
185 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
186
187
188 TAILQ_FOREACH(proto, tmp, next)
189 if (proto->ifp == ifp)
190 if (proto->protocol_family == proto_family)
191 return proto->dl_tag;
192
193 return 0;
194 }
195
196
197 int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag)
198 {
199 struct ifnet *ifp;
200
201 ifp = ifbyfamily(if_family, unit);
202 if (!ifp)
203 return ENOENT;
204
205 *dl_tag = ifptodlt(ifp, proto_family);
206 if (*dl_tag == 0)
207 return EPROTONOSUPPORT;
208 else
209 return 0;
210 }
211
212
213 int dlil_get_next_dl_tag(u_long current_tag, struct dl_tag_attr_str *next)
214 {
215 int i;
216
217 for (i = (current_tag+1); i < MAX_DL_TAGS; i++)
218 if (dl_tag_array[i].ifp) {
219 next->dl_tag = i;
220 next->if_flags = dl_tag_array[i].ifp->if_flags;
221 next->if_unit = dl_tag_array[i].ifp->if_unit;
222 next->protocol_family = dl_tag_array[i].proto->protocol_family;
223 next->if_family = dl_tag_array[i].ifp->if_family;
224 return 0;
225 }
226
227 /*
228 * If we got here, there are no more entries
229 */
230
231 return ENOENT;
232 }
233
234
235 void
236 dlil_init()
237 {
238 int i;
239
240 printf("dlil_init\n");
241
242 TAILQ_INIT(&if_family_head);
243 for (i=0; i < MAX_DL_TAGS; i++)
244 dl_tag_array[i].ifp = 0;
245
246 for (i=0; i < MAX_DLIL_FILTERS; i++)
247 dlil_filters[i].type = 0;
248
249 bzero(&dlil_stats, sizeof(dlil_stats));
250
251 simple_lock_init(&dlil_input_lock);
252
253 /*
254 * Start up the dlil input thread once everything is initialized
255 */
256 (void) kernel_thread(kernel_task, dlil_input_thread);
257 }
258
259
260 u_long get_new_filter_id()
261 {
262 u_long i;
263
264 for (i=1; i < MAX_DLIL_FILTERS; i++)
265 if (dlil_filters[i].type == 0)
266 return i;
267
268 return 0;
269 }
270
271
272 int dlil_attach_interface_filter(struct ifnet *ifp,
273 struct dlil_if_flt_str *if_filter,
274 u_long *filter_id,
275 int insertion_point)
276 {
277 int s;
278 int retval;
279 struct dlil_filterq_entry *tmp_ptr;
280 struct dlil_filterq_entry *if_filt;
281 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
282 boolean_t funnel_state;
283
284
285 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
286 bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter,
287 sizeof(struct dlil_if_flt_str));
288
289 funnel_state = thread_funnel_set(network_flock, TRUE);
290
291 s = splnet();
292
293 if (insertion_point != DLIL_LAST_FILTER) {
294 TAILQ_FOREACH(if_filt, fhead, que)
295 if (insertion_point == if_filt->filter_id) {
296 TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que);
297 break;
298 }
299 }
300 else
301 TAILQ_INSERT_TAIL(fhead, tmp_ptr, que);
302
303 if ((*filter_id = get_new_filter_id()) != 0) {
304 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
305 dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head;
306 dlil_filters[*filter_id].type = DLIL_IF_FILTER;
307 dlil_filters[*filter_id].ifp = ifp;
308 tmp_ptr->filter_id = *filter_id;
309 tmp_ptr->type = DLIL_IF_FILTER;
310 retval = 0;
311 }
312 else {
313 kprintf("dlil_attach_interface_filter - can't alloc filter_id\n");
314 TAILQ_REMOVE(fhead, tmp_ptr, que);
315 FREE(tmp_ptr, M_NKE);
316 retval = ENOMEM;
317 }
318
319 splx(s);
320 thread_funnel_set(network_flock, funnel_state);
321 return retval;
322 }
323
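/*
 * Usage sketch (illustrative only; my_if_input and my_cookie are
 * hypothetical caller-supplied names): an NKE that already holds an
 * ifp can attach an interface filter roughly as follows, and later
 * remove it with dlil_detach_filter().  The filter struct is copied,
 * so a stack-local one is sufficient.
 *
 *	struct dlil_if_flt_str flt;
 *	u_long my_filter_id;
 *
 *	bzero(&flt, sizeof(flt));
 *	flt.cookie          = my_cookie;
 *	flt.filter_if_input = my_if_input;
 *	if (dlil_attach_interface_filter(ifp, &flt, &my_filter_id,
 *	                                 DLIL_LAST_FILTER) == 0) {
 *		filter is now in the interface's filter queue
 *	}
 */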
324
325 int dlil_attach_protocol_filter(u_long dl_tag,
326 struct dlil_pr_flt_str *pr_filter,
327 u_long *filter_id,
328 int insertion_point)
329 {
330 struct dlil_filterq_entry *tmp_ptr;
331 struct dlil_filterq_entry *pr_filt;
332 int s;
333 int retval;
334 boolean_t funnel_state;
335
336 if (dl_tag > MAX_DL_TAGS)
337 return ERANGE;
338
339 if (dl_tag_array[dl_tag].ifp == 0)
340 return ENOENT;
341
342 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
343 bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter,
344 sizeof(struct dlil_pr_flt_str));
345
346 funnel_state = thread_funnel_set(network_flock, TRUE);
347
348 s = splnet();
349 if (insertion_point != DLIL_LAST_FILTER) {
350 TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que)
351 if (insertion_point == pr_filt->filter_id) {
352 TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que);
353 break;
354 }
355 }
356 else
357 TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que);
358
359
360 if ((*filter_id = get_new_filter_id()) != 0) {
361 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
362 dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head;
363 dlil_filters[*filter_id].type = DLIL_PR_FILTER;
364 dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto;
365 dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp;
366 tmp_ptr->filter_id = *filter_id;
367 tmp_ptr->type = DLIL_PR_FILTER;
368 retval = 0;
369 }
370 else {
371 kprintf("dlil_attach_protocol_filter - can't alloc filter_id\n");
372 TAILQ_REMOVE(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que);
373 FREE(tmp_ptr, M_NKE);
374 retval = ENOMEM;
375 }
376
377 splx(s);
378 thread_funnel_set(network_flock, funnel_state);
379 return retval;
380 }
381
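/*
 * Usage sketch (illustrative only; my_dl_input and my_cookie are
 * hypothetical caller-supplied names): a protocol filter is keyed by
 * the dl_tag of an attached protocol, which can be looked up with
 * dlil_find_dltag().
 *
 *	struct dlil_pr_flt_str pr_flt;
 *	u_long dl_tag, my_filter_id;
 *
 *	if (dlil_find_dltag(if_family, unit, proto_family, &dl_tag) == 0) {
 *		bzero(&pr_flt, sizeof(pr_flt));
 *		pr_flt.cookie          = my_cookie;
 *		pr_flt.filter_dl_input = my_dl_input;
 *		(void) dlil_attach_protocol_filter(dl_tag, &pr_flt,
 *		                                   &my_filter_id,
 *		                                   DLIL_LAST_FILTER);
 *	}
 */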
382
383 int
384 dlil_detach_filter(u_long filter_id)
385 {
386 struct dlil_filter_id_str *flt;
387 int s;
388 boolean_t funnel_state;
389
390 if (filter_id > MAX_DLIL_FILTERS) {
391 kprintf("dlil_detach_filter - Bad filter_id value %d\n", filter_id);
392 return ERANGE;
393 }
394
395 funnel_state = thread_funnel_set(network_flock, TRUE);
396 s = splnet();
397 flt = &dlil_filters[filter_id];
398 if (flt->type == 0) {
399 kprintf("dlil_detach_filter - no such filter_id %d\n", filter_id);
400 splx(s); thread_funnel_set(network_flock, funnel_state);
401 return ENOENT;
402 }
403
404
405 if (flt->type == DLIL_IF_FILTER) {
406 if (IFILT(flt->filter_ptr).filter_detach)
407 (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie);
408 }
409 else {
410 if (flt->type == DLIL_PR_FILTER) {
411 if (PFILT(flt->filter_ptr).filter_detach)
412 (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie);
413 }
414 }
415
416 TAILQ_REMOVE(flt->head, flt->filter_ptr, que);
417 FREE(flt->filter_ptr, M_NKE);
418 flt->type = 0;
419 splx(s);
420 thread_funnel_set(network_flock, funnel_state);
421 return 0;
422 }
423
424
425 void
426 dlil_input_thread_continue(void)
427 {
428 while (1) {
429 struct mbuf *m, *m_loop;
430 int expand_mcl;
431
432 simple_lock(&dlil_input_lock);
433 m = dlil_input_mbuf_head;
434 dlil_input_mbuf_head = NULL;
435 dlil_input_mbuf_tail = NULL;
436 m_loop = dlil_input_loop_head;
437 dlil_input_loop_head = NULL;
438 dlil_input_loop_tail = NULL;
439 simple_unlock(&dlil_input_lock);
440
441 MBUF_LOCK();
442 expand_mcl = dlil_expand_mcl;
443 dlil_expand_mcl = 0;
444 MBUF_UNLOCK();
445 if (expand_mcl) {
446 caddr_t p;
447 MCLALLOC(p, M_WAIT);
448 MCLFREE(p);
449 }
450
451 /*
452 * NOTE (needs attention):
453 * we should think about adding thread starvation safeguards if
454 * we have to deal with long chains of packets.
455 */
456 while (m) {
457 struct mbuf *m0 = m->m_nextpkt;
458 void *header = m->m_pkthdr.header;
459
460 m->m_nextpkt = NULL;
461 m->m_pkthdr.header = NULL;
462 (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
463 m = m0;
464 }
465 m = m_loop;
466 while (m) {
467 struct mbuf *m0 = m->m_nextpkt;
468 void *header = m->m_pkthdr.header;
469 struct ifnet *ifp = (struct ifnet *) m->m_pkthdr.aux;
470
471 m->m_nextpkt = NULL;
472 m->m_pkthdr.header = NULL;
473 m->m_pkthdr.aux = NULL;
474 (void) dlil_input_packet(ifp, m, header);
475 m = m0;
476 }
477
478 if (netisr != 0)
479 run_netisr();
480
481 if (dlil_input_mbuf_head == NULL &&
482 dlil_input_loop_head == NULL &&
483 netisr == 0) {
484 assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
485 #if defined (__i386__)
486 thread_block(0);
487 #else
488 thread_block(dlil_input_thread_continue);
489 #endif
490 /* NOTREACHED */
491 }
492 }
493 }
494
495 void dlil_input_thread(void)
496 {
497 register thread_t self = current_thread();
498 extern void stack_privilege(thread_t thread);
499
500 printf("dlil_input_thread %x\n", self);
501
502 /*
503 * Make sure that this thread
504 * always has a kernel stack, and
505 * bind it to the master cpu.
506 */
507 stack_privilege(self);
508
509 /* The dlil thread is always funneled */
510 thread_funnel_set(network_flock, TRUE);
511 dlil_initialized = 1;
512 dlil_input_thread_continue();
513 }
514
515 int
516 dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
517 {
518 /* WARNING
519 * Because of looped-back multicast we cannot stuff the ifp in
520 * the rcvif of the packet header: loopback has its own dlil
521 * input queue
522 */
523
524 simple_lock(&dlil_input_lock);
525 if (ifp->if_type != IFT_LOOP) {
526 if (dlil_input_mbuf_head == NULL)
527 dlil_input_mbuf_head = m_head;
528 else if (dlil_input_mbuf_tail != NULL)
529 dlil_input_mbuf_tail->m_nextpkt = m_head;
530 dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
531 } else {
532 if (dlil_input_loop_head == NULL)
533 dlil_input_loop_head = m_head;
534 else if (dlil_input_loop_tail != NULL)
535 dlil_input_loop_tail->m_nextpkt = m_head;
536 dlil_input_loop_tail = m_tail ? m_tail : m_head;
537 }
538 simple_unlock(&dlil_input_lock);
539
540 wakeup((caddr_t)&dlil_input_thread_wakeup);
541
542 return 0;
543 }
544
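/*
 * Usage sketch (illustrative only): a non-loopback driver queues
 * received packets by setting m_pkthdr.rcvif and m_pkthdr.header
 * (the link-layer header pointer) on each mbuf, chaining packets
 * through m_nextpkt, and handing the chain to dlil_input();
 * dlil_input_thread later calls dlil_input_packet() for each one.
 *
 *	m->m_pkthdr.rcvif  = ifp;
 *	m->m_pkthdr.header = frame_header;
 *	(void) dlil_input(ifp, m, m);		single-packet chain
 */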
545 int
546 dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
547 char *frame_header)
548 {
549 struct ifnet *orig_ifp = 0;
550 struct dlil_filterq_entry *tmp;
551 int retval;
552 struct if_proto *ifproto = 0;
553 struct if_proto *proto;
554 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
555
556
557 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);
558
559 /*
560 * Run interface filters
561 */
562
563 while (orig_ifp != ifp) {
564 orig_ifp = ifp;
565
566 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
567 if (IFILT(tmp).filter_if_input) {
568 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
569 &ifp,
570 &m,
571 &frame_header);
572 if (retval) {
573 if (retval == EJUSTRETURN)
574 return 0;
575 else {
576 m_freem(m);
577 return retval;
578 }
579 }
580 }
581
582 if (ifp != orig_ifp)
583 break;
584 }
585 }
586
587 ifp->if_lastchange = time;
588
589 /*
590 * Call family demux module. If the demux module finds a match
591 * for the frame it will fill-in the ifproto pointer.
592 */
593
594 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
595
596 if (m->m_flags & (M_BCAST|M_MCAST))
597 ifp->if_imcasts++;
598
599 if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) {
600 /*
601 * No match was found, look for any offers.
602 */
603 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
604 TAILQ_FOREACH(proto, tmp, next) {
605 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
606 ifproto = proto;
607 retval = 0;
608 break;
609 }
610 }
611 }
612
613 if (retval) {
614 if (retval != EJUSTRETURN) {
615 m_freem(m);
616 return retval;
617 }
618 else
619 return 0;
620 }
621 else
622 if (ifproto == 0) {
623 printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
624 m_freem(m);
625 return 0;
626 }
627
628 /*
629 * Call any attached protocol filters.
630 */
631
632 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
633 if (PFILT(tmp).filter_dl_input) {
634 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
635 &m,
636 &frame_header,
637 &ifp);
638
639 if (retval) {
640 if (retval == EJUSTRETURN)
641 return 0;
642 else {
643 m_freem(m);
644 return retval;
645 }
646 }
647 }
648 }
649
650
651
652 retval = (*ifproto->dl_input)(m, frame_header,
653 ifp, ifproto->dl_tag,
654 TRUE);
655
656 if (retval == EJUSTRETURN)
657 retval = 0;
658 else
659 if (retval)
660 m_freem(m);
661
662 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
663 return retval;
664 }
665
666
667
668 void ether_input(ifp, eh, m)
669 struct ifnet *ifp;
670 struct ether_header *eh;
671 struct mbuf *m;
672
673 {
674 kprintf("Someone is calling ether_input!!\n");
675
676 dlil_input(ifp, m, NULL);
677 }
678
679
680 int
681 dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
682 {
683 struct dlil_filterq_entry *filt;
684 int retval = 0;
685 struct ifnet *orig_ifp = 0;
686 struct if_proto *proto;
687 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
688 struct kev_msg kev_msg;
689 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
690 boolean_t funnel_state;
691
692
693 funnel_state = thread_funnel_set(network_flock, TRUE);
694
695 while (orig_ifp != ifp) {
696 orig_ifp = ifp;
697
698 TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) {
699 if (IFILT(filt).filter_if_event) {
700 retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie,
701 &ifp,
702 &event);
703
704 if (retval) {
705 (void) thread_funnel_set(network_flock, funnel_state);
706 if (retval == EJUSTRETURN)
707 return 0;
708 else
709 return retval;
710 }
711 }
712
713 if (ifp != orig_ifp)
714 break;
715 }
716 }
717
718
719 /*
720 * Call Interface Module event hook, if any.
721 */
722
723 if (ifp->if_event) {
724 retval = ifp->if_event(ifp, (caddr_t) event);
725
726 if (retval) {
727 (void) thread_funnel_set(network_flock, funnel_state);
728
729 if (retval == EJUSTRETURN)
730 return 0;
731 else
732 return retval;
733 }
734 }
735
736 /*
737 * Call dl_event entry point for all protocols attached to this interface
738 */
739
740 TAILQ_FOREACH(proto, tmp, next) {
741 /*
742 * Call any attached protocol filters.
743 */
744
745 TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) {
746 if (PFILT(filt).filter_dl_event) {
747 retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie,
748 event);
749
750 if (retval) {
751 (void) thread_funnel_set(network_flock, funnel_state);
752 if (retval == EJUSTRETURN)
753 return 0;
754 else
755 return retval;
756 }
757 }
758 }
759
760
761 /*
762 * Finally, call the dl_event entry point (if any)
763 */
764
765 if (proto->dl_event)
766 retval = (*proto->dl_event)(event, proto->dl_tag);
767
768 if (retval == EJUSTRETURN) {
769 (void) thread_funnel_set(network_flock, funnel_state);
770 return 0;
771 }
772 }
773
774
775 /*
776 * Now, post this event to the Kernel Event message queue
777 */
778
779 kev_msg.vendor_code = event->vendor_code;
780 kev_msg.kev_class = event->kev_class;
781 kev_msg.kev_subclass = event->kev_subclass;
782 kev_msg.event_code = event->event_code;
783 kev_msg.dv[0].data_ptr = &event->event_data[0];
784 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
785 kev_msg.dv[1].data_length = 0;
786
787 kev_post_msg(&kev_msg);
788
789 (void) thread_funnel_set(network_flock, funnel_state);
790 return 0;
791 }
792
793
794
795 int
796 dlil_output(u_long dl_tag,
797 struct mbuf *m,
798 caddr_t route,
799 struct sockaddr *dest,
800 int raw
801 )
802 {
803 char *frame_type;
804 char *dst_linkaddr;
805 struct ifnet *orig_ifp = 0;
806 struct ifnet *ifp;
807 struct if_proto *proto;
808 struct dlil_filterq_entry *tmp;
809 int retval = 0;
810 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
811 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
812 struct dlil_filterq_head *fhead;
813
814
815 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
816
817 /*
818 * Temporary hackery until all the existing protocols can become fully
819 * "dl_tag aware". Some things only have the ifp, so this handles that
820 * case for the time being.
821 */
822
823 if (dl_tag > MAX_DL_TAGS) {
824 /* dl_tag is really an ifnet pointer! */
825
826 ifp = (struct ifnet *) dl_tag;
827 dl_tag = ifp->if_data.default_proto;
828 if (dl_tag)
829 proto = dl_tag_array[dl_tag].proto;
830 else
831 retval = ENOENT;
832 }
833 else {
834 if ((dl_tag == 0) || (dl_tag_array[dl_tag].ifp == 0))
835 retval = ENOENT;
836 else {
837 ifp = dl_tag_array[dl_tag].ifp;
838 proto = dl_tag_array[dl_tag].proto;
839 }
840 }
841
842 if (retval) {
843 m_freem(m);
844 return retval;
845 }
846
847 frame_type = frame_type_buffer;
848 dst_linkaddr = dst_linkaddr_buffer;
849
850 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
851
852 if ((raw == 0) && (proto->dl_pre_output)) {
853 retval = (*proto->dl_pre_output)(ifp, &m, dest, route,
854 frame_type, dst_linkaddr, dl_tag);
855 if (retval) {
856 if (retval == EJUSTRETURN)
857 return 0;
858 else {
859 m_freem(m);
860 return retval;
861 }
862 }
863 }
864
865 /*
866 * Run any attached protocol filters.
867 */
868
869 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
870 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
871 if (PFILT(tmp).filter_dl_output) {
872 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
873 &m, &ifp, &dest, dst_linkaddr, frame_type);
874 if (retval) {
875 if (retval == EJUSTRETURN)
876 return 0;
877 else {
878 m_freem(m);
879 return retval;
880 }
881 }
882 }
883 }
884 }
885
886
887 /*
888 * Call framing module
889 */
890 if ((raw == 0) && (ifp->if_framer)) {
891 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
892 if (retval) {
893 if (retval == EJUSTRETURN)
894 return 0;
895 else
896 {
897 m_freem(m);
898 return retval;
899 }
900 }
901 }
902
903 #if BRIDGE
904 if (do_bridge) {
905 struct mbuf *m0 = m ;
906
907 if (m->m_pkthdr.rcvif)
908 m->m_pkthdr.rcvif = NULL ;
909 ifp = bridge_dst_lookup(m);
910 bdg_forward(&m0, ifp);
911 if (m0)
912 m_freem(m0);
913
914 return 0;
915 }
916 #endif
917
918
919 /*
920 * Let interface filters (if any) do their thing ...
921 */
922
923 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
924 if (TAILQ_EMPTY(fhead) == 0) {
925 while (orig_ifp != ifp) {
926 orig_ifp = ifp;
927 TAILQ_FOREACH(tmp, fhead, que) {
928 if (IFILT(tmp).filter_if_output) {
929 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
930 &ifp,
931 &m);
932 if (retval) {
933 if (retval == EJUSTRETURN)
934 return 0;
935 else {
936 m_freem(m);
937 return retval;
938 }
939 }
940
941 }
942
943 if (ifp != orig_ifp)
944 break;
945 }
946 }
947 }
948
949 /*
950 * Finally, call the driver.
951 */
952
953 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
954 retval = (*ifp->if_output)(ifp, m);
955 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
956
957 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
958
959 if ((retval == 0) || (retval == EJUSTRETURN))
960 return 0;
961 else
962 return retval;
963 }
964
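/*
 * Usage sketch (illustrative only; "rt" stands for whatever route
 * state the protocol's dl_pre_output expects and "dest" is the
 * destination sockaddr): a protocol that attached with
 * dlil_attach_protocol() transmits through its dl_tag.  With raw == 0
 * the protocol's dl_pre_output hook, any protocol and interface
 * filters, and the interface framer all run before the driver's
 * if_output is called.
 *
 *	retval = dlil_output(dl_tag, m, (caddr_t) rt, dest, 0);
 */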
965
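/*
 * dlil_ioctl() tries, in order: the protocol filters and the
 * protocol's dl_ioctl for proto_fam (when proto_fam is non-zero),
 * then the interface filters, then the family module's ifmod_ioctl,
 * and finally the driver's if_ioctl.  EJUSTRETURN from any hook ends
 * processing and reports success; EOPNOTSUPP from the protocol or
 * family module lets the later stages try.
 */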
966 int
967 dlil_ioctl(u_long proto_fam,
968 struct ifnet *ifp,
969 u_long ioctl_code,
970 caddr_t ioctl_arg)
971 {
972 struct dlil_filterq_entry *tmp;
973 struct dlil_filterq_head *fhead;
974 int retval = EOPNOTSUPP;
975 int retval2 = EOPNOTSUPP;
976 u_long dl_tag;
977 struct if_family_str *if_family;
978
979
980 if (proto_fam) {
981 retval = dlil_find_dltag(ifp->if_family, ifp->if_unit,
982 proto_fam, &dl_tag);
983
984 if (retval == 0) {
985 if (dl_tag_array[dl_tag].ifp != ifp)
986 return ENOENT;
987
988 /*
989 * Run any attached protocol filters.
990 */
991 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
992 if (PFILT(tmp).filter_dl_ioctl) {
993 retval =
994 (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie,
995 dl_tag_array[dl_tag].ifp,
996 ioctl_code,
997 ioctl_arg);
998
999 if (retval) {
1000 if (retval == EJUSTRETURN)
1001 return 0;
1002 else
1003 return retval;
1004 }
1005 }
1006 }
1007
1008 if (dl_tag_array[dl_tag].proto->dl_ioctl)
1009 retval =
1010 (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag,
1011 dl_tag_array[dl_tag].ifp,
1012 ioctl_code,
1013 ioctl_arg);
1014 else
1015 retval = EOPNOTSUPP;
1016 }
1017 else
1018 retval = 0;
1019 }
1020
1021 if ((retval) && (retval != EOPNOTSUPP)) {
1022 if (retval == EJUSTRETURN)
1023 return 0;
1024 else
1025 return retval;
1026 }
1027
1028
1029 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1030 TAILQ_FOREACH(tmp, fhead, que) {
1031 if (IFILT(tmp).filter_if_ioctl) {
1032 retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp,
1033 ioctl_code, ioctl_arg);
1034 if (retval2) {
1035 if (retval2 == EJUSTRETURN)
1036 return 0;
1037 else
1038 return retval2;
1039 }
1040 }
1041 }
1042
1043
1044 if_family = find_family_module(ifp->if_family);
1045 if ((if_family) && (if_family->ifmod_ioctl)) {
1046 retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1047
1048 if ((retval2) && (retval2 != EOPNOTSUPP)) {
1049 if (retval2 == EJUSTRETURN)
1050 return 0;
1051 else
1052 return retval2;
1053 }
1054
1055 if (retval == EOPNOTSUPP)
1056 retval = retval2;
1057 }
1058
1059 if (ifp->if_ioctl)
1060 retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1061
1062 if (retval == EOPNOTSUPP)
1063 return retval2;
1064 else {
1065 if (retval2 == EOPNOTSUPP)
1066 return 0;
1067 else
1068 return retval2;
1069 }
1070 }
1071
1072
1073 int
1074 dlil_attach_protocol(struct dlil_proto_reg_str *proto,
1075 u_long *dl_tag)
1076 {
1077 struct ifnet *ifp;
1078 struct if_proto *ifproto;
1079 u_long i;
1080 struct if_family_str *if_family;
1081 int error;
1082 struct dlil_proto_head *tmp;
1083 int s;
1084 boolean_t funnel_state;
1085
1086
1087 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1088 return EINVAL;
1089
1090 funnel_state = thread_funnel_set(network_flock, TRUE);
1091 s = splnet();
1092 if_family = find_family_module(proto->interface_family);
1093 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1094 kprintf("dlil_attach_protocol -- no interface family module %d",
1095 proto->interface_family);
1096 splx(s);
1097 thread_funnel_set(network_flock, funnel_state);
1098 return ENOENT;
1099 }
1100
1101 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1102 if (!ifp) {
1103 kprintf("dlil_attach_protocol -- no such interface %d unit %d\n",
1104 proto->interface_family, proto->unit_number);
1105 splx(s);
1106 thread_funnel_set(network_flock, funnel_state);
1107 return ENOENT;
1108 }
1109
1110 if (dlil_find_dltag(proto->interface_family, proto->unit_number,
1111 proto->protocol_family, &i) == 0) {
1112 splx(s); thread_funnel_set(network_flock, funnel_state);
1113 return EEXIST;
1114 }
1115
1116 for (i=1; i < MAX_DL_TAGS; i++)
1117 if (dl_tag_array[i].ifp == 0)
1118 break;
1119
1120 if (i >= MAX_DL_TAGS) {
1121 splx(s);
1122 thread_funnel_set(network_flock, funnel_state);
1123 return ENOMEM;
1124 }
1125
1126 /*
1127 * Allocate and init a new if_proto structure
1128 */
1129
1130 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1131 if (!ifproto) {
1132 printf("ERROR - DLIL failed if_proto allocation\n");
1133 splx(s); thread_funnel_set(network_flock, funnel_state);
1134 return ENOMEM;
1135 }
1136
1137 bzero(ifproto, sizeof(struct if_proto));
1138
1139 dl_tag_array[i].ifp = ifp;
1140 dl_tag_array[i].proto = ifproto;
1141 dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head;
1142 ifproto->dl_tag = i;
1143 *dl_tag = i;
1144
1145 if (proto->default_proto) {
1146 if (ifp->if_data.default_proto == 0)
1147 ifp->if_data.default_proto = i;
1148 else
1149 printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n");
1150 }
1151
1152 ifproto->protocol_family = proto->protocol_family;
1153 ifproto->dl_input = proto->input;
1154 ifproto->dl_pre_output = proto->pre_output;
1155 ifproto->dl_event = proto->event;
1156 ifproto->dl_offer = proto->offer;
1157 ifproto->dl_ioctl = proto->ioctl;
1158 ifproto->ifp = ifp;
1159 TAILQ_INIT(&ifproto->pr_flt_head);
1160
1161 /*
1162 * Call family module add_proto routine so it can refine the
1163 * demux descriptors as it wishes.
1164 */
1165 error = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag);
1166 if (error) {
1167 dl_tag_array[*dl_tag].ifp = 0;
1168 FREE(ifproto, M_IFADDR);
1169 splx(s);
1170 thread_funnel_set(network_flock, funnel_state);
1171 return error;
1172 }
1173
1174
1175 /*
1176 * Add to if_proto list for this interface
1177 */
1178
1179 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1180 TAILQ_INSERT_TAIL(tmp, ifproto, next);
1181 ifp->refcnt++;
1182 if (ifproto->dl_offer)
1183 ifp->offercnt++;
1184
1185 splx(s);
1186 thread_funnel_set(network_flock, funnel_state);
1187 return 0;
1188 }
1189
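/*
 * Usage sketch (illustrative only; my_proto_input and
 * my_proto_pre_output are hypothetical): a protocol registers against
 * an attached interface by filling in a dlil_proto_reg_str and keeping
 * the returned dl_tag for later calls to dlil_output() and
 * dlil_detach_protocol().  The family-specific demux descriptors go in
 * reg.demux_desc_head (omitted here).
 *
 *	struct dlil_proto_reg_str reg;
 *	u_long dl_tag;
 *
 *	bzero(&reg, sizeof(reg));
 *	reg.interface_family = if_family;
 *	reg.unit_number      = unit;
 *	reg.protocol_family  = proto_family;
 *	reg.input            = my_proto_input;
 *	reg.pre_output       = my_proto_pre_output;
 *	retval = dlil_attach_protocol(&reg, &dl_tag);
 */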
1190
1191
1192 int
1193 dlil_detach_protocol(u_long dl_tag)
1194 {
1195 struct ifnet *ifp;
1196 struct ifnet *orig_ifp=0;
1197 struct if_proto *proto;
1198 struct dlil_proto_head *tmp;
1199 struct if_family_str *if_family;
1200 struct dlil_filterq_entry *filter;
1201 int s, retval;
1202 struct dlil_filterq_head *fhead;
1203 struct kev_msg ev_msg;
1204 struct net_event_data ev_data;
1205 boolean_t funnel_state;
1206
1207
1208 if (dl_tag > MAX_DL_TAGS)
1209 return ERANGE;
1210
1211 funnel_state = thread_funnel_set(network_flock, TRUE);
1212
1213 s = splnet();
1214 if (dl_tag_array[dl_tag].ifp == 0) {
1215 splx(s);
1216 thread_funnel_set(network_flock, funnel_state);
1217 return ENOENT;
1218 }
1219
1220 ifp = dl_tag_array[dl_tag].ifp;
1221 proto = dl_tag_array[dl_tag].proto;
1222
1223 if_family = find_family_module(ifp->if_family);
1224 if (if_family == NULL) {
1225 splx(s);
1226 thread_funnel_set(network_flock, funnel_state);
1227 return ENOENT;
1228 }
1229
1230 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1231
1232 /*
1233 * Call family module del_proto
1234 */
1235
1236 (*if_family->del_proto)(proto, dl_tag);
1237
1238
1239 /*
1240 * Remove and deallocate any attached protocol filters
1241 */
1242
1243 while ((filter = TAILQ_FIRST(&proto->pr_flt_head)) != NULL)
1244 dlil_detach_filter(filter->filter_id);
1245
1246 if (proto->dl_offer)
1247 ifp->offercnt--;
1248
1249 dl_tag_array[dl_tag].ifp = 0;
1250
1251 TAILQ_REMOVE(tmp, proto, next);
1252 FREE(proto, M_IFADDR);
1253
1254 if (--ifp->refcnt == 0) {
1255 if (ifp->if_flags & IFF_UP)
1256 printf("WARNING - dlil_detach_protocol - ifp refcnt 0, but IF still up\n");
1257
1258 TAILQ_REMOVE(&ifnet, ifp, if_link);
1259
1260 (*if_family->del_if)(ifp);
1261
1262 if (--if_family->refcnt == 0) {
1263 if (if_family->shutdown)
1264 (*if_family->shutdown)();
1265
1266 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1267 FREE(if_family, M_IFADDR);
1268 }
1269
1270 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1271 while (orig_ifp != ifp) {
1272 orig_ifp = ifp;
1273
1274 TAILQ_FOREACH(filter, fhead, que) {
1275 if (IFILT(filter).filter_if_free) {
1276 retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp);
1277 if (retval) {
1278 splx(s);
1279 thread_funnel_set(network_flock, funnel_state);
1280 return 0;
1281 }
1282 }
1283 if (ifp != orig_ifp)
1284 break;
1285 }
1286 }
1287
1288 (*ifp->if_free)(ifp);
1289
1290 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1291 ev_msg.kev_class = KEV_NETWORK_CLASS;
1292 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1293
1294 ev_msg.event_code = KEV_DL_IF_DETACHED;
1295 strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1296 ev_data.if_family = ifp->if_family;
1297 ev_data.if_unit = (unsigned long) ifp->if_unit;
1298
1299 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1300 ev_msg.dv[0].data_ptr = &ev_data;
1301 ev_msg.dv[1].data_length = 0;
1302
1303 kev_post_msg(&ev_msg);
1304 }
1305
1306 splx(s);
1307 thread_funnel_set(network_flock, funnel_state);
1308 return 0;
1309 }
1310
1311
1312
1313
1314
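/*
 * Interface attach/detach.  A driver fills in its ifnet (if_family,
 * if_unit, if_output, if_demux, if_framer, if_ioctl, if_free, ...)
 * and calls dlil_if_attach(); the registered family module's add_if
 * completes the ifnet setup.  dlil_if_detach() returns
 * DLIL_WAIT_FOR_FREE while protocols are still attached; final
 * teardown then happens when the last protocol detaches (see
 * dlil_detach_protocol above).
 */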
1315 int
1316 dlil_if_attach(struct ifnet *ifp)
1317 {
1318 u_long interface_family = ifp->if_family;
1319 struct if_family_str *if_family;
1320 struct dlil_proto_head *tmp;
1321 int stat;
1322 int s;
1323 struct kev_msg ev_msg;
1324 struct net_event_data ev_data;
1325 boolean_t funnel_state;
1326
1327 funnel_state = thread_funnel_set(network_flock, TRUE);
1328 s = splnet();
1329 if (ifnet_inited == 0) {
1330 TAILQ_INIT(&ifnet);
1331 ifnet_inited = 1;
1332 }
1333
1334 if_family = find_family_module(interface_family);
1335
1336 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1337 splx(s);
1338 kprintf("Attempt to attach interface without family module - %d\n",
1339 interface_family);
1340 thread_funnel_set(network_flock, funnel_state);
1341 return ENODEV;
1342 }
1343
1344
1345 /*
1346 * Call the family module to fill in the appropriate fields in the
1347 * ifnet structure.
1348 */
1349
1350 stat = (*if_family->add_if)(ifp);
1351 if (stat) {
1352 splx(s);
1353 kprintf("dlil_if_attach -- add_if failed with %d\n", stat);
1354 thread_funnel_set(network_flock, funnel_state);
1355 return stat;
1356 }
1357
1358 /*
1359 * Add the ifp to the interface list.
1360 */
1361
1362 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1363 TAILQ_INIT(tmp);
1364
1365 ifp->if_data.default_proto = 0;
1366 ifp->refcnt = 1;
1367 ifp->offercnt = 0;
1368 TAILQ_INIT(&ifp->if_flt_head);
1369 old_if_attach(ifp);
1370 if_family->refcnt++;
1371
1372 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1373 ev_msg.kev_class = KEV_NETWORK_CLASS;
1374 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1375
1376 ev_msg.event_code = KEV_DL_IF_ATTACHED;
1377 strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1378 ev_data.if_family = ifp->if_family;
1379 ev_data.if_unit = (unsigned long) ifp->if_unit;
1380
1381 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1382 ev_msg.dv[0].data_ptr = &ev_data;
1383
1384 ev_msg.dv[1].data_length = 0;
1385
1386 kev_post_msg(&ev_msg);
1387
1388 splx(s);
1389 thread_funnel_set(network_flock, funnel_state);
1390 return 0;
1391 }
1392
1393
1394 int
1395 dlil_if_detach(struct ifnet *ifp)
1396 {
1397 struct if_proto *proto;
1398 struct dlil_filterq_entry *if_filter;
1399 struct if_family_str *if_family;
1400 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1401 int s;
1402 struct kev_msg ev_msg;
1403 struct net_event_data ev_data;
1404 boolean_t funnel_state;
1405
1406 funnel_state = thread_funnel_set(network_flock, TRUE);
1407 s = splnet();
1408 if (ifp->if_flags & IFF_UP)
1409 printf("WARNING - dlil_if_detach called for UP interface\n");
1410
1411 if_family = find_family_module(ifp->if_family);
1412
1413 if (!if_family) {
1414 kprintf("Attempt to detach interface without family module - %s\n",
1415 ifp->if_name);
1416 splx(s);
1417 thread_funnel_set(network_flock, funnel_state);
1418 return ENODEV;
1419 }
1420
1421 while ((if_filter = TAILQ_FIRST(fhead)) != NULL)
1422 dlil_detach_filter(if_filter->filter_id);
1423
1424 if (--ifp->refcnt == 0) {
1425 TAILQ_REMOVE(&ifnet, ifp, if_link);
1426
1427 (*if_family->del_if)(ifp);
1428
1429 if (--if_family->refcnt == 0) {
1430 if (if_family->shutdown)
1431 (*if_family->shutdown)();
1432
1433 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1434 FREE(if_family, M_IFADDR);
1435 }
1436
1437 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1438 ev_msg.kev_class = KEV_NETWORK_CLASS;
1439 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1440
1441 ev_msg.event_code = KEV_DL_IF_DETACHED;
1442 strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1443 ev_data.if_family = ifp->if_family;
1444 ev_data.if_unit = (unsigned long) ifp->if_unit;
1445
1446 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1447 ev_msg.dv[0].data_ptr = &ev_data;
1448
1449 ev_msg.dv[1].data_length = 0;
1450 kev_post_msg(&ev_msg);
1451 splx(s);
1452 thread_funnel_set(network_flock, funnel_state);
1453 return 0;
1454 }
1455 else
1456 {
1457 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1458 ev_msg.kev_class = KEV_NETWORK_CLASS;
1459 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1460
1461 ev_msg.event_code = KEV_DL_IF_DETACHING;
1462 strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
1463 ev_data.if_family = ifp->if_family;
1464 ev_data.if_unit = (unsigned long) ifp->if_unit;
1465 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1466 ev_msg.dv[0].data_ptr = &ev_data;
1467 ev_msg.dv[1].data_length = 0;
1468
1469 kev_post_msg(&ev_msg);
1470
1471 splx(s);
1472 thread_funnel_set(network_flock, funnel_state);
1473 return DLIL_WAIT_FOR_FREE;
1474 }
1475 }
1476
1477
1478 int
1479 dlil_reg_if_modules(u_long interface_family,
1480 struct dlil_ifmod_reg_str *ifmod)
1481 {
1482 struct if_family_str *if_family;
1483 int s;
1484 boolean_t funnel_state;
1485
1486
1487 funnel_state = thread_funnel_set(network_flock, TRUE);
1488 s = splnet();
1489 if (find_family_module(interface_family)) {
1490 kprintf("Attempt to register dlil family module more than once - %d\n",
1491 interface_family);
1492 splx(s);
1493 thread_funnel_set(network_flock, funnel_state);
1494 return EEXIST;
1495 }
1496
1497 if ((!ifmod->add_if) || (!ifmod->del_if) ||
1498 (!ifmod->add_proto) || (!ifmod->del_proto)) {
1499 kprintf("dlil_reg_if_modules passed at least one null pointer\n");
1500 splx(s);
1501 thread_funnel_set(network_flock, funnel_state);
1502 return EINVAL;
1503 }
1504
1505 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
1506 if (!if_family) {
1507 kprintf("dlil_reg_if_modules failed allocation\n");
1508 splx(s);
1509 thread_funnel_set(network_flock, funnel_state);
1510 return ENOMEM;
1511 }
1512
1513 bzero(if_family, sizeof(struct if_family_str));
1514
1515 if_family->if_family = interface_family & 0xffff;
1516 if_family->shutdown = ifmod->shutdown;
1517 if_family->add_if = ifmod->add_if;
1518 if_family->del_if = ifmod->del_if;
1519 if_family->add_proto = ifmod->add_proto;
1520 if_family->del_proto = ifmod->del_proto;
1521 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
1522 if_family->refcnt = 1;
1523 if_family->flags = 0;
1524
1525 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
1526 splx(s);
1527 thread_funnel_set(network_flock, funnel_state);
1528 return 0;
1529 }
1530
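/*
 * Usage sketch (illustrative only; the my_* handlers are
 * hypothetical): an interface family module registers its handlers
 * once, before any interface of that family is attached.  add_if,
 * del_if, add_proto and del_proto are mandatory; ifmod_ioctl and
 * shutdown are optional.
 *
 *	struct dlil_ifmod_reg_str ifmod;
 *
 *	bzero(&ifmod, sizeof(ifmod));
 *	ifmod.add_if    = my_add_if;
 *	ifmod.del_if    = my_del_if;
 *	ifmod.add_proto = my_add_proto;
 *	ifmod.del_proto = my_del_proto;
 *	retval = dlil_reg_if_modules(my_family, &ifmod);
 */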
1531 int dlil_dereg_if_modules(u_long interface_family)
1532 {
1533 struct if_family_str *if_family;
1534 int s;
1535 boolean_t funnel_state;
1536
1537 funnel_state = thread_funnel_set(network_flock, TRUE);
1538 s = splnet();
1539 if_family = find_family_module(interface_family);
1540 if (if_family == 0) {
1541 splx(s);
1542 thread_funnel_set(network_flock, funnel_state);
1543 return ENOENT;
1544 }
1545
1546 if (--if_family->refcnt == 0) {
1547 if (if_family->shutdown)
1548 (*if_family->shutdown)();
1549
1550 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1551 FREE(if_family, M_IFADDR);
1552 }
1553 else
1554 if_family->flags |= DLIL_SHUTDOWN;
1555
1556 splx(s);
1557 thread_funnel_set(network_flock, funnel_state);
1558 return 0;
1559 }
1560
1561
1562
1563
1564
1565 /*
1566 * Old if_attach entry point, kept here as a wrapper around dlil_if_attach for temporary backwards compatibility
1567 */
1568
1569 void if_attach(ifp)
1570 struct ifnet *ifp;
1571 {
1572 dlil_if_attach(ifp);
1573 }
1574
1575
1576
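/*
 * The dlil_inject_* entry points below let a filter re-inject a packet
 * into the input or output path on its own behalf: processing resumes
 * with the filters that come after the injecting filter (identified by
 * from_id), so the packet is not run through the caller again.
 */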
1577 int
1578 dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id)
1579 {
1580 struct ifnet *orig_ifp = 0;
1581 struct ifnet *ifp;
1582 struct if_proto *ifproto;
1583 struct if_proto *proto;
1584 struct dlil_filterq_entry *tmp;
1585 int retval = 0;
1586 struct dlil_filterq_head *fhead;
1587 int match_found;
1588
1589
1590 dlil_stats.inject_if_in1++;
1591 if (from_id > MAX_DLIL_FILTERS)
1592 return ERANGE;
1593
1594 if (dlil_filters[from_id].type != DLIL_IF_FILTER)
1595 return ENOENT;
1596
1597 ifp = dlil_filters[from_id].ifp;
1598
1599 /*
1600 * Let interface filters (if any) do their thing ...
1601 */
1602
1603 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1604 match_found = 0;
1605
1606 if (TAILQ_EMPTY(fhead) == 0) {
1607 while (orig_ifp != ifp) {
1608 orig_ifp = ifp;
1609 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
1610 if ((match_found) && (IFILT(tmp).filter_if_input)) {
1611 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
1612 &ifp,
1613 &m,
1614 &frame_header);
1615 if (retval) {
1616 if (retval == EJUSTRETURN)
1617 return 0;
1618 else {
1619 m_freem(m);
1620 return retval;
1621 }
1622 }
1623
1624 }
1625
1626 if (ifp != orig_ifp)
1627 break;
1628
1629 if (from_id == tmp->filter_id)
1630 match_found = 1;
1631 }
1632 }
1633 }
1634
1635 ifp->if_lastchange = time;
1636
1637 /*
1638 * Call family demux module. If the demux module finds a match
1639 * for the frame it will fill-in the ifproto pointer.
1640 */
1641
1642 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
1643
1644 if (m->m_flags & (M_BCAST|M_MCAST))
1645 ifp->if_imcasts++;
1646
1647 if ((retval) && (ifp->offercnt)) {
1648 /*
1649 * No match was found, look for any offers.
1650 */
1651 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
1652 TAILQ_FOREACH(proto, tmp, next) {
1653 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
1654 ifproto = proto;
1655 retval = 0;
1656 break;
1657 }
1658 }
1659 }
1660
1661 if (retval) {
1662 if (retval != EJUSTRETURN) {
1663 m_freem(m);
1664 return retval;
1665 }
1666 else
1667 return 0;
1668 }
1669 else
1670 if (ifproto == 0) {
1671 printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n");
1672 m_freem(m);
1673 return 0;
1674 }
1675
1676 /*
1677 * Call any attached protocol filters.
1678 */
1679 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1680 if (PFILT(tmp).filter_dl_input) {
1681 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1682 &m,
1683 &frame_header,
1684 &ifp);
1685
1686 if (retval) {
1687 if (retval == EJUSTRETURN)
1688 return 0;
1689 else {
1690 m_freem(m);
1691 return retval;
1692 }
1693 }
1694 }
1695 }
1696
1697
1698
1699 retval = (*ifproto->dl_input)(m, frame_header,
1700 ifp, ifproto->dl_tag,
1701 FALSE);
1702
1703 dlil_stats.inject_if_in2++;
1704 if (retval == EJUSTRETURN)
1705 retval = 0;
1706 else
1707 if (retval)
1708 m_freem(m);
1709
1710 return retval;
1711
1712 }
1713
1714
1715
1716
1717
1718 int
1719 dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id)
1720 {
1721 struct ifnet *orig_ifp = 0;
1722 struct dlil_filterq_entry *tmp;
1723 int retval;
1724 struct if_proto *ifproto = 0;
1725 int match_found;
1726 struct ifnet *ifp;
1727
1728
1729 dlil_stats.inject_pr_in1++;
1730 if (from_id > MAX_DLIL_FILTERS)
1731 return ERANGE;
1732
1733 if (dlil_filters[from_id].type != DLIL_PR_FILTER)
1734 return ENOENT;
1735
1736 ifproto = dlil_filters[from_id].proto;
1737 ifp = dlil_filters[from_id].ifp;
1738
1739
1740 /*
1741 * Call any attached protocol filters.
1742 */
1743
1744 match_found = 0;
1745 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1746 if ((match_found) && (PFILT(tmp).filter_dl_input)) {
1747 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1748 &m,
1749 &frame_header,
1750 &ifp);
1751
1752 if (retval) {
1753 if (retval == EJUSTRETURN)
1754 return 0;
1755 else {
1756 m_freem(m);
1757 return retval;
1758 }
1759 }
1760 }
1761
1762 if (tmp->filter_id == from_id)
1763 match_found = 1;
1764 }
1765
1766
1767 retval = (*ifproto->dl_input)(m, frame_header,
1768 ifp, ifproto->dl_tag,
1769 FALSE);
1770
1771 if (retval == EJUSTRETURN)
1772 retval = 0;
1773 else
1774 if (retval)
1775 m_freem(m);
1776
1777 dlil_stats.inject_pr_in2++;
1778 return retval;
1779 }
1780
1781
1782
1783 int
1784 dlil_inject_pr_output(struct mbuf *m,
1785 struct sockaddr *dest,
1786 int raw,
1787 char *frame_type,
1788 char *dst_linkaddr,
1789 u_long from_id)
1790 {
1791 struct ifnet *orig_ifp = 0;
1792 struct ifnet *ifp;
1793 struct dlil_filterq_entry *tmp;
1794 int retval = 0;
1795 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1796 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1797 struct dlil_filterq_head *fhead;
1798 int match_found;
1799 u_long dl_tag;
1800
1801
1802 dlil_stats.inject_pr_out1++;
1803 if (raw == 0) {
1804 if (frame_type)
1805 bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4);
1806 else
1807 return EINVAL;
1808
1809 if (dst_linkaddr)
1810 bcopy(dst_linkaddr, &dst_linkaddr_buffer, MAX_LINKADDR * 4);
1811 else
1812 return EINVAL;
1813 }
1814
1815 if (from_id > MAX_DLIL_FILTERS)
1816 return ERANGE;
1817
1818 if (dlil_filters[from_id].type != DLIL_PR_FILTER)
1819 return ENOENT;
1820
1821 ifp = dlil_filters[from_id].ifp;
1822 dl_tag = dlil_filters[from_id].proto->dl_tag;
1823
1824
1825 frame_type = frame_type_buffer;
1826 dst_linkaddr = dst_linkaddr_buffer;
1827
1828 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1829
1830 /*
1831 * Run any attached protocol filters.
1832 */
1833 match_found = 0;
1834
1835 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
1836 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1837 if ((match_found) && (PFILT(tmp).filter_dl_output)) {
1838 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
1839 &m, &ifp, &dest, dst_linkaddr, frame_type);
1840 if (retval) {
1841 if (retval == EJUSTRETURN)
1842 return 0;
1843 else {
1844 m_freem(m);
1845 return retval;
1846 }
1847 }
1848 }
1849
1850 if (tmp->filter_id == from_id)
1851 match_found = 1;
1852 }
1853 }
1854
1855
1856 /*
1857 * Call framing module
1858 */
1859 if ((raw == 0) && (ifp->if_framer)) {
1860 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
1861 if (retval) {
1862 if (retval == EJUSTRETURN)
1863 return 0;
1864 else
1865 {
1866 m_freem(m);
1867 return retval;
1868 }
1869 }
1870 }
1871
1872
1873 #if BRIDGE
1874 if (do_bridge) {
1875 struct mbuf *m0 = m ;
1876
1877 if (m->m_pkthdr.rcvif)
1878 m->m_pkthdr.rcvif = NULL ;
1879 ifp = bridge_dst_lookup(m);
1880 bdg_forward(&m0, ifp);
1881 if (m0)
1882 m_freem(m0);
1883
1884 return 0;
1885 }
1886 #endif
1887
1888
1889 /*
1890 * Let interface filters (if any) do their thing ...
1891 */
1892
1893 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1894 if (TAILQ_EMPTY(fhead) == 0) {
1895 while (orig_ifp != ifp) {
1896 orig_ifp = ifp;
1897 TAILQ_FOREACH(tmp, fhead, que) {
1898 if (IFILT(tmp).filter_if_output) {
1899 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1900 &ifp,
1901 &m);
1902 if (retval) {
1903 if (retval == EJUSTRETURN)
1904 return 0;
1905 else {
1906 m_freem(m);
1907 return retval;
1908 }
1909 }
1910
1911 }
1912
1913 if (ifp != orig_ifp)
1914 break;
1915 }
1916 }
1917 }
1918
1919 /*
1920 * Finally, call the driver.
1921 */
1922
1923 retval = (*ifp->if_output)(ifp, m);
1924 dlil_stats.inject_pr_out2++;
1925 if ((retval == 0) || (retval == EJUSTRETURN))
1926 return 0;
1927 else
1928 return retval;
1929 }
1930
1931
1932 int
1933 dlil_inject_if_output(struct mbuf *m, u_long from_id)
1934 {
1935 struct ifnet *orig_ifp = 0;
1936 struct ifnet *ifp;
1937 struct dlil_filterq_entry *tmp;
1938 int retval = 0;
1939 struct dlil_filterq_head *fhead;
1940 int match_found;
1941
1942
1943 dlil_stats.inject_if_out1++;
1944 if (from_id > MAX_DLIL_FILTERS)
1945 return ERANGE;
1946
1947 if (dlil_filters[from_id].type != DLIL_IF_FILTER)
1948 return ENOENT;
1949
1950 ifp = dlil_filters[from_id].ifp;
1951
1952 /*
1953 * Let interface filters (if any) do their thing ...
1954 */
1955
1956 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1957 match_found = 0;
1958
1959 if (TAILQ_EMPTY(fhead) == 0) {
1960 while (orig_ifp != ifp) {
1961 orig_ifp = ifp;
1962 TAILQ_FOREACH(tmp, fhead, que) {
1963 if ((match_found) && (IFILT(tmp).filter_if_output)) {
1964 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1965 &ifp,
1966 &m);
1967 if (retval) {
1968 if (retval == EJUSTRETURN)
1969 return 0;
1970 else {
1971 m_freem(m);
1972 return retval;
1973 }
1974 }
1975
1976 }
1977
1978 if (ifp != orig_ifp)
1979 break;
1980
1981 if (from_id == tmp->filter_id)
1982 match_found = 1;
1983 }
1984 }
1985 }
1986
1987 /*
1988 * Finally, call the driver.
1989 */
1990
1991 retval = (*ifp->if_output)(ifp, m);
1992 dlil_stats.inject_if_out2++;
1993 if ((retval == 0) || (retval == EJUSTRETURN))
1994 return 0;
1995 else
1996 return retval;
1997 }