1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (c) 1999 Apple Computer, Inc.
27 *
28  * Data Link Interface Layer
29 * Author: Ted Walker
30 */
31
32
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/mbuf.h>
39 #include <sys/socket.h>
40 #include <net/if_dl.h>
41 #include <net/if.h>
42 #include <net/if_var.h>
43 #include <net/dlil.h>
44 #include <sys/kern_event.h>
45 #include <sys/kdebug.h>
46 #include <string.h>
47
48 #include <kern/task.h>
49 #include <kern/thread.h>
50 #include <kern/sched_prim.h>
51
52 #include <net/netisr.h>
53 #include <net/if_types.h>
54
55 #include <machine/machine_routines.h>
56
57 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
58 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
59 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
60 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
61 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
62
63
64 #define MAX_DL_TAGS 16
65 #define MAX_DLIL_FILTERS 16
66 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
67 #define MAX_LINKADDR 4 /* LONGWORDS */
68 #define M_NKE M_IFADDR
69
70 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
71 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
72
73 struct dl_tag_str {
74 struct ifnet *ifp;
75 struct if_proto *proto;
76 struct dlil_filterq_head *pr_flt_head;
77 };
78
79
80 struct dlil_ifnet {
81 /* ifnet and drvr_ext are used by the stack and drivers
82 drvr_ext extends the public ifnet and must follow dl_if */
83 struct ifnet dl_if; /* public ifnet */
84     void        *drvr_ext[4];   /* driver reserved (e.g. arpcom extension for enet) */
85
86 /* dlil private fields */
87     TAILQ_ENTRY(dlil_ifnet) dl_if_link;  /* dlil_ifnet entries are linked together; */
88                         /* this is not the ifnet list */
89 void *if_uniqueid; /* unique id identifying the interface */
90 size_t if_uniqueid_len;/* length of the unique id */
91 char if_namestorage[IFNAMSIZ]; /* interface name storage for detached interfaces */
92 };
93
94 struct dlil_stats_str {
95 int inject_pr_in1;
96 int inject_pr_in2;
97 int inject_pr_out1;
98 int inject_pr_out2;
99 int inject_if_in1;
100 int inject_if_in2;
101 int inject_if_out1;
102 int inject_if_out2;
103 };
104
105
106 struct dlil_filter_id_str {
107 int type;
108 struct dlil_filterq_head *head;
109 struct dlil_filterq_entry *filter_ptr;
110 struct ifnet *ifp;
111 struct if_proto *proto;
112 };
113
114
115
116 struct if_family_str {
117 TAILQ_ENTRY(if_family_str) if_fam_next;
118 u_long if_family;
119 int refcnt;
120 int flags;
121
122 #define DLIL_SHUTDOWN 1
123
124 int (*add_if)(struct ifnet *ifp);
125 int (*del_if)(struct ifnet *ifp);
126 int (*init_if)(struct ifnet *ifp);
127 int (*add_proto)(struct ddesc_head_str *demux_desc_head,
128 struct if_proto *proto, u_long dl_tag);
129 int (*del_proto)(struct if_proto *proto, u_long dl_tag);
130 int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data);
131 int (*shutdown)();
132 };
133
134
135
136 struct dlil_stats_str dlil_stats;
137
138 static
139 struct dlil_filter_id_str *dlil_filters;
140
141 static
142 struct dl_tag_str *dl_tag_array;
143
144 static
145 TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
146
147 static
148 TAILQ_HEAD(, if_family_str) if_family_head;
149
150 static int ifnet_inited = 0;
151 static u_long dl_tag_nb = 0;
152 static u_long dlil_filters_nb = 0;
153
154 int dlil_initialized = 0;
155 decl_simple_lock_data(, dlil_input_lock)
156 int dlil_input_thread_wakeup = 0;
157 int dlil_expand_mcl;
158 static struct mbuf *dlil_input_mbuf_head = NULL;
159 static struct mbuf *dlil_input_mbuf_tail = NULL;
160 #if NLOOP > 1
161 #error dlil_input() needs to be revised to support more than one loopback interface
162 #endif
163 static struct mbuf *dlil_input_loop_head = NULL;
164 static struct mbuf *dlil_input_loop_tail = NULL;
165
166 static void dlil_input_thread(void);
167 extern void run_netisr(void);
168 extern void bpfdetach(struct ifnet*);
169
170
171 /*
172 * Internal functions.
173 */
174
175 static
176 struct if_family_str *find_family_module(u_long if_family)
177 {
178 struct if_family_str *mod = NULL;
179
180 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
181 if (mod->if_family == (if_family & 0xffff))
182 break;
183 }
184
185 return mod;
186 }
187
188
189 /*
190 * Public functions.
191 */
192
193 struct ifnet *ifbyfamily(u_long family, short unit)
194 {
195 struct ifnet *ifp;
196
197 TAILQ_FOREACH(ifp, &ifnet, if_link)
198 if ((family == ifp->if_family) &&
199 (ifp->if_unit == unit))
200 return ifp;
201
202 return 0;
203 }
204
205 struct if_proto *dlttoproto(u_long dl_tag)
206 {
207 if (dl_tag < dl_tag_nb && dl_tag_array[dl_tag].ifp)
208 return dl_tag_array[dl_tag].proto;
209 return 0;
210 }
211
212
213 static int dlil_ifp_proto_count(struct ifnet * ifp)
214 {
215 int count = 0;
216 struct if_proto * proto;
217 struct dlil_proto_head * tmp;
218
219 tmp = (struct dlil_proto_head *) &ifp->proto_head;
220
221 TAILQ_FOREACH(proto, tmp, next)
222 count++;
223
224 return count;
225 }
226
227 u_long ifptodlt(struct ifnet *ifp, u_long proto_family)
228 {
229 struct if_proto *proto;
230 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
231
232
233 TAILQ_FOREACH(proto, tmp, next)
234 if (proto->protocol_family == proto_family)
235 return proto->dl_tag;
236
237 return 0;
238 }
239
240
241 int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag)
242 {
243 struct ifnet *ifp;
244
245 ifp = ifbyfamily(if_family, unit);
246 if (!ifp)
247 return ENOENT;
248
249 *dl_tag = ifptodlt(ifp, proto_family);
250 if (*dl_tag == 0)
251 return EPROTONOSUPPORT;
252 else
253 return 0;
254 }
255
256
257 void dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
258 struct net_event_data *event_data, u_long event_data_len)
259 {
260 struct net_event_data ev_data;
261 struct kev_msg ev_msg;
262
263 /*
264      * a net event always starts with a net_event_data structure
265 * but the caller can generate a simple net event or
266 * provide a longer event structure to post
267 */
268
269 ev_msg.vendor_code = KEV_VENDOR_APPLE;
270 ev_msg.kev_class = KEV_NETWORK_CLASS;
271 ev_msg.kev_subclass = event_subclass;
272 ev_msg.event_code = event_code;
273
274 if (event_data == 0) {
275 event_data = &ev_data;
276 event_data_len = sizeof(struct net_event_data);
277 }
278
279 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
280 event_data->if_family = ifp->if_family;
281 event_data->if_unit = (unsigned long) ifp->if_unit;
282
283 ev_msg.dv[0].data_length = event_data_len;
284 ev_msg.dv[0].data_ptr = event_data;
285 ev_msg.dv[1].data_length = 0;
286
287 kev_post_msg(&ev_msg);
288 }
289
290
291
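/*
 * dlil_init allocates the initial dl_tag and filter id arrays, clears the
 * DLIL statistics, initializes the input queue lock and starts the dlil
 * input thread.  Called once during network stack initialization.
 */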
292 void
293 dlil_init()
294 {
295 int i;
296
297 TAILQ_INIT(&dlil_ifnet_head);
298 TAILQ_INIT(&if_family_head);
299
300 // create the dl tag array
301 MALLOC(dl_tag_array, void *, sizeof(struct dl_tag_str) * MAX_DL_TAGS, M_NKE, M_WAITOK);
302 if (dl_tag_array == 0) {
303 printf("dlil_init tags array allocation failed\n");
304 return; //very bad
305 }
306 bzero(dl_tag_array, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
307 dl_tag_nb = MAX_DL_TAGS;
308
309 // create the dl filters array
310 MALLOC(dlil_filters, void *, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS, M_NKE, M_WAITOK);
311 if (dlil_filters == 0) {
312 printf("dlil_init filters array allocation failed\n");
313 return; //very bad
314 }
315 bzero(dlil_filters, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS);
316 dlil_filters_nb = MAX_DLIL_FILTERS;
317
318 bzero(&dlil_stats, sizeof(dlil_stats));
319
320 simple_lock_init(&dlil_input_lock);
321
322 /*
323 * Start up the dlil input thread once everything is initialized
324 */
325 (void) kernel_thread(kernel_task, dlil_input_thread);
326 }
327
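/*
 * get_new_filter_id returns the index of a free slot in the dlil_filters
 * array, growing the array by MAX_DLIL_FILTERS entries when no slot is
 * free.  Returns 0 on allocation failure; slot 0 is never handed out.
 */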
328 u_long get_new_filter_id()
329 {
330 u_long i;
331 u_char *p;
332
333 for (i=1; i < dlil_filters_nb; i++)
334 if (dlil_filters[i].type == 0)
335 break;
336
337 if (i == dlil_filters_nb) {
338 // expand the filters array by MAX_DLIL_FILTERS
339 MALLOC(p, u_char *, sizeof(struct dlil_filter_id_str) * (dlil_filters_nb + MAX_DLIL_FILTERS), M_NKE, M_WAITOK);
340 if (p == 0)
341 return 0;
342
343 bcopy(dlil_filters, p, sizeof(struct dlil_filter_id_str) * dlil_filters_nb);
344         bzero(p + sizeof(struct dlil_filter_id_str) * dlil_filters_nb, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS);
345 dlil_filters_nb += MAX_DLIL_FILTERS;
346 FREE(dlil_filters, M_NKE);
347 dlil_filters = (struct dlil_filter_id_str *)p;
348 }
349
350 return i;
351 }
352
353
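/*
 * dlil_attach_interface_filter copies the caller's dlil_if_flt_str into a
 * new filter queue entry, assigns it a filter id (returned in *filter_id)
 * and inserts it on the interface filter queue, either before the filter
 * named by insertion_point or at the tail for DLIL_LAST_FILTER.
 */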
354 int dlil_attach_interface_filter(struct ifnet *ifp,
355 struct dlil_if_flt_str *if_filter,
356 u_long *filter_id,
357 int insertion_point)
358 {
359 int s;
360 int retval = 0;
361 struct dlil_filterq_entry *tmp_ptr;
362 struct dlil_filterq_entry *if_filt;
363 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
364 boolean_t funnel_state;
365
366 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
367 if (tmp_ptr == NULL)
368 return (ENOBUFS);
369
370 bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter,
371 sizeof(struct dlil_if_flt_str));
372
373 funnel_state = thread_funnel_set(network_flock, TRUE);
374 s = splnet();
375
376 *filter_id = get_new_filter_id();
377 if (*filter_id == 0) {
378 FREE(tmp_ptr, M_NKE);
379 retval = ENOMEM;
380 goto end;
381 }
382
383 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
384 dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head;
385 dlil_filters[*filter_id].type = DLIL_IF_FILTER;
386 dlil_filters[*filter_id].ifp = ifp;
387 tmp_ptr->filter_id = *filter_id;
388 tmp_ptr->type = DLIL_IF_FILTER;
389
390 if (insertion_point != DLIL_LAST_FILTER) {
391 TAILQ_FOREACH(if_filt, fhead, que)
392 if (insertion_point == if_filt->filter_id) {
393 TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que);
394 break;
395 }
396 }
397 else
398 TAILQ_INSERT_TAIL(fhead, tmp_ptr, que);
399
400 end:
401 splx(s);
402 thread_funnel_set(network_flock, funnel_state);
403 return retval;
404 }
405
406
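/*
 * dlil_attach_protocol_filter is the protocol-level counterpart of
 * dlil_attach_interface_filter: the filter is attached to the protocol
 * identified by dl_tag and inserted on that protocol's filter queue.
 */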
407 int dlil_attach_protocol_filter(u_long dl_tag,
408 struct dlil_pr_flt_str *pr_filter,
409 u_long *filter_id,
410 int insertion_point)
411 {
412 struct dlil_filterq_entry *tmp_ptr, *pr_filt;
413 int s;
414 int retval = 0;
415 boolean_t funnel_state;
416
417 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0)
418 return (ENOENT);
419
420 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
421 if (tmp_ptr == NULL)
422 return (ENOBUFS);
423
424 bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter,
425 sizeof(struct dlil_pr_flt_str));
426
427 funnel_state = thread_funnel_set(network_flock, TRUE);
428 s = splnet();
429
430 *filter_id = get_new_filter_id();
431 if (*filter_id == 0) {
432 FREE(tmp_ptr, M_NKE);
433 retval = ENOMEM;
434 goto end;
435 }
436
437 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
438 dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head;
439 dlil_filters[*filter_id].type = DLIL_PR_FILTER;
440 dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto;
441 dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp;
442 tmp_ptr->filter_id = *filter_id;
443 tmp_ptr->type = DLIL_PR_FILTER;
444
445 if (insertion_point != DLIL_LAST_FILTER) {
446 TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que)
447 if (insertion_point == pr_filt->filter_id) {
448 TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que);
449 break;
450 }
451 }
452 else
453 TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que);
454
455 end:
456 splx(s);
457 thread_funnel_set(network_flock, funnel_state);
458 return retval;
459 }
460
461
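/*
 * dlil_detach_filter invokes the filter's detach callback (if any),
 * removes the entry from its interface or protocol filter queue, frees
 * it and releases the filter id for reuse.
 */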
462 int
463 dlil_detach_filter(u_long filter_id)
464 {
465 struct dlil_filter_id_str *flt;
466 int s, retval = 0;
467 boolean_t funnel_state;
468
469 funnel_state = thread_funnel_set(network_flock, TRUE);
470 s = splnet();
471
472 if (filter_id >= dlil_filters_nb || dlil_filters[filter_id].type == 0) {
473 retval = ENOENT;
474 goto end;
475 }
476
477 flt = &dlil_filters[filter_id];
478
479 if (flt->type == DLIL_IF_FILTER) {
480 if (IFILT(flt->filter_ptr).filter_detach)
481 (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie);
482 }
483 else {
484 if (flt->type == DLIL_PR_FILTER) {
485 if (PFILT(flt->filter_ptr).filter_detach)
486 (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie);
487 }
488 }
489
490 TAILQ_REMOVE(flt->head, flt->filter_ptr, que);
491 FREE(flt->filter_ptr, M_NKE);
492 flt->type = 0;
493
494 end:
495 splx(s);
496 thread_funnel_set(network_flock, funnel_state);
497 return retval;
498 }
499
500
501 void
502 dlil_input_thread_continue(void)
503 {
504 while (1) {
505 struct mbuf *m, *m_loop;
506 int expand_mcl;
507
508 usimple_lock(&dlil_input_lock);
509 m = dlil_input_mbuf_head;
510 dlil_input_mbuf_head = NULL;
511 dlil_input_mbuf_tail = NULL;
512 m_loop = dlil_input_loop_head;
513 dlil_input_loop_head = NULL;
514 dlil_input_loop_tail = NULL;
515 usimple_unlock(&dlil_input_lock);
516
517 MBUF_LOCK();
518 expand_mcl = dlil_expand_mcl;
519 dlil_expand_mcl = 0;
520 MBUF_UNLOCK();
521 if (expand_mcl) {
522 caddr_t p;
523 MCLALLOC(p, M_WAIT);
524 if (p) MCLFREE(p);
525 }
526
527 /*
528 * NOTE warning %%% attention !!!!
529 * We should think about putting some thread starvation safeguards if
530 * we deal with long chains of packets.
531 */
532 while (m) {
533 struct mbuf *m0 = m->m_nextpkt;
534 void *header = m->m_pkthdr.header;
535
536 m->m_nextpkt = NULL;
537 m->m_pkthdr.header = NULL;
538 (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
539 m = m0;
540 }
541 m = m_loop;
542 while (m) {
543 struct mbuf *m0 = m->m_nextpkt;
544 void *header = m->m_pkthdr.header;
545 struct ifnet *ifp = &loif[0];
546
547 m->m_nextpkt = NULL;
548 m->m_pkthdr.header = NULL;
549 (void) dlil_input_packet(ifp, m, header);
550 m = m0;
551 }
552
553 if (netisr != 0)
554 run_netisr();
555
556 if (dlil_input_mbuf_head == NULL &&
557 dlil_input_loop_head == NULL &&
558 netisr == 0) {
559 assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
560 (void) thread_block(dlil_input_thread_continue);
561 /* NOTREACHED */
562 }
563 }
564 }
565
566 void dlil_input_thread(void)
567 {
568 register thread_t self = current_thread();
569 extern void stack_privilege(thread_t thread);
570
571 /*
572 * Make sure that this thread
573 * always has a kernel stack, and
574 * bind it to the master cpu.
575 */
576 stack_privilege(self);
577 ml_thread_policy(current_thread(), MACHINE_GROUP,
578 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
579
580 /* The dlil thread is always funneled */
581 thread_funnel_set(network_flock, TRUE);
582 dlil_initialized = 1;
583 dlil_input_thread_continue();
584 }
585
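/*
 * dlil_input is called by drivers to hand a chain of received packets
 * (m_head through m_tail) to DLIL.  The chain is appended to the input
 * queue (loopback packets go on their own queue) and the dlil input
 * thread is woken up to process it.
 */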
586 int
587 dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
588 {
589 /* WARNING
590 * Because of loopbacked multicast we cannot stuff the ifp in
591 * the rcvif of the packet header: loopback has its own dlil
592 * input queue
593 */
594
595 usimple_lock(&dlil_input_lock);
596 if (ifp->if_type != IFT_LOOP) {
597 if (dlil_input_mbuf_head == NULL)
598 dlil_input_mbuf_head = m_head;
599 else if (dlil_input_mbuf_tail != NULL)
600 dlil_input_mbuf_tail->m_nextpkt = m_head;
601 dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
602 } else {
603 if (dlil_input_loop_head == NULL)
604 dlil_input_loop_head = m_head;
605 else if (dlil_input_loop_tail != NULL)
606 dlil_input_loop_tail->m_nextpkt = m_head;
607 dlil_input_loop_tail = m_tail ? m_tail : m_head;
608 }
609 usimple_unlock(&dlil_input_lock);
610
611 wakeup((caddr_t)&dlil_input_thread_wakeup);
612
613 return 0;
614 }
615
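/*
 * dlil_input_packet processes one received packet: it runs the interface
 * input filters, demuxes the frame to an attached protocol (falling back
 * to dl_offer handlers when the demux finds no match), runs the protocol
 * input filters and finally hands the packet to the protocol's dl_input.
 */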
616 int
617 dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
618 char *frame_header)
619 {
620 struct ifnet *orig_ifp = 0;
621 struct dlil_filterq_entry *tmp;
622 int retval;
623 struct if_proto *ifproto = 0;
624 struct if_proto *proto;
625 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
626
627
628 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);
629
630 /*
631 * Run interface filters
632 */
633
634 while (orig_ifp != ifp) {
635 orig_ifp = ifp;
636
637 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
638 if (IFILT(tmp).filter_if_input) {
639 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
640 &ifp,
641 &m,
642 &frame_header);
643 if (retval) {
644 if (retval == EJUSTRETURN)
645 return 0;
646 else {
647 m_freem(m);
648 return retval;
649 }
650 }
651 }
652
653 if (ifp != orig_ifp)
654 break;
655 }
656 }
657
658 ifp->if_lastchange = time;
659
660 /*
661 * Call family demux module. If the demux module finds a match
662      * for the frame it will fill in the ifproto pointer.
663 */
664
665 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
666
667 if (m->m_flags & (M_BCAST|M_MCAST))
668 ifp->if_imcasts++;
669
670 if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) {
671 /*
672 * No match was found, look for any offers.
673 */
674 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
675 TAILQ_FOREACH(proto, tmp, next) {
676 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
677 ifproto = proto;
678 retval = 0;
679 break;
680 }
681 }
682 }
683
684 if (retval) {
685 if (retval != EJUSTRETURN) {
686 m_freem(m);
687 return retval;
688 }
689 else
690 return 0;
691 }
692 else
693 if (ifproto == 0) {
694 printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
695 m_freem(m);
696 return 0;
697 }
698
699 /*
700 * Call any attached protocol filters.
701 */
702
703 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
704 if (PFILT(tmp).filter_dl_input) {
705 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
706 &m,
707 &frame_header,
708 &ifp);
709
710 if (retval) {
711 if (retval == EJUSTRETURN)
712 return 0;
713 else {
714 m_freem(m);
715 return retval;
716 }
717 }
718 }
719 }
720
721
722
723 retval = (*ifproto->dl_input)(m, frame_header,
724 ifp, ifproto->dl_tag,
725 TRUE);
726
727 if (retval == EJUSTRETURN)
728 retval = 0;
729 else
730 if (retval)
731 m_freem(m);
732
733 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
734 return retval;
735 }
736
737
738
739 void ether_input(ifp, eh, m)
740 struct ifnet *ifp;
741 struct ether_header *eh;
742 struct mbuf *m;
743
744 {
745 kprintf("Someone is calling ether_input!!\n");
746
747 dlil_input(ifp, m, NULL);
748 }
749
750
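/*
 * dlil_event delivers a kernel event to the interface filters, the
 * interface's if_event hook and every attached protocol (filters first,
 * then dl_event), and finally posts it on the kernel event queue.
 */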
751 int
752 dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
753 {
754 struct dlil_filterq_entry *filt;
755 int retval = 0;
756 struct ifnet *orig_ifp = 0;
757 struct if_proto *proto;
758 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
759 struct kev_msg kev_msg;
760 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
761 boolean_t funnel_state;
762
763
764 funnel_state = thread_funnel_set(network_flock, TRUE);
765
766 while (orig_ifp != ifp) {
767 orig_ifp = ifp;
768
769 TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) {
770 if (IFILT(filt).filter_if_event) {
771 retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie,
772 &ifp,
773 &event);
774
775 if (retval) {
776 (void) thread_funnel_set(network_flock, funnel_state);
777 if (retval == EJUSTRETURN)
778 return 0;
779 else
780 return retval;
781 }
782 }
783
784 if (ifp != orig_ifp)
785 break;
786 }
787 }
788
789
790 /*
791 * Call Interface Module event hook, if any.
792 */
793
794 if (ifp->if_event) {
795 retval = ifp->if_event(ifp, (caddr_t) event);
796
797 if (retval) {
798 (void) thread_funnel_set(network_flock, funnel_state);
799
800 if (retval == EJUSTRETURN)
801 return 0;
802 else
803 return retval;
804 }
805 }
806
807 /*
808 * Call dl_event entry point for all protocols attached to this interface
809 */
810
811 TAILQ_FOREACH(proto, tmp, next) {
812 /*
813 * Call any attached protocol filters.
814 */
815
816 TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) {
817 if (PFILT(filt).filter_dl_event) {
818 retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie,
819 event);
820
821 if (retval) {
822 (void) thread_funnel_set(network_flock, funnel_state);
823 if (retval == EJUSTRETURN)
824 return 0;
825 else
826 return retval;
827 }
828 }
829 }
830
831
832 /*
833 * Finally, call the dl_event entry point (if any)
834 */
835
836 if (proto->dl_event)
837 retval = (*proto->dl_event)(event, proto->dl_tag);
838
839 if (retval == EJUSTRETURN) {
840 (void) thread_funnel_set(network_flock, funnel_state);
841 return 0;
842 }
843 }
844
845
846 /*
847 * Now, post this event to the Kernel Event message queue
848 */
849
850 kev_msg.vendor_code = event->vendor_code;
851 kev_msg.kev_class = event->kev_class;
852 kev_msg.kev_subclass = event->kev_subclass;
853 kev_msg.event_code = event->event_code;
854 kev_msg.dv[0].data_ptr = &event->event_data[0];
855 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
856 kev_msg.dv[1].data_length = 0;
857
858 kev_post_msg(&kev_msg);
859
860 (void) thread_funnel_set(network_flock, funnel_state);
861 return 0;
862 }
863
864
865
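/*
 * dlil_output transmits a packet for the protocol identified by dl_tag.
 * Unless the caller passes raw, the protocol's dl_pre_output builds the
 * frame header information and the interface framer prepends it; protocol
 * output filters run before framing, interface output filters after it,
 * and the packet is finally handed to the driver's if_output.
 */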
866 int
867 dlil_output(u_long dl_tag,
868 struct mbuf *m,
869 caddr_t route,
870 struct sockaddr *dest,
871 int raw
872 )
873 {
874 char *frame_type;
875 char *dst_linkaddr;
876 struct ifnet *orig_ifp = 0;
877 struct ifnet *ifp;
878 struct if_proto *proto;
879 struct dlil_filterq_entry *tmp;
880 int retval = 0;
881 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
882 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
883 struct dlil_filterq_head *fhead;
884
885 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
886
887 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
888 m_freem(m);
889 return ENOENT;
890 }
891
892 ifp = dl_tag_array[dl_tag].ifp;
893 proto = dl_tag_array[dl_tag].proto;
894
895 frame_type = frame_type_buffer;
896 dst_linkaddr = dst_linkaddr_buffer;
897
898 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
899
900 if ((raw == 0) && (proto->dl_pre_output)) {
901 retval = (*proto->dl_pre_output)(ifp, &m, dest, route,
902 frame_type, dst_linkaddr, dl_tag);
903 if (retval) {
904 if (retval == EJUSTRETURN)
905 return 0;
906 else {
907 m_freem(m);
908 return retval;
909 }
910 }
911 }
912
913 /*
914 * Run any attached protocol filters.
915 */
916
917 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
918 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
919 if (PFILT(tmp).filter_dl_output) {
920 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
921 &m, &ifp, &dest, dst_linkaddr, frame_type);
922 if (retval) {
923 if (retval == EJUSTRETURN)
924 return 0;
925 else {
926 m_freem(m);
927 return retval;
928 }
929 }
930 }
931 }
932 }
933
934
935 /*
936 * Call framing module
937 */
938 if ((raw == 0) && (ifp->if_framer)) {
939 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
940 if (retval) {
941 if (retval == EJUSTRETURN)
942 return 0;
943 else
944 {
945 m_freem(m);
946 return retval;
947 }
948 }
949 }
950
951 #if BRIDGE
952 if (do_bridge) {
953 struct mbuf *m0 = m ;
954 struct ether_header *eh = mtod(m, struct ether_header *);
955
956 if (m->m_pkthdr.rcvif)
957 m->m_pkthdr.rcvif = NULL ;
958 ifp = bridge_dst_lookup(eh);
959 bdg_forward(&m0, ifp);
960 if (m0)
961 m_freem(m0);
962
963 return 0;
964 }
965 #endif
966
967
968 /*
969 * Let interface filters (if any) do their thing ...
970 */
971
972 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
973 if (TAILQ_EMPTY(fhead) == 0) {
974 while (orig_ifp != ifp) {
975 orig_ifp = ifp;
976 TAILQ_FOREACH(tmp, fhead, que) {
977 if (IFILT(tmp).filter_if_output) {
978 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
979 &ifp,
980 &m);
981 if (retval) {
982 if (retval == EJUSTRETURN)
983 return 0;
984 else {
985 m_freem(m);
986 return retval;
987 }
988 }
989
990 }
991
992 if (ifp != orig_ifp)
993 break;
994 }
995 }
996 }
997
998 /*
999 * Finally, call the driver.
1000 */
1001
1002 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1003 retval = (*ifp->if_output)(ifp, m);
1004 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1005
1006 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1007
1008 if ((retval == 0) || (retval == EJUSTRETURN))
1009 return 0;
1010 else
1011 return retval;
1012 }
1013
1014
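/*
 * dlil_ioctl offers an ioctl to each layer in turn: the protocol filters
 * and dl_ioctl (when proto_fam is non-zero), the interface filters, the
 * family module's ifmod_ioctl and finally the driver's if_ioctl.  A layer
 * returning EOPNOTSUPP lets the next layer try.
 */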
1015 int
1016 dlil_ioctl(u_long proto_fam,
1017 struct ifnet *ifp,
1018 u_long ioctl_code,
1019 caddr_t ioctl_arg)
1020 {
1021 struct dlil_filterq_entry *tmp;
1022 struct dlil_filterq_head *fhead;
1023 int retval = EOPNOTSUPP;
1024 int retval2 = EOPNOTSUPP;
1025 u_long dl_tag;
1026 struct if_family_str *if_family;
1027
1028
1029 if (proto_fam) {
1030 retval = dlil_find_dltag(ifp->if_family, ifp->if_unit,
1031 proto_fam, &dl_tag);
1032
1033 if (retval == 0) {
1034 if (dl_tag_array[dl_tag].ifp != ifp)
1035 return ENOENT;
1036
1037 /*
1038 * Run any attached protocol filters.
1039 */
1040 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1041 if (PFILT(tmp).filter_dl_ioctl) {
1042 retval =
1043 (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie,
1044 dl_tag_array[dl_tag].ifp,
1045 ioctl_code,
1046 ioctl_arg);
1047
1048 if (retval) {
1049 if (retval == EJUSTRETURN)
1050 return 0;
1051 else
1052 return retval;
1053 }
1054 }
1055 }
1056
1057 if (dl_tag_array[dl_tag].proto->dl_ioctl)
1058 retval =
1059 (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag,
1060 dl_tag_array[dl_tag].ifp,
1061 ioctl_code,
1062 ioctl_arg);
1063 else
1064 retval = EOPNOTSUPP;
1065 }
1066 else
1067 retval = 0;
1068 }
1069
1070 if ((retval) && (retval != EOPNOTSUPP)) {
1071 if (retval == EJUSTRETURN)
1072 return 0;
1073 else
1074 return retval;
1075 }
1076
1077
1078 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1079 TAILQ_FOREACH(tmp, fhead, que) {
1080 if (IFILT(tmp).filter_if_ioctl) {
1081 retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp,
1082 ioctl_code, ioctl_arg);
1083 if (retval2) {
1084 if (retval2 == EJUSTRETURN)
1085 return 0;
1086 else
1087 return retval2;
1088 }
1089 }
1090 }
1091
1092
1093 if_family = find_family_module(ifp->if_family);
1094 if ((if_family) && (if_family->ifmod_ioctl)) {
1095 retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1096
1097 if ((retval2) && (retval2 != EOPNOTSUPP)) {
1098 if (retval2 == EJUSTRETURN)
1099 return 0;
1100 else
1101                             return retval2;
1102 }
1103
1104 if (retval == EOPNOTSUPP)
1105 retval = retval2;
1106 }
1107
1108 if (ifp->if_ioctl)
1109 retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1110
1111 if (retval == EOPNOTSUPP)
1112 return retval2;
1113 else {
1114 if (retval2 == EOPNOTSUPP)
1115 return 0;
1116 else
1117 return retval2;
1118 }
1119 }
1120
1121
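/*
 * dlil_attach_protocol attaches the protocol described by proto to its
 * interface: a dl_tag and if_proto are allocated, the family module's
 * add_proto refines the demux descriptors, the protocol is linked onto
 * the interface and a KEV_DL_PROTO_ATTACHED event is posted.
 */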
1122 int
1123 dlil_attach_protocol(struct dlil_proto_reg_str *proto,
1124 u_long *dl_tag)
1125 {
1126 struct ifnet *ifp;
1127 struct if_proto *ifproto;
1128 u_long i;
1129 struct if_family_str *if_family;
1130 struct dlil_proto_head *tmp;
1131 struct kev_dl_proto_data ev_pr_data;
1132 int s, retval = 0;
1133 boolean_t funnel_state;
1134 u_char *p;
1135
1136 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1137 return EINVAL;
1138
1139 funnel_state = thread_funnel_set(network_flock, TRUE);
1140 s = splnet();
1141 if_family = find_family_module(proto->interface_family);
1142 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1143 kprintf("dlil_attach_protocol -- no interface family module %d",
1144 proto->interface_family);
1145 retval = ENOENT;
1146 goto end;
1147 }
1148
1149 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1150 if (!ifp) {
1151 kprintf("dlil_attach_protocol -- no such interface %d unit %d\n",
1152 proto->interface_family, proto->unit_number);
1153 retval = ENOENT;
1154 goto end;
1155 }
1156
1157 if (dlil_find_dltag(proto->interface_family, proto->unit_number,
1158 proto->protocol_family, &i) == 0) {
1159 retval = EEXIST;
1160 goto end;
1161 }
1162
1163 for (i=1; i < dl_tag_nb; i++)
1164 if (dl_tag_array[i].ifp == 0)
1165 break;
1166
1167 if (i == dl_tag_nb) {
1168 // expand the tag array by MAX_DL_TAGS
1169 MALLOC(p, u_char *, sizeof(struct dl_tag_str) * (dl_tag_nb + MAX_DL_TAGS), M_NKE, M_WAITOK);
1170 if (p == 0) {
1171 retval = ENOBUFS;
1172 goto end;
1173 }
1174 bcopy(dl_tag_array, p, sizeof(struct dl_tag_str) * dl_tag_nb);
1175 bzero(p + sizeof(struct dl_tag_str) * dl_tag_nb, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
1176 dl_tag_nb += MAX_DL_TAGS;
1177 FREE(dl_tag_array, M_NKE);
1178 dl_tag_array = (struct dl_tag_str *)p;
1179 }
1180
1181 /*
1182 * Allocate and init a new if_proto structure
1183 */
1184
1185 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1186 if (!ifproto) {
1187 printf("ERROR - DLIL failed if_proto allocation\n");
1188 retval = ENOMEM;
1189 goto end;
1190 }
1191
1192 bzero(ifproto, sizeof(struct if_proto));
1193
1194 dl_tag_array[i].ifp = ifp;
1195 dl_tag_array[i].proto = ifproto;
1196 dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head;
1197 ifproto->dl_tag = i;
1198 *dl_tag = i;
1199
1200 if (proto->default_proto) {
1201 if (ifp->if_data.default_proto == 0)
1202 ifp->if_data.default_proto = i;
1203 else
1204 printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n");
1205 }
1206
1207 ifproto->protocol_family = proto->protocol_family;
1208 ifproto->dl_input = proto->input;
1209 ifproto->dl_pre_output = proto->pre_output;
1210 ifproto->dl_event = proto->event;
1211 ifproto->dl_offer = proto->offer;
1212 ifproto->dl_ioctl = proto->ioctl;
1213 ifproto->ifp = ifp;
1214 TAILQ_INIT(&ifproto->pr_flt_head);
1215
1216 /*
1217 * Call family module add_proto routine so it can refine the
1218 * demux descriptors as it wishes.
1219 */
1220 retval = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag);
1221 if (retval) {
1222 dl_tag_array[i].ifp = 0;
1223 FREE(ifproto, M_IFADDR);
1224 goto end;
1225 }
1226
1227 /*
1228 * Add to if_proto list for this interface
1229 */
1230
1231 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1232 TAILQ_INSERT_TAIL(tmp, ifproto, next);
1233 ifp->refcnt++;
1234 if (ifproto->dl_offer)
1235 ifp->offercnt++;
1236
1237     /* the reserved field carries the number of protocols still attached (subject to change) */
1238 ev_pr_data.proto_family = proto->protocol_family;
1239 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1240 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
1241 (struct net_event_data *)&ev_pr_data,
1242 sizeof(struct kev_dl_proto_data));
1243
1244 end:
1245 splx(s);
1246 thread_funnel_set(network_flock, funnel_state);
1247 return retval;
1248 }
1249
1250
1251
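/*
 * dlil_detach_protocol undoes dlil_attach_protocol: the family module's
 * del_proto is called, protocol filters and routes for the protocol are
 * removed, the if_proto is freed and a KEV_DL_PROTO_DETACHED event is
 * posted.  When the interface refcount drops to zero the interface itself
 * is torn down and its if_free routine is called.
 */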
1252 int
1253 dlil_detach_protocol(u_long dl_tag)
1254 {
1255 struct ifnet *ifp;
1256 struct ifnet *orig_ifp=0;
1257 struct if_proto *proto;
1258 struct dlil_proto_head *tmp;
1259 struct if_family_str *if_family;
1260 struct dlil_filterq_entry *filter;
1261 int s, retval = 0;
1262 struct dlil_filterq_head *fhead;
1263 struct kev_dl_proto_data ev_pr_data;
1264 boolean_t funnel_state;
1265
1266 funnel_state = thread_funnel_set(network_flock, TRUE);
1267 s = splnet();
1268
1269 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
1270 retval = ENOENT;
1271 goto end;
1272 }
1273
1274 ifp = dl_tag_array[dl_tag].ifp;
1275 proto = dl_tag_array[dl_tag].proto;
1276
1277 if_family = find_family_module(ifp->if_family);
1278 if (if_family == NULL) {
1279 retval = ENOENT;
1280 goto end;
1281 }
1282
1283 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1284
1285 /*
1286 * Call family module del_proto
1287 */
1288
1289 (*if_family->del_proto)(proto, dl_tag);
1290
1291
1292 /*
1293 * Remove and deallocate any attached protocol filters
1294 */
1295
1296     while ((filter = TAILQ_FIRST(&proto->pr_flt_head)) != NULL)
1297 dlil_detach_filter(filter->filter_id);
1298
1299 if (proto->dl_offer)
1300 ifp->offercnt--;
1301
1302 if (ifp->if_data.default_proto == dl_tag)
1303 ifp->if_data.default_proto = 0;
1304 dl_tag_array[dl_tag].ifp = 0;
1305
1306     /* the reserved field carries the number of protocols still attached (subject to change) */
1307 ev_pr_data.proto_family = proto->protocol_family;
1308
1309 /*
1310 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1311 */
1312
1313 if_rtproto_del(ifp, proto->protocol_family);
1314
1315 TAILQ_REMOVE(tmp, proto, next);
1316 FREE(proto, M_IFADDR);
1317
1318 ifp->refcnt--;
1319 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1320 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1321 (struct net_event_data *)&ev_pr_data,
1322 sizeof(struct kev_dl_proto_data));
1323
1324 if (ifp->refcnt == 0) {
1325
1326 TAILQ_REMOVE(&ifnet, ifp, if_link);
1327
1328 (*if_family->del_if)(ifp);
1329
1330 if (--if_family->refcnt == 0) {
1331 if (if_family->shutdown)
1332 (*if_family->shutdown)();
1333
1334 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1335 FREE(if_family, M_IFADDR);
1336 }
1337
1338 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1339 while (orig_ifp != ifp) {
1340 orig_ifp = ifp;
1341
1342 TAILQ_FOREACH(filter, fhead, que) {
1343 if (IFILT(filter).filter_if_free) {
1344 retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp);
1345 if (retval) {
1346 splx(s);
1347 thread_funnel_set(network_flock, funnel_state);
1348 return 0;
1349 }
1350 }
1351 if (ifp != orig_ifp)
1352 break;
1353 }
1354 }
1355
1356 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1357
1358 (*ifp->if_free)(ifp);
1359 }
1360
1361 end:
1362 splx(s);
1363 thread_funnel_set(network_flock, funnel_state);
1364 return retval;
1365 }
1366
1367
1368
1369
1370
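/*
 * dlil_if_attach attaches an ifnet to DLIL.  On the first attach the
 * family module's add_if and init_if are called and the ifp is added to
 * the global interface list; every attach bumps the refcount and posts a
 * KEV_DL_IF_ATTACHED event.
 */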
1371 int
1372 dlil_if_attach(struct ifnet *ifp)
1373 {
1374 u_long interface_family = ifp->if_family;
1375 struct if_family_str *if_family;
1376 struct dlil_proto_head *tmp;
1377 int stat;
1378 int s;
1379 boolean_t funnel_state;
1380
1381 funnel_state = thread_funnel_set(network_flock, TRUE);
1382 s = splnet();
1383 if (ifnet_inited == 0) {
1384 TAILQ_INIT(&ifnet);
1385 ifnet_inited = 1;
1386 }
1387
1388 if_family = find_family_module(interface_family);
1389
1390 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1391 splx(s);
1392 kprintf("Attempt to attach interface without family module - %d\n",
1393 interface_family);
1394 thread_funnel_set(network_flock, funnel_state);
1395 return ENODEV;
1396 }
1397
1398 if (ifp->refcnt == 0) {
1399 /*
1400 * Call the family module to fill in the appropriate fields in the
1401 * ifnet structure.
1402 */
1403
1404 stat = (*if_family->add_if)(ifp);
1405 if (stat) {
1406 splx(s);
1407 kprintf("dlil_if_attach -- add_if failed with %d\n", stat);
1408 thread_funnel_set(network_flock, funnel_state);
1409 return stat;
1410 }
1411 if_family->refcnt++;
1412
1413 /*
1414 * Add the ifp to the interface list.
1415 */
1416
1417 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1418 TAILQ_INIT(tmp);
1419
1420 ifp->if_data.default_proto = 0;
1421 ifp->offercnt = 0;
1422 TAILQ_INIT(&ifp->if_flt_head);
1423 old_if_attach(ifp);
1424
1425 if (if_family->init_if) {
1426 stat = (*if_family->init_if)(ifp);
1427 if (stat) {
1428 kprintf("dlil_if_attach -- init_if failed with %d\n", stat);
1429 }
1430 }
1431 }
1432
1433 ifp->refcnt++;
1434
1435 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
1436
1437 splx(s);
1438 thread_funnel_set(network_flock, funnel_state);
1439 return 0;
1440 }
1441
1442
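/*
 * dlil_if_detach removes the interface filters and drops the interface
 * refcount.  At refcount zero the interface is detached from bpf and the
 * interface list, the family module's del_if runs and KEV_DL_IF_DETACHED
 * is posted; otherwise KEV_DL_IF_DETACHING is posted and
 * DLIL_WAIT_FOR_FREE is returned.
 */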
1443 int
1444 dlil_if_detach(struct ifnet *ifp)
1445 {
1446 struct if_proto *proto;
1447 struct dlil_filterq_entry *if_filter;
1448 struct if_family_str *if_family;
1449 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1450 int s;
1451 struct kev_msg ev_msg;
1452 boolean_t funnel_state;
1453
1454 funnel_state = thread_funnel_set(network_flock, TRUE);
1455 s = splnet();
1456
1457 if_family = find_family_module(ifp->if_family);
1458
1459 if (!if_family) {
1460 kprintf("Attempt to detach interface without family module - %s\n",
1461 ifp->if_name);
1462 splx(s);
1463 thread_funnel_set(network_flock, funnel_state);
1464 return ENODEV;
1465 }
1466
1467     while ((if_filter = TAILQ_FIRST(fhead)) != NULL)
1468 dlil_detach_filter(if_filter->filter_id);
1469
1470 ifp->refcnt--;
1471
1472 if (ifp->refcnt == 0) {
1473 /* Let BPF know the interface is detaching. */
1474 bpfdetach(ifp);
1475 TAILQ_REMOVE(&ifnet, ifp, if_link);
1476
1477 (*if_family->del_if)(ifp);
1478
1479 if (--if_family->refcnt == 0) {
1480 if (if_family->shutdown)
1481 (*if_family->shutdown)();
1482
1483 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1484 FREE(if_family, M_IFADDR);
1485 }
1486
1487 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1488 splx(s);
1489 thread_funnel_set(network_flock, funnel_state);
1490 return 0;
1491 }
1492 else
1493 {
1494 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
1495 splx(s);
1496 thread_funnel_set(network_flock, funnel_state);
1497 return DLIL_WAIT_FOR_FREE;
1498 }
1499 }
1500
1501
1502 int
1503 dlil_reg_if_modules(u_long interface_family,
1504 struct dlil_ifmod_reg_str *ifmod)
1505 {
1506 struct if_family_str *if_family;
1507 int s;
1508 boolean_t funnel_state;
1509
1510
1511 funnel_state = thread_funnel_set(network_flock, TRUE);
1512 s = splnet();
1513 if (find_family_module(interface_family)) {
1514 kprintf("Attempt to register dlil family module more than once - %d\n",
1515 interface_family);
1516 splx(s);
1517 thread_funnel_set(network_flock, funnel_state);
1518 return EEXIST;
1519 }
1520
1521 if ((!ifmod->add_if) || (!ifmod->del_if) ||
1522 (!ifmod->add_proto) || (!ifmod->del_proto)) {
1523 kprintf("dlil_reg_if_modules passed at least one null pointer\n");
1524 splx(s);
1525 thread_funnel_set(network_flock, funnel_state);
1526 return EINVAL;
1527 }
1528
1529 /*
1530 * The following is a gross hack to keep from breaking
1531 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
1532 * does not zero the reserved fields in dlil_ifmod_reg_str.
1533      * As a result, we have to zero any function pointer that used to
1534      * be a reserved field at the time Vicomsoft built their
1535 * kext. Radar #2974305
1536 */
1537 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2]) {
1538 if (interface_family == 123) { /* Vicom */
1539 ifmod->init_if = 0;
1540 } else {
1541 splx(s);
1542 thread_funnel_set(network_flock, funnel_state);
1543 return EINVAL;
1544 }
1545 }
1546
1547 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
1548 if (!if_family) {
1549 kprintf("dlil_reg_if_modules failed allocation\n");
1550 splx(s);
1551 thread_funnel_set(network_flock, funnel_state);
1552 return ENOMEM;
1553 }
1554
1555 bzero(if_family, sizeof(struct if_family_str));
1556
1557 if_family->if_family = interface_family & 0xffff;
1558 if_family->shutdown = ifmod->shutdown;
1559 if_family->add_if = ifmod->add_if;
1560 if_family->del_if = ifmod->del_if;
1561 if_family->init_if = ifmod->init_if;
1562 if_family->add_proto = ifmod->add_proto;
1563 if_family->del_proto = ifmod->del_proto;
1564 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
1565 if_family->refcnt = 1;
1566 if_family->flags = 0;
1567
1568 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
1569 splx(s);
1570 thread_funnel_set(network_flock, funnel_state);
1571 return 0;
1572 }
1573
1574 int dlil_dereg_if_modules(u_long interface_family)
1575 {
1576 struct if_family_str *if_family;
1577 int s, ret = 0;
1578 boolean_t funnel_state;
1579
1580 funnel_state = thread_funnel_set(network_flock, TRUE);
1581 s = splnet();
1582 if_family = find_family_module(interface_family);
1583 if (if_family == 0) {
1584 splx(s);
1585 thread_funnel_set(network_flock, funnel_state);
1586 return ENOENT;
1587 }
1588
1589 if (--if_family->refcnt == 0) {
1590 if (if_family->shutdown)
1591 (*if_family->shutdown)();
1592
1593 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1594 FREE(if_family, M_IFADDR);
1595 }
1596 else {
1597 if_family->flags |= DLIL_SHUTDOWN;
1598 ret = DLIL_WAIT_FOR_FREE;
1599 }
1600
1601 splx(s);
1602 thread_funnel_set(network_flock, funnel_state);
1603 return ret;
1604 }
1605
1606
1607
1608
1609
1610 /*
1611 * Old if_attach no-op'ed function defined here for temporary backwards compatibility
1612 */
1613
1614 void if_attach(ifp)
1615 struct ifnet *ifp;
1616 {
1617 dlil_if_attach(ifp);
1618 }
1619
1620
1621
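/*
 * The dlil_inject_* routines let an attached filter re-inject a packet
 * into the input or output path.  Processing resumes just past the filter
 * identified by from_id, so the injecting filter does not see the packet
 * a second time.
 */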
1622 int
1623 dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id)
1624 {
1625 struct ifnet *orig_ifp = 0;
1626 struct ifnet *ifp;
1627 struct if_proto *ifproto;
1628 struct if_proto *proto;
1629 struct dlil_filterq_entry *tmp;
1630 int retval = 0;
1631 struct dlil_filterq_head *fhead;
1632 int match_found;
1633
1634 dlil_stats.inject_if_in1++;
1635
1636 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
1637 return ENOENT;
1638
1639 ifp = dlil_filters[from_id].ifp;
1640
1641 /*
1642 * Let interface filters (if any) do their thing ...
1643 */
1644
1645 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1646 match_found = 0;
1647
1648 if (TAILQ_EMPTY(fhead) == 0) {
1649 while (orig_ifp != ifp) {
1650 orig_ifp = ifp;
1651 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
1652 if ((match_found) && (IFILT(tmp).filter_if_input)) {
1653 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
1654 &ifp,
1655 &m,
1656 &frame_header);
1657 if (retval) {
1658 if (retval == EJUSTRETURN)
1659 return 0;
1660 else {
1661 m_freem(m);
1662 return retval;
1663 }
1664 }
1665
1666 }
1667
1668 if (ifp != orig_ifp)
1669 break;
1670
1671 if (from_id == tmp->filter_id)
1672 match_found = 1;
1673 }
1674 }
1675 }
1676
1677 ifp->if_lastchange = time;
1678
1679 /*
1680 * Call family demux module. If the demux module finds a match
1681      * for the frame it will fill in the ifproto pointer.
1682 */
1683
1684 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
1685
1686 if (m->m_flags & (M_BCAST|M_MCAST))
1687 ifp->if_imcasts++;
1688
1689 if ((retval) && (ifp->offercnt)) {
1690 /*
1691 * No match was found, look for any offers.
1692 */
1693 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
1694 TAILQ_FOREACH(proto, tmp, next) {
1695 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
1696 ifproto = proto;
1697 retval = 0;
1698 break;
1699 }
1700 }
1701 }
1702
1703 if (retval) {
1704 if (retval != EJUSTRETURN) {
1705 m_freem(m);
1706 return retval;
1707 }
1708 else
1709 return 0;
1710 }
1711 else
1712 if (ifproto == 0) {
1713 printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n");
1714 m_freem(m);
1715 return 0;
1716 }
1717
1718 /*
1719 * Call any attached protocol filters.
1720 */
1721 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1722 if (PFILT(tmp).filter_dl_input) {
1723 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1724 &m,
1725 &frame_header,
1726 &ifp);
1727
1728 if (retval) {
1729 if (retval == EJUSTRETURN)
1730 return 0;
1731 else {
1732 m_freem(m);
1733 return retval;
1734 }
1735 }
1736 }
1737 }
1738
1739
1740
1741 retval = (*ifproto->dl_input)(m, frame_header,
1742 ifp, ifproto->dl_tag,
1743 FALSE);
1744
1745 dlil_stats.inject_if_in2++;
1746 if (retval == EJUSTRETURN)
1747 retval = 0;
1748 else
1749 if (retval)
1750 m_freem(m);
1751
1752 return retval;
1753
1754 }
1755
1756
1757
1758
1759
1760 int
1761 dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id)
1762 {
1763 struct ifnet *orig_ifp = 0;
1764 struct dlil_filterq_entry *tmp;
1765 int retval;
1766 struct if_proto *ifproto = 0;
1767 int match_found;
1768 struct ifnet *ifp;
1769
1770 dlil_stats.inject_pr_in1++;
1771 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1772 return ENOENT;
1773
1774 ifproto = dlil_filters[from_id].proto;
1775 ifp = dlil_filters[from_id].ifp;
1776
1777 /*
1778 * Call any attached protocol filters.
1779 */
1780
1781 match_found = 0;
1782 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1783 if ((match_found) && (PFILT(tmp).filter_dl_input)) {
1784 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1785 &m,
1786 &frame_header,
1787 &ifp);
1788
1789 if (retval) {
1790 if (retval == EJUSTRETURN)
1791 return 0;
1792 else {
1793 m_freem(m);
1794 return retval;
1795 }
1796 }
1797 }
1798
1799 if (tmp->filter_id == from_id)
1800 match_found = 1;
1801 }
1802
1803
1804 retval = (*ifproto->dl_input)(m, frame_header,
1805 ifp, ifproto->dl_tag,
1806 FALSE);
1807
1808 if (retval == EJUSTRETURN)
1809 retval = 0;
1810 else
1811 if (retval)
1812 m_freem(m);
1813
1814 dlil_stats.inject_pr_in2++;
1815 return retval;
1816 }
1817
1818
1819
1820 int
1821 dlil_inject_pr_output(struct mbuf *m,
1822 struct sockaddr *dest,
1823 int raw,
1824 char *frame_type,
1825 char *dst_linkaddr,
1826 u_long from_id)
1827 {
1828 struct ifnet *orig_ifp = 0;
1829 struct ifnet *ifp;
1830 struct dlil_filterq_entry *tmp;
1831 int retval = 0;
1832 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1833 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1834 struct dlil_filterq_head *fhead;
1835 int match_found;
1836 u_long dl_tag;
1837
1838 dlil_stats.inject_pr_out1++;
1839 if (raw == 0) {
1840 if (frame_type)
1841 bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4);
1842 else
1843 return EINVAL;
1844
1845 if (dst_linkaddr)
1846 bcopy(dst_linkaddr, &dst_linkaddr_buffer, MAX_LINKADDR * 4);
1847 else
1848 return EINVAL;
1849 }
1850
1851 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1852 return ENOENT;
1853
1854 ifp = dlil_filters[from_id].ifp;
1855 dl_tag = dlil_filters[from_id].proto->dl_tag;
1856
1857 frame_type = frame_type_buffer;
1858 dst_linkaddr = dst_linkaddr_buffer;
1859
1860 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1861
1862 /*
1863 * Run any attached protocol filters.
1864 */
1865 match_found = 0;
1866
1867 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
1868 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1869 if ((match_found) && (PFILT(tmp).filter_dl_output)) {
1870 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
1871 &m, &ifp, &dest, dst_linkaddr, frame_type);
1872 if (retval) {
1873 if (retval == EJUSTRETURN)
1874 return 0;
1875 else {
1876 m_freem(m);
1877 return retval;
1878 }
1879 }
1880 }
1881
1882 if (tmp->filter_id == from_id)
1883 match_found = 1;
1884 }
1885 }
1886
1887
1888 /*
1889 * Call framing module
1890 */
1891 if ((raw == 0) && (ifp->if_framer)) {
1892 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
1893 if (retval) {
1894 if (retval == EJUSTRETURN)
1895 return 0;
1896 else
1897 {
1898 m_freem(m);
1899 return retval;
1900 }
1901 }
1902 }
1903
1904
1905 #if BRIDGE
1906 if (do_bridge) {
1907 struct mbuf *m0 = m ;
1908 struct ether_header *eh = mtod(m, struct ether_header *);
1909
1910 if (m->m_pkthdr.rcvif)
1911 m->m_pkthdr.rcvif = NULL ;
1912 ifp = bridge_dst_lookup(eh);
1913 bdg_forward(&m0, ifp);
1914 if (m0)
1915 m_freem(m0);
1916
1917 return 0;
1918 }
1919 #endif
1920
1921
1922 /*
1923 * Let interface filters (if any) do their thing ...
1924 */
1925
1926 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1927 if (TAILQ_EMPTY(fhead) == 0) {
1928 while (orig_ifp != ifp) {
1929 orig_ifp = ifp;
1930 TAILQ_FOREACH(tmp, fhead, que) {
1931 if (IFILT(tmp).filter_if_output) {
1932 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1933 &ifp,
1934 &m);
1935 if (retval) {
1936 if (retval == EJUSTRETURN)
1937 return 0;
1938 else {
1939 m_freem(m);
1940 return retval;
1941 }
1942 }
1943
1944 }
1945
1946 if (ifp != orig_ifp)
1947 break;
1948 }
1949 }
1950 }
1951
1952 /*
1953 * Finally, call the driver.
1954 */
1955
1956 retval = (*ifp->if_output)(ifp, m);
1957 dlil_stats.inject_pr_out2++;
1958 if ((retval == 0) || (retval == EJUSTRETURN))
1959 return 0;
1960 else
1961 return retval;
1962 }
1963
1964
1965 int
1966 dlil_inject_if_output(struct mbuf *m, u_long from_id)
1967 {
1968 struct ifnet *orig_ifp = 0;
1969 struct ifnet *ifp;
1970 struct dlil_filterq_entry *tmp;
1971 int retval = 0;
1972 struct dlil_filterq_head *fhead;
1973 int match_found;
1974
1975 dlil_stats.inject_if_out1++;
1976     if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
1977 return ENOENT;
1978
1979 ifp = dlil_filters[from_id].ifp;
1980
1981 /*
1982 * Let interface filters (if any) do their thing ...
1983 */
1984
1985 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1986 match_found = 0;
1987
1988 if (TAILQ_EMPTY(fhead) == 0) {
1989 while (orig_ifp != ifp) {
1990 orig_ifp = ifp;
1991 TAILQ_FOREACH(tmp, fhead, que) {
1992 if ((match_found) && (IFILT(tmp).filter_if_output)) {
1993 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1994 &ifp,
1995 &m);
1996 if (retval) {
1997 if (retval == EJUSTRETURN)
1998 return 0;
1999 else {
2000 m_freem(m);
2001 return retval;
2002 }
2003 }
2004
2005 }
2006
2007 if (ifp != orig_ifp)
2008 break;
2009
2010 if (from_id == tmp->filter_id)
2011 match_found = 1;
2012 }
2013 }
2014 }
2015
2016 /*
2017 * Finally, call the driver.
2018 */
2019
2020 retval = (*ifp->if_output)(ifp, m);
2021 dlil_stats.inject_if_out2++;
2022 if ((retval == 0) || (retval == EJUSTRETURN))
2023 return 0;
2024 else
2025 return retval;
2026 }
2027
2028 static
2029 int dlil_recycle_ioctl(struct ifnet *ifnet_ptr, u_long ioctl_code, void *ioctl_arg)
2030 {
2031
2032 return EOPNOTSUPP;
2033 }
2034
2035 static
2036 int dlil_recycle_output(struct ifnet *ifnet_ptr, struct mbuf *m)
2037 {
2038
2039 m_freem(m);
2040 return 0;
2041 }
2042
2043 static
2044 int dlil_recycle_free(struct ifnet *ifnet_ptr)
2045 {
2046 return 0;
2047 }
2048
2049 static
2050 int dlil_recycle_set_bpf_tap(struct ifnet *ifp, int mode,
2051 int (*bpf_callback)(struct ifnet *, struct mbuf *))
2052 {
2053 /* XXX not sure what to do here */
2054 return 0;
2055 }
2056
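/*
 * dlil_if_acquire hands back a dlil_ifnet for the given family: a
 * detached interface with a matching unique id is reused (and marked
 * IFEF_INUSE | IFEF_REUSE), EBUSY is returned when a matching interface
 * is still in use and a unique id was supplied, and a fresh dlil_ifnet
 * is allocated otherwise.
 */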
2057 int dlil_if_acquire(u_long family, void *uniqueid, size_t uniqueid_len,
2058 struct ifnet **ifp)
2059 {
2060 struct ifnet *ifp1 = NULL;
2061 struct dlil_ifnet *dlifp1 = NULL;
2062 int s, ret = 0;
2063 boolean_t funnel_state;
2064
2065 funnel_state = thread_funnel_set(network_flock, TRUE);
2066 s = splnet();
2067
2068 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2069
2070 ifp1 = (struct ifnet *)dlifp1;
2071
2072 if (ifp1->if_family == family) {
2073
2074 /* same uniqueid and same len or no unique id specified */
2075 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2076 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2077
2078 /* check for matching interface in use */
2079 if (ifp1->if_eflags & IFEF_INUSE) {
2080 if (uniqueid_len) {
2081 ret = EBUSY;
2082 goto end;
2083 }
2084 }
2085 else {
2086
2087                     ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2088 *ifp = ifp1;
2089 goto end;
2090 }
2091 }
2092 }
2093 }
2094
2095 /* no interface found, allocate a new one */
2096 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2097 if (dlifp1 == 0) {
2098 ret = ENOMEM;
2099 goto end;
2100 }
2101
2102 bzero(dlifp1, sizeof(*dlifp1));
2103
2104 if (uniqueid_len) {
2105 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2106 if (dlifp1->if_uniqueid == 0) {
2107 FREE(dlifp1, M_NKE);
2108 ret = ENOMEM;
2109 goto end;
2110 }
2111 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2112 dlifp1->if_uniqueid_len = uniqueid_len;
2113 }
2114
2115 ifp1 = (struct ifnet *)dlifp1;
2116 ifp1->if_eflags |= IFEF_INUSE;
2117
2118 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2119
2120 *ifp = ifp1;
2121
2122 end:
2123
2124 splx(s);
2125 thread_funnel_set(network_flock, funnel_state);
2126 return ret;
2127 }
2128
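/*
 * dlil_if_release marks an interface as no longer in use and points its
 * entry points at the dlil_recycle_* stubs so that late callers are
 * harmless.  The dlil_ifnet stays on dlil_ifnet_head so a later
 * dlil_if_acquire can reuse it.
 */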
2129 void dlil_if_release(struct ifnet *ifp)
2130 {
2131 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2132 int s;
2133 boolean_t funnel_state;
2134
2135 funnel_state = thread_funnel_set(network_flock, TRUE);
2136 s = splnet();
2137
2138 ifp->if_eflags &= ~IFEF_INUSE;
2139 ifp->if_ioctl = dlil_recycle_ioctl;
2140 ifp->if_output = dlil_recycle_output;
2141 ifp->if_free = dlil_recycle_free;
2142 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2143
2144 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2145 ifp->if_name = dlifp->if_namestorage;
2146
2147 splx(s);
2148 thread_funnel_set(network_flock, funnel_state);
2149 }
2150