1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1999 Apple Computer, Inc.
24 *
25 * Data Link Interface Layer
26 * Author: Ted Walker
27 */
28
29
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/mbuf.h>
36 #include <sys/socket.h>
37 #include <net/if_dl.h>
38 #include <net/if.h>
39 #include <net/if_var.h>
40 #include <net/dlil.h>
41 #include <sys/kern_event.h>
42 #include <sys/kdebug.h>
43 #include <string.h>
44
45 #include <kern/task.h>
46 #include <kern/thread.h>
47 #include <kern/sched_prim.h>
48
49 #include <net/netisr.h>
50 #include <net/if_types.h>
51
52 #include <machine/machine_routines.h>
53
54 #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0)
55 #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2)
56 #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8))
57 #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8))
58 #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8))
59
60
61 #define MAX_DL_TAGS 16
62 #define MAX_DLIL_FILTERS 16
63 #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */
64 #define MAX_LINKADDR 4 /* LONGWORDS */
65 #define M_NKE M_IFADDR
66
67 #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter
68 #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter
69
70 struct dl_tag_str {
71 struct ifnet *ifp;
72 struct if_proto *proto;
73 struct dlil_filterq_head *pr_flt_head;
74 };
75
76
77 struct dlil_ifnet {
78 /* ifnet and drvr_ext are used by the stack and drivers
79 drvr_ext extends the public ifnet and must follow dl_if */
80 struct ifnet dl_if; /* public ifnet */
81 void *drvr_ext[4]; /* driver reserved (e.g. arpcom extension for enet) */
82
83 /* dlil private fields */
84 TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnets are linked together */
85 /* this is not the ifnet list */
86 void *if_uniqueid; /* unique id identifying the interface */
87 size_t if_uniqueid_len;/* length of the unique id */
88 char if_namestorage[IFNAMSIZ]; /* interface name storage for detached interfaces */
89 };
90
91 struct dlil_stats_str {
92 int inject_pr_in1;
93 int inject_pr_in2;
94 int inject_pr_out1;
95 int inject_pr_out2;
96 int inject_if_in1;
97 int inject_if_in2;
98 int inject_if_out1;
99 int inject_if_out2;
100 };
101
102
103 struct dlil_filter_id_str {
104 int type;
105 struct dlil_filterq_head *head;
106 struct dlil_filterq_entry *filter_ptr;
107 struct ifnet *ifp;
108 struct if_proto *proto;
109 };
110
111
112
113 struct if_family_str {
114 TAILQ_ENTRY(if_family_str) if_fam_next;
115 u_long if_family;
116 int refcnt;
117 int flags;
118
119 #define DLIL_SHUTDOWN 1
120
121 int (*add_if)(struct ifnet *ifp);
122 int (*del_if)(struct ifnet *ifp);
123 int (*init_if)(struct ifnet *ifp);
124 int (*add_proto)(struct ddesc_head_str *demux_desc_head,
125 struct if_proto *proto, u_long dl_tag);
126 int (*del_proto)(struct if_proto *proto, u_long dl_tag);
127 int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data);
128 int (*shutdown)();
129 };
130
131
132
133 struct dlil_stats_str dlil_stats;
134
135 static
136 struct dlil_filter_id_str *dlil_filters;
137
138 static
139 struct dl_tag_str *dl_tag_array;
140
141 static
142 TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head;
143
144 static
145 TAILQ_HEAD(, if_family_str) if_family_head;
146
147 static int ifnet_inited = 0;
148 static u_long dl_tag_nb = 0;
149 static u_long dlil_filters_nb = 0;
150
151 int dlil_initialized = 0;
152 decl_simple_lock_data(, dlil_input_lock)
153 int dlil_input_thread_wakeup = 0;
154 int dlil_expand_mcl;
155 static struct mbuf *dlil_input_mbuf_head = NULL;
156 static struct mbuf *dlil_input_mbuf_tail = NULL;
157 #if NLOOP > 1
158 #error dlil_input() needs to be revised to support more than one loopback interface
159 #endif
160 static struct mbuf *dlil_input_loop_head = NULL;
161 static struct mbuf *dlil_input_loop_tail = NULL;
162
163 static void dlil_input_thread(void);
164 extern void run_netisr(void);
165 extern void bpfdetach(struct ifnet*);
166
167
168 /*
169 * Internal functions.
170 */
171
172 static
173 struct if_family_str *find_family_module(u_long if_family)
174 {
175 struct if_family_str *mod = NULL;
176
177 TAILQ_FOREACH(mod, &if_family_head, if_fam_next) {
178 if (mod->if_family == (if_family & 0xffff))
179 break;
180 }
181
182 return mod;
183 }
184
185
186 /*
187 * Public functions.
188 */
189
190 struct ifnet *ifbyfamily(u_long family, short unit)
191 {
192 struct ifnet *ifp;
193
194 TAILQ_FOREACH(ifp, &ifnet, if_link)
195 if ((family == ifp->if_family) &&
196 (ifp->if_unit == unit))
197 return ifp;
198
199 return 0;
200 }
201
202 struct if_proto *dlttoproto(u_long dl_tag)
203 {
204 if (dl_tag < dl_tag_nb && dl_tag_array[dl_tag].ifp)
205 return dl_tag_array[dl_tag].proto;
206 return 0;
207 }
208
209
210 static int dlil_ifp_proto_count(struct ifnet * ifp)
211 {
212 int count = 0;
213 struct if_proto * proto;
214 struct dlil_proto_head * tmp;
215
216 tmp = (struct dlil_proto_head *) &ifp->proto_head;
217
218 TAILQ_FOREACH(proto, tmp, next)
219 count++;
220
221 return count;
222 }
223
224 u_long ifptodlt(struct ifnet *ifp, u_long proto_family)
225 {
226 struct if_proto *proto;
227 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
228
229
230 TAILQ_FOREACH(proto, tmp, next)
231 if (proto->protocol_family == proto_family)
232 return proto->dl_tag;
233
234 return 0;
235 }
236
237
238 int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag)
239 {
240 struct ifnet *ifp;
241
242 ifp = ifbyfamily(if_family, unit);
243 if (!ifp)
244 return ENOENT;
245
246 *dl_tag = ifptodlt(ifp, proto_family);
247 if (*dl_tag == 0)
248 return EPROTONOSUPPORT;
249 else
250 return 0;
251 }
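
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * a protocol module that knows its interface family, unit and protocol
 * family can recover the dl_tag handed out by dlil_attach_protocol().
 * The family/protocol constants below are examples only.
 *
 *	u_long tag;
 *	int err;
 *
 *	err = dlil_find_dltag(APPLE_IF_FAM_ETHERNET, 0, PF_INET, &tag);
 *	if (err == 0)
 *		;	// tag may now be passed to dlil_output()/dlil_ioctl()
 *	else if (err == ENOENT)
 *		;	// no interface with that family/unit
 *	else
 *		;	// EPROTONOSUPPORT: interface exists, protocol not attached
 */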
252
253
254 void dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code,
255 struct net_event_data *event_data, u_long event_data_len)
256 {
257 struct net_event_data ev_data;
258 struct kev_msg ev_msg;
259
260 /*
261 * a net event always starts with a net_event_data structure
262 * but the caller can generate a simple net event or
263 * provide a longer event structure to post
264 */
265
266 ev_msg.vendor_code = KEV_VENDOR_APPLE;
267 ev_msg.kev_class = KEV_NETWORK_CLASS;
268 ev_msg.kev_subclass = event_subclass;
269 ev_msg.event_code = event_code;
270
271 if (event_data == 0) {
272 event_data = &ev_data;
273 event_data_len = sizeof(struct net_event_data);
274 }
275
276 strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ);
277 event_data->if_family = ifp->if_family;
278 event_data->if_unit = (unsigned long) ifp->if_unit;
279
280 ev_msg.dv[0].data_length = event_data_len;
281 ev_msg.dv[0].data_ptr = event_data;
282 ev_msg.dv[1].data_length = 0;
283
284 kev_post_msg(&ev_msg);
285 }
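
/*
 * Illustrative sketch (editor's addition): posting a simple event.
 * Passing 0 for event_data makes dlil_post_msg() supply a plain
 * net_event_data carrying just the interface name/family/unit, which is
 * what dlil_if_attach() itself does:
 *
 *	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
 *
 * A caller with extra payload embeds a net_event_data at the start of a
 * larger structure (as done below with struct kev_dl_proto_data) and
 * passes that structure and its full size instead.
 */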
286
287
288
289 void
290 dlil_init()
291 {
292 int i;
293
294 TAILQ_INIT(&dlil_ifnet_head);
295 TAILQ_INIT(&if_family_head);
296
297 // create the dl tag array
298 MALLOC(dl_tag_array, void *, sizeof(struct dl_tag_str) * MAX_DL_TAGS, M_NKE, M_WAITOK);
299 if (dl_tag_array == 0) {
300 printf("dlil_init tags array allocation failed\n");
301 return; //very bad
302 }
303 bzero(dl_tag_array, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
304 dl_tag_nb = MAX_DL_TAGS;
305
306 // create the dl filters array
307 MALLOC(dlil_filters, void *, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS, M_NKE, M_WAITOK);
308 if (dlil_filters == 0) {
309 printf("dlil_init filters array allocation failed\n");
310 return; //very bad
311 }
312 bzero(dlil_filters, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS);
313 dlil_filters_nb = MAX_DLIL_FILTERS;
314
315 bzero(&dlil_stats, sizeof(dlil_stats));
316
317 simple_lock_init(&dlil_input_lock);
318
319 /*
320 * Start up the dlil input thread once everything is initialized
321 */
322 (void) kernel_thread(kernel_task, dlil_input_thread);
323 }
324
325 u_long get_new_filter_id()
326 {
327 u_long i;
328 u_char *p;
329
330 for (i=1; i < dlil_filters_nb; i++)
331 if (dlil_filters[i].type == 0)
332 break;
333
334 if (i == dlil_filters_nb) {
335 // expand the filters array by MAX_DLIL_FILTERS
336 MALLOC(p, u_char *, sizeof(struct dlil_filter_id_str) * (dlil_filters_nb + MAX_DLIL_FILTERS), M_NKE, M_WAITOK);
337 if (p == 0)
338 return 0;
339
340 bcopy(dlil_filters, p, sizeof(struct dlil_filter_id_str) * dlil_filters_nb);
341 bzero(p + sizeof(struct dlil_filter_id_str) * dlil_filters_nb, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS);
342 dlil_filters_nb += MAX_DLIL_FILTERS;
343 FREE(dlil_filters, M_NKE);
344 dlil_filters = (struct dlil_filter_id_str *)p;
345 }
346
347 return i;
348 }
349
350
351 int dlil_attach_interface_filter(struct ifnet *ifp,
352 struct dlil_if_flt_str *if_filter,
353 u_long *filter_id,
354 int insertion_point)
355 {
356 int s;
357 int retval = 0;
358 struct dlil_filterq_entry *tmp_ptr;
359 struct dlil_filterq_entry *if_filt;
360 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
361 boolean_t funnel_state;
362
363 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
364 if (tmp_ptr == NULL)
365 return (ENOBUFS);
366
367 bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter,
368 sizeof(struct dlil_if_flt_str));
369
370 funnel_state = thread_funnel_set(network_flock, TRUE);
371 s = splnet();
372
373 *filter_id = get_new_filter_id();
374 if (*filter_id == 0) {
375 FREE(tmp_ptr, M_NKE);
376 retval = ENOMEM;
377 goto end;
378 }
379
380 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
381 dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head;
382 dlil_filters[*filter_id].type = DLIL_IF_FILTER;
383 dlil_filters[*filter_id].ifp = ifp;
384 tmp_ptr->filter_id = *filter_id;
385 tmp_ptr->type = DLIL_IF_FILTER;
386
387 if (insertion_point != DLIL_LAST_FILTER) {
388 TAILQ_FOREACH(if_filt, fhead, que)
389 if (insertion_point == if_filt->filter_id) {
390 TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que);
391 break;
392 }
393 }
394 else
395 TAILQ_INSERT_TAIL(fhead, tmp_ptr, que);
396
397 end:
398 splx(s);
399 thread_funnel_set(network_flock, funnel_state);
400 return retval;
401 }
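
/*
 * Illustrative sketch (editor's addition): a minimal NKE attaching an
 * interface filter.  Only the callbacks the filter needs have to be set;
 * my_cookie and my_if_input are hypothetical names.
 *
 *	struct dlil_if_flt_str flt;
 *	u_long flt_id;
 *
 *	bzero(&flt, sizeof(flt));
 *	flt.cookie          = my_cookie;
 *	flt.filter_if_input = my_if_input;	// may return EJUSTRETURN to swallow a packet
 *	if (dlil_attach_interface_filter(ifp, &flt, &flt_id, DLIL_LAST_FILTER) == 0) {
 *		...
 *		dlil_detach_filter(flt_id);	// on unload
 *	}
 */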
402
403
404 int dlil_attach_protocol_filter(u_long dl_tag,
405 struct dlil_pr_flt_str *pr_filter,
406 u_long *filter_id,
407 int insertion_point)
408 {
409 struct dlil_filterq_entry *tmp_ptr, *pr_filt;
410 int s;
411 int retval = 0;
412 boolean_t funnel_state;
413
414 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0)
415 return (ENOENT);
416
417 MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK);
418 if (tmp_ptr == NULL)
419 return (ENOBUFS);
420
421 bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter,
422 sizeof(struct dlil_pr_flt_str));
423
424 funnel_state = thread_funnel_set(network_flock, TRUE);
425 s = splnet();
426
427 *filter_id = get_new_filter_id();
428 if (*filter_id == 0) {
429 FREE(tmp_ptr, M_NKE);
430 retval = ENOMEM;
431 goto end;
432 }
433
434 dlil_filters[*filter_id].filter_ptr = tmp_ptr;
435 dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head;
436 dlil_filters[*filter_id].type = DLIL_PR_FILTER;
437 dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto;
438 dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp;
439 tmp_ptr->filter_id = *filter_id;
440 tmp_ptr->type = DLIL_PR_FILTER;
441
442 if (insertion_point != DLIL_LAST_FILTER) {
443 TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que)
444 if (insertion_point == pr_filt->filter_id) {
445 TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que);
446 break;
447 }
448 }
449 else
450 TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que);
451
452 end:
453 splx(s);
454 thread_funnel_set(network_flock, funnel_state);
455 return retval;
456 }
457
458
459 int
460 dlil_detach_filter(u_long filter_id)
461 {
462 struct dlil_filter_id_str *flt;
463 int s, retval = 0;
464 boolean_t funnel_state;
465
466 funnel_state = thread_funnel_set(network_flock, TRUE);
467 s = splnet();
468
469 if (filter_id >= dlil_filters_nb || dlil_filters[filter_id].type == 0) {
470 retval = ENOENT;
471 goto end;
472 }
473
474 flt = &dlil_filters[filter_id];
475
476 if (flt->type == DLIL_IF_FILTER) {
477 if (IFILT(flt->filter_ptr).filter_detach)
478 (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie);
479 }
480 else {
481 if (flt->type == DLIL_PR_FILTER) {
482 if (PFILT(flt->filter_ptr).filter_detach)
483 (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie);
484 }
485 }
486
487 TAILQ_REMOVE(flt->head, flt->filter_ptr, que);
488 FREE(flt->filter_ptr, M_NKE);
489 flt->type = 0;
490
491 end:
492 splx(s);
493 thread_funnel_set(network_flock, funnel_state);
494 return retval;
495 }
496
497
498 void
499 dlil_input_thread_continue(void)
500 {
501 while (1) {
502 struct mbuf *m, *m_loop;
503 int expand_mcl;
504
505 usimple_lock(&dlil_input_lock);
506 m = dlil_input_mbuf_head;
507 dlil_input_mbuf_head = NULL;
508 dlil_input_mbuf_tail = NULL;
509 m_loop = dlil_input_loop_head;
510 dlil_input_loop_head = NULL;
511 dlil_input_loop_tail = NULL;
512 usimple_unlock(&dlil_input_lock);
513
514 MBUF_LOCK();
515 expand_mcl = dlil_expand_mcl;
516 dlil_expand_mcl = 0;
517 MBUF_UNLOCK();
518 if (expand_mcl) {
519 caddr_t p;
520 MCLALLOC(p, M_WAIT);
521 if (p) MCLFREE(p);
522 }
523
524 /*
525 * NOTE warning %%% attention !!!!
526 * We should think about adding thread starvation safeguards when
527 * we deal with long chains of packets.
528 */
529 while (m) {
530 struct mbuf *m0 = m->m_nextpkt;
531 void *header = m->m_pkthdr.header;
532
533 m->m_nextpkt = NULL;
534 m->m_pkthdr.header = NULL;
535 (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header);
536 m = m0;
537 }
538 m = m_loop;
539 while (m) {
540 struct mbuf *m0 = m->m_nextpkt;
541 void *header = m->m_pkthdr.header;
542 struct ifnet *ifp = &loif[0];
543
544 m->m_nextpkt = NULL;
545 m->m_pkthdr.header = NULL;
546 (void) dlil_input_packet(ifp, m, header);
547 m = m0;
548 }
549
550 if (netisr != 0)
551 run_netisr();
552
553 if (dlil_input_mbuf_head == NULL &&
554 dlil_input_loop_head == NULL &&
555 netisr == 0) {
556 assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
557 (void) thread_block(dlil_input_thread_continue);
558 /* NOTREACHED */
559 }
560 }
561 }
562
563 void dlil_input_thread(void)
564 {
565 register thread_t self = current_thread();
566 extern void stack_privilege(thread_t thread);
567
568 /*
569 * Make sure that this thread
570 * always has a kernel stack, and
571 * bind it to the master cpu.
572 */
573 stack_privilege(self);
574 ml_thread_policy(current_thread(), MACHINE_GROUP,
575 (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR));
576
577 /* The dlil thread is always funneled */
578 thread_funnel_set(network_flock, TRUE);
579 dlil_initialized = 1;
580 dlil_input_thread_continue();
581 }
582
583 int
584 dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail)
585 {
586 /* WARNING
587 * Because of looped-back multicast we cannot stuff the ifp in
588 * the rcvif of the packet header: loopback has its own dlil
589 * input queue
590 */
591
592 usimple_lock(&dlil_input_lock);
593 if (ifp->if_type != IFT_LOOP) {
594 if (dlil_input_mbuf_head == NULL)
595 dlil_input_mbuf_head = m_head;
596 else if (dlil_input_mbuf_tail != NULL)
597 dlil_input_mbuf_tail->m_nextpkt = m_head;
598 dlil_input_mbuf_tail = m_tail ? m_tail : m_head;
599 } else {
600 if (dlil_input_loop_head == NULL)
601 dlil_input_loop_head = m_head;
602 else if (dlil_input_loop_tail != NULL)
603 dlil_input_loop_tail->m_nextpkt = m_head;
604 dlil_input_loop_tail = m_tail ? m_tail : m_head;
605 }
606 usimple_unlock(&dlil_input_lock);
607
608 wakeup((caddr_t)&dlil_input_thread_wakeup);
609
610 return 0;
611 }
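
/*
 * Illustrative sketch (editor's addition): a driver's receive path hands
 * completed packets to DLIL.  Each packet must carry its receiving
 * interface and saved link-layer header in the packet header; several
 * packets may be chained through m_nextpkt and queued with one call.
 * m_head/m_tail/frame_header are hypothetical driver variables; m_tail
 * may be NULL when a single packet is passed.
 *
 *	m->m_pkthdr.rcvif  = ifp;
 *	m->m_pkthdr.header = frame_header;
 *	dlil_input(ifp, m_head, m_tail);	// wakes the dlil input thread
 */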
612
613 int
614 dlil_input_packet(struct ifnet *ifp, struct mbuf *m,
615 char *frame_header)
616 {
617 struct ifnet *orig_ifp = 0;
618 struct dlil_filterq_entry *tmp;
619 int retval;
620 struct if_proto *ifproto = 0;
621 struct if_proto *proto;
622 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
623
624
625 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0);
626
627 /*
628 * Run interface filters
629 */
630
631 while (orig_ifp != ifp) {
632 orig_ifp = ifp;
633
634 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
635 if (IFILT(tmp).filter_if_input) {
636 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
637 &ifp,
638 &m,
639 &frame_header);
640 if (retval) {
641 if (retval == EJUSTRETURN)
642 return 0;
643 else {
644 m_freem(m);
645 return retval;
646 }
647 }
648 }
649
650 if (ifp != orig_ifp)
651 break;
652 }
653 }
654
655 ifp->if_lastchange = time;
656
657 /*
658 * Call family demux module. If the demux module finds a match
659 * for the frame it will fill-in the ifproto pointer.
660 */
661
662 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
663
664 if (m->m_flags & (M_BCAST|M_MCAST))
665 ifp->if_imcasts++;
666
667 if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) {
668 /*
669 * No match was found, look for any offers.
670 */
671 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
672 TAILQ_FOREACH(proto, tmp, next) {
673 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
674 ifproto = proto;
675 retval = 0;
676 break;
677 }
678 }
679 }
680
681 if (retval) {
682 if (retval != EJUSTRETURN) {
683 m_freem(m);
684 return retval;
685 }
686 else
687 return 0;
688 }
689 else
690 if (ifproto == 0) {
691 printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n");
692 m_freem(m);
693 return 0;
694 }
695
696 /*
697 * Call any attached protocol filters.
698 */
699
700 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
701 if (PFILT(tmp).filter_dl_input) {
702 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
703 &m,
704 &frame_header,
705 &ifp);
706
707 if (retval) {
708 if (retval == EJUSTRETURN)
709 return 0;
710 else {
711 m_freem(m);
712 return retval;
713 }
714 }
715 }
716 }
717
718
719
720 retval = (*ifproto->dl_input)(m, frame_header,
721 ifp, ifproto->dl_tag,
722 TRUE);
723
724 if (retval == EJUSTRETURN)
725 retval = 0;
726 else
727 if (retval)
728 m_freem(m);
729
730 KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0);
731 return retval;
732 }
733
734
735
736 void ether_input(ifp, eh, m)
737 struct ifnet *ifp;
738 struct ether_header *eh;
739 struct mbuf *m;
740
741 {
742 kprintf("Someone is calling ether_input!!\n");
743
744 dlil_input(ifp, m, NULL);
745 }
746
747
748 int
749 dlil_event(struct ifnet *ifp, struct kern_event_msg *event)
750 {
751 struct dlil_filterq_entry *filt;
752 int retval = 0;
753 struct ifnet *orig_ifp = 0;
754 struct if_proto *proto;
755 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
756 struct kev_msg kev_msg;
757 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
758 boolean_t funnel_state;
759
760
761 funnel_state = thread_funnel_set(network_flock, TRUE);
762
763 while (orig_ifp != ifp) {
764 orig_ifp = ifp;
765
766 TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) {
767 if (IFILT(filt).filter_if_event) {
768 retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie,
769 &ifp,
770 &event);
771
772 if (retval) {
773 (void) thread_funnel_set(network_flock, funnel_state);
774 if (retval == EJUSTRETURN)
775 return 0;
776 else
777 return retval;
778 }
779 }
780
781 if (ifp != orig_ifp)
782 break;
783 }
784 }
785
786
787 /*
788 * Call Interface Module event hook, if any.
789 */
790
791 if (ifp->if_event) {
792 retval = ifp->if_event(ifp, (caddr_t) event);
793
794 if (retval) {
795 (void) thread_funnel_set(network_flock, funnel_state);
796
797 if (retval == EJUSTRETURN)
798 return 0;
799 else
800 return retval;
801 }
802 }
803
804 /*
805 * Call dl_event entry point for all protocols attached to this interface
806 */
807
808 TAILQ_FOREACH(proto, tmp, next) {
809 /*
810 * Call any attached protocol filters.
811 */
812
813 TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) {
814 if (PFILT(filt).filter_dl_event) {
815 retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie,
816 event);
817
818 if (retval) {
819 (void) thread_funnel_set(network_flock, funnel_state);
820 if (retval == EJUSTRETURN)
821 return 0;
822 else
823 return retval;
824 }
825 }
826 }
827
828
829 /*
830 * Finally, call the dl_event entry point (if any)
831 */
832
833 if (proto->dl_event)
834 retval = (*proto->dl_event)(event, proto->dl_tag);
835
836 if (retval == EJUSTRETURN) {
837 (void) thread_funnel_set(network_flock, funnel_state);
838 return 0;
839 }
840 }
841
842
843 /*
844 * Now, post this event to the Kernel Event message queue
845 */
846
847 kev_msg.vendor_code = event->vendor_code;
848 kev_msg.kev_class = event->kev_class;
849 kev_msg.kev_subclass = event->kev_subclass;
850 kev_msg.event_code = event->event_code;
851 kev_msg.dv[0].data_ptr = &event->event_data[0];
852 kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE;
853 kev_msg.dv[1].data_length = 0;
854
855 kev_post_msg(&kev_msg);
856
857 (void) thread_funnel_set(network_flock, funnel_state);
858 return 0;
859 }
860
861
862
863 int
864 dlil_output(u_long dl_tag,
865 struct mbuf *m,
866 caddr_t route,
867 struct sockaddr *dest,
868 int raw
869 )
870 {
871 char *frame_type;
872 char *dst_linkaddr;
873 struct ifnet *orig_ifp = 0;
874 struct ifnet *ifp;
875 struct if_proto *proto;
876 struct dlil_filterq_entry *tmp;
877 int retval = 0;
878 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
879 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
880 struct dlil_filterq_head *fhead;
881
882 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0);
883
884 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
885 m_freem(m);
886 return ENOENT;
887 }
888
889 ifp = dl_tag_array[dl_tag].ifp;
890 proto = dl_tag_array[dl_tag].proto;
891
892 frame_type = frame_type_buffer;
893 dst_linkaddr = dst_linkaddr_buffer;
894
895 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
896
897 if ((raw == 0) && (proto->dl_pre_output)) {
898 retval = (*proto->dl_pre_output)(ifp, &m, dest, route,
899 frame_type, dst_linkaddr, dl_tag);
900 if (retval) {
901 if (retval == EJUSTRETURN)
902 return 0;
903 else {
904 m_freem(m);
905 return retval;
906 }
907 }
908 }
909
910 /*
911 * Run any attached protocol filters.
912 */
913
914 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
915 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
916 if (PFILT(tmp).filter_dl_output) {
917 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
918 &m, &ifp, &dest, dst_linkaddr, frame_type);
919 if (retval) {
920 if (retval == EJUSTRETURN)
921 return 0;
922 else {
923 m_freem(m);
924 return retval;
925 }
926 }
927 }
928 }
929 }
930
931
932 /*
933 * Call framing module
934 */
935 if ((raw == 0) && (ifp->if_framer)) {
936 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
937 if (retval) {
938 if (retval == EJUSTRETURN)
939 return 0;
940 else
941 {
942 m_freem(m);
943 return retval;
944 }
945 }
946 }
947
948 #if BRIDGE
949 if (do_bridge) {
950 struct mbuf *m0 = m ;
951 struct ether_header *eh = mtod(m, struct ether_header *);
952
953 if (m->m_pkthdr.rcvif)
954 m->m_pkthdr.rcvif = NULL ;
955 ifp = bridge_dst_lookup(eh);
956 bdg_forward(&m0, ifp);
957 if (m0)
958 m_freem(m0);
959
960 return 0;
961 }
962 #endif
963
964
965 /*
966 * Let interface filters (if any) do their thing ...
967 */
968
969 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
970 if (TAILQ_EMPTY(fhead) == 0) {
971 while (orig_ifp != ifp) {
972 orig_ifp = ifp;
973 TAILQ_FOREACH(tmp, fhead, que) {
974 if (IFILT(tmp).filter_if_output) {
975 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
976 &ifp,
977 &m);
978 if (retval) {
979 if (retval == EJUSTRETURN)
980 return 0;
981 else {
982 m_freem(m);
983 return retval;
984 }
985 }
986
987 }
988
989 if (ifp != orig_ifp)
990 break;
991 }
992 }
993 }
994
995 /*
996 * Finally, call the driver.
997 */
998
999 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0);
1000 retval = (*ifp->if_output)(ifp, m);
1001 KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0);
1002
1003 KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0);
1004
1005 if ((retval == 0) || (retval == EJUSTRETURN))
1006 return 0;
1007 else
1008 return retval;
1009 }
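
/*
 * Illustrative sketch (editor's addition): a protocol transmits with the
 * dl_tag it obtained at attach time.  With raw == 0 the protocol's
 * dl_pre_output hook and the family framer run; with raw != 0 the mbuf
 * is assumed to already hold a complete frame.  'rt' and 'dst' are
 * hypothetical route/sockaddr variables.
 *
 *	err = dlil_output(dl_tag, m, (caddr_t)rt, dst, 0);
 *
 * On any error other than EJUSTRETURN the mbuf has already been freed,
 * so the caller must not touch it again.
 */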
1010
1011
1012 int
1013 dlil_ioctl(u_long proto_fam,
1014 struct ifnet *ifp,
1015 u_long ioctl_code,
1016 caddr_t ioctl_arg)
1017 {
1018 struct dlil_filterq_entry *tmp;
1019 struct dlil_filterq_head *fhead;
1020 int retval = EOPNOTSUPP;
1021 int retval2 = EOPNOTSUPP;
1022 u_long dl_tag;
1023 struct if_family_str *if_family;
1024
1025
1026 if (proto_fam) {
1027 retval = dlil_find_dltag(ifp->if_family, ifp->if_unit,
1028 proto_fam, &dl_tag);
1029
1030 if (retval == 0) {
1031 if (dl_tag_array[dl_tag].ifp != ifp)
1032 return ENOENT;
1033
1034 /*
1035 * Run any attached protocol filters.
1036 */
1037 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1038 if (PFILT(tmp).filter_dl_ioctl) {
1039 retval =
1040 (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie,
1041 dl_tag_array[dl_tag].ifp,
1042 ioctl_code,
1043 ioctl_arg);
1044
1045 if (retval) {
1046 if (retval == EJUSTRETURN)
1047 return 0;
1048 else
1049 return retval;
1050 }
1051 }
1052 }
1053
1054 if (dl_tag_array[dl_tag].proto->dl_ioctl)
1055 retval =
1056 (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag,
1057 dl_tag_array[dl_tag].ifp,
1058 ioctl_code,
1059 ioctl_arg);
1060 else
1061 retval = EOPNOTSUPP;
1062 }
1063 else
1064 retval = 0;
1065 }
1066
1067 if ((retval) && (retval != EOPNOTSUPP)) {
1068 if (retval == EJUSTRETURN)
1069 return 0;
1070 else
1071 return retval;
1072 }
1073
1074
1075 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1076 TAILQ_FOREACH(tmp, fhead, que) {
1077 if (IFILT(tmp).filter_if_ioctl) {
1078 retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp,
1079 ioctl_code, ioctl_arg);
1080 if (retval2) {
1081 if (retval2 == EJUSTRETURN)
1082 return 0;
1083 else
1084 return retval2;
1085 }
1086 }
1087 }
1088
1089
1090 if_family = find_family_module(ifp->if_family);
1091 if ((if_family) && (if_family->ifmod_ioctl)) {
1092 retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
1093
1094 if ((retval2) && (retval2 != EOPNOTSUPP)) {
1095 if (retval2 == EJUSTRETURN)
1096 return 0;
1097 else
1098 return retval2;
1099 }
1100
1101 if (retval == EOPNOTSUPP)
1102 retval = retval2;
1103 }
1104
1105 if (ifp->if_ioctl)
1106 retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
1107
1108 if (retval == EOPNOTSUPP)
1109 return retval2;
1110 else {
1111 if (retval2 == EOPNOTSUPP)
1112 return 0;
1113 else
1114 return retval2;
1115 }
1116 }
1117
1118
1119 int
1120 dlil_attach_protocol(struct dlil_proto_reg_str *proto,
1121 u_long *dl_tag)
1122 {
1123 struct ifnet *ifp;
1124 struct if_proto *ifproto;
1125 u_long i;
1126 struct if_family_str *if_family;
1127 struct dlil_proto_head *tmp;
1128 struct kev_dl_proto_data ev_pr_data;
1129 int s, retval = 0;
1130 boolean_t funnel_state;
1131 u_char *p;
1132
1133 if ((proto->protocol_family == 0) || (proto->interface_family == 0))
1134 return EINVAL;
1135
1136 funnel_state = thread_funnel_set(network_flock, TRUE);
1137 s = splnet();
1138 if_family = find_family_module(proto->interface_family);
1139 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1140 kprintf("dlil_attach_protocol -- no interface family module %d",
1141 proto->interface_family);
1142 retval = ENOENT;
1143 goto end;
1144 }
1145
1146 ifp = ifbyfamily(proto->interface_family, proto->unit_number);
1147 if (!ifp) {
1148 kprintf("dlil_attach_protocol -- no such interface %d unit %d\n",
1149 proto->interface_family, proto->unit_number);
1150 retval = ENOENT;
1151 goto end;
1152 }
1153
1154 if (dlil_find_dltag(proto->interface_family, proto->unit_number,
1155 proto->protocol_family, &i) == 0) {
1156 retval = EEXIST;
1157 goto end;
1158 }
1159
1160 for (i=1; i < dl_tag_nb; i++)
1161 if (dl_tag_array[i].ifp == 0)
1162 break;
1163
1164 if (i == dl_tag_nb) {
1165 // expand the tag array by MAX_DL_TAGS
1166 MALLOC(p, u_char *, sizeof(struct dl_tag_str) * (dl_tag_nb + MAX_DL_TAGS), M_NKE, M_WAITOK);
1167 if (p == 0) {
1168 retval = ENOBUFS;
1169 goto end;
1170 }
1171 bcopy(dl_tag_array, p, sizeof(struct dl_tag_str) * dl_tag_nb);
1172 bzero(p + sizeof(struct dl_tag_str) * dl_tag_nb, sizeof(struct dl_tag_str) * MAX_DL_TAGS);
1173 dl_tag_nb += MAX_DL_TAGS;
1174 FREE(dl_tag_array, M_NKE);
1175 dl_tag_array = (struct dl_tag_str *)p;
1176 }
1177
1178 /*
1179 * Allocate and init a new if_proto structure
1180 */
1181
1182 ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK);
1183 if (!ifproto) {
1184 printf("ERROR - DLIL failed if_proto allocation\n");
1185 retval = ENOMEM;
1186 goto end;
1187 }
1188
1189 bzero(ifproto, sizeof(struct if_proto));
1190
1191 dl_tag_array[i].ifp = ifp;
1192 dl_tag_array[i].proto = ifproto;
1193 dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head;
1194 ifproto->dl_tag = i;
1195 *dl_tag = i;
1196
1197 if (proto->default_proto) {
1198 if (ifp->if_data.default_proto == 0)
1199 ifp->if_data.default_proto = i;
1200 else
1201 printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n");
1202 }
1203
1204 ifproto->protocol_family = proto->protocol_family;
1205 ifproto->dl_input = proto->input;
1206 ifproto->dl_pre_output = proto->pre_output;
1207 ifproto->dl_event = proto->event;
1208 ifproto->dl_offer = proto->offer;
1209 ifproto->dl_ioctl = proto->ioctl;
1210 ifproto->ifp = ifp;
1211 TAILQ_INIT(&ifproto->pr_flt_head);
1212
1213 /*
1214 * Call family module add_proto routine so it can refine the
1215 * demux descriptors as it wishes.
1216 */
1217 retval = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag);
1218 if (retval) {
1219 dl_tag_array[i].ifp = 0;
1220 FREE(ifproto, M_IFADDR);
1221 goto end;
1222 }
1223
1224 /*
1225 * Add to if_proto list for this interface
1226 */
1227
1228 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1229 TAILQ_INSERT_TAIL(tmp, ifproto, next);
1230 ifp->refcnt++;
1231 if (ifproto->dl_offer)
1232 ifp->offercnt++;
1233
1234 /* the reserved field carries the number of protocols still attached (subject to change) */
1235 ev_pr_data.proto_family = proto->protocol_family;
1236 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1237 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
1238 (struct net_event_data *)&ev_pr_data,
1239 sizeof(struct kev_dl_proto_data));
1240
1241 end:
1242 splx(s);
1243 thread_funnel_set(network_flock, funnel_state);
1244 return retval;
1245 }
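
/*
 * Illustrative sketch (editor's addition): a typical protocol attach.
 * The callback names (my_input, my_pre_output, ...) are hypothetical and
 * the family/protocol constants are examples; real callers also append
 * demux descriptors matching their frame types.
 *
 *	struct dlil_proto_reg_str reg;
 *	u_long dl_tag;
 *
 *	bzero(&reg, sizeof(reg));
 *	TAILQ_INIT(&reg.demux_desc_head);
 *	// append struct dlil_demux_desc entries for the protocol's frame types here
 *	reg.interface_family = APPLE_IF_FAM_ETHERNET;
 *	reg.unit_number      = 0;
 *	reg.protocol_family  = PF_INET;
 *	reg.input            = my_input;
 *	reg.pre_output       = my_pre_output;
 *	reg.event            = my_event;	// optional
 *	reg.ioctl            = my_ioctl;	// optional
 *	if (dlil_attach_protocol(&reg, &dl_tag) == 0)
 *		;	// dl_tag is now valid for dlil_output()/dlil_ioctl()
 */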
1246
1247
1248
1249 int
1250 dlil_detach_protocol(u_long dl_tag)
1251 {
1252 struct ifnet *ifp;
1253 struct ifnet *orig_ifp=0;
1254 struct if_proto *proto;
1255 struct dlil_proto_head *tmp;
1256 struct if_family_str *if_family;
1257 struct dlil_filterq_entry *filter;
1258 int s, retval = 0;
1259 struct dlil_filterq_head *fhead;
1260 struct kev_dl_proto_data ev_pr_data;
1261 boolean_t funnel_state;
1262
1263 funnel_state = thread_funnel_set(network_flock, TRUE);
1264 s = splnet();
1265
1266 if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) {
1267 retval = ENOENT;
1268 goto end;
1269 }
1270
1271 ifp = dl_tag_array[dl_tag].ifp;
1272 proto = dl_tag_array[dl_tag].proto;
1273
1274 if_family = find_family_module(ifp->if_family);
1275 if (if_family == NULL) {
1276 retval = ENOENT;
1277 goto end;
1278 }
1279
1280 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1281
1282 /*
1283 * Call family module del_proto
1284 */
1285
1286 (*if_family->del_proto)(proto, dl_tag);
1287
1288
1289 /*
1290 * Remove and deallocate any attached protocol filters
1291 */
1292
1293 while ((filter = TAILQ_FIRST(&proto->pr_flt_head)) != NULL)
1294 dlil_detach_filter(filter->filter_id);
1295
1296 if (proto->dl_offer)
1297 ifp->offercnt--;
1298
1299 if (ifp->if_data.default_proto == dl_tag)
1300 ifp->if_data.default_proto = 0;
1301 dl_tag_array[dl_tag].ifp = 0;
1302
1303 /* the reserved field carries the number of protocols still attached (subject to change) */
1304 ev_pr_data.proto_family = proto->protocol_family;
1305
1306 /*
1307 * Cleanup routes that may still be in the routing table for that interface/protocol pair.
1308 */
1309
1310 if_rtproto_del(ifp, proto->protocol_family);
1311
1312 TAILQ_REMOVE(tmp, proto, next);
1313 FREE(proto, M_IFADDR);
1314
1315 ifp->refcnt--;
1316 ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
1317 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED,
1318 (struct net_event_data *)&ev_pr_data,
1319 sizeof(struct kev_dl_proto_data));
1320
1321 if (ifp->refcnt == 0) {
1322
1323 TAILQ_REMOVE(&ifnet, ifp, if_link);
1324
1325 (*if_family->del_if)(ifp);
1326
1327 if (--if_family->refcnt == 0) {
1328 if (if_family->shutdown)
1329 (*if_family->shutdown)();
1330
1331 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1332 FREE(if_family, M_IFADDR);
1333 }
1334
1335 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1336 while (orig_ifp != ifp) {
1337 orig_ifp = ifp;
1338
1339 TAILQ_FOREACH(filter, fhead, que) {
1340 if (IFILT(filter).filter_if_free) {
1341 retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp);
1342 if (retval) {
1343 splx(s);
1344 thread_funnel_set(network_flock, funnel_state);
1345 return 0;
1346 }
1347 }
1348 if (ifp != orig_ifp)
1349 break;
1350 }
1351 }
1352
1353 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1354
1355 (*ifp->if_free)(ifp);
1356 }
1357
1358 end:
1359 splx(s);
1360 thread_funnel_set(network_flock, funnel_state);
1361 return retval;
1362 }
1363
1364
1365
1366
1367
1368 int
1369 dlil_if_attach(struct ifnet *ifp)
1370 {
1371 u_long interface_family = ifp->if_family;
1372 struct if_family_str *if_family;
1373 struct dlil_proto_head *tmp;
1374 int stat;
1375 int s;
1376 boolean_t funnel_state;
1377
1378 funnel_state = thread_funnel_set(network_flock, TRUE);
1379 s = splnet();
1380 if (ifnet_inited == 0) {
1381 TAILQ_INIT(&ifnet);
1382 ifnet_inited = 1;
1383 }
1384
1385 if_family = find_family_module(interface_family);
1386
1387 if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) {
1388 splx(s);
1389 kprintf("Attempt to attach interface without family module - %d\n",
1390 interface_family);
1391 thread_funnel_set(network_flock, funnel_state);
1392 return ENODEV;
1393 }
1394
1395 if (ifp->refcnt == 0) {
1396 /*
1397 * Call the family module to fill in the appropriate fields in the
1398 * ifnet structure.
1399 */
1400
1401 stat = (*if_family->add_if)(ifp);
1402 if (stat) {
1403 splx(s);
1404 kprintf("dlil_if_attach -- add_if failed with %d\n", stat);
1405 thread_funnel_set(network_flock, funnel_state);
1406 return stat;
1407 }
1408 if_family->refcnt++;
1409
1410 /*
1411 * Add the ifp to the interface list.
1412 */
1413
1414 tmp = (struct dlil_proto_head *) &ifp->proto_head;
1415 TAILQ_INIT(tmp);
1416
1417 ifp->if_data.default_proto = 0;
1418 ifp->offercnt = 0;
1419 TAILQ_INIT(&ifp->if_flt_head);
1420 old_if_attach(ifp);
1421
1422 if (if_family->init_if) {
1423 stat = (*if_family->init_if)(ifp);
1424 if (stat) {
1425 kprintf("dlil_if_attach -- init_if failed with %d\n", stat);
1426 }
1427 }
1428 }
1429
1430 ifp->refcnt++;
1431
1432 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);
1433
1434 splx(s);
1435 thread_funnel_set(network_flock, funnel_state);
1436 return 0;
1437 }
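
/*
 * Illustrative sketch (editor's addition): the order of calls a driver
 * makes to publish an interface.  The family module must already be
 * registered via dlil_reg_if_modules(); mac_addr is a hypothetical
 * 6-byte unique id and the family constant is an example.
 *
 *	struct ifnet *ifp;
 *
 *	if (dlil_if_acquire(APPLE_IF_FAM_ETHERNET, mac_addr, 6, &ifp) == 0) {
 *		// fill in if_name, if_unit, if_output, if_ioctl, ... on ifp
 *		dlil_if_attach(ifp);	// posts KEV_DL_IF_ATTACHED
 *	}
 */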
1438
1439
1440 int
1441 dlil_if_detach(struct ifnet *ifp)
1442 {
1443 struct if_proto *proto;
1444 struct dlil_filterq_entry *if_filter;
1445 struct if_family_str *if_family;
1446 struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1447 int s;
1448 struct kev_msg ev_msg;
1449 boolean_t funnel_state;
1450
1451 funnel_state = thread_funnel_set(network_flock, TRUE);
1452 s = splnet();
1453
1454 if_family = find_family_module(ifp->if_family);
1455
1456 if (!if_family) {
1457 kprintf("Attempt to detach interface without family module - %s\n",
1458 ifp->if_name);
1459 splx(s);
1460 thread_funnel_set(network_flock, funnel_state);
1461 return ENODEV;
1462 }
1463
1464 while ((if_filter = TAILQ_FIRST(fhead)) != NULL)
1465 dlil_detach_filter(if_filter->filter_id);
1466
1467 ifp->refcnt--;
1468
1469 if (ifp->refcnt == 0) {
1470 /* Let BPF know the interface is detaching. */
1471 bpfdetach(ifp);
1472 TAILQ_REMOVE(&ifnet, ifp, if_link);
1473
1474 (*if_family->del_if)(ifp);
1475
1476 if (--if_family->refcnt == 0) {
1477 if (if_family->shutdown)
1478 (*if_family->shutdown)();
1479
1480 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1481 FREE(if_family, M_IFADDR);
1482 }
1483
1484 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0);
1485 splx(s);
1486 thread_funnel_set(network_flock, funnel_state);
1487 return 0;
1488 }
1489 else
1490 {
1491 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0);
1492 splx(s);
1493 thread_funnel_set(network_flock, funnel_state);
1494 return DLIL_WAIT_FOR_FREE;
1495 }
1496 }
1497
1498
1499 int
1500 dlil_reg_if_modules(u_long interface_family,
1501 struct dlil_ifmod_reg_str *ifmod)
1502 {
1503 struct if_family_str *if_family;
1504 int s;
1505 boolean_t funnel_state;
1506
1507
1508 funnel_state = thread_funnel_set(network_flock, TRUE);
1509 s = splnet();
1510 if (find_family_module(interface_family)) {
1511 kprintf("Attempt to register dlil family module more than once - %d\n",
1512 interface_family);
1513 splx(s);
1514 thread_funnel_set(network_flock, funnel_state);
1515 return EEXIST;
1516 }
1517
1518 if ((!ifmod->add_if) || (!ifmod->del_if) ||
1519 (!ifmod->add_proto) || (!ifmod->del_proto)) {
1520 kprintf("dlil_reg_if_modules passed at least one null pointer\n");
1521 splx(s);
1522 thread_funnel_set(network_flock, funnel_state);
1523 return EINVAL;
1524 }
1525
1526 /*
1527 * The following is a gross hack to keep from breaking
1528 * Vicomsoft's internet gateway on Jaguar. Vicomsoft
1529 * does not zero the reserved fields in dlil_ifmod_reg_str.
1530 * As a result, we have to zero any function pointer that was
1531 * still a reserved field at the time Vicomsoft built their
1532 * kext. Radar #2974305
1533 */
1534 if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2]) {
1535 if (interface_family == 123) { /* Vicom */
1536 ifmod->init_if = 0;
1537 } else {
1538 splx(s);
1539 thread_funnel_set(network_flock, funnel_state);
1540 return EINVAL;
1541 }
1542 }
1543
1544 if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK);
1545 if (!if_family) {
1546 kprintf("dlil_reg_if_modules failed allocation\n");
1547 splx(s);
1548 thread_funnel_set(network_flock, funnel_state);
1549 return ENOMEM;
1550 }
1551
1552 bzero(if_family, sizeof(struct if_family_str));
1553
1554 if_family->if_family = interface_family & 0xffff;
1555 if_family->shutdown = ifmod->shutdown;
1556 if_family->add_if = ifmod->add_if;
1557 if_family->del_if = ifmod->del_if;
1558 if_family->init_if = ifmod->init_if;
1559 if_family->add_proto = ifmod->add_proto;
1560 if_family->del_proto = ifmod->del_proto;
1561 if_family->ifmod_ioctl = ifmod->ifmod_ioctl;
1562 if_family->refcnt = 1;
1563 if_family->flags = 0;
1564
1565 TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next);
1566 splx(s);
1567 thread_funnel_set(network_flock, funnel_state);
1568 return 0;
1569 }
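
/*
 * Illustrative sketch (editor's addition): an interface family module
 * registers its entry points once at load time.  add_if, del_if,
 * add_proto and del_proto are mandatory; the callback names below are
 * hypothetical.  The bzero() also keeps the reserved fields zero, which
 * the registration code above checks.
 *
 *	struct dlil_ifmod_reg_str mod;
 *
 *	bzero(&mod, sizeof(mod));
 *	mod.add_if      = my_add_if;
 *	mod.del_if      = my_del_if;
 *	mod.add_proto   = my_add_proto;
 *	mod.del_proto   = my_del_proto;
 *	mod.ifmod_ioctl = my_ifmod_ioctl;	// optional
 *	mod.shutdown    = my_shutdown;		// optional
 *	if (dlil_reg_if_modules(APPLE_IF_FAM_ETHERNET, &mod) != 0)
 *		printf("family registration failed\n");
 */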
1570
1571 int dlil_dereg_if_modules(u_long interface_family)
1572 {
1573 struct if_family_str *if_family;
1574 int s, ret = 0;
1575 boolean_t funnel_state;
1576
1577 funnel_state = thread_funnel_set(network_flock, TRUE);
1578 s = splnet();
1579 if_family = find_family_module(interface_family);
1580 if (if_family == 0) {
1581 splx(s);
1582 thread_funnel_set(network_flock, funnel_state);
1583 return ENOENT;
1584 }
1585
1586 if (--if_family->refcnt == 0) {
1587 if (if_family->shutdown)
1588 (*if_family->shutdown)();
1589
1590 TAILQ_REMOVE(&if_family_head, if_family, if_fam_next);
1591 FREE(if_family, M_IFADDR);
1592 }
1593 else {
1594 if_family->flags |= DLIL_SHUTDOWN;
1595 ret = DLIL_WAIT_FOR_FREE;
1596 }
1597
1598 splx(s);
1599 thread_funnel_set(network_flock, funnel_state);
1600 return ret;
1601 }
1602
1603
1604
1605
1606
1607 /*
1608 * Old if_attach() kept here as a thin wrapper for temporary backwards compatibility
1609 */
1610
1611 void if_attach(ifp)
1612 struct ifnet *ifp;
1613 {
1614 dlil_if_attach(ifp);
1615 }
1616
1617
1618
1619 int
1620 dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id)
1621 {
1622 struct ifnet *orig_ifp = 0;
1623 struct ifnet *ifp;
1624 struct if_proto *ifproto;
1625 struct if_proto *proto;
1626 struct dlil_filterq_entry *tmp;
1627 int retval = 0;
1628 struct dlil_filterq_head *fhead;
1629 int match_found;
1630
1631 dlil_stats.inject_if_in1++;
1632
1633 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
1634 return ENOENT;
1635
1636 ifp = dlil_filters[from_id].ifp;
1637
1638 /*
1639 * Let interface filters (if any) do their thing ...
1640 */
1641
1642 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1643 match_found = 0;
1644
1645 if (TAILQ_EMPTY(fhead) == 0) {
1646 while (orig_ifp != ifp) {
1647 orig_ifp = ifp;
1648 TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) {
1649 if ((match_found) && (IFILT(tmp).filter_if_input)) {
1650 retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie,
1651 &ifp,
1652 &m,
1653 &frame_header);
1654 if (retval) {
1655 if (retval == EJUSTRETURN)
1656 return 0;
1657 else {
1658 m_freem(m);
1659 return retval;
1660 }
1661 }
1662
1663 }
1664
1665 if (ifp != orig_ifp)
1666 break;
1667
1668 if (from_id == tmp->filter_id)
1669 match_found = 1;
1670 }
1671 }
1672 }
1673
1674 ifp->if_lastchange = time;
1675
1676 /*
1677 * Call family demux module. If the demux module finds a match
1678 * for the frame it will fill-in the ifproto pointer.
1679 */
1680
1681 retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto );
1682
1683 if (m->m_flags & (M_BCAST|M_MCAST))
1684 ifp->if_imcasts++;
1685
1686 if ((retval) && (ifp->offercnt)) {
1687 /*
1688 * No match was found, look for any offers.
1689 */
1690 struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head;
1691 TAILQ_FOREACH(proto, tmp, next) {
1692 if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) {
1693 ifproto = proto;
1694 retval = 0;
1695 break;
1696 }
1697 }
1698 }
1699
1700 if (retval) {
1701 if (retval != EJUSTRETURN) {
1702 m_freem(m);
1703 return retval;
1704 }
1705 else
1706 return 0;
1707 }
1708 else
1709 if (ifproto == 0) {
1710 printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n");
1711 m_freem(m);
1712 return 0;
1713 }
1714
1715 /*
1716 * Call any attached protocol filters.
1717 */
1718 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1719 if (PFILT(tmp).filter_dl_input) {
1720 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1721 &m,
1722 &frame_header,
1723 &ifp);
1724
1725 if (retval) {
1726 if (retval == EJUSTRETURN)
1727 return 0;
1728 else {
1729 m_freem(m);
1730 return retval;
1731 }
1732 }
1733 }
1734 }
1735
1736
1737
1738 retval = (*ifproto->dl_input)(m, frame_header,
1739 ifp, ifproto->dl_tag,
1740 FALSE);
1741
1742 dlil_stats.inject_if_in2++;
1743 if (retval == EJUSTRETURN)
1744 retval = 0;
1745 else
1746 if (retval)
1747 m_freem(m);
1748
1749 return retval;
1750
1751 }
1752
1753
1754
1755
1756
1757 int
1758 dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id)
1759 {
1760 struct ifnet *orig_ifp = 0;
1761 struct dlil_filterq_entry *tmp;
1762 int retval;
1763 struct if_proto *ifproto = 0;
1764 int match_found;
1765 struct ifnet *ifp;
1766
1767 dlil_stats.inject_pr_in1++;
1768 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1769 return ENOENT;
1770
1771 ifproto = dlil_filters[from_id].proto;
1772 ifp = dlil_filters[from_id].ifp;
1773
1774 /*
1775 * Call any attached protocol filters.
1776 */
1777
1778 match_found = 0;
1779 TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) {
1780 if ((match_found) && (PFILT(tmp).filter_dl_input)) {
1781 retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie,
1782 &m,
1783 &frame_header,
1784 &ifp);
1785
1786 if (retval) {
1787 if (retval == EJUSTRETURN)
1788 return 0;
1789 else {
1790 m_freem(m);
1791 return retval;
1792 }
1793 }
1794 }
1795
1796 if (tmp->filter_id == from_id)
1797 match_found = 1;
1798 }
1799
1800
1801 retval = (*ifproto->dl_input)(m, frame_header,
1802 ifp, ifproto->dl_tag,
1803 FALSE);
1804
1805 if (retval == EJUSTRETURN)
1806 retval = 0;
1807 else
1808 if (retval)
1809 m_freem(m);
1810
1811 dlil_stats.inject_pr_in2++;
1812 return retval;
1813 }
1814
1815
1816
1817 int
1818 dlil_inject_pr_output(struct mbuf *m,
1819 struct sockaddr *dest,
1820 int raw,
1821 char *frame_type,
1822 char *dst_linkaddr,
1823 u_long from_id)
1824 {
1825 struct ifnet *orig_ifp = 0;
1826 struct ifnet *ifp;
1827 struct dlil_filterq_entry *tmp;
1828 int retval = 0;
1829 char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4];
1830 char dst_linkaddr_buffer[MAX_LINKADDR * 4];
1831 struct dlil_filterq_head *fhead;
1832 int match_found;
1833 u_long dl_tag;
1834
1835 dlil_stats.inject_pr_out1++;
1836 if (raw == 0) {
1837 if (frame_type)
1838 bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4);
1839 else
1840 return EINVAL;
1841
1842 if (dst_linkaddr)
1843 bcopy(dst_linkaddr, &dst_linkaddr_buffer, MAX_LINKADDR * 4);
1844 else
1845 return EINVAL;
1846 }
1847
1848 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER)
1849 return ENOENT;
1850
1851 ifp = dlil_filters[from_id].ifp;
1852 dl_tag = dlil_filters[from_id].proto->dl_tag;
1853
1854 frame_type = frame_type_buffer;
1855 dst_linkaddr = dst_linkaddr_buffer;
1856
1857 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1858
1859 /*
1860 * Run any attached protocol filters.
1861 */
1862 match_found = 0;
1863
1864 if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) {
1865 TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
1866 if ((match_found) && (PFILT(tmp).filter_dl_output)) {
1867 retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie,
1868 &m, &ifp, &dest, dst_linkaddr, frame_type);
1869 if (retval) {
1870 if (retval == EJUSTRETURN)
1871 return 0;
1872 else {
1873 m_freem(m);
1874 return retval;
1875 }
1876 }
1877 }
1878
1879 if (tmp->filter_id == from_id)
1880 match_found = 1;
1881 }
1882 }
1883
1884
1885 /*
1886 * Call framing module
1887 */
1888 if ((raw == 0) && (ifp->if_framer)) {
1889 retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type);
1890 if (retval) {
1891 if (retval == EJUSTRETURN)
1892 return 0;
1893 else
1894 {
1895 m_freem(m);
1896 return retval;
1897 }
1898 }
1899 }
1900
1901
1902 #if BRIDGE
1903 if (do_bridge) {
1904 struct mbuf *m0 = m ;
1905 struct ether_header *eh = mtod(m, struct ether_header *);
1906
1907 if (m->m_pkthdr.rcvif)
1908 m->m_pkthdr.rcvif = NULL ;
1909 ifp = bridge_dst_lookup(eh);
1910 bdg_forward(&m0, ifp);
1911 if (m0)
1912 m_freem(m0);
1913
1914 return 0;
1915 }
1916 #endif
1917
1918
1919 /*
1920 * Let interface filters (if any) do their thing ...
1921 */
1922
1923 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1924 if (TAILQ_EMPTY(fhead) == 0) {
1925 while (orig_ifp != ifp) {
1926 orig_ifp = ifp;
1927 TAILQ_FOREACH(tmp, fhead, que) {
1928 if (IFILT(tmp).filter_if_output) {
1929 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1930 &ifp,
1931 &m);
1932 if (retval) {
1933 if (retval == EJUSTRETURN)
1934 return 0;
1935 else {
1936 m_freem(m);
1937 return retval;
1938 }
1939 }
1940
1941 }
1942
1943 if (ifp != orig_ifp)
1944 break;
1945 }
1946 }
1947 }
1948
1949 /*
1950 * Finally, call the driver.
1951 */
1952
1953 retval = (*ifp->if_output)(ifp, m);
1954 dlil_stats.inject_pr_out2++;
1955 if ((retval == 0) || (retval == EJUSTRETURN))
1956 return 0;
1957 else
1958 return retval;
1959 }
1960
1961
1962 int
1963 dlil_inject_if_output(struct mbuf *m, u_long from_id)
1964 {
1965 struct ifnet *orig_ifp = 0;
1966 struct ifnet *ifp;
1967 struct dlil_filterq_entry *tmp;
1968 int retval = 0;
1969 struct dlil_filterq_head *fhead;
1970 int match_found;
1971
1972 dlil_stats.inject_if_out1++;
1973 if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER)
1974 return ENOENT;
1975
1976 ifp = dlil_filters[from_id].ifp;
1977
1978 /*
1979 * Let interface filters (if any) do their thing ...
1980 */
1981
1982 fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
1983 match_found = 0;
1984
1985 if (TAILQ_EMPTY(fhead) == 0) {
1986 while (orig_ifp != ifp) {
1987 orig_ifp = ifp;
1988 TAILQ_FOREACH(tmp, fhead, que) {
1989 if ((match_found) && (IFILT(tmp).filter_if_output)) {
1990 retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie,
1991 &ifp,
1992 &m);
1993 if (retval) {
1994 if (retval == EJUSTRETURN)
1995 return 0;
1996 else {
1997 m_freem(m);
1998 return retval;
1999 }
2000 }
2001
2002 }
2003
2004 if (ifp != orig_ifp)
2005 break;
2006
2007 if (from_id == tmp->filter_id)
2008 match_found = 1;
2009 }
2010 }
2011 }
2012
2013 /*
2014 * Finally, call the driver.
2015 */
2016
2017 retval = (*ifp->if_output)(ifp, m);
2018 dlil_stats.inject_if_out2++;
2019 if ((retval == 0) || (retval == EJUSTRETURN))
2020 return 0;
2021 else
2022 return retval;
2023 }
2024
2025 static
2026 int dlil_recycle_ioctl(struct ifnet *ifnet_ptr, u_long ioctl_code, void *ioctl_arg)
2027 {
2028
2029 return EOPNOTSUPP;
2030 }
2031
2032 static
2033 int dlil_recycle_output(struct ifnet *ifnet_ptr, struct mbuf *m)
2034 {
2035
2036 m_freem(m);
2037 return 0;
2038 }
2039
2040 static
2041 int dlil_recycle_free(struct ifnet *ifnet_ptr)
2042 {
2043 return 0;
2044 }
2045
2046 static
2047 int dlil_recycle_set_bpf_tap(struct ifnet *ifp, int mode,
2048 int (*bpf_callback)(struct ifnet *, struct mbuf *))
2049 {
2050 /* XXX not sure what to do here */
2051 return 0;
2052 }
2053
2054 int dlil_if_acquire(u_long family, void *uniqueid, size_t uniqueid_len,
2055 struct ifnet **ifp)
2056 {
2057 struct ifnet *ifp1 = NULL;
2058 struct dlil_ifnet *dlifp1 = NULL;
2059 int s, ret = 0;
2060 boolean_t funnel_state;
2061
2062 funnel_state = thread_funnel_set(network_flock, TRUE);
2063 s = splnet();
2064
2065 TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) {
2066
2067 ifp1 = (struct ifnet *)dlifp1;
2068
2069 if (ifp1->if_family == family) {
2070
2071 /* same uniqueid and same len or no unique id specified */
2072 if ((uniqueid_len == dlifp1->if_uniqueid_len)
2073 && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) {
2074
2075 /* check for matching interface in use */
2076 if (ifp1->if_eflags & IFEF_INUSE) {
2077 if (uniqueid_len) {
2078 ret = EBUSY;
2079 goto end;
2080 }
2081 }
2082 else {
2083
2084 ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE);
2085 *ifp = ifp1;
2086 goto end;
2087 }
2088 }
2089 }
2090 }
2091
2092 /* no interface found, allocate a new one */
2093 MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK);
2094 if (dlifp1 == 0) {
2095 ret = ENOMEM;
2096 goto end;
2097 }
2098
2099 bzero(dlifp1, sizeof(*dlifp1));
2100
2101 if (uniqueid_len) {
2102 MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK);
2103 if (dlifp1->if_uniqueid == 0) {
2104 FREE(dlifp1, M_NKE);
2105 ret = ENOMEM;
2106 goto end;
2107 }
2108 bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len);
2109 dlifp1->if_uniqueid_len = uniqueid_len;
2110 }
2111
2112 ifp1 = (struct ifnet *)dlifp1;
2113 ifp1->if_eflags |= IFEF_INUSE;
2114
2115 TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link);
2116
2117 *ifp = ifp1;
2118
2119 end:
2120
2121 splx(s);
2122 thread_funnel_set(network_flock, funnel_state);
2123 return ret;
2124 }
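
/*
 * Illustrative sketch (editor's addition): the uniqueid lets a driver
 * reclaim the very same ifnet across unload/reload.  A second acquire
 * with the same id returns the existing structure with IFEF_REUSE set,
 * or EBUSY if it is still marked IFEF_INUSE.  'ea' is a hypothetical
 * 6-byte hardware address.
 *
 *	struct ifnet *ifp;
 *
 *	if (dlil_if_acquire(APPLE_IF_FAM_ETHERNET, ea, 6, &ifp) == 0) {
 *		if (ifp->if_eflags & IFEF_REUSE)
 *			;	// recycled ifnet: driver fields may still hold old values
 *	}
 *	...
 *	dlil_if_release(ifp);	// on detach: clears IFEF_INUSE, installs recycle stubs
 */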
2125
2126 void dlil_if_release(struct ifnet *ifp)
2127 {
2128 struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp;
2129 int s;
2130 boolean_t funnel_state;
2131
2132 funnel_state = thread_funnel_set(network_flock, TRUE);
2133 s = splnet();
2134
2135 ifp->if_eflags &= ~IFEF_INUSE;
2136 ifp->if_ioctl = dlil_recycle_ioctl;
2137 ifp->if_output = dlil_recycle_output;
2138 ifp->if_free = dlil_recycle_free;
2139 ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap;
2140
2141 strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ);
2142 ifp->if_name = dlifp->if_namestorage;
2143
2144 splx(s);
2145 thread_funnel_set(network_flock, funnel_state);
2146 }
2147