1 | /* | |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. | |
7 | * | |
8 | * This file contains Original Code and/or Modifications of Original Code | |
9 | * as defined in and that are subject to the Apple Public Source License | |
10 | * Version 2.0 (the 'License'). You may not use this file except in | |
11 | * compliance with the License. Please obtain a copy of the License at | |
12 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
13 | * file. | |
14 | * | |
15 | * The Original Code and all software distributed under the License are | |
16 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
17 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
18 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
19 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
20 | * Please see the License for the specific language governing rights and | |
21 | * limitations under the License. | |
22 | * | |
23 | * @APPLE_LICENSE_HEADER_END@ | |
24 | */ | |
25 | /* | |
26 | * Copyright (c) 1999 Apple Computer, Inc. | |
27 | * | |
28 | * Data Link Interface Layer | |
29 | * Author: Ted Walker | |
30 | */ | |
31 | ||
32 | ||
33 | ||
34 | #include <sys/param.h> | |
35 | #include <sys/systm.h> | |
36 | #include <sys/kernel.h> | |
37 | #include <sys/malloc.h> | |
38 | #include <sys/mbuf.h> | |
39 | #include <sys/socket.h> | |
40 | #include <net/if_dl.h> | |
41 | #include <net/if.h> | |
42 | #include <net/if_var.h> | |
43 | #include <net/dlil.h> | |
44 | #include <sys/kern_event.h> | |
45 | #include <sys/kdebug.h> | |
46 | #include <string.h> | |
47 | ||
48 | #include <kern/task.h> | |
49 | #include <kern/thread.h> | |
50 | #include <kern/sched_prim.h> | |
51 | ||
52 | #include <net/netisr.h> | |
53 | #include <net/if_types.h> | |
54 | ||
55 | #include <machine/machine_routines.h> | |
56 | ||
57 | #define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0) | |
58 | #define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2) | |
59 | #define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8)) | |
60 | #define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8)) | |
61 | #define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8)) | |
62 | ||
63 | ||
64 | #define MAX_DL_TAGS 16 | |
65 | #define MAX_DLIL_FILTERS 16 | |
66 | #define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */ | |
67 | #define MAX_LINKADDR 4 /* LONGWORDS */ | |
68 | #define M_NKE M_IFADDR | |
69 | ||
70 | #define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter | |
71 | #define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter | |
72 | ||
73 | struct dl_tag_str { | |
74 | struct ifnet *ifp; | |
75 | struct if_proto *proto; | |
76 | struct dlil_filterq_head *pr_flt_head; | |
77 | }; | |
78 | ||
79 | ||
80 | struct dlil_ifnet { | |
81 | /* ifnet and drvr_ext are used by the stack and drivers; | |
82 | drvr_ext extends the public ifnet and must follow dl_if */ | |
83 | struct ifnet dl_if; /* public ifnet */ | |
84 | void *drvr_ext[4]; /* driver reserved (e.g. arpcom extension for enet) */ | |
85 | ||
86 | /* dlil private fields */ | |
87 | TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnet structures are linked together */ | |
88 | /* it is not the ifnet list */ | |
89 | void *if_uniqueid; /* unique id identifying the interface */ | |
90 | size_t if_uniqueid_len;/* length of the unique id */ | |
91 | char if_namestorage[IFNAMSIZ]; /* interface name storage for detached interfaces */ | |
92 | }; | |
93 | ||
94 | struct dlil_stats_str { | |
95 | int inject_pr_in1; | |
96 | int inject_pr_in2; | |
97 | int inject_pr_out1; | |
98 | int inject_pr_out2; | |
99 | int inject_if_in1; | |
100 | int inject_if_in2; | |
101 | int inject_if_out1; | |
102 | int inject_if_out2; | |
103 | }; | |
104 | ||
105 | ||
106 | struct dlil_filter_id_str { | |
107 | int type; | |
108 | struct dlil_filterq_head *head; | |
109 | struct dlil_filterq_entry *filter_ptr; | |
110 | struct ifnet *ifp; | |
111 | struct if_proto *proto; | |
112 | }; | |
113 | ||
114 | ||
115 | ||
116 | struct if_family_str { | |
117 | TAILQ_ENTRY(if_family_str) if_fam_next; | |
118 | u_long if_family; | |
119 | int refcnt; | |
120 | int flags; | |
121 | ||
122 | #define DLIL_SHUTDOWN 1 | |
123 | ||
124 | int (*add_if)(struct ifnet *ifp); | |
125 | int (*del_if)(struct ifnet *ifp); | |
126 | int (*init_if)(struct ifnet *ifp); | |
127 | int (*add_proto)(struct ddesc_head_str *demux_desc_head, | |
128 | struct if_proto *proto, u_long dl_tag); | |
129 | int (*del_proto)(struct if_proto *proto, u_long dl_tag); | |
130 | int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data); | |
131 | int (*shutdown)(); | |
132 | }; | |
133 | ||
134 | ||
135 | struct proto_family_str { | |
136 | TAILQ_ENTRY(proto_family_str) proto_fam_next; | |
137 | u_long proto_family; | |
138 | u_long if_family; | |
139 | ||
140 | int (*attach_proto)(struct ifnet *ifp, u_long *dl_tag); | |
141 | int (*detach_proto)(struct ifnet *ifp, u_long dl_tag); | |
142 | }; | |
143 | ||
144 | ||
145 | ||
146 | struct dlil_stats_str dlil_stats; | |
147 | ||
148 | static | |
149 | struct dlil_filter_id_str *dlil_filters; | |
150 | ||
151 | static | |
152 | struct dl_tag_str *dl_tag_array; | |
153 | ||
154 | static | |
155 | TAILQ_HEAD(, dlil_ifnet) dlil_ifnet_head; | |
156 | ||
157 | static | |
158 | TAILQ_HEAD(, if_family_str) if_family_head; | |
159 | ||
160 | static | |
161 | TAILQ_HEAD(, proto_family_str) proto_family_head; | |
162 | ||
163 | static int ifnet_inited = 0; | |
164 | static u_long dl_tag_nb = 0; | |
165 | static u_long dlil_filters_nb = 0; | |
166 | ||
167 | int dlil_initialized = 0; | |
168 | decl_simple_lock_data(, dlil_input_lock) | |
169 | int dlil_input_thread_wakeup = 0; | |
170 | static struct mbuf *dlil_input_mbuf_head = NULL; | |
171 | static struct mbuf *dlil_input_mbuf_tail = NULL; | |
172 | #if NLOOP > 1 | |
173 | #error dlil_input() needs to be revised to support more than one loopback interface | |
174 | #endif | |
175 | static struct mbuf *dlil_input_loop_head = NULL; | |
176 | static struct mbuf *dlil_input_loop_tail = NULL; | |
177 | extern struct ifmultihead ifma_lostlist; | |
178 | ||
179 | static void dlil_input_thread(void); | |
180 | extern void run_netisr(void); | |
181 | extern void bpfdetach(struct ifnet*); | |
182 | ||
183 | int dlil_expand_mcl; | |
184 | ||
185 | /* | |
186 | * Internal functions. | |
187 | */ | |
188 | ||
189 | static | |
190 | struct if_family_str *find_family_module(u_long if_family) | |
191 | { | |
192 | struct if_family_str *mod = NULL; | |
193 | ||
194 | TAILQ_FOREACH(mod, &if_family_head, if_fam_next) { | |
195 | if (mod->if_family == (if_family & 0xffff)) | |
196 | break; | |
197 | } | |
198 | ||
199 | return mod; | |
200 | } | |
201 | ||
202 | static | |
203 | struct proto_family_str *find_proto_module(u_long proto_family, u_long if_family) | |
204 | { | |
205 | struct proto_family_str *mod = NULL; | |
206 | ||
207 | TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) { | |
208 | if ((mod->proto_family == (proto_family & 0xffff)) | |
209 | && (mod->if_family == (if_family & 0xffff))) | |
210 | break; | |
211 | } | |
212 | ||
213 | return mod; | |
214 | } | |
215 | ||
216 | ||
217 | /* | |
218 | * Public functions. | |
219 | */ | |
220 | ||
221 | struct ifnet *ifbyfamily(u_long family, short unit) | |
222 | { | |
223 | struct ifnet *ifp; | |
224 | ||
225 | TAILQ_FOREACH(ifp, &ifnet, if_link) | |
226 | if ((family == ifp->if_family) && | |
227 | (ifp->if_unit == unit)) | |
228 | return ifp; | |
229 | ||
230 | return 0; | |
231 | } | |
232 | ||
233 | struct if_proto *dlttoproto(u_long dl_tag) | |
234 | { | |
235 | if (dl_tag < dl_tag_nb && dl_tag_array[dl_tag].ifp) | |
236 | return dl_tag_array[dl_tag].proto; | |
237 | return 0; | |
238 | } | |
239 | ||
240 | ||
241 | static int dlil_ifp_proto_count(struct ifnet * ifp) | |
242 | { | |
243 | int count = 0; | |
244 | struct if_proto * proto; | |
245 | struct dlil_proto_head * tmp; | |
246 | ||
247 | tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
248 | ||
249 | TAILQ_FOREACH(proto, tmp, next) | |
250 | count++; | |
251 | ||
252 | return count; | |
253 | } | |
254 | ||
255 | u_long ifptodlt(struct ifnet *ifp, u_long proto_family) | |
256 | { | |
257 | struct if_proto *proto; | |
258 | struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
259 | ||
260 | ||
261 | TAILQ_FOREACH(proto, tmp, next) | |
262 | if (proto->protocol_family == proto_family) | |
263 | return proto->dl_tag; | |
264 | ||
265 | return 0; | |
266 | } | |
267 | ||
268 | ||
269 | int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag) | |
270 | { | |
271 | struct ifnet *ifp; | |
272 | ||
273 | ifp = ifbyfamily(if_family, unit); | |
274 | if (!ifp) | |
275 | return ENOENT; | |
276 | ||
277 | *dl_tag = ifptodlt(ifp, proto_family); | |
278 | if (*dl_tag == 0) | |
279 | return EPROTONOSUPPORT; | |
280 | else | |
281 | return 0; | |
282 | } | |
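
A caller-side sketch of the lookup helpers above, which resolve a dl_tag for a protocol already attached to an interface. `APPLE_IF_FAM_ETHERNET` and `PF_INET` are assumed example values; the error codes are the ones `dlil_find_dltag()` itself returns.

```c
/* Sketch only: resolve the dl_tag for IPv4 plumbed on Ethernet unit 0. */
static int example_lookup_dl_tag(u_long *dl_tag)
{
	int error;

	error = dlil_find_dltag(APPLE_IF_FAM_ETHERNET, 0 /* unit */, PF_INET, dl_tag);
	if (error)
		return error;	/* ENOENT: no such interface; EPROTONOSUPPORT: protocol not attached */

	return 0;
}
```
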
283 | ||
284 | ||
285 | void dlil_post_msg(struct ifnet *ifp, u_long event_subclass, u_long event_code, | |
286 | struct net_event_data *event_data, u_long event_data_len) | |
287 | { | |
288 | struct net_event_data ev_data; | |
289 | struct kev_msg ev_msg; | |
290 | ||
291 | /* | |
292 | * a net event always starts with a net_event_data structure | |
293 | * but the caller can generate a simple net event or | |
294 | * provide a longer event structure to post | |
295 | */ | |
296 | ||
297 | ev_msg.vendor_code = KEV_VENDOR_APPLE; | |
298 | ev_msg.kev_class = KEV_NETWORK_CLASS; | |
299 | ev_msg.kev_subclass = event_subclass; | |
300 | ev_msg.event_code = event_code; | |
301 | ||
302 | if (event_data == 0) { | |
303 | event_data = &ev_data; | |
304 | event_data_len = sizeof(struct net_event_data); | |
305 | } | |
306 | ||
307 | strncpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ); | |
308 | event_data->if_family = ifp->if_family; | |
309 | event_data->if_unit = (unsigned long) ifp->if_unit; | |
310 | ||
311 | ev_msg.dv[0].data_length = event_data_len; | |
312 | ev_msg.dv[0].data_ptr = event_data; | |
313 | ev_msg.dv[1].data_length = 0; | |
314 | ||
315 | kev_post_msg(&ev_msg); | |
316 | } | |
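
A sketch of both calling forms the comment above describes: a bare event, where `dlil_post_msg()` supplies the `net_event_data` header itself, and an extended event built from a larger structure that begins with `net_event_data`. The structures and KEV_DL_* codes are the ones used elsewhere in this file; the wrapper function and the `PF_INET` value are illustrative only.

```c
/* Sketch only: posting DLIL events from a driver or family module. */
static void example_post_events(struct ifnet *ifp)
{
	struct kev_dl_proto_data pr_data;

	/* Bare event: pass 0 and dlil_post_msg fills in the net_event_data header. */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0);

	/* Extended event: caller supplies a structure that starts with net_event_data. */
	pr_data.proto_family = PF_INET;			/* illustrative value */
	pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp);
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED,
	              (struct net_event_data *)&pr_data,
	              sizeof(struct kev_dl_proto_data));
}
```
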
317 | ||
318 | ||
319 | ||
320 | void | |
321 | dlil_init() | |
322 | { | |
323 | int i; | |
324 | ||
325 | TAILQ_INIT(&dlil_ifnet_head); | |
326 | TAILQ_INIT(&if_family_head); | |
327 | TAILQ_INIT(&proto_family_head); | |
328 | ||
329 | // create the dl tag array | |
330 | MALLOC(dl_tag_array, void *, sizeof(struct dl_tag_str) * MAX_DL_TAGS, M_NKE, M_WAITOK); | |
331 | if (dl_tag_array == 0) { | |
332 | printf("dlil_init tags array allocation failed\n"); | |
333 | return; //very bad | |
334 | } | |
335 | bzero(dl_tag_array, sizeof(struct dl_tag_str) * MAX_DL_TAGS); | |
336 | dl_tag_nb = MAX_DL_TAGS; | |
337 | ||
338 | // create the dl filters array | |
339 | MALLOC(dlil_filters, void *, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS, M_NKE, M_WAITOK); | |
340 | if (dlil_filters == 0) { | |
341 | printf("dlil_init filters array allocation failed\n"); | |
342 | return; //very bad | |
343 | } | |
344 | bzero(dlil_filters, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS); | |
345 | dlil_filters_nb = MAX_DLIL_FILTERS; | |
346 | ||
347 | bzero(&dlil_stats, sizeof(dlil_stats)); | |
348 | ||
349 | simple_lock_init(&dlil_input_lock); | |
350 | ||
351 | /* | |
352 | * Start up the dlil input thread once everything is initialized | |
353 | */ | |
354 | (void) kernel_thread(kernel_task, dlil_input_thread); | |
355 | } | |
356 | ||
357 | u_long get_new_filter_id() | |
358 | { | |
359 | u_long i; | |
360 | u_char *p; | |
361 | ||
362 | for (i=1; i < dlil_filters_nb; i++) | |
363 | if (dlil_filters[i].type == 0) | |
364 | break; | |
365 | ||
366 | if (i == dlil_filters_nb) { | |
367 | // expand the filters array by MAX_DLIL_FILTERS | |
368 | MALLOC(p, u_char *, sizeof(struct dlil_filter_id_str) * (dlil_filters_nb + MAX_DLIL_FILTERS), M_NKE, M_WAITOK); | |
369 | if (p == 0) | |
370 | return 0; | |
371 | ||
372 | bcopy(dlil_filters, p, sizeof(struct dlil_filter_id_str) * dlil_filters_nb); | |
373 | bzero(p + sizeof(struct dlil_filter_id_str) * dlil_filters_nb, sizeof(struct dlil_filter_id_str) * MAX_DLIL_FILTERS); | |
374 | dlil_filters_nb += MAX_DLIL_FILTERS; | |
375 | FREE(dlil_filters, M_NKE); | |
376 | dlil_filters = (struct dlil_filter_id_str *)p; | |
377 | } | |
378 | ||
379 | return i; | |
380 | } | |
381 | ||
382 | ||
383 | int dlil_attach_interface_filter(struct ifnet *ifp, | |
384 | struct dlil_if_flt_str *if_filter, | |
385 | u_long *filter_id, | |
386 | int insertion_point) | |
387 | { | |
388 | int s; | |
389 | int retval = 0; | |
390 | struct dlil_filterq_entry *tmp_ptr; | |
391 | struct dlil_filterq_entry *if_filt; | |
392 | struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
393 | boolean_t funnel_state; | |
394 | ||
395 | MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK); | |
396 | if (tmp_ptr == NULL) | |
397 | return (ENOBUFS); | |
398 | ||
399 | bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter, | |
400 | sizeof(struct dlil_if_flt_str)); | |
401 | ||
402 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
403 | s = splnet(); | |
404 | ||
405 | *filter_id = get_new_filter_id(); | |
406 | if (*filter_id == 0) { | |
407 | FREE(tmp_ptr, M_NKE); | |
408 | retval = ENOMEM; | |
409 | goto end; | |
410 | } | |
411 | ||
412 | dlil_filters[*filter_id].filter_ptr = tmp_ptr; | |
413 | dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
414 | dlil_filters[*filter_id].type = DLIL_IF_FILTER; | |
415 | dlil_filters[*filter_id].ifp = ifp; | |
416 | tmp_ptr->filter_id = *filter_id; | |
417 | tmp_ptr->type = DLIL_IF_FILTER; | |
418 | ||
419 | if (insertion_point != DLIL_LAST_FILTER) { | |
420 | TAILQ_FOREACH(if_filt, fhead, que) | |
421 | if (insertion_point == if_filt->filter_id) { | |
422 | TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que); | |
423 | break; | |
424 | } | |
425 | } | |
426 | else | |
427 | TAILQ_INSERT_TAIL(fhead, tmp_ptr, que); | |
428 | ||
429 | end: | |
430 | splx(s); | |
431 | thread_funnel_set(network_flock, funnel_state); | |
432 | return retval; | |
433 | } | |
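
A sketch of how a network kernel extension might attach an interface filter. The `dlil_if_flt_str` members used here (`cookie`, `filter_if_input`) and `DLIL_LAST_FILTER` are the names this file already references through `IFILT()`; the hook itself and its signature, inferred from the call sites in `dlil_input_packet()`, are hypothetical.

```c
/* Hypothetical inbound hook; signature inferred from the filter_if_input call sites. */
static int my_if_input_hook(caddr_t cookie, struct ifnet **ifp,
			    struct mbuf **m, char **frame_header)
{
	/* Inspect or rewrite the packet; EJUSTRETURN tells DLIL the filter consumed it. */
	return 0;
}

static int example_attach_if_filter(struct ifnet *ifp, u_long *filter_id)
{
	struct dlil_if_flt_str flt;

	bzero(&flt, sizeof(flt));
	flt.cookie = NULL;			/* private state for the hooks, if any */
	flt.filter_if_input = my_if_input_hook;	/* unused hooks stay NULL */

	return dlil_attach_interface_filter(ifp, &flt, filter_id, DLIL_LAST_FILTER);
}
```
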
434 | ||
435 | ||
436 | int dlil_attach_protocol_filter(u_long dl_tag, | |
437 | struct dlil_pr_flt_str *pr_filter, | |
438 | u_long *filter_id, | |
439 | int insertion_point) | |
440 | { | |
441 | struct dlil_filterq_entry *tmp_ptr, *pr_filt; | |
442 | int s; | |
443 | int retval = 0; | |
444 | boolean_t funnel_state; | |
445 | ||
446 | if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) | |
447 | return (ENOENT); | |
448 | ||
449 | MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK); | |
450 | if (tmp_ptr == NULL) | |
451 | return (ENOBUFS); | |
452 | ||
453 | bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter, | |
454 | sizeof(struct dlil_pr_flt_str)); | |
455 | ||
456 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
457 | s = splnet(); | |
458 | ||
459 | *filter_id = get_new_filter_id(); | |
460 | if (*filter_id == 0) { | |
461 | FREE(tmp_ptr, M_NKE); | |
462 | retval = ENOMEM; | |
463 | goto end; | |
464 | } | |
465 | ||
466 | dlil_filters[*filter_id].filter_ptr = tmp_ptr; | |
467 | dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head; | |
468 | dlil_filters[*filter_id].type = DLIL_PR_FILTER; | |
469 | dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto; | |
470 | dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp; | |
471 | tmp_ptr->filter_id = *filter_id; | |
472 | tmp_ptr->type = DLIL_PR_FILTER; | |
473 | ||
474 | if (insertion_point != DLIL_LAST_FILTER) { | |
475 | TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que) | |
476 | if (insertion_point == pr_filt->filter_id) { | |
477 | TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que); | |
478 | break; | |
479 | } | |
480 | } | |
481 | else | |
482 | TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que); | |
483 | ||
484 | end: | |
485 | splx(s); | |
486 | thread_funnel_set(network_flock, funnel_state); | |
487 | return retval; | |
488 | } | |
489 | ||
490 | ||
491 | int | |
492 | dlil_detach_filter(u_long filter_id) | |
493 | { | |
494 | struct dlil_filter_id_str *flt; | |
495 | int s, retval = 0; | |
496 | boolean_t funnel_state; | |
497 | ||
498 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
499 | s = splnet(); | |
500 | ||
501 | if (filter_id >= dlil_filters_nb || dlil_filters[filter_id].type == 0) { | |
502 | retval = ENOENT; | |
503 | goto end; | |
504 | } | |
505 | ||
506 | flt = &dlil_filters[filter_id]; | |
507 | ||
508 | if (flt->type == DLIL_IF_FILTER) { | |
509 | if (IFILT(flt->filter_ptr).filter_detach) | |
510 | (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie); | |
511 | } | |
512 | else { | |
513 | if (flt->type == DLIL_PR_FILTER) { | |
514 | if (PFILT(flt->filter_ptr).filter_detach) | |
515 | (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie); | |
516 | } | |
517 | } | |
518 | ||
519 | TAILQ_REMOVE(flt->head, flt->filter_ptr, que); | |
520 | FREE(flt->filter_ptr, M_NKE); | |
521 | flt->type = 0; | |
522 | ||
523 | end: | |
524 | splx(s); | |
525 | thread_funnel_set(network_flock, funnel_state); | |
526 | return retval; | |
527 | } | |
528 | ||
529 | void | |
530 | dlil_input_thread_continue(void) | |
531 | { | |
532 | while (1) { | |
533 | struct mbuf *m, *m_loop; | |
534 | ||
535 | usimple_lock(&dlil_input_lock); | |
536 | m = dlil_input_mbuf_head; | |
537 | dlil_input_mbuf_head = NULL; | |
538 | dlil_input_mbuf_tail = NULL; | |
539 | m_loop = dlil_input_loop_head; | |
540 | dlil_input_loop_head = NULL; | |
541 | dlil_input_loop_tail = NULL; | |
542 | usimple_unlock(&dlil_input_lock); | |
543 | ||
544 | /* | |
545 | * NOTE warning %%% attention !!!! | |
546 | * We should think about putting some thread-starvation safeguards in | |
547 | * place when dealing with long chains of packets. | |
548 | */ | |
549 | while (m) { | |
550 | struct mbuf *m0 = m->m_nextpkt; | |
551 | void *header = m->m_pkthdr.header; | |
552 | ||
553 | m->m_nextpkt = NULL; | |
554 | m->m_pkthdr.header = NULL; | |
555 | (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header); | |
556 | m = m0; | |
557 | } | |
558 | m = m_loop; | |
559 | while (m) { | |
560 | struct mbuf *m0 = m->m_nextpkt; | |
561 | void *header = m->m_pkthdr.header; | |
562 | struct ifnet *ifp = &loif[0]; | |
563 | ||
564 | m->m_nextpkt = NULL; | |
565 | m->m_pkthdr.header = NULL; | |
566 | (void) dlil_input_packet(ifp, m, header); | |
567 | m = m0; | |
568 | } | |
569 | ||
570 | if (netisr != 0) | |
571 | run_netisr(); | |
572 | ||
573 | if (dlil_input_mbuf_head == NULL && | |
574 | dlil_input_loop_head == NULL && | |
575 | netisr == 0) { | |
576 | assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT); | |
577 | (void) thread_block(dlil_input_thread_continue); | |
578 | /* NOTREACHED */ | |
579 | } | |
580 | } | |
581 | } | |
582 | ||
583 | void dlil_input_thread(void) | |
584 | { | |
585 | register thread_t self = current_act(); | |
586 | ||
587 | ml_thread_policy(self, MACHINE_GROUP, | |
588 | (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR)); | |
589 | ||
590 | /* The dlil thread is always funneled */ | |
591 | thread_funnel_set(network_flock, TRUE); | |
592 | dlil_initialized = 1; | |
593 | dlil_input_thread_continue(); | |
594 | } | |
595 | ||
596 | int | |
597 | dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail) | |
598 | { | |
599 | /* WARNING | |
600 | * Because of looped-back multicast we cannot stuff the ifp in | |
601 | * the rcvif of the packet header: loopback has its own dlil | |
602 | * input queue | |
603 | */ | |
604 | ||
605 | usimple_lock(&dlil_input_lock); | |
606 | if (ifp->if_type != IFT_LOOP) { | |
607 | if (dlil_input_mbuf_head == NULL) | |
608 | dlil_input_mbuf_head = m_head; | |
609 | else if (dlil_input_mbuf_tail != NULL) | |
610 | dlil_input_mbuf_tail->m_nextpkt = m_head; | |
611 | dlil_input_mbuf_tail = m_tail ? m_tail : m_head; | |
612 | } else { | |
613 | if (dlil_input_loop_head == NULL) | |
614 | dlil_input_loop_head = m_head; | |
615 | else if (dlil_input_loop_tail != NULL) | |
616 | dlil_input_loop_tail->m_nextpkt = m_head; | |
617 | dlil_input_loop_tail = m_tail ? m_tail : m_head; | |
618 | } | |
619 | usimple_unlock(&dlil_input_lock); | |
620 | ||
621 | wakeup((caddr_t)&dlil_input_thread_wakeup); | |
622 | ||
623 | return 0; | |
624 | } | |
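
A sketch of the driver side of this contract: packets are handed to `dlil_input()` as a chain linked through `m_nextpkt`, bracketed by `m_head`/`m_tail` (tail may be NULL for a single packet), with `m_pkthdr.rcvif` and `m_pkthdr.header` already set so the input thread can read them back in `dlil_input_thread_continue()`. The driver function itself is hypothetical.

```c
/* Sketch only: a hypothetical driver receive path queuing one packet to DLIL. */
static void example_driver_rx(struct ifnet *ifp, struct mbuf *m, char *frame_header)
{
	m->m_nextpkt = NULL;			/* single-packet chain */
	m->m_pkthdr.rcvif = ifp;		/* ignored for IFT_LOOP, required otherwise */
	m->m_pkthdr.header = frame_header;	/* consumed by dlil_input_packet() */

	(void) dlil_input(ifp, m, m);		/* m_head == m_tail for one packet */
}
```
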
625 | ||
626 | int | |
627 | dlil_input_packet(struct ifnet *ifp, struct mbuf *m, | |
628 | char *frame_header) | |
629 | { | |
630 | struct ifnet *orig_ifp = 0; | |
631 | struct dlil_filterq_entry *tmp; | |
632 | int retval; | |
633 | struct if_proto *ifproto = 0; | |
634 | struct if_proto *proto; | |
635 | struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
636 | ||
637 | ||
638 | KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0); | |
639 | ||
640 | /* | |
641 | * Run interface filters | |
642 | */ | |
643 | ||
644 | while (orig_ifp != ifp) { | |
645 | orig_ifp = ifp; | |
646 | ||
647 | TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) { | |
648 | if (IFILT(tmp).filter_if_input) { | |
649 | retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie, | |
650 | &ifp, | |
651 | &m, | |
652 | &frame_header); | |
653 | if (retval) { | |
654 | if (retval == EJUSTRETURN) | |
655 | return 0; | |
656 | else { | |
657 | m_freem(m); | |
658 | return retval; | |
659 | } | |
660 | } | |
661 | } | |
662 | ||
663 | if (ifp != orig_ifp) | |
664 | break; | |
665 | } | |
666 | } | |
667 | ||
668 | ifp->if_lastchange = time; | |
669 | ||
670 | /* | |
671 | * Call family demux module. If the demux module finds a match | |
672 | * for the frame it will fill-in the ifproto pointer. | |
673 | */ | |
674 | ||
675 | retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto ); | |
676 | ||
677 | if (m->m_flags & (M_BCAST|M_MCAST)) | |
678 | ifp->if_imcasts++; | |
679 | ||
680 | if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) { | |
681 | /* | |
682 | * No match was found, look for any offers. | |
683 | */ | |
684 | struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
685 | TAILQ_FOREACH(proto, tmp, next) { | |
686 | if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) { | |
687 | ifproto = proto; | |
688 | retval = 0; | |
689 | break; | |
690 | } | |
691 | } | |
692 | } | |
693 | ||
694 | if (retval) { | |
695 | if (retval != EJUSTRETURN) { | |
696 | m_freem(m); | |
697 | return retval; | |
698 | } | |
699 | else | |
700 | return 0; | |
701 | } | |
702 | else | |
703 | if (ifproto == 0) { | |
704 | printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n"); | |
705 | m_freem(m); | |
706 | return 0; | |
707 | } | |
708 | ||
709 | /* | |
710 | * Call any attached protocol filters. | |
711 | */ | |
712 | ||
713 | TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { | |
714 | if (PFILT(tmp).filter_dl_input) { | |
715 | retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, | |
716 | &m, | |
717 | &frame_header, | |
718 | &ifp); | |
719 | ||
720 | if (retval) { | |
721 | if (retval == EJUSTRETURN) | |
722 | return 0; | |
723 | else { | |
724 | m_freem(m); | |
725 | return retval; | |
726 | } | |
727 | } | |
728 | } | |
729 | } | |
730 | ||
731 | ||
732 | ||
733 | retval = (*ifproto->dl_input)(m, frame_header, | |
734 | ifp, ifproto->dl_tag, | |
735 | TRUE); | |
736 | ||
737 | if (retval == EJUSTRETURN) | |
738 | retval = 0; | |
739 | else | |
740 | if (retval) | |
741 | m_freem(m); | |
742 | ||
743 | KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0); | |
744 | return retval; | |
745 | } | |
746 | ||
747 | ||
748 | ||
749 | void ether_input(ifp, eh, m) | |
750 | struct ifnet *ifp; | |
751 | struct ether_header *eh; | |
752 | struct mbuf *m; | |
753 | ||
754 | { | |
755 | kprintf("Someone is calling ether_input!!\n"); | |
756 | ||
757 | dlil_input(ifp, m, NULL); | |
758 | } | |
759 | ||
760 | ||
761 | int | |
762 | dlil_event(struct ifnet *ifp, struct kern_event_msg *event) | |
763 | { | |
764 | struct dlil_filterq_entry *filt; | |
765 | int retval = 0; | |
766 | struct ifnet *orig_ifp = 0; | |
767 | struct if_proto *proto; | |
768 | struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
769 | struct kev_msg kev_msg; | |
770 | struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
771 | boolean_t funnel_state; | |
772 | ||
773 | ||
774 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
775 | ||
776 | while (orig_ifp != ifp) { | |
777 | orig_ifp = ifp; | |
778 | ||
779 | TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) { | |
780 | if (IFILT(filt).filter_if_event) { | |
781 | retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie, | |
782 | &ifp, | |
783 | &event); | |
784 | ||
785 | if (retval) { | |
786 | (void) thread_funnel_set(network_flock, funnel_state); | |
787 | if (retval == EJUSTRETURN) | |
788 | return 0; | |
789 | else | |
790 | return retval; | |
791 | } | |
792 | } | |
793 | ||
794 | if (ifp != orig_ifp) | |
795 | break; | |
796 | } | |
797 | } | |
798 | ||
799 | ||
800 | /* | |
801 | * Call Interface Module event hook, if any. | |
802 | */ | |
803 | ||
804 | if (ifp->if_event) { | |
805 | retval = ifp->if_event(ifp, (caddr_t) event); | |
806 | ||
807 | if (retval) { | |
808 | (void) thread_funnel_set(network_flock, funnel_state); | |
809 | ||
810 | if (retval == EJUSTRETURN) | |
811 | return 0; | |
812 | else | |
813 | return retval; | |
814 | } | |
815 | } | |
816 | ||
817 | /* | |
818 | * Call dl_event entry point for all protocols attached to this interface | |
819 | */ | |
820 | ||
821 | TAILQ_FOREACH(proto, tmp, next) { | |
822 | /* | |
823 | * Call any attached protocol filters. | |
824 | */ | |
825 | ||
826 | TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) { | |
827 | if (PFILT(filt).filter_dl_event) { | |
828 | retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie, | |
829 | event); | |
830 | ||
831 | if (retval) { | |
832 | (void) thread_funnel_set(network_flock, funnel_state); | |
833 | if (retval == EJUSTRETURN) | |
834 | return 0; | |
835 | else | |
836 | return retval; | |
837 | } | |
838 | } | |
839 | } | |
840 | ||
841 | ||
842 | /* | |
843 | * Finally, call the dl_event entry point (if any) | |
844 | */ | |
845 | ||
846 | if (proto->dl_event) | |
847 | retval = (*proto->dl_event)(event, proto->dl_tag); | |
848 | ||
849 | if (retval == EJUSTRETURN) { | |
850 | (void) thread_funnel_set(network_flock, funnel_state); | |
851 | return 0; | |
852 | } | |
853 | } | |
854 | ||
855 | ||
856 | /* | |
857 | * Now, post this event to the Kernel Event message queue | |
858 | */ | |
859 | ||
860 | kev_msg.vendor_code = event->vendor_code; | |
861 | kev_msg.kev_class = event->kev_class; | |
862 | kev_msg.kev_subclass = event->kev_subclass; | |
863 | kev_msg.event_code = event->event_code; | |
864 | kev_msg.dv[0].data_ptr = &event->event_data[0]; | |
865 | kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE; | |
866 | kev_msg.dv[1].data_length = 0; | |
867 | ||
868 | kev_post_msg(&kev_msg); | |
869 | ||
870 | (void) thread_funnel_set(network_flock, funnel_state); | |
871 | return 0; | |
872 | } | |
873 | ||
874 | ||
875 | ||
876 | int | |
877 | dlil_output(u_long dl_tag, | |
878 | struct mbuf *m, | |
879 | caddr_t route, | |
880 | struct sockaddr *dest, | |
881 | int raw | |
882 | ) | |
883 | { | |
884 | char *frame_type; | |
885 | char *dst_linkaddr; | |
886 | struct ifnet *orig_ifp = 0; | |
887 | struct ifnet *ifp; | |
888 | struct if_proto *proto; | |
889 | struct dlil_filterq_entry *tmp; | |
890 | int retval = 0; | |
891 | char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4]; | |
892 | char dst_linkaddr_buffer[MAX_LINKADDR * 4]; | |
893 | struct dlil_filterq_head *fhead; | |
894 | ||
895 | KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0); | |
896 | ||
897 | if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) { | |
898 | m_freem(m); | |
899 | return ENOENT; | |
900 | } | |
901 | ||
902 | ifp = dl_tag_array[dl_tag].ifp; | |
903 | proto = dl_tag_array[dl_tag].proto; | |
904 | ||
905 | frame_type = frame_type_buffer; | |
906 | dst_linkaddr = dst_linkaddr_buffer; | |
907 | ||
908 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
909 | ||
910 | if ((raw == 0) && (proto->dl_pre_output)) { | |
911 | retval = (*proto->dl_pre_output)(ifp, &m, dest, route, | |
912 | frame_type, dst_linkaddr, dl_tag); | |
913 | if (retval) { | |
914 | if (retval == EJUSTRETURN) | |
915 | return 0; | |
916 | else { | |
917 | m_freem(m); | |
918 | return retval; | |
919 | } | |
920 | } | |
921 | } | |
922 | ||
923 | /* | |
924 | * Run any attached protocol filters. | |
925 | */ | |
926 | ||
927 | if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) { | |
928 | TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) { | |
929 | if (PFILT(tmp).filter_dl_output) { | |
930 | retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie, | |
931 | &m, &ifp, &dest, dst_linkaddr, frame_type); | |
932 | if (retval) { | |
933 | if (retval == EJUSTRETURN) | |
934 | return 0; | |
935 | else { | |
936 | m_freem(m); | |
937 | return retval; | |
938 | } | |
939 | } | |
940 | } | |
941 | } | |
942 | } | |
943 | ||
944 | ||
945 | /* | |
946 | * Call framing module | |
947 | */ | |
948 | if ((raw == 0) && (ifp->if_framer)) { | |
949 | retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type); | |
950 | if (retval) { | |
951 | if (retval == EJUSTRETURN) | |
952 | return 0; | |
953 | else | |
954 | { | |
955 | m_freem(m); | |
956 | return retval; | |
957 | } | |
958 | } | |
959 | } | |
960 | ||
961 | #if BRIDGE | |
962 | if (do_bridge) { | |
963 | struct mbuf *m0 = m ; | |
964 | struct ether_header *eh = mtod(m, struct ether_header *); | |
965 | ||
966 | if (m->m_pkthdr.rcvif) | |
967 | m->m_pkthdr.rcvif = NULL ; | |
968 | ifp = bridge_dst_lookup(eh); | |
969 | bdg_forward(&m0, ifp); | |
970 | if (m0) | |
971 | m_freem(m0); | |
972 | ||
973 | return 0; | |
974 | } | |
975 | #endif | |
976 | ||
977 | ||
978 | /* | |
979 | * Let interface filters (if any) do their thing ... | |
980 | */ | |
981 | ||
982 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
983 | if (TAILQ_EMPTY(fhead) == 0) { | |
984 | while (orig_ifp != ifp) { | |
985 | orig_ifp = ifp; | |
986 | TAILQ_FOREACH(tmp, fhead, que) { | |
987 | if (IFILT(tmp).filter_if_output) { | |
988 | retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, | |
989 | &ifp, | |
990 | &m); | |
991 | if (retval) { | |
992 | if (retval == EJUSTRETURN) | |
993 | return 0; | |
994 | else { | |
995 | m_freem(m); | |
996 | return retval; | |
997 | } | |
998 | } | |
999 | ||
1000 | } | |
1001 | ||
1002 | if (ifp != orig_ifp) | |
1003 | break; | |
1004 | } | |
1005 | } | |
1006 | } | |
1007 | ||
1008 | /* | |
1009 | * Finally, call the driver. | |
1010 | */ | |
1011 | ||
1012 | KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0); | |
1013 | retval = (*ifp->if_output)(ifp, m); | |
1014 | KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0); | |
1015 | ||
1016 | KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0); | |
1017 | ||
1018 | if ((retval == 0) || (retval == EJUSTRETURN)) | |
1019 | return 0; | |
1020 | else | |
1021 | return retval; | |
1022 | } | |
1023 | ||
1024 | ||
1025 | int | |
1026 | dlil_ioctl(u_long proto_fam, | |
1027 | struct ifnet *ifp, | |
1028 | u_long ioctl_code, | |
1029 | caddr_t ioctl_arg) | |
1030 | { | |
1031 | struct dlil_filterq_entry *tmp; | |
1032 | struct dlil_filterq_head *fhead; | |
1033 | int retval = EOPNOTSUPP; | |
1034 | int retval2 = EOPNOTSUPP; | |
1035 | u_long dl_tag; | |
1036 | struct if_family_str *if_family; | |
1037 | ||
1038 | ||
1039 | if (proto_fam) { | |
1040 | retval = dlil_find_dltag(ifp->if_family, ifp->if_unit, | |
1041 | proto_fam, &dl_tag); | |
1042 | ||
1043 | if (retval == 0) { | |
1044 | if (dl_tag_array[dl_tag].ifp != ifp) | |
1045 | return ENOENT; | |
1046 | ||
1047 | /* | |
1048 | * Run any attached protocol filters. | |
1049 | */ | |
1050 | TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) { | |
1051 | if (PFILT(tmp).filter_dl_ioctl) { | |
1052 | retval = | |
1053 | (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie, | |
1054 | dl_tag_array[dl_tag].ifp, | |
1055 | ioctl_code, | |
1056 | ioctl_arg); | |
1057 | ||
1058 | if (retval) { | |
1059 | if (retval == EJUSTRETURN) | |
1060 | return 0; | |
1061 | else | |
1062 | return retval; | |
1063 | } | |
1064 | } | |
1065 | } | |
1066 | ||
1067 | if (dl_tag_array[dl_tag].proto->dl_ioctl) | |
1068 | retval = | |
1069 | (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag, | |
1070 | dl_tag_array[dl_tag].ifp, | |
1071 | ioctl_code, | |
1072 | ioctl_arg); | |
1073 | else | |
1074 | retval = EOPNOTSUPP; | |
1075 | } | |
1076 | else | |
1077 | retval = 0; | |
1078 | } | |
1079 | ||
1080 | if ((retval) && (retval != EOPNOTSUPP)) { | |
1081 | if (retval == EJUSTRETURN) | |
1082 | return 0; | |
1083 | else | |
1084 | return retval; | |
1085 | } | |
1086 | ||
1087 | ||
1088 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
1089 | TAILQ_FOREACH(tmp, fhead, que) { | |
1090 | if (IFILT(tmp).filter_if_ioctl) { | |
1091 | retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp, | |
1092 | ioctl_code, ioctl_arg); | |
1093 | if (retval2) { | |
1094 | if (retval2 == EJUSTRETURN) | |
1095 | return 0; | |
1096 | else | |
1097 | return retval2; | |
1098 | } | |
1099 | } | |
1100 | } | |
1101 | ||
1102 | ||
1103 | if_family = find_family_module(ifp->if_family); | |
1104 | if ((if_family) && (if_family->ifmod_ioctl)) { | |
1105 | retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg); | |
1106 | ||
1107 | if ((retval2) && (retval2 != EOPNOTSUPP)) { | |
1108 | if (retval2 == EJUSTRETURN) | |
1109 | return 0; | |
1110 | else | |
1111 | return retval2; | |
1112 | } | |
1113 | ||
1114 | if (retval == EOPNOTSUPP) | |
1115 | retval = retval2; | |
1116 | } | |
1117 | ||
1118 | if (ifp->if_ioctl) | |
1119 | retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg); | |
1120 | ||
1121 | if (retval == EOPNOTSUPP) | |
1122 | return retval2; | |
1123 | else { | |
1124 | if (retval2 == EOPNOTSUPP) | |
1125 | return 0; | |
1126 | else | |
1127 | return retval2; | |
1128 | } | |
1129 | } | |
1130 | ||
1131 | ||
1132 | int | |
1133 | dlil_attach_protocol(struct dlil_proto_reg_str *proto, | |
1134 | u_long *dl_tag) | |
1135 | { | |
1136 | struct ifnet *ifp; | |
1137 | struct if_proto *ifproto; | |
1138 | u_long i; | |
1139 | struct if_family_str *if_family; | |
1140 | struct dlil_proto_head *tmp; | |
1141 | struct kev_dl_proto_data ev_pr_data; | |
1142 | int s, retval = 0; | |
1143 | boolean_t funnel_state; | |
1144 | u_char *p; | |
1145 | ||
1146 | if ((proto->protocol_family == 0) || (proto->interface_family == 0)) | |
1147 | return EINVAL; | |
1148 | ||
1149 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1150 | s = splnet(); | |
1151 | if_family = find_family_module(proto->interface_family); | |
1152 | if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) { | |
1153 | kprintf("dlil_attach_protocol -- no interface family module %d", | |
1154 | proto->interface_family); | |
1155 | retval = ENOENT; | |
1156 | goto end; | |
1157 | } | |
1158 | ||
1159 | ifp = ifbyfamily(proto->interface_family, proto->unit_number); | |
1160 | if (!ifp) { | |
1161 | kprintf("dlil_attach_protocol -- no such interface %d unit %d\n", | |
1162 | proto->interface_family, proto->unit_number); | |
1163 | retval = ENOENT; | |
1164 | goto end; | |
1165 | } | |
1166 | ||
1167 | if (dlil_find_dltag(proto->interface_family, proto->unit_number, | |
1168 | proto->protocol_family, &i) == 0) { | |
1169 | retval = EEXIST; | |
1170 | goto end; | |
1171 | } | |
1172 | ||
1173 | for (i=1; i < dl_tag_nb; i++) | |
1174 | if (dl_tag_array[i].ifp == 0) | |
1175 | break; | |
1176 | ||
1177 | if (i == dl_tag_nb) { | |
1178 | // expand the tag array by MAX_DL_TAGS | |
1179 | MALLOC(p, u_char *, sizeof(struct dl_tag_str) * (dl_tag_nb + MAX_DL_TAGS), M_NKE, M_WAITOK); | |
1180 | if (p == 0) { | |
1181 | retval = ENOBUFS; | |
1182 | goto end; | |
1183 | } | |
1184 | bcopy(dl_tag_array, p, sizeof(struct dl_tag_str) * dl_tag_nb); | |
1185 | bzero(p + sizeof(struct dl_tag_str) * dl_tag_nb, sizeof(struct dl_tag_str) * MAX_DL_TAGS); | |
1186 | dl_tag_nb += MAX_DL_TAGS; | |
1187 | FREE(dl_tag_array, M_NKE); | |
1188 | dl_tag_array = (struct dl_tag_str *)p; | |
1189 | } | |
1190 | ||
1191 | /* | |
1192 | * Allocate and init a new if_proto structure | |
1193 | */ | |
1194 | ||
1195 | ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK); | |
1196 | if (!ifproto) { | |
1197 | printf("ERROR - DLIL failed if_proto allocation\n"); | |
1198 | retval = ENOMEM; | |
1199 | goto end; | |
1200 | } | |
1201 | ||
1202 | bzero(ifproto, sizeof(struct if_proto)); | |
1203 | ||
1204 | dl_tag_array[i].ifp = ifp; | |
1205 | dl_tag_array[i].proto = ifproto; | |
1206 | dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head; | |
1207 | ifproto->dl_tag = i; | |
1208 | *dl_tag = i; | |
1209 | ||
1210 | if (proto->default_proto) { | |
1211 | if (ifp->if_data.default_proto == 0) | |
1212 | ifp->if_data.default_proto = i; | |
1213 | else | |
1214 | printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n"); | |
1215 | } | |
1216 | ||
1217 | ifproto->protocol_family = proto->protocol_family; | |
1218 | ifproto->dl_input = proto->input; | |
1219 | ifproto->dl_pre_output = proto->pre_output; | |
1220 | ifproto->dl_event = proto->event; | |
1221 | ifproto->dl_offer = proto->offer; | |
1222 | ifproto->dl_ioctl = proto->ioctl; | |
1223 | ifproto->ifp = ifp; | |
1224 | TAILQ_INIT(&ifproto->pr_flt_head); | |
1225 | ||
1226 | /* | |
1227 | * Call family module add_proto routine so it can refine the | |
1228 | * demux descriptors as it wishes. | |
1229 | */ | |
1230 | retval = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag); | |
1231 | if (retval) { | |
1232 | dl_tag_array[i].ifp = 0; | |
1233 | FREE(ifproto, M_IFADDR); | |
1234 | goto end; | |
1235 | } | |
1236 | ||
1237 | /* | |
1238 | * Add to if_proto list for this interface | |
1239 | */ | |
1240 | ||
1241 | tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
1242 | TAILQ_INSERT_TAIL(tmp, ifproto, next); | |
1243 | ifp->refcnt++; | |
1244 | if (ifproto->dl_offer) | |
1245 | ifp->offercnt++; | |
1246 | ||
1247 | /* the reserved field carries the number of protocols still attached (subject to change) */ | |
1248 | ev_pr_data.proto_family = proto->protocol_family; | |
1249 | ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp); | |
1250 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED, | |
1251 | (struct net_event_data *)&ev_pr_data, | |
1252 | sizeof(struct kev_dl_proto_data)); | |
1253 | ||
1254 | end: | |
1255 | splx(s); | |
1256 | thread_funnel_set(network_flock, funnel_state); | |
1257 | return retval; | |
1258 | } | |
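
A caller-side sketch: a protocol stack fills in a `dlil_proto_reg_str` and attaches itself to one interface. The member names used here (`interface_family`, `unit_number`, `protocol_family`, `input`, `pre_output`, `default_proto`, `demux_desc_head`) are the ones `dlil_attach_protocol()` reads above; the input callback's signature is inferred from the `dl_input` call sites, the demux list is assumed to be a TAILQ, and the family constant is an assumed example.

```c
/* Hypothetical protocol input routine; signature inferred from the dl_input call sites. */
static int my_proto_input(struct mbuf *m, char *frame_header,
			  struct ifnet *ifp, u_long dl_tag, int sync_ok)
{
	m_freem(m);		/* a real protocol would hand the packet up its stack */
	return 0;
}

static int example_attach_protocol(u_long *dl_tag)
{
	struct dlil_proto_reg_str reg;

	bzero(&reg, sizeof(reg));
	TAILQ_INIT(&reg.demux_desc_head);	/* demux descriptors would be appended here */

	reg.interface_family = APPLE_IF_FAM_ETHERNET;	/* assumed example value */
	reg.unit_number      = 0;
	reg.protocol_family  = PF_INET;
	reg.input            = my_proto_input;
	reg.pre_output       = NULL;	/* event, offer and ioctl are also left NULL */
	reg.default_proto    = 0;

	return dlil_attach_protocol(&reg, dl_tag);
}
```
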
1259 | ||
1260 | ||
1261 | ||
1262 | int | |
1263 | dlil_detach_protocol(u_long dl_tag) | |
1264 | { | |
1265 | struct ifnet *ifp; | |
1266 | struct ifnet *orig_ifp=0; | |
1267 | struct if_proto *proto; | |
1268 | struct dlil_proto_head *tmp; | |
1269 | struct if_family_str *if_family; | |
1270 | struct dlil_filterq_entry *filter; | |
1271 | int s, retval = 0; | |
1272 | struct dlil_filterq_head *fhead; | |
1273 | struct kev_dl_proto_data ev_pr_data; | |
1274 | boolean_t funnel_state; | |
1275 | ||
1276 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1277 | s = splnet(); | |
1278 | ||
1279 | if (dl_tag >= dl_tag_nb || dl_tag_array[dl_tag].ifp == 0) { | |
1280 | retval = ENOENT; | |
1281 | goto end; | |
1282 | } | |
1283 | ||
1284 | ifp = dl_tag_array[dl_tag].ifp; | |
1285 | proto = dl_tag_array[dl_tag].proto; | |
1286 | ||
1287 | if_family = find_family_module(ifp->if_family); | |
1288 | if (if_family == NULL) { | |
1289 | retval = ENOENT; | |
1290 | goto end; | |
1291 | } | |
1292 | ||
1293 | tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
1294 | ||
1295 | /* | |
1296 | * Call family module del_proto | |
1297 | */ | |
1298 | ||
1299 | (*if_family->del_proto)(proto, dl_tag); | |
1300 | ||
1301 | ||
1302 | /* | |
1303 | * Remove and deallocate any attached protocol filters | |
1304 | */ | |
1305 | ||
1306 | while ((filter = TAILQ_FIRST(&proto->pr_flt_head)) != NULL) | |
1307 | dlil_detach_filter(filter->filter_id); | |
1308 | ||
1309 | if (proto->dl_offer) | |
1310 | ifp->offercnt--; | |
1311 | ||
1312 | if (ifp->if_data.default_proto == dl_tag) | |
1313 | ifp->if_data.default_proto = 0; | |
1314 | dl_tag_array[dl_tag].ifp = 0; | |
1315 | ||
1316 | /* the reserved field carries the number of protocols still attached (subject to change) */ | |
1317 | ev_pr_data.proto_family = proto->protocol_family; | |
1318 | ||
1319 | /* | |
1320 | * Cleanup routes that may still be in the routing table for that interface/protocol pair. | |
1321 | */ | |
1322 | ||
1323 | if_rtproto_del(ifp, proto->protocol_family); | |
1324 | ||
1325 | TAILQ_REMOVE(tmp, proto, next); | |
1326 | FREE(proto, M_IFADDR); | |
1327 | ||
1328 | ifp->refcnt--; | |
1329 | ev_pr_data.proto_remaining_count = dlil_ifp_proto_count(ifp); | |
1330 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED, | |
1331 | (struct net_event_data *)&ev_pr_data, | |
1332 | sizeof(struct kev_dl_proto_data)); | |
1333 | ||
1334 | if (ifp->refcnt == 0) { | |
1335 | ||
1336 | TAILQ_REMOVE(&ifnet, ifp, if_link); | |
1337 | ||
1338 | (*if_family->del_if)(ifp); | |
1339 | ||
1340 | if (--if_family->refcnt == 0) { | |
1341 | if (if_family->shutdown) | |
1342 | (*if_family->shutdown)(); | |
1343 | ||
1344 | TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); | |
1345 | FREE(if_family, M_IFADDR); | |
1346 | } | |
1347 | ||
1348 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
1349 | while (orig_ifp != ifp) { | |
1350 | orig_ifp = ifp; | |
1351 | ||
1352 | TAILQ_FOREACH(filter, fhead, que) { | |
1353 | if (IFILT(filter).filter_if_free) { | |
1354 | retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp); | |
1355 | if (retval) { | |
1356 | splx(s); | |
1357 | thread_funnel_set(network_flock, funnel_state); | |
1358 | return 0; | |
1359 | } | |
1360 | } | |
1361 | if (ifp != orig_ifp) | |
1362 | break; | |
1363 | } | |
1364 | } | |
1365 | ||
1366 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0); | |
1367 | ||
1368 | (*ifp->if_free)(ifp); | |
1369 | } | |
1370 | ||
1371 | end: | |
1372 | splx(s); | |
1373 | thread_funnel_set(network_flock, funnel_state); | |
1374 | return retval; | |
1375 | } | |
1376 | ||
1377 | ||
1378 | ||
1379 | ||
1380 | ||
1381 | int | |
1382 | dlil_if_attach(struct ifnet *ifp) | |
1383 | { | |
1384 | u_long interface_family = ifp->if_family; | |
1385 | struct if_family_str *if_family; | |
1386 | struct dlil_proto_head *tmp; | |
1387 | int stat; | |
1388 | int s; | |
1389 | boolean_t funnel_state; | |
1390 | ||
1391 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1392 | s = splnet(); | |
1393 | if (ifnet_inited == 0) { | |
1394 | TAILQ_INIT(&ifnet); | |
1395 | ifnet_inited = 1; | |
1396 | } | |
1397 | ||
1398 | if_family = find_family_module(interface_family); | |
1399 | ||
1400 | if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) { | |
1401 | splx(s); | |
1402 | kprintf("Attempt to attach interface without family module - %d\n", | |
1403 | interface_family); | |
1404 | thread_funnel_set(network_flock, funnel_state); | |
1405 | return ENODEV; | |
1406 | } | |
1407 | ||
1408 | if (ifp->refcnt == 0) { | |
1409 | /* | |
1410 | * Call the family module to fill in the appropriate fields in the | |
1411 | * ifnet structure. | |
1412 | */ | |
1413 | ||
1414 | stat = (*if_family->add_if)(ifp); | |
1415 | if (stat) { | |
1416 | splx(s); | |
1417 | kprintf("dlil_if_attach -- add_if failed with %d\n", stat); | |
1418 | thread_funnel_set(network_flock, funnel_state); | |
1419 | return stat; | |
1420 | } | |
1421 | if_family->refcnt++; | |
1422 | ||
1423 | /* | |
1424 | * Add the ifp to the interface list. | |
1425 | */ | |
1426 | ||
1427 | tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
1428 | TAILQ_INIT(tmp); | |
1429 | ||
1430 | ifp->if_data.default_proto = 0; | |
1431 | ifp->offercnt = 0; | |
1432 | TAILQ_INIT(&ifp->if_flt_head); | |
1433 | old_if_attach(ifp); | |
1434 | ||
1435 | if (if_family->init_if) { | |
1436 | stat = (*if_family->init_if)(ifp); | |
1437 | if (stat) { | |
1438 | kprintf("dlil_if_attach -- init_if failed with %d\n", stat); | |
1439 | } | |
1440 | } | |
1441 | } | |
1442 | ||
1443 | ifp->refcnt++; | |
1444 | ||
1445 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_ATTACHED, 0, 0); | |
1446 | ||
1447 | splx(s); | |
1448 | thread_funnel_set(network_flock, funnel_state); | |
1449 | return 0; | |
1450 | } | |
1451 | ||
1452 | ||
1453 | int | |
1454 | dlil_if_detach(struct ifnet *ifp) | |
1455 | { | |
1456 | struct if_proto *proto; | |
1457 | struct dlil_filterq_entry *if_filter; | |
1458 | struct if_family_str *if_family; | |
1459 | struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
1460 | struct kev_msg ev_msg; | |
1461 | boolean_t funnel_state; | |
1462 | ||
1463 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1464 | ||
1465 | if_family = find_family_module(ifp->if_family); | |
1466 | ||
1467 | if (!if_family) { | |
1468 | kprintf("Attempt to detach interface without family module - %s\n", | |
1469 | ifp->if_name); | |
1470 | thread_funnel_set(network_flock, funnel_state); | |
1471 | return ENODEV; | |
1472 | } | |
1473 | ||
1474 | while ((if_filter = TAILQ_FIRST(fhead)) != NULL) | |
1475 | dlil_detach_filter(if_filter->filter_id); | |
1476 | ||
1477 | ifp->refcnt--; | |
1478 | ||
1479 | if (ifp->refcnt > 0) { | |
1480 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHING, 0, 0); | |
1481 | thread_funnel_set(network_flock, funnel_state); | |
1482 | return DLIL_WAIT_FOR_FREE; | |
1483 | } | |
1484 | ||
1485 | while (ifp->if_multiaddrs.lh_first) { | |
1486 | struct ifmultiaddr *ifma = ifp->if_multiaddrs.lh_first; | |
1487 | ||
1488 | /* | |
1489 | * When the interface is gone, we will no | |
1490 | * longer be listening on these multicasts. | |
1491 | * Various bits of the stack may be referencing | |
1492 | * these multicasts, so we can't just free them. | |
1493 | * We place them on a list so they may be cleaned | |
1494 | * up later as the other bits of the stack release | |
1495 | * them. | |
1496 | */ | |
1497 | LIST_REMOVE(ifma, ifma_link); | |
1498 | ifma->ifma_ifp = NULL; | |
1499 | LIST_INSERT_HEAD(&ifma_lostlist, ifma, ifma_link); | |
1500 | } | |
1501 | ||
1502 | /* Let BPF know the interface is detaching. */ | |
1503 | bpfdetach(ifp); | |
1504 | TAILQ_REMOVE(&ifnet, ifp, if_link); | |
1505 | ||
1506 | (*if_family->del_if)(ifp); | |
1507 | ||
1508 | if (--if_family->refcnt == 0) { | |
1509 | if (if_family->shutdown) | |
1510 | (*if_family->shutdown)(); | |
1511 | ||
1512 | TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); | |
1513 | FREE(if_family, M_IFADDR); | |
1514 | } | |
1515 | ||
1516 | dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IF_DETACHED, 0, 0); | |
1517 | thread_funnel_set(network_flock, funnel_state); | |
1518 | return 0; | |
1519 | } | |
1520 | ||
1521 | ||
1522 | int | |
1523 | dlil_reg_if_modules(u_long interface_family, | |
1524 | struct dlil_ifmod_reg_str *ifmod) | |
1525 | { | |
1526 | struct if_family_str *if_family; | |
1527 | int s; | |
1528 | boolean_t funnel_state; | |
1529 | ||
1530 | ||
1531 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1532 | s = splnet(); | |
1533 | if (find_family_module(interface_family)) { | |
1534 | kprintf("Attempt to register dlil family module more than once - %d\n", | |
1535 | interface_family); | |
1536 | splx(s); | |
1537 | thread_funnel_set(network_flock, funnel_state); | |
1538 | return EEXIST; | |
1539 | } | |
1540 | ||
1541 | if ((!ifmod->add_if) || (!ifmod->del_if) || | |
1542 | (!ifmod->add_proto) || (!ifmod->del_proto)) { | |
1543 | kprintf("dlil_reg_if_modules passed at least one null pointer\n"); | |
1544 | splx(s); | |
1545 | thread_funnel_set(network_flock, funnel_state); | |
1546 | return EINVAL; | |
1547 | } | |
1548 | ||
1549 | /* | |
1550 | * The following is a gross hack to keep from breaking | |
1551 | * Vicomsoft's internet gateway on Jaguar. Vicomsoft | |
1552 | * does not zero the reserved fields in dlil_ifmod_reg_str. | |
1553 | * As a result, we have to zero any function that used to | |
1554 | * be reserved fields at the time Vicomsoft built their | |
1555 | * kext. Radar #2974305 | |
1556 | */ | |
1557 | if (ifmod->reserved[0] != 0 || ifmod->reserved[1] != 0 || ifmod->reserved[2]) { | |
1558 | if (interface_family == 123) { /* Vicom */ | |
1559 | ifmod->init_if = 0; | |
1560 | } else { | |
1561 | splx(s); | |
1562 | thread_funnel_set(network_flock, funnel_state); | |
1563 | return EINVAL; | |
1564 | } | |
1565 | } | |
1566 | ||
1567 | if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK); | |
1568 | if (!if_family) { | |
1569 | kprintf("dlil_reg_if_modules failed allocation\n"); | |
1570 | splx(s); | |
1571 | thread_funnel_set(network_flock, funnel_state); | |
1572 | return ENOMEM; | |
1573 | } | |
1574 | ||
1575 | bzero(if_family, sizeof(struct if_family_str)); | |
1576 | ||
1577 | if_family->if_family = interface_family & 0xffff; | |
1578 | if_family->shutdown = ifmod->shutdown; | |
1579 | if_family->add_if = ifmod->add_if; | |
1580 | if_family->del_if = ifmod->del_if; | |
1581 | if_family->init_if = ifmod->init_if; | |
1582 | if_family->add_proto = ifmod->add_proto; | |
1583 | if_family->del_proto = ifmod->del_proto; | |
1584 | if_family->ifmod_ioctl = ifmod->ifmod_ioctl; | |
1585 | if_family->refcnt = 1; | |
1586 | if_family->flags = 0; | |
1587 | ||
1588 | TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next); | |
1589 | splx(s); | |
1590 | thread_funnel_set(network_flock, funnel_state); | |
1591 | return 0; | |
1592 | } | |
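
A sketch of how an interface family might register itself. Per the check above, `add_if`, `del_if`, `add_proto` and `del_proto` must all be non-NULL, and `bzero()` keeps the reserved fields zeroed so the Vicomsoft workaround does not fire. The callbacks and `MY_IF_FAMILY` are hypothetical; their signatures follow `struct if_family_str` above.

```c
/* Hypothetical family callbacks; signatures follow struct if_family_str above. */
extern int my_fam_add_if(struct ifnet *ifp);
extern int my_fam_del_if(struct ifnet *ifp);
extern int my_fam_add_proto(struct ddesc_head_str *demux_desc_head,
			    struct if_proto *proto, u_long dl_tag);
extern int my_fam_del_proto(struct if_proto *proto, u_long dl_tag);

static int example_register_if_family(void)
{
	struct dlil_ifmod_reg_str ifmod;

	bzero(&ifmod, sizeof(ifmod));		/* keeps reserved[] zeroed, as required above */
	ifmod.add_if    = my_fam_add_if;	/* the four mandatory entry points */
	ifmod.del_if    = my_fam_del_if;
	ifmod.add_proto = my_fam_add_proto;
	ifmod.del_proto = my_fam_del_proto;
	/* init_if, shutdown and ifmod_ioctl are optional and left NULL */

	return dlil_reg_if_modules(MY_IF_FAMILY /* assumed family constant */, &ifmod);
}
```
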
1593 | ||
1594 | int dlil_dereg_if_modules(u_long interface_family) | |
1595 | { | |
1596 | struct if_family_str *if_family; | |
1597 | int s, ret = 0; | |
1598 | boolean_t funnel_state; | |
1599 | ||
1600 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1601 | s = splnet(); | |
1602 | if_family = find_family_module(interface_family); | |
1603 | if (if_family == 0) { | |
1604 | splx(s); | |
1605 | thread_funnel_set(network_flock, funnel_state); | |
1606 | return ENOENT; | |
1607 | } | |
1608 | ||
1609 | if (--if_family->refcnt == 0) { | |
1610 | if (if_family->shutdown) | |
1611 | (*if_family->shutdown)(); | |
1612 | ||
1613 | TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); | |
1614 | FREE(if_family, M_IFADDR); | |
1615 | } | |
1616 | else { | |
1617 | if_family->flags |= DLIL_SHUTDOWN; | |
1618 | ret = DLIL_WAIT_FOR_FREE; | |
1619 | } | |
1620 | ||
1621 | splx(s); | |
1622 | thread_funnel_set(network_flock, funnel_state); | |
1623 | return ret; | |
1624 | } | |
1625 | ||
1626 | ||
1627 | ||
1628 | int | |
1629 | dlil_reg_proto_module(u_long protocol_family, u_long interface_family, | |
1630 | struct dlil_protomod_reg_str *protomod_reg) | |
1631 | { | |
1632 | struct proto_family_str *proto_family; | |
1633 | int s; | |
1634 | boolean_t funnel_state; | |
1635 | ||
1636 | ||
1637 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1638 | s = splnet(); | |
1639 | if (find_proto_module(protocol_family, interface_family)) { | |
1640 | splx(s); | |
1641 | thread_funnel_set(network_flock, funnel_state); | |
1642 | return EEXIST; | |
1643 | } | |
1644 | ||
1645 | if (protomod_reg->reserved[0] != 0 || protomod_reg->reserved[1] != 0 | |
1646 | || protomod_reg->reserved[2] != 0 || protomod_reg->reserved[3] !=0) { | |
1647 | splx(s); | |
1648 | thread_funnel_set(network_flock, funnel_state); | |
1649 | return EINVAL; | |
1650 | } | |
1651 | ||
1652 | if (protomod_reg->attach_proto == NULL) { | |
1653 | splx(s); | |
1654 | thread_funnel_set(network_flock, funnel_state); | |
1655 | return EINVAL; | |
1656 | } | |
1657 | ||
1658 | proto_family = (struct proto_family_str *) _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK); | |
1659 | if (!proto_family) { | |
1660 | splx(s); | |
1661 | thread_funnel_set(network_flock, funnel_state); | |
1662 | return ENOMEM; | |
1663 | } | |
1664 | ||
1665 | bzero(proto_family, sizeof(struct proto_family_str)); | |
1666 | proto_family->proto_family = protocol_family; | |
1667 | proto_family->if_family = interface_family & 0xffff; | |
1668 | proto_family->attach_proto = protomod_reg->attach_proto; | |
1669 | proto_family->detach_proto = protomod_reg->detach_proto; | |
1670 | ||
1671 | TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next); | |
1672 | splx(s); | |
1673 | thread_funnel_set(network_flock, funnel_state); | |
1674 | return 0; | |
1675 | } | |
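
A sketch of a protocol plumbing module registering per (protocol family, interface family) handlers; `dlil_plumb_protocol()` below then calls `attach_proto` when the protocol is configured on an interface, and `dlil_unplumb_protocol()` falls back to `dlil_detach_protocol()` when `detach_proto` is NULL. The handlers and family constants are hypothetical; their signatures follow `struct proto_family_str` above.

```c
/* Hypothetical plumbing handlers; signatures follow struct proto_family_str above. */
extern int my_inet_attach(struct ifnet *ifp, u_long *dl_tag);
extern int my_inet_detach(struct ifnet *ifp, u_long dl_tag);

static int example_register_proto_module(void)
{
	struct dlil_protomod_reg_str reg;

	bzero(&reg, sizeof(reg));		/* reserved[] must be zero, per the check above */
	reg.attach_proto = my_inet_attach;	/* required */
	reg.detach_proto = my_inet_detach;	/* optional; dlil_detach_protocol() is the fallback */

	return dlil_reg_proto_module(PF_INET, APPLE_IF_FAM_ETHERNET /* assumed */, &reg);
}
```
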
1676 | ||
1677 | int dlil_dereg_proto_module(u_long protocol_family, u_long interface_family) | |
1678 | { | |
1679 | struct proto_family_str *proto_family; | |
1680 | int s, ret = 0; | |
1681 | boolean_t funnel_state; | |
1682 | ||
1683 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1684 | s = splnet(); | |
1685 | proto_family = find_proto_module(protocol_family, interface_family); | |
1686 | if (proto_family == 0) { | |
1687 | splx(s); | |
1688 | thread_funnel_set(network_flock, funnel_state); | |
1689 | return ENOENT; | |
1690 | } | |
1691 | ||
1692 | TAILQ_REMOVE(&proto_family_head, proto_family, proto_fam_next); | |
1693 | FREE(proto_family, M_IFADDR); | |
1694 | ||
1695 | splx(s); | |
1696 | thread_funnel_set(network_flock, funnel_state); | |
1697 | return ret; | |
1698 | } | |
1699 | ||
1700 | int dlil_plumb_protocol(u_long protocol_family, struct ifnet *ifp, u_long *dl_tag) | |
1701 | { | |
1702 | struct proto_family_str *proto_family; | |
1703 | int s, ret = 0; | |
1704 | boolean_t funnel_state; | |
1705 | ||
1706 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1707 | s = splnet(); | |
1708 | proto_family = find_proto_module(protocol_family, ifp->if_family); | |
1709 | if (proto_family == 0) { | |
1710 | splx(s); | |
1711 | thread_funnel_set(network_flock, funnel_state); | |
1712 | return ENOENT; | |
1713 | } | |
1714 | ||
1715 | ret = (*proto_family->attach_proto)(ifp, dl_tag); | |
1716 | ||
1717 | splx(s); | |
1718 | thread_funnel_set(network_flock, funnel_state); | |
1719 | return ret; | |
1720 | } | |
1721 | ||
1722 | ||
1723 | int dlil_unplumb_protocol(u_long protocol_family, struct ifnet *ifp) | |
1724 | { | |
1725 | struct proto_family_str *proto_family; | |
1726 | int s, ret = 0; | |
1727 | u_long tag; | |
1728 | boolean_t funnel_state; | |
1729 | ||
1730 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
1731 | s = splnet(); | |
1732 | ||
1733 | ret = dlil_find_dltag(ifp->if_family, ifp->if_unit, protocol_family, &tag); | |
1734 | ||
1735 | if (ret == 0) { | |
1736 | proto_family = find_proto_module(protocol_family, ifp->if_family); | |
1737 | if (proto_family && proto_family->detach_proto) | |
1738 | ret = (*proto_family->detach_proto)(ifp, tag); | |
1739 | else | |
1740 | ret = dlil_detach_protocol(tag); | |
1741 | } | |
1742 | ||
1743 | splx(s); | |
1744 | thread_funnel_set(network_flock, funnel_state); | |
1745 | return ret; | |
1746 | } | |
1747 | ||
1748 | ||
1749 | ||
1750 | /* | |
1751 | * Old if_attach kept here as a thin wrapper for temporary backwards compatibility | |
1752 | */ | |
1753 | ||
1754 | void if_attach(ifp) | |
1755 | struct ifnet *ifp; | |
1756 | { | |
1757 | dlil_if_attach(ifp); | |
1758 | } | |
1759 | ||
1760 | ||
1761 | ||
1762 | int | |
1763 | dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id) | |
1764 | { | |
1765 | struct ifnet *orig_ifp = 0; | |
1766 | struct ifnet *ifp; | |
1767 | struct if_proto *ifproto; | |
1768 | struct if_proto *proto; | |
1769 | struct dlil_filterq_entry *tmp; | |
1770 | int retval = 0; | |
1771 | struct dlil_filterq_head *fhead; | |
1772 | int match_found; | |
1773 | ||
1774 | dlil_stats.inject_if_in1++; | |
1775 | ||
1776 | if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER) | |
1777 | return ENOENT; | |
1778 | ||
1779 | ifp = dlil_filters[from_id].ifp; | |
1780 | ||
1781 | /* | |
1782 | * Let interface filters (if any) do their thing ... | |
1783 | */ | |
1784 | ||
1785 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
1786 | match_found = 0; | |
1787 | ||
1788 | if (TAILQ_EMPTY(fhead) == 0) { | |
1789 | while (orig_ifp != ifp) { | |
1790 | orig_ifp = ifp; | |
1791 | TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) { | |
1792 | if ((match_found) && (IFILT(tmp).filter_if_input)) { | |
1793 | retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie, | |
1794 | &ifp, | |
1795 | &m, | |
1796 | &frame_header); | |
1797 | if (retval) { | |
1798 | if (retval == EJUSTRETURN) | |
1799 | return 0; | |
1800 | else { | |
1801 | m_freem(m); | |
1802 | return retval; | |
1803 | } | |
1804 | } | |
1805 | ||
1806 | } | |
1807 | ||
1808 | if (ifp != orig_ifp) | |
1809 | break; | |
1810 | ||
1811 | if (from_id == tmp->filter_id) | |
1812 | match_found = 1; | |
1813 | } | |
1814 | } | |
1815 | } | |
1816 | ||
1817 | ifp->if_lastchange = time; | |
1818 | ||
1819 | /* | |
1820 | * Call family demux module. If the demux module finds a match | |
1821 | * for the frame it will fill-in the ifproto pointer. | |
1822 | */ | |
1823 | ||
1824 | retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto); | |
1825 | ||
1826 | if (m->m_flags & (M_BCAST|M_MCAST)) | |
1827 | ifp->if_imcasts++; | |
1828 | ||
1829 | if ((retval) && (ifp->offercnt)) { | |
1830 | /* | |
1831 | * No match was found, look for any offers. | |
1832 | */ | |
1833 | struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; | |
1834 | TAILQ_FOREACH(proto, tmp, next) { | |
1835 | if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) { | |
1836 | ifproto = proto; | |
1837 | retval = 0; | |
1838 | break; | |
1839 | } | |
1840 | } | |
1841 | } | |
1842 | ||
1843 | if (retval) { | |
1844 | if (retval != EJUSTRETURN) { | |
1845 | m_freem(m); | |
1846 | return retval; | |
1847 | } | |
1848 | else | |
1849 | return 0; | |
1850 | } | |
1851 | else | |
1852 | if (ifproto == 0) { | |
1853 | printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n"); | |
1854 | m_freem(m); | |
1855 | return 0; | |
1856 | } | |
1857 | ||
1858 | /* | |
1859 | * Call any attached protocol filters. | |
1860 | */ | |
1861 | TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { | |
1862 | if (PFILT(tmp).filter_dl_input) { | |
1863 | retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, | |
1864 | &m, | |
1865 | &frame_header, | |
1866 | &ifp); | |
1867 | ||
1868 | if (retval) { | |
1869 | if (retval == EJUSTRETURN) | |
1870 | return 0; | |
1871 | else { | |
1872 | m_freem(m); | |
1873 | return retval; | |
1874 | } | |
1875 | } | |
1876 | } | |
1877 | } | |
1878 | ||
1879 | ||
1880 | ||
1881 | retval = (*ifproto->dl_input)(m, frame_header, | |
1882 | ifp, ifproto->dl_tag, | |
1883 | FALSE); | |
1884 | ||
1885 | dlil_stats.inject_if_in2++; | |
1886 | if (retval == EJUSTRETURN) | |
1887 | retval = 0; | |
1888 | else | |
1889 | if (retval) | |
1890 | m_freem(m); | |
1891 | ||
1892 | return retval; | |
1893 | ||
1894 | } | |
1895 | ||
1896 | ||
1897 | ||
1898 | ||
1899 | ||
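/*
 * dlil_inject_pr_input() is the protocol-filter counterpart of
 * dlil_inject_if_input(): the protocol filter identified by from_id
 * re-injects an inbound packet, the remaining protocol filters on that
 * attachment run, and the packet is finally handed to the protocol's
 * dl_input routine.  Returns ENOENT if from_id does not name a protocol
 * filter.
 */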
1900 | int | |
1901 | dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id) | |
1902 | { | |
1903 | struct ifnet *orig_ifp = 0; | |
1904 | struct dlil_filterq_entry *tmp; | |
1905 | int retval; | |
1906 | struct if_proto *ifproto = 0; | |
1907 | int match_found; | |
1908 | struct ifnet *ifp; | |
1909 | ||
1910 | dlil_stats.inject_pr_in1++; | |
1911 | if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER) | |
1912 | return ENOENT; | |
1913 | ||
1914 | ifproto = dlil_filters[from_id].proto; | |
1915 | ifp = dlil_filters[from_id].ifp; | |
1916 | ||
1917 | /* | |
1918 | * Call any attached protocol filters. | |
1919 | */ | |
1920 | ||
1921 | match_found = 0; | |
1922 | TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { | |
1923 | if ((match_found) && (PFILT(tmp).filter_dl_input)) { | |
1924 | retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, | |
1925 | &m, | |
1926 | &frame_header, | |
1927 | &ifp); | |
1928 | ||
1929 | if (retval) { | |
1930 | if (retval == EJUSTRETURN) | |
1931 | return 0; | |
1932 | else { | |
1933 | m_freem(m); | |
1934 | return retval; | |
1935 | } | |
1936 | } | |
1937 | } | |
1938 | ||
1939 | if (tmp->filter_id == from_id) | |
1940 | match_found = 1; | |
1941 | } | |
1942 | ||
1943 | ||
1944 | retval = (*ifproto->dl_input)(m, frame_header, | |
1945 | ifp, ifproto->dl_tag, | |
1946 | FALSE); | |
1947 | ||
1948 | if (retval == EJUSTRETURN) | |
1949 | retval = 0; | |
1950 | else | |
1951 | if (retval) | |
1952 | m_freem(m); | |
1953 | ||
1954 | dlil_stats.inject_pr_in2++; | |
1955 | return retval; | |
1956 | } | |
1957 | ||
1958 | ||
1959 | ||
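/*
 * dlil_inject_pr_output() re-injects an outbound packet on behalf of the
 * protocol filter identified by from_id.  For non-raw packets the caller
 * must supply frame_type and dst_linkaddr (EINVAL otherwise).  The packet
 * then passes through the remaining protocol filters, the interface
 * framing routine, any interface filters, and finally the driver's
 * if_output entry point.
 */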
1960 | int | |
1961 | dlil_inject_pr_output(struct mbuf *m, | |
1962 | struct sockaddr *dest, | |
1963 | int raw, | |
1964 | char *frame_type, | |
1965 | char *dst_linkaddr, | |
1966 | u_long from_id) | |
1967 | { | |
1968 | struct ifnet *orig_ifp = 0; | |
1969 | struct ifnet *ifp; | |
1970 | struct dlil_filterq_entry *tmp; | |
1971 | int retval = 0; | |
1972 | char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4]; | |
1973 | char dst_linkaddr_buffer[MAX_LINKADDR * 4]; | |
1974 | struct dlil_filterq_head *fhead; | |
1975 | int match_found; | |
1976 | u_long dl_tag; | |
1977 | ||
1978 | dlil_stats.inject_pr_out1++; | |
1979 | if (raw == 0) { | |
1980 | if (frame_type) | |
1981 | bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4); | |
1982 | else | |
1983 | return EINVAL; | |
1984 | ||
1985 | if (dst_linkaddr) | |
1986 | bcopy(dst_linkaddr, &dst_linkaddr_buffer[0], MAX_LINKADDR * 4); | |
1987 | else | |
1988 | return EINVAL; | |
1989 | } | |
1990 | ||
1991 | if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_PR_FILTER) | |
1992 | return ENOENT; | |
1993 | ||
1994 | ifp = dlil_filters[from_id].ifp; | |
1995 | dl_tag = dlil_filters[from_id].proto->dl_tag; | |
1996 | ||
1997 | frame_type = frame_type_buffer; | |
1998 | dst_linkaddr = dst_linkaddr_buffer; | |
1999 | ||
2000 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
2001 | ||
2002 | /* | |
2003 | * Run any attached protocol filters. | |
2004 | */ | |
2005 | match_found = 0; | |
2006 | ||
2007 | if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) { | |
2008 | TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) { | |
2009 | if ((match_found) && (PFILT(tmp).filter_dl_output)) { | |
2010 | retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie, | |
2011 | &m, &ifp, &dest, dst_linkaddr, frame_type); | |
2012 | if (retval) { | |
2013 | if (retval == EJUSTRETURN) | |
2014 | return 0; | |
2015 | else { | |
2016 | m_freem(m); | |
2017 | return retval; | |
2018 | } | |
2019 | } | |
2020 | } | |
2021 | ||
2022 | if (tmp->filter_id == from_id) | |
2023 | match_found = 1; | |
2024 | } | |
2025 | } | |
2026 | ||
2027 | ||
2028 | /* | |
2029 | * Call framing module | |
2030 | */ | |
2031 | if ((raw == 0) && (ifp->if_framer)) { | |
2032 | retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type); | |
2033 | if (retval) { | |
2034 | if (retval == EJUSTRETURN) | |
2035 | return 0; | |
2036 | else | |
2037 | { | |
2038 | m_freem(m); | |
2039 | return retval; | |
2040 | } | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | ||
2045 | #if BRIDGE | |
2046 | if (do_bridge) { | |
2047 | struct mbuf *m0 = m; | |
2048 | struct ether_header *eh = mtod(m, struct ether_header *); | |
2049 | ||
2050 | if (m->m_pkthdr.rcvif) | |
2051 | m->m_pkthdr.rcvif = NULL; | |
2052 | ifp = bridge_dst_lookup(eh); | |
2053 | bdg_forward(&m0, ifp); | |
2054 | if (m0) | |
2055 | m_freem(m0); | |
2056 | ||
2057 | return 0; | |
2058 | } | |
2059 | #endif | |
2060 | ||
2061 | ||
2062 | /* | |
2063 | * Let interface filters (if any) do their thing ... | |
2064 | */ | |
2065 | ||
2066 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
2067 | if (TAILQ_EMPTY(fhead) == 0) { | |
2068 | while (orig_ifp != ifp) { | |
2069 | orig_ifp = ifp; | |
2070 | TAILQ_FOREACH(tmp, fhead, que) { | |
2071 | if (IFILT(tmp).filter_if_output) { | |
2072 | retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, | |
2073 | &ifp, | |
2074 | &m); | |
2075 | if (retval) { | |
2076 | if (retval == EJUSTRETURN) | |
2077 | return 0; | |
2078 | else { | |
2079 | m_freem(m); | |
2080 | return retval; | |
2081 | } | |
2082 | } | |
2083 | ||
2084 | } | |
2085 | ||
2086 | if (ifp != orig_ifp) | |
2087 | break; | |
2088 | } | |
2089 | } | |
2090 | } | |
2091 | ||
2092 | /* | |
2093 | * Finally, call the driver. | |
2094 | */ | |
2095 | ||
2096 | retval = (*ifp->if_output)(ifp, m); | |
2097 | dlil_stats.inject_pr_out2++; | |
2098 | if ((retval == 0) || (retval == EJUSTRETURN)) | |
2099 | return 0; | |
2100 | else | |
2101 | return retval; | |
2102 | } | |
2103 | ||
2104 | ||
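/*
 * dlil_inject_if_output() re-injects an outbound packet on behalf of the
 * interface filter identified by from_id.  Only the interface filters that
 * follow the injector (in output-processing order) are run before the
 * packet is handed to the driver's if_output entry point.  Returns ENOENT
 * if from_id does not name an interface filter.
 */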
2105 | int | |
2106 | dlil_inject_if_output(struct mbuf *m, u_long from_id) | |
2107 | { | |
2108 | struct ifnet *orig_ifp = 0; | |
2109 | struct ifnet *ifp; | |
2110 | struct dlil_filterq_entry *tmp; | |
2111 | int retval = 0; | |
2112 | struct dlil_filterq_head *fhead; | |
2113 | int match_found; | |
2114 | ||
2115 | dlil_stats.inject_if_out1++; | |
2116 | if (from_id >= dlil_filters_nb || dlil_filters[from_id].type != DLIL_IF_FILTER) | |
2117 | return ENOENT; | |
2118 | ||
2119 | ifp = dlil_filters[from_id].ifp; | |
2120 | ||
2121 | /* | |
2122 | * Let interface filters (if any) do their thing ... | |
2123 | */ | |
2124 | ||
2125 | fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; | |
2126 | match_found = 0; | |
2127 | ||
2128 | if (TAILQ_EMPTY(fhead) == 0) { | |
2129 | while (orig_ifp != ifp) { | |
2130 | orig_ifp = ifp; | |
2131 | TAILQ_FOREACH(tmp, fhead, que) { | |
2132 | if ((match_found) && (IFILT(tmp).filter_if_output)) { | |
2133 | retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, | |
2134 | &ifp, | |
2135 | &m); | |
2136 | if (retval) { | |
2137 | if (retval == EJUSTRETURN) | |
2138 | return 0; | |
2139 | else { | |
2140 | m_freem(m); | |
2141 | return retval; | |
2142 | } | |
2143 | } | |
2144 | ||
2145 | } | |
2146 | ||
2147 | if (ifp != orig_ifp) | |
2148 | break; | |
2149 | ||
2150 | if (from_id == tmp->filter_id) | |
2151 | match_found = 1; | |
2152 | } | |
2153 | } | |
2154 | } | |
2155 | ||
2156 | /* | |
2157 | * Finally, call the driver. | |
2158 | */ | |
2159 | ||
2160 | retval = (*ifp->if_output)(ifp, m); | |
2161 | dlil_stats.inject_if_out2++; | |
2162 | if ((retval == 0) || (retval == EJUSTRETURN)) | |
2163 | return 0; | |
2164 | else | |
2165 | return retval; | |
2166 | } | |
2167 | ||
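/*
 * The dlil_recycle_* routines are stub handlers installed by
 * dlil_if_release().  They make a detached-but-recyclable ifnet harmless:
 * ioctls are rejected with EOPNOTSUPP, outbound packets are silently
 * dropped, and free/bpf-tap requests become no-ops until the interface
 * structure is reused.
 */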
2168 | static | |
2169 | int dlil_recycle_ioctl(struct ifnet *ifnet_ptr, u_long ioctl_code, void *ioctl_arg) | |
2170 | { | |
2171 | ||
2172 | return EOPNOTSUPP; | |
2173 | } | |
2174 | ||
2175 | static | |
2176 | int dlil_recycle_output(struct ifnet *ifnet_ptr, struct mbuf *m) | |
2177 | { | |
2178 | ||
2179 | m_freem(m); | |
2180 | return 0; | |
2181 | } | |
2182 | ||
2183 | static | |
2184 | int dlil_recycle_free(struct ifnet *ifnet_ptr) | |
2185 | { | |
2186 | return 0; | |
2187 | } | |
2188 | ||
2189 | static | |
2190 | int dlil_recycle_set_bpf_tap(struct ifnet *ifp, int mode, | |
2191 | int (*bpf_callback)(struct ifnet *, struct mbuf *)) | |
2192 | { | |
2193 | /* XXX not sure what to do here */ | |
2194 | return 0; | |
2195 | } | |
2196 | ||
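/*
 * dlil_if_acquire() hands a driver an ifnet for the given family.  It first
 * scans dlil_ifnet_head for a previously allocated interface with the same
 * family and uniqueid so that detached interfaces can be recycled; a
 * matching interface that is still in use yields EBUSY when a uniqueid was
 * supplied.  If nothing is reusable, a new dlil_ifnet is allocated, the
 * uniqueid (if any) is copied, and the interface is marked IFEF_INUSE
 * before being returned in *ifp.
 *
 * Illustrative sketch only (APPLE_IF_FAM_ETHERNET and unique_addr stand in
 * for the driver's real family constant and unique id):
 *
 *	struct ifnet *ifp;
 *	if (dlil_if_acquire(APPLE_IF_FAM_ETHERNET, &unique_addr,
 *	                    sizeof(unique_addr), &ifp) == 0) {
 *		... fill in driver fields, then dlil_if_attach(ifp) ...
 *	}
 */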
2197 | int dlil_if_acquire(u_long family, void *uniqueid, size_t uniqueid_len, | |
2198 | struct ifnet **ifp) | |
2199 | { | |
2200 | struct ifnet *ifp1 = NULL; | |
2201 | struct dlil_ifnet *dlifp1 = NULL; | |
2202 | int s, ret = 0; | |
2203 | boolean_t funnel_state; | |
2204 | ||
2205 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
2206 | s = splnet(); | |
2207 | ||
2208 | TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) { | |
2209 | ||
2210 | ifp1 = (struct ifnet *)dlifp1; | |
2211 | ||
2212 | if (ifp1->if_family == family) { | |
2213 | ||
2214 | /* uniqueid matches (same length and contents); a zero-length uniqueid only matches an entry that also has none */ | |
2215 | if ((uniqueid_len == dlifp1->if_uniqueid_len) | |
2216 | && !bcmp(uniqueid, dlifp1->if_uniqueid, uniqueid_len)) { | |
2217 | ||
2218 | /* check for matching interface in use */ | |
2219 | if (ifp1->if_eflags & IFEF_INUSE) { | |
2220 | if (uniqueid_len) { | |
2221 | ret = EBUSY; | |
2222 | goto end; | |
2223 | } | |
2224 | } | |
2225 | else { | |
2226 | ||
2227 | ifp1->if_eflags |= (IFEF_INUSE | IFEF_REUSE); | |
2228 | *ifp = ifp1; | |
2229 | goto end; | |
2230 | } | |
2231 | } | |
2232 | } | |
2233 | } | |
2234 | ||
2235 | /* no interface found, allocate a new one */ | |
2236 | MALLOC(dlifp1, struct dlil_ifnet *, sizeof(*dlifp1), M_NKE, M_WAITOK); | |
2237 | if (dlifp1 == 0) { | |
2238 | ret = ENOMEM; | |
2239 | goto end; | |
2240 | } | |
2241 | ||
2242 | bzero(dlifp1, sizeof(*dlifp1)); | |
2243 | ||
2244 | if (uniqueid_len) { | |
2245 | MALLOC(dlifp1->if_uniqueid, void *, uniqueid_len, M_NKE, M_WAITOK); | |
2246 | if (dlifp1->if_uniqueid == 0) { | |
2247 | FREE(dlifp1, M_NKE); | |
2248 | ret = ENOMEM; | |
2249 | goto end; | |
2250 | } | |
2251 | bcopy(uniqueid, dlifp1->if_uniqueid, uniqueid_len); | |
2252 | dlifp1->if_uniqueid_len = uniqueid_len; | |
2253 | } | |
2254 | ||
2255 | ifp1 = (struct ifnet *)dlifp1; | |
2256 | ifp1->if_eflags |= IFEF_INUSE; | |
2257 | ||
2258 | TAILQ_INSERT_TAIL(&dlil_ifnet_head, dlifp1, dl_if_link); | |
2259 | ||
2260 | *ifp = ifp1; | |
2261 | ||
2262 | end: | |
2263 | ||
2264 | splx(s); | |
2265 | thread_funnel_set(network_flock, funnel_state); | |
2266 | return ret; | |
2267 | } | |
2268 | ||
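/*
 * dlil_if_release() returns an ifnet obtained from dlil_if_acquire() to the
 * recycle pool: the IFEF_INUSE flag is cleared, the dlil_recycle_* stubs
 * are installed so that late callers cannot reach the departing driver, and
 * if_name is copied into the dlil_ifnet's own if_namestorage so the name
 * remains valid after the driver's strings are gone.
 */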
2269 | void dlil_if_release(struct ifnet *ifp) | |
2270 | { | |
2271 | struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp; | |
2272 | int s; | |
2273 | boolean_t funnel_state; | |
2274 | ||
2275 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
2276 | s = splnet(); | |
2277 | ||
2278 | ifp->if_eflags &= ~IFEF_INUSE; | |
2279 | ifp->if_ioctl = dlil_recycle_ioctl; | |
2280 | ifp->if_output = dlil_recycle_output; | |
2281 | ifp->if_free = dlil_recycle_free; | |
2282 | ifp->if_set_bpf_tap = dlil_recycle_set_bpf_tap; | |
2283 | ||
2284 | strncpy(dlifp->if_namestorage, ifp->if_name, IFNAMSIZ); | |
2285 | ifp->if_name = dlifp->if_namestorage; | |
2286 | ||
2287 | splx(s); | |
2288 | thread_funnel_set(network_flock, funnel_state); | |
2289 | } | |
2290 |