Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
91447636 | 2 | * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. |
1c79356b A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
37839358 A |
6 | * The contents of this file constitute Original Code as defined in and |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
1c79356b | 11 | * |
37839358 A |
12 | * This Original Code and all software distributed under the License are |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
37839358 A |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
1c79356b A |
19 | * |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * Copyright (c) 1990, 1991, 1993 | |
24 | * The Regents of the University of California. All rights reserved. | |
25 | * | |
26 | * This code is derived from the Stanford/CMU enet packet filter, | |
27 | * (net/enet.c) distributed as part of 4.3BSD, and code contributed | |
28 | * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence | |
29 | * Berkeley Laboratory. | |
30 | * | |
31 | * Redistribution and use in source and binary forms, with or without | |
32 | * modification, are permitted provided that the following conditions | |
33 | * are met: | |
34 | * 1. Redistributions of source code must retain the above copyright | |
35 | * notice, this list of conditions and the following disclaimer. | |
36 | * 2. Redistributions in binary form must reproduce the above copyright | |
37 | * notice, this list of conditions and the following disclaimer in the | |
38 | * documentation and/or other materials provided with the distribution. | |
39 | * 3. All advertising materials mentioning features or use of this software | |
40 | * must display the following acknowledgement: | |
41 | * This product includes software developed by the University of | |
42 | * California, Berkeley and its contributors. | |
43 | * 4. Neither the name of the University nor the names of its contributors | |
44 | * may be used to endorse or promote products derived from this software | |
45 | * without specific prior written permission. | |
46 | * | |
47 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
48 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
49 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
50 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
51 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
52 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
53 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
54 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
55 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
56 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
57 | * SUCH DAMAGE. | |
58 | * | |
59 | * @(#)bpf.c 8.2 (Berkeley) 3/28/94 | |
60 | * | |
9bccf70c | 61 | * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $ |
1c79356b A |
62 | */ |
63 | ||
9bccf70c | 64 | #include "bpf.h" |
1c79356b A |
65 | |
66 | #ifndef __GNUC__ | |
67 | #define inline | |
68 | #else | |
69 | #define inline __inline | |
70 | #endif | |
71 | ||
72 | #include <sys/param.h> | |
73 | #include <sys/systm.h> | |
74 | #include <sys/conf.h> | |
75 | #include <sys/malloc.h> | |
76 | #include <sys/mbuf.h> | |
77 | #include <sys/time.h> | |
78 | #include <sys/proc.h> | |
1c79356b A |
79 | #include <sys/signalvar.h> |
80 | #include <sys/filio.h> | |
81 | #include <sys/sockio.h> | |
82 | #include <sys/ttycom.h> | |
83 | #include <sys/filedesc.h> | |
91447636 | 84 | #include <sys/uio_internal.h> |
1c79356b | 85 | |
9bccf70c A |
86 | #if defined(sparc) && BSD < 199103 |
87 | #include <sys/stream.h> | |
88 | #endif | |
89 | #include <sys/poll.h> | |
90 | ||
1c79356b A |
91 | #include <sys/socket.h> |
92 | #include <sys/vnode.h> | |
93 | ||
94 | #include <net/if.h> | |
95 | #include <net/bpf.h> | |
96 | #include <net/bpfdesc.h> | |
97 | ||
98 | #include <netinet/in.h> | |
99 | #include <netinet/if_ether.h> | |
100 | #include <sys/kernel.h> | |
101 | #include <sys/sysctl.h> | |
55e303ae | 102 | #include <net/firewire.h> |
1c79356b | 103 | |
91447636 | 104 | #include <machine/spl.h> |
1c79356b A |
105 | #include <miscfs/devfs/devfs.h> |
106 | #include <net/dlil.h> | |
107 | ||
91447636 A |
108 | #include <kern/locks.h> |
109 | ||
110 | extern int tvtohz(struct timeval *); | |
111 | ||
9bccf70c A |
112 | #if NBPFILTER > 0 |
113 | ||
1c79356b A |
114 | /* |
115 | * Older BSDs don't have kernel malloc. | |
116 | */ | |
117 | #if BSD < 199103 | |
118 | extern bcopy(); | |
119 | static caddr_t bpf_alloc(); | |
9bccf70c | 120 | #include <net/bpf_compat.h> |
1c79356b A |
121 | #define BPF_BUFSIZE (MCLBYTES-8) |
122 | #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) | |
123 | #else | |
124 | #define BPF_BUFSIZE 4096 | |
125 | #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) | |
126 | #endif | |
127 | ||
55e303ae | 128 | |
1c79356b A |
129 | #define PRINET 26 /* interruptible */ |
130 | ||
131 | /* | |
132 | * The default read buffer size is patchable. | |
133 | */ | |
91447636 | 134 | static unsigned int bpf_bufsize = BPF_BUFSIZE; |
1c79356b A |
135 | SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, |
136 | &bpf_bufsize, 0, ""); | |
91447636 | 137 | static unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE; |
9bccf70c A |
138 | SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW, |
139 | &bpf_maxbufsize, 0, ""); | |
91447636 A |
140 | static unsigned int bpf_maxdevices = 256; |
141 | SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW, | |
142 | &bpf_maxdevices, 0, ""); | |
1c79356b A |
143 | |
144 | /* | |
145 | * bpf_iflist is the list of interfaces; each corresponds to an ifnet | |
55e303ae | 146 | * bpf_dtab holds pointers to the descriptors, indexed by minor device #
1c79356b A |
147 | */ |
148 | static struct bpf_if *bpf_iflist; | |
9bccf70c A |
149 | #ifdef __APPLE__ |
150 | /* | |
151 | * BSD now stores the bpf_d in the dev_t which is a struct | |
152 | * on their system. Our dev_t is an int, so we still store | |
153 | * the bpf_d in a separate table indexed by minor device #. | |
91447636 A |
154 | * |
155 | * The value stored in bpf_dtab[n] represents one of three states:
156 | * 0: device not opened | |
157 | * 1: device opening or closing | |
158 | * other: device <n> opened with pointer to storage | |
9bccf70c | 159 | */ |
55e303ae | 160 | static struct bpf_d **bpf_dtab = NULL; |
91447636 A |
161 | static unsigned int bpf_dtab_size = 0; |
162 | static unsigned int nbpfilter = 0; | |
163 | ||
164 | static lck_mtx_t *bpf_mlock; | |
165 | static lck_grp_t *bpf_mlock_grp; | |
166 | static lck_grp_attr_t *bpf_mlock_grp_attr; | |
167 | static lck_attr_t *bpf_mlock_attr; | |
55e303ae A |
168 | |
169 | /* | |
170 | * Mark a descriptor free by making it point to itself. | |
171 | * This is probably cheaper than marking with a constant since | |
172 | * the address should be in a register anyway. | |
173 | */ | |
55e303ae | 174 | #endif /* __APPLE__ */ |
1c79356b | 175 | |
91447636 A |
176 | static int bpf_allocbufs(struct bpf_d *); |
177 | static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp); | |
178 | static void bpf_detachd(struct bpf_d *d); | |
179 | static void bpf_freed(struct bpf_d *); | |
180 | static void bpf_mcopy(const void *, void *, size_t); | |
181 | static int bpf_movein(struct uio *, int, | |
182 | struct mbuf **, struct sockaddr *, int *); | |
183 | static int bpf_setif(struct bpf_d *, struct ifreq *); | |
184 | static void bpf_wakeup(struct bpf_d *); | |
185 | static void catchpacket(struct bpf_d *, u_char *, u_int, | |
186 | u_int, void (*)(const void *, void *, size_t)); | |
187 | static void reset_d(struct bpf_d *); | |
188 | static int bpf_setf(struct bpf_d *, struct user_bpf_program *); | |
1c79356b | 189 | |
55e303ae A |
190 | /*static void *bpf_devfs_token[MAXBPFILTER];*/ |
191 | ||
192 | static int bpf_devsw_installed; | |
193 | ||
91447636 A |
194 | void bpf_init(void *unused); |
195 | int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m); | |
55e303ae | 196 | |
9bccf70c A |
197 | /* |
198 | * Darwin differs from BSD here; the following are static
199 | * on BSD and not static on Darwin. | |
200 | */ | |
1c79356b A |
201 | d_open_t bpfopen; |
202 | d_close_t bpfclose; | |
203 | d_read_t bpfread; | |
204 | d_write_t bpfwrite; | |
91447636 | 205 | ioctl_fcn_t bpfioctl; |
9bccf70c | 206 | select_fcn_t bpfpoll; |
1c79356b | 207 | |
1c79356b | 208 | |
9bccf70c A |
209 | /* Darwin's cdevsw struct differs slightly from BSD's */
210 | #define CDEV_MAJOR 23 | |
1c79356b | 211 | static struct cdevsw bpf_cdevsw = { |
9bccf70c A |
212 | /* open */ bpfopen, |
213 | /* close */ bpfclose, | |
214 | /* read */ bpfread, | |
215 | /* write */ bpfwrite, | |
216 | /* ioctl */ bpfioctl, | |
91447636 A |
217 | /* stop */ eno_stop, |
218 | /* reset */ eno_reset, | |
219 | /* tty */ NULL, | |
9bccf70c | 220 | /* select */ bpfpoll, |
91447636 | 221 | /* mmap */ eno_mmap, |
9bccf70c | 222 | /* strategy*/ eno_strat, |
91447636 A |
223 | /* getc */ eno_getc, |
224 | /* putc */ eno_putc, | |
225 | /* type */ 0 | |
1c79356b A |
226 | }; |
227 | ||
55e303ae | 228 | #define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) |
9bccf70c | 229 | |
1c79356b | 230 | static int |
91447636 | 231 | bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, struct sockaddr *sockp, int *datlen) |
1c79356b A |
232 | { |
233 | struct mbuf *m; | |
234 | int error; | |
235 | int len; | |
236 | int hlen; | |
237 | ||
91447636 | 238 | if (sockp) { |
1c79356b | 239 | /* |
91447636 A |
240 | * Build a sockaddr based on the data link layer type. |
241 | * We do this at this level because the ethernet header | |
242 | * is copied directly into the data field of the sockaddr. | |
243 | * In the case of SLIP, there is no header and the packet | |
244 | * is forwarded as is. | |
245 | * Also, we are careful to leave room at the front of the mbuf | |
246 | * for the link level header. | |
1c79356b | 247 | */ |
91447636 A |
248 | switch (linktype) { |
249 | ||
250 | case DLT_SLIP: | |
251 | sockp->sa_family = AF_INET; | |
252 | hlen = 0; | |
253 | break; | |
254 | ||
255 | case DLT_EN10MB: | |
256 | sockp->sa_family = AF_UNSPEC; | |
257 | /* XXX Would MAXLINKHDR be better? */ | |
258 | hlen = sizeof(struct ether_header); | |
259 | break; | |
260 | ||
261 | case DLT_FDDI: | |
262 | #if defined(__FreeBSD__) || defined(__bsdi__) | |
263 | sockp->sa_family = AF_IMPLINK; | |
264 | hlen = 0; | |
265 | #else | |
266 | sockp->sa_family = AF_UNSPEC; | |
267 | /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ | |
268 | hlen = 24; | |
269 | #endif | |
270 | break; | |
271 | ||
272 | case DLT_RAW: | |
273 | case DLT_NULL: | |
274 | sockp->sa_family = AF_UNSPEC; | |
275 | hlen = 0; | |
276 | break; | |
277 | ||
278 | #ifdef __FreeBSD__ | |
279 | case DLT_ATM_RFC1483: | |
280 | /* | |
281 | * en atm driver requires 4-byte atm pseudo header. | |
282 | * though it isn't standard, vpi:vci needs to be | |
283 | * specified anyway. | |
284 | */ | |
285 | sockp->sa_family = AF_UNSPEC; | |
286 | hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ | |
287 | break; | |
288 | #endif | |
289 | case DLT_PPP: | |
290 | sockp->sa_family = AF_UNSPEC; | |
291 | hlen = 4; /* This should match PPP_HDRLEN */ | |
292 | break; | |
293 | ||
294 | case DLT_APPLE_IP_OVER_IEEE1394: | |
295 | sockp->sa_family = AF_UNSPEC; | |
296 | hlen = sizeof(struct firewire_header); | |
297 | break; | |
298 | ||
299 | default: | |
300 | return (EIO); | |
301 | } | |
302 | if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) { | |
303 | return (EIO); | |
304 | } | |
1c79356b | 305 | } |
91447636 A |
306 | else { |
307 | hlen = 0; | |
55e303ae | 308 | } |
91447636 A |
309 | |
310 | // LP64todo - fix this! | |
311 | len = uio_resid(uio); | |
1c79356b A |
312 | *datlen = len - hlen; |
313 | if ((unsigned)len > MCLBYTES) | |
314 | return (EIO); | |
315 | ||
316 | MGETHDR(m, M_WAIT, MT_DATA); | |
317 | if (m == 0) | |
318 | return (ENOBUFS); | |
91447636 | 319 | if ((unsigned)len > MHLEN) { |
1c79356b A |
320 | #if BSD >= 199103 |
321 | MCLGET(m, M_WAIT); | |
322 | if ((m->m_flags & M_EXT) == 0) { | |
323 | #else | |
324 | MCLGET(m); | |
325 | if (m->m_len != MCLBYTES) { | |
326 | #endif | |
327 | error = ENOBUFS; | |
328 | goto bad; | |
329 | } | |
330 | } | |
331 | m->m_pkthdr.len = m->m_len = len; | |
332 | m->m_pkthdr.rcvif = NULL; | |
333 | *mp = m; | |
334 | /* | |
335 | * Make room for link header. | |
336 | */ | |
337 | if (hlen != 0) { | |
338 | m->m_pkthdr.len -= hlen; | |
339 | m->m_len -= hlen; | |
340 | #if BSD >= 199103 | |
341 | m->m_data += hlen; /* XXX */ | |
342 | #else | |
343 | m->m_off += hlen; | |
344 | #endif | |
345 | error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); | |
346 | if (error) | |
347 | goto bad; | |
348 | } | |
349 | error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); | |
350 | if (!error) | |
351 | return (0); | |
352 | bad: | |
353 | m_freem(m); | |
354 | return (error); | |
355 | } | |
356 | ||
9bccf70c A |
357 | #ifdef __APPLE__ |
358 | /* Callback registered with Ethernet driver. */ | |
1c79356b A |
359 | int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) |
360 | { | |
1c79356b A |
361 | /* |
362 | * Do nothing if the BPF tap has been turned off. | |
363 | * This is to protect from a potential race where this | |
91447636 | 364 | * call blocks on the lock and, in the meantime,
1c79356b A |
365 | * BPF is turned off, which clears if_bpf.
366 | */ | |
367 | if (ifp->if_bpf) | |
368 | bpf_mtap(ifp, m); | |
1c79356b A |
369 | return 0; |
370 | } | |
55e303ae A |
371 | |
372 | /* | |
91447636 A |
373 | * The dynamic addition of a new device node must block all processes that are opening |
374 | * the last device so that no process will get an unexpected ENOENT.
55e303ae | 375 | */ |
91447636 A |
376 | static void |
377 | bpf_make_dev_t(int maj) | |
55e303ae | 378 | { |
91447636 A |
379 | static int bpf_growing = 0; |
380 | unsigned int cur_size = nbpfilter, i; | |
55e303ae | 381 | |
91447636 A |
382 | if (nbpfilter >= bpf_maxdevices) |
383 | return; | |
55e303ae | 384 | |
91447636 A |
385 | while (bpf_growing) { |
386 | /* Wait until new device has been created */ | |
387 | (void)tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0); | |
388 | } | |
389 | if (nbpfilter > cur_size) { | |
390 | /* other thread grew it already */ | |
391 | return; | |
392 | } | |
393 | bpf_growing = 1; | |
55e303ae | 394 | |
91447636 A |
395 | /* need to grow bpf_dtab first */ |
396 | if (nbpfilter == bpf_dtab_size) { | |
397 | int new_dtab_size; | |
398 | struct bpf_d **new_dtab = NULL; | |
399 | struct bpf_d **old_dtab = NULL; | |
400 | ||
401 | new_dtab_size = bpf_dtab_size + NBPFILTER; | |
402 | new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT); | |
403 | if (new_dtab == 0) { | |
404 | printf("bpf_make_dev_t: malloc bpf_dtab failed\n"); | |
405 | goto done; | |
406 | } | |
407 | if (bpf_dtab) { | |
408 | bcopy(bpf_dtab, new_dtab, | |
409 | sizeof(struct bpf_d *) * bpf_dtab_size); | |
410 | } | |
411 | bzero(new_dtab + bpf_dtab_size, | |
412 | sizeof(struct bpf_d *) * NBPFILTER); | |
413 | old_dtab = bpf_dtab; | |
414 | bpf_dtab = new_dtab; | |
415 | bpf_dtab_size = new_dtab_size; | |
416 | if (old_dtab != NULL) | |
417 | _FREE(old_dtab, M_DEVBUF); | |
55e303ae | 418 | } |
91447636 A |
419 | i = nbpfilter++; |
420 | (void) devfs_make_node(makedev(maj, i), | |
421 | DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, | |
422 | "bpf%d", i); | |
423 | done: | |
424 | bpf_growing = 0; | |
425 | wakeup((caddr_t)&bpf_growing); | |
55e303ae A |
426 | } |
427 | ||
9bccf70c | 428 | #endif |
1c79356b A |
429 | |
430 | /* | |
431 | * Attach file to the bpf interface, i.e. make d listen on bp. | |
432 | * Must be called at splimp. | |
433 | */ | |
434 | static void | |
91447636 | 435 | bpf_attachd(struct bpf_d *d, struct bpf_if *bp) |
1c79356b | 436 | { |
1c79356b A |
437 | /* |
438 | * Point d at bp, and add d to the interface's list of listeners. | |
439 | * Finally, point the driver's bpf cookie at the interface so | |
440 | * it will divert packets to bpf. | |
441 | */ | |
442 | d->bd_bif = bp; | |
443 | d->bd_next = bp->bif_dlist; | |
444 | bp->bif_dlist = d; | |
445 | ||
446 | bp->bif_ifp->if_bpf = bp; | |
1c79356b | 447 | |
9bccf70c | 448 | #ifdef __APPLE__ |
91447636 | 449 | dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); |
9bccf70c | 450 | #endif |
1c79356b A |
451 | } |
452 | ||
453 | /* | |
454 | * Detach a file from its interface. | |
455 | */ | |
456 | static void | |
91447636 | 457 | bpf_detachd(struct bpf_d *d) |
1c79356b A |
458 | { |
459 | struct bpf_d **p; | |
460 | struct bpf_if *bp; | |
9bccf70c | 461 | #ifdef __APPLE__ |
1c79356b A |
462 | struct ifnet *ifp; |
463 | ||
464 | ifp = d->bd_bif->bif_ifp; | |
9bccf70c A |
465 | |
466 | #endif | |
1c79356b A |
467 | |
468 | bp = d->bd_bif; | |
469 | /* | |
470 | * Check if this descriptor had requested promiscuous mode. | |
471 | * If so, turn it off. | |
472 | */ | |
473 | if (d->bd_promisc) { | |
474 | d->bd_promisc = 0; | |
91447636 | 475 | if (ifnet_set_promiscuous(bp->bif_ifp, 0)) |
1c79356b A |
476 | /* |
477 | * Something is really wrong if we were able to put | |
478 | * the driver into promiscuous mode, but can't | |
479 | * take it out. | |
9bccf70c | 480 | * Most likely the network interface is gone. |
1c79356b | 481 | */ |
91447636 | 482 | printf("bpf: ifnet_set_promiscuous failed"); |
1c79356b A |
483 | } |
484 | /* Remove d from the interface's descriptor list. */ | |
485 | p = &bp->bif_dlist; | |
486 | while (*p != d) { | |
487 | p = &(*p)->bd_next; | |
488 | if (*p == 0) | |
489 | panic("bpf_detachd: descriptor not in list"); | |
490 | } | |
491 | *p = (*p)->bd_next; | |
9bccf70c | 492 | if (bp->bif_dlist == 0) { |
1c79356b A |
493 | /* |
494 | * Let the driver know that there are no more listeners. | |
495 | */ | |
9bccf70c A |
496 | if (ifp->if_set_bpf_tap) |
497 | (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0); | |
1c79356b | 498 | d->bd_bif->bif_ifp->if_bpf = 0; |
9bccf70c | 499 | } |
1c79356b A |
500 | d->bd_bif = 0; |
501 | } | |
502 | ||
503 | ||
1c79356b A |
504 | /* |
505 | * Open ethernet device. Returns ENXIO for illegal minor device number, | |
506 | * EBUSY if file is open by another process. | |
507 | */ | |
508 | /* ARGSUSED */ | |
509 | int | |
91447636 | 510 | bpfopen(dev_t dev, __unused int flags, __unused int fmt, __unused struct proc *p) |
1c79356b A |
511 | { |
512 | register struct bpf_d *d; | |
513 | ||
91447636 | 514 | if ((unsigned int) minor(dev) >= nbpfilter) |
1c79356b | 515 | return (ENXIO); |
91447636 A |
516 | |
517 | /* | |
518 | * New device nodes are created on demand when opening the last one. | |
519 | * The programming model is for processes to loop on the minor starting at 0 | |
520 | * as long as EBUSY is returned. The loop stops when either the open succeeds or | |
521 | * an error other than EBUSY is returned. That means that bpf_make_dev_t() must
522 | * block all processes that are opening the last node. If not all | |
523 | * processes are blocked, they could unexpectedly get ENOENT and abort their | |
524 | * opening loop. | |
525 | */ | |
526 | if ((unsigned int) minor(dev) == (nbpfilter - 1)) | |
527 | bpf_make_dev_t(major(dev)); | |
9bccf70c | 528 | |
1c79356b | 529 | /* |
9bccf70c | 530 | * Each minor can be opened by only one process. If the requested |
1c79356b | 531 | * minor is in use, return EBUSY. |
91447636 A |
532 | * |
533 | * Important: bpfopen() and bpfclose() have to check and set the status of a device | |
534 | * in the same locking context, otherwise the device may be leaked because the vnode use count
535 | * will be unexpectedly greater than 1 when close() is called.
1c79356b | 536 | */ |
91447636 A |
537 | if (bpf_dtab[minor(dev)] == 0) |
538 | bpf_dtab[minor(dev)] = (void *)1; /* Mark opening */ | |
539 | else | |
540 | return (EBUSY); | |
541 | ||
542 | d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT); | |
543 | if (d == NULL) { | |
544 | /* this really is a catastrophic failure */ | |
545 | printf("bpfopen: malloc bpf_d failed\n"); | |
546 | bpf_dtab[minor(dev)] = 0; | |
547 | return ENOMEM; | |
1c79356b | 548 | } |
91447636 | 549 | bzero(d, sizeof(struct bpf_d)); |
9bccf70c | 550 | |
91447636 A |
551 | /* |
552 | * It is not necessary to take the BPF lock here because no other | |
553 | * thread can access the device until it is marked opened... | |
554 | */ | |
555 | ||
556 | /* Mark "in use" and do most initialization. */ | |
1c79356b A |
557 | d->bd_bufsize = bpf_bufsize; |
558 | d->bd_sig = SIGIO; | |
9bccf70c | 559 | d->bd_seesent = 1; |
91447636 | 560 | bpf_dtab[minor(dev)] = d; /* Mark opened */ |
55e303ae | 561 | |
1c79356b A |
562 | return (0); |
563 | } | |
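/*
 * Illustrative userland sketch of the open loop described in the comment
 * above (not taken from this source tree; the "/dev/bpf%d" path format and
 * the bound of 256 are assumptions for illustration, error handling omitted):
 *
 *	int fd = -1;
 *	char node[16];
 *	for (int i = 0; i < 256; i++) {
 *		snprintf(node, sizeof(node), "/dev/bpf%d", i);
 *		fd = open(node, O_RDWR);
 *		if (fd >= 0)
 *			break;		// got a free minor
 *		if (errno != EBUSY)
 *			break;		// stop on any error other than EBUSY
 *	}
 */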
564 | ||
565 | /* | |
566 | * Close the descriptor by detaching it from its interface, | |
567 | * deallocating its buffers, and marking it free. | |
568 | */ | |
569 | /* ARGSUSED */ | |
570 | int | |
91447636 | 571 | bpfclose(dev_t dev, __unused int flags, __unused int fmt, __unused struct proc *p) |
1c79356b A |
572 | { |
573 | register struct bpf_d *d; | |
1c79356b | 574 | |
55e303ae | 575 | d = bpf_dtab[minor(dev)]; |
91447636 A |
576 | if (d == 0 || d == (void *)1) |
577 | return (ENXIO); | |
578 | ||
579 | bpf_dtab[minor(dev)] = (void *)1; /* Mark closing */ | |
55e303ae | 580 | |
91447636 A |
581 | /* Take BPF lock to ensure no other thread is using the device */ |
582 | lck_mtx_lock(bpf_mlock); | |
55e303ae | 583 | |
1c79356b A |
584 | if (d->bd_bif) |
585 | bpf_detachd(d); | |
0b4e3aa0 | 586 | selthreadclear(&d->bd_sel); |
1c79356b | 587 | bpf_freed(d); |
91447636 A |
588 | |
589 | lck_mtx_unlock(bpf_mlock); | |
590 | ||
591 | /* Mark free in same context as bpfopen comes to check */ | |
592 | bpf_dtab[minor(dev)] = 0; /* Mark closed */ | |
593 | _FREE(d, M_DEVBUF); | |
594 | ||
1c79356b A |
595 | return (0); |
596 | } | |
597 | ||
1c79356b | 598 | |
91447636 | 599 | #define BPF_SLEEP bpf_sleep |
1c79356b | 600 | |
91447636 A |
601 | static int |
602 | bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo) | |
1c79356b | 603 | { |
1c79356b A |
604 | register int st; |
605 | ||
91447636 A |
606 | lck_mtx_unlock(bpf_mlock); |
607 | ||
608 | st = tsleep((caddr_t)d, pri, wmesg, timo); | |
609 | ||
610 | lck_mtx_lock(bpf_mlock); | |
611 | ||
612 | return st; | |
1c79356b | 613 | } |
1c79356b A |
614 | |
615 | /* | |
616 | * Rotate the packet buffers in descriptor d. Move the store buffer | |
617 | * into the hold slot, and the free buffer into the store slot. | |
618 | * Zero the length of the new store buffer. | |
619 | */ | |
620 | #define ROTATE_BUFFERS(d) \ | |
621 | (d)->bd_hbuf = (d)->bd_sbuf; \ | |
622 | (d)->bd_hlen = (d)->bd_slen; \ | |
623 | (d)->bd_sbuf = (d)->bd_fbuf; \ | |
624 | (d)->bd_slen = 0; \ | |
625 | (d)->bd_fbuf = 0; | |
626 | /* | |
627 | * bpfread - read next chunk of packets from buffers | |
628 | */ | |
629 | int | |
91447636 | 630 | bpfread(dev_t dev, struct uio *uio, int ioflag) |
1c79356b A |
631 | { |
632 | register struct bpf_d *d; | |
633 | int error; | |
634 | int s; | |
635 | ||
55e303ae | 636 | d = bpf_dtab[minor(dev)]; |
91447636 A |
637 | if (d == 0 || d == (void *)1) |
638 | return (ENXIO); | |
639 | ||
640 | lck_mtx_lock(bpf_mlock); | |
55e303ae | 641 | |
1c79356b A |
642 | |
643 | /* | |
644 | * Restrict the application to using a buffer the same size as
645 | * the kernel buffers.
646 | */ | |
91447636 | 647 | // LP64todo - fix this |
1c79356b | 648 | if (uio->uio_resid != d->bd_bufsize) { |
91447636 | 649 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
650 | return (EINVAL); |
651 | } | |
652 | ||
653 | s = splimp(); | |
654 | /* | |
655 | * If the hold buffer is empty, then do a timed sleep, which | |
656 | * ends when the timeout expires or when enough packets | |
657 | * have arrived to fill the store buffer. | |
658 | */ | |
659 | while (d->bd_hbuf == 0) { | |
660 | if (d->bd_immediate && d->bd_slen != 0) { | |
661 | /* | |
662 | * One or more packets either arrived since the previous
663 | * read or arrived while we were asleep. | |
664 | * Rotate the buffers and return what's here. | |
665 | */ | |
666 | ROTATE_BUFFERS(d); | |
667 | break; | |
668 | } | |
9bccf70c A |
669 | |
670 | /* | |
671 | * No data is available, check to see if the bpf device | |
672 | * is still pointed at a real interface. If not, return | |
673 | * ENXIO so that the userland process knows to rebind | |
674 | * it before using it again. | |
675 | */ | |
676 | if (d->bd_bif == NULL) { | |
677 | splx(s); | |
91447636 | 678 | lck_mtx_unlock(bpf_mlock); |
9bccf70c A |
679 | return (ENXIO); |
680 | } | |
681 | ||
1c79356b A |
682 | if (ioflag & IO_NDELAY) |
683 | error = EWOULDBLOCK; | |
684 | else | |
91447636 | 685 | error = BPF_SLEEP(d, PRINET|PCATCH, "bpf", |
1c79356b A |
686 | d->bd_rtout); |
687 | if (error == EINTR || error == ERESTART) { | |
688 | splx(s); | |
91447636 | 689 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
690 | return (error); |
691 | } | |
692 | if (error == EWOULDBLOCK) { | |
693 | /* | |
694 | * On a timeout, return what's in the buffer, | |
695 | * which may be nothing. If there is something | |
696 | * in the store buffer, we can rotate the buffers. | |
697 | */ | |
698 | if (d->bd_hbuf) | |
699 | /* | |
700 | * We filled up the buffer in between | |
701 | * getting the timeout and arriving | |
702 | * here, so we don't need to rotate. | |
703 | */ | |
704 | break; | |
705 | ||
706 | if (d->bd_slen == 0) { | |
707 | splx(s); | |
91447636 | 708 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
709 | return (0); |
710 | } | |
711 | ROTATE_BUFFERS(d); | |
712 | break; | |
713 | } | |
714 | } | |
715 | /* | |
716 | * At this point, we know we have something in the hold slot. | |
717 | */ | |
718 | splx(s); | |
719 | ||
720 | /* | |
721 | * Move data from hold buffer into user space. | |
722 | * We know the entire buffer is transferred since | |
723 | * we checked above that the read buffer is bpf_bufsize bytes. | |
724 | */ | |
725 | error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); | |
726 | ||
727 | s = splimp(); | |
728 | d->bd_fbuf = d->bd_hbuf; | |
729 | d->bd_hbuf = 0; | |
730 | d->bd_hlen = 0; | |
731 | splx(s); | |
91447636 | 732 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
733 | return (error); |
734 | } | |
735 | ||
736 | ||
737 | /* | |
738 | * If there are processes sleeping on this descriptor, wake them up. | |
739 | */ | |
91447636 A |
740 | static void |
741 | bpf_wakeup(struct bpf_d *d) | |
1c79356b A |
742 | { |
743 | wakeup((caddr_t)d); | |
744 | if (d->bd_async && d->bd_sig && d->bd_sigio) | |
745 | pgsigio(d->bd_sigio, d->bd_sig, 0); | |
746 | ||
747 | #if BSD >= 199103 | |
1c79356b | 748 | selwakeup(&d->bd_sel); |
9bccf70c A |
749 | #ifndef __APPLE__ |
750 | /* XXX */ | |
751 | d->bd_sel.si_pid = 0; | |
752 | #endif | |
1c79356b A |
753 | #else |
754 | if (d->bd_selproc) { | |
1c79356b | 755 | selwakeup(d->bd_selproc, (int)d->bd_selcoll); |
1c79356b A |
756 | d->bd_selcoll = 0; |
757 | d->bd_selproc = 0; | |
758 | } | |
759 | #endif | |
760 | } | |
761 | ||
55e303ae A |
762 | /* keep in sync with bpf_movein above: */ |
763 | #define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) | |
764 | ||
1c79356b | 765 | int |
91447636 | 766 | bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) |
1c79356b A |
767 | { |
768 | register struct bpf_d *d; | |
1c79356b A |
769 | struct ifnet *ifp; |
770 | struct mbuf *m; | |
91447636 | 771 | int error; |
55e303ae | 772 | char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; |
1c79356b A |
773 | int datlen; |
774 | ||
55e303ae | 775 | d = bpf_dtab[minor(dev)]; |
91447636 A |
776 | if (d == 0 || d == (void *)1) |
777 | return (ENXIO); | |
778 | ||
779 | lck_mtx_lock(bpf_mlock); | |
9bccf70c | 780 | |
1c79356b | 781 | if (d->bd_bif == 0) { |
91447636 | 782 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
783 | return (ENXIO); |
784 | } | |
785 | ||
786 | ifp = d->bd_bif->bif_ifp; | |
787 | ||
788 | if (uio->uio_resid == 0) { | |
91447636 | 789 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
790 | return (0); |
791 | } | |
55e303ae A |
792 | ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); |
793 | error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, | |
91447636 | 794 | d->bd_hdrcmplt ? 0 : (struct sockaddr *)dst_buf, &datlen); |
1c79356b | 795 | if (error) { |
91447636 | 796 | lck_mtx_unlock(bpf_mlock); |
1c79356b A |
797 | return (error); |
798 | } | |
799 | ||
91447636 A |
800 | if ((unsigned)datlen > ifp->if_mtu) { |
801 | lck_mtx_unlock(bpf_mlock); | |
1c79356b A |
802 | return (EMSGSIZE); |
803 | } | |
804 | ||
91447636 A |
805 | lck_mtx_unlock(bpf_mlock); |
806 | ||
55e303ae | 807 | if (d->bd_hdrcmplt) { |
91447636 | 808 | error = dlil_output(ifp, 0, m, NULL, NULL, 1); |
55e303ae | 809 | } |
91447636 A |
810 | else { |
811 | error = dlil_output(ifp, PF_INET, m, NULL, (struct sockaddr *)dst_buf, 0); | |
812 | } | |
813 | ||
1c79356b A |
814 | /* |
815 | * The driver frees the mbuf. | |
816 | */ | |
817 | return (error); | |
818 | } | |
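/*
 * Illustrative userland counterpart of bpfwrite() (a sketch, not part of the
 * original sources): with the "header complete" flag set, the caller writes a
 * fully formed link-level frame; "frame" and "frame_len" are placeholders.
 *
 *	u_int one = 1;
 *	ioctl(fd, BIOCSHDRCMPLT, &one);		// we supply the link header ourselves
 *	write(fd, frame, frame_len);		// frame_len must not exceed the MTU
 */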
819 | ||
820 | /* | |
821 | * Reset a descriptor by flushing its packet buffer and clearing the | |
822 | * receive and drop counts. Should be called at splimp. | |
823 | */ | |
824 | static void | |
91447636 | 825 | reset_d(struct bpf_d *d) |
1c79356b A |
826 | { |
827 | if (d->bd_hbuf) { | |
828 | /* Free the hold buffer. */ | |
829 | d->bd_fbuf = d->bd_hbuf; | |
830 | d->bd_hbuf = 0; | |
831 | } | |
832 | d->bd_slen = 0; | |
833 | d->bd_hlen = 0; | |
834 | d->bd_rcount = 0; | |
835 | d->bd_dcount = 0; | |
836 | } | |
837 | ||
838 | /* | |
839 | * FIONREAD Check for read packet available. | |
840 | * SIOCGIFADDR Get interface address - convenient hook to driver. | |
841 | * BIOCGBLEN Get buffer len [for read()]. | |
842 | * BIOCSETF Set ethernet read filter. | |
843 | * BIOCFLUSH Flush read packet buffer. | |
844 | * BIOCPROMISC Put interface into promiscuous mode. | |
845 | * BIOCGDLT Get link layer type. | |
846 | * BIOCGETIF Get interface name. | |
847 | * BIOCSETIF Set interface. | |
848 | * BIOCSRTIMEOUT Set read timeout. | |
849 | * BIOCGRTIMEOUT Get read timeout. | |
850 | * BIOCGSTATS Get packet stats. | |
851 | * BIOCIMMEDIATE Set immediate mode. | |
852 | * BIOCVERSION Get filter language version. | |
9bccf70c A |
853 | * BIOCGHDRCMPLT Get "header already complete" flag |
854 | * BIOCSHDRCMPLT Set "header already complete" flag | |
855 | * BIOCGSEESENT Get "see packets sent" flag | |
856 | * BIOCSSEESENT Set "see packets sent" flag | |
1c79356b A |
857 | */ |
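/*
 * A minimal, illustrative userland sequence exercising a few of the ioctls
 * listed above (a sketch only: it assumes fd is an open bpf descriptor and
 * that an interface named "en0" exists; error handling omitted):
 *
 *	struct ifreq ifr;
 *	u_int bufsize, immediate = 1;
 *	strncpy(ifr.ifr_name, "en0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind the descriptor to en0
 *	ioctl(fd, BIOCIMMEDIATE, &immediate);	// wake readers as packets arrive
 *	ioctl(fd, BIOCGBLEN, &bufsize);		// read() must use exactly this size
 *	char *buf = malloc(bufsize);
 *	ssize_t n = read(fd, buf, bufsize);
 */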
858 | /* ARGSUSED */ | |
9bccf70c | 859 | int |
91447636 | 860 | bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, struct proc *p) |
1c79356b A |
861 | { |
862 | register struct bpf_d *d; | |
863 | int s, error = 0; | |
864 | ||
55e303ae | 865 | d = bpf_dtab[minor(dev)]; |
91447636 A |
866 | if (d == 0 || d == (void *)1) |
867 | return (ENXIO); | |
1c79356b | 868 | |
91447636 | 869 | lck_mtx_lock(bpf_mlock); |
1c79356b A |
870 | |
871 | switch (cmd) { | |
872 | ||
873 | default: | |
874 | error = EINVAL; | |
875 | break; | |
876 | ||
877 | /* | |
878 | * Check for read packet available. | |
879 | */ | |
880 | case FIONREAD: | |
881 | { | |
882 | int n; | |
883 | ||
884 | s = splimp(); | |
885 | n = d->bd_slen; | |
886 | if (d->bd_hbuf) | |
887 | n += d->bd_hlen; | |
888 | splx(s); | |
889 | ||
890 | *(int *)addr = n; | |
891 | break; | |
892 | } | |
893 | ||
894 | case SIOCGIFADDR: | |
895 | { | |
896 | struct ifnet *ifp; | |
897 | ||
898 | if (d->bd_bif == 0) | |
899 | error = EINVAL; | |
900 | else { | |
901 | ifp = d->bd_bif->bif_ifp; | |
91447636 | 902 | error = dlil_ioctl(0, ifp, cmd, addr); |
1c79356b A |
903 | } |
904 | break; | |
905 | } | |
906 | ||
907 | /* | |
908 | * Get buffer len [for read()]. | |
909 | */ | |
910 | case BIOCGBLEN: | |
911 | *(u_int *)addr = d->bd_bufsize; | |
912 | break; | |
913 | ||
914 | /* | |
915 | * Set buffer length. | |
916 | */ | |
917 | case BIOCSBLEN: | |
918 | #if BSD < 199103 | |
919 | error = EINVAL; | |
920 | #else | |
921 | if (d->bd_bif != 0) | |
922 | error = EINVAL; | |
923 | else { | |
924 | register u_int size = *(u_int *)addr; | |
925 | ||
9bccf70c A |
926 | if (size > bpf_maxbufsize) |
927 | *(u_int *)addr = size = bpf_maxbufsize; | |
1c79356b A |
928 | else if (size < BPF_MINBUFSIZE) |
929 | *(u_int *)addr = size = BPF_MINBUFSIZE; | |
930 | d->bd_bufsize = size; | |
931 | } | |
932 | #endif | |
933 | break; | |
934 | ||
935 | /* | |
936 | * Set link layer read filter. | |
937 | */ | |
938 | case BIOCSETF: | |
91447636 A |
939 | if (proc_is64bit(p)) { |
940 | error = bpf_setf(d, (struct user_bpf_program *)addr); | |
941 | } | |
942 | else { | |
943 | struct bpf_program * tmpp; | |
944 | struct user_bpf_program tmp; | |
945 | ||
946 | tmpp = (struct bpf_program *)addr; | |
947 | tmp.bf_len = tmpp->bf_len; | |
948 | tmp.bf_insns = CAST_USER_ADDR_T(tmpp->bf_insns); | |
949 | error = bpf_setf(d, &tmp); | |
950 | } | |
1c79356b A |
951 | break; |
952 | ||
953 | /* | |
954 | * Flush read packet buffer. | |
955 | */ | |
956 | case BIOCFLUSH: | |
957 | s = splimp(); | |
958 | reset_d(d); | |
959 | splx(s); | |
960 | break; | |
961 | ||
962 | /* | |
963 | * Put interface into promiscuous mode. | |
964 | */ | |
965 | case BIOCPROMISC: | |
966 | if (d->bd_bif == 0) { | |
967 | /* | |
968 | * No interface attached yet. | |
969 | */ | |
970 | error = EINVAL; | |
971 | break; | |
972 | } | |
973 | s = splimp(); | |
974 | if (d->bd_promisc == 0) { | |
91447636 | 975 | error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1); |
1c79356b A |
976 | if (error == 0) |
977 | d->bd_promisc = 1; | |
978 | } | |
979 | splx(s); | |
980 | break; | |
981 | ||
982 | /* | |
983 | * Get device parameters. | |
984 | */ | |
985 | case BIOCGDLT: | |
986 | if (d->bd_bif == 0) | |
987 | error = EINVAL; | |
988 | else | |
989 | *(u_int *)addr = d->bd_bif->bif_dlt; | |
990 | break; | |
991 | ||
992 | /* | |
9bccf70c | 993 | * Get interface name. |
1c79356b A |
994 | */ |
995 | case BIOCGETIF: | |
996 | if (d->bd_bif == 0) | |
997 | error = EINVAL; | |
9bccf70c A |
998 | else { |
999 | struct ifnet *const ifp = d->bd_bif->bif_ifp; | |
1000 | struct ifreq *const ifr = (struct ifreq *)addr; | |
1001 | ||
1002 | snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), | |
1003 | "%s%d", ifp->if_name, ifp->if_unit); | |
1004 | } | |
1c79356b A |
1005 | break; |
1006 | ||
1007 | /* | |
1008 | * Set interface. | |
1009 | */ | |
1010 | case BIOCSETIF: | |
1011 | error = bpf_setif(d, (struct ifreq *)addr); | |
1012 | break; | |
1013 | ||
1014 | /* | |
1015 | * Set read timeout. | |
1016 | */ | |
1017 | case BIOCSRTIMEOUT: | |
1018 | { | |
1019 | struct timeval *tv = (struct timeval *)addr; | |
1020 | ||
1021 | /* | |
1022 | * Subtract 1 tick from tvtohz() since this isn't | |
1023 | * a one-shot timer. | |
1024 | */ | |
1025 | if ((error = itimerfix(tv)) == 0) | |
1026 | d->bd_rtout = tvtohz(tv) - 1; | |
1027 | break; | |
1028 | } | |
1029 | ||
1030 | /* | |
1031 | * Get read timeout. | |
1032 | */ | |
1033 | case BIOCGRTIMEOUT: | |
1034 | { | |
1035 | struct timeval *tv = (struct timeval *)addr; | |
1036 | ||
1037 | tv->tv_sec = d->bd_rtout / hz; | |
1038 | tv->tv_usec = (d->bd_rtout % hz) * tick; | |
1039 | break; | |
1040 | } | |
1041 | ||
1042 | /* | |
1043 | * Get packet stats. | |
1044 | */ | |
1045 | case BIOCGSTATS: | |
1046 | { | |
1047 | struct bpf_stat *bs = (struct bpf_stat *)addr; | |
1048 | ||
1049 | bs->bs_recv = d->bd_rcount; | |
1050 | bs->bs_drop = d->bd_dcount; | |
1051 | break; | |
1052 | } | |
1053 | ||
1054 | /* | |
1055 | * Set immediate mode. | |
1056 | */ | |
1057 | case BIOCIMMEDIATE: | |
1058 | d->bd_immediate = *(u_int *)addr; | |
1059 | break; | |
1060 | ||
1061 | case BIOCVERSION: | |
1062 | { | |
1063 | struct bpf_version *bv = (struct bpf_version *)addr; | |
1064 | ||
1065 | bv->bv_major = BPF_MAJOR_VERSION; | |
1066 | bv->bv_minor = BPF_MINOR_VERSION; | |
1067 | break; | |
1068 | } | |
1069 | ||
9bccf70c A |
1070 | /* |
1071 | * Get "header already complete" flag | |
1072 | */ | |
1073 | case BIOCGHDRCMPLT: | |
1074 | *(u_int *)addr = d->bd_hdrcmplt; | |
1075 | break; | |
1076 | ||
1077 | /* | |
1078 | * Set "header already complete" flag | |
1079 | */ | |
1080 | case BIOCSHDRCMPLT: | |
1081 | d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; | |
1082 | break; | |
1083 | ||
1084 | /* | |
1085 | * Get "see sent packets" flag | |
1086 | */ | |
1087 | case BIOCGSEESENT: | |
1088 | *(u_int *)addr = d->bd_seesent; | |
1089 | break; | |
1090 | ||
1091 | /* | |
1092 | * Set "see sent packets" flag | |
1093 | */ | |
1094 | case BIOCSSEESENT: | |
1095 | d->bd_seesent = *(u_int *)addr; | |
1096 | break; | |
1097 | ||
1c79356b A |
1098 | case FIONBIO: /* Non-blocking I/O */ |
1099 | break; | |
1100 | ||
1101 | case FIOASYNC: /* Send signal on receive packets */ | |
1102 | d->bd_async = *(int *)addr; | |
1103 | break; | |
9bccf70c | 1104 | #ifndef __APPLE__ |
1c79356b A |
1105 | case FIOSETOWN: |
1106 | error = fsetown(*(int *)addr, &d->bd_sigio); | |
1107 | break; | |
1108 | ||
1109 | case FIOGETOWN: | |
1110 | *(int *)addr = fgetown(d->bd_sigio); | |
1111 | break; | |
1112 | ||
1113 | /* This is deprecated, FIOSETOWN should be used instead. */ | |
1114 | case TIOCSPGRP: | |
1115 | error = fsetown(-(*(int *)addr), &d->bd_sigio); | |
1116 | break; | |
1117 | ||
1118 | /* This is deprecated, FIOGETOWN should be used instead. */ | |
1119 | case TIOCGPGRP: | |
1120 | *(int *)addr = -fgetown(d->bd_sigio); | |
1121 | break; | |
1122 | #endif | |
1123 | case BIOCSRSIG: /* Set receive signal */ | |
1124 | { | |
1125 | u_int sig; | |
1126 | ||
1127 | sig = *(u_int *)addr; | |
1128 | ||
1129 | if (sig >= NSIG) | |
1130 | error = EINVAL; | |
1131 | else | |
1132 | d->bd_sig = sig; | |
1133 | break; | |
1134 | } | |
1135 | case BIOCGRSIG: | |
1136 | *(u_int *)addr = d->bd_sig; | |
1137 | break; | |
1138 | } | |
91447636 A |
1139 | |
1140 | lck_mtx_unlock(bpf_mlock); | |
1141 | ||
1c79356b A |
1142 | return (error); |
1143 | } | |
1144 | ||
1145 | /* | |
1146 | * Set d's packet filter program to fp. If this file already has a filter, | |
1147 | * free it and replace it. Returns EINVAL for bogus requests. | |
1148 | */ | |
1149 | static int | |
91447636 | 1150 | bpf_setf(struct bpf_d *d, struct user_bpf_program *fp) |
1c79356b A |
1151 | { |
1152 | struct bpf_insn *fcode, *old; | |
1153 | u_int flen, size; | |
1154 | int s; | |
1155 | ||
1156 | old = d->bd_filter; | |
91447636 | 1157 | if (fp->bf_insns == USER_ADDR_NULL) { |
1c79356b A |
1158 | if (fp->bf_len != 0) |
1159 | return (EINVAL); | |
1160 | s = splimp(); | |
1161 | d->bd_filter = 0; | |
1162 | reset_d(d); | |
1163 | splx(s); | |
1164 | if (old != 0) | |
1165 | FREE((caddr_t)old, M_DEVBUF); | |
1166 | return (0); | |
1167 | } | |
1168 | flen = fp->bf_len; | |
1169 | if (flen > BPF_MAXINSNS) | |
1170 | return (EINVAL); | |
1171 | ||
91447636 | 1172 | size = flen * sizeof(struct bpf_insn); |
1c79356b | 1173 | fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); |
9bccf70c | 1174 | #ifdef __APPLE__ |
0b4e3aa0 A |
1175 | if (fcode == NULL) |
1176 | return (ENOBUFS); | |
9bccf70c | 1177 | #endif |
91447636 | 1178 | if (copyin(fp->bf_insns, (caddr_t)fcode, size) == 0 && |
1c79356b A |
1179 | bpf_validate(fcode, (int)flen)) { |
1180 | s = splimp(); | |
1181 | d->bd_filter = fcode; | |
1182 | reset_d(d); | |
1183 | splx(s); | |
1184 | if (old != 0) | |
1185 | FREE((caddr_t)old, M_DEVBUF); | |
1186 | ||
1187 | return (0); | |
1188 | } | |
1189 | FREE((caddr_t)fcode, M_DEVBUF); | |
1190 | return (EINVAL); | |
1191 | } | |
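/*
 * Illustrative sketch of what a userland caller hands to BIOCSETF and hence
 * to bpf_setf() above: a one-instruction filter that accepts every packet,
 * capturing at most 96 bytes.  The snap length of 96 is arbitrary and chosen
 * only for illustration.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET + BPF_K, 96),	// accept; capture up to 96 bytes
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 */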
1192 | ||
1193 | /* | |
1194 | * Detach a file from its current interface (if attached at all) and attach | |
1195 | * to the interface indicated by the name stored in ifr. | |
1196 | * Return an errno or 0. | |
1197 | */ | |
1198 | static int | |
91447636 | 1199 | bpf_setif(struct bpf_d *d, struct ifreq *ifr) |
1c79356b A |
1200 | { |
1201 | struct bpf_if *bp; | |
1202 | int s, error; | |
1203 | struct ifnet *theywant; | |
1204 | ||
1205 | theywant = ifunit(ifr->ifr_name); | |
1206 | if (theywant == 0) | |
1207 | return ENXIO; | |
1208 | ||
1209 | /* | |
1210 | * Look through attached interfaces for the named one. | |
1211 | */ | |
1212 | for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { | |
1213 | struct ifnet *ifp = bp->bif_ifp; | |
1214 | ||
1215 | if (ifp == 0 || ifp != theywant) | |
1216 | continue; | |
1217 | /* | |
1218 | * We found the requested interface. | |
1219 | * If it's not up, return an error. | |
1220 | * Allocate the packet buffers if we need to. | |
1221 | * If we're already attached to the requested interface,
1222 | * just flush the buffer. | |
1223 | */ | |
1224 | if ((ifp->if_flags & IFF_UP) == 0) | |
1225 | return (ENETDOWN); | |
1226 | ||
1227 | if (d->bd_sbuf == 0) { | |
1228 | error = bpf_allocbufs(d); | |
1229 | if (error != 0) | |
1230 | return (error); | |
1231 | } | |
1232 | s = splimp(); | |
1233 | if (bp != d->bd_bif) { | |
1234 | if (d->bd_bif) | |
1235 | /* | |
1236 | * Detach if attached to something else. | |
1237 | */ | |
1238 | bpf_detachd(d); | |
1239 | ||
1240 | bpf_attachd(d, bp); | |
1241 | } | |
1242 | reset_d(d); | |
1243 | splx(s); | |
1244 | return (0); | |
1245 | } | |
1246 | /* Not found. */ | |
1247 | return (ENXIO); | |
1248 | } | |
1249 | ||
1c79356b A |
1250 | /* |
1251 | * Support for select() and poll() system calls | |
1252 | * | |
1253 | * Return true iff the specific operation will not block indefinitely. | |
1254 | * Otherwise, return false but make a note that a selwakeup() must be done. | |
1255 | */ | |
1256 | int | |
91447636 | 1257 | bpfpoll(dev_t dev, int events, void * wql, struct proc *p) |
1c79356b A |
1258 | { |
1259 | register struct bpf_d *d; | |
1260 | register int s; | |
1261 | int revents = 0; | |
1262 | ||
55e303ae | 1263 | d = bpf_dtab[minor(dev)]; |
91447636 A |
1264 | if (d == 0 || d == (void *)1) |
1265 | return (ENXIO); | |
1266 | ||
1267 | lck_mtx_lock(bpf_mlock); | |
55e303ae | 1268 | |
1c79356b A |
1269 | /* |
1270 | * An imitation of the FIONREAD ioctl code. | |
1271 | */ | |
9bccf70c | 1272 | if (d->bd_bif == NULL) { |
91447636 | 1273 | lck_mtx_unlock(bpf_mlock); |
9bccf70c A |
1274 | return (ENXIO); |
1275 | } | |
1276 | ||
1c79356b | 1277 | s = splimp(); |
9bccf70c | 1278 | if (events & (POLLIN | POLLRDNORM)) { |
1c79356b A |
1279 | if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) |
1280 | revents |= events & (POLLIN | POLLRDNORM); | |
1281 | else | |
0b4e3aa0 | 1282 | selrecord(p, &d->bd_sel, wql); |
9bccf70c | 1283 | } |
1c79356b | 1284 | splx(s); |
91447636 A |
1285 | |
1286 | lck_mtx_unlock(bpf_mlock); | |
1c79356b A |
1287 | return (revents); |
1288 | } | |
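/*
 * Illustrative userland use of the select support above (sketch only, error
 * handling omitted): wait until the descriptor is readable, then issue the
 * fixed-size read() that bpfread() requires.
 *
 *	fd_set rfds;
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 && FD_ISSET(fd, &rfds))
 *		n = read(fd, buf, bufsize);
 */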
1289 | ||
1290 | /* | |
1291 | * Incoming linkage from device drivers. Process the packet pkt, of length | |
1292 | * pktlen, which is stored in a contiguous buffer. The packet is parsed | |
1293 | * by each process' filter, and if accepted, stashed into the corresponding | |
1294 | * buffer. | |
1295 | */ | |
1296 | void | |
91447636 | 1297 | bpf_tap(struct ifnet *ifp, u_char *pkt, u_int pktlen) |
1c79356b A |
1298 | { |
1299 | struct bpf_if *bp; | |
1300 | register struct bpf_d *d; | |
1301 | register u_int slen; | |
1302 | /* | |
1303 | * Note that the ipl does not have to be raised at this point. | |
1304 | * The only problem that could arise here would be if two different
1305 | * interfaces shared any data. This is not the case. | |
1306 | */ | |
91447636 A |
1307 | lck_mtx_lock(bpf_mlock); |
1308 | ||
9bccf70c A |
1309 | bp = ifp->if_bpf; |
1310 | #ifdef __APPLE__ | |
1311 | if (bp) { | |
1312 | #endif | |
91447636 A |
1313 | for (d = bp->bif_dlist; d != 0; d = d->bd_next) { |
1314 | ++d->bd_rcount; | |
1315 | slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); | |
1316 | if (slen != 0) | |
1317 | catchpacket(d, pkt, pktlen, slen, bcopy); | |
1318 | } | |
9bccf70c | 1319 | #ifdef __APPLE__ |
1c79356b | 1320 | } |
91447636 | 1321 | lck_mtx_unlock(bpf_mlock); |
9bccf70c | 1322 | #endif |
1c79356b A |
1323 | } |
1324 | ||
1325 | /* | |
1326 | * Copy data from an mbuf chain into a buffer. This code is derived | |
1327 | * from m_copydata in sys/uipc_mbuf.c. | |
1328 | */ | |
1329 | static void | |
91447636 | 1330 | bpf_mcopy(const void *src_arg, void *dst_arg, size_t len) |
1c79356b | 1331 | { |
91447636 A |
1332 | const struct mbuf *m; |
1333 | u_int count; | |
1c79356b A |
1334 | u_char *dst; |
1335 | ||
1336 | m = src_arg; | |
1337 | dst = dst_arg; | |
1338 | while (len > 0) { | |
1339 | if (m == 0) | |
1340 | panic("bpf_mcopy"); | |
1341 | count = min(m->m_len, len); | |
91447636 | 1342 | bcopy(mtod(m, const void *), dst, count); |
1c79356b A |
1343 | m = m->m_next; |
1344 | dst += count; | |
1345 | len -= count; | |
1346 | } | |
1347 | } | |
1348 | ||
1349 | /* | |
1350 | * Incoming linkage from device drivers, when packet is in an mbuf chain. | |
1351 | */ | |
1352 | void | |
91447636 | 1353 | bpf_mtap(struct ifnet *ifp, struct mbuf *m) |
1c79356b | 1354 | { |
91447636 | 1355 | struct bpf_if *bp; |
1c79356b A |
1356 | struct bpf_d *d; |
1357 | u_int pktlen, slen; | |
1358 | struct mbuf *m0; | |
1359 | ||
91447636 A |
1360 | lck_mtx_lock(bpf_mlock); |
1361 | ||
1362 | bp = ifp->if_bpf; | |
1363 | if (bp) { | |
1c79356b A |
1364 | pktlen = 0; |
1365 | for (m0 = m; m0 != 0; m0 = m0->m_next) | |
1366 | pktlen += m0->m_len; | |
91447636 A |
1367 | |
1368 | for (d = bp->bif_dlist; d != 0; d = d->bd_next) { | |
1369 | if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) | |
1370 | continue; | |
1371 | ++d->bd_rcount; | |
1372 | slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); | |
1373 | if (slen != 0) | |
1374 | catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); | |
1375 | } | |
1c79356b | 1376 | } |
91447636 A |
1377 | |
1378 | lck_mtx_unlock(bpf_mlock); | |
1c79356b A |
1379 | } |
1380 | ||
1381 | /* | |
1382 | * Move the packet data from interface memory (pkt) into the | |
1383 | * store buffer. Return 1 if it's time to wakeup a listener (buffer full), | |
1384 | * otherwise 0. "copy" is the routine called to do the actual data | |
1385 | * transfer. bcopy is passed in to copy contiguous chunks, while | |
1386 | * bpf_mcopy is passed in to copy mbuf chains. In the latter case, | |
1387 | * pkt is really an mbuf. | |
1388 | */ | |
1389 | static void | |
91447636 A |
1390 | catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, |
1391 | void (*cpfn)(const void *, void *, size_t)) | |
1c79356b A |
1392 | { |
1393 | register struct bpf_hdr *hp; | |
1394 | register int totlen, curlen; | |
1395 | register int hdrlen = d->bd_bif->bif_hdrlen; | |
1396 | /* | |
1397 | * Figure out how many bytes to move. If the packet is | |
1398 | * greater than or equal to the snapshot length, transfer that
1399 | * much. Otherwise, transfer the whole packet (unless | |
1400 | * we hit the buffer size limit). | |
1401 | */ | |
1402 | totlen = hdrlen + min(snaplen, pktlen); | |
1403 | if (totlen > d->bd_bufsize) | |
1404 | totlen = d->bd_bufsize; | |
1405 | ||
1406 | /* | |
1407 | * Round up the end of the previous packet to the next longword. | |
1408 | */ | |
1409 | curlen = BPF_WORDALIGN(d->bd_slen); | |
1410 | if (curlen + totlen > d->bd_bufsize) { | |
1411 | /* | |
1412 | * This packet will overflow the storage buffer. | |
1413 | * Rotate the buffers if we can, then wakeup any | |
1414 | * pending reads. | |
1415 | */ | |
1416 | if (d->bd_fbuf == 0) { | |
1417 | /* | |
1418 | * We haven't completed the previous read yet, | |
1419 | * so drop the packet. | |
1420 | */ | |
1421 | ++d->bd_dcount; | |
1422 | return; | |
1423 | } | |
1424 | ROTATE_BUFFERS(d); | |
1425 | bpf_wakeup(d); | |
1426 | curlen = 0; | |
1427 | } | |
1428 | else if (d->bd_immediate) | |
1429 | /* | |
1430 | * Immediate mode is set. A packet arrived so any | |
1431 | * reads should be woken up. | |
1432 | */ | |
1433 | bpf_wakeup(d); | |
1434 | ||
1435 | /* | |
1436 | * Append the bpf header. | |
1437 | */ | |
1438 | hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); | |
1439 | #if BSD >= 199103 | |
1440 | microtime(&hp->bh_tstamp); | |
1441 | #elif defined(sun) | |
1442 | uniqtime(&hp->bh_tstamp); | |
1443 | #else | |
1444 | hp->bh_tstamp = time; | |
1445 | #endif | |
1446 | hp->bh_datalen = pktlen; | |
1447 | hp->bh_hdrlen = hdrlen; | |
1448 | /* | |
1449 | * Copy the packet data into the store buffer and update its length. | |
1450 | */ | |
1451 | (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); | |
1452 | d->bd_slen = curlen + totlen; | |
1453 | } | |
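/*
 * Because catchpacket() above lays records out with BPF_WORDALIGN padding, a
 * userland reader walks the buffer returned by read() like this (illustrative
 * sketch; "buf" and "n" come from the read() call, error handling omitted):
 *
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		u_char *pkt = (u_char *)p + bh->bh_hdrlen;
 *		// pkt holds bh_caplen captured bytes of a bh_datalen byte packet
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */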
1454 | ||
1455 | /* | |
1456 | * Initialize all nonzero fields of a descriptor. | |
1457 | */ | |
1458 | static int | |
91447636 | 1459 | bpf_allocbufs(struct bpf_d *d) |
1c79356b A |
1460 | { |
1461 | d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); | |
1462 | if (d->bd_fbuf == 0) | |
1463 | return (ENOBUFS); | |
1464 | ||
1465 | d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); | |
1466 | if (d->bd_sbuf == 0) { | |
1467 | FREE(d->bd_fbuf, M_DEVBUF); | |
1468 | return (ENOBUFS); | |
1469 | } | |
1470 | d->bd_slen = 0; | |
1471 | d->bd_hlen = 0; | |
1472 | return (0); | |
1473 | } | |
1474 | ||
1475 | /* | |
1476 | * Free buffers currently in use by a descriptor. | |
1477 | * Called on close. | |
1478 | */ | |
1479 | static void | |
91447636 | 1480 | bpf_freed(struct bpf_d *d) |
1c79356b A |
1481 | { |
1482 | /* | |
1483 | * We don't need to lock out interrupts since this descriptor has | |
1484 | * been detached from its interface and has not yet been marked
1485 | * free. | |
1486 | */ | |
1487 | if (d->bd_sbuf != 0) { | |
1488 | FREE(d->bd_sbuf, M_DEVBUF); | |
1489 | if (d->bd_hbuf != 0) | |
1490 | FREE(d->bd_hbuf, M_DEVBUF); | |
1491 | if (d->bd_fbuf != 0) | |
1492 | FREE(d->bd_fbuf, M_DEVBUF); | |
1493 | } | |
1494 | if (d->bd_filter) | |
1495 | FREE((caddr_t)d->bd_filter, M_DEVBUF); | |
1c79356b A |
1496 | } |
1497 | ||
1498 | /* | |
1499 | * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) | |
1500 | * in the driver's softc; dlt is the link layer type; hdrlen is the fixed | |
1501 | * size of the link header (variable length headers not yet supported). | |
1502 | */ | |
1503 | void | |
91447636 | 1504 | bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) |
1c79356b A |
1505 | { |
1506 | struct bpf_if *bp; | |
0b4e3aa0 | 1507 | bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT); |
1c79356b A |
1508 | if (bp == 0) |
1509 | panic("bpfattach"); | |
1510 | ||
91447636 A |
1511 | lck_mtx_lock(bpf_mlock); |
1512 | ||
1c79356b A |
1513 | bp->bif_dlist = 0; |
1514 | bp->bif_ifp = ifp; | |
1515 | bp->bif_dlt = dlt; | |
1516 | ||
1517 | bp->bif_next = bpf_iflist; | |
1518 | bpf_iflist = bp; | |
1519 | ||
1520 | bp->bif_ifp->if_bpf = 0; | |
1521 | ||
1522 | /* | |
1523 | * Compute the length of the bpf header. This is not necessarily | |
1524 | * equal to SIZEOF_BPF_HDR because we want to insert spacing such | |
1525 | * that the network layer header begins on a longword boundary (for | |
1526 | * performance reasons and to alleviate alignment restrictions). | |
1527 | */ | |
1528 | bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; | |
91447636 A |
1529 | |
1530 | /* Take a reference on the interface */ | |
1531 | ifp_reference(ifp); | |
1532 | ||
1533 | lck_mtx_unlock(bpf_mlock); | |
1c79356b | 1534 | |
55e303ae | 1535 | #ifndef __APPLE__ |
1c79356b A |
1536 | if (bootverbose) |
1537 | printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); | |
1538 | #endif | |
1539 | } | |
1540 | ||
9bccf70c A |
1541 | /* |
1542 | * Detach bpf from an interface. This involves detaching each descriptor | |
1543 | * associated with the interface, and leaving bd_bif NULL. Notify each | |
1544 | * descriptor as it's detached so that any sleepers wake up and get | |
1545 | * ENXIO. | |
1546 | */ | |
1547 | void | |
91447636 | 1548 | bpfdetach(struct ifnet *ifp) |
9bccf70c A |
1549 | { |
1550 | struct bpf_if *bp, *bp_prev; | |
1551 | struct bpf_d *d; | |
1552 | int s; | |
1553 | ||
1554 | s = splimp(); | |
91447636 A |
1555 | |
1556 | lck_mtx_lock(bpf_mlock); | |
9bccf70c A |
1557 | |
1558 | /* Locate BPF interface information */ | |
1559 | bp_prev = NULL; | |
1560 | for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { | |
1561 | if (ifp == bp->bif_ifp) | |
1562 | break; | |
1563 | bp_prev = bp; | |
1564 | } | |
1565 | ||
1566 | #ifdef __APPLE__ | |
1567 | /* Check for no BPF interface information */ | |
1568 | if (bp == NULL) {
	lck_mtx_unlock(bpf_mlock);	/* drop the lock taken above before bailing out */
	splx(s);
1569 | return;
1570 | }
1571 | #endif | |
1572 | ||
1573 | /* Interface wasn't attached */ | |
1574 | if (bp->bif_ifp == NULL) { | |
lck_mtx_unlock(bpf_mlock);	/* drop the lock taken above before returning */
1575 | splx(s);
1576 | #ifndef __APPLE__ | |
1577 | printf("bpfdetach: %s%d was not attached\n", ifp->if_name, | |
1578 | ifp->if_unit); | |
1579 | #endif | |
1580 | return; | |
1581 | } | |
1582 | ||
1583 | while ((d = bp->bif_dlist) != NULL) { | |
1584 | bpf_detachd(d); | |
1585 | bpf_wakeup(d); | |
1586 | } | |
1587 | ||
1588 | if (bp_prev) { | |
1589 | bp_prev->bif_next = bp->bif_next; | |
1590 | } else { | |
1591 | bpf_iflist = bp->bif_next; | |
1592 | } | |
91447636 A |
1593 | |
1594 | ifp_release(ifp); | |
1595 | ||
1596 | lck_mtx_unlock(bpf_mlock); | |
9bccf70c A |
1597 | |
1598 | FREE(bp, M_DEVBUF); | |
1599 | ||
1600 | splx(s); | |
1601 | } | |
1602 | ||
1c79356b | 1603 | void |
91447636 | 1604 | bpf_init(__unused void *unused) |
1c79356b | 1605 | { |
9bccf70c | 1606 | #ifdef __APPLE__ |
1c79356b | 1607 | int i; |
9bccf70c | 1608 | int maj; |
1c79356b | 1609 | |
91447636 | 1610 | if (bpf_devsw_installed == 0) { |
9bccf70c | 1611 | bpf_devsw_installed = 1; |
91447636 A |
1612 | |
1613 | bpf_mlock_grp_attr = lck_grp_attr_alloc_init(); | |
91447636 A |
1614 | |
1615 | bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr); | |
1616 | ||
1617 | bpf_mlock_attr = lck_attr_alloc_init(); | |
91447636 A |
1618 | |
1619 | bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr); | |
1620 | ||
1621 | if (bpf_mlock == 0) { | |
1622 | printf("bpf_init: failed to allocate bpf_mlock\n"); | |
1623 | bpf_devsw_installed = 0; | |
1624 | return; | |
1625 | } | |
1626 | ||
9bccf70c A |
1627 | maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw); |
1628 | if (maj == -1) { | |
91447636 A |
1629 | if (bpf_mlock) |
1630 | lck_mtx_free(bpf_mlock, bpf_mlock_grp); | |
1631 | if (bpf_mlock_attr) | |
1632 | lck_attr_free(bpf_mlock_attr); | |
1633 | if (bpf_mlock_grp) | |
1634 | lck_grp_free(bpf_mlock_grp); | |
1635 | if (bpf_mlock_grp_attr) | |
1636 | lck_grp_attr_free(bpf_mlock_grp_attr); | |
1637 | ||
1638 | bpf_mlock = 0; | |
1639 | bpf_mlock_attr = 0; | |
1640 | bpf_mlock_grp = 0; | |
1641 | bpf_mlock_grp_attr = 0; | |
1642 | bpf_devsw_installed = 0; | |
9bccf70c | 1643 | printf("bpf_init: failed to allocate a major number!\n"); |
55e303ae | 1644 | return; |
9bccf70c | 1645 | } |
91447636 | 1646 | |
55e303ae A |
1647 | for (i = 0 ; i < NBPFILTER; i++) |
1648 | bpf_make_dev_t(maj); | |
9bccf70c A |
1649 | } |
1650 | #else | |
1651 | cdevsw_add(&bpf_cdevsw); | |
1652 | #endif | |
1c79356b A |
1653 | } |
1654 | ||
9bccf70c | 1655 | #ifndef __APPLE__ |
1c79356b | 1656 | SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) |
1c79356b | 1657 | #endif |
9bccf70c A |
1658 | |
1659 | #else /* !BPF */ | |
1660 | #ifndef __APPLE__ | |
1661 | /* | |
1662 | * NOP stubs to allow bpf-using drivers to load and function. | |
1663 | * | |
1664 | * A 'better' implementation would allow the core bpf functionality | |
1665 | * to be loaded at runtime. | |
1666 | */ | |
1667 | ||
1668 | void | |
1669 | bpf_tap(ifp, pkt, pktlen) | |
1670 | struct ifnet *ifp; | |
1671 | register u_char *pkt; | |
1672 | register u_int pktlen; | |
1673 | { | |
1674 | } | |
1675 | ||
1676 | void | |
1677 | bpf_mtap(ifp, m) | |
1678 | struct ifnet *ifp; | |
1679 | struct mbuf *m; | |
1680 | { | |
1681 | } | |
1682 | ||
1683 | void | |
1684 | bpfattach(ifp, dlt, hdrlen) | |
1685 | struct ifnet *ifp; | |
1686 | u_int dlt, hdrlen; | |
1687 | { | |
1688 | } | |
1689 | ||
1690 | void | |
1691 | bpfdetach(ifp) | |
1692 | struct ifnet *ifp; | |
1693 | { | |
1694 | } | |
1695 | ||
1696 | u_int | |
1697 | bpf_filter(pc, p, wirelen, buflen) | |
1698 | register const struct bpf_insn *pc; | |
1699 | register u_char *p; | |
1700 | u_int wirelen; | |
1701 | register u_int buflen; | |
1702 | { | |
1703 | return -1; /* "no filter" behaviour */ | |
1704 | } | |
1705 | #endif /* !defined(__APPLE__) */ | |
1706 | #endif /* NBPFILTER > 0 */ |