/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>

#include <sys/poll.h>

#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();

#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26      /* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;

SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
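
/*
 * Illustrative sketch (not compiled here): the value registered above can be
 * read or tuned from user space through the standard sysctl interface; the
 * helper name below is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
get_default_bpf_bufsize(void)
{
    int val;
    size_t len = sizeof(val);

    /* "debug.bpf_bufsize" is the OID declared by the SYSCTL_INT above */
    if (sysctlbyname("debug.bpf_bufsize", &val, &len, NULL, 0) == -1)
        return (-1);
    return (val);
}
#endif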

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if    *bpf_iflist;
static struct bpf_d     bpf_dtab[NBPFILTER];
static int              bpf_dtab_init;
static int              nbpfilter = NBPFILTER;

static int  bpf_allocbufs __P((struct bpf_d *));
static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void bpf_detachd __P((struct bpf_d *d));
static void bpf_freed __P((struct bpf_d *));
static void bpf_ifname __P((struct ifnet *, struct ifreq *));
static void bpf_mcopy __P((const void *, void *, size_t));
static int  bpf_movein __P((struct uio *, int,
                struct mbuf **, struct sockaddr *, int *));
static int  bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void bpf_wakeup __P((struct bpf_d *));
static void catchpacket __P((struct bpf_d *, u_char *, u_int,
                u_int, void (*)(const void *, void *, size_t)));
static void reset_d __P((struct bpf_d *));
static int  bpf_setf __P((struct bpf_d *, struct bpf_program *));

d_open_t    bpfopen;
d_close_t   bpfclose;
d_read_t    bpfread;
d_write_t   bpfwrite;
d_ioctl_t   bpfioctl;

#define BPF_MAJOR   7

void bpf_mtap(struct ifnet *, struct mbuf *);

int bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
    bpfpoll();

static struct cdevsw bpf_cdevsw = {
    bpfopen,    bpfclose,   bpfread,    bpfwrite,
    bpfioctl,   nulldev,    nulldev,    NULL,       bpfpoll,
    eno_mmap,   eno_strat,  eno_getc,   eno_putc,   0
};

static int
bpf_movein(uio, linktype, mp, sockp, datlen)
    register struct uio *uio;
    int linktype, *datlen;
    register struct mbuf **mp;
    register struct sockaddr *sockp;
{
    struct mbuf *m;
    int error;
    int len;
    int hlen;

    /*
     * Build a sockaddr based on the data link layer type.
     * We do this at this level because the ethernet header
     * is copied directly into the data field of the sockaddr.
     * In the case of SLIP, there is no header and the packet
     * is forwarded as is.
     * Also, we are careful to leave room at the front of the mbuf
     * for the link level header.
     */
    switch (linktype) {

    case DLT_SLIP:
        sockp->sa_family = AF_INET;
        hlen = 0;
        break;

    case DLT_EN10MB:
        sockp->sa_family = AF_UNSPEC;
        /* XXX Would MAXLINKHDR be better? */
        hlen = sizeof(struct ether_header);
        break;

    case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
        sockp->sa_family = AF_IMPLINK;
        hlen = 0;
#else
        sockp->sa_family = AF_UNSPEC;
        /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
        hlen = 24;
#endif
        break;

    case DLT_RAW:
    case DLT_NULL:
        sockp->sa_family = AF_UNSPEC;
        hlen = 0;
        break;

#ifdef __FreeBSD__
    case DLT_ATM_RFC1483:
        /*
         * en atm driver requires 4-byte atm pseudo header.
         * though it isn't standard, vpi:vci needs to be
         * specified anyway.
         */
        sockp->sa_family = AF_UNSPEC;
        hlen = 12;  /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
        break;
#endif

    default:
        return (EIO);
    }

    len = uio->uio_resid;
    *datlen = len - hlen;
    if ((unsigned)len > MCLBYTES)
        return (EIO);

    MGETHDR(m, M_WAIT, MT_DATA);
    if (m == 0)
        return (ENOBUFS);
    if (len > MHLEN) {
#if BSD >= 199103
        MCLGET(m, M_WAIT);
        if ((m->m_flags & M_EXT) == 0) {
#else
        MCLGET(m);
        if (m->m_len != MCLBYTES) {
#endif
            error = ENOBUFS;
            goto bad;
        }
    }
    m->m_pkthdr.len = m->m_len = len;
    m->m_pkthdr.rcvif = NULL;
    *mp = m;
    /*
     * Make room for link header.
     */
    if (hlen != 0) {
        m->m_pkthdr.len -= hlen;
        m->m_len -= hlen;
#if BSD >= 199103
        m->m_data += hlen; /* XXX */
#else
        m->m_off += hlen;
#endif
        error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
        if (error)
            goto bad;
    }
    error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
    if (!error)
        return (0);
 bad:
    m_freem(m);
    return (error);
}

int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);

    /*
     * Do nothing if the BPF tap has been turned off.
     * This is to protect from a potential race where this
     * call blocks on the funnel lock.  And in the meantime
     * BPF is turned off, which will clear if_bpf.
     */
    if (ifp->if_bpf)
        bpf_mtap(ifp, m);

    thread_funnel_set(network_flock, funnel_state);
    return 0;
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
    struct bpf_d *d;
    struct bpf_if *bp;
{
    struct ifnet *ifp;

    /*
     * Point d at bp, and add d to the interface's list of listeners.
     * Finally, point the driver's bpf cookie at the interface so
     * it will divert packets to bpf.
     */
    d->bd_bif = bp;
    d->bd_next = bp->bif_dlist;
    bp->bif_dlist = d;

    bp->bif_ifp->if_bpf = bp;
    ifp = bp->bif_ifp;

    if (ifp->if_set_bpf_tap)
        (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
    struct bpf_d *d;
{
    struct bpf_d **p;
    struct bpf_if *bp;
    struct ifnet *ifp;

    ifp = d->bd_bif->bif_ifp;
    if (ifp->if_set_bpf_tap)
        (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);

    bp = d->bd_bif;
    /*
     * Check if this descriptor had requested promiscuous mode.
     * If so, turn it off.
     */
    if (d->bd_promisc) {
        d->bd_promisc = 0;
        if (ifpromisc(bp->bif_ifp, 0))
            /*
             * Something is really wrong if we were able to put
             * the driver into promiscuous mode, but can't
             * take it out.
             */
            panic("bpf: ifpromisc failed");
    }
    /* Remove d from the interface's descriptor list. */
    p = &bp->bif_dlist;
    while (*p != d) {
        p = &(*p)->bd_next;
        if (*p == 0)
            panic("bpf_detachd: descriptor not in list");
    }
    *p = (*p)->bd_next;
    if (bp->bif_dlist == 0)
        /*
         * Let the driver know that there are no more listeners.
         */
        d->bd_bif->bif_ifp->if_bpf = 0;
    d->bd_bif = 0;
}

/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flags, fmt, p)
    dev_t dev;
    int flags;
    int fmt;
    struct proc *p;
{
    register struct bpf_d *d;

    if (minor(dev) >= nbpfilter)
        return (ENXIO);

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    /*
     * Each minor can be opened by only one process.  If the requested
     * minor is in use, return EBUSY.
     */
    d = &bpf_dtab[minor(dev)];
    if (!D_ISFREE(d)) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (EBUSY);
    }

    /* Mark "free" and do most initialization. */
    bzero((char *)d, sizeof(*d));
    d->bd_bufsize = bpf_bufsize;
    d->bd_sig = SIGIO;
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flags, fmt, p)
    dev_t dev;
    int flags;
    int fmt;
    struct proc *p;
{
    register struct bpf_d *d;
    register int s;

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

    s = splimp();
    d = &bpf_dtab[minor(dev)];
    if (d->bd_bif)
        bpf_detachd(d);
    splx(s);
    bpf_freed(d);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
    caddr_t arg;
{
    boolean_t funnel_state;
    struct bpf_d *d = (struct bpf_d *)arg;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    d->bd_timedout = 1;
    wakeup(arg);
    (void) thread_funnel_set(network_flock, FALSE);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
    register struct bpf_d *d;
{
    register int rto = d->bd_rtout;
    register int st;

    if (rto != 0) {
        d->bd_timedout = 0;
        timeout(bpf_timeout, (caddr_t)d, rto);
    }
    st = sleep((caddr_t)d, PRINET|PCATCH);
    if (rto != 0) {
        if (d->bd_timedout == 0)
            untimeout(bpf_timeout, (caddr_t)d);
        else if (st == 0)
            return EWOULDBLOCK;
    }
    return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
    (d)->bd_hbuf = (d)->bd_sbuf; \
    (d)->bd_hlen = (d)->bd_slen; \
    (d)->bd_sbuf = (d)->bd_fbuf; \
    (d)->bd_slen = 0; \
    (d)->bd_fbuf = 0;
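/*
 * Each descriptor thus cycles through three buffers: the store buffer
 * (bd_sbuf) is the one catchpacket() appends to, the hold buffer (bd_hbuf)
 * is full and waiting to be drained by bpfread(), and the free buffer
 * (bd_fbuf) is the spare that becomes the next store buffer.
 */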
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio, ioflag)
    dev_t dev;
    struct uio *uio;
    int ioflag;
{
    register struct bpf_d *d;
    int error;
    int s;

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    d = &bpf_dtab[minor(dev)];

    /*
     * Restrict application to use a buffer the same size as
     * the kernel buffers.
     */
    if (uio->uio_resid != d->bd_bufsize) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (EINVAL);
    }

    s = splimp();
    /*
     * If the hold buffer is empty, then do a timed sleep, which
     * ends when the timeout expires or when enough packets
     * have arrived to fill the store buffer.
     */
    while (d->bd_hbuf == 0) {
        if (d->bd_immediate && d->bd_slen != 0) {
            /*
             * A packet(s) either arrived since the previous
             * read or arrived while we were asleep.
             * Rotate the buffers and return what's here.
             */
            ROTATE_BUFFERS(d);
            break;
        }
        if (ioflag & IO_NDELAY)
            error = EWOULDBLOCK;
        else
            error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
                      d->bd_rtout);
        if (error == EINTR || error == ERESTART) {
            splx(s);
            thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
            return (error);
        }
        if (error == EWOULDBLOCK) {
            /*
             * On a timeout, return what's in the buffer,
             * which may be nothing.  If there is something
             * in the store buffer, we can rotate the buffers.
             */
            if (d->bd_hbuf)
                /*
                 * We filled up the buffer in between
                 * getting the timeout and arriving
                 * here, so we don't need to rotate.
                 */
                break;

            if (d->bd_slen == 0) {
                splx(s);
                thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
                return (0);
            }
            ROTATE_BUFFERS(d);
            break;
        }
    }
    /*
     * At this point, we know we have something in the hold slot.
     */
    splx(s);

    /*
     * Move data from hold buffer into user space.
     * We know the entire buffer is transferred since
     * we checked above that the read buffer is bpf_bufsize bytes.
     */
    error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

    s = splimp();
    d->bd_fbuf = d->bd_hbuf;
    d->bd_hbuf = 0;
    d->bd_hlen = 0;
    splx(s);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
    register struct bpf_d *d;
{
    wakeup((caddr_t)d);
    if (d->bd_async && d->bd_sig && d->bd_sigio)
        pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    selwakeup(&d->bd_sel);
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    /* XXX */
    d->bd_sel.si_thread = 0;
#else
    if (d->bd_selproc) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        selwakeup(d->bd_selproc, (int)d->bd_selcoll);
        thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
        d->bd_selcoll = 0;
        d->bd_selproc = 0;
    }
#endif
}

int
bpfwrite(dev, uio, ioflag)
    dev_t dev;
    struct uio *uio;
    int ioflag;
{
    register struct bpf_d *d;
    struct ifnet *ifp;
    struct mbuf *m;
    int error, s;
    static struct sockaddr dst;
    int datlen;

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    d = &bpf_dtab[minor(dev)];
    if (d->bd_bif == 0) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (ENXIO);
    }

    ifp = d->bd_bif->bif_ifp;

    if (uio->uio_resid == 0) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (0);
    }

    error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
    if (error) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (error);
    }

    if (datlen > ifp->if_mtu) {
        thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
        return (EMSGSIZE);
    }

    s = splnet();

    error = dlil_output((u_long) ifp, m,
            (caddr_t) 0, &dst, 0);

    /*
    error = dlil_inject_if_output(m, DLIL_NULL_FILTER);
    */

    splx(s);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

    /*
     * The driver frees the mbuf.
     */
    return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
    struct bpf_d *d;
{
    if (d->bd_hbuf) {
        /* Free the hold buffer. */
        d->bd_fbuf = d->bd_hbuf;
        d->bd_hbuf = 0;
    }
    d->bd_slen = 0;
    d->bd_hlen = 0;
    d->bd_rcount = 0;
    d->bd_dcount = 0;
}

/*
 *  FIONREAD        Check for read packet available.
 *  SIOCGIFADDR     Get interface address - convenient hook to driver.
 *  BIOCGBLEN       Get buffer len [for read()].
 *  BIOCSETF        Set ethernet read filter.
 *  BIOCFLUSH       Flush read packet buffer.
 *  BIOCPROMISC     Put interface into promiscuous mode.
 *  BIOCGDLT        Get link layer type.
 *  BIOCGETIF       Get interface name.
 *  BIOCSETIF       Set interface.
 *  BIOCSRTIMEOUT   Set read timeout.
 *  BIOCGRTIMEOUT   Get read timeout.
 *  BIOCGSTATS      Get packet stats.
 *  BIOCIMMEDIATE   Set immediate mode.
 *  BIOCVERSION     Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
    dev_t dev;
    u_long cmd;
    caddr_t addr;
    int flags;
    struct proc *p;
{
    register struct bpf_d *d;
    int s, error = 0;

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    d = &bpf_dtab[minor(dev)];

    switch (cmd) {

    default:
        error = EINVAL;
        break;

    /*
     * Check for read packet available.
     */
    case FIONREAD:
        {
            int n;

            s = splimp();
            n = d->bd_slen;
            if (d->bd_hbuf)
                n += d->bd_hlen;
            splx(s);

            *(int *)addr = n;
            break;
        }

    case SIOCGIFADDR:
        {
            struct ifnet *ifp;

            if (d->bd_bif == 0)
                error = EINVAL;
            else {
                ifp = d->bd_bif->bif_ifp;
                error = (*ifp->if_ioctl)(ifp, cmd, addr);
            }
            break;
        }

    /*
     * Get buffer len [for read()].
     */
    case BIOCGBLEN:
        *(u_int *)addr = d->bd_bufsize;
        break;

    /*
     * Set buffer length.
     */
    case BIOCSBLEN:
#if BSD < 199103
        error = EINVAL;
#else
        if (d->bd_bif != 0)
            error = EINVAL;
        else {
            register u_int size = *(u_int *)addr;

            if (size > BPF_MAXBUFSIZE)
                *(u_int *)addr = size = BPF_MAXBUFSIZE;
            else if (size < BPF_MINBUFSIZE)
                *(u_int *)addr = size = BPF_MINBUFSIZE;
            d->bd_bufsize = size;
        }
#endif
        break;

    /*
     * Set link layer read filter.
     */
    case BIOCSETF:
        error = bpf_setf(d, (struct bpf_program *)addr);
        break;

    /*
     * Flush read packet buffer.
     */
    case BIOCFLUSH:
        s = splimp();
        reset_d(d);
        splx(s);
        break;

    /*
     * Put interface into promiscuous mode.
     */
    case BIOCPROMISC:
        if (d->bd_bif == 0) {
            /*
             * No interface attached yet.
             */
            error = EINVAL;
            break;
        }
        s = splimp();
        if (d->bd_promisc == 0) {
            error = ifpromisc(d->bd_bif->bif_ifp, 1);
            if (error == 0)
                d->bd_promisc = 1;
        }
        splx(s);
        break;

    /*
     * Get device parameters.
     */
    case BIOCGDLT:
        if (d->bd_bif == 0)
            error = EINVAL;
        else
            *(u_int *)addr = d->bd_bif->bif_dlt;
        break;

    /*
     * Get interface name.
     */
    case BIOCGETIF:
        if (d->bd_bif == 0)
            error = EINVAL;
        else
            bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
        break;

    /*
     * Set interface.
     */
    case BIOCSETIF:
        error = bpf_setif(d, (struct ifreq *)addr);
        break;

    /*
     * Set read timeout.
     */
    case BIOCSRTIMEOUT:
        {
            struct timeval *tv = (struct timeval *)addr;

            /*
             * Subtract 1 tick from tvtohz() since this isn't
             * a one-shot timer.
             */
            if ((error = itimerfix(tv)) == 0)
                d->bd_rtout = tvtohz(tv) - 1;
            break;
        }

    /*
     * Get read timeout.
     */
    case BIOCGRTIMEOUT:
        {
            struct timeval *tv = (struct timeval *)addr;

            tv->tv_sec = d->bd_rtout / hz;
            tv->tv_usec = (d->bd_rtout % hz) * tick;
            break;
        }

    /*
     * Get packet stats.
     */
    case BIOCGSTATS:
        {
            struct bpf_stat *bs = (struct bpf_stat *)addr;

            bs->bs_recv = d->bd_rcount;
            bs->bs_drop = d->bd_dcount;
            break;
        }

    /*
     * Set immediate mode.
     */
    case BIOCIMMEDIATE:
        d->bd_immediate = *(u_int *)addr;
        break;

    case BIOCVERSION:
        {
            struct bpf_version *bv = (struct bpf_version *)addr;

            bv->bv_major = BPF_MAJOR_VERSION;
            bv->bv_minor = BPF_MINOR_VERSION;
            break;
        }

    case FIONBIO:       /* Non-blocking I/O */
        break;

    case FIOASYNC:      /* Send signal on receive packets */
        d->bd_async = *(int *)addr;
        break;
#if ISFB31
    case FIOSETOWN:
        error = fsetown(*(int *)addr, &d->bd_sigio);
        break;

    case FIOGETOWN:
        *(int *)addr = fgetown(d->bd_sigio);
        break;

    /* This is deprecated, FIOSETOWN should be used instead. */
    case TIOCSPGRP:
        error = fsetown(-(*(int *)addr), &d->bd_sigio);
        break;

    /* This is deprecated, FIOGETOWN should be used instead. */
    case TIOCGPGRP:
        *(int *)addr = -fgetown(d->bd_sigio);
        break;
#endif
    case BIOCSRSIG:     /* Set receive signal */
        {
            u_int sig;

            sig = *(u_int *)addr;

            if (sig >= NSIG)
                error = EINVAL;
            else
                d->bd_sig = sig;
            break;
        }
    case BIOCGRSIG:
        *(u_int *)addr = d->bd_sig;
        break;
    }
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    return (error);
}
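
/*
 * Illustrative userland sketch (not compiled here) of the ioctl sequence
 * handled above: open a /dev/bpf node, bind it to an interface with
 * BIOCSETIF, enable immediate mode, and fetch the buffer size that read()
 * must use with BIOCGBLEN.  The helper name and any interface name a
 * caller passes (e.g. "en0") are only examples.  The driver would also
 * have called bpfattach() for that interface.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
open_bpf(const char *ifname, u_int *bufsize)
{
    struct ifreq ifr;
    u_int one = 1;
    int fd;

    if ((fd = open("/dev/bpf0", O_RDONLY)) < 0)
        return (-1);
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
    if (ioctl(fd, BIOCSETIF, &ifr) < 0 ||      /* attach to the interface */
        ioctl(fd, BIOCIMMEDIATE, &one) < 0 ||  /* wake reads per packet */
        ioctl(fd, BIOCGBLEN, bufsize) < 0) {   /* read() must use this size */
        close(fd);
        return (-1);
    }
    return (fd);
}
#endif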

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
    struct bpf_d *d;
    struct bpf_program *fp;
{
    struct bpf_insn *fcode, *old;
    u_int flen, size;
    int s;

    old = d->bd_filter;
    if (fp->bf_insns == 0) {
        if (fp->bf_len != 0)
            return (EINVAL);
        s = splimp();
        d->bd_filter = 0;
        reset_d(d);
        splx(s);
        if (old != 0)
            FREE((caddr_t)old, M_DEVBUF);
        return (0);
    }
    flen = fp->bf_len;
    if (flen > BPF_MAXINSNS)
        return (EINVAL);

    size = flen * sizeof(*fp->bf_insns);
    fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
    if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
        bpf_validate(fcode, (int)flen)) {
        s = splimp();
        d->bd_filter = fcode;
        reset_d(d);
        splx(s);
        if (old != 0)
            FREE((caddr_t)old, M_DEVBUF);

        return (0);
    }
    FREE((caddr_t)fcode, M_DEVBUF);
    return (EINVAL);
}
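
/*
 * Illustrative userland sketch (not compiled here): installing a trivial
 * filter through BIOCSETF, which lands in bpf_setf() above.  The single
 * BPF_RET instruction accepts every packet in full; the helper name is
 * hypothetical.
 */
#if 0
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
set_accept_all_filter(int fd)
{
    static struct bpf_insn insns[] = {
        BPF_STMT(BPF_RET+BPF_K, (u_int)-1),    /* accept, maximum snaplen */
    };
    struct bpf_program prog;

    prog.bf_len = sizeof(insns) / sizeof(insns[0]);
    prog.bf_insns = insns;
    return (ioctl(fd, BIOCSETF, &prog));
}
#endif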

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
    struct bpf_d *d;
    struct ifreq *ifr;
{
    struct bpf_if *bp;
    int s, error;
    struct ifnet *theywant;

    theywant = ifunit(ifr->ifr_name);
    if (theywant == 0)
        return ENXIO;

    /*
     * Look through attached interfaces for the named one.
     */
    for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
        struct ifnet *ifp = bp->bif_ifp;

        if (ifp == 0 || ifp != theywant)
            continue;
        /*
         * We found the requested interface.
         * If it's not up, return an error.
         * Allocate the packet buffers if we need to.
         * If we're already attached to requested interface,
         * just flush the buffer.
         */
        if ((ifp->if_flags & IFF_UP) == 0)
            return (ENETDOWN);

        if (d->bd_sbuf == 0) {
            error = bpf_allocbufs(d);
            if (error != 0)
                return (error);
        }
        s = splimp();
        if (bp != d->bd_bif) {
            if (d->bd_bif)
                /*
                 * Detach if attached to something else.
                 */
                bpf_detachd(d);

            bpf_attachd(d, bp);
        }
        reset_d(d);
        splx(s);
        return (0);
    }
    /* Not found. */
    return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
    struct ifnet *ifp;
    struct ifreq *ifr;
{
    char *s = ifp->if_name;
    char *d = ifr->ifr_name;

    while (*d++ = *s++)
        continue;
    d--; /* back to the null */
    /* XXX Assume that unit number is less than 10. */
    *d++ = ifp->if_unit + '0';
    *d = '\0';
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
    register dev_t dev;
    int events;
    struct proc *p;
{
    register struct bpf_d *d;
    register int s;
    int revents = 0;

    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    /*
     * An imitation of the FIONREAD ioctl code.
     */
    d = &bpf_dtab[minor(dev)];

    s = splimp();
    if (events & (POLLIN | POLLRDNORM))
        if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
            revents |= events & (POLLIN | POLLRDNORM);
        else
            selrecord(p, &d->bd_sel);

    splx(s);
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
    return (revents);
}
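
/*
 * Illustrative userland sketch (not compiled here): waiting for captured
 * packets with poll(), which ends up in bpfpoll() above.  A return value
 * greater than zero means a read() of the negotiated buffer size will not
 * block indefinitely.  The helper name is hypothetical.
 */
#if 0
#include <poll.h>

static int
wait_for_packets(int fd, int timeout_ms)
{
    struct pollfd pfd;

    pfd.fd = fd;
    pfd.events = POLLIN | POLLRDNORM;
    pfd.revents = 0;
    return (poll(&pfd, 1, timeout_ms));
}
#endif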

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
    struct ifnet *ifp;
    register u_char *pkt;
    register u_int pktlen;
{
    struct bpf_if *bp;
    register struct bpf_d *d;
    register u_int slen;
    /*
     * Note that the ipl does not have to be raised at this point.
     * The only problem that could arise here would be if two different
     * interfaces shared any data.  This is not the case.
     */
    thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
    if ((bp = ifp->if_bpf)) {
        for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
            ++d->bd_rcount;
            slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
            if (slen != 0)
                catchpacket(d, pkt, pktlen, slen, bcopy);
        }
    }
    thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
    const void *src_arg;
    void *dst_arg;
    register size_t len;
{
    register const struct mbuf *m;
    register u_int count;
    u_char *dst;

    m = src_arg;
    dst = dst_arg;
    while (len > 0) {
        if (m == 0)
            panic("bpf_mcopy");
        count = min(m->m_len, len);
        bcopy(mtod(m, void *), dst, count);
        m = m->m_next;
        dst += count;
        len -= count;
    }
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
    struct ifnet *ifp;
    struct mbuf *m;
{
    struct bpf_if *bp = ifp->if_bpf;
    struct bpf_d *d;
    u_int pktlen, slen;
    struct mbuf *m0;

    pktlen = 0;
    for (m0 = m; m0 != 0; m0 = m0->m_next)
        pktlen += m0->m_len;

    for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
        ++d->bd_rcount;
        slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
        if (slen != 0)
            catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
    }
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
    register struct bpf_d *d;
    register u_char *pkt;
    register u_int pktlen, snaplen;
    register void (*cpfn) __P((const void *, void *, size_t));
{
    register struct bpf_hdr *hp;
    register int totlen, curlen;
    register int hdrlen = d->bd_bif->bif_hdrlen;
    /*
     * Figure out how many bytes to move.  If the packet is
     * greater or equal to the snapshot length, transfer that
     * much.  Otherwise, transfer the whole packet (unless
     * we hit the buffer size limit).
     */
    totlen = hdrlen + min(snaplen, pktlen);
    if (totlen > d->bd_bufsize)
        totlen = d->bd_bufsize;

    /*
     * Round up the end of the previous packet to the next longword.
     */
    curlen = BPF_WORDALIGN(d->bd_slen);
    if (curlen + totlen > d->bd_bufsize) {
        /*
         * This packet will overflow the storage buffer.
         * Rotate the buffers if we can, then wakeup any
         * pending reads.
         */
        if (d->bd_fbuf == 0) {
            /*
             * We haven't completed the previous read yet,
             * so drop the packet.
             */
            ++d->bd_dcount;
            return;
        }
        ROTATE_BUFFERS(d);
        bpf_wakeup(d);
        curlen = 0;
    }
    else if (d->bd_immediate)
        /*
         * Immediate mode is set.  A packet arrived so any
         * reads should be woken up.
         */
        bpf_wakeup(d);

    /*
     * Append the bpf header.
     */
    hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
    microtime(&hp->bh_tstamp);
#elif defined(sun)
    uniqtime(&hp->bh_tstamp);
#else
    hp->bh_tstamp = time;
#endif
    hp->bh_datalen = pktlen;
    hp->bh_hdrlen = hdrlen;
    /*
     * Copy the packet data into the store buffer and update its length.
     */
    (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
    d->bd_slen = curlen + totlen;
}
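
/*
 * Illustrative userland sketch (not compiled here): walking the records that
 * catchpacket() lays out in the buffer returned by read().  Each record is a
 * struct bpf_hdr followed by bh_caplen captured bytes, and the next record
 * starts at the following BPF_WORDALIGN boundary.  handle_packet() is a
 * hypothetical consumer.
 */
#if 0
#include <sys/types.h>
#include <net/bpf.h>

static void
walk_records(char *buf, ssize_t nread)
{
    char *p = buf;

    while (p < buf + nread) {
        struct bpf_hdr *bh = (struct bpf_hdr *)p;
        u_char *pkt = (u_char *)p + bh->bh_hdrlen;

        /* bh_caplen bytes captured out of a bh_datalen-byte packet */
        handle_packet(pkt, bh->bh_caplen, bh->bh_datalen);
        p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
    }
}
#endif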

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
    register struct bpf_d *d;
{
    d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
    if (d->bd_fbuf == 0)
        return (ENOBUFS);

    d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
    if (d->bd_sbuf == 0) {
        FREE(d->bd_fbuf, M_DEVBUF);
        return (ENOBUFS);
    }
    d->bd_slen = 0;
    d->bd_hlen = 0;
    return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
    register struct bpf_d *d;
{
    /*
     * We don't need to lock out interrupts since this descriptor has
     * been detached from its interface and it hasn't yet been marked
     * free.
     */
    if (d->bd_sbuf != 0) {
        FREE(d->bd_sbuf, M_DEVBUF);
        if (d->bd_hbuf != 0)
            FREE(d->bd_hbuf, M_DEVBUF);
        if (d->bd_fbuf != 0)
            FREE(d->bd_fbuf, M_DEVBUF);
    }
    if (d->bd_filter)
        FREE((caddr_t)d->bd_filter, M_DEVBUF);

    D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached; dlt is the link layer type;
 * hdrlen is the fixed size of the link header (variable length headers
 * not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
    struct ifnet *ifp;
    u_int dlt, hdrlen;
{
    struct bpf_if *bp;
    int i;

    bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
    if (bp == 0)
        panic("bpfattach");

    bp->bif_dlist = 0;
    bp->bif_ifp = ifp;
    bp->bif_dlt = dlt;

    bp->bif_next = bpf_iflist;
    bpf_iflist = bp;

    bp->bif_ifp->if_bpf = 0;

    /*
     * Compute the length of the bpf header.  This is not necessarily
     * equal to SIZEOF_BPF_HDR because we want to insert spacing such
     * that the network layer header begins on a longword boundary (for
     * performance reasons and to alleviate alignment restrictions).
     */
    bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
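    /*
     * For example, with an Ethernet link header (hdrlen of 14) and the
     * usual 18-byte SIZEOF_BPF_HDR, BPF_WORDALIGN(14 + 18) is 32, so
     * bif_hdrlen becomes 18 and the network layer header lands at
     * offset 32 in each record, longword aligned.
     */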

    /*
     * Mark all the descriptors free if this hasn't been done.
     */
    if (!bpf_dtab_init) {
        for (i = 0; i < nbpfilter; ++i)
            D_MARKFREE(&bpf_dtab[i]);
        bpf_dtab_init = 1;
    }
#if 0
    if (bootverbose)
        printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
#endif
}
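
/*
 * Illustrative driver-side sketch (not compiled here): in this file the tap
 * is wired up through the if_set_bpf_tap callback that bpf_attachd() and
 * bpf_detachd() invoke, so a driver records the callback handed to it and
 * feeds each packet through it while the tap is enabled.  The "exdriver"
 * names are hypothetical; BPF_TAP_DISABLE and the callback signature match
 * the calls made above, and the driver would also have called bpfattach()
 * once at interface-attach time.
 */
#if 0
static int (*exdriver_tap)(struct ifnet *, struct mbuf *);

int
exdriver_set_bpf_tap(struct ifnet *ifp, int mode,
    int (*callback)(struct ifnet *, struct mbuf *))
{
    exdriver_tap = (mode == BPF_TAP_DISABLE) ? NULL : callback;
    return (0);
}

void
exdriver_input(struct ifnet *ifp, struct mbuf *m)
{
    if (exdriver_tap)
        (*exdriver_tap)(ifp, m);    /* reaches bpf_tap_callback()/bpf_mtap() */
    /* ... continue with normal input processing ... */
}
#endif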

static void *bpf_devfs_token[NBPFILTER];

static int bpf_devsw_installed;

void bpf_init __P((void *unused));

void
bpf_init(unused)
    void *unused;
{
    int i;
    int maj;

    if (!bpf_devsw_installed) {
        bpf_devsw_installed = 1;
        maj = cdevsw_add(BPF_MAJOR, &bpf_cdevsw);
        if (maj == -1) {
            printf("bpf_init: failed to allocate a major number!\n");
            nbpfilter = 0;
            return;
        }
        for (i = 0 ; i < nbpfilter; i++) {
            bpf_devfs_token[i] = devfs_make_node(makedev(maj, i),
                DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
                "bpf%x", i);
        }
    }
}

/*
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
*/

#endif