1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * Copyright (c) 1990, 1991, 1993 | |
24 | * The Regents of the University of California. All rights reserved. | |
25 | * | |
26 | * This code is derived from the Stanford/CMU enet packet filter, | |
27 | * (net/enet.c) distributed as part of 4.3BSD, and code contributed | |
28 | * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence | |
29 | * Berkeley Laboratory. | |
30 | * | |
31 | * Redistribution and use in source and binary forms, with or without | |
32 | * modification, are permitted provided that the following conditions | |
33 | * are met: | |
34 | * 1. Redistributions of source code must retain the above copyright | |
35 | * notice, this list of conditions and the following disclaimer. | |
36 | * 2. Redistributions in binary form must reproduce the above copyright | |
37 | * notice, this list of conditions and the following disclaimer in the | |
38 | * documentation and/or other materials provided with the distribution. | |
39 | * 3. All advertising materials mentioning features or use of this software | |
40 | * must display the following acknowledgement: | |
41 | * This product includes software developed by the University of | |
42 | * California, Berkeley and its contributors. | |
43 | * 4. Neither the name of the University nor the names of its contributors | |
44 | * may be used to endorse or promote products derived from this software | |
45 | * without specific prior written permission. | |
46 | * | |
47 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
48 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
49 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
50 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
51 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
52 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
53 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
54 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
55 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
56 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
57 | * SUCH DAMAGE. | |
58 | * | |
59 | * @(#)bpf.c 8.2 (Berkeley) 3/28/94 | |
60 | * | |
61 | */ | |
62 | ||
63 | #include "bpfilter.h" | |
64 | ||
65 | #if NBPFILTER > 0 | |
66 | ||
67 | #ifndef __GNUC__ | |
68 | #define inline | |
69 | #else | |
70 | #define inline __inline | |
71 | #endif | |
72 | ||
73 | #include <sys/param.h> | |
74 | #include <sys/systm.h> | |
75 | #include <sys/conf.h> | |
76 | #include <sys/malloc.h> | |
77 | #include <sys/mbuf.h> | |
78 | #include <sys/time.h> | |
79 | #include <sys/proc.h> | |
80 | ||
81 | ||
82 | #include <sys/poll.h> | |
83 | ||
84 | ||
85 | #include <sys/signalvar.h> | |
86 | #include <sys/filio.h> | |
87 | #include <sys/sockio.h> | |
88 | #include <sys/ttycom.h> | |
89 | #include <sys/filedesc.h> | |
90 | ||
91 | #include <sys/socket.h> | |
92 | #include <sys/vnode.h> | |
93 | ||
94 | #include <net/if.h> | |
95 | #include <net/bpf.h> | |
96 | #include <net/bpfdesc.h> | |
97 | ||
98 | #include <netinet/in.h> | |
99 | #include <netinet/if_ether.h> | |
100 | #include <sys/kernel.h> | |
101 | #include <sys/sysctl.h> | |
102 | ||
103 | ||
104 | #include <miscfs/devfs/devfs.h> | |
105 | #include <net/dlil.h> | |
106 | ||
107 | /* | |
108 | * Older BSDs don't have kernel malloc. | |
109 | */ | |
110 | #if BSD < 199103 | |
111 | extern bcopy(); | |
112 | static caddr_t bpf_alloc(); | |
113 | ||
114 | #define BPF_BUFSIZE (MCLBYTES-8) | |
115 | #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) | |
116 | #else | |
117 | #define BPF_BUFSIZE 4096 | |
118 | #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) | |
119 | #endif | |
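| /* UIOMOVE hides the extra "code" argument taken by the older four-argument uiomove(). */ | |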
120 | ||
121 | #define PRINET 26 /* interruptible */ | |
122 | ||
123 | /* | |
124 | * The default read buffer size is patchable. | |
125 | */ | |
126 | static int bpf_bufsize = BPF_BUFSIZE; | |
127 | ||
128 | ||
129 | ||
130 | SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, | |
131 | &bpf_bufsize, 0, ""); | |
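| /* bpf_bufsize only sets the default for newly opened descriptors; a | |
| * descriptor can still change its own size with BIOCSBLEN until it is | |
| * attached to an interface. */ | |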
132 | ||
133 | ||
134 | /* | |
135 | * bpf_iflist is the list of interfaces; each corresponds to an ifnet. | |
136 | * bpf_dtab holds the descriptors, indexed by minor device number. | |
137 | */ | |
138 | static struct bpf_if *bpf_iflist; | |
139 | static struct bpf_d bpf_dtab[NBPFILTER]; | |
140 | static int bpf_dtab_init; | |
141 | static int nbpfilter = NBPFILTER; | |
142 | ||
143 | static int bpf_allocbufs __P((struct bpf_d *)); | |
144 | static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp)); | |
145 | static void bpf_detachd __P((struct bpf_d *d)); | |
146 | static void bpf_freed __P((struct bpf_d *)); | |
147 | static void bpf_ifname __P((struct ifnet *, struct ifreq *)); | |
148 | static void bpf_mcopy __P((const void *, void *, size_t)); | |
149 | static int bpf_movein __P((struct uio *, int, | |
150 | struct mbuf **, struct sockaddr *, int *)); | |
151 | static int bpf_setif __P((struct bpf_d *, struct ifreq *)); | |
152 | static inline void | |
153 | bpf_wakeup __P((struct bpf_d *)); | |
154 | static void catchpacket __P((struct bpf_d *, u_char *, u_int, | |
155 | u_int, void (*)(const void *, void *, size_t))); | |
156 | static void reset_d __P((struct bpf_d *)); | |
157 | static int bpf_setf __P((struct bpf_d *, struct bpf_program *)); | |
158 | ||
159 | d_open_t bpfopen; | |
160 | d_close_t bpfclose; | |
161 | d_read_t bpfread; | |
162 | d_write_t bpfwrite; | |
163 | d_ioctl_t bpfioctl; | |
164 | ||
165 | ||
166 | #define BPF_MAJOR 7 | |
167 | ||
168 | void bpf_mtap(struct ifnet *, struct mbuf *); | |
169 | ||
170 | int bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(), | |
171 | bpfpoll(); | |
172 | ||
173 | ||
174 | static struct cdevsw bpf_cdevsw = { | |
175 | bpfopen, bpfclose, bpfread, bpfwrite, | |
176 | bpfioctl, nulldev, nulldev, NULL, bpfpoll, | |
177 | eno_mmap, eno_strat, eno_getc, eno_putc, 0 | |
178 | }; | |
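| /* nulldev and the eno_* entries stub out the character-device operations | |
| * that bpf does not support (e.g. stop, reset, mmap, strategy). */ | |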
179 | ||
180 | static int | |
181 | bpf_movein(uio, linktype, mp, sockp, datlen) | |
182 | register struct uio *uio; | |
183 | int linktype, *datlen; | |
184 | register struct mbuf **mp; | |
185 | register struct sockaddr *sockp; | |
186 | { | |
187 | struct mbuf *m; | |
188 | int error; | |
189 | int len; | |
190 | int hlen; | |
191 | ||
192 | /* | |
193 | * Build a sockaddr based on the data link layer type. | |
194 | * We do this at this level because the ethernet header | |
195 | * is copied directly into the data field of the sockaddr. | |
196 | * In the case of SLIP, there is no header and the packet | |
197 | * is forwarded as is. | |
198 | * Also, we are careful to leave room at the front of the mbuf | |
199 | * for the link level header. | |
200 | */ | |
201 | switch (linktype) { | |
202 | ||
203 | case DLT_SLIP: | |
204 | sockp->sa_family = AF_INET; | |
205 | hlen = 0; | |
206 | break; | |
207 | ||
208 | case DLT_EN10MB: | |
209 | sockp->sa_family = AF_UNSPEC; | |
210 | /* XXX Would MAXLINKHDR be better? */ | |
211 | hlen = sizeof(struct ether_header); | |
212 | break; | |
213 | ||
214 | case DLT_FDDI: | |
215 | #if defined(__FreeBSD__) || defined(__bsdi__) | |
216 | sockp->sa_family = AF_IMPLINK; | |
217 | hlen = 0; | |
218 | #else | |
219 | sockp->sa_family = AF_UNSPEC; | |
220 | /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ | |
221 | hlen = 24; | |
222 | #endif | |
223 | break; | |
224 | ||
225 | case DLT_RAW: | |
226 | case DLT_NULL: | |
227 | sockp->sa_family = AF_UNSPEC; | |
228 | hlen = 0; | |
229 | break; | |
230 | ||
231 | #ifdef __FreeBSD__ | |
232 | case DLT_ATM_RFC1483: | |
233 | /* | |
234 | * The en ATM driver requires a 4-byte ATM pseudo header. | |
235 | * Though it isn't standard, the VPI:VCI needs to be | |
236 | * specified anyway. | |
237 | */ | |
238 | sockp->sa_family = AF_UNSPEC; | |
239 | hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ | |
240 | break; | |
241 | #endif | |
242 | ||
243 | default: | |
244 | return (EIO); | |
245 | } | |
246 | ||
247 | len = uio->uio_resid; | |
248 | *datlen = len - hlen; | |
249 | if ((unsigned)len > MCLBYTES) | |
250 | return (EIO); | |
251 | ||
252 | MGETHDR(m, M_WAIT, MT_DATA); | |
253 | if (m == 0) | |
254 | return (ENOBUFS); | |
255 | if (len > MHLEN) { | |
256 | #if BSD >= 199103 | |
257 | MCLGET(m, M_WAIT); | |
258 | if ((m->m_flags & M_EXT) == 0) { | |
259 | #else | |
260 | MCLGET(m); | |
261 | if (m->m_len != MCLBYTES) { | |
262 | #endif | |
263 | error = ENOBUFS; | |
264 | goto bad; | |
265 | } | |
266 | } | |
267 | m->m_pkthdr.len = m->m_len = len; | |
268 | m->m_pkthdr.rcvif = NULL; | |
269 | *mp = m; | |
270 | /* | |
271 | * Make room for link header. | |
272 | */ | |
273 | if (hlen != 0) { | |
274 | m->m_pkthdr.len -= hlen; | |
275 | m->m_len -= hlen; | |
276 | #if BSD >= 199103 | |
277 | m->m_data += hlen; /* XXX */ | |
278 | #else | |
279 | m->m_off += hlen; | |
280 | #endif | |
281 | error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); | |
282 | if (error) | |
283 | goto bad; | |
284 | } | |
285 | error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); | |
286 | if (!error) | |
287 | return (0); | |
288 | bad: | |
289 | m_freem(m); | |
290 | return (error); | |
291 | } | |
292 | ||
293 | int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) | |
294 | { | |
295 | boolean_t funnel_state; | |
296 | ||
297 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
298 | ||
299 | /* | |
300 | * Do nothing if the BPF tap has been turned off. | |
301 | * This protects against a potential race in which this call | |
302 | * blocks on the funnel lock while BPF is turned off in the | |
303 | * meantime, clearing if_bpf. | |
304 | */ | |
305 | if (ifp->if_bpf) | |
306 | bpf_mtap(ifp, m); | |
307 | ||
308 | thread_funnel_set(network_flock, funnel_state); | |
309 | return 0; | |
310 | } | |
311 | ||
312 | ||
313 | /* | |
314 | * Attach file to the bpf interface, i.e. make d listen on bp. | |
315 | * Must be called at splimp. | |
316 | */ | |
317 | static void | |
318 | bpf_attachd(d, bp) | |
319 | struct bpf_d *d; | |
320 | struct bpf_if *bp; | |
321 | { | |
322 | struct ifnet *ifp; | |
323 | ||
324 | /* | |
325 | * Point d at bp, and add d to the interface's list of listeners. | |
326 | * Finally, point the driver's bpf cookie at the interface so | |
327 | * it will divert packets to bpf. | |
328 | */ | |
329 | d->bd_bif = bp; | |
330 | d->bd_next = bp->bif_dlist; | |
331 | bp->bif_dlist = d; | |
332 | ||
333 | bp->bif_ifp->if_bpf = bp; | |
334 | ifp = bp->bif_ifp; | |
335 | ||
336 | if (ifp->if_set_bpf_tap) | |
337 | (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); | |
338 | } | |
339 | ||
340 | /* | |
341 | * Detach a file from its interface. | |
342 | */ | |
343 | static void | |
344 | bpf_detachd(d) | |
345 | struct bpf_d *d; | |
346 | { | |
347 | struct bpf_d **p; | |
348 | struct bpf_if *bp; | |
349 | struct ifnet *ifp; | |
350 | ||
351 | ifp = d->bd_bif->bif_ifp; | |
352 | if (ifp->if_set_bpf_tap) | |
353 | (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0); | |
354 | ||
355 | bp = d->bd_bif; | |
356 | /* | |
357 | * Check if this descriptor had requested promiscuous mode. | |
358 | * If so, turn it off. | |
359 | */ | |
360 | if (d->bd_promisc) { | |
361 | d->bd_promisc = 0; | |
362 | if (ifpromisc(bp->bif_ifp, 0)) | |
363 | /* | |
364 | * Something is really wrong if we were able to put | |
365 | * the driver into promiscuous mode, but can't | |
366 | * take it out. | |
367 | */ | |
368 | panic("bpf: ifpromisc failed"); | |
369 | } | |
370 | /* Remove d from the interface's descriptor list. */ | |
371 | p = &bp->bif_dlist; | |
372 | while (*p != d) { | |
373 | p = &(*p)->bd_next; | |
374 | if (*p == 0) | |
375 | panic("bpf_detachd: descriptor not in list"); | |
376 | } | |
377 | *p = (*p)->bd_next; | |
378 | if (bp->bif_dlist == 0) | |
379 | /* | |
380 | * Let the driver know that there are no more listeners. | |
381 | */ | |
382 | d->bd_bif->bif_ifp->if_bpf = 0; | |
383 | d->bd_bif = 0; | |
384 | } | |
385 | ||
386 | ||
387 | /* | |
388 | * Mark a descriptor free by making it point to itself. | |
389 | * This is probably cheaper than marking with a constant since | |
390 | * the address should be in a register anyway. | |
391 | */ | |
392 | #define D_ISFREE(d) ((d) == (d)->bd_next) | |
393 | #define D_MARKFREE(d) ((d)->bd_next = (d)) | |
394 | #define D_MARKUSED(d) ((d)->bd_next = 0) | |
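| /* The bzero() in bpfopen() clears bd_next, which has the same effect as D_MARKUSED(). */ | |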
395 | ||
396 | /* | |
397 | * Open ethernet device. Returns ENXIO for illegal minor device number, | |
398 | * EBUSY if file is open by another process. | |
399 | */ | |
400 | /* ARGSUSED */ | |
401 | int | |
402 | bpfopen(dev, flags, fmt, p) | |
403 | dev_t dev; | |
404 | int flags; | |
405 | int fmt; | |
406 | struct proc *p; | |
407 | { | |
408 | register struct bpf_d *d; | |
409 | ||
410 | if (minor(dev) >= nbpfilter) | |
411 | return (ENXIO); | |
412 | ||
413 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
414 | /* | |
415 | * Each minor can be opened by only one process. If the requested | |
416 | * minor is in use, return EBUSY. | |
417 | */ | |
418 | d = &bpf_dtab[minor(dev)]; | |
419 | if (!D_ISFREE(d)) { | |
420 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
421 | return (EBUSY); | |
422 | } | |
423 | ||
424 | /* Mark "in use" and do most initialization. */ | |
425 | bzero((char *)d, sizeof(*d)); | |
426 | d->bd_bufsize = bpf_bufsize; | |
427 | d->bd_sig = SIGIO; | |
428 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
429 | return (0); | |
430 | } | |
431 | ||
432 | /* | |
433 | * Close the descriptor by detaching it from its interface, | |
434 | * deallocating its buffers, and marking it free. | |
435 | */ | |
436 | /* ARGSUSED */ | |
437 | int | |
438 | bpfclose(dev, flags, fmt, p) | |
439 | dev_t dev; | |
440 | int flags; | |
441 | int fmt; | |
442 | struct proc *p; | |
443 | { | |
444 | register struct bpf_d *d; | |
445 | register int s; | |
446 | ||
447 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
448 | ||
449 | s = splimp(); | |
450 | d = &bpf_dtab[minor(dev)]; | |
451 | if (d->bd_bif) | |
452 | bpf_detachd(d); | |
453 | splx(s); | |
454 | selthreadclear(&d->bd_sel); | |
455 | bpf_freed(d); |
456 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
457 | return (0); | |
458 | } | |
459 | ||
460 | /* | |
461 | * Support for SunOS, which does not have tsleep. | |
462 | */ | |
463 | #if BSD < 199103 | |
464 | static | |
465 | bpf_timeout(arg) | |
466 | caddr_t arg; | |
467 | { | |
468 | boolean_t funnel_state; | |
469 | struct bpf_d *d = (struct bpf_d *)arg; | |
470 | ||
471 | ||
472 | funnel_state = thread_funnel_set(network_flock, TRUE); | |
473 | d->bd_timedout = 1; | |
474 | wakeup(arg); | |
475 | (void) thread_funnel_set(network_flock, FALSE); | |
476 | } | |
477 | ||
478 | #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) | |
479 | ||
480 | int | |
481 | bpf_sleep(d) | |
482 | register struct bpf_d *d; | |
483 | { | |
484 | register int rto = d->bd_rtout; | |
485 | register int st; | |
486 | ||
487 | if (rto != 0) { | |
488 | d->bd_timedout = 0; | |
489 | timeout(bpf_timeout, (caddr_t)d, rto); | |
490 | } | |
491 | st = sleep((caddr_t)d, PRINET|PCATCH); | |
492 | if (rto != 0) { | |
493 | if (d->bd_timedout == 0) | |
494 | untimeout(bpf_timeout, (caddr_t)d); | |
495 | else if (st == 0) | |
496 | return EWOULDBLOCK; | |
497 | } | |
498 | return (st != 0) ? EINTR : 0; | |
499 | } | |
500 | #else | |
501 | #define BPF_SLEEP tsleep | |
502 | #endif | |
503 | ||
504 | /* | |
505 | * Rotate the packet buffers in descriptor d. Move the store buffer | |
506 | * into the hold slot, and the free buffer into the store slot. | |
507 | * Zero the length of the new store buffer. | |
508 | */ | |
509 | #define ROTATE_BUFFERS(d) \ | |
510 | (d)->bd_hbuf = (d)->bd_sbuf; \ | |
511 | (d)->bd_hlen = (d)->bd_slen; \ | |
512 | (d)->bd_sbuf = (d)->bd_fbuf; \ | |
513 | (d)->bd_slen = 0; \ | |
514 | (d)->bd_fbuf = 0; | |
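| /* Callers must guarantee a free buffer: catchpacket() checks bd_fbuf first, | |
| * and bpfread() only rotates while the hold slot is empty. */ | |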
515 | /* | |
516 | * bpfread - read next chunk of packets from buffers | |
517 | */ | |
518 | int | |
519 | bpfread(dev, uio, ioflag) | |
520 | dev_t dev; | |
521 | struct uio *uio; | |
522 | int ioflag; | |
523 | { | |
524 | register struct bpf_d *d; | |
525 | int error; | |
526 | int s; | |
527 | ||
528 | ||
529 | ||
530 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
531 | d = &bpf_dtab[minor(dev)]; | |
532 | ||
533 | /* | |
534 | * Restrict application to use a buffer the same size as | |
535 | * the kernel buffers. | |
536 | */ | |
537 | if (uio->uio_resid != d->bd_bufsize) { | |
538 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
539 | return (EINVAL); | |
540 | } | |
541 | ||
542 | s = splimp(); | |
543 | /* | |
544 | * If the hold buffer is empty, then do a timed sleep, which | |
545 | * ends when the timeout expires or when enough packets | |
546 | * have arrived to fill the store buffer. | |
547 | */ | |
548 | while (d->bd_hbuf == 0) { | |
549 | if (d->bd_immediate && d->bd_slen != 0) { | |
550 | /* | |
551 | * One or more packets either arrived since the previous | |
552 | * read or arrived while we were asleep. | |
553 | * Rotate the buffers and return what's here. | |
554 | */ | |
555 | ROTATE_BUFFERS(d); | |
556 | break; | |
557 | } | |
558 | if (ioflag & IO_NDELAY) | |
559 | error = EWOULDBLOCK; | |
560 | else | |
561 | error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", | |
562 | d->bd_rtout); | |
563 | if (error == EINTR || error == ERESTART) { | |
564 | splx(s); | |
565 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
566 | return (error); | |
567 | } | |
568 | if (error == EWOULDBLOCK) { | |
569 | /* | |
570 | * On a timeout, return what's in the buffer, | |
571 | * which may be nothing. If there is something | |
572 | * in the store buffer, we can rotate the buffers. | |
573 | */ | |
574 | if (d->bd_hbuf) | |
575 | /* | |
576 | * We filled up the buffer in between | |
577 | * getting the timeout and arriving | |
578 | * here, so we don't need to rotate. | |
579 | */ | |
580 | break; | |
581 | ||
582 | if (d->bd_slen == 0) { | |
583 | splx(s); | |
584 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
585 | return (0); | |
586 | } | |
587 | ROTATE_BUFFERS(d); | |
588 | break; | |
589 | } | |
590 | } | |
591 | /* | |
592 | * At this point, we know we have something in the hold slot. | |
593 | */ | |
594 | splx(s); | |
595 | ||
596 | /* | |
597 | * Move data from hold buffer into user space. | |
598 | * We know the entire buffer is transferred since | |
599 | * we checked above that the read buffer is bpf_bufsize bytes. | |
600 | */ | |
601 | error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); | |
602 | ||
603 | s = splimp(); | |
604 | d->bd_fbuf = d->bd_hbuf; | |
605 | d->bd_hbuf = 0; | |
606 | d->bd_hlen = 0; | |
607 | splx(s); | |
608 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
609 | return (error); | |
610 | } | |
611 | ||
612 | ||
613 | /* | |
614 | * If there are processes sleeping on this descriptor, wake them up. | |
615 | */ | |
616 | static inline void | |
617 | bpf_wakeup(d) | |
618 | register struct bpf_d *d; | |
619 | { | |
620 | wakeup((caddr_t)d); | |
621 | if (d->bd_async && d->bd_sig && d->bd_sigio) | |
622 | pgsigio(d->bd_sigio, d->bd_sig, 0); | |
623 | ||
624 | #if BSD >= 199103 | |
625 | selwakeup(&d->bd_sel); | |
626 | #else |
627 | if (d->bd_selproc) { | |
628 | selwakeup(d->bd_selproc, (int)d->bd_selcoll); | |
629 | d->bd_selcoll = 0; |
630 | d->bd_selproc = 0; | |
631 | } | |
632 | #endif | |
633 | } | |
634 | ||
635 | int | |
636 | bpfwrite(dev, uio, ioflag) | |
637 | dev_t dev; | |
638 | struct uio *uio; | |
639 | int ioflag; | |
640 | { | |
641 | register struct bpf_d *d; | |
642 | ||
643 | struct ifnet *ifp; | |
644 | struct mbuf *m; | |
645 | int error, s; | |
646 | static struct sockaddr dst; | |
647 | int datlen; | |
648 | ||
649 | ||
650 | ||
651 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
652 | d = &bpf_dtab[minor(dev)]; | |
653 | if (d->bd_bif == 0) { | |
654 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
655 | return (ENXIO); | |
656 | } | |
657 | ||
658 | ifp = d->bd_bif->bif_ifp; | |
659 | ||
660 | if (uio->uio_resid == 0) { | |
661 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
662 | return (0); | |
663 | } | |
664 | ||
665 | error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); | |
666 | if (error) { | |
667 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
668 | return (error); | |
669 | } | |
670 | ||
671 | if (datlen > ifp->if_mtu) { | |
672 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
673 | return (EMSGSIZE); | |
674 | } | |
675 | ||
676 | s = splnet(); | |
677 | ||
678 | error = dlil_output((u_long) ifp, m, | |
679 | (caddr_t) 0, &dst, 0); | |
680 | ||
681 | /* | |
682 | error = dlil_inject_if_output(m, DLIL_NULL_FILTER); | |
683 | */ | |
684 | ||
685 | splx(s); | |
686 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
687 | ||
688 | /* | |
689 | * The driver frees the mbuf. | |
690 | */ | |
691 | return (error); | |
692 | } | |
693 | ||
694 | /* | |
695 | * Reset a descriptor by flushing its packet buffer and clearing the | |
696 | * receive and drop counts. Should be called at splimp. | |
697 | */ | |
698 | static void | |
699 | reset_d(d) | |
700 | struct bpf_d *d; | |
701 | { | |
702 | if (d->bd_hbuf) { | |
703 | /* Free the hold buffer. */ | |
704 | d->bd_fbuf = d->bd_hbuf; | |
705 | d->bd_hbuf = 0; | |
706 | } | |
707 | d->bd_slen = 0; | |
708 | d->bd_hlen = 0; | |
709 | d->bd_rcount = 0; | |
710 | d->bd_dcount = 0; | |
711 | } | |
712 | ||
713 | /* | |
714 | * FIONREAD Check for read packet available. | |
715 | * SIOCGIFADDR Get interface address - convenient hook to driver. | |
716 | * BIOCGBLEN Get buffer len [for read()]. | |
717 | * BIOCSETF Set ethernet read filter. | |
718 | * BIOCFLUSH Flush read packet buffer. | |
719 | * BIOCPROMISC Put interface into promiscuous mode. | |
720 | * BIOCGDLT Get link layer type. | |
721 | * BIOCGETIF Get interface name. | |
722 | * BIOCSETIF Set interface. | |
723 | * BIOCSRTIMEOUT Set read timeout. | |
724 | * BIOCGRTIMEOUT Get read timeout. | |
725 | * BIOCGSTATS Get packet stats. | |
726 | * BIOCIMMEDIATE Set immediate mode. | |
727 | * BIOCVERSION Get filter language version. | |
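| * BIOCSBLEN Set buffer len. | |
| * BIOCSRSIG Set receive signal. | |
| * BIOCGRSIG Get receive signal. | |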
728 | */ | |
729 | /* ARGSUSED */ | |
730 | int | |
731 | bpfioctl(dev, cmd, addr, flags, p) | |
732 | dev_t dev; | |
733 | u_long cmd; | |
734 | caddr_t addr; | |
735 | int flags; | |
736 | struct proc *p; | |
737 | { | |
738 | register struct bpf_d *d; | |
739 | int s, error = 0; | |
740 | ||
741 | ||
742 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
743 | d = &bpf_dtab[minor(dev)]; | |
744 | ||
745 | switch (cmd) { | |
746 | ||
747 | default: | |
748 | error = EINVAL; | |
749 | break; | |
750 | ||
751 | /* | |
752 | * Check for read packet available. | |
753 | */ | |
754 | case FIONREAD: | |
755 | { | |
756 | int n; | |
757 | ||
758 | s = splimp(); | |
759 | n = d->bd_slen; | |
760 | if (d->bd_hbuf) | |
761 | n += d->bd_hlen; | |
762 | splx(s); | |
763 | ||
764 | *(int *)addr = n; | |
765 | break; | |
766 | } | |
767 | ||
768 | case SIOCGIFADDR: | |
769 | { | |
770 | struct ifnet *ifp; | |
771 | ||
772 | if (d->bd_bif == 0) | |
773 | error = EINVAL; | |
774 | else { | |
775 | ifp = d->bd_bif->bif_ifp; | |
776 | error = (*ifp->if_ioctl)(ifp, cmd, addr); | |
777 | } | |
778 | break; | |
779 | } | |
780 | ||
781 | /* | |
782 | * Get buffer len [for read()]. | |
783 | */ | |
784 | case BIOCGBLEN: | |
785 | *(u_int *)addr = d->bd_bufsize; | |
786 | break; | |
787 | ||
788 | /* | |
789 | * Set buffer length. | |
790 | */ | |
791 | case BIOCSBLEN: | |
792 | #if BSD < 199103 | |
793 | error = EINVAL; | |
794 | #else | |
795 | if (d->bd_bif != 0) | |
796 | error = EINVAL; | |
797 | else { | |
798 | register u_int size = *(u_int *)addr; | |
799 | ||
800 | if (size > BPF_MAXBUFSIZE) | |
801 | *(u_int *)addr = size = BPF_MAXBUFSIZE; | |
802 | else if (size < BPF_MINBUFSIZE) | |
803 | *(u_int *)addr = size = BPF_MINBUFSIZE; | |
804 | d->bd_bufsize = size; | |
805 | } | |
806 | #endif | |
807 | break; | |
808 | ||
809 | /* | |
810 | * Set link layer read filter. | |
811 | */ | |
812 | case BIOCSETF: | |
813 | error = bpf_setf(d, (struct bpf_program *)addr); | |
814 | break; | |
815 | ||
816 | /* | |
817 | * Flush read packet buffer. | |
818 | */ | |
819 | case BIOCFLUSH: | |
820 | s = splimp(); | |
821 | reset_d(d); | |
822 | splx(s); | |
823 | break; | |
824 | ||
825 | /* | |
826 | * Put interface into promiscuous mode. | |
827 | */ | |
828 | case BIOCPROMISC: | |
829 | if (d->bd_bif == 0) { | |
830 | /* | |
831 | * No interface attached yet. | |
832 | */ | |
833 | error = EINVAL; | |
834 | break; | |
835 | } | |
836 | s = splimp(); | |
837 | if (d->bd_promisc == 0) { | |
838 | error = ifpromisc(d->bd_bif->bif_ifp, 1); | |
839 | if (error == 0) | |
840 | d->bd_promisc = 1; | |
841 | } | |
842 | splx(s); | |
843 | break; | |
844 | ||
845 | /* | |
846 | * Get device parameters. | |
847 | */ | |
848 | case BIOCGDLT: | |
849 | if (d->bd_bif == 0) | |
850 | error = EINVAL; | |
851 | else | |
852 | *(u_int *)addr = d->bd_bif->bif_dlt; | |
853 | break; | |
854 | ||
855 | /* | |
856 | * Get interface name. | |
857 | */ | |
858 | case BIOCGETIF: | |
859 | if (d->bd_bif == 0) | |
860 | error = EINVAL; | |
861 | else | |
862 | bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); | |
863 | break; | |
864 | ||
865 | /* | |
866 | * Set interface. | |
867 | */ | |
868 | case BIOCSETIF: | |
869 | error = bpf_setif(d, (struct ifreq *)addr); | |
870 | break; | |
871 | ||
872 | /* | |
873 | * Set read timeout. | |
874 | */ | |
875 | case BIOCSRTIMEOUT: | |
876 | { | |
877 | struct timeval *tv = (struct timeval *)addr; | |
878 | ||
879 | /* | |
880 | * Subtract 1 tick from tvtohz() since this isn't | |
881 | * a one-shot timer. | |
882 | */ | |
883 | if ((error = itimerfix(tv)) == 0) | |
884 | d->bd_rtout = tvtohz(tv) - 1; | |
885 | break; | |
886 | } | |
887 | ||
888 | /* | |
889 | * Get read timeout. | |
890 | */ | |
891 | case BIOCGRTIMEOUT: | |
892 | { | |
893 | struct timeval *tv = (struct timeval *)addr; | |
894 | ||
895 | tv->tv_sec = d->bd_rtout / hz; | |
896 | tv->tv_usec = (d->bd_rtout % hz) * tick; | |
897 | break; | |
898 | } | |
899 | ||
900 | /* | |
901 | * Get packet stats. | |
902 | */ | |
903 | case BIOCGSTATS: | |
904 | { | |
905 | struct bpf_stat *bs = (struct bpf_stat *)addr; | |
906 | ||
907 | bs->bs_recv = d->bd_rcount; | |
908 | bs->bs_drop = d->bd_dcount; | |
909 | break; | |
910 | } | |
911 | ||
912 | /* | |
913 | * Set immediate mode. | |
914 | */ | |
915 | case BIOCIMMEDIATE: | |
916 | d->bd_immediate = *(u_int *)addr; | |
917 | break; | |
918 | ||
919 | case BIOCVERSION: | |
920 | { | |
921 | struct bpf_version *bv = (struct bpf_version *)addr; | |
922 | ||
923 | bv->bv_major = BPF_MAJOR_VERSION; | |
924 | bv->bv_minor = BPF_MINOR_VERSION; | |
925 | break; | |
926 | } | |
927 | ||
928 | case FIONBIO: /* Non-blocking I/O */ | |
929 | break; | |
930 | ||
931 | case FIOASYNC: /* Send signal on receive packets */ | |
932 | d->bd_async = *(int *)addr; | |
933 | break; | |
934 | #if ISFB31 | |
935 | case FIOSETOWN: | |
936 | error = fsetown(*(int *)addr, &d->bd_sigio); | |
937 | break; | |
938 | ||
939 | case FIOGETOWN: | |
940 | *(int *)addr = fgetown(d->bd_sigio); | |
941 | break; | |
942 | ||
943 | /* This is deprecated, FIOSETOWN should be used instead. */ | |
944 | case TIOCSPGRP: | |
945 | error = fsetown(-(*(int *)addr), &d->bd_sigio); | |
946 | break; | |
947 | ||
948 | /* This is deprecated, FIOGETOWN should be used instead. */ | |
949 | case TIOCGPGRP: | |
950 | *(int *)addr = -fgetown(d->bd_sigio); | |
951 | break; | |
952 | #endif | |
953 | case BIOCSRSIG: /* Set receive signal */ | |
954 | { | |
955 | u_int sig; | |
956 | ||
957 | sig = *(u_int *)addr; | |
958 | ||
959 | if (sig >= NSIG) | |
960 | error = EINVAL; | |
961 | else | |
962 | d->bd_sig = sig; | |
963 | break; | |
964 | } | |
965 | case BIOCGRSIG: | |
966 | *(u_int *)addr = d->bd_sig; | |
967 | break; | |
968 | } | |
969 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
970 | return (error); | |
971 | } | |
972 | ||
973 | /* | |
974 | * Set d's packet filter program to fp. If this file already has a filter, | |
975 | * free it and replace it. Returns EINVAL for bogus requests. | |
976 | */ | |
977 | static int | |
978 | bpf_setf(d, fp) | |
979 | struct bpf_d *d; | |
980 | struct bpf_program *fp; | |
981 | { | |
982 | struct bpf_insn *fcode, *old; | |
983 | u_int flen, size; | |
984 | int s; | |
985 | ||
986 | old = d->bd_filter; | |
987 | if (fp->bf_insns == 0) { | |
988 | if (fp->bf_len != 0) | |
989 | return (EINVAL); | |
990 | s = splimp(); | |
991 | d->bd_filter = 0; | |
992 | reset_d(d); | |
993 | splx(s); | |
994 | if (old != 0) | |
995 | FREE((caddr_t)old, M_DEVBUF); | |
996 | return (0); | |
997 | } | |
998 | flen = fp->bf_len; | |
999 | if (flen > BPF_MAXINSNS) | |
1000 | return (EINVAL); | |
1001 | ||
1002 | size = flen * sizeof(*fp->bf_insns); | |
1003 | fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); | |
1004 | if (fcode == NULL) | |
1005 | return (ENOBUFS); | |
1006 | if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && |
1007 | bpf_validate(fcode, (int)flen)) { | |
1008 | s = splimp(); | |
1009 | d->bd_filter = fcode; | |
1010 | reset_d(d); | |
1011 | splx(s); | |
1012 | if (old != 0) | |
1013 | FREE((caddr_t)old, M_DEVBUF); | |
1014 | ||
1015 | return (0); | |
1016 | } | |
1017 | FREE((caddr_t)fcode, M_DEVBUF); | |
1018 | return (EINVAL); | |
1019 | } | |
1020 | ||
1021 | /* | |
1022 | * Detach a file from its current interface (if attached at all) and attach | |
1023 | * to the interface indicated by the name stored in ifr. | |
1024 | * Return an errno or 0. | |
1025 | */ | |
1026 | static int | |
1027 | bpf_setif(d, ifr) | |
1028 | struct bpf_d *d; | |
1029 | struct ifreq *ifr; | |
1030 | { | |
1031 | struct bpf_if *bp; | |
1032 | int s, error; | |
1033 | struct ifnet *theywant; | |
1034 | ||
1035 | theywant = ifunit(ifr->ifr_name); | |
1036 | if (theywant == 0) | |
1037 | return ENXIO; | |
1038 | ||
1039 | /* | |
1040 | * Look through attached interfaces for the named one. | |
1041 | */ | |
1042 | for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { | |
1043 | struct ifnet *ifp = bp->bif_ifp; | |
1044 | ||
1045 | if (ifp == 0 || ifp != theywant) | |
1046 | continue; | |
1047 | /* | |
1048 | * We found the requested interface. | |
1049 | * If it's not up, return an error. | |
1050 | * Allocate the packet buffers if we need to. | |
1051 | * If we're already attached to requested interface, | |
1052 | * just flush the buffer. | |
1053 | */ | |
1054 | if ((ifp->if_flags & IFF_UP) == 0) | |
1055 | return (ENETDOWN); | |
1056 | ||
1057 | if (d->bd_sbuf == 0) { | |
1058 | error = bpf_allocbufs(d); | |
1059 | if (error != 0) | |
1060 | return (error); | |
1061 | } | |
1062 | s = splimp(); | |
1063 | if (bp != d->bd_bif) { | |
1064 | if (d->bd_bif) | |
1065 | /* | |
1066 | * Detach if attached to something else. | |
1067 | */ | |
1068 | bpf_detachd(d); | |
1069 | ||
1070 | bpf_attachd(d, bp); | |
1071 | } | |
1072 | reset_d(d); | |
1073 | splx(s); | |
1074 | return (0); | |
1075 | } | |
1076 | /* Not found. */ | |
1077 | return (ENXIO); | |
1078 | } | |
1079 | ||
1080 | /* | |
1081 | * Convert an interface name plus unit number of an ifp to a single | |
1082 | * name which is returned in the ifr. | |
1083 | */ | |
1084 | static void | |
1085 | bpf_ifname(ifp, ifr) | |
1086 | struct ifnet *ifp; | |
1087 | struct ifreq *ifr; | |
1088 | { | |
1089 | char *s = ifp->if_name; | |
1090 | char *d = ifr->ifr_name; | |
1091 | ||
1092 | while (*d++ = *s++) | |
1093 | continue; | |
1094 | d--; /* back to the null */ | |
1095 | /* XXX Assume that unit number is less than 10. */ | |
1096 | *d++ = ifp->if_unit + '0'; | |
1097 | *d = '\0'; | |
1098 | } | |
1099 | ||
1100 | ||
1101 | ||
1102 | /* | |
1103 | * Support for select() and poll() system calls | |
1104 | * | |
1105 | * Return true iff the specific operation will not block indefinitely. | |
1106 | * Otherwise, return false but make a note that a selwakeup() must be done. | |
1107 | */ | |
1108 | int | |
1109 | bpfpoll(dev, events, wql, p) | |
1110 | register dev_t dev; | |
1111 | int events; | |
1112 | void * wql; | |
1113 | struct proc *p; | |
1114 | { | |
1115 | register struct bpf_d *d; | |
1116 | register int s; | |
1117 | int revents = 0; | |
1118 | ||
1119 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
1120 | /* | |
1121 | * An imitation of the FIONREAD ioctl code. | |
1122 | */ | |
1123 | d = &bpf_dtab[minor(dev)]; | |
1124 | ||
1125 | s = splimp(); | |
1126 | if (events & (POLLIN | POLLRDNORM)) | |
1127 | if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) | |
1128 | revents |= events & (POLLIN | POLLRDNORM); | |
1129 | else | |
1130 | selrecord(p, &d->bd_sel, wql); | |
1131 | ||
1132 | splx(s); | |
1133 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
1134 | return (revents); | |
1135 | } | |
1136 | ||
1137 | /* | |
1138 | * Incoming linkage from device drivers. Process the packet pkt, of length | |
1139 | * pktlen, which is stored in a contiguous buffer. The packet is parsed | |
1140 | * by each process' filter, and if accepted, stashed into the corresponding | |
1141 | * buffer. | |
1142 | */ | |
1143 | void | |
1144 | bpf_tap(ifp, pkt, pktlen) | |
1145 | struct ifnet *ifp; | |
1146 | register u_char *pkt; | |
1147 | register u_int pktlen; | |
1148 | { | |
1149 | struct bpf_if *bp; | |
1150 | register struct bpf_d *d; | |
1151 | register u_int slen; | |
1152 | /* | |
1153 | * Note that the ipl does not have to be raised at this point. | |
1154 | * The only problem that could arise here would be if two different | |
1155 | * interfaces shared any data, which is not the case. | |
1156 | */ | |
1157 | thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); | |
1158 | if ((bp = ifp->if_bpf)) { | |
1159 | for (d = bp->bif_dlist; d != 0; d = d->bd_next) { | |
1160 | ++d->bd_rcount; | |
1161 | slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); | |
1162 | if (slen != 0) | |
1163 | catchpacket(d, pkt, pktlen, slen, bcopy); | |
1164 | } | |
1165 | } | |
1166 | thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); | |
1167 | } | |
1168 | ||
1169 | /* | |
1170 | * Copy data from an mbuf chain into a buffer. This code is derived | |
1171 | * from m_copydata in sys/uipc_mbuf.c. | |
1172 | */ | |
1173 | static void | |
1174 | bpf_mcopy(src_arg, dst_arg, len) | |
1175 | const void *src_arg; | |
1176 | void *dst_arg; | |
1177 | register size_t len; | |
1178 | { | |
1179 | register const struct mbuf *m; | |
1180 | register u_int count; | |
1181 | u_char *dst; | |
1182 | ||
1183 | m = src_arg; | |
1184 | dst = dst_arg; | |
1185 | while (len > 0) { | |
1186 | if (m == 0) | |
1187 | panic("bpf_mcopy"); | |
1188 | count = min(m->m_len, len); | |
1189 | bcopy(mtod(m, void *), dst, count); | |
1190 | m = m->m_next; | |
1191 | dst += count; | |
1192 | len -= count; | |
1193 | } | |
1194 | } | |
1195 | ||
1196 | /* | |
1197 | * Incoming linkage from device drivers, when packet is in an mbuf chain. | |
1198 | */ | |
1199 | void | |
1200 | bpf_mtap(ifp, m) | |
1201 | struct ifnet *ifp; | |
1202 | struct mbuf *m; | |
1203 | { | |
1204 | struct bpf_if *bp = ifp->if_bpf; | |
1205 | struct bpf_d *d; | |
1206 | u_int pktlen, slen; | |
1207 | struct mbuf *m0; | |
1208 | ||
1209 | pktlen = 0; | |
1210 | for (m0 = m; m0 != 0; m0 = m0->m_next) | |
1211 | pktlen += m0->m_len; | |
1212 | ||
1213 | for (d = bp->bif_dlist; d != 0; d = d->bd_next) { | |
1214 | ++d->bd_rcount; | |
1215 | slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); | |
1216 | if (slen != 0) | |
1217 | catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); | |
1218 | } | |
1219 | } | |
1220 | ||
1221 | /* | |
1222 | * Move the packet data from interface memory (pkt) into the | |
1223 | * store buffer. Return 1 if it's time to wakeup a listener (buffer full), | |
1224 | * otherwise 0. "copy" is the routine called to do the actual data | |
1225 | * transfer. bcopy is passed in to copy contiguous chunks, while | |
1226 | * bpf_mcopy is passed in to copy mbuf chains. In the latter case, | |
1227 | * pkt is really an mbuf. | |
1228 | */ | |
1229 | static void | |
1230 | catchpacket(d, pkt, pktlen, snaplen, cpfn) | |
1231 | register struct bpf_d *d; | |
1232 | register u_char *pkt; | |
1233 | register u_int pktlen, snaplen; | |
1234 | register void (*cpfn) __P((const void *, void *, size_t)); | |
1235 | { | |
1236 | register struct bpf_hdr *hp; | |
1237 | register int totlen, curlen; | |
1238 | register int hdrlen = d->bd_bif->bif_hdrlen; | |
1239 | /* | |
1240 | * Figure out how many bytes to move. If the packet is | |
1241 | * greater or equal to the snapshot length, transfer that | |
1242 | * much. Otherwise, transfer the whole packet (unless | |
1243 | * we hit the buffer size limit). | |
1244 | */ | |
1245 | totlen = hdrlen + min(snaplen, pktlen); | |
1246 | if (totlen > d->bd_bufsize) | |
1247 | totlen = d->bd_bufsize; | |
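| /* If this clamp takes effect the capture is truncated, so bh_caplen | |
| * below ends up smaller than the requested snaplen. */ | |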
1248 | ||
1249 | /* | |
1250 | * Round up the end of the previous packet to the next longword. | |
1251 | */ | |
1252 | curlen = BPF_WORDALIGN(d->bd_slen); | |
1253 | if (curlen + totlen > d->bd_bufsize) { | |
1254 | /* | |
1255 | * This packet will overflow the storage buffer. | |
1256 | * Rotate the buffers if we can, then wakeup any | |
1257 | * pending reads. | |
1258 | */ | |
1259 | if (d->bd_fbuf == 0) { | |
1260 | /* | |
1261 | * We haven't completed the previous read yet, | |
1262 | * so drop the packet. | |
1263 | */ | |
1264 | ++d->bd_dcount; | |
1265 | return; | |
1266 | } | |
1267 | ROTATE_BUFFERS(d); | |
1268 | bpf_wakeup(d); | |
1269 | curlen = 0; | |
1270 | } | |
1271 | else if (d->bd_immediate) | |
1272 | /* | |
1273 | * Immediate mode is set. A packet arrived so any | |
1274 | * reads should be woken up. | |
1275 | */ | |
1276 | bpf_wakeup(d); | |
1277 | ||
1278 | /* | |
1279 | * Append the bpf header. | |
1280 | */ | |
1281 | hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); | |
1282 | #if BSD >= 199103 | |
1283 | microtime(&hp->bh_tstamp); | |
1284 | #elif defined(sun) | |
1285 | uniqtime(&hp->bh_tstamp); | |
1286 | #else | |
1287 | hp->bh_tstamp = time; | |
1288 | #endif | |
1289 | hp->bh_datalen = pktlen; | |
1290 | hp->bh_hdrlen = hdrlen; | |
1291 | /* | |
1292 | * Copy the packet data into the store buffer and update its length. | |
1293 | */ | |
1294 | (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); | |
1295 | d->bd_slen = curlen + totlen; | |
1296 | } | |
1297 | ||
1298 | /* | |
1299 | * Initialize all nonzero fields of a descriptor. | |
1300 | */ | |
1301 | static int | |
1302 | bpf_allocbufs(d) | |
1303 | register struct bpf_d *d; | |
1304 | { | |
1305 | d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); | |
1306 | if (d->bd_fbuf == 0) | |
1307 | return (ENOBUFS); | |
1308 | ||
1309 | d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); | |
1310 | if (d->bd_sbuf == 0) { | |
1311 | FREE(d->bd_fbuf, M_DEVBUF); | |
1312 | return (ENOBUFS); | |
1313 | } | |
1314 | d->bd_slen = 0; | |
1315 | d->bd_hlen = 0; | |
1316 | return (0); | |
1317 | } | |
1318 | ||
1319 | /* | |
1320 | * Free buffers currently in use by a descriptor. | |
1321 | * Called on close. | |
1322 | */ | |
1323 | static void | |
1324 | bpf_freed(d) | |
1325 | register struct bpf_d *d; | |
1326 | { | |
1327 | /* | |
1328 | * We don't need to lock out interrupts since this descriptor has | |
1329 | * been detached from its interface but has not yet been marked | |
1330 | * free. | |
1331 | */ | |
1332 | if (d->bd_sbuf != 0) { | |
1333 | FREE(d->bd_sbuf, M_DEVBUF); | |
1334 | if (d->bd_hbuf != 0) | |
1335 | FREE(d->bd_hbuf, M_DEVBUF); | |
1336 | if (d->bd_fbuf != 0) | |
1337 | FREE(d->bd_fbuf, M_DEVBUF); | |
1338 | } | |
1339 | if (d->bd_filter) | |
1340 | FREE((caddr_t)d->bd_filter, M_DEVBUF); | |
1341 | ||
1342 | D_MARKFREE(d); | |
1343 | } | |
1344 | ||
1345 | /* | |
1346 | * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) | |
1347 | * in the driver's softc; dlt is the link layer type; hdrlen is the fixed | |
1348 | * size of the link header (variable length headers not yet supported). | |
1349 | */ | |
1350 | void | |
1351 | bpfattach(ifp, dlt, hdrlen) | |
1352 | struct ifnet *ifp; | |
1353 | u_int dlt, hdrlen; | |
1354 | { | |
1355 | struct bpf_if *bp; | |
1356 | int i; | |
1357 | bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT); | |
1358 | if (bp == 0) |
1359 | panic("bpfattach"); | |
1360 | ||
1361 | bp->bif_dlist = 0; | |
1362 | bp->bif_ifp = ifp; | |
1363 | bp->bif_dlt = dlt; | |
1364 | ||
1365 | bp->bif_next = bpf_iflist; | |
1366 | bpf_iflist = bp; | |
1367 | ||
1368 | bp->bif_ifp->if_bpf = 0; | |
1369 | ||
1370 | /* | |
1371 | * Compute the length of the bpf header. This is not necessarily | |
1372 | * equal to SIZEOF_BPF_HDR because we want to insert spacing such | |
1373 | * that the network layer header begins on a longword boundary (for | |
1374 | * performance reasons and to alleviate alignment restrictions). | |
1375 | */ | |
1376 | bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; | |
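| /* For example, with a 14-byte Ethernet header the pad is chosen so that | |
| * hdrlen + bif_hdrlen is a longword multiple, which places the IP header | |
| * on an aligned boundary within the capture buffer. */ | |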
1377 | ||
1378 | /* | |
1379 | * Mark all the descriptors free if this hasn't been done. | |
1380 | */ | |
1381 | if (!bpf_dtab_init) { | |
1382 | for (i = 0; i < nbpfilter; ++i) | |
1383 | D_MARKFREE(&bpf_dtab[i]); | |
1384 | bpf_dtab_init = 1; | |
1385 | } | |
1386 | #if 0 | |
1387 | if (bootverbose) | |
1388 | printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); | |
1389 | #endif | |
1390 | } | |
1391 | ||
1392 | static void *bpf_devfs_token[NBPFILTER]; | |
1393 | ||
1394 | static int bpf_devsw_installed; | |
1395 | ||
1396 | void bpf_init __P((void *unused)); | |
1397 | void | |
1398 | bpf_init(unused) | |
1399 | void *unused; | |
1400 | { | |
1401 | int i; | |
1402 | int maj; | |
1403 | ||
1404 | if (!bpf_devsw_installed ) { | |
1405 | bpf_devsw_installed = 1; | |
1406 | maj = cdevsw_add(BPF_MAJOR, &bpf_cdevsw); | |
1407 | if (maj == -1) { | |
1408 | printf("bpf_init: failed to allocate a major number!\n"); | |
1409 | nbpfilter = 0; | |
1410 | return; | |
1411 | } | |
1412 | for (i = 0 ; i < nbpfilter; i++) { | |
1413 | bpf_devfs_token[i] = devfs_make_node(makedev(maj, i), | |
1414 | DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, | |
1415 | "bpf%x", i); | |
1416 | } | |
1417 | } | |
1418 | } | |
1419 | ||
1420 | /* | |
1421 | SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) | |
1422 | */ | |
1423 | ||
1424 | #endif |