/*
 * bsd/net/bpf.c — Berkeley Packet Filter device driver.
 * Source: Apple xnu-517 (extracted from a git-blame view; blame
 * annotations removed).
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
43866e37
A
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * Copyright (c) 1990, 1991, 1993
27 * The Regents of the University of California. All rights reserved.
28 *
29 * This code is derived from the Stanford/CMU enet packet filter,
30 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
31 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
32 * Berkeley Laboratory.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement:
44 * This product includes software developed by the University of
45 * California, Berkeley and its contributors.
46 * 4. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 *
62 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
63 *
9bccf70c 64 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
1c79356b
A
65 */
66
9bccf70c 67#include "bpf.h"
1c79356b
A
68
69#ifndef __GNUC__
70#define inline
71#else
72#define inline __inline
73#endif
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/conf.h>
78#include <sys/malloc.h>
79#include <sys/mbuf.h>
80#include <sys/time.h>
81#include <sys/proc.h>
1c79356b
A
82#include <sys/signalvar.h>
83#include <sys/filio.h>
84#include <sys/sockio.h>
85#include <sys/ttycom.h>
86#include <sys/filedesc.h>
87
9bccf70c
A
88#if defined(sparc) && BSD < 199103
89#include <sys/stream.h>
90#endif
91#include <sys/poll.h>
92
1c79356b
A
93#include <sys/socket.h>
94#include <sys/vnode.h>
95
96#include <net/if.h>
97#include <net/bpf.h>
98#include <net/bpfdesc.h>
99
100#include <netinet/in.h>
101#include <netinet/if_ether.h>
102#include <sys/kernel.h>
103#include <sys/sysctl.h>
55e303ae 104#include <net/firewire.h>
1c79356b 105
55e303ae 106#include <machine/ansi.h>
1c79356b
A
107#include <miscfs/devfs/devfs.h>
108#include <net/dlil.h>
109
#if NBPFILTER > 0

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

/* Sleep priority for reads blocked waiting for packets. */
#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds pointer to the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
#ifdef __APPLE__
/*
 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
 */
static struct bpf_d	**bpf_dtab = NULL;	/* grown on demand by bpf_dtab_grow() */
static int bpf_dtab_size = 0;			/* current capacity of bpf_dtab */
static int nbpfilter = 0;			/* number of descriptors created */

/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)
#endif /* __APPLE__ */

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

/*static void *bpf_devfs_token[MAXBPFILTER];*/

static int bpf_devsw_installed;

void bpf_init __P((void *unused));

/*
 * Darwin differs from BSD here, the following are static
 * on BSD and not static on Darwin.
 */
	d_open_t	bpfopen;
	d_close_t	bpfclose;
	d_read_t	bpfread;
	d_write_t	bpfwrite;
	d_ioctl_t	bpfioctl;
	select_fcn_t	bpfpoll;

#ifdef __APPLE__
void	bpf_mtap(struct ifnet *, struct mbuf *);

int	bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
	bpfpoll();
#endif

/* Darwin's cdevsw struct differs slightly from BSDs */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* stop */	nulldev,
	/* reset */	nulldev,
	/* tty */	NULL,
	/* select */	bpfpoll,
	/* mmap */	eno_mmap,
	/* strategy*/	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};

/* Bytes of sockaddr before sa_data begins (family/len header). */
#define SOCKADDR_HDR_LEN	offsetof(struct sockaddr, sa_data)
1c79356b
A
225static int
226bpf_movein(uio, linktype, mp, sockp, datlen)
227 register struct uio *uio;
228 int linktype, *datlen;
229 register struct mbuf **mp;
230 register struct sockaddr *sockp;
231{
232 struct mbuf *m;
233 int error;
234 int len;
235 int hlen;
236
237 /*
238 * Build a sockaddr based on the data link layer type.
239 * We do this at this level because the ethernet header
240 * is copied directly into the data field of the sockaddr.
241 * In the case of SLIP, there is no header and the packet
242 * is forwarded as is.
243 * Also, we are careful to leave room at the front of the mbuf
244 * for the link level header.
245 */
246 switch (linktype) {
247
248 case DLT_SLIP:
249 sockp->sa_family = AF_INET;
250 hlen = 0;
251 break;
252
253 case DLT_EN10MB:
254 sockp->sa_family = AF_UNSPEC;
255 /* XXX Would MAXLINKHDR be better? */
256 hlen = sizeof(struct ether_header);
257 break;
258
259 case DLT_FDDI:
260#if defined(__FreeBSD__) || defined(__bsdi__)
261 sockp->sa_family = AF_IMPLINK;
262 hlen = 0;
263#else
264 sockp->sa_family = AF_UNSPEC;
265 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
266 hlen = 24;
267#endif
268 break;
269
270 case DLT_RAW:
271 case DLT_NULL:
272 sockp->sa_family = AF_UNSPEC;
273 hlen = 0;
274 break;
275
276#ifdef __FreeBSD__
277 case DLT_ATM_RFC1483:
278 /*
279 * en atm driver requires 4-byte atm pseudo header.
280 * though it isn't standard, vpi:vci needs to be
281 * specified anyway.
282 */
283 sockp->sa_family = AF_UNSPEC;
284 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
285 break;
286#endif
9bccf70c
A
287 case DLT_PPP:
288 sockp->sa_family = AF_UNSPEC;
289 hlen = 4; /* This should match PPP_HDRLEN */
290 break;
1c79356b 291
55e303ae
A
292 case DLT_APPLE_IP_OVER_IEEE1394:
293 sockp->sa_family = AF_UNSPEC;
294 hlen = sizeof(struct firewire_header);
295 break;
296
1c79356b
A
297 default:
298 return (EIO);
299 }
55e303ae
A
300 if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
301 return (EIO);
302 }
1c79356b
A
303 len = uio->uio_resid;
304 *datlen = len - hlen;
305 if ((unsigned)len > MCLBYTES)
306 return (EIO);
307
308 MGETHDR(m, M_WAIT, MT_DATA);
309 if (m == 0)
310 return (ENOBUFS);
311 if (len > MHLEN) {
312#if BSD >= 199103
313 MCLGET(m, M_WAIT);
314 if ((m->m_flags & M_EXT) == 0) {
315#else
316 MCLGET(m);
317 if (m->m_len != MCLBYTES) {
318#endif
319 error = ENOBUFS;
320 goto bad;
321 }
322 }
323 m->m_pkthdr.len = m->m_len = len;
324 m->m_pkthdr.rcvif = NULL;
325 *mp = m;
326 /*
327 * Make room for link header.
328 */
329 if (hlen != 0) {
330 m->m_pkthdr.len -= hlen;
331 m->m_len -= hlen;
332#if BSD >= 199103
333 m->m_data += hlen; /* XXX */
334#else
335 m->m_off += hlen;
336#endif
337 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
338 if (error)
339 goto bad;
340 }
341 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
342 if (!error)
343 return (0);
344 bad:
345 m_freem(m);
346 return (error);
347}
348
9bccf70c
A
349#ifdef __APPLE__
350/* Callback registered with Ethernet driver. */
1c79356b
A
351int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
352{
353 boolean_t funnel_state;
354
355 funnel_state = thread_funnel_set(network_flock, TRUE);
356
357 /*
358 * Do nothing if the BPF tap has been turned off.
359 * This is to protect from a potential race where this
360 * call blocks on the funnel lock. And in the meantime
361 * BPF is turned off, which will clear if_bpf.
362 */
363 if (ifp->if_bpf)
364 bpf_mtap(ifp, m);
365
366 thread_funnel_set(network_flock, funnel_state);
367 return 0;
368}
55e303ae
A
369
370/*
371 * Returns 1 on sucess, 0 on failure
372 */
373static int
374bpf_dtab_grow(int increment)
375{
376 struct bpf_d **new_dtab = NULL;
377
378 new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * (bpf_dtab_size + increment), M_DEVBUF, M_WAIT);
379 if (new_dtab == NULL)
380 return 0;
381
382 if (bpf_dtab) {
383 struct bpf_d **old_dtab;
384
385 bcopy(bpf_dtab, new_dtab, sizeof(struct bpf_d *) * bpf_dtab_size);
386 /*
387 * replace must be atomic with respect to free do bpf_dtab
388 * is always valid.
389 */
390 old_dtab = bpf_dtab;
391 bpf_dtab = new_dtab;
392 _FREE(old_dtab, M_DEVBUF);
393 }
394 else bpf_dtab = new_dtab;
395
396 bzero(bpf_dtab + bpf_dtab_size, sizeof(struct bpf_d *) * increment);
397
398 bpf_dtab_size += increment;
399
400 return 1;
401}
402
403static struct bpf_d *
404bpf_make_dev_t(int maj)
405{
406 struct bpf_d *d;
407
408 if (nbpfilter >= bpf_dtab_size && bpf_dtab_grow(NBPFILTER) == 0)
409 return NULL;
410
411 d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
412 if (d != NULL) {
413 int i = nbpfilter++;
414
415 bzero(d, sizeof(struct bpf_d));
416 bpf_dtab[i] = d;
417 D_MARKFREE(bpf_dtab[i]);
418 /*bpf_devfs_token[i] = */devfs_make_node(makedev(maj, i),
419 DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
420 "bpf%d", i);
421 }
422 return d;
423}
424
9bccf70c 425#endif
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;

#ifdef __APPLE__
	/* Ask the driver to start diverting both directions to the tap. */
	if (bp->bif_ifp->if_set_bpf_tap)
		(*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
#endif
}
452
453/*
454 * Detach a file from its interface.
455 */
456static void
457bpf_detachd(d)
458 struct bpf_d *d;
459{
460 struct bpf_d **p;
461 struct bpf_if *bp;
9bccf70c 462#ifdef __APPLE__
1c79356b
A
463 struct ifnet *ifp;
464
465 ifp = d->bd_bif->bif_ifp;
9bccf70c
A
466
467#endif
1c79356b
A
468
469 bp = d->bd_bif;
470 /*
471 * Check if this descriptor had requested promiscuous mode.
472 * If so, turn it off.
473 */
474 if (d->bd_promisc) {
475 d->bd_promisc = 0;
476 if (ifpromisc(bp->bif_ifp, 0))
477 /*
478 * Something is really wrong if we were able to put
479 * the driver into promiscuous mode, but can't
480 * take it out.
9bccf70c 481 * Most likely the network interface is gone.
1c79356b 482 */
9bccf70c 483 printf("bpf: ifpromisc failed");
1c79356b
A
484 }
485 /* Remove d from the interface's descriptor list. */
486 p = &bp->bif_dlist;
487 while (*p != d) {
488 p = &(*p)->bd_next;
489 if (*p == 0)
490 panic("bpf_detachd: descriptor not in list");
491 }
492 *p = (*p)->bd_next;
9bccf70c 493 if (bp->bif_dlist == 0) {
1c79356b
A
494 /*
495 * Let the driver know that there are no more listeners.
496 */
9bccf70c
A
497 if (ifp->if_set_bpf_tap)
498 (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);
1c79356b 499 d->bd_bif->bif_ifp->if_bpf = 0;
9bccf70c 500 }
1c79356b
A
501 d->bd_bif = 0;
502}
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

#ifdef __APPLE__
	/* new device nodes on demand when opening the last one */
	if (minor(dev) == nbpfilter - 1)
		bpf_make_dev_t(major(dev));

	if (minor(dev) >= nbpfilter)
		return (ENXIO);

	d = bpf_dtab[minor(dev)];

	/* Network state is funnel-protected; take it for the open. */
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
#else
	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
#endif
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
#ifdef __APPLE__
	if (!D_ISFREE(d)) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EBUSY);
	}

	/* Mark "free" and do most initialization. */
	/* NOTE: bzero also clears bd_next, i.e. D_MARKUSED(d). */
	bzero((char *)d, sizeof(*d));
#else
	if (d)
		return (EBUSY);
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));
	dev->si_drv1 = d;
#endif
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;	/* by default, see packets we send too */

#ifdef __APPLE__
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
#endif

	return (0);
}
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
#ifdef __APPLE__
	struct bpf_d **bpf_dtab_schk;
#endif

#ifndef __APPLE__
	funsetown(d->bd_sigio);
#endif
	s = splimp();
#ifdef __APPLE__
again:
	d = bpf_dtab[minor(dev)];
	/* Snapshot the table pointer to detect a concurrent grow. */
	bpf_dtab_schk = bpf_dtab;
#endif
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

#ifdef __APPLE__
	/*
	 * If someone grows bpf_dtab[] while we were waiting for the
	 * funnel, then we will be pointing off into freed memory;
	 * check to see if this is the case.
	 */
	if (bpf_dtab_schk != bpf_dtab) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		goto again;
	}
#endif

	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
#ifdef __APPLE__
	/* Clear any threads still blocked in select() on this descriptor. */
	selthreadclear(&d->bd_sel);
#endif
	bpf_freed(d);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}
/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
/*
 * Timer callout: record that the read timeout fired and wake the
 * sleeping reader.  Runs from timer context, so it must take the
 * network funnel itself.
 */
static
bpf_timeout(arg)
	caddr_t arg;
{
	boolean_t funnel_state;
	struct bpf_d *d = (struct bpf_d *)arg;
	funnel_state = thread_funnel_set(network_flock, TRUE);
	d->bd_timedout = 1;
	wakeup(arg);
	(void) thread_funnel_set(network_flock, FALSE);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep() with sleep() + timeout(): returns EWOULDBLOCK on
 * timer expiry, EINTR if the sleep was interrupted, 0 otherwise.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so the multi-statement expansion stays
 * a single statement even in an unbraced if/else body (CERT PRE10-C).
 */
#define ROTATE_BUFFERS(d) \
	do { \
		(d)->bd_hbuf = (d)->bd_sbuf; \
		(d)->bd_hlen = (d)->bd_slen; \
		(d)->bd_sbuf = (d)->bd_fbuf; \
		(d)->bd_slen = 0; \
		(d)->bd_fbuf = 0; \
	} while (0)
/*
 * bpfread - read next chunk of packets from buffers
 *
 * Blocks (honoring the BIOCSRTIMEOUT timeout and O_NDELAY) until the
 * hold buffer is full, then copies it out in one piece; the caller's
 * buffer must be exactly bd_bufsize bytes.
 */
	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d;
	int error;
	int s;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EINVAL);
	}

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	/* Recycle the drained hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
/*
 * If there are processes sleeping on this descriptor, wake them up.
 * Also posts SIGIO to the async owner (if any) and wakes select().
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
#ifndef __APPLE__
	/* XXX */
	d->bd_sel.si_pid = 0;
#endif
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
807
55e303ae
A
808/* keep in sync with bpf_movein above: */
809#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
810
1c79356b
A
811 int
812bpfwrite(dev, uio, ioflag)
813 dev_t dev;
814 struct uio *uio;
815 int ioflag;
816{
817 register struct bpf_d *d;
1c79356b
A
818 struct ifnet *ifp;
819 struct mbuf *m;
820 int error, s;
55e303ae 821 char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
1c79356b
A
822 int datlen;
823
55e303ae
A
824 d = bpf_dtab[minor(dev)];
825
1c79356b 826 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c 827
1c79356b
A
828 if (d->bd_bif == 0) {
829 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
830 return (ENXIO);
831 }
832
833 ifp = d->bd_bif->bif_ifp;
834
835 if (uio->uio_resid == 0) {
836 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
837 return (0);
838 }
55e303ae
A
839 ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
840 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m,
841 (struct sockaddr *)dst_buf, &datlen);
1c79356b
A
842 if (error) {
843 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
844 return (error);
845 }
846
847 if (datlen > ifp->if_mtu) {
848 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
849 return (EMSGSIZE);
850 }
851
55e303ae
A
852 if (d->bd_hdrcmplt) {
853 ((struct sockaddr *)dst_buf)->sa_family = pseudo_AF_HDRCMPLT;
854 }
1c79356b 855
9bccf70c 856 s = splnet();
1c79356b 857
55e303ae
A
858 error = dlil_output(ifptodlt(ifp, PF_INET), m,
859 (caddr_t) 0, (struct sockaddr *)dst_buf, 0);
1c79356b
A
860
861 splx(s);
862 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1c79356b
A
863 /*
864 * The driver frees the mbuf.
865 */
866 return (error);
867}
868
869/*
870 * Reset a descriptor by flushing its packet buffer and clearing the
871 * receive and drop counts. Should be called at splimp.
872 */
873static void
874reset_d(d)
875 struct bpf_d *d;
876{
877 if (d->bd_hbuf) {
878 /* Free the hold buffer. */
879 d->bd_fbuf = d->bd_hbuf;
880 d->bd_hbuf = 0;
881 }
882 d->bd_slen = 0;
883 d->bd_hlen = 0;
884 d->bd_rcount = 0;
885 d->bd_dcount = 0;
886}
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d;
	int s, error = 0;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		/* Only allowed before an interface is attached. */
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			/* Clamp to [BPF_MINBUFSIZE, bpf_maxbufsize] and
			 * report the clamped value back to the caller. */
			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;
#ifndef __APPLE__
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif
	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
1184
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * A request with bf_insns == NULL and bf_len == 0 removes the current
 * filter.  Otherwise the program is copied in from user space, validated
 * with bpf_validate(), and installed atomically (under splimp) so the
 * tap paths never see a half-installed program.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		/* NULL program: a non-zero length is a malformed request. */
		if (fp->bf_len != 0)
			return (EINVAL);
		/* Clear the filter and flush buffered packets atomically. */
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		/* Free the old program outside the raised spl section. */
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	/* BPF_MAXINSNS bound also keeps the size computation from overflowing. */
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
#ifdef __APPLE__
	/* _MALLOC with M_WAIT can still fail on Darwin; guard it. */
	if (fcode == NULL)
		return (ENOBUFS);
#endif
	/* Install only if the copy succeeds and the program verifies safe. */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);	/* discard packets matched by the old filter */
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	/* Bad user copy or invalid program: release the new buffer. */
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
1234
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 *
 * The descriptor's capture buffers are allocated lazily here on first
 * attach; re-attaching to the same interface only flushes the buffers.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	/* Resolve the interface name (e.g. "en0") to an ifnet. */
	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		/* First attach for this descriptor: allocate hold/store bufs. */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		/* Raise spl so the tap paths don't run mid-switch. */
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);	/* flush any packets captured on the old interface */
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
1293
1c79356b
A
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 *
 * Funnel protocol: entered holding the kernel funnel; the body runs under
 * the network funnel and every exit path switches back before returning.
 */
int
bpfpoll(dev, events, wql, p)
	register dev_t dev;
	int events;
	void * wql;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	/* Not attached to an interface: nothing can ever become readable. */
	if (d->bd_bif == NULL) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (ENXIO);
	}

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		/*
		 * Readable if the hold buffer has data, or, in immediate
		 * mode, if the store buffer has data; otherwise record the
		 * selector so a later catchpacket() can wake it.
		 */
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel, wql);
	}
	splx(s);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (revents);
}
1333
1334/*
1335 * Incoming linkage from device drivers. Process the packet pkt, of length
1336 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1337 * by each process' filter, and if accepted, stashed into the corresponding
1338 * buffer.
1339 */
1340void
1341bpf_tap(ifp, pkt, pktlen)
1342 struct ifnet *ifp;
1343 register u_char *pkt;
1344 register u_int pktlen;
1345{
1346 struct bpf_if *bp;
1347 register struct bpf_d *d;
1348 register u_int slen;
1349 /*
1350 * Note that the ipl does not have to be raised at this point.
1351 * The only problem that could arise here is that if two different
1352 * interfaces shared any data. This is not the case.
1353 */
1354 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c
A
1355 bp = ifp->if_bpf;
1356#ifdef __APPLE__
1357 if (bp) {
1358#endif
1359 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1360 ++d->bd_rcount;
1361 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1362 if (slen != 0)
1363 catchpacket(d, pkt, pktlen, slen, bcopy);
1364 }
1365#ifdef __APPLE__
1c79356b
A
1366 }
1367 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c 1368#endif
1c79356b
A
1369}
1370
1371/*
1372 * Copy data from an mbuf chain into a buffer. This code is derived
1373 * from m_copydata in sys/uipc_mbuf.c.
1374 */
1375static void
1376bpf_mcopy(src_arg, dst_arg, len)
1377 const void *src_arg;
1378 void *dst_arg;
1379 register size_t len;
1380{
1381 register const struct mbuf *m;
1382 register u_int count;
1383 u_char *dst;
1384
1385 m = src_arg;
1386 dst = dst_arg;
1387 while (len > 0) {
1388 if (m == 0)
1389 panic("bpf_mcopy");
1390 count = min(m->m_len, len);
55e303ae 1391 bcopy(mtod((struct mbuf *)m, void *), dst, count);
1c79356b
A
1392 m = m->m_next;
1393 dst += count;
1394 len -= count;
1395 }
1396}
1397
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 *
 * NOTE(review): ifp->if_bpf is dereferenced without a NULL check here,
 * unlike bpf_tap(); presumably callers only invoke this when a listener
 * is attached -- confirm against the driver-side BPF_MTAP usage.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	/* Total packet length is the sum over the whole mbuf chain. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/*
		 * A NULL rcvif marks a locally originated packet; skip it
		 * unless this descriptor enabled BIOCSSEESENT.
		 */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		/* buflen == 0 tells bpf_filter the data is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}
1424
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 *
 * NOTE(review): despite the comment above, this routine returns void in
 * this version; the wakeup is performed internally via bpf_wakeup().
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	/* bh_datalen is the full wire length; bh_caplen (below) what we kept. */
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1501
1502/*
1503 * Initialize all nonzero fields of a descriptor.
1504 */
1505static int
1506bpf_allocbufs(d)
1507 register struct bpf_d *d;
1508{
1509 d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1510 if (d->bd_fbuf == 0)
1511 return (ENOBUFS);
1512
1513 d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1514 if (d->bd_sbuf == 0) {
1515 FREE(d->bd_fbuf, M_DEVBUF);
1516 return (ENOBUFS);
1517 }
1518 d->bd_slen = 0;
1519 d->bd_hlen = 0;
1520 return (0);
1521}
1522
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 *
 * bd_sbuf doubles as the "buffers were allocated" flag (see
 * bpf_allocbufs/bpf_setif); hold and free buffers may be NULL
 * independently depending on rotation state.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		FREE(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			FREE(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			FREE(d->bd_fbuf, M_DEVBUF);
	}
	/* Release any installed filter program (see bpf_setf). */
	if (d->bd_filter)
		FREE((caddr_t)d->bd_filter, M_DEVBUF);

	/* Return the descriptor slot to the free pool. */
	D_MARKFREE(d);
}
1548
1549/*
1550 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1551 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1552 * size of the link header (variable length headers not yet supported).
1553 */
1554void
1555bpfattach(ifp, dlt, hdrlen)
1556 struct ifnet *ifp;
1557 u_int dlt, hdrlen;
1558{
1559 struct bpf_if *bp;
1560 int i;
0b4e3aa0 1561 bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT);
1c79356b
A
1562 if (bp == 0)
1563 panic("bpfattach");
1564
1565 bp->bif_dlist = 0;
1566 bp->bif_ifp = ifp;
1567 bp->bif_dlt = dlt;
1568
1569 bp->bif_next = bpf_iflist;
1570 bpf_iflist = bp;
1571
1572 bp->bif_ifp->if_bpf = 0;
1573
1574 /*
1575 * Compute the length of the bpf header. This is not necessarily
1576 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1577 * that the network layer header begins on a longword boundary (for
1578 * performance reasons and to alleviate alignment restrictions).
1579 */
1580 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1581
55e303ae 1582#ifndef __APPLE__
1c79356b
A
1583 if (bootverbose)
1584 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1585#endif
1586}
1587
9bccf70c
A
1588/*
1589 * Detach bpf from an interface. This involves detaching each descriptor
1590 * associated with the interface, and leaving bd_bif NULL. Notify each
1591 * descriptor as it's detached so that any sleepers wake up and get
1592 * ENXIO.
1593 */
1594void
1595bpfdetach(ifp)
1596 struct ifnet *ifp;
1597{
1598 struct bpf_if *bp, *bp_prev;
1599 struct bpf_d *d;
1600 int s;
1601
1602 s = splimp();
1603
1604 /* Locate BPF interface information */
1605 bp_prev = NULL;
1606 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1607 if (ifp == bp->bif_ifp)
1608 break;
1609 bp_prev = bp;
1610 }
1611
1612#ifdef __APPLE__
1613 /* Check for no BPF interface information */
1614 if (bp == NULL) {
1615 return;
1616 }
1617#endif
1618
1619 /* Interface wasn't attached */
1620 if (bp->bif_ifp == NULL) {
1621 splx(s);
1622#ifndef __APPLE__
1623 printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1624 ifp->if_unit);
1625#endif
1626 return;
1627 }
1628
1629 while ((d = bp->bif_dlist) != NULL) {
1630 bpf_detachd(d);
1631 bpf_wakeup(d);
1632 }
1633
1634 if (bp_prev) {
1635 bp_prev->bif_next = bp->bif_next;
1636 } else {
1637 bpf_iflist = bp->bif_next;
1638 }
1639
1640 FREE(bp, M_DEVBUF);
1641
1642 splx(s);
1643}
1644
1c79356b
A
/*
 * One-time driver initialization: register the bpf character device
 * switch, grow the descriptor table to NBPFILTER entries, and create
 * the /dev nodes.  On non-Apple builds this reduces to a cdevsw_add.
 */
void
bpf_init(unused)
	void *unused;
{
#ifdef __APPLE__
	int i;
	int maj;

	/* Guard against being called more than once. */
	if (!bpf_devsw_installed ) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			printf("bpf_init: failed to allocate a major number!\n");
			nbpfilter = 0;	/* disable bpf entirely */
			return;
		}
		/* NOTE(review): on this failure path bpf_devsw_installed
		 * stays set and nbpfilter is not cleared -- confirm that
		 * later opens fail gracefully without a dtab. */
		if (bpf_dtab_grow(NBPFILTER) == 0) {
			printf("bpf_init: failed to allocate bpf_dtab\n");
			return;
		}
		/* Create one /dev/bpfN node per descriptor slot. */
		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
1672
9bccf70c 1673#ifndef __APPLE__
1c79356b 1674SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1c79356b 1675#endif
9bccf70c
A
1676
1677#else /* !BPF */
1678#ifndef __APPLE__
1679/*
1680 * NOP stubs to allow bpf-using drivers to load and function.
1681 *
1682 * A 'better' implementation would allow the core bpf functionality
1683 * to be loaded at runtime.
1684 */
1685
/* NOP stub: discard contiguous-buffer taps when BPF is compiled out. */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}
1693
/* NOP stub: discard mbuf-chain taps when BPF is compiled out. */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}
1700
/* NOP stub: interface attach is a no-op when BPF is compiled out. */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}
1707
/* NOP stub: interface detach is a no-op when BPF is compiled out. */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}
1713
/*
 * NOP stub: with no filter machinery, accept everything.
 * Returning (u_int)-1 is the conventional "no filter" result, i.e.
 * snap the entire packet.
 */
u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}
1723#endif /* !defined(__APPLE__) */
1724#endif /* NBPFILTER > 0 */