]> git.saurik.com Git - apple/xnu.git/blame - bsd/net/bpf.c
xnu-344.34.tar.gz
[apple/xnu.git] / bsd / net / bpf.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
de355530
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
de355530
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
de355530
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * Copyright (c) 1990, 1991, 1993
24 * The Regents of the University of California. All rights reserved.
25 *
26 * This code is derived from the Stanford/CMU enet packet filter,
27 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
28 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
29 * Berkeley Laboratory.
30 *
31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions
33 * are met:
34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution.
39 * 3. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 * This product includes software developed by the University of
42 * California, Berkeley and its contributors.
43 * 4. Neither the name of the University nor the names of its contributors
44 * may be used to endorse or promote products derived from this software
45 * without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
57 * SUCH DAMAGE.
58 *
59 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
60 *
9bccf70c 61 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
1c79356b
A
62 */
63
9bccf70c 64#include "bpf.h"
1c79356b
A
65
66#ifndef __GNUC__
67#define inline
68#else
69#define inline __inline
70#endif
71
72#include <sys/param.h>
73#include <sys/systm.h>
74#include <sys/conf.h>
75#include <sys/malloc.h>
76#include <sys/mbuf.h>
77#include <sys/time.h>
78#include <sys/proc.h>
1c79356b
A
79#include <sys/signalvar.h>
80#include <sys/filio.h>
81#include <sys/sockio.h>
82#include <sys/ttycom.h>
83#include <sys/filedesc.h>
84
9bccf70c
A
85#if defined(sparc) && BSD < 199103
86#include <sys/stream.h>
87#endif
88#include <sys/poll.h>
89
1c79356b
A
90#include <sys/socket.h>
91#include <sys/vnode.h>
92
93#include <net/if.h>
94#include <net/bpf.h>
95#include <net/bpfdesc.h>
96
97#include <netinet/in.h>
98#include <netinet/if_ether.h>
99#include <sys/kernel.h>
100#include <sys/sysctl.h>
101
102
103#include <miscfs/devfs/devfs.h>
104#include <net/dlil.h>
105
9bccf70c
A
106#if NBPFILTER > 0
107
1c79356b
A
108/*
109 * Older BSDs don't have kernel malloc.
110 */
111#if BSD < 199103
112extern bcopy();
113static caddr_t bpf_alloc();
9bccf70c 114#include <net/bpf_compat.h>
1c79356b
A
115#define BPF_BUFSIZE (MCLBYTES-8)
116#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
117#else
118#define BPF_BUFSIZE 4096
119#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
120#endif
121
122#define PRINET 26 /* interruptible */
123
124/*
125 * The default read buffer size is patchable.
126 */
127static int bpf_bufsize = BPF_BUFSIZE;
1c79356b
A
128SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
129 &bpf_bufsize, 0, "");
9bccf70c
A
130static int bpf_maxbufsize = BPF_MAXBUFSIZE;
131SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
132 &bpf_maxbufsize, 0, "");
1c79356b
A
133
134/*
135 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
136 * bpf_dtab holds the descriptors, indexed by minor device #
137 */
138static struct bpf_if *bpf_iflist;
9bccf70c
A
139#ifdef __APPLE__
140/*
141 * BSD now stores the bpf_d in the dev_t which is a struct
142 * on their system. Our dev_t is an int, so we still store
143 * the bpf_d in a separate table indexed by minor device #.
144 */
1c79356b
A
145static struct bpf_d bpf_dtab[NBPFILTER];
146static int bpf_dtab_init;
147static int nbpfilter = NBPFILTER;
9bccf70c 148#endif
1c79356b
A
149
150static int bpf_allocbufs __P((struct bpf_d *));
151static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
152static void bpf_detachd __P((struct bpf_d *d));
153static void bpf_freed __P((struct bpf_d *));
1c79356b
A
154static void bpf_mcopy __P((const void *, void *, size_t));
155static int bpf_movein __P((struct uio *, int,
156 struct mbuf **, struct sockaddr *, int *));
157static int bpf_setif __P((struct bpf_d *, struct ifreq *));
158static inline void
159 bpf_wakeup __P((struct bpf_d *));
160static void catchpacket __P((struct bpf_d *, u_char *, u_int,
161 u_int, void (*)(const void *, void *, size_t)));
162static void reset_d __P((struct bpf_d *));
163static int bpf_setf __P((struct bpf_d *, struct bpf_program *));
164
9bccf70c
A
165/*
166 * Darwin differs from BSD here, the following are static
167 * on BSD and not static on Darwin.
168 */
1c79356b
A
169 d_open_t bpfopen;
170 d_close_t bpfclose;
171 d_read_t bpfread;
172 d_write_t bpfwrite;
173 d_ioctl_t bpfioctl;
9bccf70c 174 select_fcn_t bpfpoll;
1c79356b 175
9bccf70c 176#ifdef __APPLE__
1c79356b
A
177void bpf_mtap(struct ifnet *, struct mbuf *);
178
179int bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
180 bpfpoll();
9bccf70c 181#endif
1c79356b 182
9bccf70c
A
183/* Darwin's cdevsw struct differs slightly from BSDs */
184#define CDEV_MAJOR 23
1c79356b 185static struct cdevsw bpf_cdevsw = {
9bccf70c
A
186 /* open */ bpfopen,
187 /* close */ bpfclose,
188 /* read */ bpfread,
189 /* write */ bpfwrite,
190 /* ioctl */ bpfioctl,
191 /* stop */ nulldev,
192 /* reset */ nulldev,
193 /* tty */ NULL,
194 /* select */ bpfpoll,
195 /* mmap */ eno_mmap,
196 /* strategy*/ eno_strat,
197 /* getc */ eno_getc,
198 /* putc */ eno_putc,
199 /* type */ 0
1c79356b
A
200};
201
9bccf70c 202
1c79356b
A
203static int
204bpf_movein(uio, linktype, mp, sockp, datlen)
205 register struct uio *uio;
206 int linktype, *datlen;
207 register struct mbuf **mp;
208 register struct sockaddr *sockp;
209{
210 struct mbuf *m;
211 int error;
212 int len;
213 int hlen;
214
215 /*
216 * Build a sockaddr based on the data link layer type.
217 * We do this at this level because the ethernet header
218 * is copied directly into the data field of the sockaddr.
219 * In the case of SLIP, there is no header and the packet
220 * is forwarded as is.
221 * Also, we are careful to leave room at the front of the mbuf
222 * for the link level header.
223 */
224 switch (linktype) {
225
226 case DLT_SLIP:
227 sockp->sa_family = AF_INET;
228 hlen = 0;
229 break;
230
231 case DLT_EN10MB:
232 sockp->sa_family = AF_UNSPEC;
233 /* XXX Would MAXLINKHDR be better? */
234 hlen = sizeof(struct ether_header);
235 break;
236
237 case DLT_FDDI:
238#if defined(__FreeBSD__) || defined(__bsdi__)
239 sockp->sa_family = AF_IMPLINK;
240 hlen = 0;
241#else
242 sockp->sa_family = AF_UNSPEC;
243 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
244 hlen = 24;
245#endif
246 break;
247
248 case DLT_RAW:
249 case DLT_NULL:
250 sockp->sa_family = AF_UNSPEC;
251 hlen = 0;
252 break;
253
254#ifdef __FreeBSD__
255 case DLT_ATM_RFC1483:
256 /*
257 * en atm driver requires 4-byte atm pseudo header.
258 * though it isn't standard, vpi:vci needs to be
259 * specified anyway.
260 */
261 sockp->sa_family = AF_UNSPEC;
262 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
263 break;
264#endif
9bccf70c
A
265 case DLT_PPP:
266 sockp->sa_family = AF_UNSPEC;
267 hlen = 4; /* This should match PPP_HDRLEN */
268 break;
1c79356b
A
269
270 default:
271 return (EIO);
272 }
273
274 len = uio->uio_resid;
275 *datlen = len - hlen;
276 if ((unsigned)len > MCLBYTES)
277 return (EIO);
278
279 MGETHDR(m, M_WAIT, MT_DATA);
280 if (m == 0)
281 return (ENOBUFS);
282 if (len > MHLEN) {
283#if BSD >= 199103
284 MCLGET(m, M_WAIT);
285 if ((m->m_flags & M_EXT) == 0) {
286#else
287 MCLGET(m);
288 if (m->m_len != MCLBYTES) {
289#endif
290 error = ENOBUFS;
291 goto bad;
292 }
293 }
294 m->m_pkthdr.len = m->m_len = len;
295 m->m_pkthdr.rcvif = NULL;
296 *mp = m;
297 /*
298 * Make room for link header.
299 */
300 if (hlen != 0) {
301 m->m_pkthdr.len -= hlen;
302 m->m_len -= hlen;
303#if BSD >= 199103
304 m->m_data += hlen; /* XXX */
305#else
306 m->m_off += hlen;
307#endif
308 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
309 if (error)
310 goto bad;
311 }
312 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
313 if (!error)
314 return (0);
315 bad:
316 m_freem(m);
317 return (error);
318}
319
9bccf70c
A
#ifdef __APPLE__
/*
 * Callback registered with the Ethernet driver: hand an mbuf to the BPF
 * taps attached to ifp.  Always returns 0.
 */
int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	boolean_t saved_funnel;

	saved_funnel = thread_funnel_set(network_flock, TRUE);

	/*
	 * Do nothing if the BPF tap has been turned off.
	 * This protects against a race where this call blocks on the
	 * funnel lock while BPF is being turned off, which clears if_bpf.
	 */
	if (ifp->if_bpf != 0)
		bpf_mtap(ifp, m);

	thread_funnel_set(network_flock, saved_funnel);
	return 0;
}
#endif
1c79356b
A
341
342/*
343 * Attach file to the bpf interface, i.e. make d listen on bp.
344 * Must be called at splimp.
345 */
346static void
347bpf_attachd(d, bp)
348 struct bpf_d *d;
349 struct bpf_if *bp;
350{
1c79356b
A
351 /*
352 * Point d at bp, and add d to the interface's list of listeners.
353 * Finally, point the driver's bpf cookie at the interface so
354 * it will divert packets to bpf.
355 */
356 d->bd_bif = bp;
357 d->bd_next = bp->bif_dlist;
358 bp->bif_dlist = d;
359
360 bp->bif_ifp->if_bpf = bp;
1c79356b 361
9bccf70c
A
362#ifdef __APPLE__
363 if (bp->bif_ifp->if_set_bpf_tap)
364 (*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
365#endif
1c79356b
A
366}
367
368/*
369 * Detach a file from its interface.
370 */
371static void
372bpf_detachd(d)
373 struct bpf_d *d;
374{
375 struct bpf_d **p;
376 struct bpf_if *bp;
9bccf70c 377#ifdef __APPLE__
1c79356b
A
378 struct ifnet *ifp;
379
380 ifp = d->bd_bif->bif_ifp;
9bccf70c
A
381
382#endif
1c79356b
A
383
384 bp = d->bd_bif;
385 /*
386 * Check if this descriptor had requested promiscuous mode.
387 * If so, turn it off.
388 */
389 if (d->bd_promisc) {
390 d->bd_promisc = 0;
391 if (ifpromisc(bp->bif_ifp, 0))
392 /*
393 * Something is really wrong if we were able to put
394 * the driver into promiscuous mode, but can't
395 * take it out.
9bccf70c 396 * Most likely the network interface is gone.
1c79356b 397 */
9bccf70c 398 printf("bpf: ifpromisc failed");
1c79356b
A
399 }
400 /* Remove d from the interface's descriptor list. */
401 p = &bp->bif_dlist;
402 while (*p != d) {
403 p = &(*p)->bd_next;
404 if (*p == 0)
405 panic("bpf_detachd: descriptor not in list");
406 }
407 *p = (*p)->bd_next;
9bccf70c 408 if (bp->bif_dlist == 0) {
1c79356b
A
409 /*
410 * Let the driver know that there are no more listeners.
411 */
9bccf70c
A
412 if (ifp->if_set_bpf_tap)
413 (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);
1c79356b 414 d->bd_bif->bif_ifp->if_bpf = 0;
9bccf70c 415 }
1c79356b
A
416 d->bd_bif = 0;
417}
418
419
9bccf70c 420#ifdef __APPLE__
1c79356b
A
421/*
422 * Mark a descriptor free by making it point to itself.
423 * This is probably cheaper than marking with a constant since
424 * the address should be in a register anyway.
425 */
426#define D_ISFREE(d) ((d) == (d)->bd_next)
427#define D_MARKFREE(d) ((d)->bd_next = (d))
428#define D_MARKUSED(d) ((d)->bd_next = 0)
9bccf70c 429#endif
1c79356b
A
430/*
431 * Open ethernet device. Returns ENXIO for illegal minor device number,
432 * EBUSY if file is open by another process.
433 */
434/* ARGSUSED */
435 int
436bpfopen(dev, flags, fmt, p)
437 dev_t dev;
438 int flags;
439 int fmt;
440 struct proc *p;
441{
442 register struct bpf_d *d;
443
9bccf70c 444#ifdef __APPLE__
1c79356b
A
445 if (minor(dev) >= nbpfilter)
446 return (ENXIO);
447
448 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c
A
449
450 d = &bpf_dtab[minor(dev)];
451#else
452 if (p->p_prison)
453 return (EPERM);
454
455 d = dev->si_drv1;
456#endif
1c79356b 457 /*
9bccf70c 458 * Each minor can be opened by only one process. If the requested
1c79356b
A
459 * minor is in use, return EBUSY.
460 */
9bccf70c 461#ifdef __APPLE__
1c79356b
A
462 if (!D_ISFREE(d)) {
463 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
464 return (EBUSY);
465 }
9bccf70c 466
1c79356b
A
467 /* Mark "free" and do most initialization. */
468 bzero((char *)d, sizeof(*d));
9bccf70c
A
469#else
470 if (d)
471 return (EBUSY);
472 make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
473 MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
474 bzero(d, sizeof(*d));
475 dev->si_drv1 = d;
476#endif
1c79356b
A
477 d->bd_bufsize = bpf_bufsize;
478 d->bd_sig = SIGIO;
9bccf70c 479 d->bd_seesent = 1;
1c79356b
A
480 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
481 return (0);
482}
483
484/*
485 * Close the descriptor by detaching it from its interface,
486 * deallocating its buffers, and marking it free.
487 */
488/* ARGSUSED */
489 int
490bpfclose(dev, flags, fmt, p)
491 dev_t dev;
492 int flags;
493 int fmt;
494 struct proc *p;
495{
496 register struct bpf_d *d;
497 register int s;
498
499 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
500
9bccf70c
A
501#ifndef __APPLE__
502 funsetown(d->bd_sigio);
503#endif
1c79356b 504 s = splimp();
9bccf70c 505#ifdef __APPLE__
1c79356b 506 d = &bpf_dtab[minor(dev)];
9bccf70c 507#endif
1c79356b
A
508 if (d->bd_bif)
509 bpf_detachd(d);
510 splx(s);
9bccf70c 511#ifdef __APPLE__
0b4e3aa0 512 selthreadclear(&d->bd_sel);
9bccf70c 513#endif
1c79356b
A
514 bpf_freed(d);
515 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
516 return (0);
517}
518
519/*
520 * Support for SunOS, which does not have tsleep.
521 */
522#if BSD < 199103
523static
524bpf_timeout(arg)
525 caddr_t arg;
526{
527 boolean_t funnel_state;
528 struct bpf_d *d = (struct bpf_d *)arg;
1c79356b
A
529 funnel_state = thread_funnel_set(network_flock, TRUE);
530 d->bd_timedout = 1;
531 wakeup(arg);
532 (void) thread_funnel_set(network_flock, FALSE);
533}
534
535#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)
536
537int
538bpf_sleep(d)
539 register struct bpf_d *d;
540{
541 register int rto = d->bd_rtout;
542 register int st;
543
544 if (rto != 0) {
545 d->bd_timedout = 0;
546 timeout(bpf_timeout, (caddr_t)d, rto);
547 }
548 st = sleep((caddr_t)d, PRINET|PCATCH);
549 if (rto != 0) {
550 if (d->bd_timedout == 0)
551 untimeout(bpf_timeout, (caddr_t)d);
552 else if (st == 0)
553 return EWOULDBLOCK;
554 }
555 return (st != 0) ? EINTR : 0;
556}
557#else
558#define BPF_SLEEP tsleep
559#endif
560
561/*
562 * Rotate the packet buffers in descriptor d. Move the store buffer
563 * into the hold slot, and the free buffer into the store slot.
564 * Zero the length of the new store buffer.
565 */
566#define ROTATE_BUFFERS(d) \
567 (d)->bd_hbuf = (d)->bd_sbuf; \
568 (d)->bd_hlen = (d)->bd_slen; \
569 (d)->bd_sbuf = (d)->bd_fbuf; \
570 (d)->bd_slen = 0; \
571 (d)->bd_fbuf = 0;
572/*
573 * bpfread - read next chunk of packets from buffers
574 */
575 int
576bpfread(dev, uio, ioflag)
577 dev_t dev;
578 struct uio *uio;
579 int ioflag;
580{
581 register struct bpf_d *d;
582 int error;
583 int s;
584
1c79356b
A
585 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
586 d = &bpf_dtab[minor(dev)];
587
588 /*
589 * Restrict application to use a buffer the same size as
590 * as kernel buffers.
591 */
592 if (uio->uio_resid != d->bd_bufsize) {
9bccf70c 593 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1c79356b
A
594 return (EINVAL);
595 }
596
597 s = splimp();
598 /*
599 * If the hold buffer is empty, then do a timed sleep, which
600 * ends when the timeout expires or when enough packets
601 * have arrived to fill the store buffer.
602 */
603 while (d->bd_hbuf == 0) {
604 if (d->bd_immediate && d->bd_slen != 0) {
605 /*
606 * A packet(s) either arrived since the previous
607 * read or arrived while we were asleep.
608 * Rotate the buffers and return what's here.
609 */
610 ROTATE_BUFFERS(d);
611 break;
612 }
9bccf70c
A
613
614 /*
615 * No data is available, check to see if the bpf device
616 * is still pointed at a real interface. If not, return
617 * ENXIO so that the userland process knows to rebind
618 * it before using it again.
619 */
620 if (d->bd_bif == NULL) {
621 splx(s);
622 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
623 return (ENXIO);
624 }
625
1c79356b
A
626 if (ioflag & IO_NDELAY)
627 error = EWOULDBLOCK;
628 else
629 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
630 d->bd_rtout);
631 if (error == EINTR || error == ERESTART) {
632 splx(s);
633 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
634 return (error);
635 }
636 if (error == EWOULDBLOCK) {
637 /*
638 * On a timeout, return what's in the buffer,
639 * which may be nothing. If there is something
640 * in the store buffer, we can rotate the buffers.
641 */
642 if (d->bd_hbuf)
643 /*
644 * We filled up the buffer in between
645 * getting the timeout and arriving
646 * here, so we don't need to rotate.
647 */
648 break;
649
650 if (d->bd_slen == 0) {
651 splx(s);
652 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
653 return (0);
654 }
655 ROTATE_BUFFERS(d);
656 break;
657 }
658 }
659 /*
660 * At this point, we know we have something in the hold slot.
661 */
662 splx(s);
663
664 /*
665 * Move data from hold buffer into user space.
666 * We know the entire buffer is transferred since
667 * we checked above that the read buffer is bpf_bufsize bytes.
668 */
669 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
670
671 s = splimp();
672 d->bd_fbuf = d->bd_hbuf;
673 d->bd_hbuf = 0;
674 d->bd_hlen = 0;
675 splx(s);
676 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
677 return (error);
678}
679
680
681/*
682 * If there are processes sleeping on this descriptor, wake them up.
683 */
684static inline void
685bpf_wakeup(d)
686 register struct bpf_d *d;
687{
688 wakeup((caddr_t)d);
689 if (d->bd_async && d->bd_sig && d->bd_sigio)
690 pgsigio(d->bd_sigio, d->bd_sig, 0);
691
692#if BSD >= 199103
1c79356b 693 selwakeup(&d->bd_sel);
9bccf70c
A
694#ifndef __APPLE__
695 /* XXX */
696 d->bd_sel.si_pid = 0;
697#endif
1c79356b
A
698#else
699 if (d->bd_selproc) {
1c79356b 700 selwakeup(d->bd_selproc, (int)d->bd_selcoll);
1c79356b
A
701 d->bd_selcoll = 0;
702 d->bd_selproc = 0;
703 }
704#endif
705}
706
707 int
708bpfwrite(dev, uio, ioflag)
709 dev_t dev;
710 struct uio *uio;
711 int ioflag;
712{
713 register struct bpf_d *d;
1c79356b
A
714 struct ifnet *ifp;
715 struct mbuf *m;
716 int error, s;
717 static struct sockaddr dst;
718 int datlen;
719
1c79356b
A
720 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
721 d = &bpf_dtab[minor(dev)];
9bccf70c 722
1c79356b
A
723 if (d->bd_bif == 0) {
724 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
725 return (ENXIO);
726 }
727
728 ifp = d->bd_bif->bif_ifp;
729
730 if (uio->uio_resid == 0) {
731 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
732 return (0);
733 }
734
735 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
736 if (error) {
737 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
738 return (error);
739 }
740
741 if (datlen > ifp->if_mtu) {
742 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
743 return (EMSGSIZE);
744 }
745
9bccf70c
A
746 if (d->bd_hdrcmplt)
747 dst.sa_family = pseudo_AF_HDRCMPLT;
1c79356b 748
9bccf70c 749 s = splnet();
1c79356b 750
9bccf70c
A
751 error = dlil_output(ifp->if_data.default_proto, m,
752 (caddr_t) 0, &dst, 0);
1c79356b
A
753
754 splx(s);
755 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1c79356b
A
756 /*
757 * The driver frees the mbuf.
758 */
759 return (error);
760}
761
762/*
763 * Reset a descriptor by flushing its packet buffer and clearing the
764 * receive and drop counts. Should be called at splimp.
765 */
766static void
767reset_d(d)
768 struct bpf_d *d;
769{
770 if (d->bd_hbuf) {
771 /* Free the hold buffer. */
772 d->bd_fbuf = d->bd_hbuf;
773 d->bd_hbuf = 0;
774 }
775 d->bd_slen = 0;
776 d->bd_hlen = 0;
777 d->bd_rcount = 0;
778 d->bd_dcount = 0;
779}
780
781/*
782 * FIONREAD Check for read packet available.
783 * SIOCGIFADDR Get interface address - convenient hook to driver.
784 * BIOCGBLEN Get buffer len [for read()].
785 * BIOCSETF Set ethernet read filter.
786 * BIOCFLUSH Flush read packet buffer.
787 * BIOCPROMISC Put interface into promiscuous mode.
788 * BIOCGDLT Get link layer type.
789 * BIOCGETIF Get interface name.
790 * BIOCSETIF Set interface.
791 * BIOCSRTIMEOUT Set read timeout.
792 * BIOCGRTIMEOUT Get read timeout.
793 * BIOCGSTATS Get packet stats.
794 * BIOCIMMEDIATE Set immediate mode.
795 * BIOCVERSION Get filter language version.
9bccf70c
A
796 * BIOCGHDRCMPLT Get "header already complete" flag
797 * BIOCSHDRCMPLT Set "header already complete" flag
798 * BIOCGSEESENT Get "see packets sent" flag
799 * BIOCSSEESENT Set "see packets sent" flag
1c79356b
A
800 */
801/* ARGSUSED */
9bccf70c 802int
1c79356b
A
803bpfioctl(dev, cmd, addr, flags, p)
804 dev_t dev;
805 u_long cmd;
806 caddr_t addr;
807 int flags;
808 struct proc *p;
809{
810 register struct bpf_d *d;
811 int s, error = 0;
812
813
814 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
815 d = &bpf_dtab[minor(dev)];
816
817 switch (cmd) {
818
819 default:
820 error = EINVAL;
821 break;
822
823 /*
824 * Check for read packet available.
825 */
826 case FIONREAD:
827 {
828 int n;
829
830 s = splimp();
831 n = d->bd_slen;
832 if (d->bd_hbuf)
833 n += d->bd_hlen;
834 splx(s);
835
836 *(int *)addr = n;
837 break;
838 }
839
840 case SIOCGIFADDR:
841 {
842 struct ifnet *ifp;
843
844 if (d->bd_bif == 0)
845 error = EINVAL;
846 else {
847 ifp = d->bd_bif->bif_ifp;
848 error = (*ifp->if_ioctl)(ifp, cmd, addr);
849 }
850 break;
851 }
852
853 /*
854 * Get buffer len [for read()].
855 */
856 case BIOCGBLEN:
857 *(u_int *)addr = d->bd_bufsize;
858 break;
859
860 /*
861 * Set buffer length.
862 */
863 case BIOCSBLEN:
864#if BSD < 199103
865 error = EINVAL;
866#else
867 if (d->bd_bif != 0)
868 error = EINVAL;
869 else {
870 register u_int size = *(u_int *)addr;
871
9bccf70c
A
872 if (size > bpf_maxbufsize)
873 *(u_int *)addr = size = bpf_maxbufsize;
1c79356b
A
874 else if (size < BPF_MINBUFSIZE)
875 *(u_int *)addr = size = BPF_MINBUFSIZE;
876 d->bd_bufsize = size;
877 }
878#endif
879 break;
880
881 /*
882 * Set link layer read filter.
883 */
884 case BIOCSETF:
885 error = bpf_setf(d, (struct bpf_program *)addr);
886 break;
887
888 /*
889 * Flush read packet buffer.
890 */
891 case BIOCFLUSH:
892 s = splimp();
893 reset_d(d);
894 splx(s);
895 break;
896
897 /*
898 * Put interface into promiscuous mode.
899 */
900 case BIOCPROMISC:
901 if (d->bd_bif == 0) {
902 /*
903 * No interface attached yet.
904 */
905 error = EINVAL;
906 break;
907 }
908 s = splimp();
909 if (d->bd_promisc == 0) {
910 error = ifpromisc(d->bd_bif->bif_ifp, 1);
911 if (error == 0)
912 d->bd_promisc = 1;
913 }
914 splx(s);
915 break;
916
917 /*
918 * Get device parameters.
919 */
920 case BIOCGDLT:
921 if (d->bd_bif == 0)
922 error = EINVAL;
923 else
924 *(u_int *)addr = d->bd_bif->bif_dlt;
925 break;
926
927 /*
9bccf70c 928 * Get interface name.
1c79356b
A
929 */
930 case BIOCGETIF:
931 if (d->bd_bif == 0)
932 error = EINVAL;
9bccf70c
A
933 else {
934 struct ifnet *const ifp = d->bd_bif->bif_ifp;
935 struct ifreq *const ifr = (struct ifreq *)addr;
936
937 snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
938 "%s%d", ifp->if_name, ifp->if_unit);
939 }
1c79356b
A
940 break;
941
942 /*
943 * Set interface.
944 */
945 case BIOCSETIF:
946 error = bpf_setif(d, (struct ifreq *)addr);
947 break;
948
949 /*
950 * Set read timeout.
951 */
952 case BIOCSRTIMEOUT:
953 {
954 struct timeval *tv = (struct timeval *)addr;
955
956 /*
957 * Subtract 1 tick from tvtohz() since this isn't
958 * a one-shot timer.
959 */
960 if ((error = itimerfix(tv)) == 0)
961 d->bd_rtout = tvtohz(tv) - 1;
962 break;
963 }
964
965 /*
966 * Get read timeout.
967 */
968 case BIOCGRTIMEOUT:
969 {
970 struct timeval *tv = (struct timeval *)addr;
971
972 tv->tv_sec = d->bd_rtout / hz;
973 tv->tv_usec = (d->bd_rtout % hz) * tick;
974 break;
975 }
976
977 /*
978 * Get packet stats.
979 */
980 case BIOCGSTATS:
981 {
982 struct bpf_stat *bs = (struct bpf_stat *)addr;
983
984 bs->bs_recv = d->bd_rcount;
985 bs->bs_drop = d->bd_dcount;
986 break;
987 }
988
989 /*
990 * Set immediate mode.
991 */
992 case BIOCIMMEDIATE:
993 d->bd_immediate = *(u_int *)addr;
994 break;
995
996 case BIOCVERSION:
997 {
998 struct bpf_version *bv = (struct bpf_version *)addr;
999
1000 bv->bv_major = BPF_MAJOR_VERSION;
1001 bv->bv_minor = BPF_MINOR_VERSION;
1002 break;
1003 }
1004
9bccf70c
A
1005 /*
1006 * Get "header already complete" flag
1007 */
1008 case BIOCGHDRCMPLT:
1009 *(u_int *)addr = d->bd_hdrcmplt;
1010 break;
1011
1012 /*
1013 * Set "header already complete" flag
1014 */
1015 case BIOCSHDRCMPLT:
1016 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1017 break;
1018
1019 /*
1020 * Get "see sent packets" flag
1021 */
1022 case BIOCGSEESENT:
1023 *(u_int *)addr = d->bd_seesent;
1024 break;
1025
1026 /*
1027 * Set "see sent packets" flag
1028 */
1029 case BIOCSSEESENT:
1030 d->bd_seesent = *(u_int *)addr;
1031 break;
1032
1c79356b
A
1033 case FIONBIO: /* Non-blocking I/O */
1034 break;
1035
1036 case FIOASYNC: /* Send signal on receive packets */
1037 d->bd_async = *(int *)addr;
1038 break;
9bccf70c 1039#ifndef __APPLE__
1c79356b
A
1040 case FIOSETOWN:
1041 error = fsetown(*(int *)addr, &d->bd_sigio);
1042 break;
1043
1044 case FIOGETOWN:
1045 *(int *)addr = fgetown(d->bd_sigio);
1046 break;
1047
1048 /* This is deprecated, FIOSETOWN should be used instead. */
1049 case TIOCSPGRP:
1050 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1051 break;
1052
1053 /* This is deprecated, FIOGETOWN should be used instead. */
1054 case TIOCGPGRP:
1055 *(int *)addr = -fgetown(d->bd_sigio);
1056 break;
1057#endif
1058 case BIOCSRSIG: /* Set receive signal */
1059 {
1060 u_int sig;
1061
1062 sig = *(u_int *)addr;
1063
1064 if (sig >= NSIG)
1065 error = EINVAL;
1066 else
1067 d->bd_sig = sig;
1068 break;
1069 }
1070 case BIOCGRSIG:
1071 *(u_int *)addr = d->bd_sig;
1072 break;
1073 }
1074 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1075 return (error);
1076}
1077
1078/*
1079 * Set d's packet filter program to fp. If this file already has a filter,
1080 * free it and replace it. Returns EINVAL for bogus requests.
1081 */
1082static int
1083bpf_setf(d, fp)
1084 struct bpf_d *d;
1085 struct bpf_program *fp;
1086{
1087 struct bpf_insn *fcode, *old;
1088 u_int flen, size;
1089 int s;
1090
1091 old = d->bd_filter;
1092 if (fp->bf_insns == 0) {
1093 if (fp->bf_len != 0)
1094 return (EINVAL);
1095 s = splimp();
1096 d->bd_filter = 0;
1097 reset_d(d);
1098 splx(s);
1099 if (old != 0)
1100 FREE((caddr_t)old, M_DEVBUF);
1101 return (0);
1102 }
1103 flen = fp->bf_len;
1104 if (flen > BPF_MAXINSNS)
1105 return (EINVAL);
1106
1107 size = flen * sizeof(*fp->bf_insns);
1108 fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
9bccf70c 1109#ifdef __APPLE__
0b4e3aa0
A
1110 if (fcode == NULL)
1111 return (ENOBUFS);
9bccf70c 1112#endif
1c79356b
A
1113 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
1114 bpf_validate(fcode, (int)flen)) {
1115 s = splimp();
1116 d->bd_filter = fcode;
1117 reset_d(d);
1118 splx(s);
1119 if (old != 0)
1120 FREE((caddr_t)old, M_DEVBUF);
1121
1122 return (0);
1123 }
1124 FREE((caddr_t)fcode, M_DEVBUF);
1125 return (EINVAL);
1126}
1127
1128/*
1129 * Detach a file from its current interface (if attached at all) and attach
1130 * to the interface indicated by the name stored in ifr.
1131 * Return an errno or 0.
1132 */
1133static int
1134bpf_setif(d, ifr)
1135 struct bpf_d *d;
1136 struct ifreq *ifr;
1137{
1138 struct bpf_if *bp;
1139 int s, error;
1140 struct ifnet *theywant;
1141
1142 theywant = ifunit(ifr->ifr_name);
1143 if (theywant == 0)
1144 return ENXIO;
1145
1146 /*
1147 * Look through attached interfaces for the named one.
1148 */
1149 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
1150 struct ifnet *ifp = bp->bif_ifp;
1151
1152 if (ifp == 0 || ifp != theywant)
1153 continue;
1154 /*
1155 * We found the requested interface.
1156 * If it's not up, return an error.
1157 * Allocate the packet buffers if we need to.
1158 * If we're already attached to requested interface,
1159 * just flush the buffer.
1160 */
1161 if ((ifp->if_flags & IFF_UP) == 0)
1162 return (ENETDOWN);
1163
1164 if (d->bd_sbuf == 0) {
1165 error = bpf_allocbufs(d);
1166 if (error != 0)
1167 return (error);
1168 }
1169 s = splimp();
1170 if (bp != d->bd_bif) {
1171 if (d->bd_bif)
1172 /*
1173 * Detach if attached to something else.
1174 */
1175 bpf_detachd(d);
1176
1177 bpf_attachd(d, bp);
1178 }
1179 reset_d(d);
1180 splx(s);
1181 return (0);
1182 }
1183 /* Not found. */
1184 return (ENXIO);
1185}
1186
1c79356b
A
1187/*
1188 * Support for select() and poll() system calls
1189 *
1190 * Return true iff the specific operation will not block indefinitely.
1191 * Otherwise, return false but make a note that a selwakeup() must be done.
1192 */
1193int
0b4e3aa0 1194bpfpoll(dev, events, wql, p)
1c79356b
A
1195 register dev_t dev;
1196 int events;
0b4e3aa0 1197 void * wql;
1c79356b
A
1198 struct proc *p;
1199{
1200 register struct bpf_d *d;
1201 register int s;
1202 int revents = 0;
1203
1204 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
1205 /*
1206 * An imitation of the FIONREAD ioctl code.
1207 */
1208 d = &bpf_dtab[minor(dev)];
1209
9bccf70c
A
1210 if (d->bd_bif == NULL) {
1211 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1212 return (ENXIO);
1213 }
1214
1c79356b 1215 s = splimp();
9bccf70c 1216 if (events & (POLLIN | POLLRDNORM)) {
1c79356b
A
1217 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
1218 revents |= events & (POLLIN | POLLRDNORM);
1219 else
0b4e3aa0 1220 selrecord(p, &d->bd_sel, wql);
9bccf70c 1221 }
1c79356b
A
1222 splx(s);
1223 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
1224 return (revents);
1225}
1226
1227/*
1228 * Incoming linkage from device drivers. Process the packet pkt, of length
1229 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1230 * by each process' filter, and if accepted, stashed into the corresponding
1231 * buffer.
1232 */
1233void
1234bpf_tap(ifp, pkt, pktlen)
1235 struct ifnet *ifp;
1236 register u_char *pkt;
1237 register u_int pktlen;
1238{
1239 struct bpf_if *bp;
1240 register struct bpf_d *d;
1241 register u_int slen;
1242 /*
1243 * Note that the ipl does not have to be raised at this point.
1244 * The only problem that could arise here is that if two different
1245 * interfaces shared any data. This is not the case.
1246 */
1247 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c
A
1248 bp = ifp->if_bpf;
1249#ifdef __APPLE__
1250 if (bp) {
1251#endif
1252 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1253 ++d->bd_rcount;
1254 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1255 if (slen != 0)
1256 catchpacket(d, pkt, pktlen, slen, bcopy);
1257 }
1258#ifdef __APPLE__
1c79356b
A
1259 }
1260 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
9bccf70c 1261#endif
1c79356b
A
1262}
1263
1264/*
1265 * Copy data from an mbuf chain into a buffer. This code is derived
1266 * from m_copydata in sys/uipc_mbuf.c.
1267 */
1268static void
1269bpf_mcopy(src_arg, dst_arg, len)
1270 const void *src_arg;
1271 void *dst_arg;
1272 register size_t len;
1273{
1274 register const struct mbuf *m;
1275 register u_int count;
1276 u_char *dst;
1277
1278 m = src_arg;
1279 dst = dst_arg;
1280 while (len > 0) {
1281 if (m == 0)
1282 panic("bpf_mcopy");
1283 count = min(m->m_len, len);
1284 bcopy(mtod(m, void *), dst, count);
1285 m = m->m_next;
1286 dst += count;
1287 len -= count;
1288 }
1289}
1290
1291/*
1292 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1293 */
1294void
1295bpf_mtap(ifp, m)
1296 struct ifnet *ifp;
1297 struct mbuf *m;
1298{
1299 struct bpf_if *bp = ifp->if_bpf;
1300 struct bpf_d *d;
1301 u_int pktlen, slen;
1302 struct mbuf *m0;
1303
1304 pktlen = 0;
1305 for (m0 = m; m0 != 0; m0 = m0->m_next)
1306 pktlen += m0->m_len;
1307
1308 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
9bccf70c
A
1309 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1310 continue;
1c79356b
A
1311 ++d->bd_rcount;
1312 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1313 if (slen != 0)
1314 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1315 }
1316}
1317
1318/*
1319 * Move the packet data from interface memory (pkt) into the
1320 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1321 * otherwise 0. "copy" is the routine called to do the actual data
1322 * transfer. bcopy is passed in to copy contiguous chunks, while
1323 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1324 * pkt is really an mbuf.
1325 */
1326static void
1327catchpacket(d, pkt, pktlen, snaplen, cpfn)
1328 register struct bpf_d *d;
1329 register u_char *pkt;
1330 register u_int pktlen, snaplen;
1331 register void (*cpfn) __P((const void *, void *, size_t));
1332{
1333 register struct bpf_hdr *hp;
1334 register int totlen, curlen;
1335 register int hdrlen = d->bd_bif->bif_hdrlen;
1336 /*
1337 * Figure out how many bytes to move. If the packet is
1338 * greater or equal to the snapshot length, transfer that
1339 * much. Otherwise, transfer the whole packet (unless
1340 * we hit the buffer size limit).
1341 */
1342 totlen = hdrlen + min(snaplen, pktlen);
1343 if (totlen > d->bd_bufsize)
1344 totlen = d->bd_bufsize;
1345
1346 /*
1347 * Round up the end of the previous packet to the next longword.
1348 */
1349 curlen = BPF_WORDALIGN(d->bd_slen);
1350 if (curlen + totlen > d->bd_bufsize) {
1351 /*
1352 * This packet will overflow the storage buffer.
1353 * Rotate the buffers if we can, then wakeup any
1354 * pending reads.
1355 */
1356 if (d->bd_fbuf == 0) {
1357 /*
1358 * We haven't completed the previous read yet,
1359 * so drop the packet.
1360 */
1361 ++d->bd_dcount;
1362 return;
1363 }
1364 ROTATE_BUFFERS(d);
1365 bpf_wakeup(d);
1366 curlen = 0;
1367 }
1368 else if (d->bd_immediate)
1369 /*
1370 * Immediate mode is set. A packet arrived so any
1371 * reads should be woken up.
1372 */
1373 bpf_wakeup(d);
1374
1375 /*
1376 * Append the bpf header.
1377 */
1378 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1379#if BSD >= 199103
1380 microtime(&hp->bh_tstamp);
1381#elif defined(sun)
1382 uniqtime(&hp->bh_tstamp);
1383#else
1384 hp->bh_tstamp = time;
1385#endif
1386 hp->bh_datalen = pktlen;
1387 hp->bh_hdrlen = hdrlen;
1388 /*
1389 * Copy the packet data into the store buffer and update its length.
1390 */
1391 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1392 d->bd_slen = curlen + totlen;
1393}
1394
1395/*
1396 * Initialize all nonzero fields of a descriptor.
1397 */
1398static int
1399bpf_allocbufs(d)
1400 register struct bpf_d *d;
1401{
1402 d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1403 if (d->bd_fbuf == 0)
1404 return (ENOBUFS);
1405
1406 d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1407 if (d->bd_sbuf == 0) {
1408 FREE(d->bd_fbuf, M_DEVBUF);
1409 return (ENOBUFS);
1410 }
1411 d->bd_slen = 0;
1412 d->bd_hlen = 0;
1413 return (0);
1414}
1415
1416/*
1417 * Free buffers currently in use by a descriptor.
1418 * Called on close.
1419 */
1420static void
1421bpf_freed(d)
1422 register struct bpf_d *d;
1423{
1424 /*
1425 * We don't need to lock out interrupts since this descriptor has
1426 * been detached from its interface and it yet hasn't been marked
1427 * free.
1428 */
1429 if (d->bd_sbuf != 0) {
1430 FREE(d->bd_sbuf, M_DEVBUF);
1431 if (d->bd_hbuf != 0)
1432 FREE(d->bd_hbuf, M_DEVBUF);
1433 if (d->bd_fbuf != 0)
1434 FREE(d->bd_fbuf, M_DEVBUF);
1435 }
1436 if (d->bd_filter)
1437 FREE((caddr_t)d->bd_filter, M_DEVBUF);
1438
1439 D_MARKFREE(d);
1440}
1441
1442/*
1443 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1444 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1445 * size of the link header (variable length headers not yet supported).
1446 */
1447void
1448bpfattach(ifp, dlt, hdrlen)
1449 struct ifnet *ifp;
1450 u_int dlt, hdrlen;
1451{
1452 struct bpf_if *bp;
1453 int i;
0b4e3aa0 1454 bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT);
1c79356b
A
1455 if (bp == 0)
1456 panic("bpfattach");
1457
1458 bp->bif_dlist = 0;
1459 bp->bif_ifp = ifp;
1460 bp->bif_dlt = dlt;
1461
1462 bp->bif_next = bpf_iflist;
1463 bpf_iflist = bp;
1464
1465 bp->bif_ifp->if_bpf = 0;
1466
1467 /*
1468 * Compute the length of the bpf header. This is not necessarily
1469 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1470 * that the network layer header begins on a longword boundary (for
1471 * performance reasons and to alleviate alignment restrictions).
1472 */
1473 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1474
9bccf70c 1475#ifdef __APPLE__
1c79356b
A
1476 /*
1477 * Mark all the descriptors free if this hasn't been done.
1478 */
1479 if (!bpf_dtab_init) {
1480 for (i = 0; i < nbpfilter; ++i)
1481 D_MARKFREE(&bpf_dtab[i]);
1482 bpf_dtab_init = 1;
1483 }
9bccf70c 1484#else
1c79356b
A
1485 if (bootverbose)
1486 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1487#endif
1488}
1489
9bccf70c
A
1490/*
1491 * Detach bpf from an interface. This involves detaching each descriptor
1492 * associated with the interface, and leaving bd_bif NULL. Notify each
1493 * descriptor as it's detached so that any sleepers wake up and get
1494 * ENXIO.
1495 */
1496void
1497bpfdetach(ifp)
1498 struct ifnet *ifp;
1499{
1500 struct bpf_if *bp, *bp_prev;
1501 struct bpf_d *d;
1502 int s;
1503
1504 s = splimp();
1505
1506 /* Locate BPF interface information */
1507 bp_prev = NULL;
1508 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1509 if (ifp == bp->bif_ifp)
1510 break;
1511 bp_prev = bp;
1512 }
1513
1514#ifdef __APPLE__
1515 /* Check for no BPF interface information */
1516 if (bp == NULL) {
1517 return;
1518 }
1519#endif
1520
1521 /* Interface wasn't attached */
1522 if (bp->bif_ifp == NULL) {
1523 splx(s);
1524#ifndef __APPLE__
1525 printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1526 ifp->if_unit);
1527#endif
1528 return;
1529 }
1530
1531 while ((d = bp->bif_dlist) != NULL) {
1532 bpf_detachd(d);
1533 bpf_wakeup(d);
1534 }
1535
1536 if (bp_prev) {
1537 bp_prev->bif_next = bp->bif_next;
1538 } else {
1539 bpf_iflist = bp->bif_next;
1540 }
1541
1542 FREE(bp, M_DEVBUF);
1543
1544 splx(s);
1545}
1546
1c79356b
A
1547static void *bpf_devfs_token[NBPFILTER];
1548
1549static int bpf_devsw_installed;
1550
1551void bpf_init __P((void *unused));
9bccf70c 1552
1c79356b
A
1553void
1554bpf_init(unused)
1555 void *unused;
1556{
9bccf70c 1557#ifdef __APPLE__
1c79356b 1558 int i;
9bccf70c 1559 int maj;
1c79356b
A
1560
1561 if (!bpf_devsw_installed ) {
9bccf70c
A
1562 bpf_devsw_installed = 1;
1563 maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
1564 if (maj == -1) {
1565 printf("bpf_init: failed to allocate a major number!\n");
1566 nbpfilter = 0;
1567 return;
1568 }
1569 for (i = 0 ; i < nbpfilter; i++) {
1570 bpf_devfs_token[i] = devfs_make_node(makedev(maj, i),
1571 DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
1572 "bpf%x", i);
1573 }
1574 }
1575#else
1576 cdevsw_add(&bpf_cdevsw);
1577#endif
1c79356b
A
1578}
1579
9bccf70c 1580#ifndef __APPLE__
1c79356b 1581SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1c79356b 1582#endif
9bccf70c
A
1583
1584#else /* !BPF */
1585#ifndef __APPLE__
1586/*
1587 * NOP stubs to allow bpf-using drivers to load and function.
1588 *
1589 * A 'better' implementation would allow the core bpf functionality
1590 * to be loaded at runtime.
1591 */
1592
1593void
1594bpf_tap(ifp, pkt, pktlen)
1595 struct ifnet *ifp;
1596 register u_char *pkt;
1597 register u_int pktlen;
1598{
1599}
1600
void
bpf_mtap(struct ifnet *ifp, struct mbuf *m)
{
	/* NOP stub: BPF compiled out. */
}
1607
1608void
1609bpfattach(ifp, dlt, hdrlen)
1610 struct ifnet *ifp;
1611 u_int dlt, hdrlen;
1612{
1613}
1614
void
bpfdetach(struct ifnet *ifp)
{
	/* NOP stub: BPF compiled out. */
}
1620
1621u_int
1622bpf_filter(pc, p, wirelen, buflen)
1623 register const struct bpf_insn *pc;
1624 register u_char *p;
1625 u_int wirelen;
1626 register u_int buflen;
1627{
1628 return -1; /* "no filter" behaviour */
1629}
1630#endif /* !defined(__APPLE__) */
1631#endif /* NBPFILTER > 0 */