 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
#define inline __inline

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <net/firewire.h>

#include <machine/ansi.h>
#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>
 * Older BSDs don't have kernel malloc.
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)

#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)

#define PRINET	26		/* interruptible */
 * The default read buffer size is patchable.
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");
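/*
 * Added note (not in the original file): the two SYSCTL_INT declarations
 * above export these tunables under the debug node, i.e. as
 * debug.bpf_bufsize and debug.bpf_maxbufsize, so the default and maximum
 * capture buffer sizes can be inspected or changed at run time with
 * sysctl(8), e.g. "sysctl -w debug.bpf_maxbufsize=1048576" (the value shown
 * is only an example).
 */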
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds pointers to the descriptors, indexed by minor device #
static struct bpf_if	*bpf_iflist;

 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
static struct bpf_d	**bpf_dtab = NULL;
static int bpf_dtab_size = 0;
static int nbpfilter = 0;

 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)
#endif /* __APPLE__ */
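/*
 * Added note (not in the original file): with this scheme, a slot in
 * bpf_dtab[] can be probed with a single pointer comparison.  An
 * illustrative check might look like:
 *
 *	struct bpf_d *d = bpf_dtab[minor(dev)];
 *
 *	if (D_ISFREE(d))
 *		... no process currently has this minor open ...
 *
 * The open/close paths below use these macros in the same way when they
 * claim and release a minor device slot.
 */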
static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void	bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

/*static void *bpf_devfs_token[MAXBPFILTER];*/

static int bpf_devsw_installed;

void bpf_init __P((void *unused));
 * Darwin differs from BSD here: the following are static
 * on BSD but not static on Darwin.
select_fcn_t	bpfpoll;

void	bpf_mtap(struct ifnet *, struct mbuf *);

int	bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
/* Darwin's cdevsw struct differs slightly from BSD's */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* close */	bpfclose,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* select */	bpfpoll,
	/* strategy */	eno_strat,

#define SOCKADDR_HDR_LEN	offsetof(struct sockaddr, sa_data)
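/*
 * Added note (not in the original file): SOCKADDR_HDR_LEN is the number of
 * bytes of a struct sockaddr that come before sa_data.  On BSD-style
 * systems, where struct sockaddr begins with the one-byte sa_len and
 * sa_family fields, offsetof(struct sockaddr, sa_data) evaluates to 2, so
 * the "(hlen + SOCKADDR_HDR_LEN) > sockp->sa_len" test in bpf_movein()
 * below simply checks that the caller's sockaddr buffer is large enough to
 * hold the link-level header in sa_data.
 */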
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
		sockp->sa_family = AF_INET;

		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);

#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;

		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */

		sockp->sa_family = AF_UNSPEC;

	case DLT_ATM_RFC1483:
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */

		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */

	case DLT_APPLE_IP_OVER_IEEE1394:
		sockp->sa_family = AF_UNSPEC;
		hlen = sizeof(struct firewire_header);

	if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)

	MGETHDR(m, M_WAIT, MT_DATA);

	if ((m->m_flags & M_EXT) == 0) {

		if (m->m_len != MCLBYTES) {

	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;

	 * Make room for link header.
		m->m_pkthdr.len -= hlen;
		m->m_data += hlen; /* XXX */

		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);

	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
/* Callback registered with Ethernet driver. */
int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	 * Do nothing if the BPF tap has been turned off.
	 * This protects against a potential race where this call blocks on
	 * the funnel lock while, in the meantime, BPF is turned off, which
	 * clears if_bpf.

	thread_funnel_set(network_flock, funnel_state);
 * Returns 1 on success, 0 on failure
bpf_dtab_grow(int increment)
	struct bpf_d **new_dtab = NULL;

	new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * (bpf_dtab_size + increment), M_DEVBUF, M_WAIT);
	if (new_dtab == NULL)

		struct bpf_d **old_dtab;

		bcopy(bpf_dtab, new_dtab, sizeof(struct bpf_d *) * bpf_dtab_size);
		 * replace must be atomic with respect to free so bpf_dtab
		_FREE(old_dtab, M_DEVBUF);

	else bpf_dtab = new_dtab;

	bzero(bpf_dtab + bpf_dtab_size, sizeof(struct bpf_d *) * increment);

	bpf_dtab_size += increment;
static struct bpf_d *
bpf_make_dev_t(int maj)
	if (nbpfilter >= bpf_dtab_size && bpf_dtab_grow(NBPFILTER) == 0)

	d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
	bzero(d, sizeof(struct bpf_d));
	D_MARKFREE(bpf_dtab[i]);
	/*bpf_devfs_token[i] = */devfs_make_node(makedev(maj, i),
	    DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.

	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	d->bd_next = bp->bif_dlist;

	bp->bif_ifp->if_bpf = bp;

	if (bp->bif_ifp->if_set_bpf_tap)
		(*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
 * Detach a file from its interface.

	ifp = d->bd_bif->bif_ifp;

	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
		if (ifpromisc(bp->bif_ifp, 0))
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * Most likely the network interface is gone.
			printf("bpf: ifpromisc failed");

	/* Remove d from the interface's descriptor list. */
		panic("bpf_detachd: descriptor not in list");

	if (bp->bif_dlist == 0) {
		 * Let the driver know that there are no more listeners.
		if (ifp->if_set_bpf_tap)
			(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);
		d->bd_bif->bif_ifp->if_bpf = 0;
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.

bpfopen(dev, flags, fmt, p)
	register struct bpf_d *d;

	/* new device nodes on demand when opening the last one */
	if (minor(dev) == nbpfilter - 1)
		bpf_make_dev_t(major(dev));

	if (minor(dev) >= nbpfilter)

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	/* Mark "free" and do most initialization. */
	bzero((char *)d, sizeof(*d));

	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));

	d->bd_bufsize = bpf_bufsize;

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.

bpfclose(dev, flags, fmt, p)
	register struct bpf_d *d;
	struct bpf_d **bpf_dtab_schk;

	funsetown(d->bd_sigio);

	d = bpf_dtab[minor(dev)];
	bpf_dtab_schk = bpf_dtab;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	 * If someone grew bpf_dtab[] while we were waiting for the
	 * funnel, then we will be pointing off into freed memory;
	 * check to see if this is the case.
	if (bpf_dtab_schk != bpf_dtab) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	selthreadclear(&d->bd_sel);

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * Support for SunOS, which does not have tsleep.

	boolean_t funnel_state;
	struct bpf_d *d = (struct bpf_d *)arg;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	(void) thread_funnel_set(network_flock, FALSE);

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

	register struct bpf_d *d;
	register int rto = d->bd_rtout;

		timeout(bpf_timeout, (caddr_t)d, rto);

	st = sleep((caddr_t)d, PRINET|PCATCH);

		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);

	return (st != 0) ? EINTR : 0;

#define BPF_SLEEP tsleep
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
 * bpfread - read next chunk of packets from buffers

bpfread(dev, uio, ioflag)
	register struct bpf_d *d;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	 * Restrict application to use a buffer the same size as
	if (uio->uio_resid != d->bd_bufsize) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			 * One or more packets either arrived since the
			 * previous read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.

		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		if (d->bd_bif == NULL) {
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

		if (ioflag & IO_NDELAY)

		error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
		if (error == EINTR || error == ERESTART) {
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		if (error == EWOULDBLOCK) {
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
			if (d->bd_slen == 0) {
				thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	 * At this point, we know we have something in the hold slot.

	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	d->bd_fbuf = d->bd_hbuf;

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * If there are processes sleeping on this descriptor, wake them up.

	register struct bpf_d *d;

	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

	selwakeup(&d->bd_sel);

	d->bd_sel.si_pid = 0;

	selwakeup(d->bd_selproc, (int)d->bd_selcoll);

/* keep in sync with bpf_movein above: */
#define MAX_DATALINK_HDR_LEN	(sizeof(struct firewire_header))
bpfwrite(dev, uio, ioflag)
	register struct bpf_d *d;
	char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	if (d->bd_bif == 0) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m,
	    (struct sockaddr *)dst_buf, &datlen);

		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	if (datlen > ifp->if_mtu) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	if (d->bd_hdrcmplt) {
		((struct sockaddr *)dst_buf)->sa_family = pseudo_AF_HDRCMPLT;

	error = dlil_output(ifptodlt(ifp, PF_INET), m,
	    (caddr_t) 0, (struct sockaddr *)dst_buf, 0);

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	 * The driver frees the mbuf.
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.

		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
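/*
 * Illustrative userland sketch (not part of the original file): a typical
 * client opens a /dev/bpfN node, binds it to an interface with BIOCSETIF,
 * optionally enables immediate mode, and then read()s whole capture buffers
 * of the size reported by BIOCGBLEN (bpfread() above rejects any other
 * size).  The interface name "en0" and the helper name are only examples.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	open_bpf_en0(u_int *bufsize)
 *	{
 *		struct ifreq ifr;
 *		u_int one = 1;
 *		int fd = open("/dev/bpf0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return (-1);
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "en0", sizeof(ifr.ifr_name));
 *		if (ioctl(fd, BIOCGBLEN, bufsize) < 0 ||
 *		    ioctl(fd, BIOCIMMEDIATE, &one) < 0 ||
 *		    ioctl(fd, BIOCSETIF, &ifr) < 0) {
 *			close(fd);
 *			return (-1);
 *		}
 *		return (fd);
 *	}
 */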
bpfioctl(dev, cmd, addr, flags, p)
	register struct bpf_d *d;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	 * Check for read packet available.

		ifp = d->bd_bif->bif_ifp;
		error = (*ifp->if_ioctl)(ifp, cmd, addr);

	 * Get buffer len [for read()].
		*(u_int *)addr = d->bd_bufsize;

			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;

	 * Set link layer read filter.
		error = bpf_setf(d, (struct bpf_program *)addr);

	 * Flush read packet buffer.

	 * Put interface into promiscuous mode.
		if (d->bd_bif == 0) {
			 * No interface attached yet.
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);

	 * Get device parameters.
		*(u_int *)addr = d->bd_bif->bif_dlt;

	 * Get interface name.
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);

		error = bpf_setif(d, (struct ifreq *)addr);

			struct timeval *tv = (struct timeval *)addr;

			 * Subtract 1 tick from tvtohz() since this isn't
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;

			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;

			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;

	 * Set immediate mode.
		d->bd_immediate = *(u_int *)addr;

			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;

	 * Get "header already complete" flag
		*(u_int *)addr = d->bd_hdrcmplt;

	 * Set "header already complete" flag
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;

	 * Get "see sent packets" flag
		*(u_int *)addr = d->bd_seesent;

	 * Set "see sent packets" flag
		d->bd_seesent = *(u_int *)addr;

	case FIONBIO:		/* Non-blocking I/O */

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;

		error = fsetown(*(int *)addr, &d->bd_sigio);

		*(int *)addr = fgetown(d->bd_sigio);

	/* This is deprecated; FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)addr), &d->bd_sigio);

	/* This is deprecated; FIOGETOWN should be used instead. */
		*(int *)addr = -fgetown(d->bd_sigio);

	case BIOCSRSIG:		/* Set receive signal */
			sig = *(u_int *)addr;

		*(u_int *)addr = d->bd_sig;

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
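/*
 * Illustrative sketch (not from the original file): the smallest useful
 * program a process can pass in through BIOCSETF is a single BPF_RET
 * instruction.  Returning (u_int)-1 accepts every packet at its full
 * length, while returning 0 would reject everything:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1)
 *	};
 *	struct bpf_program prog = { 1, insns };
 *
 * bpf_validate() below is expected to accept this program since it consists
 * of a single, terminating BPF_RET instruction.
 */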
	struct bpf_program *fp;

	struct bpf_insn *fcode, *old;

	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)

			FREE((caddr_t)old, M_DEVBUF);

	if (flen > BPF_MAXINSNS)

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);

	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		d->bd_filter = fcode;

			FREE((caddr_t)old, M_DEVBUF);

	FREE((caddr_t)fcode, M_DEVBUF);
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.

	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);

	 * Look through attached interfaces for the named one.
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)

		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to the requested interface,
		 * just flush the buffer.
		if ((ifp->if_flags & IFF_UP) == 0)

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);

		if (bp != d->bd_bif) {
			 * Detach if attached to something else.
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.

bpfpoll(dev, events, wql, p)
	register struct bpf_d *d;

	d = bpf_dtab[minor(dev)];

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	 * An imitation of the FIONREAD ioctl code.
	if (d->bd_bif == NULL) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);

			selrecord(p, &d->bd_sel, wql);

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
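/*
 * Illustrative sketch (not from the original file): a driver whose receive
 * path keeps the frame in a contiguous buffer would typically guard the
 * call on if_bpf, which bpf_attachd()/bpf_detachd() above set and clear:
 *
 *	if (ifp->if_bpf)
 *		bpf_tap(ifp, frame, framelen);
 *
 * "frame" and "framelen" are placeholders for the driver's buffer and its
 * length.  On Darwin, drivers may instead be driven through the
 * if_set_bpf_tap callback registered in bpf_attachd().
 */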
bpf_tap(ifp, pkt, pktlen)
	register u_char *pkt;
	register u_int pktlen;

	register struct bpf_d *d;
	register u_int slen;

	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is if two different
	 * interfaces shared any data.  This is not the case.
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

		for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
			slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
				catchpacket(d, pkt, pktlen, slen, bcopy);

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.

bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	register size_t len;

	register const struct mbuf *m;
	register u_int count;

		count = min(m->m_len, len);
		bcopy(mtod((struct mbuf *)m, void *), dst, count);
 * Incoming linkage from device drivers, when packet is in an mbuf chain.

	struct bpf_if *bp = ifp->if_bpf;

	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))

		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.

catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));

	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;

	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	 * Round up the end of the previous packet to the next longword.
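	 * Added example (not in the original file): BPF_WORDALIGN rounds its
	 * argument up to the next multiple of BPF_ALIGNMENT (sizeof(long)),
	 * so with 4-byte alignment a bd_slen of 57 yields
	 * BPF_WORDALIGN(57) == 60 and the next record header starts on a
	 * longword boundary.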
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		if (d->bd_fbuf == 0) {
			 * We haven't completed the previous read yet,
			 * so drop the packet.

	else if (d->bd_immediate)
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.

	 * Append the bpf header.
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);

	microtime(&hp->bh_tstamp);

	uniqtime(&hp->bh_tstamp);

	hp->bh_tstamp = time;

	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;

	 * Copy the packet data into the store buffer and update its length.
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
 * Initialize all nonzero fields of a descriptor.

	register struct bpf_d *d;

	d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_fbuf == 0)

	d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_sbuf == 0) {
		FREE(d->bd_fbuf, M_DEVBUF);
 * Free buffers currently in use by a descriptor.

	register struct bpf_d *d;

	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	if (d->bd_sbuf != 0) {
		FREE(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			FREE(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			FREE(d->bd_fbuf, M_DEVBUF);

		FREE((caddr_t)d->bd_filter, M_DEVBUF);
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).

bpfattach(ifp, dlt, hdrlen)

	bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT);

	bp->bif_next = bpf_iflist;

	bp->bif_ifp->if_bpf = 0;

	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
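	 * Added worked example (not in the original file), assuming the
	 * classic SIZEOF_BPF_HDR of 18 bytes: for a DLT_EN10MB interface the
	 * link header is 14 bytes, so bif_hdrlen = BPF_WORDALIGN(14 + 18) - 14
	 * = 32 - 14 = 18.  A captured record then carries an 18-byte bpf
	 * header plus the 14-byte Ethernet header, which places the network
	 * layer header at offset 32 from the record start, a longword boundary.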
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get

	struct bpf_if *bp, *bp_prev;

	/* Locate BPF interface information */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)

	/* Check for no BPF interface information */

	/* Interface wasn't attached */
	if (bp->bif_ifp == NULL) {
		printf("bpfdetach: %s%d was not attached\n", ifp->if_name,

	while ((d = bp->bif_dlist) != NULL) {

		bp_prev->bif_next = bp->bif_next;

		bpf_iflist = bp->bif_next;
	if (!bpf_devsw_installed) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
			printf("bpf_init: failed to allocate a major number!\n");

		if (bpf_dtab_grow(NBPFILTER) == 0) {
			printf("bpf_init: failed to allocate bpf_dtab\n");

		for (i = 0; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);

	cdevsw_add(&bpf_cdevsw);

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL)
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.

bpf_tap(ifp, pkt, pktlen)
	register u_char *pkt;
	register u_int pktlen;

bpfattach(ifp, dlt, hdrlen)

bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_int buflen;

	return -1;	/* "no filter" behaviour */

#endif /* !defined(__APPLE__) */
#endif /* NBPFILTER > 0 */