/* apple/xnu: bsd/net/bpf.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 */
#define inline __inline

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>
#include <sys/socket.h>
#include <sys/vnode.h>
#include <net/bpfdesc.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>
/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
static caddr_t bpf_alloc();
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET	26			/* interruptible */
/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
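
/*
 * Illustrative userland sketch (not part of this file): the knob above is
 * exported as "debug.bpf_bufsize", so a process can inspect or patch the
 * default buffer size before bpf devices are opened.  The program below is
 * an assumption-laden example, not code shipped with this driver.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int bufsize;
	size_t len = sizeof(bufsize);

	/* Read the current default; writing works the same way via newp. */
	if (sysctlbyname("debug.bpf_bufsize", &bufsize, &len, NULL, 0) == 0)
		printf("default bpf buffer size: %d bytes\n", bufsize);
	return 0;
}
#endif
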
/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];
static int		bpf_dtab_init;
static int		nbpfilter = NBPFILTER;
static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));
void bpf_mtap(struct ifnet *, struct mbuf *);
int	bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
	bpfpoll();

static struct cdevsw bpf_cdevsw = {
	bpfopen,	bpfclose,	bpfread,	bpfwrite,
	bpfioctl,	nulldev,	nulldev,	NULL,		bpfpoll,
	eno_mmap,	eno_strat,	eno_getc,	eno_putc,	0
};
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	default:
		return (EIO);
	}
	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_data += hlen; /* XXX */
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}
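
/*
 * Illustrative userland sketch (not part of this file): because
 * bpf_movein() peels the link-level header off the data handed to
 * write() (for DLT_EN10MB that is a full struct ether_header), a
 * process injecting a frame must supply the complete frame, header
 * first.  The descriptor "fd", addresses and EtherType below are
 * assumptions for the example only.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <net/ethernet.h>

static ssize_t
send_frame(int fd, const void *payload, size_t paylen)
{
	unsigned char frame[ETHER_MAX_LEN];
	struct ether_header *eh = (struct ether_header *)frame;

	if (sizeof(*eh) + paylen > sizeof(frame))
		return -1;
	memset(eh->ether_dhost, 0xff, ETHER_ADDR_LEN);	/* broadcast dst */
	memset(eh->ether_shost, 0x00, ETHER_ADDR_LEN);	/* placeholder src */
	eh->ether_type = htons(0x88b5);			/* local experimental */
	memcpy(frame + sizeof(*eh), payload, paylen);

	/* One write() per frame; the header is consumed by bpf_movein(). */
	return write(fd, frame, sizeof(*eh) + paylen);
}
#endif
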
/* Callback registered with the interface driver by bpf_attachd(). */
int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	/*
	 * Do nothing if the BPF tap has been turned off.
	 * This is to protect from a potential race where this
	 * call blocks on the funnel lock. And in the meantime
	 * BPF is turned off, which will clear if_bpf.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp, m);

	thread_funnel_set(network_flock, funnel_state);
	return 0;
}
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	struct ifnet *ifp;

	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
	ifp = bp->bif_ifp;

	if (ifp->if_set_bpf_tap)
		(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
}
/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;
	struct ifnet *ifp;

	ifp = d->bd_bif->bif_ifp;
	if (ifp->if_set_bpf_tap)
		(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}
/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= nbpfilter)
		return (ENXIO);

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d)) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EBUSY);
	}

	/* Mark "free" and do most initialization. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	d = &bpf_dtab[minor(dev)];
	if (d->bd_bif)
		bpf_detachd(d);
	bpf_freed(d);

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}
#if BSD < 199103
/*
 * Support for SunOS, which does not have tsleep.
 */
static void
bpf_timeout(arg)
	caddr_t arg;
{
	boolean_t funnel_state;
	struct bpf_d *d = (struct bpf_d *)arg;

	funnel_state = thread_funnel_set(network_flock, TRUE);
	d->bd_timedout = 1;
	wakeup(arg);
	(void) thread_funnel_set(network_flock, FALSE);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 *  bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d;
	int error;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EINVAL);
	}

	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	selwakeup(&d->bd_sel);
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d->bd_sel.si_thread = 0;
#else
	if (d->bd_selproc) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	static struct sockaddr dst;
	int datlen;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];
	if (d->bd_bif == 0) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (0);
	}

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (error);
	}

	if (datlen > ifp->if_mtu) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EMSGSIZE);
	}

	error = dlil_output((u_long) ifp, m,
			    (caddr_t) 0, &dst, 0);
	/*
	error = dlil_inject_if_output(m, DLIL_NULL_FILTER);
	*/

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = d->bd_hlen = 0;
	d->bd_rcount = d->bd_dcount = 0;
}
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d;
	int error = 0;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		reset_d(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}

	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		d->bd_filter = 0;
		reset_d(d);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		d->bd_filter = fcode;
		reset_d(d);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return (ENXIO);

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
	struct ifnet *ifp;
	struct ifreq *ifr;
{
	char *s = ifp->if_name;
	char *d = ifr->ifr_name;

	while (*d++ = *s++)
		continue;
	d--; /* back to the null */
	/* XXX Assume that unit number is less than 10. */
	*d++ = ifp->if_unit + '0';
	*d = '\0';
}
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	int revents = 0;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
	}

	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (revents);
}
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	if ((bp = ifp->if_bpf)) {
		for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
			++d->bd_rcount;
			slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
			if (slen != 0)
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
	}
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
}
/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
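
/*
 * Illustrative userland sketch (not part of this file): catchpacket()
 * lays each record out as a bpf_hdr followed by bh_caplen bytes of data,
 * padded to a BPF_WORDALIGN boundary, so a reader steps through a read()
 * buffer like this.  "buf", "nread" and "handler" are assumptions for
 * the example only.
 */
#if 0
#include <stddef.h>
#include <net/bpf.h>

static void
walk_packets(const unsigned char *buf, size_t nread,
    void (*handler)(const unsigned char *pkt, size_t caplen))
{
	size_t off = 0;

	while (off + sizeof(struct bpf_hdr) <= nread) {
		const struct bpf_hdr *hp = (const struct bpf_hdr *)(buf + off);

		/* Packet data starts bh_hdrlen bytes past the record start. */
		handler(buf + off + hp->bh_hdrlen, hp->bh_caplen);
		/* Advance to the next longword-aligned record. */
		off += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
	}
}
#endif
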
/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_sbuf == 0) {
		FREE(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}
/*
 * Free buffers currently in use by a descriptor.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		FREE(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			FREE(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			FREE(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		FREE((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}
/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	int i;

	bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/*
	 * Mark all the descriptors free if this hasn't been done.
	 */
	if (!bpf_dtab_init) {
		for (i = 0; i < nbpfilter; ++i)
			D_MARKFREE(&bpf_dtab[i]);
		bpf_dtab_init = 1;
	}

	printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}
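
/*
 * Illustrative kernel-side sketch (not part of this file): a typical
 * Ethernet driver makes itself visible to bpf by calling bpfattach() once
 * at attach time and then handing received mbuf chains to bpf_mtap().
 * The function names below are assumptions for the example only.
 */
#if 0
static void
example_driver_attach(struct ifnet *ifp)
{
	/* DLT_EN10MB with a fixed 14-byte Ethernet header. */
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
}

static void
example_driver_input(struct ifnet *ifp, struct mbuf *m)
{
	/* Give listeners a look at the packet before normal input. */
	if (ifp->if_bpf)
		bpf_mtap(ifp, m);
}
#endif
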
static void *bpf_devfs_token[NBPFILTER];

static int bpf_devsw_installed;

void bpf_init __P((void *unused));

void
bpf_init(unused)
	void *unused;
{
	int	i;
	int	maj;

	if (!bpf_devsw_installed) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(BPF_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			printf("bpf_init: failed to allocate a major number!\n");
			nbpfilter = 0;
			return;
		}
		for (i = 0 ; i < nbpfilter; i++) {
			bpf_devfs_token[i] = devfs_make_node(makedev(maj, i),
				DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
				"bpf%d", i);
		}
	}
}

/*
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
*/