/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
81 #define inline __inline
84 #include <sys/param.h>
85 #include <sys/systm.h>
87 #include <sys/malloc.h>
91 #include <sys/signalvar.h>
92 #include <sys/filio.h>
93 #include <sys/sockio.h>
94 #include <sys/ttycom.h>
95 #include <sys/filedesc.h>
96 #include <sys/uio_internal.h>
97 #include <sys/fcntl.h>
98 #include <sys/file_internal.h>
99 #include <sys/event.h>
101 #if defined(sparc) && BSD < 199103
102 #include <sys/stream.h>
104 #include <sys/poll.h>
106 #include <sys/socket.h>
107 #include <sys/vnode.h>
111 #include <net/bpfdesc.h>
113 #include <netinet/in.h>
114 #include <netinet/if_ether.h>
115 #include <sys/kernel.h>
116 #include <sys/sysctl.h>
117 #include <net/firewire.h>
119 #include <miscfs/devfs/devfs.h>
120 #include <net/dlil.h>
122 #include <kern/locks.h>
125 #include <security/mac_framework.h>
128 extern int tvtohz(struct timeval
*);
131 * Older BSDs don't have kernel malloc.
135 static caddr_t
bpf_alloc();
136 #include <net/bpf_compat.h>
137 #define BPF_BUFSIZE (MCLBYTES-8)
138 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
140 #define BPF_BUFSIZE 4096
141 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
145 #define PRINET 26 /* interruptible */
148 * The default read buffer size is patchable.
150 static unsigned int bpf_bufsize
= BPF_BUFSIZE
;
151 SYSCTL_INT(_debug
, OID_AUTO
, bpf_bufsize
, CTLFLAG_RW
,
152 &bpf_bufsize
, 0, "");
153 static unsigned int bpf_maxbufsize
= BPF_MAXBUFSIZE
;
154 SYSCTL_INT(_debug
, OID_AUTO
, bpf_maxbufsize
, CTLFLAG_RW
,
155 &bpf_maxbufsize
, 0, "");
156 static unsigned int bpf_maxdevices
= 256;
157 SYSCTL_UINT(_debug
, OID_AUTO
, bpf_maxdevices
, CTLFLAG_RW
,
158 &bpf_maxdevices
, 0, "");
161 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
162 * bpf_dtab holds pointer to the descriptors, indexed by minor device #
164 static struct bpf_if
*bpf_iflist
;
167 * BSD now stores the bpf_d in the dev_t which is a struct
168 * on their system. Our dev_t is an int, so we still store
169 * the bpf_d in a separate table indexed by minor device #.
171 * The value stored in bpf_dtab[n] represent three states:
172 * 0: device not opened
173 * 1: device opening or closing
174 * other: device <n> opened with pointer to storage
176 static struct bpf_d
**bpf_dtab
= NULL
;
177 static unsigned int bpf_dtab_size
= 0;
178 static unsigned int nbpfilter
= 0;
180 static lck_mtx_t
*bpf_mlock
;
181 static lck_grp_t
*bpf_mlock_grp
;
182 static lck_grp_attr_t
*bpf_mlock_grp_attr
;
183 static lck_attr_t
*bpf_mlock_attr
;
186 * Mark a descriptor free by making it point to itself.
187 * This is probably cheaper than marking with a constant since
188 * the address should be in a register anyway.
190 #endif /* __APPLE__ */
192 static int bpf_allocbufs(struct bpf_d
*);
193 static errno_t
bpf_attachd(struct bpf_d
*d
, struct bpf_if
*bp
);
194 static void bpf_detachd(struct bpf_d
*d
);
195 static void bpf_freed(struct bpf_d
*);
196 static void bpf_mcopy(const void *, void *, size_t);
197 static int bpf_movein(struct uio
*, int,
198 struct mbuf
**, struct sockaddr
*, int *);
199 static int bpf_setif(struct bpf_d
*, ifnet_t ifp
, u_int32_t dlt
);
200 static void bpf_wakeup(struct bpf_d
*);
201 static void catchpacket(struct bpf_d
*, u_char
*, u_int
,
202 u_int
, void (*)(const void *, void *, size_t));
203 static void reset_d(struct bpf_d
*);
204 static int bpf_setf(struct bpf_d
*, u_int bf_len
, user_addr_t bf_insns
);
205 static int bpf_getdltlist(struct bpf_d
*, struct bpf_dltlist
*,
207 static int bpf_setdlt(struct bpf_d
*, u_int
);
209 /*static void *bpf_devfs_token[MAXBPFILTER];*/
211 static int bpf_devsw_installed
;
213 void bpf_init(void *unused
);
214 static int bpf_tap_callback(struct ifnet
*ifp
, struct mbuf
*m
);
217 * Darwin differs from BSD here, the following are static
218 * on BSD and not static on Darwin.
224 ioctl_fcn_t bpfioctl
;
225 select_fcn_t bpfpoll
;
228 /* Darwin's cdevsw struct differs slightly from BSDs */
229 #define CDEV_MAJOR 23
230 static struct cdevsw bpf_cdevsw
= {
232 /* close */ bpfclose
,
234 /* write */ bpfwrite
,
235 /* ioctl */ bpfioctl
,
237 /* reset */ eno_reset
,
239 /* select */ bpfpoll
,
241 /* strategy*/ eno_strat
,
247 #define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)
250 bpf_movein(struct uio
*uio
, int linktype
, struct mbuf
**mp
, struct sockaddr
*sockp
, int *datlen
)
268 sa_family
= AF_UNSPEC
;
269 /* XXX Would MAXLINKHDR be better? */
270 hlen
= sizeof(struct ether_header
);
275 #if defined(__FreeBSD__) || defined(__bsdi__)
276 sa_family
= AF_IMPLINK
;
279 sa_family
= AF_UNSPEC
;
280 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
288 sa_family
= AF_UNSPEC
;
293 case DLT_ATM_RFC1483
:
295 * en atm driver requires 4-byte atm pseudo header.
296 * though it isn't standard, vpi:vci needs to be
299 sa_family
= AF_UNSPEC
;
300 hlen
= 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
305 sa_family
= AF_UNSPEC
;
306 hlen
= 4; /* This should match PPP_HDRLEN */
309 case DLT_APPLE_IP_OVER_IEEE1394
:
310 sa_family
= AF_UNSPEC
;
311 hlen
= sizeof(struct firewire_header
);
314 case DLT_IEEE802_11
: /* IEEE 802.11 wireless */
315 sa_family
= AF_IEEE80211
;
323 // LP64todo - fix this!
324 len
= uio_resid(uio
);
325 *datlen
= len
- hlen
;
326 if ((unsigned)len
> MCLBYTES
)
331 * Build a sockaddr based on the data link layer type.
332 * We do this at this level because the ethernet header
333 * is copied directly into the data field of the sockaddr.
334 * In the case of SLIP, there is no header and the packet
335 * is forwarded as is.
336 * Also, we are careful to leave room at the front of the mbuf
337 * for the link level header.
339 if ((hlen
+ SOCKADDR_HDR_LEN
) > sockp
->sa_len
) {
342 sockp
->sa_family
= sa_family
;
345 * We're directly sending the packet data supplied by
346 * the user; we don't need to make room for the link
347 * header, and don't need the header length value any
348 * more, so set it to 0.
353 MGETHDR(m
, M_WAIT
, MT_DATA
);
356 if ((unsigned)len
> MHLEN
) {
359 if ((m
->m_flags
& M_EXT
) == 0) {
362 if (m
->m_len
!= MCLBYTES
) {
368 m
->m_pkthdr
.len
= m
->m_len
= len
;
369 m
->m_pkthdr
.rcvif
= NULL
;
372 * Make room for link header.
375 m
->m_pkthdr
.len
-= hlen
;
378 m
->m_data
+= hlen
; /* XXX */
382 error
= UIOMOVE((caddr_t
)sockp
->sa_data
, hlen
, UIO_WRITE
, uio
);
386 error
= UIOMOVE(mtod(m
, caddr_t
), len
- hlen
, UIO_WRITE
, uio
);
397 * The dynamic addition of a new device node must block all processes that are opening
398 * the last device so that no process will get an unexpected ENOENT
401 bpf_make_dev_t(int maj
)
403 static int bpf_growing
= 0;
404 unsigned int cur_size
= nbpfilter
, i
;
406 if (nbpfilter
>= bpf_maxdevices
)
409 while (bpf_growing
) {
410 /* Wait until new device has been created */
411 (void)tsleep((caddr_t
)&bpf_growing
, PZERO
, "bpf_growing", 0);
413 if (nbpfilter
> cur_size
) {
414 /* other thread grew it already */
419 /* need to grow bpf_dtab first */
420 if (nbpfilter
== bpf_dtab_size
) {
422 struct bpf_d
**new_dtab
= NULL
;
423 struct bpf_d
**old_dtab
= NULL
;
425 new_dtab_size
= bpf_dtab_size
+ NBPFILTER
;
426 new_dtab
= (struct bpf_d
**)_MALLOC(sizeof(struct bpf_d
*) * new_dtab_size
, M_DEVBUF
, M_WAIT
);
428 printf("bpf_make_dev_t: malloc bpf_dtab failed\n");
432 bcopy(bpf_dtab
, new_dtab
,
433 sizeof(struct bpf_d
*) * bpf_dtab_size
);
435 bzero(new_dtab
+ bpf_dtab_size
,
436 sizeof(struct bpf_d
*) * NBPFILTER
);
439 bpf_dtab_size
= new_dtab_size
;
440 if (old_dtab
!= NULL
)
441 _FREE(old_dtab
, M_DEVBUF
);
444 (void) devfs_make_node(makedev(maj
, i
),
445 DEVFS_CHAR
, UID_ROOT
, GID_WHEEL
, 0600,
449 wakeup((caddr_t
)&bpf_growing
);
455 * Attach file to the bpf interface, i.e. make d listen on bp.
458 bpf_attachd(struct bpf_d
*d
, struct bpf_if
*bp
)
460 int first
= bp
->bif_dlist
== NULL
;
464 * Point d at bp, and add d to the interface's list of listeners.
465 * Finally, point the driver's bpf cookie at the interface so
466 * it will divert packets to bpf.
469 d
->bd_next
= bp
->bif_dlist
;
473 bpf_tap_mode tap_mode
;
475 switch ((d
->bd_oflags
& (FREAD
| FWRITE
))) {
477 tap_mode
= BPF_TAP_INPUT
;
480 tap_mode
= BPF_TAP_OUTPUT
;
483 tap_mode
= BPF_TAP_INPUT_OUTPUT
;
487 /* Find the default bpf entry for this ifp */
488 if (bp
->bif_ifp
->if_bpf
== NULL
) {
489 struct bpf_if
*primary
;
491 for (primary
= bpf_iflist
; primary
&& primary
->bif_ifp
!= bp
->bif_ifp
;
492 primary
= primary
->bif_next
)
495 bp
->bif_ifp
->if_bpf
= primary
;
498 /* Only call dlil_set_bpf_tap for primary dlt */
499 if (bp
->bif_ifp
->if_bpf
== bp
)
500 dlil_set_bpf_tap(bp
->bif_ifp
, tap_mode
, bpf_tap_callback
);
503 error
= bp
->bif_tap(bp
->bif_ifp
, bp
->bif_dlt
, tap_mode
);
510 * Detach a file from its interface.
513 bpf_detachd(struct bpf_d
*d
)
519 ifp
= d
->bd_bif
->bif_ifp
;
522 /* Remove d from the interface's descriptor list. */
527 panic("bpf_detachd: descriptor not in list");
530 if (bp
->bif_dlist
== 0) {
532 * Let the driver know that there are no more listeners.
534 /* Only call dlil_set_bpf_tap for primary dlt */
535 if (bp
->bif_ifp
->if_bpf
== bp
)
536 dlil_set_bpf_tap(ifp
, BPF_TAP_DISABLE
, NULL
);
538 bp
->bif_tap(ifp
, bp
->bif_dlt
, BPF_TAP_DISABLE
);
540 for (bp
= bpf_iflist
; bp
; bp
= bp
->bif_next
)
541 if (bp
->bif_ifp
== ifp
&& bp
->bif_dlist
!= 0)
548 * Check if this descriptor had requested promiscuous mode.
549 * If so, turn it off.
553 lck_mtx_unlock(bpf_mlock
);
554 if (ifnet_set_promiscuous(ifp
, 0)) {
556 * Something is really wrong if we were able to put
557 * the driver into promiscuous mode, but can't
559 * Most likely the network interface is gone.
561 printf("bpf: ifnet_set_promiscuous failed");
563 lck_mtx_lock(bpf_mlock
);
569 * Open ethernet device. Returns ENXIO for illegal minor device number,
570 * EBUSY if file is open by another process.
574 bpfopen(dev_t dev
, int flags
, __unused
int fmt
,
575 __unused
struct proc
*p
)
579 lck_mtx_lock(bpf_mlock
);
580 if ((unsigned int) minor(dev
) >= nbpfilter
) {
581 lck_mtx_unlock(bpf_mlock
);
585 * New device nodes are created on demand when opening the last one.
586 * The programming model is for processes to loop on the minor starting at 0
587 * as long as EBUSY is returned. The loop stops when either the open succeeds or
588 * an error other that EBUSY is returned. That means that bpf_make_dev_t() must
589 * block all processes that are opening the last node. If not all
590 * processes are blocked, they could unexpectedly get ENOENT and abort their
593 if ((unsigned int) minor(dev
) == (nbpfilter
- 1))
594 bpf_make_dev_t(major(dev
));
597 * Each minor can be opened by only one process. If the requested
598 * minor is in use, return EBUSY.
600 * Important: bpfopen() and bpfclose() have to check and set the status of a device
601 * in the same lockin context otherwise the device may be leaked because the vnode use count
602 * will be unpextectly greater than 1 when close() is called.
604 if (bpf_dtab
[minor(dev
)] == 0) {
605 bpf_dtab
[minor(dev
)] = (void *)1; /* Mark opening */
607 lck_mtx_unlock(bpf_mlock
);
610 d
= (struct bpf_d
*)_MALLOC(sizeof(struct bpf_d
), M_DEVBUF
, M_WAIT
);
612 /* this really is a catastrophic failure */
613 printf("bpfopen: malloc bpf_d failed\n");
614 bpf_dtab
[minor(dev
)] = NULL
;
615 lck_mtx_unlock(bpf_mlock
);
618 bzero(d
, sizeof(struct bpf_d
));
621 * It is not necessary to take the BPF lock here because no other
622 * thread can access the device until it is marked opened...
625 /* Mark "in use" and do most initialization. */
626 d
->bd_bufsize
= bpf_bufsize
;
629 d
->bd_oflags
= flags
;
631 mac_bpfdesc_label_init(d
);
632 mac_bpfdesc_label_associate(kauth_cred_get(), d
);
634 bpf_dtab
[minor(dev
)] = d
; /* Mark opened */
635 lck_mtx_unlock(bpf_mlock
);
641 * Close the descriptor by detaching it from its interface,
642 * deallocating its buffers, and marking it free.
646 bpfclose(dev_t dev
, __unused
int flags
, __unused
int fmt
,
647 __unused
struct proc
*p
)
651 /* Take BPF lock to ensure no other thread is using the device */
652 lck_mtx_lock(bpf_mlock
);
654 d
= bpf_dtab
[minor(dev
)];
655 if (d
== 0 || d
== (void *)1) {
656 lck_mtx_unlock(bpf_mlock
);
659 bpf_dtab
[minor(dev
)] = (void *)1; /* Mark closing */
663 selthreadclear(&d
->bd_sel
);
665 mac_bpfdesc_label_destroy(d
);
669 /* Mark free in same context as bpfopen comes to check */
670 bpf_dtab
[minor(dev
)] = NULL
; /* Mark closed */
671 lck_mtx_unlock(bpf_mlock
);
679 #define BPF_SLEEP bpf_sleep
682 bpf_sleep(struct bpf_d
*d
, int pri
, const char *wmesg
, int timo
)
686 lck_mtx_unlock(bpf_mlock
);
688 st
= tsleep((caddr_t
)d
, pri
, wmesg
, timo
);
690 lck_mtx_lock(bpf_mlock
);
696 * Rotate the packet buffers in descriptor d. Move the store buffer
697 * into the hold slot, and the free buffer into the store slot.
698 * Zero the length of the new store buffer.
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * NOTE: the macro as extracted ended in a dangling '\' continuation and
 * was missing the final two assignments; without them the free buffer
 * would be aliased by both bd_sbuf and bd_fbuf and bd_slen would carry
 * stale data.  Restored the complete rotation.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
707 * bpfread - read next chunk of packets from buffers
710 bpfread(dev_t dev
, struct uio
*uio
, int ioflag
)
715 lck_mtx_lock(bpf_mlock
);
717 d
= bpf_dtab
[minor(dev
)];
718 if (d
== 0 || d
== (void *)1) {
719 lck_mtx_unlock(bpf_mlock
);
725 * Restrict application to use a buffer the same size as
728 if (uio_resid(uio
) != d
->bd_bufsize
) {
729 lck_mtx_unlock(bpf_mlock
);
734 * If the hold buffer is empty, then do a timed sleep, which
735 * ends when the timeout expires or when enough packets
736 * have arrived to fill the store buffer.
738 while (d
->bd_hbuf
== 0) {
739 if (d
->bd_immediate
&& d
->bd_slen
!= 0) {
741 * A packet(s) either arrived since the previous
742 * read or arrived while we were asleep.
743 * Rotate the buffers and return what's here.
750 * No data is available, check to see if the bpf device
751 * is still pointed at a real interface. If not, return
752 * ENXIO so that the userland process knows to rebind
753 * it before using it again.
755 if (d
->bd_bif
== NULL
) {
756 lck_mtx_unlock(bpf_mlock
);
759 if (ioflag
& IO_NDELAY
) {
760 lck_mtx_unlock(bpf_mlock
);
761 return (EWOULDBLOCK
);
763 error
= BPF_SLEEP(d
, PRINET
|PCATCH
, "bpf",
766 * Make sure device is still opened
768 d
= bpf_dtab
[minor(dev
)];
769 if (d
== 0 || d
== (void *)1) {
770 lck_mtx_unlock(bpf_mlock
);
773 if (error
== EINTR
|| error
== ERESTART
) {
774 lck_mtx_unlock(bpf_mlock
);
777 if (error
== EWOULDBLOCK
) {
779 * On a timeout, return what's in the buffer,
780 * which may be nothing. If there is something
781 * in the store buffer, we can rotate the buffers.
785 * We filled up the buffer in between
786 * getting the timeout and arriving
787 * here, so we don't need to rotate.
791 if (d
->bd_slen
== 0) {
792 lck_mtx_unlock(bpf_mlock
);
800 * At this point, we know we have something in the hold slot.
804 * Move data from hold buffer into user space.
805 * We know the entire buffer is transferred since
806 * we checked above that the read buffer is bpf_bufsize bytes.
808 error
= UIOMOVE(d
->bd_hbuf
, d
->bd_hlen
, UIO_READ
, uio
);
810 d
->bd_fbuf
= d
->bd_hbuf
;
813 lck_mtx_unlock(bpf_mlock
);
819 * If there are processes sleeping on this descriptor, wake them up.
822 bpf_wakeup(struct bpf_d
*d
)
825 if (d
->bd_async
&& d
->bd_sig
&& d
->bd_sigio
)
826 pgsigio(d
->bd_sigio
, d
->bd_sig
);
829 selwakeup(&d
->bd_sel
);
830 KNOTE(&d
->bd_sel
.si_note
, 1);
833 d
->bd_sel
.si_pid
= 0;
837 selwakeup(d
->bd_selproc
, (int)d
->bd_selcoll
);
844 /* keep in sync with bpf_movein above: */
845 #define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
848 bpfwrite(dev_t dev
, struct uio
*uio
, __unused
int ioflag
)
852 struct mbuf
*m
= NULL
;
854 char dst_buf
[SOCKADDR_HDR_LEN
+ MAX_DATALINK_HDR_LEN
];
857 lck_mtx_lock(bpf_mlock
);
859 d
= bpf_dtab
[minor(dev
)];
860 if (d
== 0 || d
== (void *)1) {
861 lck_mtx_unlock(bpf_mlock
);
864 if (d
->bd_bif
== 0) {
865 lck_mtx_unlock(bpf_mlock
);
869 ifp
= d
->bd_bif
->bif_ifp
;
871 if (uio_resid(uio
) == 0) {
872 lck_mtx_unlock(bpf_mlock
);
875 ((struct sockaddr
*)dst_buf
)->sa_len
= sizeof(dst_buf
);
876 error
= bpf_movein(uio
, (int)d
->bd_bif
->bif_dlt
, &m
,
877 d
->bd_hdrcmplt
? NULL
: (struct sockaddr
*)dst_buf
,
880 lck_mtx_unlock(bpf_mlock
);
884 if ((unsigned)datlen
> ifp
->if_mtu
) {
885 lck_mtx_unlock(bpf_mlock
);
890 if ((error
= ifp_use(ifp
, kIfNetUseCount_MustNotBeZero
)) != 0) {
891 lck_mtx_unlock(bpf_mlock
);
897 mac_mbuf_label_associate_bpfdesc(d
, m
);
899 lck_mtx_unlock(bpf_mlock
);
901 if (d
->bd_hdrcmplt
) {
902 if (d
->bd_bif
->bif_send
)
903 error
= d
->bd_bif
->bif_send(ifp
, d
->bd_bif
->bif_dlt
, m
);
905 error
= dlil_output(ifp
, 0, m
, NULL
, NULL
, 1);
908 error
= dlil_output(ifp
, PF_INET
, m
, NULL
, (struct sockaddr
*)dst_buf
, 0);
911 if (ifp_unuse(ifp
) != 0)
912 ifp_use_reached_zero(ifp
);
915 * The driver frees the mbuf.
921 * Reset a descriptor by flushing its packet buffer and clearing the
922 * receive and drop counts.
925 reset_d(struct bpf_d
*d
)
928 /* Free the hold buffer. */
929 d
->bd_fbuf
= d
->bd_hbuf
;
939 * FIONREAD Check for read packet available.
940 * SIOCGIFADDR Get interface address - convenient hook to driver.
941 * BIOCGBLEN Get buffer len [for read()].
942 * BIOCSETF Set ethernet read filter.
943 * BIOCFLUSH Flush read packet buffer.
944 * BIOCPROMISC Put interface into promiscuous mode.
945 * BIOCGDLT Get link layer type.
946 * BIOCGETIF Get interface name.
947 * BIOCSETIF Set interface.
948 * BIOCSRTIMEOUT Set read timeout.
949 * BIOCGRTIMEOUT Get read timeout.
950 * BIOCGSTATS Get packet stats.
951 * BIOCIMMEDIATE Set immediate mode.
952 * BIOCVERSION Get filter language version.
953 * BIOCGHDRCMPLT Get "header already complete" flag
954 * BIOCSHDRCMPLT Set "header already complete" flag
955 * BIOCGSEESENT Get "see packets sent" flag
956 * BIOCSSEESENT Set "see packets sent" flag
960 bpfioctl(dev_t dev
, u_long cmd
, caddr_t addr
, __unused
int flags
,
966 lck_mtx_lock(bpf_mlock
);
968 d
= bpf_dtab
[minor(dev
)];
969 if (d
== 0 || d
== (void *)1) {
970 lck_mtx_unlock(bpf_mlock
);
981 * Check for read packet available.
1002 ifp
= d
->bd_bif
->bif_ifp
;
1003 error
= ifnet_ioctl(ifp
, 0, cmd
, addr
);
1009 * Get buffer len [for read()].
1012 *(u_int
*)addr
= d
->bd_bufsize
;
1016 * Set buffer length.
1025 u_int size
= *(u_int
*)addr
;
1027 if (size
> bpf_maxbufsize
)
1028 *(u_int
*)addr
= size
= bpf_maxbufsize
;
1029 else if (size
< BPF_MINBUFSIZE
)
1030 *(u_int
*)addr
= size
= BPF_MINBUFSIZE
;
1031 d
->bd_bufsize
= size
;
1037 * Set link layer read filter.
1040 struct bpf_program32
*prg32
= (struct bpf_program32
*)addr
;
1041 error
= bpf_setf(d
, prg32
->bf_len
,
1042 CAST_USER_ADDR_T(prg32
->bf_insns
));
1047 struct bpf_program64
*prg64
= (struct bpf_program64
*)addr
;
1048 error
= bpf_setf(d
, prg64
->bf_len
, prg64
->bf_insns
);
1053 * Flush read packet buffer.
1060 * Put interface into promiscuous mode.
1063 if (d
->bd_bif
== 0) {
1065 * No interface attached yet.
1070 if (d
->bd_promisc
== 0) {
1071 lck_mtx_unlock(bpf_mlock
);
1072 error
= ifnet_set_promiscuous(d
->bd_bif
->bif_ifp
, 1);
1073 lck_mtx_lock(bpf_mlock
);
1080 * Get device parameters.
1086 *(u_int
*)addr
= d
->bd_bif
->bif_dlt
;
1090 * Get a list of supported data link types.
1093 if (d
->bd_bif
== NULL
) {
1096 error
= bpf_getdltlist(d
,
1097 (struct bpf_dltlist
*)addr
, p
);
1102 * Set data link type.
1105 if (d
->bd_bif
== NULL
)
1108 error
= bpf_setdlt(d
, *(u_int
*)addr
);
1112 * Get interface name.
1118 struct ifnet
*const ifp
= d
->bd_bif
->bif_ifp
;
1119 struct ifreq
*const ifr
= (struct ifreq
*)addr
;
1121 snprintf(ifr
->ifr_name
, sizeof(ifr
->ifr_name
),
1122 "%s%d", ifp
->if_name
, ifp
->if_unit
);
1131 ifp
= ifunit(((struct ifreq
*)addr
)->ifr_name
);
1135 error
= bpf_setif(d
, ifp
, 0);
1144 struct BPF_TIMEVAL
*_tv
= (struct BPF_TIMEVAL
*)addr
;
1147 tv
.tv_sec
= _tv
->tv_sec
;
1148 tv
.tv_usec
= _tv
->tv_usec
;
1151 * Subtract 1 tick from tvtohz() since this isn't
1154 if ((error
= itimerfix(&tv
)) == 0)
1155 d
->bd_rtout
= tvtohz(&tv
) - 1;
1164 struct BPF_TIMEVAL
*tv
= (struct BPF_TIMEVAL
*)addr
;
1166 tv
->tv_sec
= d
->bd_rtout
/ hz
;
1167 tv
->tv_usec
= (d
->bd_rtout
% hz
) * tick
;
1176 struct bpf_stat
*bs
= (struct bpf_stat
*)addr
;
1178 bs
->bs_recv
= d
->bd_rcount
;
1179 bs
->bs_drop
= d
->bd_dcount
;
1184 * Set immediate mode.
1187 d
->bd_immediate
= *(u_int
*)addr
;
1192 struct bpf_version
*bv
= (struct bpf_version
*)addr
;
1194 bv
->bv_major
= BPF_MAJOR_VERSION
;
1195 bv
->bv_minor
= BPF_MINOR_VERSION
;
1200 * Get "header already complete" flag
1203 *(u_int
*)addr
= d
->bd_hdrcmplt
;
1207 * Set "header already complete" flag
1210 d
->bd_hdrcmplt
= *(u_int
*)addr
? 1 : 0;
1214 * Get "see sent packets" flag
1217 *(u_int
*)addr
= d
->bd_seesent
;
1221 * Set "see sent packets" flag
1224 d
->bd_seesent
= *(u_int
*)addr
;
1227 case FIONBIO
: /* Non-blocking I/O */
1230 case FIOASYNC
: /* Send signal on receive packets */
1231 d
->bd_async
= *(int *)addr
;
1235 error
= fsetown(*(int *)addr
, &d
->bd_sigio
);
1239 *(int *)addr
= fgetown(d
->bd_sigio
);
1242 /* This is deprecated, FIOSETOWN should be used instead. */
1244 error
= fsetown(-(*(int *)addr
), &d
->bd_sigio
);
1247 /* This is deprecated, FIOGETOWN should be used instead. */
1249 *(int *)addr
= -fgetown(d
->bd_sigio
);
1252 case BIOCSRSIG
: /* Set receive signal */
1256 sig
= *(u_int
*)addr
;
1265 *(u_int
*)addr
= d
->bd_sig
;
1269 lck_mtx_unlock(bpf_mlock
);
1275 * Set d's packet filter program to fp. If this file already has a filter,
1276 * free it and replace it. Returns EINVAL for bogus requests.
1279 bpf_setf(struct bpf_d
*d
, u_int bf_len
, user_addr_t bf_insns
)
1281 struct bpf_insn
*fcode
, *old
;
1285 if (bf_insns
== USER_ADDR_NULL
) {
1288 d
->bd_filter
= NULL
;
1291 FREE((caddr_t
)old
, M_DEVBUF
);
1295 if (flen
> BPF_MAXINSNS
)
1298 size
= flen
* sizeof(struct bpf_insn
);
1299 fcode
= (struct bpf_insn
*) _MALLOC(size
, M_DEVBUF
, M_WAIT
);
1304 if (copyin(bf_insns
, (caddr_t
)fcode
, size
) == 0 &&
1305 bpf_validate(fcode
, (int)flen
)) {
1306 d
->bd_filter
= fcode
;
1309 FREE((caddr_t
)old
, M_DEVBUF
);
1313 FREE((caddr_t
)fcode
, M_DEVBUF
);
1318 * Detach a file from its current interface (if attached at all) and attach
1319 * to the interface indicated by the name stored in ifr.
1320 * Return an errno or 0.
1323 bpf_setif(struct bpf_d
*d
, ifnet_t theywant
, u_int32_t dlt
)
1329 * Look through attached interfaces for the named one.
1331 for (bp
= bpf_iflist
; bp
!= 0; bp
= bp
->bif_next
) {
1332 struct ifnet
*ifp
= bp
->bif_ifp
;
1334 if (ifp
== 0 || ifp
!= theywant
|| (dlt
!= 0 && dlt
!= bp
->bif_dlt
))
1337 * We found the requested interface.
1338 * If it's not up, return an error.
1339 * Allocate the packet buffers if we need to.
1340 * If we're already attached to requested interface,
1341 * just flush the buffer.
1343 if ((ifp
->if_flags
& IFF_UP
) == 0)
1346 if (d
->bd_sbuf
== 0) {
1347 error
= bpf_allocbufs(d
);
1351 if (bp
!= d
->bd_bif
) {
1354 * Detach if attached to something else.
1358 if (bpf_attachd(d
, bp
) != 0) {
1372 * Get a list of available data link type of the interface.
1375 bpf_getdltlist(struct bpf_d
*d
, struct bpf_dltlist
*bfl
, struct proc
*p
)
1383 if (proc_is64bit(p
)) {
1384 dlist
= (user_addr_t
)bfl
->bfl_u
.bflu_pad
;
1386 dlist
= CAST_USER_ADDR_T(bfl
->bfl_u
.bflu_list
);
1389 ifp
= d
->bd_bif
->bif_ifp
;
1392 for (bp
= bpf_iflist
; bp
; bp
= bp
->bif_next
) {
1393 if (bp
->bif_ifp
!= ifp
)
1395 if (dlist
!= USER_ADDR_NULL
) {
1396 if (n
>= bfl
->bfl_len
) {
1399 error
= copyout(&bp
->bif_dlt
, dlist
,
1400 sizeof (bp
->bif_dlt
));
1401 dlist
+= sizeof (bp
->bif_dlt
);
1410 * Set the data link type of a BPF instance.
1413 bpf_setdlt(struct bpf_d
*d
, uint32_t dlt
)
1417 int error
, opromisc
;
1421 if (d
->bd_bif
->bif_dlt
== dlt
)
1423 ifp
= d
->bd_bif
->bif_ifp
;
1424 for (bp
= bpf_iflist
; bp
; bp
= bp
->bif_next
) {
1425 if (bp
->bif_ifp
== ifp
&& bp
->bif_dlt
== dlt
)
1429 opromisc
= d
->bd_promisc
;
1431 error
= bpf_attachd(d
, bp
);
1433 printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n",
1434 ifnet_name(bp
->bif_ifp
), ifnet_unit(bp
->bif_ifp
), error
);
1439 lck_mtx_unlock(bpf_mlock
);
1440 error
= ifnet_set_promiscuous(bp
->bif_ifp
, 1);
1441 lck_mtx_lock(bpf_mlock
);
1443 printf("bpf_setdlt: ifpromisc %s%d failed (%d)\n",
1444 ifnet_name(bp
->bif_ifp
), ifnet_unit(bp
->bif_ifp
), error
);
1449 return (bp
== NULL
? EINVAL
: 0);
1453 * Support for select()
1455 * Return true iff the specific operation will not block indefinitely.
1456 * Otherwise, return false but make a note that a selwakeup() must be done.
1459 bpfpoll(dev_t dev
, int events
, void * wql
, struct proc
*p
)
1464 lck_mtx_lock(bpf_mlock
);
1466 d
= bpf_dtab
[minor(dev
)];
1467 if (d
== 0 || d
== (void *)1) {
1468 lck_mtx_unlock(bpf_mlock
);
1473 * An imitation of the FIONREAD ioctl code.
1475 if (d
->bd_bif
== NULL
) {
1476 lck_mtx_unlock(bpf_mlock
);
1480 if (events
& (POLLIN
| POLLRDNORM
)) {
1481 if (d
->bd_hlen
!= 0 || (d
->bd_immediate
&& d
->bd_slen
!= 0))
1482 revents
|= events
& (POLLIN
| POLLRDNORM
);
1484 selrecord(p
, &d
->bd_sel
, wql
);
1487 lck_mtx_unlock(bpf_mlock
);
1492 * Support for kevent() system call. Register EVFILT_READ filters and
1493 * reject all others.
1495 int bpfkqfilter(dev_t dev
, struct knote
*kn
);
1496 static void filt_bpfdetach(struct knote
*);
1497 static int filt_bpfread(struct knote
*, long);
1499 static struct filterops bpfread_filtops
= {
1501 .f_detach
= filt_bpfdetach
,
1502 .f_event
= filt_bpfread
,
1506 bpfkqfilter(dev_t dev
, struct knote
*kn
)
1511 * Is this device a bpf?
1513 if (major(dev
) != CDEV_MAJOR
) {
1517 if (kn
->kn_filter
!= EVFILT_READ
) {
1521 lck_mtx_lock(bpf_mlock
);
1523 d
= bpf_dtab
[minor(dev
)];
1524 if (d
== 0 || d
== (void *)1) {
1525 lck_mtx_unlock(bpf_mlock
);
1530 * An imitation of the FIONREAD ioctl code.
1532 if (d
->bd_bif
== NULL
) {
1533 lck_mtx_unlock(bpf_mlock
);
1538 kn
->kn_fop
= &bpfread_filtops
;
1539 KNOTE_ATTACH(&d
->bd_sel
.si_note
, kn
);
1540 lck_mtx_unlock(bpf_mlock
);
1545 filt_bpfdetach(struct knote
*kn
)
1547 struct bpf_d
*d
= (struct bpf_d
*)kn
->kn_hook
;
1549 lck_mtx_lock(bpf_mlock
);
1550 KNOTE_DETACH(&d
->bd_sel
.si_note
, kn
);
1551 lck_mtx_unlock(bpf_mlock
);
1555 filt_bpfread(struct knote
*kn
, long hint
)
1557 struct bpf_d
*d
= (struct bpf_d
*)kn
->kn_hook
;
1561 lck_mtx_lock(bpf_mlock
);
1563 if (d
->bd_immediate
) {
1564 kn
->kn_data
= (d
->bd_hlen
== 0 ? d
->bd_slen
: d
->bd_hlen
);
1565 ready
= (kn
->kn_data
>= ((kn
->kn_sfflags
& NOTE_LOWAT
) ?
1568 kn
->kn_data
= d
->bd_hlen
;
1569 ready
= (kn
->kn_data
> 0);
1573 lck_mtx_unlock(bpf_mlock
);
/*
 * Strip const from a pointer without a direct cast (avoids -Wcast-qual),
 * by round-tripping it through a union.  Used by bpf_mcopy() to recover
 * the mbuf chain passed through its const src argument.
 *
 * Body restored — it was absent from the corrupted extraction; only the
 * signature line survived.  TODO(review): confirm return type against
 * the upstream file.
 */
static struct mbuf *
_cast_non_const(const void * ptr) {
	union {
		const void *cval;
		void *val;
	} ret;

	ret.cval = ptr;
	return (ret.val);
}
1589 * Copy data from an mbuf chain into a buffer. This code is derived
1590 * from m_copydata in sys/uipc_mbuf.c.
1593 bpf_mcopy(const void *src_arg
, void *dst_arg
, size_t len
)
1595 struct mbuf
*m
= _cast_non_const(src_arg
);
1603 count
= min(m
->m_len
, len
);
1604 bcopy(mbuf_data(m
), dst
, count
);
/*
 * bpf_tap_imp -- deliver one packet (optionally with a driver-prepended
 * header buffer) to every matching BPF descriptor attached to ifp.
 * The whole body runs under bpf_mlock; see the lock-ordering comment below.
 * NOTE(review): several lines (early returns, 'continue' statements,
 * closing braces) are missing from this extracted view; fragments kept
 * byte-identical.
 */
1623 * It's possible that we get here after the bpf descriptor has been
1624 * detached from the interface; in such a case we simply return.
1625 * Lock ordering is important since we can be called asynchronously
1626 * (from the IOKit) to process an inbound packet; when that happens
1627 * we would have been holding its "gateLock" and will be acquiring
1628 * "bpf_mlock" upon entering this routine. Due to that, we release
1629 * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
1630 * acquire "gateLock" in the IOKit), in order to avoid a deadlock
1631 * when a ifnet_set_promiscuous request simultaneously collides with
1632 * an inbound packet being passed into the tap callback.
1634 lck_mtx_lock(bpf_mlock
);
1635 if (ifp
->if_bpf
== NULL
) {
1636 lck_mtx_unlock(bpf_mlock
);
/*
 * Walk the per-interface bpf_if list until we find the entry matching
 * the requested dlt (dlt == 0 means "first attachment for this ifp").
 */
1640 for (bp
= ifp
->if_bpf
; bp
&& bp
->bif_ifp
== ifp
&&
1641 (dlt
!= 0 && bp
->bif_dlt
!= dlt
); bp
= bp
->bif_next
)
1643 if (bp
&& bp
->bif_ifp
== ifp
&& bp
->bif_dlist
!= NULL
) {
1645 struct m_hdr hack_hdr
;
1652 * This is gross. We mock up an mbuf that points to the
1653 * header buffer. This means we don't have to copy the
1654 * header. A number of interfaces prepended headers just
1655 * for bpf by allocating an mbuf on the stack. We want to
1656 * give developers an easy way to prepend a header for bpf.
1657 * Since a developer allocating an mbuf on the stack is bad,
1658 * we do even worse here, allocating only a header to point
1659 * to a buffer the developer supplied. This makes assumptions
1660 * that bpf_filter and catchpacket will not look at anything
1661 * in the mbuf other than the header. This was true at the
1662 * time this code was written.
1664 hack_hdr
.mh_next
= m
;
1665 hack_hdr
.mh_nextpkt
= NULL
;
1666 hack_hdr
.mh_len
= hlen
;
1667 hack_hdr
.mh_data
= hdr
;
1668 hack_hdr
.mh_type
= m
->m_type
;
1669 hack_hdr
.mh_flags
= 0;
/* from here on, "m" is the fake header mbuf chained to the real one */
1671 m
= (mbuf_t
)&hack_hdr
;
/* total packet length = sum of every mbuf's m_len in the chain */
1674 for (m0
= m
; m0
!= 0; m0
= m0
->m_next
)
1675 pktlen
+= m0
->m_len
;
/*
 * Run each attached descriptor's filter program over the packet and
 * capture it into that descriptor's buffer when the filter matches
 * (slen != 0 -- the 'if (slen ...)' guard line is missing from this view).
 */
1677 for (d
= bp
->bif_dlist
; d
; d
= d
->bd_next
) {
1678 if (outbound
&& !d
->bd_seesent
)
1681 slen
= bpf_filter(d
->bd_filter
, (u_char
*)m
, pktlen
, 0);
1684 if (mac_bpfdesc_check_receive(d
, bp
->bif_ifp
) != 0)
1687 catchpacket(d
, (u_char
*)m
, pktlen
, slen
, bpf_mcopy
);
1691 lck_mtx_unlock(bpf_mlock
);
/*
 * NOTE(review): two wrapper bodies follow whose signatures are on lines
 * missing from this extracted view; presumably bpf_tap_out() and
 * bpf_tap_in() -- confirm against the full file. The final argument to
 * bpf_tap_imp() is the "outbound" flag: 1 = transmit path.
 */
1702 bpf_tap_imp(ifp
, dlt
, m
, hdr
, hlen
, 1);
/* inbound (receive-path) variant: outbound flag is 0 */
1713 bpf_tap_imp(ifp
, dlt
, m
, hdr
, hlen
, 0);
1716 /* Callback registered with Ethernet driver. */
1717 static int bpf_tap_callback(struct ifnet
*ifp
, struct mbuf
*m
)
1719 bpf_tap_imp(ifp
, 0, m
, NULL
, 0, mbuf_pkthdr_rcvif(m
) == NULL
);
/*
 * catchpacket -- append one captured packet (bpf header + data) to the
 * descriptor's store buffer, rotating buffers on overflow.
 * NOTE(review): extraction gaps -- the buffer-rotation call, the
 * timestamp acquisition into 'tv', returns, and some braces are on
 * missing lines; visible fragments kept byte-identical.
 */
1725 * Move the packet data from interface memory (pkt) into the
1726 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1727 * otherwise 0. "copy" is the routine called to do the actual data
1728 * transfer. bcopy is passed in to copy contiguous chunks, while
1729 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1730 * pkt is really an mbuf.
1733 catchpacket(struct bpf_d
*d
, u_char
*pkt
, u_int pktlen
, u_int snaplen
,
1734 void (*cpfn
)(const void *, void *, size_t))
1738 int hdrlen
= d
->bd_bif
->bif_hdrlen
;
1740 * Figure out how many bytes to move. If the packet is
1741 * greater or equal to the snapshot length, transfer that
1742 * much. Otherwise, transfer the whole packet (unless
1743 * we hit the buffer size limit).
1745 totlen
= hdrlen
+ min(snaplen
, pktlen
);
1746 if (totlen
> d
->bd_bufsize
)
1747 totlen
= d
->bd_bufsize
;
1750 * Round up the end of the previous packet to the next longword.
1752 curlen
= BPF_WORDALIGN(d
->bd_slen
);
1753 if (curlen
+ totlen
> d
->bd_bufsize
) {
1755 * This packet will overflow the storage buffer.
1756 * Rotate the buffers if we can, then wakeup any
1759 if (d
->bd_fbuf
== 0) {
1761 * We haven't completed the previous read yet,
1762 * so drop the packet.
1771 else if (d
->bd_immediate
)
1773 * Immediate mode is set. A packet arrived so any
1774 * reads should be woken up.
1779 * Append the bpf header.
1781 hp
= (struct bpf_hdr
*)(d
->bd_sbuf
+ curlen
);
/* NOTE(review): the microtime()/getmicrotime() call filling 'tv' is on a missing line */
1784 hp
->bh_tstamp
.tv_sec
= tv
.tv_sec
;
1785 hp
->bh_tstamp
.tv_usec
= tv
.tv_usec
;
1786 hp
->bh_datalen
= pktlen
;
1787 hp
->bh_hdrlen
= hdrlen
;
1789 * Copy the packet data into the store buffer and update its length.
/* bh_caplen is set as a side effect of the copy call's third argument */
1791 (*cpfn
)(pkt
, (u_char
*)hp
+ hdrlen
, (hp
->bh_caplen
= totlen
- hdrlen
));
1792 d
->bd_slen
= curlen
+ totlen
;
/*
 * bpf_allocbufs -- allocate the free and store capture buffers for a
 * descriptor, each of d->bd_bufsize bytes (M_WAIT: may block).
 * NOTE(review): the error/success return statements are on lines missing
 * from this extracted view (presumably ENOBUFS on failure -- confirm).
 */
1796 * Initialize all nonzero fields of a descriptor.
1799 bpf_allocbufs(struct bpf_d
*d
)
1801 d
->bd_fbuf
= (caddr_t
) _MALLOC(d
->bd_bufsize
, M_DEVBUF
, M_WAIT
);
1802 if (d
->bd_fbuf
== 0)
1805 d
->bd_sbuf
= (caddr_t
) _MALLOC(d
->bd_bufsize
, M_DEVBUF
, M_WAIT
);
/* second allocation failed: release the first buffer before bailing out */
1806 if (d
->bd_sbuf
== 0) {
1807 FREE(d
->bd_fbuf
, M_DEVBUF
);
/*
 * bpf_freed -- release every buffer owned by a descriptor (store, hold,
 * free buffers) and its installed filter program, if any.
 * NOTE(review): the guard around the filter FREE and some closing braces
 * are on lines missing from this extracted view.
 */
1816 * Free buffers currently in use by a descriptor.
1820 bpf_freed(struct bpf_d
*d
)
1823 * We don't need to lock out interrupts since this descriptor has
1824 * been detached from its interface and it yet hasn't been marked
/* buffers exist only if bd_sbuf was ever allocated; free all three together */
1827 if (d
->bd_sbuf
!= 0) {
1828 FREE(d
->bd_sbuf
, M_DEVBUF
);
1829 if (d
->bd_hbuf
!= 0)
1830 FREE(d
->bd_hbuf
, M_DEVBUF
);
1831 if (d
->bd_fbuf
!= 0)
1832 FREE(d
->bd_fbuf
, M_DEVBUF
);
1835 FREE((caddr_t
)d
->bd_filter
, M_DEVBUF
);
1839 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1840 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1841 * size of the link header (variable length headers not yet supported).
1844 bpfattach(struct ifnet
*ifp
, u_int dlt
, u_int hdrlen
)
1846 bpf_attach(ifp
, dlt
, hdrlen
, NULL
, NULL
);
/*
 * bpf_attach (body) -- allocate and link a new bpf_if entry for the
 * (ifp, dlt) pair, rejecting duplicates, and record the driver's send/tap
 * callbacks. Runs under bpf_mlock.
 * NOTE(review): the function signature, return statements, and some
 * braces are on lines missing from this extracted view; fragments kept
 * byte-identical.
 */
1857 struct bpf_if
*bp_new
;
1858 struct bpf_if
*bp_temp
;
1859 struct bpf_if
*bp_first
= NULL
;
1861 bp_new
= (struct bpf_if
*) _MALLOC(sizeof(*bp_new
), M_DEVBUF
, M_WAIT
);
1865 lck_mtx_lock(bpf_mlock
);
1868 * Check if this interface/dlt is already attached, record first
1869 * attachment for this interface.
1871 for (bp_temp
= bpf_iflist
; bp_temp
&& (bp_temp
->bif_ifp
!= ifp
||
1872 bp_temp
->bif_dlt
!= dlt
); bp_temp
= bp_temp
->bif_next
) {
1873 if (bp_temp
->bif_ifp
== ifp
&& bp_first
== NULL
)
/* loop exited early => exact (ifp, dlt) entry already exists: refuse */
1877 if (bp_temp
!= NULL
) {
1878 printf("bpfattach - %s%d with dlt %d is already attached\n",
1879 ifp
->if_name
, ifp
->if_unit
, dlt
);
1880 FREE(bp_new
, M_DEVBUF
);
1881 lck_mtx_unlock(bpf_mlock
);
/* fresh entry: zero it and fill in interface, dlt, and driver callbacks */
1885 bzero(bp_new
, sizeof(*bp_new
));
1886 bp_new
->bif_ifp
= ifp
;
1887 bp_new
->bif_dlt
= dlt
;
1888 bp_new
->bif_send
= send
;
1889 bp_new
->bif_tap
= tap
;
1891 if (bp_first
== NULL
) {
1892 /* No other entries for this ifp */
1893 bp_new
->bif_next
= bpf_iflist
;
1894 bpf_iflist
= bp_new
;
1897 /* Add this after the first entry for this interface */
1898 bp_new
->bif_next
= bp_first
->bif_next
;
1899 bp_first
->bif_next
= bp_new
;
1903 * Compute the length of the bpf header. This is not necessarily
1904 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1905 * that the network layer header begins on a longword boundary (for
1906 * performance reasons and to alleviate alignment restrictions).
1908 bp_new
->bif_hdrlen
= BPF_WORDALIGN(hdrlen
+ SIZEOF_BPF_HDR
) - hdrlen
;
1910 /* Take a reference on the interface */
1911 ifnet_reference(ifp
);
1913 lck_mtx_unlock(bpf_mlock
);
1917 printf("bpf: %s%d attached\n", ifp
->if_name
, ifp
->if_unit
);
/*
 * bpfdetach -- unlink every bpf_if entry for 'ifp' from bpf_iflist,
 * detaching all descriptors on each entry, under bpf_mlock.
 * NOTE(review): extraction gaps -- the detach call inside the while
 * loop, the bp_prev bookkeeping, 'continue' statements, and the final
 * free/release of bp_free entries are on missing lines.
 */
1924 * Detach bpf from an interface. This involves detaching each descriptor
1925 * associated with the interface, and leaving bd_bif NULL. Notify each
1926 * descriptor as it's detached so that any sleepers wake up and get
1930 bpfdetach(struct ifnet
*ifp
)
1932 struct bpf_if
*bp
, *bp_prev
, *bp_next
;
1933 struct bpf_if
*bp_free
= NULL
;
1937 lck_mtx_lock(bpf_mlock
);
1939 /* Locate BPF interface information */
1941 for (bp
= bpf_iflist
; bp
!= NULL
; bp
= bp_next
) {
1942 bp_next
= bp
->bif_next
;
1943 if (ifp
!= bp
->bif_ifp
) {
/* detach every descriptor still attached to this entry */
1948 while ((d
= bp
->bif_dlist
) != NULL
) {
/* unlink bp from bpf_iflist (middle-of-list vs. head cases) */
1954 bp_prev
->bif_next
= bp
->bif_next
;
1956 bpf_iflist
= bp
->bif_next
;
/* chain the unlinked entry onto the to-be-freed list */
1959 bp
->bif_next
= bp_free
;
1965 lck_mtx_unlock(bpf_mlock
);
/*
 * bpf_init -- one-time driver initialization: allocate the global
 * bpf_mlock mutex (group/attr objects first), register the character
 * device switch entry, and create the /dev/bpfN nodes.
 * NOTE(review): extraction gaps -- the 'if (maj == -1)' failure check,
 * NULL-guards around the lock teardown, gotos/returns, and closing braces
 * are on missing lines; the visible teardown sequence is the error path
 * for a failed cdevsw_add().
 */
1972 bpf_init(__unused
void *unused
)
1978 if (bpf_devsw_installed
== 0) {
1979 bpf_devsw_installed
= 1;
1981 bpf_mlock_grp_attr
= lck_grp_attr_alloc_init();
1983 bpf_mlock_grp
= lck_grp_alloc_init("bpf", bpf_mlock_grp_attr
);
1985 bpf_mlock_attr
= lck_attr_alloc_init();
1987 bpf_mlock
= lck_mtx_alloc_init(bpf_mlock_grp
, bpf_mlock_attr
);
1989 if (bpf_mlock
== 0) {
1990 printf("bpf_init: failed to allocate bpf_mlock\n");
1991 bpf_devsw_installed
= 0;
1995 maj
= cdevsw_add(CDEV_MAJOR
, &bpf_cdevsw
);
/* failure path: tear down the lock machinery in reverse order */
1998 lck_mtx_free(bpf_mlock
, bpf_mlock_grp
);
2000 lck_attr_free(bpf_mlock_attr
);
2002 lck_grp_free(bpf_mlock_grp
);
2003 if (bpf_mlock_grp_attr
)
2004 lck_grp_attr_free(bpf_mlock_grp_attr
);
2007 bpf_mlock_attr
= NULL
;
2008 bpf_mlock_grp
= NULL
;
2009 bpf_mlock_grp_attr
= NULL
;
2010 bpf_devsw_installed
= 0;
2011 printf("bpf_init: failed to allocate a major number!\n");
/* success: create one /dev node per supported BPF unit */
2015 for (i
= 0 ; i
< NBPFILTER
; i
++)
2016 bpf_make_dev_t(maj
);
/* non-Darwin (#else) branch presumably -- the preprocessor lines are missing */
2019 cdevsw_add(&bpf_cdevsw
);
/*
 * Register bpf_drvinit to run at boot during driver initialization
 * (SI_SUB_DRIVERS), ordered after the core cdev setup by CDEV_MAJOR.
 */
2024 SYSINIT(bpfdev
,SI_SUB_DRIVERS
,SI_ORDER_MIDDLE
+CDEV_MAJOR
,bpf_drvinit
,NULL
)
2029 mac_bpfdesc_label_get(struct bpf_d
*d
)
2032 return (d
->bd_label
);
2036 mac_bpfdesc_label_set(struct bpf_d
*d
, struct label
*label
)
2039 d
->bd_label
= label
;