X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/bsd/net/bpf.c?ds=sidebyside diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c index 9f99cabae..66e0c3b54 100644 --- a/bsd/net/bpf.c +++ b/bsd/net/bpf.c @@ -1,16 +1,19 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ - * - * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved. + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this - * file. + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER @@ -20,7 +23,7 @@ * Please see the License for the specific language governing rights and * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1990, 1991, 1993 @@ -63,6 +66,12 @@ * * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $ */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. + */ #include "bpf.h" @@ -84,13 +93,14 @@ #include #include #include +#include +#include +#include -#if defined(sparc) && BSD < 199103 -#include -#endif #include #include +#include #include #include @@ -98,45 +108,65 @@ #include #include +#include +#include +#include +#include +#include +#include +#include #include #include #include - +#include #include #include +#include -#if NBPFILTER > 0 +#include +#include + +#if CONFIG_MACF_NET +#include +#endif /* MAC_NET */ + +extern int tvtohz(struct timeval *); -/* - * Older BSDs don't have kernel malloc. - */ -#if BSD < 199103 -extern bcopy(); -static caddr_t bpf_alloc(); -#include -#define BPF_BUFSIZE (MCLBYTES-8) -#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) -#else #define BPF_BUFSIZE 4096 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) -#endif + #define PRINET 26 /* interruptible */ /* * The default read buffer size is patchable. 
*/ -static int bpf_bufsize = BPF_BUFSIZE; -SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, +static unsigned int bpf_bufsize = BPF_BUFSIZE; +SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED, &bpf_bufsize, 0, ""); -static int bpf_maxbufsize = BPF_MAXBUFSIZE; -SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW, +__private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE; +SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW | CTLFLAG_LOCKED, &bpf_maxbufsize, 0, ""); +static unsigned int bpf_maxdevices = 256; +SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED, + &bpf_maxdevices, 0, ""); +/* + * bpf_wantpktap controls the defaul visibility of DLT_PKTAP + * For OS X is off by default so process need to use the ioctl BPF_WANT_PKTAP + * explicitly to be able to use DLT_PKTAP. + */ +static unsigned int bpf_wantpktap = 0; +SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED, + &bpf_wantpktap, 0, ""); + +static int bpf_debug = 0; +SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, + &bpf_debug, 0, ""); /* * bpf_iflist is the list of interfaces; each corresponds to an ifnet - * bpf_dtab holds the descriptors, indexed by minor device # + * bpf_dtab holds pointer to the descriptors, indexed by minor device # */ static struct bpf_if *bpf_iflist; #ifdef __APPLE__ @@ -144,152 +174,200 @@ static struct bpf_if *bpf_iflist; * BSD now stores the bpf_d in the dev_t which is a struct * on their system. Our dev_t is an int, so we still store * the bpf_d in a separate table indexed by minor device #. + * + * The value stored in bpf_dtab[n] represent three states: + * 0: device not opened + * 1: device opening or closing + * other: device opened with pointer to storage */ -static struct bpf_d bpf_dtab[NBPFILTER]; -static int bpf_dtab_init; -static int nbpfilter = NBPFILTER; -#endif - -static int bpf_allocbufs __P((struct bpf_d *)); -static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp)); -static void bpf_detachd __P((struct bpf_d *d)); -static void bpf_freed __P((struct bpf_d *)); -static void bpf_mcopy __P((const void *, void *, size_t)); -static int bpf_movein __P((struct uio *, int, - struct mbuf **, struct sockaddr *, int *)); -static int bpf_setif __P((struct bpf_d *, struct ifreq *)); -static inline void - bpf_wakeup __P((struct bpf_d *)); -static void catchpacket __P((struct bpf_d *, u_char *, u_int, - u_int, void (*)(const void *, void *, size_t))); -static void reset_d __P((struct bpf_d *)); -static int bpf_setf __P((struct bpf_d *, struct bpf_program *)); +static struct bpf_d **bpf_dtab = NULL; +static unsigned int bpf_dtab_size = 0; +static unsigned int nbpfilter = 0; + +decl_lck_mtx_data(static, bpf_mlock_data); +static lck_mtx_t *bpf_mlock = &bpf_mlock_data; +static lck_grp_t *bpf_mlock_grp; +static lck_grp_attr_t *bpf_mlock_grp_attr; +static lck_attr_t *bpf_mlock_attr; + +static mbuf_tag_id_t bpf_mtag_id; +#endif /* __APPLE__ */ + +static int bpf_allocbufs(struct bpf_d *); +static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp); +static int bpf_detachd(struct bpf_d *d, int); +static void bpf_freed(struct bpf_d *); +static void bpf_mcopy(const void *, void *, size_t); +static int bpf_movein(struct uio *, int, + struct mbuf **, struct sockaddr *, int *); +static int bpf_setif(struct bpf_d *, ifnet_t ifp, u_int32_t dlt); +static void bpf_timed_out(void *, void *); +static void bpf_wakeup(struct bpf_d *); +static void catchpacket(struct bpf_d *, u_char *, struct mbuf *, u_int, + u_int, 
int, void (*)(const void *, void *, size_t)); +static void reset_d(struct bpf_d *); +static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long); +static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *); +static int bpf_setdlt(struct bpf_d *, u_int); +static int bpf_set_traffic_class(struct bpf_d *, int); +static void bpf_set_packet_service_class(struct mbuf *, int); + +static void bpf_acquire_d(struct bpf_d *); +static void bpf_release_d(struct bpf_d *); + +static int bpf_devsw_installed; + +void bpf_init(void *unused); +static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m); /* * Darwin differs from BSD here, the following are static * on BSD and not static on Darwin. */ - d_open_t bpfopen; - d_close_t bpfclose; - d_read_t bpfread; - d_write_t bpfwrite; - d_ioctl_t bpfioctl; - select_fcn_t bpfpoll; + d_open_t bpfopen; + d_close_t bpfclose; + d_read_t bpfread; + d_write_t bpfwrite; + ioctl_fcn_t bpfioctl; + select_fcn_t bpfselect; -#ifdef __APPLE__ -void bpf_mtap(struct ifnet *, struct mbuf *); - -int bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(), - bpfpoll(); -#endif /* Darwin's cdevsw struct differs slightly from BSDs */ #define CDEV_MAJOR 23 static struct cdevsw bpf_cdevsw = { - /* open */ bpfopen, - /* close */ bpfclose, - /* read */ bpfread, - /* write */ bpfwrite, - /* ioctl */ bpfioctl, - /* stop */ nulldev, - /* reset */ nulldev, - /* tty */ NULL, - /* select */ bpfpoll, - /* mmap */ eno_mmap, - /* strategy*/ eno_strat, - /* getc */ eno_getc, - /* putc */ eno_putc, - /* type */ 0 + /* open */ bpfopen, + /* close */ bpfclose, + /* read */ bpfread, + /* write */ bpfwrite, + /* ioctl */ bpfioctl, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* tty */ NULL, + /* select */ bpfselect, + /* mmap */ eno_mmap, + /* strategy*/ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* type */ 0 }; +#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) static int -bpf_movein(uio, linktype, mp, sockp, datlen) - register struct uio *uio; - int linktype, *datlen; - register struct mbuf **mp; - register struct sockaddr *sockp; +bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, struct sockaddr *sockp, int *datlen) { struct mbuf *m; int error; int len; + uint8_t sa_family; int hlen; - /* - * Build a sockaddr based on the data link layer type. - * We do this at this level because the ethernet header - * is copied directly into the data field of the sockaddr. - * In the case of SLIP, there is no header and the packet - * is forwarded as is. - * Also, we are careful to leave room at the front of the mbuf - * for the link level header. - */ switch (linktype) { - + +#if SLIP case DLT_SLIP: - sockp->sa_family = AF_INET; + sa_family = AF_INET; hlen = 0; break; - +#endif /* SLIP */ + case DLT_EN10MB: - sockp->sa_family = AF_UNSPEC; + sa_family = AF_UNSPEC; /* XXX Would MAXLINKHDR be better? */ hlen = sizeof(struct ether_header); break; - + +#if FDDI case DLT_FDDI: -#if defined(__FreeBSD__) || defined(__bsdi__) - sockp->sa_family = AF_IMPLINK; + #if defined(__FreeBSD__) || defined(__bsdi__) + sa_family = AF_IMPLINK; hlen = 0; -#else - sockp->sa_family = AF_UNSPEC; + #else + sa_family = AF_UNSPEC; /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ hlen = 24; -#endif + #endif break; - +#endif /* FDDI */ + case DLT_RAW: case DLT_NULL: - sockp->sa_family = AF_UNSPEC; + sa_family = AF_UNSPEC; hlen = 0; break; - -#ifdef __FreeBSD__ + + #ifdef __FreeBSD__ case DLT_ATM_RFC1483: /* * en atm driver requires 4-byte atm pseudo header. 
* though it isn't standard, vpi:vci needs to be * specified anyway. */ - sockp->sa_family = AF_UNSPEC; + sa_family = AF_UNSPEC; hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ break; -#endif + #endif + case DLT_PPP: - sockp->sa_family = AF_UNSPEC; + sa_family = AF_UNSPEC; hlen = 4; /* This should match PPP_HDRLEN */ break; + + case DLT_APPLE_IP_OVER_IEEE1394: + sa_family = AF_UNSPEC; + hlen = sizeof(struct firewire_header); + break; + + case DLT_IEEE802_11: /* IEEE 802.11 wireless */ + sa_family = AF_IEEE80211; + hlen = 0; + break; + + case DLT_IEEE802_11_RADIO: + sa_family = AF_IEEE80211; + hlen = 0; + break; default: return (EIO); } - len = uio->uio_resid; + // LP64todo - fix this! + len = uio_resid(uio); *datlen = len - hlen; if ((unsigned)len > MCLBYTES) return (EIO); + if (sockp) { + /* + * Build a sockaddr based on the data link layer type. + * We do this at this level because the ethernet header + * is copied directly into the data field of the sockaddr. + * In the case of SLIP, there is no header and the packet + * is forwarded as is. + * Also, we are careful to leave room at the front of the mbuf + * for the link level header. + */ + if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) { + return (EIO); + } + sockp->sa_family = sa_family; + } else { + /* + * We're directly sending the packet data supplied by + * the user; we don't need to make room for the link + * header, and don't need the header length value any + * more, so set it to 0. + */ + hlen = 0; + } + MGETHDR(m, M_WAIT, MT_DATA); if (m == 0) return (ENOBUFS); - if (len > MHLEN) { -#if BSD >= 199103 + if ((unsigned)len > MHLEN) { MCLGET(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { -#else - MCLGET(m); - if (m->m_len != MCLBYTES) { -#endif error = ENOBUFS; goto bad; } @@ -297,60 +375,113 @@ bpf_movein(uio, linktype, mp, sockp, datlen) m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = NULL; *mp = m; + /* * Make room for link header. */ if (hlen != 0) { m->m_pkthdr.len -= hlen; m->m_len -= hlen; -#if BSD >= 199103 m->m_data += hlen; /* XXX */ -#else - m->m_off += hlen; -#endif error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); if (error) goto bad; } error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); - if (!error) - return (0); + if (error) + goto bad; + + /* Check for multicast destination */ + switch (linktype) { + case DLT_EN10MB: { + struct ether_header *eh = mtod(m, struct ether_header *); + + if (ETHER_IS_MULTICAST(eh->ether_dhost)) { + if (_ether_cmp(etherbroadcastaddr, eh->ether_dhost) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + break; + } + } + + return 0; bad: m_freem(m); return (error); } #ifdef __APPLE__ -/* Callback registered with Ethernet driver. */ -int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) + +/* + * The dynamic addition of a new device node must block all processes that + * are opening the last device so that no process will get an unexpected + * ENOENT + */ +static void +bpf_make_dev_t(int maj) { - boolean_t funnel_state; - - funnel_state = thread_funnel_set(network_flock, TRUE); - - /* - * Do nothing if the BPF tap has been turned off. - * This is to protect from a potential race where this - * call blocks on the funnel lock. And in the meantime - * BPF is turned off, which will clear if_bpf. 
- */ - if (ifp->if_bpf) - bpf_mtap(ifp, m); - - thread_funnel_set(network_flock, funnel_state); - return 0; + static int bpf_growing = 0; + unsigned int cur_size = nbpfilter, i; + + if (nbpfilter >= bpf_maxdevices) + return; + + while (bpf_growing) { + /* Wait until new device has been created */ + (void)tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0); + } + if (nbpfilter > cur_size) { + /* other thread grew it already */ + return; + } + bpf_growing = 1; + + /* need to grow bpf_dtab first */ + if (nbpfilter == bpf_dtab_size) { + int new_dtab_size; + struct bpf_d **new_dtab = NULL; + struct bpf_d **old_dtab = NULL; + + new_dtab_size = bpf_dtab_size + NBPFILTER; + new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT); + if (new_dtab == 0) { + printf("bpf_make_dev_t: malloc bpf_dtab failed\n"); + goto done; + } + if (bpf_dtab) { + bcopy(bpf_dtab, new_dtab, + sizeof(struct bpf_d *) * bpf_dtab_size); + } + bzero(new_dtab + bpf_dtab_size, + sizeof(struct bpf_d *) * NBPFILTER); + old_dtab = bpf_dtab; + bpf_dtab = new_dtab; + bpf_dtab_size = new_dtab_size; + if (old_dtab != NULL) + _FREE(old_dtab, M_DEVBUF); + } + i = nbpfilter++; + (void) devfs_make_node(makedev(maj, i), + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, + "bpf%d", i); +done: + bpf_growing = 0; + wakeup((caddr_t)&bpf_growing); } + #endif /* * Attach file to the bpf interface, i.e. make d listen on bp. - * Must be called at splimp. */ -static void -bpf_attachd(d, bp) - struct bpf_d *d; - struct bpf_if *bp; +static errno_t +bpf_attachd(struct bpf_d *d, struct bpf_if *bp) { + int first = bp->bif_dlist == NULL; + int error = 0; + /* * Point d at bp, and add d to the interface's list of listeners. * Finally, point the driver's bpf cookie at the interface so @@ -360,127 +491,327 @@ bpf_attachd(d, bp) d->bd_next = bp->bif_dlist; bp->bif_dlist = d; - bp->bif_ifp->if_bpf = bp; + /* + * Take a reference on the device even if an error is returned + * because we keep the device in the interface's list of listeners + */ + bpf_acquire_d(d); + + if (first) { + /* Find the default bpf entry for this ifp */ + if (bp->bif_ifp->if_bpf == NULL) { + struct bpf_if *tmp, *primary = NULL; + + for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) { + if (tmp->bif_ifp != bp->bif_ifp) + continue; + primary = tmp; + /* + * Make DLT_PKTAP only if process knows how + * to deal with it, otherwise find another one + */ + if (tmp->bif_dlt == DLT_PKTAP && + !(d->bd_flags & BPF_WANT_PKTAP)) + continue; + break; + } + bp->bif_ifp->if_bpf = primary; + } + + /* Only call dlil_set_bpf_tap for primary dlt */ + if (bp->bif_ifp->if_bpf == bp) + dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); + + if (bp->bif_tap) + error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT); + } + + /* + * Reset the detach flags in case we previously detached an interface + */ + d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED); -#ifdef __APPLE__ - if (bp->bif_ifp->if_set_bpf_tap) - (*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); -#endif + if (bp->bif_ifp->if_bpf != NULL && + bp->bif_ifp->if_bpf->bif_dlt == DLT_PKTAP) + d->bd_flags |= BPF_FINALIZE_PKTAP; + else + d->bd_flags &= ~BPF_FINALIZE_PKTAP; + + return error; } /* * Detach a file from its interface. 
+ * + * Return 1 if was closed by some thread, 0 otherwise */ -static void -bpf_detachd(d) - struct bpf_d *d; +static int +bpf_detachd(struct bpf_d *d, int closing) { struct bpf_d **p; struct bpf_if *bp; -#ifdef __APPLE__ struct ifnet *ifp; + /* + * Some other thread already detached + */ + if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) + goto done; + /* + * This thread is doing the detach + */ + d->bd_flags |= BPF_DETACHING; + ifp = d->bd_bif->bif_ifp; + bp = d->bd_bif; -#endif + if (bpf_debug != 0) + printf("%s: %llx %s%s\n", + __func__, (uint64_t)VM_KERNEL_ADDRPERM(d), + if_name(ifp), closing ? " closing" : ""); - bp = d->bd_bif; + /* Remove d from the interface's descriptor list. */ + p = &bp->bif_dlist; + while (*p != d) { + p = &(*p)->bd_next; + if (*p == 0) + panic("bpf_detachd: descriptor not in list"); + } + *p = (*p)->bd_next; + if (bp->bif_dlist == 0) { + /* + * Let the driver know that there are no more listeners. + */ + /* Only call dlil_set_bpf_tap for primary dlt */ + if (bp->bif_ifp->if_bpf == bp) + dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL); + if (bp->bif_tap) + bp->bif_tap(ifp, bp->bif_dlt, BPF_TAP_DISABLE); + + for (bp = bpf_iflist; bp; bp = bp->bif_next) + if (bp->bif_ifp == ifp && bp->bif_dlist != 0) + break; + if (bp == NULL) + ifp->if_bpf = NULL; + } + d->bd_bif = NULL; /* * Check if this descriptor had requested promiscuous mode. * If so, turn it off. */ if (d->bd_promisc) { d->bd_promisc = 0; - if (ifpromisc(bp->bif_ifp, 0)) + lck_mtx_unlock(bpf_mlock); + if (ifnet_set_promiscuous(ifp, 0)) { /* * Something is really wrong if we were able to put * the driver into promiscuous mode, but can't * take it out. * Most likely the network interface is gone. */ - printf("bpf: ifpromisc failed"); - } - /* Remove d from the interface's descriptor list. */ - p = &bp->bif_dlist; - while (*p != d) { - p = &(*p)->bd_next; - if (*p == 0) - panic("bpf_detachd: descriptor not in list"); + printf("%s: ifnet_set_promiscuous failed\n", __func__); + } + lck_mtx_lock(bpf_mlock); } - *p = (*p)->bd_next; - if (bp->bif_dlist == 0) { + + /* + * Wake up other thread that are waiting for this thread to finish + * detaching + */ + d->bd_flags &= ~BPF_DETACHING; + d->bd_flags |= BPF_DETACHED; + /* + * Note that We've kept the reference because we may have dropped + * the lock when turning off promiscuous mode + */ + bpf_release_d(d); + +done: + /* + * When closing makes sure no other thread refer to the bpf_d + */ + if (bpf_debug != 0) + printf("%s: %llx done\n", + __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); + /* + * Let the caller know the bpf_d is closed + */ + if ((d->bd_flags & BPF_CLOSING)) + return (1); + else + return (0); +} + + +/* + * Start asynchronous timer, if necessary. + * Must be called with bpf_mlock held. + */ +static void +bpf_start_timer(struct bpf_d *d) +{ + uint64_t deadline; + struct timeval tv; + + if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { + tv.tv_sec = d->bd_rtout / hz; + tv.tv_usec = (d->bd_rtout % hz) * tick; + + clock_interval_to_deadline( + (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec, + NSEC_PER_USEC, &deadline); /* - * Let the driver know that there are no more listeners. + * The state is BPF_IDLE, so the timer hasn't + * been started yet, and hasn't gone off yet; + * there is no thread call scheduled, so this + * won't change the schedule. + * + * XXX - what if, by the time it gets entered, + * the deadline has already passed? 
*/ - if (ifp->if_set_bpf_tap) - (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0); - d->bd_bif->bif_ifp->if_bpf = 0; + thread_call_enter_delayed(d->bd_thread_call, deadline); + d->bd_state = BPF_WAITING; } - d->bd_bif = 0; } - -#ifdef __APPLE__ /* - * Mark a descriptor free by making it point to itself. - * This is probably cheaper than marking with a constant since - * the address should be in a register anyway. + * Cancel asynchronous timer. + * Must be called with bpf_mlock held. */ -#define D_ISFREE(d) ((d) == (d)->bd_next) -#define D_MARKFREE(d) ((d)->bd_next = (d)) -#define D_MARKUSED(d) ((d)->bd_next = 0) -#endif +static boolean_t +bpf_stop_timer(struct bpf_d *d) +{ + /* + * If the timer has already gone off, this does nothing. + * Our caller is expected to set d->bd_state to BPF_IDLE, + * with the bpf_mlock, after we are called. bpf_timed_out() + * also grabs bpf_mlock, so, if the timer has gone off and + * bpf_timed_out() hasn't finished, it's waiting for the + * lock; when this thread releases the lock, it will + * find the state is BPF_IDLE, and just release the + * lock and return. + */ + return (thread_call_cancel(d->bd_thread_call)); +} + +void +bpf_acquire_d(struct bpf_d *d) +{ + void *lr_saved = __builtin_return_address(0); + + lck_mtx_assert(bpf_mlock, LCK_MTX_ASSERT_OWNED); + + d->bd_refcnt += 1; + + d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved; + d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST; +} + +void +bpf_release_d(struct bpf_d *d) +{ + void *lr_saved = __builtin_return_address(0); + + lck_mtx_assert(bpf_mlock, LCK_MTX_ASSERT_OWNED); + + if (d->bd_refcnt <= 0) + panic("%s: %p refcnt <= 0", __func__, d); + + d->bd_refcnt -= 1; + + d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved; + d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST; + + if (d->bd_refcnt == 0) { + /* Assert the device is detached */ + if ((d->bd_flags & BPF_DETACHED) == 0) + panic("%s: %p BPF_DETACHED not set", __func__, d); + + _FREE(d, M_DEVBUF); + } +} + /* * Open ethernet device. Returns ENXIO for illegal minor device number, * EBUSY if file is open by another process. */ /* ARGSUSED */ - int -bpfopen(dev, flags, fmt, p) - dev_t dev; - int flags; - int fmt; - struct proc *p; +int +bpfopen(dev_t dev, int flags, __unused int fmt, + __unused struct proc *p) { - register struct bpf_d *d; + struct bpf_d *d; -#ifdef __APPLE__ - if (minor(dev) >= nbpfilter) + lck_mtx_lock(bpf_mlock); + if ((unsigned int) minor(dev) >= nbpfilter) { + lck_mtx_unlock(bpf_mlock); return (ENXIO); + } + /* + * New device nodes are created on demand when opening the last one. + * The programming model is for processes to loop on the minor starting at 0 + * as long as EBUSY is returned. The loop stops when either the open succeeds or + * an error other that EBUSY is returned. That means that bpf_make_dev_t() must + * block all processes that are opening the last node. If not all + * processes are blocked, they could unexpectedly get ENOENT and abort their + * opening loop. + */ + if ((unsigned int) minor(dev) == (nbpfilter - 1)) + bpf_make_dev_t(major(dev)); - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - - d = &bpf_dtab[minor(dev)]; -#else - if (p->p_prison) - return (EPERM); - - d = dev->si_drv1; -#endif /* * Each minor can be opened by only one process. If the requested * minor is in use, return EBUSY. 
+ * + * Important: bpfopen() and bpfclose() have to check and set the status of a device + * in the same lockin context otherwise the device may be leaked because the vnode use count + * will be unpextectly greater than 1 when close() is called. */ -#ifdef __APPLE__ - if (!D_ISFREE(d)) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (EBUSY); - } - - /* Mark "free" and do most initialization. */ - bzero((char *)d, sizeof(*d)); -#else - if (d) + if (bpf_dtab[minor(dev)] == 0) { + bpf_dtab[minor(dev)] = (void *)1; /* Mark opening */ + } else { + lck_mtx_unlock(bpf_mlock); return (EBUSY); - make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev)); - MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK); - bzero(d, sizeof(*d)); - dev->si_drv1 = d; -#endif + } + d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, + M_WAIT | M_ZERO); + if (d == NULL) { + /* this really is a catastrophic failure */ + printf("bpfopen: malloc bpf_d failed\n"); + bpf_dtab[minor(dev)] = NULL; + lck_mtx_unlock(bpf_mlock); + return ENOMEM; + } + + /* Mark "in use" and do most initialization. */ + bpf_acquire_d(d); d->bd_bufsize = bpf_bufsize; d->bd_sig = SIGIO; d->bd_seesent = 1; - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + d->bd_oflags = flags; + d->bd_state = BPF_IDLE; + d->bd_traffic_class = SO_TC_BE; + d->bd_flags |= BPF_DETACHED; + if (bpf_wantpktap) + d->bd_flags |= BPF_WANT_PKTAP; + else + d->bd_flags &= ~BPF_WANT_PKTAP; + d->bd_thread_call = thread_call_allocate(bpf_timed_out, d); + if (d->bd_thread_call == NULL) { + printf("bpfopen: malloc thread call failed\n"); + bpf_dtab[minor(dev)] = NULL; + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + + return (ENOMEM); + } +#if CONFIG_MACF_NET + mac_bpfdesc_label_init(d); + mac_bpfdesc_label_associate(kauth_cred_get(), d); +#endif + bpf_dtab[minor(dev)] = d; /* Mark opened */ + lck_mtx_unlock(bpf_mlock); + return (0); } @@ -489,124 +820,198 @@ bpfopen(dev, flags, fmt, p) * deallocating its buffers, and marking it free. */ /* ARGSUSED */ - int -bpfclose(dev, flags, fmt, p) - dev_t dev; - int flags; - int fmt; - struct proc *p; +int +bpfclose(dev_t dev, __unused int flags, __unused int fmt, + __unused struct proc *p) { - register struct bpf_d *d; - register int s; + struct bpf_d *d; - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + /* Take BPF lock to ensure no other thread is using the device */ + lck_mtx_lock(bpf_mlock); -#ifndef __APPLE__ - funsetown(d->bd_sigio); -#endif - s = splimp(); -#ifdef __APPLE__ - d = &bpf_dtab[minor(dev)]; -#endif - if (d->bd_bif) - bpf_detachd(d); - splx(s); -#ifdef __APPLE__ - selthreadclear(&d->bd_sel); -#endif - bpf_freed(d); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (0); -} + d = bpf_dtab[minor(dev)]; + if (d == 0 || d == (void *)1) { + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } -/* - * Support for SunOS, which does not have tsleep. 
- */ -#if BSD < 199103 -static -bpf_timeout(arg) - caddr_t arg; -{ - boolean_t funnel_state; - struct bpf_d *d = (struct bpf_d *)arg; - funnel_state = thread_funnel_set(network_flock, TRUE); - d->bd_timedout = 1; - wakeup(arg); - (void) thread_funnel_set(network_flock, FALSE); -} + /* + * Other threads may call bpd_detachd() if we drop the bpf_mlock + */ + d->bd_flags |= BPF_CLOSING; -#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) + if (bpf_debug != 0) + printf("%s: %llx\n", + __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); -int -bpf_sleep(d) - register struct bpf_d *d; -{ - register int rto = d->bd_rtout; - register int st; + bpf_dtab[minor(dev)] = (void *)1; /* Mark closing */ - if (rto != 0) { - d->bd_timedout = 0; - timeout(bpf_timeout, (caddr_t)d, rto); - } - st = sleep((caddr_t)d, PRINET|PCATCH); - if (rto != 0) { - if (d->bd_timedout == 0) - untimeout(bpf_timeout, (caddr_t)d); - else if (st == 0) - return EWOULDBLOCK; - } - return (st != 0) ? EINTR : 0; -} -#else -#define BPF_SLEEP tsleep -#endif + /* + * Deal with any in-progress timeouts. + */ + switch (d->bd_state) { + case BPF_IDLE: + /* + * Not waiting for a timeout, and no timeout happened. + */ + break; -/* - * Rotate the packet buffers in descriptor d. Move the store buffer - * into the hold slot, and the free buffer into the store slot. - * Zero the length of the new store buffer. - */ -#define ROTATE_BUFFERS(d) \ - (d)->bd_hbuf = (d)->bd_sbuf; \ - (d)->bd_hlen = (d)->bd_slen; \ + case BPF_WAITING: + /* + * Waiting for a timeout. + * Cancel any timer that has yet to go off, + * and mark the state as "closing". + * Then drop the lock to allow any timers that + * *have* gone off to run to completion, and wait + * for them to finish. + */ + if (!bpf_stop_timer(d)) { + /* + * There was no pending call, so the call must + * have been in progress. Wait for the call to + * complete; we have to drop the lock while + * waiting. to let the in-progrss call complete + */ + d->bd_state = BPF_DRAINING; + while (d->bd_state == BPF_DRAINING) + msleep((caddr_t)d, bpf_mlock, PRINET, + "bpfdraining", NULL); + } + d->bd_state = BPF_IDLE; + break; + + case BPF_TIMED_OUT: + /* + * Timer went off, and the timeout routine finished. + */ + d->bd_state = BPF_IDLE; + break; + + case BPF_DRAINING: + /* + * Another thread is blocked on a close waiting for + * a timeout to finish. + * This "shouldn't happen", as the first thread to enter + * bpfclose() will set bpf_dtab[minor(dev)] to 1, and + * all subsequent threads should see that and fail with + * ENXIO. + */ + panic("Two threads blocked in a BPF close"); + break; + } + + if (d->bd_bif) + bpf_detachd(d, 1); + selthreadclear(&d->bd_sel); +#if CONFIG_MACF_NET + mac_bpfdesc_label_destroy(d); +#endif + thread_call_free(d->bd_thread_call); + + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + bpf_freed(d); + + /* Mark free in same context as bpfopen comes to check */ + bpf_dtab[minor(dev)] = NULL; /* Mark closed */ + + bpf_release_d(d); + + lck_mtx_unlock(bpf_mlock); + + return (0); +} + + +#define BPF_SLEEP bpf_sleep + +static int +bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo) +{ + u_int64_t abstime = 0; + + if(timo) + clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); + + return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime); +} + +/* + * Rotate the packet buffers in descriptor d. Move the store buffer + * into the hold slot, and the free buffer into the store slot. + * Zero the length of the new store buffer. 
+ */ +#define ROTATE_BUFFERS(d) \ + if (d->bd_hbuf_read) \ + panic("rotating bpf buffers during read"); \ + (d)->bd_hbuf = (d)->bd_sbuf; \ + (d)->bd_hlen = (d)->bd_slen; \ + (d)->bd_hcnt = (d)->bd_scnt; \ (d)->bd_sbuf = (d)->bd_fbuf; \ (d)->bd_slen = 0; \ - (d)->bd_fbuf = 0; + (d)->bd_scnt = 0; \ + (d)->bd_fbuf = NULL; /* * bpfread - read next chunk of packets from buffers */ - int -bpfread(dev, uio, ioflag) - dev_t dev; - struct uio *uio; - int ioflag; +int +bpfread(dev_t dev, struct uio *uio, int ioflag) { - register struct bpf_d *d; + struct bpf_d *d; + caddr_t hbuf; + int timed_out, hbuf_len; int error; - int s; + int flags; - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; + lck_mtx_lock(bpf_mlock); + + d = bpf_dtab[minor(dev)]; + if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) { + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + bpf_acquire_d(d); /* * Restrict application to use a buffer the same size as * as kernel buffers. */ - if (uio->uio_resid != d->bd_bufsize) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (uio_resid(uio) != d->bd_bufsize) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (EINVAL); } + + if (d->bd_state == BPF_WAITING) + bpf_stop_timer(d); + + timed_out = (d->bd_state == BPF_TIMED_OUT); + d->bd_state = BPF_IDLE; - s = splimp(); + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } /* * If the hold buffer is empty, then do a timed sleep, which * ends when the timeout expires or when enough packets * have arrived to fill the store buffer. */ while (d->bd_hbuf == 0) { - if (d->bd_immediate && d->bd_slen != 0) { + if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY)) + && d->bd_slen != 0) { /* - * A packet(s) either arrived since the previous + * We're in immediate mode, or are reading + * in non-blocking mode, or a timer was + * started before the read (e.g., by select() + * or poll()) and has expired and a packet(s) + * either arrived since the previous * read or arrived while we were asleep. * Rotate the buffers and return what's here. */ @@ -621,19 +1026,57 @@ bpfread(dev, uio, ioflag) * it before using it again. */ if (d->bd_bif == NULL) { - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (ENXIO); } - - if (ioflag & IO_NDELAY) - error = EWOULDBLOCK; - else - error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", - d->bd_rtout); + if (ioflag & IO_NDELAY) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (EWOULDBLOCK); + } + error = BPF_SLEEP(d, PRINET|PCATCH, "bpf", + d->bd_rtout); + /* + * Make sure device is still opened + */ + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + if (error == EINTR || error == ERESTART) { - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (d->bd_hbuf) { + /* + * Because we msleep, the hold buffer might + * be filled when we wake up. Avoid rotating + * in this case. + */ + break; + } + if (d->bd_slen) { + /* + * Sometimes we may be interrupted often and + * the sleep above will not timeout. 
+ * Regardless, we should rotate the buffers + * if there's any new data pending and + * return it. + */ + ROTATE_BUFFERS(d); + break; + } + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (error); } if (error == EWOULDBLOCK) { @@ -651,8 +1094,8 @@ bpfread(dev, uio, ioflag) break; if (d->bd_slen == 0) { - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (0); } ROTATE_BUFFERS(d); @@ -662,121 +1105,310 @@ bpfread(dev, uio, ioflag) /* * At this point, we know we have something in the hold slot. */ - splx(s); + + /* + * Set the hold buffer read. So we do not + * rotate the buffers until the hold buffer + * read is complete. Also to avoid issues resulting + * from page faults during disk sleep (). + */ + d->bd_hbuf_read = 1; + hbuf = d->bd_hbuf; + hbuf_len = d->bd_hlen; + flags = d->bd_flags; + lck_mtx_unlock(bpf_mlock); + +#ifdef __APPLE__ + /* + * Before we move data to userland, we fill out the extended + * header fields. + */ + if (flags & BPF_EXTENDED_HDR) { + char *p; + + p = hbuf; + while (p < hbuf + hbuf_len) { + struct bpf_hdr_ext *ehp; + uint32_t flowid; + struct so_procinfo soprocinfo; + int found = 0; + + ehp = (struct bpf_hdr_ext *)(void *)p; + if ((flowid = ehp->bh_flowid)) { + if (ehp->bh_proto == IPPROTO_TCP) + found = inp_findinpcb_procinfo(&tcbinfo, + flowid, &soprocinfo); + else if (ehp->bh_proto == IPPROTO_UDP) + found = inp_findinpcb_procinfo(&udbinfo, + flowid, &soprocinfo); + if (found == 1) { + ehp->bh_pid = soprocinfo.spi_pid; + proc_name(ehp->bh_pid, ehp->bh_comm, MAXCOMLEN); + } + ehp->bh_flowid = 0; + } + if (flags & BPF_FINALIZE_PKTAP) { + struct pktap_header *pktaphdr; + + pktaphdr = (struct pktap_header *)(void *) + (p + BPF_WORDALIGN(ehp->bh_hdrlen)); + + if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) + pktap_finalize_proc_info(pktaphdr); + + if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) { + ehp->bh_tstamp.tv_sec = + pktaphdr->pth_tstamp.tv_sec; + ehp->bh_tstamp.tv_usec = + pktaphdr->pth_tstamp.tv_usec; + } + } + p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen); + } + } else if (flags & BPF_FINALIZE_PKTAP) { + char *p; + + p = hbuf; + while (p < hbuf + hbuf_len) { + struct bpf_hdr *hp; + struct pktap_header *pktaphdr; + + hp = (struct bpf_hdr *)(void *)p; + pktaphdr = (struct pktap_header *)(void *) + (p + BPF_WORDALIGN(hp->bh_hdrlen)); + + if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) + pktap_finalize_proc_info(pktaphdr); + + if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) { + hp->bh_tstamp.tv_sec = + pktaphdr->pth_tstamp.tv_sec; + hp->bh_tstamp.tv_usec = + pktaphdr->pth_tstamp.tv_usec; + } + + p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen); + } + } +#endif /* * Move data from hold buffer into user space. * We know the entire buffer is transferred since * we checked above that the read buffer is bpf_bufsize bytes. */ - error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); - - s = splimp(); + error = UIOMOVE(hbuf, hbuf_len, UIO_READ, uio); + + lck_mtx_lock(bpf_mlock); + /* + * Make sure device is still opened + */ + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + d->bd_hbuf_read = 0; d->bd_fbuf = d->bd_hbuf; - d->bd_hbuf = 0; + d->bd_hbuf = NULL; d->bd_hlen = 0; - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + d->bd_hcnt = 0; + wakeup((caddr_t)d); + + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (error); + } /* * If there are processes sleeping on this descriptor, wake them up. 
*/ -static inline void -bpf_wakeup(d) - register struct bpf_d *d; +static void +bpf_wakeup(struct bpf_d *d) { + if (d->bd_state == BPF_WAITING) { + bpf_stop_timer(d); + d->bd_state = BPF_IDLE; + } wakeup((caddr_t)d); if (d->bd_async && d->bd_sig && d->bd_sigio) - pgsigio(d->bd_sigio, d->bd_sig, 0); + pgsigio(d->bd_sigio, d->bd_sig); -#if BSD >= 199103 selwakeup(&d->bd_sel); -#ifndef __APPLE__ - /* XXX */ - d->bd_sel.si_pid = 0; -#endif -#else - if (d->bd_selproc) { - selwakeup(d->bd_selproc, (int)d->bd_selcoll); - d->bd_selcoll = 0; - d->bd_selproc = 0; + if ((d->bd_flags & BPF_KNOTE)) + KNOTE(&d->bd_sel.si_note, 1); +} + + +static void +bpf_timed_out(void *arg, __unused void *dummy) +{ + struct bpf_d *d = (struct bpf_d *)arg; + + lck_mtx_lock(bpf_mlock); + if (d->bd_state == BPF_WAITING) { + /* + * There's a select or kqueue waiting for this; if there's + * now stuff to read, wake it up. + */ + d->bd_state = BPF_TIMED_OUT; + if (d->bd_slen != 0) + bpf_wakeup(d); + } else if (d->bd_state == BPF_DRAINING) { + /* + * A close is waiting for this to finish. + * Mark it as finished, and wake the close up. + */ + d->bd_state = BPF_IDLE; + bpf_wakeup(d); } -#endif + lck_mtx_unlock(bpf_mlock); } + + + - int -bpfwrite(dev, uio, ioflag) - dev_t dev; - struct uio *uio; - int ioflag; + +/* keep in sync with bpf_movein above: */ +#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) + +int +bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) { - register struct bpf_d *d; + struct bpf_d *d; struct ifnet *ifp; - struct mbuf *m; - int error, s; - static struct sockaddr dst; - int datlen; + struct mbuf *m = NULL; + int error; + char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; + int datlen = 0; + int bif_dlt; + int bd_hdrcmplt; + + lck_mtx_lock(bpf_mlock); + + d = bpf_dtab[minor(dev)]; + if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) { + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + bpf_acquire_d(d); - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; - if (d->bd_bif == 0) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (ENXIO); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); } ifp = d->bd_bif->bif_ifp; - if (uio->uio_resid == 0) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (0); + if ((ifp->if_flags & IFF_UP) == 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENETDOWN); + } + if (uio_resid(uio) == 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (0); } + ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); + + /* + * fix for PR-6849527 + * geting variables onto stack before dropping lock for bpf_movein() + */ + bif_dlt = (int)d->bd_bif->bif_dlt; + bd_hdrcmplt = d->bd_hdrcmplt; + + /* bpf_movein allocating mbufs; drop lock */ + lck_mtx_unlock(bpf_mlock); + + error = bpf_movein(uio, bif_dlt, &m, + bd_hdrcmplt ? 
NULL : (struct sockaddr *)dst_buf, + &datlen); - error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); + /* take the lock again */ + lck_mtx_lock(bpf_mlock); if (error) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (error); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (error); } - if (datlen > ifp->if_mtu) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (EMSGSIZE); + /* verify the device is still open */ + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + m_freem(m); + return (ENXIO); + } + + if (d->bd_bif == NULL) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + m_free(m); + return (ENXIO); + } + + if ((unsigned)datlen > ifp->if_mtu) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + m_freem(m); + return (EMSGSIZE); } - if (d->bd_hdrcmplt) - dst.sa_family = pseudo_AF_HDRCMPLT; - s = splnet(); +#if CONFIG_MACF_NET + mac_mbuf_label_associate_bpfdesc(d, m); +#endif + + bpf_set_packet_service_class(m, d->bd_traffic_class); - error = dlil_output(ifp->if_data.default_proto, m, - (caddr_t) 0, &dst, 0); + lck_mtx_unlock(bpf_mlock); - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); /* * The driver frees the mbuf. */ + if (d->bd_hdrcmplt) { + if (d->bd_bif->bif_send) + error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m); + else + error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL); + } else { + error = dlil_output(ifp, PF_INET, m, NULL, + (struct sockaddr *)dst_buf, 0, NULL); + } + + lck_mtx_lock(bpf_mlock); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (error); } /* * Reset a descriptor by flushing its packet buffer and clearing the - * receive and drop counts. Should be called at splimp. + * receive and drop counts. */ static void -reset_d(d) - struct bpf_d *d; +reset_d(struct bpf_d *d) { + if (d->bd_hbuf_read) + panic("resetting buffers during read"); + if (d->bd_hbuf) { /* Free the hold buffer. */ d->bd_fbuf = d->bd_hbuf; - d->bd_hbuf = 0; + d->bd_hbuf = NULL; } d->bd_slen = 0; d->bd_hlen = 0; + d->bd_scnt = 0; + d->bd_hcnt = 0; d->bd_rcount = 0; d->bd_dcount = 0; } @@ -800,22 +1432,35 @@ reset_d(d) * BIOCSHDRCMPLT Set "header already complete" flag * BIOCGSEESENT Get "see packets sent" flag * BIOCSSEESENT Set "see packets sent" flag + * BIOCSETTC Set traffic class. + * BIOCGETTC Get traffic class. + * BIOCSEXTHDR Set "extended header" flag + * BIOCSHEADDROP Drop head of the buffer if user is not reading + * BIOCGHEADDROP Get "head-drop" flag */ /* ARGSUSED */ int -bpfioctl(dev, cmd, addr, flags, p) - dev_t dev; - u_long cmd; - caddr_t addr; - int flags; - struct proc *p; +bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, + struct proc *p) { - register struct bpf_d *d; - int s, error = 0; + struct bpf_d *d; + int error = 0; + u_int int_arg; + struct ifreq ifr; + + lck_mtx_lock(bpf_mlock); + + d = bpf_dtab[minor(dev)]; + if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) { + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + bpf_acquire_d(d); - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; + if (d->bd_state == BPF_WAITING) + bpf_stop_timer(d); + d->bd_state = BPF_IDLE; switch (cmd) { @@ -826,21 +1471,19 @@ bpfioctl(dev, cmd, addr, flags, p) /* * Check for read packet available. 
*/ - case FIONREAD: + case FIONREAD: /* int */ { int n; - s = splimp(); n = d->bd_slen; - if (d->bd_hbuf) + if (d->bd_hbuf && d->bd_hbuf_read == 0) n += d->bd_hlen; - splx(s); - *(int *)addr = n; + bcopy(&n, addr, sizeof (n)); break; } - case SIOCGIFADDR: + case SIOCGIFADDR: /* struct ifreq */ { struct ifnet *ifp; @@ -848,7 +1491,7 @@ bpfioctl(dev, cmd, addr, flags, p) error = EINVAL; else { ifp = d->bd_bif->bif_ifp; - error = (*ifp->if_ioctl)(ifp, cmd, addr); + error = ifnet_ioctl(ifp, 0, cmd, addr); } break; } @@ -856,45 +1499,71 @@ bpfioctl(dev, cmd, addr, flags, p) /* * Get buffer len [for read()]. */ - case BIOCGBLEN: - *(u_int *)addr = d->bd_bufsize; + case BIOCGBLEN: /* u_int */ + bcopy(&d->bd_bufsize, addr, sizeof (u_int)); break; /* * Set buffer length. */ - case BIOCSBLEN: -#if BSD < 199103 - error = EINVAL; -#else + case BIOCSBLEN: /* u_int */ if (d->bd_bif != 0) error = EINVAL; else { - register u_int size = *(u_int *)addr; + u_int size; + + bcopy(addr, &size, sizeof (size)); - if (size > bpf_maxbufsize) - *(u_int *)addr = size = bpf_maxbufsize; + /* + * Allow larger buffer in head drop mode with the + * assumption the capture is in standby mode to + * keep a cache of recent traffic + */ + if (d->bd_headdrop != 0 && size > 2 * bpf_maxbufsize) + size = 2 * bpf_maxbufsize; + else if (size > bpf_maxbufsize) + size = bpf_maxbufsize; else if (size < BPF_MINBUFSIZE) - *(u_int *)addr = size = BPF_MINBUFSIZE; + size = BPF_MINBUFSIZE; + bcopy(&size, addr, sizeof (size)); d->bd_bufsize = size; } -#endif break; /* * Set link layer read filter. */ - case BIOCSETF: - error = bpf_setf(d, (struct bpf_program *)addr); + case BIOCSETF32: + case BIOCSETFNR32: { /* struct bpf_program32 */ + struct bpf_program32 prg32; + + bcopy(addr, &prg32, sizeof (prg32)); + error = bpf_setf(d, prg32.bf_len, + CAST_USER_ADDR_T(prg32.bf_insns), cmd); + break; + } + + case BIOCSETF64: + case BIOCSETFNR64: { /* struct bpf_program64 */ + struct bpf_program64 prg64; + + bcopy(addr, &prg64, sizeof (prg64)); + error = bpf_setf(d, prg64.bf_len, prg64.bf_insns, cmd); break; + } /* * Flush read packet buffer. */ case BIOCFLUSH: - s = splimp(); + while (d->bd_hbuf_read) { + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } + if ((d->bd_flags & BPF_CLOSING) != 0) { + error = ENXIO; + break; + } reset_d(d); - splx(s); break; /* @@ -908,136 +1577,222 @@ bpfioctl(dev, cmd, addr, flags, p) error = EINVAL; break; } - s = splimp(); if (d->bd_promisc == 0) { - error = ifpromisc(d->bd_bif->bif_ifp, 1); + lck_mtx_unlock(bpf_mlock); + error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1); + lck_mtx_lock(bpf_mlock); if (error == 0) d->bd_promisc = 1; } - splx(s); break; /* * Get device parameters. */ - case BIOCGDLT: + case BIOCGDLT: /* u_int */ if (d->bd_bif == 0) error = EINVAL; else - *(u_int *)addr = d->bd_bif->bif_dlt; + bcopy(&d->bd_bif->bif_dlt, addr, sizeof (u_int)); + break; + + /* + * Get a list of supported data link types. + */ + case BIOCGDLTLIST: /* struct bpf_dltlist */ + if (d->bd_bif == NULL) { + error = EINVAL; + } else { + error = bpf_getdltlist(d, addr, p); + } + break; + + /* + * Set data link type. + */ + case BIOCSDLT: /* u_int */ + if (d->bd_bif == NULL) { + error = EINVAL; + } else { + u_int dlt; + + bcopy(addr, &dlt, sizeof (dlt)); + error = bpf_setdlt(d, dlt); + } break; /* * Get interface name. 
*/ - case BIOCGETIF: + case BIOCGETIF: /* struct ifreq */ if (d->bd_bif == 0) error = EINVAL; else { struct ifnet *const ifp = d->bd_bif->bif_ifp; - struct ifreq *const ifr = (struct ifreq *)addr; - snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), - "%s%d", ifp->if_name, ifp->if_unit); + snprintf(((struct ifreq *)(void *)addr)->ifr_name, + sizeof (ifr.ifr_name), "%s", if_name(ifp)); } break; /* * Set interface. */ - case BIOCSETIF: - error = bpf_setif(d, (struct ifreq *)addr); + case BIOCSETIF: { /* struct ifreq */ + ifnet_t ifp; + + bcopy(addr, &ifr, sizeof (ifr)); + ifr.ifr_name[IFNAMSIZ - 1] = '\0'; + ifp = ifunit(ifr.ifr_name); + if (ifp == NULL) + error = ENXIO; + else + error = bpf_setif(d, ifp, 0); break; + } /* * Set read timeout. */ - case BIOCSRTIMEOUT: - { - struct timeval *tv = (struct timeval *)addr; + case BIOCSRTIMEOUT32: { /* struct user32_timeval */ + struct user32_timeval _tv; + struct timeval tv; - /* - * Subtract 1 tick from tvtohz() since this isn't - * a one-shot timer. - */ - if ((error = itimerfix(tv)) == 0) - d->bd_rtout = tvtohz(tv) - 1; - break; - } + bcopy(addr, &_tv, sizeof (_tv)); + tv.tv_sec = _tv.tv_sec; + tv.tv_usec = _tv.tv_usec; + + /* + * Subtract 1 tick from tvtohz() since this isn't + * a one-shot timer. + */ + if ((error = itimerfix(&tv)) == 0) + d->bd_rtout = tvtohz(&tv) - 1; + break; + } + + case BIOCSRTIMEOUT64: { /* struct user64_timeval */ + struct user64_timeval _tv; + struct timeval tv; + + bcopy(addr, &_tv, sizeof (_tv)); + tv.tv_sec = _tv.tv_sec; + tv.tv_usec = _tv.tv_usec; + + /* + * Subtract 1 tick from tvtohz() since this isn't + * a one-shot timer. + */ + if ((error = itimerfix(&tv)) == 0) + d->bd_rtout = tvtohz(&tv) - 1; + break; + } /* * Get read timeout. */ - case BIOCGRTIMEOUT: - { - struct timeval *tv = (struct timeval *)addr; + case BIOCGRTIMEOUT32: { /* struct user32_timeval */ + struct user32_timeval tv; - tv->tv_sec = d->bd_rtout / hz; - tv->tv_usec = (d->bd_rtout % hz) * tick; - break; - } + bzero(&tv, sizeof (tv)); + tv.tv_sec = d->bd_rtout / hz; + tv.tv_usec = (d->bd_rtout % hz) * tick; + bcopy(&tv, addr, sizeof (tv)); + break; + } + + case BIOCGRTIMEOUT64: { /* struct user64_timeval */ + struct user64_timeval tv; + + bzero(&tv, sizeof (tv)); + tv.tv_sec = d->bd_rtout / hz; + tv.tv_usec = (d->bd_rtout % hz) * tick; + bcopy(&tv, addr, sizeof (tv)); + break; + } /* * Get packet stats. */ - case BIOCGSTATS: - { - struct bpf_stat *bs = (struct bpf_stat *)addr; + case BIOCGSTATS: { /* struct bpf_stat */ + struct bpf_stat bs; - bs->bs_recv = d->bd_rcount; - bs->bs_drop = d->bd_dcount; - break; - } + bzero(&bs, sizeof (bs)); + bs.bs_recv = d->bd_rcount; + bs.bs_drop = d->bd_dcount; + bcopy(&bs, addr, sizeof (bs)); + break; + } /* * Set immediate mode. 
*/ - case BIOCIMMEDIATE: - d->bd_immediate = *(u_int *)addr; + case BIOCIMMEDIATE: /* u_int */ + d->bd_immediate = *(u_int *)(void *)addr; break; - case BIOCVERSION: - { - struct bpf_version *bv = (struct bpf_version *)addr; + case BIOCVERSION: { /* struct bpf_version */ + struct bpf_version bv; - bv->bv_major = BPF_MAJOR_VERSION; - bv->bv_minor = BPF_MINOR_VERSION; - break; - } + bzero(&bv, sizeof (bv)); + bv.bv_major = BPF_MAJOR_VERSION; + bv.bv_minor = BPF_MINOR_VERSION; + bcopy(&bv, addr, sizeof (bv)); + break; + } /* * Get "header already complete" flag */ - case BIOCGHDRCMPLT: - *(u_int *)addr = d->bd_hdrcmplt; + case BIOCGHDRCMPLT: /* u_int */ + bcopy(&d->bd_hdrcmplt, addr, sizeof (u_int)); break; /* * Set "header already complete" flag */ - case BIOCSHDRCMPLT: - d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; + case BIOCSHDRCMPLT: /* u_int */ + bcopy(addr, &int_arg, sizeof (int_arg)); + d->bd_hdrcmplt = int_arg ? 1 : 0; break; /* * Get "see sent packets" flag */ - case BIOCGSEESENT: - *(u_int *)addr = d->bd_seesent; + case BIOCGSEESENT: /* u_int */ + bcopy(&d->bd_seesent, addr, sizeof (u_int)); break; /* * Set "see sent packets" flag */ - case BIOCSSEESENT: - d->bd_seesent = *(u_int *)addr; + case BIOCSSEESENT: /* u_int */ + bcopy(addr, &d->bd_seesent, sizeof (u_int)); + break; + + /* + * Set traffic service class + */ + case BIOCSETTC: { /* int */ + int tc; + + bcopy(addr, &tc, sizeof (int)); + error = bpf_set_traffic_class(d, tc); + break; + } + + /* + * Get traffic service class + */ + case BIOCGETTC: /* int */ + bcopy(&d->bd_traffic_class, addr, sizeof (int)); break; - case FIONBIO: /* Non-blocking I/O */ + case FIONBIO: /* Non-blocking I/O; int */ break; - case FIOASYNC: /* Send signal on receive packets */ - d->bd_async = *(int *)addr; + case FIOASYNC: /* Send signal on receive packets; int */ + bcopy(addr, &d->bd_async, sizeof (int)); break; #ifndef __APPLE__ case FIOSETOWN: @@ -1058,67 +1813,127 @@ bpfioctl(dev, cmd, addr, flags, p) *(int *)addr = -fgetown(d->bd_sigio); break; #endif - case BIOCSRSIG: /* Set receive signal */ - { - u_int sig; + case BIOCSRSIG: { /* Set receive signal; u_int */ + u_int sig; - sig = *(u_int *)addr; + bcopy(addr, &sig, sizeof (u_int)); - if (sig >= NSIG) - error = EINVAL; - else - d->bd_sig = sig; - break; - } - case BIOCGRSIG: - *(u_int *)addr = d->bd_sig; + if (sig >= NSIG) + error = EINVAL; + else + d->bd_sig = sig; break; } - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (error); -} - -/* + case BIOCGRSIG: /* u_int */ + bcopy(&d->bd_sig, addr, sizeof (u_int)); + break; +#ifdef __APPLE__ + case BIOCSEXTHDR: /* u_int */ + bcopy(addr, &int_arg, sizeof (int_arg)); + if (int_arg) + d->bd_flags |= BPF_EXTENDED_HDR; + else + d->bd_flags &= ~BPF_EXTENDED_HDR; + break; + + case BIOCGIFATTACHCOUNT: { /* struct ifreq */ + ifnet_t ifp; + struct bpf_if *bp; + + bcopy(addr, &ifr, sizeof (ifr)); + ifr.ifr_name[IFNAMSIZ - 1] = '\0'; + ifp = ifunit(ifr.ifr_name); + if (ifp == NULL) { + error = ENXIO; + break; + } + ifr.ifr_intval = 0; + for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { + struct bpf_d *bpf_d; + + if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) + continue; + for (bpf_d = bp->bif_dlist; bpf_d; bpf_d = bpf_d->bd_next) { + ifr.ifr_intval += 1; + } + } + bcopy(&ifr, addr, sizeof (ifr)); + break; + } + case BIOCGWANTPKTAP: /* u_int */ + int_arg = d->bd_flags & BPF_WANT_PKTAP ? 
1 : 0; + bcopy(&int_arg, addr, sizeof (int_arg)); + break; + + case BIOCSWANTPKTAP: /* u_int */ + bcopy(addr, &int_arg, sizeof (int_arg)); + if (int_arg) + d->bd_flags |= BPF_WANT_PKTAP; + else + d->bd_flags &= ~BPF_WANT_PKTAP; + break; +#endif + + case BIOCSHEADDROP: + bcopy(addr, &int_arg, sizeof (int_arg)); + d->bd_headdrop = int_arg ? 1 : 0; + break; + + case BIOCGHEADDROP: + bcopy(&d->bd_headdrop, addr, sizeof (int)); + break; + } + + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + + return (error); +} + +/* * Set d's packet filter program to fp. If this file already has a filter, * free it and replace it. Returns EINVAL for bogus requests. */ static int -bpf_setf(d, fp) - struct bpf_d *d; - struct bpf_program *fp; +bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns, + u_long cmd) { struct bpf_insn *fcode, *old; u_int flen, size; - int s; + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) + return (ENXIO); + old = d->bd_filter; - if (fp->bf_insns == 0) { - if (fp->bf_len != 0) + if (bf_insns == USER_ADDR_NULL) { + if (bf_len != 0) return (EINVAL); - s = splimp(); - d->bd_filter = 0; + d->bd_filter = NULL; reset_d(d); - splx(s); if (old != 0) FREE((caddr_t)old, M_DEVBUF); return (0); } - flen = fp->bf_len; + flen = bf_len; if (flen > BPF_MAXINSNS) return (EINVAL); - size = flen * sizeof(*fp->bf_insns); + size = flen * sizeof(struct bpf_insn); fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); #ifdef __APPLE__ if (fcode == NULL) return (ENOBUFS); #endif - if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && + if (copyin(bf_insns, (caddr_t)fcode, size) == 0 && bpf_validate(fcode, (int)flen)) { - s = splimp(); d->bd_filter = fcode; - reset_d(d); - splx(s); + + if (cmd == BIOCSETF32 || cmd == BIOCSETF64) + reset_d(d); + if (old != 0) FREE((caddr_t)old, M_DEVBUF); @@ -1134,17 +1949,16 @@ bpf_setf(d, fp) * Return an errno or 0. */ static int -bpf_setif(d, ifr) - struct bpf_d *d; - struct ifreq *ifr; +bpf_setif(struct bpf_d *d, ifnet_t theywant, u_int32_t dlt) { struct bpf_if *bp; - int s, error; - struct ifnet *theywant; + int error; - theywant = ifunit(ifr->ifr_name); - if (theywant == 0) - return ENXIO; + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) + return (ENXIO); /* * Look through attached interfaces for the named one. @@ -1152,116 +1966,409 @@ bpf_setif(d, ifr) for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { struct ifnet *ifp = bp->bif_ifp; - if (ifp == 0 || ifp != theywant) + if (ifp == 0 || ifp != theywant || (dlt != 0 && dlt != bp->bif_dlt)) + continue; + /* + * If the process knows how to deal with DLT_PKTAP, use it + * by default + */ + if (dlt == 0 && bp->bif_dlt == DLT_PKTAP && + !(d->bd_flags & BPF_WANT_PKTAP)) continue; /* * We found the requested interface. - * If it's not up, return an error. - * Allocate the packet buffers if we need to. - * If we're already attached to requested interface, - * just flush the buffer. + * Allocate the packet buffers. + */ + error = bpf_allocbufs(d); + if (error != 0) + return (error); + /* + * Detach if attached to something else. */ - if ((ifp->if_flags & IFF_UP) == 0) - return (ENETDOWN); - - if (d->bd_sbuf == 0) { - error = bpf_allocbufs(d); - if (error != 0) - return (error); - } - s = splimp(); if (bp != d->bd_bif) { - if (d->bd_bif) - /* - * Detach if attached to something else. 
- */ - bpf_detachd(d); - - bpf_attachd(d, bp); + if (d->bd_bif != NULL) { + if (bpf_detachd(d, 0) != 0) + return (ENXIO); + } + if (bpf_attachd(d, bp) != 0) + return (ENXIO); } reset_d(d); - splx(s); return (0); } /* Not found. */ return (ENXIO); } + + +/* + * Get a list of available data link type of the interface. + */ +static int +bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p) +{ + u_int n; + int error; + struct ifnet *ifp; + struct bpf_if *bp; + user_addr_t dlist; + struct bpf_dltlist bfl; + + bcopy(addr, &bfl, sizeof (bfl)); + if (proc_is64bit(p)) { + dlist = (user_addr_t)bfl.bfl_u.bflu_pad; + } else { + dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list); + } + + ifp = d->bd_bif->bif_ifp; + n = 0; + error = 0; + + for (bp = bpf_iflist; bp; bp = bp->bif_next) { + if (bp->bif_ifp != ifp) + continue; + /* + * Return DLT_PKTAP only to processes that know how to handle it + */ + if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) + continue; + if (dlist != USER_ADDR_NULL) { + if (n >= bfl.bfl_len) { + return (ENOMEM); + } + error = copyout(&bp->bif_dlt, dlist, + sizeof (bp->bif_dlt)); + if (error != 0) + break; + dlist += sizeof (bp->bif_dlt); + } + n++; + } + bfl.bfl_len = n; + bcopy(&bfl, addr, sizeof (bfl)); + + return (error); +} + /* - * Support for select() and poll() system calls + * Set the data link type of a BPF instance. + */ +static int +bpf_setdlt(struct bpf_d *d, uint32_t dlt) +{ + int error, opromisc; + struct ifnet *ifp; + struct bpf_if *bp; + + if (d->bd_bif->bif_dlt == dlt) + return (0); + + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) + return (ENXIO); + + ifp = d->bd_bif->bif_ifp; + for (bp = bpf_iflist; bp; bp = bp->bif_next) { + if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) + break; + } + if (bp != NULL) { + opromisc = d->bd_promisc; + if (bpf_detachd(d, 0) != 0) + return (ENXIO); + error = bpf_attachd(d, bp); + if (error) { + printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n", + ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), error); + return error; + } + reset_d(d); + if (opromisc) { + lck_mtx_unlock(bpf_mlock); + error = ifnet_set_promiscuous(bp->bif_ifp, 1); + lck_mtx_lock(bpf_mlock); + if (error) { + printf("%s: ifpromisc %s%d failed (%d)\n", + __func__, ifnet_name(bp->bif_ifp), + ifnet_unit(bp->bif_ifp), error); + } else { + d->bd_promisc = 1; + } + } + } + return (bp == NULL ? EINVAL : 0); +} + +static int +bpf_set_traffic_class(struct bpf_d *d, int tc) +{ + int error = 0; + + if (!SO_VALID_TC(tc)) + error = EINVAL; + else + d->bd_traffic_class = tc; + + return (error); +} + +static void +bpf_set_packet_service_class(struct mbuf *m, int tc) +{ + if (!(m->m_flags & M_PKTHDR)) + return; + + VERIFY(SO_VALID_TC(tc)); + (void) m_set_service_class(m, so_tc2msc(tc)); +} + +/* + * Support for select() * * Return true iff the specific operation will not block indefinitely. * Otherwise, return false but make a note that a selwakeup() must be done. */ int -bpfpoll(dev, events, wql, p) - register dev_t dev; - int events; - void * wql; - struct proc *p; +bpfselect(dev_t dev, int which, void * wql, struct proc *p) { - register struct bpf_d *d; - register int s; - int revents = 0; + struct bpf_d *d; + int ret = 0; - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - /* - * An imitation of the FIONREAD ioctl code. 
- */ - d = &bpf_dtab[minor(dev)]; + lck_mtx_lock(bpf_mlock); + + d = bpf_dtab[minor(dev)]; + if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) { + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + bpf_acquire_d(d); if (d->bd_bif == NULL) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); return (ENXIO); } - s = splimp(); - if (events & (POLLIN | POLLRDNORM)) { - if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) - revents |= events & (POLLIN | POLLRDNORM); - else - selrecord(p, &d->bd_sel, wql); + while (d->bd_hbuf_read) + msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + + if ((d->bd_flags & BPF_CLOSING) != 0) { + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + return (ENXIO); + } + + switch (which) { + case FREAD: + if (d->bd_hlen != 0 || + ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && + d->bd_slen != 0)) + ret = 1; /* read has data to return */ + else { + /* + * Read has no data to return. + * Make the select wait, and start a timer if + * necessary. + */ + selrecord(p, &d->bd_sel, wql); + bpf_start_timer(d); + } + break; + + case FWRITE: + ret = 1; /* can't determine whether a write would block */ + break; } - splx(s); - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - return (revents); + + bpf_release_d(d); + lck_mtx_unlock(bpf_mlock); + + return (ret); } + /* - * Incoming linkage from device drivers. Process the packet pkt, of length - * pktlen, which is stored in a contiguous buffer. The packet is parsed - * by each process' filter, and if accepted, stashed into the corresponding - * buffer. + * Support for kevent() system call. Register EVFILT_READ filters and + * reject all others. */ -void -bpf_tap(ifp, pkt, pktlen) - struct ifnet *ifp; - register u_char *pkt; - register u_int pktlen; +int bpfkqfilter(dev_t dev, struct knote *kn); +static void filt_bpfdetach(struct knote *); +static int filt_bpfread(struct knote *, long); +static int filt_bpftouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_bpfprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); + +struct filterops bpfread_filtops = { + .f_isfd = 1, + .f_detach = filt_bpfdetach, + .f_event = filt_bpfread, + .f_touch = filt_bpftouch, + .f_process = filt_bpfprocess, +}; + +static int +filt_bpfread_common(struct knote *kn, struct bpf_d *d) { - struct bpf_if *bp; - register struct bpf_d *d; - register u_int slen; + int ready = 0; + + if (d->bd_immediate) { + /* + * If there's data in the hold buffer, it's the + * amount of data a read will return. + * + * If there's no data in the hold buffer, but + * there's data in the store buffer, a read will + * immediately rotate the store buffer to the + * hold buffer, the amount of data in the store + * buffer is the amount of data a read will + * return. + * + * If there's no data in either buffer, we're not + * ready to read. + */ + kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read) + ? d->bd_slen : d->bd_hlen); + int64_t lowwat = 1; + if (kn->kn_sfflags & NOTE_LOWAT) + { + if (kn->kn_sdata > d->bd_bufsize) + lowwat = d->bd_bufsize; + else if (kn->kn_sdata > lowwat) + lowwat = kn->kn_sdata; + } + ready = (kn->kn_data >= lowwat); + } else { + /* + * If there's data in the hold buffer, it's the + * amount of data a read will return. 
+ * + * If there's no data in the hold buffer, but + * there's data in the store buffer, if the + * timer has expired a read will immediately + * rotate the store buffer to the hold buffer, + * so the amount of data in the store buffer is + * the amount of data a read will return. + * + * If there's no data in either buffer, or there's + * no data in the hold buffer and the timer hasn't + * expired, we're not ready to read. + */ + kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read) && d->bd_state == BPF_TIMED_OUT ? + d->bd_slen : d->bd_hlen); + ready = (kn->kn_data > 0); + } + if (!ready) + bpf_start_timer(d); + + return (ready); +} + +int +bpfkqfilter(dev_t dev, struct knote *kn) +{ + struct bpf_d *d; + int res; + /* - * Note that the ipl does not have to be raised at this point. - * The only problem that could arise here is that if two different - * interfaces shared any data. This is not the case. + * Is this device a bpf? */ - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - bp = ifp->if_bpf; -#ifdef __APPLE__ - if (bp) { -#endif - for (d = bp->bif_dlist; d != 0; d = d->bd_next) { - ++d->bd_rcount; - slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); - if (slen != 0) - catchpacket(d, pkt, pktlen, slen, bcopy); + if (major(dev) != CDEV_MAJOR || + kn->kn_filter != EVFILT_READ) { + kn->kn_flags = EV_ERROR; + kn->kn_data = EINVAL; + return 0; } -#ifdef __APPLE__ - } - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); -#endif + + lck_mtx_lock(bpf_mlock); + + d = bpf_dtab[minor(dev)]; + + if (d == 0 || + d == (void *)1 || + d->bd_bif == NULL || + (d->bd_flags & BPF_CLOSING) != 0) { + lck_mtx_unlock(bpf_mlock); + kn->kn_flags = EV_ERROR; + kn->kn_data = ENXIO; + return 0; + } + + kn->kn_hook = d; + kn->kn_filtid = EVFILTID_BPFREAD; + KNOTE_ATTACH(&d->bd_sel.si_note, kn); + d->bd_flags |= BPF_KNOTE; + + /* capture the current state */ + res = filt_bpfread_common(kn, d); + + lck_mtx_unlock(bpf_mlock); + + return (res); +} + +static void +filt_bpfdetach(struct knote *kn) +{ + struct bpf_d *d = (struct bpf_d *)kn->kn_hook; + + lck_mtx_lock(bpf_mlock); + if (d->bd_flags & BPF_KNOTE) { + KNOTE_DETACH(&d->bd_sel.si_note, kn); + d->bd_flags &= ~BPF_KNOTE; + } + lck_mtx_unlock(bpf_mlock); +} + +static int +filt_bpfread(struct knote *kn, long hint) +{ +#pragma unused(hint) + struct bpf_d *d = (struct bpf_d *)kn->kn_hook; + + return filt_bpfread_common(kn, d); +} + +static int +filt_bpftouch(struct knote *kn, struct kevent_internal_s *kev) +{ + struct bpf_d *d = (struct bpf_d *)kn->kn_hook; + int res; + + lck_mtx_lock(bpf_mlock); + + /* save off the lowat threshold and flag */ + kn->kn_sdata = kev->data; + kn->kn_sfflags = kev->fflags; + if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) + kn->kn_udata = kev->udata; + + /* output data will be re-generated here */ + res = filt_bpfread_common(kn, d); + + lck_mtx_unlock(bpf_mlock); + + return res; +} + +static int +filt_bpfprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev) +{ +#pragma unused(data) + struct bpf_d *d = (struct bpf_d *)kn->kn_hook; + int res; + + lck_mtx_lock(bpf_mlock); + res = filt_bpfread_common(kn, d); + if (res) { + *kev = kn->kn_kevent; + } + lck_mtx_unlock(bpf_mlock); + + return res; } /* @@ -1269,53 +2376,137 @@ bpf_tap(ifp, pkt, pktlen) * from m_copydata in sys/uipc_mbuf.c. 
*/ static void -bpf_mcopy(src_arg, dst_arg, len) - const void *src_arg; - void *dst_arg; - register size_t len; +bpf_mcopy(const void *src_arg, void *dst_arg, size_t len) { - register const struct mbuf *m; - register u_int count; + struct mbuf *m = (struct mbuf *)(uintptr_t)(src_arg); + u_int count; u_char *dst; - m = src_arg; dst = dst_arg; while (len > 0) { if (m == 0) panic("bpf_mcopy"); count = min(m->m_len, len); - bcopy(mtod(m, void *), dst, count); + bcopy(mbuf_data(m), dst, count); m = m->m_next; dst += count; len -= count; } } -/* - * Incoming linkage from device drivers, when packet is in an mbuf chain. - */ -void -bpf_mtap(ifp, m) - struct ifnet *ifp; - struct mbuf *m; +static inline void +bpf_tap_imp( + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen, + int outbound) { - struct bpf_if *bp = ifp->if_bpf; - struct bpf_d *d; - u_int pktlen, slen; - struct mbuf *m0; + struct bpf_if *bp; + struct mbuf *savedm = m; - pktlen = 0; - for (m0 = m; m0 != 0; m0 = m0->m_next) - pktlen += m0->m_len; + /* + * It's possible that we get here after the bpf descriptor has been + * detached from the interface; in such a case we simply return. + * Lock ordering is important since we can be called asynchronously + * (from the IOKit) to process an inbound packet; when that happens + * we would have been holding its "gateLock" and will be acquiring + * "bpf_mlock" upon entering this routine. Due to that, we release + * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will + * acquire "gateLock" in the IOKit), in order to avoid a deadlock + * when a ifnet_set_promiscuous request simultaneously collides with + * an inbound packet being passed into the tap callback. + */ + lck_mtx_lock(bpf_mlock); + if (ifp->if_bpf == NULL) { + lck_mtx_unlock(bpf_mlock); + return; + } + bp = ifp->if_bpf; + for (bp = ifp->if_bpf; bp && bp->bif_ifp == ifp && + (dlt != 0 && bp->bif_dlt != dlt); bp = bp->bif_next) + ; + if (bp && bp->bif_ifp == ifp && bp->bif_dlist != NULL) { + struct bpf_d *d; + struct m_hdr hack_hdr; + u_int pktlen = 0; + u_int slen = 0; + struct mbuf *m0; + + if (hdr) { + /* + * This is gross. We mock up an mbuf that points to the + * header buffer. This means we don't have to copy the + * header. A number of interfaces prepended headers just + * for bpf by allocating an mbuf on the stack. We want to + * give developers an easy way to prepend a header for bpf. + * Since a developer allocating an mbuf on the stack is bad, + * we do even worse here, allocating only a header to point + * to a buffer the developer supplied. This makes assumptions + * that bpf_filter and catchpacket will not look at anything + * in the mbuf other than the header. This was true at the + * time this code was written. 
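+	 *
+	 * As an illustration only (not part of this change), a driver
+	 * that builds its link-layer header in a separate buffer would
+	 * tap an inbound packet roughly as follows, e.g. for Ethernet:
+	 *
+	 *	struct ether_header eh;		/* filled in by the driver */
+	 *	bpf_tap_in(ifp, 0, m, &eh, sizeof (eh));
+	 *
+	 * The mock mbuf constructed below makes &eh appear as the first
+	 * mbuf of the chain, so the filter and catchpacket() see header
+	 * plus payload without the header ever being copied into m.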
+ */ + hack_hdr.mh_next = m; + hack_hdr.mh_nextpkt = NULL; + hack_hdr.mh_len = hlen; + hack_hdr.mh_data = hdr; + hack_hdr.mh_type = m->m_type; + hack_hdr.mh_flags = 0; + + __IGNORE_WCASTALIGN(m = (mbuf_t)&hack_hdr); + } - for (d = bp->bif_dlist; d != 0; d = d->bd_next) { - if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) - continue; - ++d->bd_rcount; - slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); - if (slen != 0) - catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); + for (m0 = m; m0 != 0; m0 = m0->m_next) + pktlen += m0->m_len; + + for (d = bp->bif_dlist; d; d = d->bd_next) { + if (outbound && !d->bd_seesent) + continue; + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); + if (slen != 0) { +#if CONFIG_MACF_NET + if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0) + continue; +#endif + catchpacket(d, (u_char *)m, savedm, pktlen, + slen, outbound, bpf_mcopy); + } + } } + lck_mtx_unlock(bpf_mlock); +} + +void +bpf_tap_out( + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen) +{ + bpf_tap_imp(ifp, dlt, m, hdr, hlen, 1); +} + +void +bpf_tap_in( + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen) +{ + bpf_tap_imp(ifp, dlt, m, hdr, hlen, 0); +} + +/* Callback registered with Ethernet driver. */ +static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) +{ + bpf_tap_imp(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL); + + return 0; } /* @@ -1327,15 +2518,22 @@ bpf_mtap(ifp, m) * pkt is really an mbuf. */ static void -catchpacket(d, pkt, pktlen, snaplen, cpfn) - register struct bpf_d *d; - register u_char *pkt; - register u_int pktlen, snaplen; - register void (*cpfn) __P((const void *, void *, size_t)); +catchpacket(struct bpf_d *d, u_char *pkt, struct mbuf *m, u_int pktlen, + u_int snaplen, int outbound, + void (*cpfn)(const void *, void *, size_t)) { - register struct bpf_hdr *hp; - register int totlen, curlen; - register int hdrlen = d->bd_bif->bif_hdrlen; + struct bpf_hdr *hp; + struct bpf_hdr_ext *ehp; + int totlen, curlen; + int hdrlen, caplen; + int do_wakeup = 0; + u_char *payload; + struct timeval tv; + struct m_tag *mt = NULL; + struct bpf_mtag *bt = NULL; + + hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen : + d->bd_bif->bif_hdrlen; /* * Figure out how many bytes to move. If the packet is * greater or equal to the snapshot length, transfer that @@ -1355,64 +2553,149 @@ catchpacket(d, pkt, pktlen, snaplen, cpfn) * This packet will overflow the storage buffer. * Rotate the buffers if we can, then wakeup any * pending reads. + * + * We cannot rotate buffers if a read is in progress + * so drop the packet */ - if (d->bd_fbuf == 0) { - /* - * We haven't completed the previous read yet, - * so drop the packet. - */ + if (d->bd_hbuf_read) { ++d->bd_dcount; return; } - ROTATE_BUFFERS(d); - bpf_wakeup(d); + + if (d->bd_fbuf == NULL) { + if (d->bd_headdrop == 0) { + /* + * We haven't completed the previous read yet, + * so drop the packet. + */ + ++d->bd_dcount; + return; + } + /* + * Drop the hold buffer as it contains older packets + */ + d->bd_dcount += d->bd_hcnt; + d->bd_fbuf = d->bd_hbuf; + ROTATE_BUFFERS(d); + } else { + ROTATE_BUFFERS(d); + } + do_wakeup = 1; curlen = 0; } - else if (d->bd_immediate) + else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) /* - * Immediate mode is set. A packet arrived so any - * reads should be woken up. + * Immediate mode is set, or the read timeout has + * already expired during a select call. 
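+		 * (The timeout is armed by bpf_start_timer() when a reader
+		 * has to wait in bpfselect() or in the kevent filter; once
+		 * it fires, bd_state reads BPF_TIMED_OUT here.)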
A packet + * arrived, so the reader should be woken up. */ - bpf_wakeup(d); + do_wakeup = 1; /* * Append the bpf header. */ - hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); -#if BSD >= 199103 - microtime(&hp->bh_tstamp); -#elif defined(sun) - uniqtime(&hp->bh_tstamp); -#else - hp->bh_tstamp = time; -#endif - hp->bh_datalen = pktlen; - hp->bh_hdrlen = hdrlen; + microtime(&tv); + if (d->bd_flags & BPF_EXTENDED_HDR) { + ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen); + memset(ehp, 0, sizeof(*ehp)); + ehp->bh_tstamp.tv_sec = tv.tv_sec; + ehp->bh_tstamp.tv_usec = tv.tv_usec; + ehp->bh_datalen = pktlen; + ehp->bh_hdrlen = hdrlen; + ehp->bh_caplen = totlen - hdrlen; + mt = m_tag_locate(m, bpf_mtag_id, 0, NULL); + if (mt && mt->m_tag_len >= sizeof(*bt)) { + bt = (struct bpf_mtag *)(mt + 1); + ehp->bh_pid = bt->bt_pid; + strlcpy(ehp->bh_comm, bt->bt_comm, + sizeof(ehp->bh_comm)); + ehp->bh_svc = so_svc2tc(bt->bt_svc); + if (bt->bt_direction == BPF_MTAG_DIR_OUT) + ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT; + else + ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN; + m_tag_delete(m, mt); + } else if (outbound) { + /* only do lookups on non-raw INPCB */ + if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID| + PKTF_FLOW_LOCALSRC|PKTF_FLOW_RAWSOCK)) == + (PKTF_FLOW_ID|PKTF_FLOW_LOCALSRC) && + m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) { + ehp->bh_flowid = m->m_pkthdr.pkt_flowid; + ehp->bh_proto = m->m_pkthdr.pkt_proto; + } + ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc); + ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT; + if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) + ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT; + if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) + ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ; + if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) + ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT; + if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) { + ehp->bh_unsent_bytes = + m->m_pkthdr.bufstatus_if; + ehp->bh_unsent_snd = + m->m_pkthdr.bufstatus_sndbuf; + } + } else + ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN; + payload = (u_char *)ehp + hdrlen; + caplen = ehp->bh_caplen; + } else { + hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen); + hp->bh_tstamp.tv_sec = tv.tv_sec; + hp->bh_tstamp.tv_usec = tv.tv_usec; + hp->bh_datalen = pktlen; + hp->bh_hdrlen = hdrlen; + hp->bh_caplen = totlen - hdrlen; + payload = (u_char *)hp + hdrlen; + caplen = hp->bh_caplen; + } /* * Copy the packet data into the store buffer and update its length. */ - (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); + (*cpfn)(pkt, payload, caplen); d->bd_slen = curlen + totlen; + d->bd_scnt += 1; + + if (do_wakeup) + bpf_wakeup(d); } /* * Initialize all nonzero fields of a descriptor. */ static int -bpf_allocbufs(d) - register struct bpf_d *d; +bpf_allocbufs(struct bpf_d *d) { + if (d->bd_sbuf != NULL) { + FREE(d->bd_sbuf, M_DEVBUF); + d->bd_sbuf = NULL; + } + if (d->bd_hbuf != NULL) { + FREE(d->bd_hbuf, M_DEVBUF); + d->bd_hbuf = NULL; + } + if (d->bd_fbuf != NULL) { + FREE(d->bd_fbuf, M_DEVBUF); + d->bd_fbuf = NULL; + } + d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); - if (d->bd_fbuf == 0) + if (d->bd_fbuf == NULL) return (ENOBUFS); d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); - if (d->bd_sbuf == 0) { + if (d->bd_sbuf == NULL) { FREE(d->bd_fbuf, M_DEVBUF); + d->bd_fbuf = NULL; return (ENOBUFS); } d->bd_slen = 0; d->bd_hlen = 0; + d->bd_scnt = 0; + d->bd_hcnt = 0; return (0); } @@ -1421,25 +2704,25 @@ bpf_allocbufs(d) * Called on close. 
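 * By this point the descriptor has been detached from its interface,
 * so no tap callback can still be storing packets into these buffers.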
*/ static void -bpf_freed(d) - register struct bpf_d *d; +bpf_freed(struct bpf_d *d) { /* * We don't need to lock out interrupts since this descriptor has * been detached from its interface and it yet hasn't been marked * free. */ + if (d->bd_hbuf_read) + panic("bpf buffer freed during read"); + if (d->bd_sbuf != 0) { FREE(d->bd_sbuf, M_DEVBUF); - if (d->bd_hbuf != 0) + if (d->bd_hbuf != 0) FREE(d->bd_hbuf, M_DEVBUF); if (d->bd_fbuf != 0) FREE(d->bd_fbuf, M_DEVBUF); } if (d->bd_filter) FREE((caddr_t)d->bd_filter, M_DEVBUF); - - D_MARKFREE(d); } /* @@ -1448,46 +2731,85 @@ bpf_freed(d) * size of the link header (variable length headers not yet supported). */ void -bpfattach(ifp, dlt, hdrlen) - struct ifnet *ifp; - u_int dlt, hdrlen; +bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) { - struct bpf_if *bp; - int i; - bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT); - if (bp == 0) - panic("bpfattach"); - - bp->bif_dlist = 0; - bp->bif_ifp = ifp; - bp->bif_dlt = dlt; + bpf_attach(ifp, dlt, hdrlen, NULL, NULL); +} - bp->bif_next = bpf_iflist; - bpf_iflist = bp; +errno_t +bpf_attach( + ifnet_t ifp, + u_int32_t dlt, + u_int32_t hdrlen, + bpf_send_func send, + bpf_tap_func tap) +{ + struct bpf_if *bp_new; + struct bpf_if *bp_temp; + struct bpf_if *bp_first = NULL; + + bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF, + M_WAIT | M_ZERO); + if (bp_new == 0) + panic("bpfattach"); - bp->bif_ifp->if_bpf = 0; + lck_mtx_lock(bpf_mlock); + /* + * Check if this interface/dlt is already attached, record first + * attachment for this interface. + */ + for (bp_temp = bpf_iflist; bp_temp && (bp_temp->bif_ifp != ifp || + bp_temp->bif_dlt != dlt); bp_temp = bp_temp->bif_next) { + if (bp_temp->bif_ifp == ifp && bp_first == NULL) + bp_first = bp_temp; + } + + if (bp_temp != NULL) { + printf("bpfattach - %s with dlt %d is already attached\n", + if_name(ifp), dlt); + FREE(bp_new, M_DEVBUF); + lck_mtx_unlock(bpf_mlock); + return EEXIST; + } + + bp_new->bif_ifp = ifp; + bp_new->bif_dlt = dlt; + bp_new->bif_send = send; + bp_new->bif_tap = tap; + + if (bp_first == NULL) { + /* No other entries for this ifp */ + bp_new->bif_next = bpf_iflist; + bpf_iflist = bp_new; + } + else { + /* Add this after the first entry for this interface */ + bp_new->bif_next = bp_first->bif_next; + bp_first->bif_next = bp_new; + } + /* * Compute the length of the bpf header. This is not necessarily * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for * performance reasons and to alleviate alignment restrictions). */ - bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; + bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; + bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen + + sizeof(struct bpf_hdr_ext)) - hdrlen; + + /* Take a reference on the interface */ + ifnet_reference(ifp); -#ifdef __APPLE__ - /* - * Mark all the descriptors free if this hasn't been done. - */ - if (!bpf_dtab_init) { - for (i = 0; i < nbpfilter; ++i) - D_MARKFREE(&bpf_dtab[i]); - bpf_dtab_init = 1; - } -#else + lck_mtx_unlock(bpf_mlock); + +#ifndef __APPLE__ if (bootverbose) - printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); + printf("bpf: %s attached\n", if_name(ifp)); #endif + + return 0; } /* @@ -1497,83 +2819,89 @@ bpfattach(ifp, dlt, hdrlen) * ENXIO. 
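 * With the per-DLT attachments created by bpf_attach() above, every
 * bpf_if entry registered for the interface is unlinked here and the
 * ifnet reference taken at attach time is released.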
*/ void -bpfdetach(ifp) - struct ifnet *ifp; +bpfdetach(struct ifnet *ifp) { - struct bpf_if *bp, *bp_prev; + struct bpf_if *bp, *bp_prev, *bp_next; struct bpf_d *d; - int s; - s = splimp(); + if (bpf_debug != 0) + printf("%s: %s\n", + __func__, if_name(ifp)); - /* Locate BPF interface information */ - bp_prev = NULL; - for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { - if (ifp == bp->bif_ifp) - break; - bp_prev = bp; - } - -#ifdef __APPLE__ - /* Check for no BPF interface information */ - if (bp == NULL) { - return; - } -#endif + lck_mtx_lock(bpf_mlock); - /* Interface wasn't attached */ - if (bp->bif_ifp == NULL) { - splx(s); -#ifndef __APPLE__ - printf("bpfdetach: %s%d was not attached\n", ifp->if_name, - ifp->if_unit); -#endif - return; - } + /* + * Build the list of devices attached to that interface + * that we need to free while keeping the lock to maintain + * the integrity of the interface list + */ + bp_prev = NULL; + for (bp = bpf_iflist; bp != NULL; bp = bp_next) { + bp_next = bp->bif_next; - while ((d = bp->bif_dlist) != NULL) { - bpf_detachd(d); - bpf_wakeup(d); - } + if (ifp != bp->bif_ifp) { + bp_prev = bp; + continue; + } + /* Unlink from the interface list */ + if (bp_prev) + bp_prev->bif_next = bp->bif_next; + else + bpf_iflist = bp->bif_next; - if (bp_prev) { - bp_prev->bif_next = bp->bif_next; - } else { - bpf_iflist = bp->bif_next; + /* Detach the devices attached to the interface */ + while ((d = bp->bif_dlist) != NULL) { + /* + * Take an extra reference to prevent the device + * from being freed when bpf_detachd() releases + * the reference for the interface list + */ + bpf_acquire_d(d); + bpf_detachd(d, 0); + bpf_wakeup(d); + bpf_release_d(d); + } + ifnet_release(ifp); } - FREE(bp, M_DEVBUF); - - splx(s); + lck_mtx_unlock(bpf_mlock); } -static void *bpf_devfs_token[NBPFILTER]; - -static int bpf_devsw_installed; - -void bpf_init __P((void *unused)); - void -bpf_init(unused) - void *unused; +bpf_init(__unused void *unused) { #ifdef __APPLE__ int i; int maj; - if (!bpf_devsw_installed ) { + if (bpf_devsw_installed == 0) { bpf_devsw_installed = 1; + bpf_mlock_grp_attr = lck_grp_attr_alloc_init(); + bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr); + bpf_mlock_attr = lck_attr_alloc_init(); + lck_mtx_init(bpf_mlock, bpf_mlock_grp, bpf_mlock_attr); maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw); if (maj == -1) { + if (bpf_mlock_attr) + lck_attr_free(bpf_mlock_attr); + if (bpf_mlock_grp) + lck_grp_free(bpf_mlock_grp); + if (bpf_mlock_grp_attr) + lck_grp_attr_free(bpf_mlock_grp_attr); + + bpf_mlock = NULL; + bpf_mlock_attr = NULL; + bpf_mlock_grp = NULL; + bpf_mlock_grp_attr = NULL; + bpf_devsw_installed = 0; printf("bpf_init: failed to allocate a major number!\n"); - nbpfilter = 0; return; } - for (i = 0 ; i < nbpfilter; i++) { - bpf_devfs_token[i] = devfs_make_node(makedev(maj, i), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, - "bpf%x", i); - } + + for (i = 0 ; i < NBPFILTER; i++) + bpf_make_dev_t(maj); + + VERIFY(mbuf_tag_id_find(BPF_CONTROL_NAME, &bpf_mtag_id) == 0); } #else cdevsw_add(&bpf_cdevsw); @@ -1584,51 +2912,18 @@ bpf_init(unused) SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) #endif -#else /* !BPF */ -#ifndef __APPLE__ -/* - * NOP stubs to allow bpf-using drivers to load and function. - * - * A 'better' implementation would allow the core bpf functionality - * to be loaded at runtime. 
- */ - -void -bpf_tap(ifp, pkt, pktlen) - struct ifnet *ifp; - register u_char *pkt; - register u_int pktlen; +#if CONFIG_MACF_NET +struct label * +mac_bpfdesc_label_get(struct bpf_d *d) { -} -void -bpf_mtap(ifp, m) - struct ifnet *ifp; - struct mbuf *m; -{ -} - -void -bpfattach(ifp, dlt, hdrlen) - struct ifnet *ifp; - u_int dlt, hdrlen; -{ + return (d->bd_label); } void -bpfdetach(ifp) - struct ifnet *ifp; +mac_bpfdesc_label_set(struct bpf_d *d, struct label *label) { -} -u_int -bpf_filter(pc, p, wirelen, buflen) - register const struct bpf_insn *pc; - register u_char *p; - u_int wirelen; - register u_int buflen; -{ - return -1; /* "no filter" behaviour */ + d->bd_label = label; } -#endif /* !defined(__APPLE__) */ -#endif /* NBPFILTER > 0 */ +#endif