/*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/ttycom.h>
#include <sys/filedesc.h>
#include <sys/uio_internal.h>
-#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/event.h>
-#if defined(sparc) && BSD < 199103
-#include <sys/stream.h>
-#endif
#include <sys/poll.h>
#include <sys/socket.h>
+#include <sys/socketvar.h>
#include <sys/vnode.h>
#include <net/if.h>
#include <net/bpfdesc.h>
#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/in_var.h>
+#include <netinet/ip_var.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_var.h>
+#include <netinet/udp.h>
+#include <netinet/udp_var.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>
+#include <net/pktap.h>
#include <kern/locks.h>
+#include <kern/thread_call.h>
#if CONFIG_MACF_NET
#include <security/mac_framework.h>
extern int tvtohz(struct timeval *);
-/*
- * Older BSDs don't have kernel malloc.
- */
-#if BSD < 199103
-extern bcopy();
-static caddr_t bpf_alloc();
-#include <net/bpf_compat.h>
-#define BPF_BUFSIZE (MCLBYTES-8)
-#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
-#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
-#endif
#define PRINET 26 /* interruptible */
* The default read buffer size is patchable.
*/
static unsigned int bpf_bufsize = BPF_BUFSIZE;
-SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
+SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
&bpf_bufsize, 0, "");
-static unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
-SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
+__private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
+SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
&bpf_maxbufsize, 0, "");
static unsigned int bpf_maxdevices = 256;
-SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW,
+SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED,
&bpf_maxdevices, 0, "");
+/*
+ * bpf_wantpktap controls the default visibility of DLT_PKTAP.
+ * On OS X it is off by default, so a process needs to use the
+ * BIOCSWANTPKTAP ioctl explicitly to be able to use DLT_PKTAP.
+ */
+static unsigned int bpf_wantpktap = 0;
+SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bpf_wantpktap, 0, "");
+
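+/*
+ * For example, a capture process that understands the pktap
+ * pseudo-header can opt in per descriptor (sketch; assumes fd is an
+ * open /dev/bpfN file descriptor):
+ *
+ *	u_int want = 1;
+ *	if (ioctl(fd, BIOCSWANTPKTAP, &want) == -1)
+ *		err(1, "BIOCSWANTPKTAP");
+ *
+ * After this, BIOCGDLTLIST and the default DLT selection will
+ * consider DLT_PKTAP for this descriptor.
+ */
+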
+static int bpf_debug = 0;
+SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &bpf_debug, 0, "");
/*
* bpf_iflist is the list of interfaces; each corresponds to an ifnet
static unsigned int bpf_dtab_size = 0;
static unsigned int nbpfilter = 0;
-static lck_mtx_t *bpf_mlock;
+decl_lck_mtx_data(static, bpf_mlock_data);
+static lck_mtx_t *bpf_mlock = &bpf_mlock_data;
static lck_grp_t *bpf_mlock_grp;
static lck_grp_attr_t *bpf_mlock_grp_attr;
static lck_attr_t *bpf_mlock_attr;
-/*
- * Mark a descriptor free by making it point to itself.
- * This is probably cheaper than marking with a constant since
- * the address should be in a register anyway.
- */
+static mbuf_tag_id_t bpf_mtag_id;
#endif /* __APPLE__ */
static int bpf_allocbufs(struct bpf_d *);
static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
-static void bpf_detachd(struct bpf_d *d);
+static int bpf_detachd(struct bpf_d *d, int);
static void bpf_freed(struct bpf_d *);
static void bpf_mcopy(const void *, void *, size_t);
static int bpf_movein(struct uio *, int,
struct mbuf **, struct sockaddr *, int *);
static int bpf_setif(struct bpf_d *, ifnet_t ifp, u_int32_t dlt);
-static void bpf_wakeup(struct bpf_d *);
-static void catchpacket(struct bpf_d *, u_char *, u_int,
- u_int, void (*)(const void *, void *, size_t));
+static void bpf_timed_out(void *, void *);
+static void bpf_wakeup(struct bpf_d *);
+static void catchpacket(struct bpf_d *, u_char *, struct mbuf *, u_int,
+ u_int, int, void (*)(const void *, void *, size_t));
static void reset_d(struct bpf_d *);
-static int bpf_setf(struct bpf_d *, u_int bf_len, user_addr_t bf_insns);
-static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *,
- struct proc *);
+static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long);
+static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *);
static int bpf_setdlt(struct bpf_d *, u_int);
+static int bpf_set_traffic_class(struct bpf_d *, int);
+static void bpf_set_packet_service_class(struct mbuf *, int);
-/*static void *bpf_devfs_token[MAXBPFILTER];*/
+static void bpf_acquire_d(struct bpf_d *);
+static void bpf_release_d(struct bpf_d *);
static int bpf_devsw_installed;
* Darwin differs from BSD here, the following are static
* on BSD and not static on Darwin.
*/
- d_open_t bpfopen;
- d_close_t bpfclose;
- d_read_t bpfread;
- d_write_t bpfwrite;
- ioctl_fcn_t bpfioctl;
- select_fcn_t bpfpoll;
+ d_open_t bpfopen;
+ d_close_t bpfclose;
+ d_read_t bpfread;
+ d_write_t bpfwrite;
+ ioctl_fcn_t bpfioctl;
+ select_fcn_t bpfselect;
/* Darwin's cdevsw struct differs slightly from BSDs */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
- /* open */ bpfopen,
- /* close */ bpfclose,
- /* read */ bpfread,
- /* write */ bpfwrite,
- /* ioctl */ bpfioctl,
- /* stop */ eno_stop,
- /* reset */ eno_reset,
- /* tty */ NULL,
- /* select */ bpfpoll,
- /* mmap */ eno_mmap,
- /* strategy*/ eno_strat,
- /* getc */ eno_getc,
- /* putc */ eno_putc,
- /* type */ 0
+ /* open */ bpfopen,
+ /* close */ bpfclose,
+ /* read */ bpfread,
+ /* write */ bpfwrite,
+ /* ioctl */ bpfioctl,
+ /* stop */ eno_stop,
+ /* reset */ eno_reset,
+ /* tty */ NULL,
+ /* select */ bpfselect,
+ /* mmap */ eno_mmap,
+ /* strategy*/ eno_strat,
+ /* getc */ eno_getc,
+ /* putc */ eno_putc,
+ /* type */ 0
};
#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)
hlen = 0;
break;
+ case DLT_IEEE802_11_RADIO:
+ sa_family = AF_IEEE80211;
+ hlen = 0;
+ break;
+
default:
return (EIO);
}
if (m == 0)
return (ENOBUFS);
if ((unsigned)len > MHLEN) {
-#if BSD >= 199103
MCLGET(m, M_WAIT);
if ((m->m_flags & M_EXT) == 0) {
-#else
- MCLGET(m);
- if (m->m_len != MCLBYTES) {
-#endif
error = ENOBUFS;
goto bad;
}
m->m_pkthdr.len = m->m_len = len;
m->m_pkthdr.rcvif = NULL;
*mp = m;
+
/*
* Make room for link header.
*/
if (hlen != 0) {
m->m_pkthdr.len -= hlen;
m->m_len -= hlen;
-#if BSD >= 199103
m->m_data += hlen; /* XXX */
-#else
- m->m_off += hlen;
-#endif
error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
if (error)
goto bad;
}
error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
- if (!error)
- return (0);
+ if (error)
+ goto bad;
+
+ /* Check for multicast destination */
+ switch (linktype) {
+ case DLT_EN10MB: {
+ struct ether_header *eh = mtod(m, struct ether_header *);
+
+ if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
+ if (_ether_cmp(etherbroadcastaddr, eh->ether_dhost) == 0)
+ m->m_flags |= M_BCAST;
+ else
+ m->m_flags |= M_MCAST;
+ }
+ break;
+ }
+ }
+
+ return 0;
bad:
m_freem(m);
return (error);
#ifdef __APPLE__
/*
- * The dynamic addition of a new device node must block all processes that are opening
- * the last device so that no process will get an unexpected ENOENT
+ * The dynamic addition of a new device node must block all processes that
+ * are opening the last device so that no process will get an unexpected
+ * ENOENT
*/
static void
bpf_make_dev_t(int maj)
d->bd_bif = bp;
d->bd_next = bp->bif_dlist;
bp->bif_dlist = d;
-
- if (first) {
- bpf_tap_mode tap_mode;
- switch ((d->bd_oflags & (FREAD | FWRITE))) {
- case FREAD:
- tap_mode = BPF_TAP_INPUT;
- break;
- case FWRITE:
- tap_mode = BPF_TAP_OUTPUT;
- break;
- default:
- tap_mode = BPF_TAP_INPUT_OUTPUT;
- break;
- }
+ /*
+ * Take a reference on the device even if an error is returned
+ * because we keep the device in the interface's list of listeners
+ */
+ bpf_acquire_d(d);
+ if (first) {
/* Find the default bpf entry for this ifp */
if (bp->bif_ifp->if_bpf == NULL) {
- struct bpf_if *primary;
+ struct bpf_if *tmp, *primary = NULL;
- for (primary = bpf_iflist; primary && primary->bif_ifp != bp->bif_ifp;
- primary = primary->bif_next)
- ;
-
+ for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) {
+ if (tmp->bif_ifp != bp->bif_ifp)
+ continue;
+ primary = tmp;
+ /*
+ * Use DLT_PKTAP only if the process knows how
+ * to deal with it; otherwise find another one
+ */
+ if (tmp->bif_dlt == DLT_PKTAP &&
+ !(d->bd_flags & BPF_WANT_PKTAP))
+ continue;
+ break;
+ }
bp->bif_ifp->if_bpf = primary;
}
/* Only call dlil_set_bpf_tap for primary dlt */
if (bp->bif_ifp->if_bpf == bp)
- dlil_set_bpf_tap(bp->bif_ifp, tap_mode, bpf_tap_callback);
+ dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
if (bp->bif_tap)
- error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, tap_mode);
+ error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT);
}
+ /*
+ * Reset the detach flags in case we previously detached an interface
+ */
+ d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED);
+
+ if (bp->bif_ifp->if_bpf != NULL &&
+ bp->bif_ifp->if_bpf->bif_dlt == DLT_PKTAP)
+ d->bd_flags |= BPF_FINALIZE_PKTAP;
+ else
+ d->bd_flags &= ~BPF_FINALIZE_PKTAP;
+
return error;
}
/*
* Detach a file from its interface.
+ *
+ * Return 1 if the descriptor was closed by some thread, 0 otherwise
*/
-static void
-bpf_detachd(struct bpf_d *d)
+static int
+bpf_detachd(struct bpf_d *d, int closing)
{
struct bpf_d **p;
struct bpf_if *bp;
struct ifnet *ifp;
+ /*
+ * Some other thread already detached
+ */
+ if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0)
+ goto done;
+ /*
+ * This thread is doing the detach
+ */
+ d->bd_flags |= BPF_DETACHING;
+
ifp = d->bd_bif->bif_ifp;
bp = d->bd_bif;
-
+
+ if (bpf_debug != 0)
+ printf("%s: %llx %s%s\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(d),
+ if_name(ifp), closing ? " closing" : "");
+
/* Remove d from the interface's descriptor list. */
p = &bp->bif_dlist;
while (*p != d) {
* take it out.
* Most likely the network interface is gone.
*/
- printf("bpf: ifnet_set_promiscuous failed");
+ printf("%s: ifnet_set_promiscuous failed\n", __func__);
}
lck_mtx_lock(bpf_mlock);
}
+
+ /*
+ * Wake up other thread that are waiting for this thread to finish
+ * detaching
+ */
+ d->bd_flags &= ~BPF_DETACHING;
+ d->bd_flags |= BPF_DETACHED;
+ /*
+ * Note that we've kept the reference because we may have dropped
+ * the lock when turning off promiscuous mode
+ */
+ bpf_release_d(d);
+
+done:
+ /*
+ * When closing, make sure no other thread refers to the bpf_d
+ */
+ if (bpf_debug != 0)
+ printf("%s: %llx done\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(d));
+ /*
+ * Let the caller know the bpf_d is closed
+ */
+ if ((d->bd_flags & BPF_CLOSING))
+ return (1);
+ else
+ return (0);
}
+/*
+ * Start asynchronous timer, if necessary.
+ * Must be called with bpf_mlock held.
+ */
+static void
+bpf_start_timer(struct bpf_d *d)
+{
+ uint64_t deadline;
+ struct timeval tv;
+
+ if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
+ tv.tv_sec = d->bd_rtout / hz;
+ tv.tv_usec = (d->bd_rtout % hz) * tick;
+
+ clock_interval_to_deadline(
+ (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec,
+ NSEC_PER_USEC, &deadline);
+ /*
+ * The state is BPF_IDLE, so the timer hasn't
+ * been started yet, and hasn't gone off yet;
+ * there is no thread call scheduled, so this
+ * won't change the schedule.
+ *
+ * XXX - what if, by the time it gets entered,
+ * the deadline has already passed?
+ */
+ thread_call_enter_delayed(d->bd_thread_call, deadline);
+ d->bd_state = BPF_WAITING;
+ }
+}
+
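+/*
+ * Example: with hz = 100 (so tick = 10000 us), a bd_rtout of 250
+ * ticks converts to tv = { 2, 500000 }, i.e. a deadline 2.5 seconds
+ * from now.
+ */
+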
+/*
+ * Cancel asynchronous timer.
+ * Must be called with bpf_mlock held.
+ */
+static boolean_t
+bpf_stop_timer(struct bpf_d *d)
+{
+ /*
+ * If the timer has already gone off, this does nothing.
+ * Our caller is expected to set d->bd_state to BPF_IDLE,
+ * with the bpf_mlock, after we are called. bpf_timed_out()
+ * also grabs bpf_mlock, so, if the timer has gone off and
+ * bpf_timed_out() hasn't finished, it's waiting for the
+ * lock; when this thread releases the lock, it will
+ * find the state is BPF_IDLE, and just release the
+ * lock and return.
+ */
+ return (thread_call_cancel(d->bd_thread_call));
+}
+
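+/*
+ * thread_call_cancel() returns TRUE only when it dequeues a pending
+ * call before it fires; FALSE means the callout already ran or is
+ * running, which is how bpfclose() knows it must wait in the
+ * BPF_DRAINING state for bpf_timed_out() to finish.
+ */
+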
+void
+bpf_acquire_d(struct bpf_d *d)
+{
+ void *lr_saved = __builtin_return_address(0);
+
+ lck_mtx_assert(bpf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ d->bd_refcnt += 1;
+
+ d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved;
+ d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST;
+}
+
+void
+bpf_release_d(struct bpf_d *d)
+{
+ void *lr_saved = __builtin_return_address(0);
+
+ lck_mtx_assert(bpf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ if (d->bd_refcnt <= 0)
+ panic("%s: %p refcnt <= 0", __func__, d);
+
+ d->bd_refcnt -= 1;
+
+ d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved;
+ d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST;
+
+ if (d->bd_refcnt == 0) {
+ /* Assert the device is detached */
+ if ((d->bd_flags & BPF_DETACHED) == 0)
+ panic("%s: %p BPF_DETACHED not set", __func__, d);
+
+ _FREE(d, M_DEVBUF);
+ }
+}
+
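+/*
+ * bd_ref_lr/bd_unref_lr record the return addresses of the last
+ * BPF_REF_HIST acquire and release callers in small ring buffers,
+ * so a refcount underflow panic can be attributed from a core dump.
+ */
+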
/*
* Open ethernet device. Returns ENXIO for illegal minor device number,
* EBUSY if file is open by another process.
lck_mtx_unlock(bpf_mlock);
return (EBUSY);
}
- d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
+ d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF,
+ M_WAIT | M_ZERO);
if (d == NULL) {
/* this really is a catastrophic failure */
printf("bpfopen: malloc bpf_d failed\n");
lck_mtx_unlock(bpf_mlock);
return ENOMEM;
}
- bzero(d, sizeof(struct bpf_d));
-
- /*
- * It is not necessary to take the BPF lock here because no other
- * thread can access the device until it is marked opened...
- */
-
+
/* Mark "in use" and do most initialization. */
+ bpf_acquire_d(d);
d->bd_bufsize = bpf_bufsize;
d->bd_sig = SIGIO;
d->bd_seesent = 1;
d->bd_oflags = flags;
+ d->bd_state = BPF_IDLE;
+ d->bd_traffic_class = SO_TC_BE;
+ d->bd_flags |= BPF_DETACHED;
+ if (bpf_wantpktap)
+ d->bd_flags |= BPF_WANT_PKTAP;
+ else
+ d->bd_flags &= ~BPF_WANT_PKTAP;
+ d->bd_thread_call = thread_call_allocate(bpf_timed_out, d);
+ if (d->bd_thread_call == NULL) {
+ printf("bpfopen: malloc thread call failed\n");
+ bpf_dtab[minor(dev)] = NULL;
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+
+ return (ENOMEM);
+ }
#if CONFIG_MACF_NET
mac_bpfdesc_label_init(d);
mac_bpfdesc_label_associate(kauth_cred_get(), d);
if (d == 0 || d == (void *)1) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
- }
+ }
+
+ /*
+ * Other threads may call bpf_detachd() if we drop the bpf_mlock
+ */
+ d->bd_flags |= BPF_CLOSING;
+
+ if (bpf_debug != 0)
+ printf("%s: %llx\n",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(d));
+
bpf_dtab[minor(dev)] = (void *)1; /* Mark closing */
+ /*
+ * Deal with any in-progress timeouts.
+ */
+ switch (d->bd_state) {
+ case BPF_IDLE:
+ /*
+ * Not waiting for a timeout, and no timeout happened.
+ */
+ break;
+
+ case BPF_WAITING:
+ /*
+ * Waiting for a timeout.
+ * Cancel any timer that has yet to go off,
+ * and mark the state as "closing".
+ * Then drop the lock to allow any timers that
+ * *have* gone off to run to completion, and wait
+ * for them to finish.
+ */
+ if (!bpf_stop_timer(d)) {
+ /*
+ * There was no pending call, so the call must
+ * have been in progress. Wait for the call to
+ * complete; we have to drop the lock while
+ * waiting to let the in-progress call complete
+ */
+ d->bd_state = BPF_DRAINING;
+ while (d->bd_state == BPF_DRAINING)
+ msleep((caddr_t)d, bpf_mlock, PRINET,
+ "bpfdraining", NULL);
+ }
+ d->bd_state = BPF_IDLE;
+ break;
+
+ case BPF_TIMED_OUT:
+ /*
+ * Timer went off, and the timeout routine finished.
+ */
+ d->bd_state = BPF_IDLE;
+ break;
+
+ case BPF_DRAINING:
+ /*
+ * Another thread is blocked on a close waiting for
+ * a timeout to finish.
+ * This "shouldn't happen", as the first thread to enter
+ * bpfclose() will set bpf_dtab[minor(dev)] to 1, and
+ * all subsequent threads should see that and fail with
+ * ENXIO.
+ */
+ panic("Two threads blocked in a BPF close");
+ break;
+ }
+
if (d->bd_bif)
- bpf_detachd(d);
+ bpf_detachd(d, 1);
selthreadclear(&d->bd_sel);
#if CONFIG_MACF_NET
mac_bpfdesc_label_destroy(d);
#endif
+ thread_call_free(d->bd_thread_call);
+
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
bpf_freed(d);
/* Mark free in same context as bpfopen comes to check */
bpf_dtab[minor(dev)] = NULL; /* Mark closed */
+
+ bpf_release_d(d);
+
lck_mtx_unlock(bpf_mlock);
-
- _FREE(d, M_DEVBUF);
-
+
return (0);
}
static int
bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
{
- int st;
+ u_int64_t abstime = 0;
- lck_mtx_unlock(bpf_mlock);
-
- st = tsleep((caddr_t)d, pri, wmesg, timo);
+ if (timo)
+ clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
- lck_mtx_lock(bpf_mlock);
-
- return st;
+ return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime);
}
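+/*
+ * Unlike the old unlock/tsleep/lock sequence, msleep1() drops and
+ * reacquires bpf_mlock atomically around the wait, closing the
+ * window in which a wakeup could be lost between the unlock and the
+ * sleep.
+ */
+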
/*
* Zero the length of the new store buffer.
*/
#define ROTATE_BUFFERS(d) \
+ if (d->bd_hbuf_read) \
+ panic("rotating bpf buffers during read"); \
(d)->bd_hbuf = (d)->bd_sbuf; \
(d)->bd_hlen = (d)->bd_slen; \
+ (d)->bd_hcnt = (d)->bd_scnt; \
(d)->bd_sbuf = (d)->bd_fbuf; \
(d)->bd_slen = 0; \
+ (d)->bd_scnt = 0; \
(d)->bd_fbuf = NULL;
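+/*
+ * bpf triple-buffers each descriptor: catchpacket() appends to the
+ * store buffer (bd_sbuf), read(2) drains the hold buffer (bd_hbuf),
+ * and bd_fbuf is the spare. Rotation promotes the full store buffer
+ * to the hold position and recycles the spare as the new store
+ * buffer, which is only safe when bd_fbuf is available and no reader
+ * is draining bd_hbuf (hence the bd_hbuf_read panic above).
+ */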
/*
* bpfread - read next chunk of packets from buffers
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
struct bpf_d *d;
+ caddr_t hbuf;
+ int timed_out, hbuf_len;
int error;
+ int flags;
lck_mtx_lock(bpf_mlock);
d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
+ bpf_acquire_d(d);
/*
* Restrict application to use a buffer the same size as
* as kernel buffers.
*/
if (uio_resid(uio) != d->bd_bufsize) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (EINVAL);
}
+
+ if (d->bd_state == BPF_WAITING)
+ bpf_stop_timer(d);
+
+ timed_out = (d->bd_state == BPF_TIMED_OUT);
+ d->bd_state = BPF_IDLE;
+
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return (ENXIO);
+ }
/*
* If the hold buffer is empty, then do a timed sleep, which
* ends when the timeout expires or when enough packets
* have arrived to fill the store buffer.
*/
while (d->bd_hbuf == 0) {
- if (d->bd_immediate && d->bd_slen != 0) {
+ if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY))
+ && d->bd_slen != 0) {
/*
- * A packet(s) either arrived since the previous
+ * We're in immediate mode, or are reading
+ * in non-blocking mode, or a timer was
+ * started before the read (e.g., by select()
+ * or poll()) and has expired and a packet(s)
+ * either arrived since the previous
* read or arrived while we were asleep.
* Rotate the buffers and return what's here.
*/
* it before using it again.
*/
if (d->bd_bif == NULL) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
if (ioflag & IO_NDELAY) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (EWOULDBLOCK);
}
/*
* Make sure device is still opened
*/
- d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return (ENXIO);
+ }
+
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
+
if (error == EINTR || error == ERESTART) {
+ if (d->bd_hbuf) {
+ /*
+ * Because we msleep, the hold buffer might
+ * be filled when we wake up. Avoid rotating
+ * in this case.
+ */
+ break;
+ }
+ if (d->bd_slen) {
+ /*
+ * Sometimes we may be interrupted often and
+ * the sleep above will not timeout.
+ * Regardless, we should rotate the buffers
+ * if there's any new data pending and
+ * return it.
+ */
+ ROTATE_BUFFERS(d);
+ break;
+ }
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (error);
}
break;
if (d->bd_slen == 0) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (0);
}
* At this point, we know we have something in the hold slot.
*/
+ /*
+ * Set the hold buffer read flag so we do not
+ * rotate the buffers until the hold buffer
+ * read is complete, and to avoid issues resulting
+ * from page faults during disk sleep (<rdar://problem/13436396>).
+ */
+ d->bd_hbuf_read = 1;
+ hbuf = d->bd_hbuf;
+ hbuf_len = d->bd_hlen;
+ flags = d->bd_flags;
+ lck_mtx_unlock(bpf_mlock);
+
+#ifdef __APPLE__
+ /*
+ * Before we move data to userland, we fill out the extended
+ * header fields.
+ */
+ if (flags & BPF_EXTENDED_HDR) {
+ char *p;
+
+ p = hbuf;
+ while (p < hbuf + hbuf_len) {
+ struct bpf_hdr_ext *ehp;
+ uint32_t flowid;
+ struct so_procinfo soprocinfo;
+ int found = 0;
+
+ ehp = (struct bpf_hdr_ext *)(void *)p;
+ if ((flowid = ehp->bh_flowid)) {
+ if (ehp->bh_proto == IPPROTO_TCP)
+ found = inp_findinpcb_procinfo(&tcbinfo,
+ flowid, &soprocinfo);
+ else if (ehp->bh_proto == IPPROTO_UDP)
+ found = inp_findinpcb_procinfo(&udbinfo,
+ flowid, &soprocinfo);
+ if (found == 1) {
+ ehp->bh_pid = soprocinfo.spi_pid;
+ proc_name(ehp->bh_pid, ehp->bh_comm, MAXCOMLEN);
+ }
+ ehp->bh_flowid = 0;
+ }
+ if (flags & BPF_FINALIZE_PKTAP) {
+ struct pktap_header *pktaphdr;
+
+ pktaphdr = (struct pktap_header *)(void *)
+ (p + BPF_WORDALIGN(ehp->bh_hdrlen));
+
+ if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP)
+ pktap_finalize_proc_info(pktaphdr);
+
+ if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
+ ehp->bh_tstamp.tv_sec =
+ pktaphdr->pth_tstamp.tv_sec;
+ ehp->bh_tstamp.tv_usec =
+ pktaphdr->pth_tstamp.tv_usec;
+ }
+ }
+ p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen);
+ }
+ } else if (flags & BPF_FINALIZE_PKTAP) {
+ char *p;
+
+ p = hbuf;
+ while (p < hbuf + hbuf_len) {
+ struct bpf_hdr *hp;
+ struct pktap_header *pktaphdr;
+
+ hp = (struct bpf_hdr *)(void *)p;
+ pktaphdr = (struct pktap_header *)(void *)
+ (p + BPF_WORDALIGN(hp->bh_hdrlen));
+
+ if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP)
+ pktap_finalize_proc_info(pktaphdr);
+
+ if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
+ hp->bh_tstamp.tv_sec =
+ pktaphdr->pth_tstamp.tv_sec;
+ hp->bh_tstamp.tv_usec =
+ pktaphdr->pth_tstamp.tv_usec;
+ }
+
+ p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
+ }
+ }
+#endif
+
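+ /*
+ * Both walks above step through the hold buffer the same way a
+ * userland reader does: each record is a header followed by the
+ * captured bytes, and the next record starts at
+ * BPF_WORDALIGN(bh_hdrlen + bh_caplen) past the current one.
+ */
+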
/*
* Move data from hold buffer into user space.
* We know the entire buffer is transferred since
* we checked above that the read buffer is bpf_bufsize bytes.
*/
- error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);
-
+ error = UIOMOVE(hbuf, hbuf_len, UIO_READ, uio);
+
+ lck_mtx_lock(bpf_mlock);
+ /*
+ * Make sure device is still opened
+ */
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return (ENXIO);
+ }
+
+ d->bd_hbuf_read = 0;
d->bd_fbuf = d->bd_hbuf;
d->bd_hbuf = NULL;
d->bd_hlen = 0;
+ d->bd_hcnt = 0;
+ wakeup((caddr_t)d);
+
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (error);
}
static void
bpf_wakeup(struct bpf_d *d)
{
+ if (d->bd_state == BPF_WAITING) {
+ bpf_stop_timer(d);
+ d->bd_state = BPF_IDLE;
+ }
wakeup((caddr_t)d);
if (d->bd_async && d->bd_sig && d->bd_sigio)
pgsigio(d->bd_sigio, d->bd_sig);
-#if BSD >= 199103
selwakeup(&d->bd_sel);
- KNOTE(&d->bd_sel.si_note, 1);
-#ifndef __APPLE__
- /* XXX */
- d->bd_sel.si_pid = 0;
-#endif
-#else
- if (d->bd_selproc) {
- selwakeup(d->bd_selproc, (int)d->bd_selcoll);
- d->bd_selcoll = 0;
- d->bd_selproc = 0;
+ if ((d->bd_flags & BPF_KNOTE))
+ KNOTE(&d->bd_sel.si_note, 1);
+}
+
+
+static void
+bpf_timed_out(void *arg, __unused void *dummy)
+{
+ struct bpf_d *d = (struct bpf_d *)arg;
+
+ lck_mtx_lock(bpf_mlock);
+ if (d->bd_state == BPF_WAITING) {
+ /*
+ * There's a select or kqueue waiting for this; if there's
+ * now stuff to read, wake it up.
+ */
+ d->bd_state = BPF_TIMED_OUT;
+ if (d->bd_slen != 0)
+ bpf_wakeup(d);
+ } else if (d->bd_state == BPF_DRAINING) {
+ /*
+ * A close is waiting for this to finish.
+ * Mark it as finished, and wake the close up.
+ */
+ d->bd_state = BPF_IDLE;
+ bpf_wakeup(d);
}
-#endif
+ lck_mtx_unlock(bpf_mlock);
}
+
/* keep in sync with bpf_movein above: */
#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
int error;
char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
int datlen = 0;
+ int bif_dlt;
+ int bd_hdrcmplt;
lck_mtx_lock(bpf_mlock);
d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
+
+ bpf_acquire_d(d);
+
if (d->bd_bif == 0) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
ifp = d->bd_bif->bif_ifp;
+ if ((ifp->if_flags & IFF_UP) == 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return (ENETDOWN);
+ }
if (uio_resid(uio) == 0) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (0);
}
((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
- error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m,
- d->bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf,
- &datlen);
+
+ /*
+ * fix for PR-6849527
+ * getting variables onto the stack before dropping the lock for bpf_movein()
+ */
+ bif_dlt = (int)d->bd_bif->bif_dlt;
+ bd_hdrcmplt = d->bd_hdrcmplt;
+
+ /* bpf_movein() will allocate mbufs; drop the lock */
+ lck_mtx_unlock(bpf_mlock);
+
+ error = bpf_movein(uio, bif_dlt, &m,
+ bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf,
+ &datlen);
+
+ /* take the lock again */
+ lck_mtx_lock(bpf_mlock);
if (error) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (error);
}
- if ((unsigned)datlen > ifp->if_mtu) {
+ /* verify the device is still open */
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
m_freem(m);
- return (EMSGSIZE);
+ return (ENXIO);
}
-
- if ((error = ifp_use(ifp, kIfNetUseCount_MustNotBeZero)) != 0) {
+
+ if (d->bd_bif == NULL) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ m_free(m);
+ return (ENXIO);
+ }
+
+ if ((unsigned)datlen > ifp->if_mtu) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
m_freem(m);
- return (error);
+ return (EMSGSIZE);
}
+
#if CONFIG_MACF_NET
mac_mbuf_label_associate_bpfdesc(d, m);
#endif
+
+ bpf_set_packet_service_class(m, d->bd_traffic_class);
+
lck_mtx_unlock(bpf_mlock);
+ /*
+ * The driver frees the mbuf.
+ */
if (d->bd_hdrcmplt) {
if (d->bd_bif->bif_send)
error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m);
else
- error = dlil_output(ifp, 0, m, NULL, NULL, 1);
- }
- else {
- error = dlil_output(ifp, PF_INET, m, NULL, (struct sockaddr *)dst_buf, 0);
+ error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL);
+ } else {
+ error = dlil_output(ifp, PF_INET, m, NULL,
+ (struct sockaddr *)dst_buf, 0, NULL);
}
-
- if (ifp_unuse(ifp) != 0)
- ifp_use_reached_zero(ifp);
-
- /*
- * The driver frees the mbuf.
- */
+
+ lck_mtx_lock(bpf_mlock);
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+
return (error);
}
static void
reset_d(struct bpf_d *d)
{
+ if (d->bd_hbuf_read)
+ panic("resetting buffers during read");
+
if (d->bd_hbuf) {
/* Free the hold buffer. */
d->bd_fbuf = d->bd_hbuf;
}
d->bd_slen = 0;
d->bd_hlen = 0;
+ d->bd_scnt = 0;
+ d->bd_hcnt = 0;
d->bd_rcount = 0;
d->bd_dcount = 0;
}
* BIOCSHDRCMPLT Set "header already complete" flag
* BIOCGSEESENT Get "see packets sent" flag
* BIOCSSEESENT Set "see packets sent" flag
+ * BIOCSETTC Set traffic class.
+ * BIOCGETTC Get traffic class.
+ * BIOCSEXTHDR Set "extended header" flag
+ * BIOCSHEADDROP Set "head-drop" flag (when full, drop oldest packets)
+ * BIOCGHEADDROP Get "head-drop" flag
*/
/* ARGSUSED */
int
{
struct bpf_d *d;
int error = 0;
+ u_int int_arg;
+ struct ifreq ifr;
lck_mtx_lock(bpf_mlock);
d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
+ bpf_acquire_d(d);
+
+ if (d->bd_state == BPF_WAITING)
+ bpf_stop_timer(d);
+ d->bd_state = BPF_IDLE;
+
switch (cmd) {
default:
/*
* Check for read packet available.
*/
- case FIONREAD:
+ case FIONREAD: /* int */
{
int n;
n = d->bd_slen;
- if (d->bd_hbuf)
+ if (d->bd_hbuf && d->bd_hbuf_read == 0)
n += d->bd_hlen;
- *(int *)addr = n;
+ bcopy(&n, addr, sizeof (n));
break;
}
- case SIOCGIFADDR:
+ case SIOCGIFADDR: /* struct ifreq */
{
struct ifnet *ifp;
/*
* Get buffer len [for read()].
*/
- case BIOCGBLEN:
- *(u_int *)addr = d->bd_bufsize;
+ case BIOCGBLEN: /* u_int */
+ bcopy(&d->bd_bufsize, addr, sizeof (u_int));
break;
/*
* Set buffer length.
*/
- case BIOCSBLEN:
-#if BSD < 199103
- error = EINVAL;
-#else
+ case BIOCSBLEN: /* u_int */
if (d->bd_bif != 0)
error = EINVAL;
else {
- u_int size = *(u_int *)addr;
+ u_int size;
+
+ bcopy(addr, &size, sizeof (size));
if (size > bpf_maxbufsize)
- *(u_int *)addr = size = bpf_maxbufsize;
+ size = bpf_maxbufsize;
else if (size < BPF_MINBUFSIZE)
- *(u_int *)addr = size = BPF_MINBUFSIZE;
+ size = BPF_MINBUFSIZE;
+ bcopy(&size, addr, sizeof (size));
d->bd_bufsize = size;
}
-#endif
break;
/*
* Set link layer read filter.
*/
- case BIOCSETF32: {
- struct bpf_program32 *prg32 = (struct bpf_program32 *)addr;
- error = bpf_setf(d, prg32->bf_len,
- CAST_USER_ADDR_T(prg32->bf_insns));
+ case BIOCSETF32:
+ case BIOCSETFNR32: { /* struct bpf_program32 */
+ struct bpf_program32 prg32;
+
+ bcopy(addr, &prg32, sizeof (prg32));
+ error = bpf_setf(d, prg32.bf_len,
+ CAST_USER_ADDR_T(prg32.bf_insns), cmd);
break;
}
- case BIOCSETF64: {
- struct bpf_program64 *prg64 = (struct bpf_program64 *)addr;
- error = bpf_setf(d, prg64->bf_len, prg64->bf_insns);
+ case BIOCSETF64:
+ case BIOCSETFNR64: { /* struct bpf_program64 */
+ struct bpf_program64 prg64;
+
+ bcopy(addr, &prg64, sizeof (prg64));
+ error = bpf_setf(d, prg64.bf_len, prg64.bf_insns, cmd);
break;
}
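+ /*
+ * The BIOCSETFNR32/64 ("no reset") variants install the new
+ * filter without calling reset_d(), so packets already buffered
+ * survive a filter change; only BIOCSETF32/64 flush the buffers
+ * (see the cmd check in bpf_setf()).
+ */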
* Flush read packet buffer.
*/
case BIOCFLUSH:
+ while (d->bd_hbuf_read) {
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ }
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ error = ENXIO;
+ break;
+ }
reset_d(d);
break;
/*
* Get device parameters.
*/
- case BIOCGDLT:
+ case BIOCGDLT: /* u_int */
if (d->bd_bif == 0)
error = EINVAL;
else
- *(u_int *)addr = d->bd_bif->bif_dlt;
+ bcopy(&d->bd_bif->bif_dlt, addr, sizeof (u_int));
break;
/*
* Get a list of supported data link types.
*/
- case BIOCGDLTLIST:
+ case BIOCGDLTLIST: /* struct bpf_dltlist */
if (d->bd_bif == NULL) {
error = EINVAL;
} else {
- error = bpf_getdltlist(d,
- (struct bpf_dltlist *)addr, p);
+ error = bpf_getdltlist(d, addr, p);
}
break;
/*
* Set data link type.
*/
- case BIOCSDLT:
- if (d->bd_bif == NULL)
- error = EINVAL;
- else
- error = bpf_setdlt(d, *(u_int *)addr);
- break;
+ case BIOCSDLT: /* u_int */
+ if (d->bd_bif == NULL) {
+ error = EINVAL;
+ } else {
+ u_int dlt;
+
+ bcopy(addr, &dlt, sizeof (dlt));
+ error = bpf_setdlt(d, dlt);
+ }
+ break;
/*
* Get interface name.
*/
- case BIOCGETIF:
+ case BIOCGETIF: /* struct ifreq */
if (d->bd_bif == 0)
error = EINVAL;
else {
struct ifnet *const ifp = d->bd_bif->bif_ifp;
- struct ifreq *const ifr = (struct ifreq *)addr;
- snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
- "%s%d", ifp->if_name, ifp->if_unit);
+ snprintf(((struct ifreq *)(void *)addr)->ifr_name,
+ sizeof (ifr.ifr_name), "%s", if_name(ifp));
}
break;
/*
* Set interface.
*/
- case BIOCSETIF: {
+ case BIOCSETIF: { /* struct ifreq */
ifnet_t ifp;
- ifp = ifunit(((struct ifreq *)addr)->ifr_name);
+
+ bcopy(addr, &ifr, sizeof (ifr));
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+ ifp = ifunit(ifr.ifr_name);
if (ifp == NULL)
error = ENXIO;
else
/*
* Set read timeout.
*/
- case BIOCSRTIMEOUT:
- {
- struct BPF_TIMEVAL *_tv = (struct BPF_TIMEVAL *)addr;
- struct timeval tv;
+ case BIOCSRTIMEOUT32: { /* struct user32_timeval */
+ struct user32_timeval _tv;
+ struct timeval tv;
- tv.tv_sec = _tv->tv_sec;
- tv.tv_usec = _tv->tv_usec;
+ bcopy(addr, &_tv, sizeof (_tv));
+ tv.tv_sec = _tv.tv_sec;
+ tv.tv_usec = _tv.tv_usec;
- /*
- * Subtract 1 tick from tvtohz() since this isn't
- * a one-shot timer.
- */
- if ((error = itimerfix(&tv)) == 0)
- d->bd_rtout = tvtohz(&tv) - 1;
- break;
- }
+ /*
+ * Subtract 1 tick from tvtohz() since this isn't
+ * a one-shot timer.
+ */
+ if ((error = itimerfix(&tv)) == 0)
+ d->bd_rtout = tvtohz(&tv) - 1;
+ break;
+ }
+
+ case BIOCSRTIMEOUT64: { /* struct user64_timeval */
+ struct user64_timeval _tv;
+ struct timeval tv;
+
+ bcopy(addr, &_tv, sizeof (_tv));
+ tv.tv_sec = _tv.tv_sec;
+ tv.tv_usec = _tv.tv_usec;
+
+ /*
+ * Subtract 1 tick from tvtohz() since this isn't
+ * a one-shot timer.
+ */
+ if ((error = itimerfix(&tv)) == 0)
+ d->bd_rtout = tvtohz(&tv) - 1;
+ break;
+ }
/*
* Get read timeout.
*/
- case BIOCGRTIMEOUT:
- {
- struct BPF_TIMEVAL *tv = (struct BPF_TIMEVAL *)addr;
+ case BIOCGRTIMEOUT32: { /* struct user32_timeval */
+ struct user32_timeval tv;
- tv->tv_sec = d->bd_rtout / hz;
- tv->tv_usec = (d->bd_rtout % hz) * tick;
- break;
- }
+ bzero(&tv, sizeof (tv));
+ tv.tv_sec = d->bd_rtout / hz;
+ tv.tv_usec = (d->bd_rtout % hz) * tick;
+ bcopy(&tv, addr, sizeof (tv));
+ break;
+ }
+
+ case BIOCGRTIMEOUT64: { /* struct user64_timeval */
+ struct user64_timeval tv;
+
+ bzero(&tv, sizeof (tv));
+ tv.tv_sec = d->bd_rtout / hz;
+ tv.tv_usec = (d->bd_rtout % hz) * tick;
+ bcopy(&tv, addr, sizeof (tv));
+ break;
+ }
/*
* Get packet stats.
*/
- case BIOCGSTATS:
- {
- struct bpf_stat *bs = (struct bpf_stat *)addr;
+ case BIOCGSTATS: { /* struct bpf_stat */
+ struct bpf_stat bs;
- bs->bs_recv = d->bd_rcount;
- bs->bs_drop = d->bd_dcount;
- break;
- }
+ bzero(&bs, sizeof (bs));
+ bs.bs_recv = d->bd_rcount;
+ bs.bs_drop = d->bd_dcount;
+ bcopy(&bs, addr, sizeof (bs));
+ break;
+ }
/*
* Set immediate mode.
*/
- case BIOCIMMEDIATE:
- d->bd_immediate = *(u_int *)addr;
+ case BIOCIMMEDIATE: /* u_int */
+ d->bd_immediate = *(u_int *)(void *)addr;
break;
- case BIOCVERSION:
- {
- struct bpf_version *bv = (struct bpf_version *)addr;
+ case BIOCVERSION: { /* struct bpf_version */
+ struct bpf_version bv;
- bv->bv_major = BPF_MAJOR_VERSION;
- bv->bv_minor = BPF_MINOR_VERSION;
- break;
- }
+ bzero(&bv, sizeof (bv));
+ bv.bv_major = BPF_MAJOR_VERSION;
+ bv.bv_minor = BPF_MINOR_VERSION;
+ bcopy(&bv, addr, sizeof (bv));
+ break;
+ }
/*
* Get "header already complete" flag
*/
- case BIOCGHDRCMPLT:
- *(u_int *)addr = d->bd_hdrcmplt;
+ case BIOCGHDRCMPLT: /* u_int */
+ bcopy(&d->bd_hdrcmplt, addr, sizeof (u_int));
break;
/*
* Set "header already complete" flag
*/
- case BIOCSHDRCMPLT:
- d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
+ case BIOCSHDRCMPLT: /* u_int */
+ bcopy(addr, &int_arg, sizeof (int_arg));
+ d->bd_hdrcmplt = int_arg ? 1 : 0;
break;
/*
* Get "see sent packets" flag
*/
- case BIOCGSEESENT:
- *(u_int *)addr = d->bd_seesent;
+ case BIOCGSEESENT: /* u_int */
+ bcopy(&d->bd_seesent, addr, sizeof (u_int));
break;
/*
* Set "see sent packets" flag
*/
- case BIOCSSEESENT:
- d->bd_seesent = *(u_int *)addr;
+ case BIOCSSEESENT: /* u_int */
+ bcopy(addr, &d->bd_seesent, sizeof (u_int));
+ break;
+
+ /*
+ * Set traffic service class
+ */
+ case BIOCSETTC: { /* int */
+ int tc;
+
+ bcopy(addr, &tc, sizeof (int));
+ error = bpf_set_traffic_class(d, tc);
+ break;
+ }
+
+ /*
+ * Get traffic service class
+ */
+ case BIOCGETTC: /* int */
+ bcopy(&d->bd_traffic_class, addr, sizeof (int));
break;
- case FIONBIO: /* Non-blocking I/O */
+ case FIONBIO: /* Non-blocking I/O; int */
break;
- case FIOASYNC: /* Send signal on receive packets */
- d->bd_async = *(int *)addr;
+ case FIOASYNC: /* Send signal on receive packets; int */
+ bcopy(addr, &d->bd_async, sizeof (int));
break;
#ifndef __APPLE__
case FIOSETOWN:
*(int *)addr = -fgetown(d->bd_sigio);
break;
#endif
- case BIOCSRSIG: /* Set receive signal */
- {
- u_int sig;
+ case BIOCSRSIG: { /* Set receive signal; u_int */
+ u_int sig;
- sig = *(u_int *)addr;
+ bcopy(addr, &sig, sizeof (u_int));
- if (sig >= NSIG)
- error = EINVAL;
- else
- d->bd_sig = sig;
+ if (sig >= NSIG)
+ error = EINVAL;
+ else
+ d->bd_sig = sig;
+ break;
+ }
+ case BIOCGRSIG: /* u_int */
+ bcopy(&d->bd_sig, addr, sizeof (u_int));
+ break;
+#ifdef __APPLE__
+ case BIOCSEXTHDR: /* u_int */
+ bcopy(addr, &int_arg, sizeof (int_arg));
+ if (int_arg)
+ d->bd_flags |= BPF_EXTENDED_HDR;
+ else
+ d->bd_flags &= ~BPF_EXTENDED_HDR;
+ break;
+
+ case BIOCGIFATTACHCOUNT: { /* struct ifreq */
+ ifnet_t ifp;
+ struct bpf_if *bp;
+
+ bcopy(addr, &ifr, sizeof (ifr));
+ ifr.ifr_name[IFNAMSIZ - 1] = '\0';
+ ifp = ifunit(ifr.ifr_name);
+ if (ifp == NULL) {
+ error = ENXIO;
break;
}
- case BIOCGRSIG:
- *(u_int *)addr = d->bd_sig;
+ ifr.ifr_intval = 0;
+ for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
+ struct bpf_d *bpf_d;
+
+ if (bp->bif_ifp == NULL || bp->bif_ifp != ifp)
+ continue;
+ for (bpf_d = bp->bif_dlist; bpf_d; bpf_d = bpf_d->bd_next) {
+ ifr.ifr_intval += 1;
+ }
+ }
+ bcopy(&ifr, addr, sizeof (ifr));
break;
}
-
+ case BIOCGWANTPKTAP: /* u_int */
+ int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0;
+ bcopy(&int_arg, addr, sizeof (int_arg));
+ break;
+
+ case BIOCSWANTPKTAP: /* u_int */
+ bcopy(addr, &int_arg, sizeof (int_arg));
+ if (int_arg)
+ d->bd_flags |= BPF_WANT_PKTAP;
+ else
+ d->bd_flags &= ~BPF_WANT_PKTAP;
+ break;
+#endif
+
+ case BIOCSHEADDROP: /* int */
+ bcopy(addr, &int_arg, sizeof (int_arg));
+ d->bd_headdrop = int_arg ? 1 : 0;
+ break;
+
+ case BIOCGHEADDROP: /* int */
+ bcopy(&d->bd_headdrop, addr, sizeof (int));
+ break;
+ }
+
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (error);
* free it and replace it. Returns EINVAL for bogus requests.
*/
static int
-bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns)
+bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns,
+ u_long cmd)
{
struct bpf_insn *fcode, *old;
u_int flen, size;
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
+ if ((d->bd_flags & BPF_CLOSING) != 0)
+ return (ENXIO);
+
old = d->bd_filter;
if (bf_insns == USER_ADDR_NULL) {
if (bf_len != 0)
if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
bpf_validate(fcode, (int)flen)) {
d->bd_filter = fcode;
- reset_d(d);
+
+ if (cmd == BIOCSETF32 || cmd == BIOCSETF64)
+ reset_d(d);
+
if (old != 0)
FREE((caddr_t)old, M_DEVBUF);
{
struct bpf_if *bp;
int error;
-
+
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
+ if ((d->bd_flags & BPF_CLOSING) != 0)
+ return (ENXIO);
+
/*
* Look through attached interfaces for the named one.
*/
if (ifp == 0 || ifp != theywant || (dlt != 0 && dlt != bp->bif_dlt))
continue;
+ /*
+ * If the process knows how to deal with DLT_PKTAP, use it
+ * by default
+ */
+ if (dlt == 0 && bp->bif_dlt == DLT_PKTAP &&
+ !(d->bd_flags & BPF_WANT_PKTAP))
+ continue;
/*
* We found the requested interface.
- * If it's not up, return an error.
* Allocate the packet buffers if we need to.
* If we're already attached to requested interface,
* just flush the buffer.
*/
- if ((ifp->if_flags & IFF_UP) == 0)
- return (ENETDOWN);
-
if (d->bd_sbuf == 0) {
error = bpf_allocbufs(d);
if (error != 0)
return (error);
}
if (bp != d->bd_bif) {
- if (d->bd_bif)
/*
* Detach if attached to something else.
*/
- bpf_detachd(d);
-
- if (bpf_attachd(d, bp) != 0) {
- return ENXIO;
+ if (d->bd_bif) {
+ if (bpf_detachd(d, 0) != 0)
+ return (ENXIO);
}
+ if (bpf_attachd(d, bp) != 0)
+ return (ENXIO);
}
reset_d(d);
return (0);
* Get a list of available data link type of the interface.
*/
static int
-bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl, struct proc *p)
+bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p)
{
u_int n;
int error;
struct ifnet *ifp;
struct bpf_if *bp;
user_addr_t dlist;
+ struct bpf_dltlist bfl;
+ bcopy(addr, &bfl, sizeof (bfl));
if (proc_is64bit(p)) {
- dlist = (user_addr_t)bfl->bfl_u.bflu_pad;
+ dlist = (user_addr_t)bfl.bfl_u.bflu_pad;
} else {
- dlist = CAST_USER_ADDR_T(bfl->bfl_u.bflu_list);
+ dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list);
}
ifp = d->bd_bif->bif_ifp;
n = 0;
error = 0;
+
for (bp = bpf_iflist; bp; bp = bp->bif_next) {
if (bp->bif_ifp != ifp)
continue;
+ /*
+ * Return DLT_PKTAP only to processes that know how to handle it
+ */
+ if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP))
+ continue;
if (dlist != USER_ADDR_NULL) {
- if (n >= bfl->bfl_len) {
+ if (n >= bfl.bfl_len) {
return (ENOMEM);
}
error = copyout(&bp->bif_dlt, dlist,
sizeof (bp->bif_dlt));
+ if (error != 0)
+ break;
dlist += sizeof (bp->bif_dlt);
}
n++;
}
- bfl->bfl_len = n;
+ bfl.bfl_len = n;
+ bcopy(&bfl, addr, sizeof (bfl));
+
return (error);
}
*/
static int
bpf_setdlt(struct bpf_d *d, uint32_t dlt)
-
-
{
int error, opromisc;
struct ifnet *ifp;
if (d->bd_bif->bif_dlt == dlt)
return (0);
+
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
+ if ((d->bd_flags & BPF_CLOSING) != 0)
+ return (ENXIO);
+
ifp = d->bd_bif->bif_ifp;
for (bp = bpf_iflist; bp; bp = bp->bif_next) {
if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
}
if (bp != NULL) {
opromisc = d->bd_promisc;
- bpf_detachd(d);
+ if (bpf_detachd(d, 0) != 0)
+ return (ENXIO);
error = bpf_attachd(d, bp);
if (error) {
printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n",
lck_mtx_unlock(bpf_mlock);
error = ifnet_set_promiscuous(bp->bif_ifp, 1);
lck_mtx_lock(bpf_mlock);
- if (error)
- printf("bpf_setdlt: ifpromisc %s%d failed (%d)\n",
- ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), error);
- else
+ if (error) {
+ printf("%s: ifpromisc %s%d failed (%d)\n",
+ __func__, ifnet_name(bp->bif_ifp),
+ ifnet_unit(bp->bif_ifp), error);
+ } else {
d->bd_promisc = 1;
+ }
}
}
return (bp == NULL ? EINVAL : 0);
}
+static int
+bpf_set_traffic_class(struct bpf_d *d, int tc)
+{
+ int error = 0;
+
+ if (!SO_VALID_TC(tc))
+ error = EINVAL;
+ else
+ d->bd_traffic_class = tc;
+
+ return (error);
+}
+
+static void
+bpf_set_packet_service_class(struct mbuf *m, int tc)
+{
+ if (!(m->m_flags & M_PKTHDR))
+ return;
+
+ VERIFY(SO_VALID_TC(tc));
+ (void) m_set_service_class(m, so_tc2msc(tc));
+}
+
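+/*
+ * Example (sketch, userland): classify packets injected through this
+ * descriptor as interactive video before write(2):
+ *
+ *	int tc = SO_TC_VI;
+ *	if (ioctl(fd, BIOCSETTC, &tc) == -1)
+ *		err(1, "BIOCSETTC");
+ */
+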
/*
* Support for select()
*
* Otherwise, return false but make a note that a selwakeup() must be done.
*/
int
-bpfpoll(dev_t dev, int events, void * wql, struct proc *p)
+bpfselect(dev_t dev, int which, void * wql, struct proc *p)
{
struct bpf_d *d;
- int revents = 0;
+ int ret = 0;
lck_mtx_lock(bpf_mlock);
d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
- /*
- * An imitation of the FIONREAD ioctl code.
- */
+ bpf_acquire_d(d);
+
if (d->bd_bif == NULL) {
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
- if (events & (POLLIN | POLLRDNORM)) {
- if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
- revents |= events & (POLLIN | POLLRDNORM);
- else
- selrecord(p, &d->bd_sel, wql);
+ while (d->bd_hbuf_read)
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return (ENXIO);
}
+ switch (which) {
+ case FREAD:
+ if (d->bd_hlen != 0 ||
+ ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
+ d->bd_slen != 0))
+ ret = 1; /* read has data to return */
+ else {
+ /*
+ * Read has no data to return.
+ * Make the select wait, and start a timer if
+ * necessary.
+ */
+ selrecord(p, &d->bd_sel, wql);
+ bpf_start_timer(d);
+ }
+ break;
+
+ case FWRITE:
+ ret = 1; /* can't determine whether a write would block */
+ break;
+ }
+
+ bpf_release_d(d);
lck_mtx_unlock(bpf_mlock);
- return (revents);
+
+ return (ret);
}
+
/*
* Support for kevent() system call. Register EVFILT_READ filters and
* reject all others.
lck_mtx_lock(bpf_mlock);
d = bpf_dtab[minor(dev)];
- if (d == 0 || d == (void *)1) {
+ if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
}
- /*
- * An imitation of the FIONREAD ioctl code.
- */
if (d->bd_bif == NULL) {
lck_mtx_unlock(bpf_mlock);
return (ENXIO);
kn->kn_hook = d;
kn->kn_fop = &bpfread_filtops;
KNOTE_ATTACH(&d->bd_sel.si_note, kn);
+ d->bd_flags |= BPF_KNOTE;
+
lck_mtx_unlock(bpf_mlock);
- return 0;
+ return (0);
}
static void
struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
lck_mtx_lock(bpf_mlock);
- KNOTE_DETACH(&d->bd_sel.si_note, kn);
+ if (d->bd_flags & BPF_KNOTE) {
+ KNOTE_DETACH(&d->bd_sel.si_note, kn);
+ d->bd_flags &= ~BPF_KNOTE;
+ }
lck_mtx_unlock(bpf_mlock);
}
if (hint == 0)
lck_mtx_lock(bpf_mlock);
-
+
if (d->bd_immediate) {
- kn->kn_data = (d->bd_hlen == 0 ? d->bd_slen : d->bd_hlen);
- ready = (kn->kn_data >= ((kn->kn_sfflags & NOTE_LOWAT) ?
- kn->kn_sdata : 1));
+ /*
+ * If there's data in the hold buffer, it's the
+ * amount of data a read will return.
+ *
+ * If there's no data in the hold buffer, but
+ * there's data in the store buffer, a read will
+ * immediately rotate the store buffer to the
+ * hold buffer, the amount of data in the store
+ * buffer is the amount of data a read will
+ * return.
+ *
+ * If there's no data in either buffer, we're not
+ * ready to read.
+ */
+ kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read)
+ ? d->bd_slen : d->bd_hlen);
+ int64_t lowwat = 1;
+ if (kn->kn_sfflags & NOTE_LOWAT) {
+ if (kn->kn_sdata > d->bd_bufsize)
+ lowwat = d->bd_bufsize;
+ else if (kn->kn_sdata > lowwat)
+ lowwat = kn->kn_sdata;
+ }
+ ready = (kn->kn_data >= lowwat);
} else {
- kn->kn_data = d->bd_hlen;
+ /*
+ * If there's data in the hold buffer, it's the
+ * amount of data a read will return.
+ *
+ * If there's no data in the hold buffer, but
+ * there's data in the store buffer, if the
+ * timer has expired a read will immediately
+ * rotate the store buffer to the hold buffer,
+ * so the amount of data in the store buffer is
+ * the amount of data a read will return.
+ *
+ * If there's no data in either buffer, or there's
+ * no data in the hold buffer and the timer hasn't
+ * expired, we're not ready to read.
+ */
+ kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read) &&
+ d->bd_state == BPF_TIMED_OUT ?
+ d->bd_slen : d->bd_hlen);
ready = (kn->kn_data > 0);
}
+ if (!ready)
+ bpf_start_timer(d);
if (hint == 0)
lck_mtx_unlock(bpf_mlock);
return (ready);
}
-static inline void*
-_cast_non_const(const void * ptr) {
- union {
- const void* cval;
- void* val;
- } ret;
-
- ret.cval = ptr;
- return (ret.val);
-}
-
/*
* Copy data from an mbuf chain into a buffer. This code is derived
* from m_copydata in sys/uipc_mbuf.c.
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
- struct mbuf *m = _cast_non_const(src_arg);
+ struct mbuf *m = (struct mbuf *)(uintptr_t)(src_arg);
u_int count;
u_char *dst;
mbuf_t m,
void* hdr,
size_t hlen,
- int outbound)
+ int outbound)
{
struct bpf_if *bp;
+ struct mbuf *savedm = m;
/*
* It's possible that we get here after the bpf descriptor has been
hack_hdr.mh_type = m->m_type;
hack_hdr.mh_flags = 0;
- m = (mbuf_t)&hack_hdr;
+ __IGNORE_WCASTALIGN(m = (mbuf_t)&hack_hdr);
}
for (m0 = m; m0 != 0; m0 = m0->m_next)
if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0)
continue;
#endif
- catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
+ catchpacket(d, (u_char *)m, savedm, pktlen,
+ slen, outbound, bpf_mcopy);
}
}
}
* pkt is really an mbuf.
*/
static void
-catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
+catchpacket(struct bpf_d *d, u_char *pkt, struct mbuf *m, u_int pktlen,
+ u_int snaplen, int outbound,
void (*cpfn)(const void *, void *, size_t))
{
struct bpf_hdr *hp;
+ struct bpf_hdr_ext *ehp;
int totlen, curlen;
- int hdrlen = d->bd_bif->bif_hdrlen;
+ int hdrlen, caplen;
+ int do_wakeup = 0;
+ u_char *payload;
+ struct timeval tv;
+ struct m_tag *mt = NULL;
+ struct bpf_mtag *bt = NULL;
+
+ hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen :
+ d->bd_bif->bif_hdrlen;
/*
* Figure out how many bytes to move. If the packet is
* greater or equal to the snapshot length, transfer that
* Rotate the buffers if we can, then wakeup any
* pending reads.
*/
- if (d->bd_fbuf == 0) {
+ if (d->bd_fbuf == NULL) {
+ if (d->bd_headdrop == 0) {
+ /*
+ * We haven't completed the previous read yet,
+ * so drop the packet.
+ */
+ ++d->bd_dcount;
+ return;
+ }
/*
- * We haven't completed the previous read yet,
- * so drop the packet.
+ * Drop the hold buffer as it contains older packets
*/
- ++d->bd_dcount;
- return;
+ d->bd_dcount += d->bd_hcnt;
+ d->bd_fbuf = d->bd_hbuf;
+ ROTATE_BUFFERS(d);
+ } else {
+ ROTATE_BUFFERS(d);
}
- ROTATE_BUFFERS(d);
- bpf_wakeup(d);
+ do_wakeup = 1;
curlen = 0;
}
- else if (d->bd_immediate)
+ else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
/*
- * Immediate mode is set. A packet arrived so any
- * reads should be woken up.
+ * Immediate mode is set, or the read timeout has
+ * already expired during a select call. A packet
+ * arrived, so the reader should be woken up.
*/
- bpf_wakeup(d);
+ do_wakeup = 1;
/*
* Append the bpf header.
*/
- hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
- struct timeval tv;
microtime(&tv);
- hp->bh_tstamp.tv_sec = tv.tv_sec;
- hp->bh_tstamp.tv_usec = tv.tv_usec;
- hp->bh_datalen = pktlen;
- hp->bh_hdrlen = hdrlen;
+ if (d->bd_flags & BPF_EXTENDED_HDR) {
+ ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen);
+ memset(ehp, 0, sizeof(*ehp));
+ ehp->bh_tstamp.tv_sec = tv.tv_sec;
+ ehp->bh_tstamp.tv_usec = tv.tv_usec;
+ ehp->bh_datalen = pktlen;
+ ehp->bh_hdrlen = hdrlen;
+ ehp->bh_caplen = totlen - hdrlen;
+ mt = m_tag_locate(m, bpf_mtag_id, 0, NULL);
+ if (mt && mt->m_tag_len >= sizeof(*bt)) {
+ bt = (struct bpf_mtag *)(mt + 1);
+ ehp->bh_pid = bt->bt_pid;
+ strlcpy(ehp->bh_comm, bt->bt_comm,
+ sizeof(ehp->bh_comm));
+ ehp->bh_svc = so_svc2tc(bt->bt_svc);
+ if (bt->bt_direction == BPF_MTAG_DIR_OUT)
+ ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;
+ else
+ ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
+ m_tag_delete(m, mt);
+ } else if (outbound) {
+ /* only do lookups on non-raw INPCB */
+ if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID|
+ PKTF_FLOW_LOCALSRC|PKTF_FLOW_RAWSOCK)) ==
+ (PKTF_FLOW_ID|PKTF_FLOW_LOCALSRC) &&
+ m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) {
+ ehp->bh_flowid = m->m_pkthdr.pkt_flowid;
+ ehp->bh_proto = m->m_pkthdr.pkt_proto;
+ }
+ ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc);
+ ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;
+ } else
+ ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
+ payload = (u_char *)ehp + hdrlen;
+ caplen = ehp->bh_caplen;
+ } else {
+ hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen);
+ hp->bh_tstamp.tv_sec = tv.tv_sec;
+ hp->bh_tstamp.tv_usec = tv.tv_usec;
+ hp->bh_datalen = pktlen;
+ hp->bh_hdrlen = hdrlen;
+ hp->bh_caplen = totlen - hdrlen;
+ payload = (u_char *)hp + hdrlen;
+ caplen = hp->bh_caplen;
+ }
/*
* Copy the packet data into the store buffer and update its length.
*/
- (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
+ (*cpfn)(pkt, payload, caplen);
d->bd_slen = curlen + totlen;
+ d->bd_scnt += 1;
+
+ if (do_wakeup)
+ bpf_wakeup(d);
}
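+/*
+ * Note the head-drop policy above: with BIOCSHEADDROP set, a
+ * descriptor whose store and hold buffers are both full discards the
+ * older packets in the hold buffer (bd_dcount += bd_hcnt) to make
+ * room for new ones; with it clear, the newly arrived packet is
+ * dropped instead, the historical BPF behavior.
+ */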
/*
}
d->bd_slen = 0;
d->bd_hlen = 0;
+ d->bd_scnt = 0;
+ d->bd_hcnt = 0;
return (0);
}
* been detached from its interface and it yet hasn't been marked
* free.
*/
+ if (d->bd_hbuf_read)
+ panic("bpf buffer freed during read");
+
if (d->bd_sbuf != 0) {
FREE(d->bd_sbuf, M_DEVBUF);
- if (d->bd_hbuf != 0)
+ if (d->bd_hbuf != 0)
FREE(d->bd_hbuf, M_DEVBUF);
if (d->bd_fbuf != 0)
FREE(d->bd_fbuf, M_DEVBUF);
struct bpf_if *bp_temp;
struct bpf_if *bp_first = NULL;
- bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF, M_WAIT);
+ bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF,
+ M_WAIT | M_ZERO);
if (bp_new == 0)
panic("bpfattach");
}
if (bp_temp != NULL) {
- printf("bpfattach - %s%d with dlt %d is already attached\n",
- ifp->if_name, ifp->if_unit, dlt);
+ printf("bpfattach - %s with dlt %d is already attached\n",
+ if_name(ifp), dlt);
FREE(bp_new, M_DEVBUF);
lck_mtx_unlock(bpf_mlock);
return EEXIST;
}
- bzero(bp_new, sizeof(*bp_new));
bp_new->bif_ifp = ifp;
bp_new->bif_dlt = dlt;
bp_new->bif_send = send;
* performance reasons and to alleviate alignment restrictions).
*/
bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
+ bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen +
+ sizeof(struct bpf_hdr_ext)) - hdrlen;
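+ /*
+ * Example: for DLT_EN10MB, hdrlen is 14 and SIZEOF_BPF_HDR is
+ * typically 18, so bif_hdrlen = BPF_WORDALIGN(32) - 14 = 18;
+ * bpf header plus link header then occupy 32 bytes and the
+ * network-layer payload that follows starts word-aligned in the
+ * capture buffer.
+ */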
/* Take a reference on the interface */
ifnet_reference(ifp);
#ifndef __APPLE__
if (bootverbose)
- printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
+ printf("bpf: %s attached\n", if_name(ifp));
#endif
return 0;
bpfdetach(struct ifnet *ifp)
{
struct bpf_if *bp, *bp_prev, *bp_next;
- struct bpf_if *bp_free = NULL;
struct bpf_d *d;
-
+ if (bpf_debug != 0)
+ printf("%s: %s\n",
+ __func__, if_name(ifp));
+
lck_mtx_lock(bpf_mlock);
- /* Locate BPF interface information */
+ /*
+ * Detach the devices attached to that interface while keeping
+ * the lock held to maintain the integrity of the interface list
+ */
bp_prev = NULL;
for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
bp_next = bp->bif_next;
+
if (ifp != bp->bif_ifp) {
bp_prev = bp;
continue;
}
-
- while ((d = bp->bif_dlist) != NULL) {
- bpf_detachd(d);
- bpf_wakeup(d);
- }
-
- if (bp_prev) {
+ /* Unlink from the interface list */
+ if (bp_prev)
bp_prev->bif_next = bp->bif_next;
- } else {
+ else
bpf_iflist = bp->bif_next;
+
+ /* Detach the devices attached to the interface */
+ while ((d = bp->bif_dlist) != NULL) {
+ /*
+ * Take an extra reference to prevent the device
+ * from being freed when bpf_detachd() releases
+ * the reference for the interface list
+ */
+ bpf_acquire_d(d);
+ bpf_detachd(d, 0);
+ bpf_wakeup(d);
+ bpf_release_d(d);
}
-
- bp->bif_next = bp_free;
- bp_free = bp;
-
ifnet_release(ifp);
}
lck_mtx_unlock(bpf_mlock);
-
- FREE(bp, M_DEVBUF);
-
}
void
if (bpf_devsw_installed == 0) {
bpf_devsw_installed = 1;
-
- bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
-
- bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
-
- bpf_mlock_attr = lck_attr_alloc_init();
-
- bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);
-
- if (bpf_mlock == 0) {
- printf("bpf_init: failed to allocate bpf_mlock\n");
- bpf_devsw_installed = 0;
- return;
- }
-
+ bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
+ bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
+ bpf_mlock_attr = lck_attr_alloc_init();
+ lck_mtx_init(bpf_mlock, bpf_mlock_grp, bpf_mlock_attr);
maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
if (maj == -1) {
- if (bpf_mlock)
- lck_mtx_free(bpf_mlock, bpf_mlock_grp);
if (bpf_mlock_attr)
lck_attr_free(bpf_mlock_attr);
if (bpf_mlock_grp)
for (i = 0 ; i < NBPFILTER; i++)
bpf_make_dev_t(maj);
+
+ VERIFY(mbuf_tag_id_find(BPF_CONTROL_NAME, &bpf_mtag_id) == 0);
}
#else
cdevsw_add(&bpf_cdevsw);