X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/bsd/net/bpf.c?ds=sidebyside diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c index 6363b4fb9..1da778677 100644 --- a/bsd/net/bpf.c +++ b/bsd/net/bpf.c @@ -58,11 +58,10 @@ * * @(#)bpf.c 8.2 (Berkeley) 3/28/94 * + * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $ */ -#include "bpfilter.h" - -#if NBPFILTER > 0 +#include "bpf.h" #ifndef __GNUC__ #define inline @@ -77,17 +76,17 @@ #include #include #include - - -#include - - #include #include #include #include #include +#if defined(sparc) && BSD < 199103 +#include +#endif +#include + #include #include @@ -99,18 +98,21 @@ #include #include #include +#include - +#include #include #include +#if NBPFILTER > 0 + /* * Older BSDs don't have kernel malloc. */ #if BSD < 199103 extern bcopy(); static caddr_t bpf_alloc(); - +#include #define BPF_BUFSIZE (MCLBYTES-8) #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) #else @@ -118,33 +120,48 @@ static caddr_t bpf_alloc(); #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) #endif + #define PRINET 26 /* interruptible */ /* * The default read buffer size is patchable. */ static int bpf_bufsize = BPF_BUFSIZE; - - - SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW, &bpf_bufsize, 0, ""); - +static int bpf_maxbufsize = BPF_MAXBUFSIZE; +SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW, + &bpf_maxbufsize, 0, ""); /* * bpf_iflist is the list of interfaces; each corresponds to an ifnet - * bpf_dtab holds the descriptors, indexed by minor device # + * bpf_dtab holds pointer to the descriptors, indexed by minor device # */ static struct bpf_if *bpf_iflist; -static struct bpf_d bpf_dtab[NBPFILTER]; -static int bpf_dtab_init; -static int nbpfilter = NBPFILTER; +#ifdef __APPLE__ +/* + * BSD now stores the bpf_d in the dev_t which is a struct + * on their system. Our dev_t is an int, so we still store + * the bpf_d in a separate table indexed by minor device #. + */ +static struct bpf_d **bpf_dtab = NULL; +static int bpf_dtab_size = 0; +static int nbpfilter = 0; + +/* + * Mark a descriptor free by making it point to itself. + * This is probably cheaper than marking with a constant since + * the address should be in a register anyway. + */ +#define D_ISFREE(d) ((d) == (d)->bd_next) +#define D_MARKFREE(d) ((d)->bd_next = (d)) +#define D_MARKUSED(d) ((d)->bd_next = 0) +#endif /* __APPLE__ */ static int bpf_allocbufs __P((struct bpf_d *)); static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp)); static void bpf_detachd __P((struct bpf_d *d)); static void bpf_freed __P((struct bpf_d *)); -static void bpf_ifname __P((struct ifnet *, struct ifreq *)); static void bpf_mcopy __P((const void *, void *, size_t)); static int bpf_movein __P((struct uio *, int, struct mbuf **, struct sockaddr *, int *)); @@ -156,27 +173,52 @@ static void catchpacket __P((struct bpf_d *, u_char *, u_int, static void reset_d __P((struct bpf_d *)); static int bpf_setf __P((struct bpf_d *, struct bpf_program *)); +/*static void *bpf_devfs_token[MAXBPFILTER];*/ + +static int bpf_devsw_installed; + +void bpf_init __P((void *unused)); + + +/* + * Darwin differs from BSD here, the following are static + * on BSD and not static on Darwin. 
+ */ d_open_t bpfopen; d_close_t bpfclose; d_read_t bpfread; d_write_t bpfwrite; d_ioctl_t bpfioctl; + select_fcn_t bpfpoll; - -#define BPF_MAJOR 7 - +#ifdef __APPLE__ void bpf_mtap(struct ifnet *, struct mbuf *); int bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(), bpfpoll(); +#endif - +/* Darwin's cdevsw struct differs slightly from BSDs */ +#define CDEV_MAJOR 23 static struct cdevsw bpf_cdevsw = { - bpfopen, bpfclose, bpfread, bpfwrite, - bpfioctl, nulldev, nulldev, NULL, bpfpoll, - eno_mmap, eno_strat, eno_getc, eno_putc, 0 + /* open */ bpfopen, + /* close */ bpfclose, + /* read */ bpfread, + /* write */ bpfwrite, + /* ioctl */ bpfioctl, + /* stop */ nulldev, + /* reset */ nulldev, + /* tty */ NULL, + /* select */ bpfpoll, + /* mmap */ eno_mmap, + /* strategy*/ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* type */ 0 }; +#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) + static int bpf_movein(uio, linktype, mp, sockp, datlen) register struct uio *uio; @@ -239,11 +281,22 @@ bpf_movein(uio, linktype, mp, sockp, datlen) hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ break; #endif + case DLT_PPP: + sockp->sa_family = AF_UNSPEC; + hlen = 4; /* This should match PPP_HDRLEN */ + break; + + case DLT_APPLE_IP_OVER_IEEE1394: + sockp->sa_family = AF_UNSPEC; + hlen = sizeof(struct firewire_header); + break; default: return (EIO); } - + if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) { + return (EIO); + } len = uio->uio_resid; *datlen = len - hlen; if ((unsigned)len > MCLBYTES) @@ -290,6 +343,8 @@ bpf_movein(uio, linktype, mp, sockp, datlen) return (error); } +#ifdef __APPLE__ +/* Callback registered with Ethernet driver. */ int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) { boolean_t funnel_state; @@ -309,6 +364,62 @@ int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) return 0; } +/* + * Returns 1 on sucess, 0 on failure + */ +static int +bpf_dtab_grow(int increment) +{ + struct bpf_d **new_dtab = NULL; + + new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * (bpf_dtab_size + increment), M_DEVBUF, M_WAIT); + if (new_dtab == NULL) + return 0; + + if (bpf_dtab) { + struct bpf_d **old_dtab; + + bcopy(bpf_dtab, new_dtab, sizeof(struct bpf_d *) * bpf_dtab_size); + /* + * replace must be atomic with respect to free do bpf_dtab + * is always valid. + */ + old_dtab = bpf_dtab; + bpf_dtab = new_dtab; + _FREE(old_dtab, M_DEVBUF); + } + else bpf_dtab = new_dtab; + + bzero(bpf_dtab + bpf_dtab_size, sizeof(struct bpf_d *) * increment); + + bpf_dtab_size += increment; + + return 1; +} + +static struct bpf_d * +bpf_make_dev_t(int maj) +{ + struct bpf_d *d; + + if (nbpfilter >= bpf_dtab_size && bpf_dtab_grow(NBPFILTER) == 0) + return NULL; + + d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT); + if (d != NULL) { + int i = nbpfilter++; + + bzero(d, sizeof(struct bpf_d)); + bpf_dtab[i] = d; + D_MARKFREE(bpf_dtab[i]); + /*bpf_devfs_token[i] = */devfs_make_node(makedev(maj, i), + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, + "bpf%d", i); + } + return d; +} + +#endif /* * Attach file to the bpf interface, i.e. make d listen on bp. @@ -319,8 +430,6 @@ bpf_attachd(d, bp) struct bpf_d *d; struct bpf_if *bp; { - struct ifnet *ifp; - /* * Point d at bp, and add d to the interface's list of listeners. 
* Finally, point the driver's bpf cookie at the interface so @@ -331,10 +440,11 @@ bpf_attachd(d, bp) bp->bif_dlist = d; bp->bif_ifp->if_bpf = bp; - ifp = bp->bif_ifp; - if (ifp->if_set_bpf_tap) - (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); +#ifdef __APPLE__ + if (bp->bif_ifp->if_set_bpf_tap) + (*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); +#endif } /* @@ -346,11 +456,12 @@ bpf_detachd(d) { struct bpf_d **p; struct bpf_if *bp; +#ifdef __APPLE__ struct ifnet *ifp; ifp = d->bd_bif->bif_ifp; - if (ifp->if_set_bpf_tap) - (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0); + +#endif bp = d->bd_bif; /* @@ -364,8 +475,9 @@ bpf_detachd(d) * Something is really wrong if we were able to put * the driver into promiscuous mode, but can't * take it out. + * Most likely the network interface is gone. */ - panic("bpf: ifpromisc failed"); + printf("bpf: ifpromisc failed"); } /* Remove d from the interface's descriptor list. */ p = &bp->bif_dlist; @@ -375,24 +487,18 @@ bpf_detachd(d) panic("bpf_detachd: descriptor not in list"); } *p = (*p)->bd_next; - if (bp->bif_dlist == 0) + if (bp->bif_dlist == 0) { /* * Let the driver know that there are no more listeners. */ + if (ifp->if_set_bpf_tap) + (*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0); d->bd_bif->bif_ifp->if_bpf = 0; + } d->bd_bif = 0; } -/* - * Mark a descriptor free by making it point to itself. - * This is probably cheaper than marking with a constant since - * the address should be in a register anyway. - */ -#define D_ISFREE(d) ((d) == (d)->bd_next) -#define D_MARKFREE(d) ((d)->bd_next = (d)) -#define D_MARKUSED(d) ((d)->bd_next = 0) - /* * Open ethernet device. Returns ENXIO for illegal minor device number, * EBUSY if file is open by another process. @@ -407,25 +513,51 @@ bpfopen(dev, flags, fmt, p) { register struct bpf_d *d; +#ifdef __APPLE__ + /* new device nodes on demand when opening the last one */ + if (minor(dev) == nbpfilter - 1) + bpf_make_dev_t(major(dev)); + if (minor(dev) >= nbpfilter) return (ENXIO); + + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +#else + if (p->p_prison) + return (EPERM); + + d = dev->si_drv1; +#endif /* - * Each minor can be opened by only one process. If the requested + * Each minor can be opened by only one process. If the requested * minor is in use, return EBUSY. */ - d = &bpf_dtab[minor(dev)]; +#ifdef __APPLE__ if (!D_ISFREE(d)) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (EBUSY); } - + /* Mark "free" and do most initialization. 
*/ bzero((char *)d, sizeof(*d)); +#else + if (d) + return (EBUSY); + make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev)); + MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK); + bzero(d, sizeof(*d)); + dev->si_drv1 = d; +#endif d->bd_bufsize = bpf_bufsize; d->bd_sig = SIGIO; + d->bd_seesent = 1; + +#ifdef __APPLE__ thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +#endif + return (0); } @@ -443,14 +575,39 @@ bpfclose(dev, flags, fmt, p) { register struct bpf_d *d; register int s; +#ifdef __APPLE__ + struct bpf_d **bpf_dtab_schk; +#endif +#ifndef __APPLE__ + funsetown(d->bd_sigio); +#endif + s = splimp(); +#ifdef __APPLE__ +again: + d = bpf_dtab[minor(dev)]; + bpf_dtab_schk = bpf_dtab; +#endif thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - s = splimp(); - d = &bpf_dtab[minor(dev)]; +#ifdef __APPLE__ + /* + * If someone grows bpf_dtab[] while we were waiting for the + * funnel, then we will be pointing off into freed memory; + * check to see if this is the case. + */ + if (bpf_dtab_schk != bpf_dtab) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto again; + } +#endif + if (d->bd_bif) bpf_detachd(d); splx(s); +#ifdef __APPLE__ + selthreadclear(&d->bd_sel); +#endif bpf_freed(d); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (0); @@ -466,8 +623,6 @@ bpf_timeout(arg) { boolean_t funnel_state; struct bpf_d *d = (struct bpf_d *)arg; - - funnel_state = thread_funnel_set(network_flock, TRUE); d->bd_timedout = 1; wakeup(arg); @@ -524,17 +679,16 @@ bpfread(dev, uio, ioflag) int error; int s; - + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; /* * Restrict application to use a buffer the same size as * as kernel buffers. */ if (uio->uio_resid != d->bd_bufsize) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (EINVAL); } @@ -554,6 +708,19 @@ bpfread(dev, uio, ioflag) ROTATE_BUFFERS(d); break; } + + /* + * No data is available, check to see if the bpf device + * is still pointed at a real interface. If not, return + * ENXIO so that the userland process knows to rebind + * it before using it again. 
+ */ + if (d->bd_bif == NULL) { + splx(s); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (ENXIO); + } + if (ioflag & IO_NDELAY) error = EWOULDBLOCK; else @@ -621,22 +788,23 @@ bpf_wakeup(d) pgsigio(d->bd_sigio, d->bd_sig, 0); #if BSD >= 199103 - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); selwakeup(&d->bd_sel); - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +#ifndef __APPLE__ /* XXX */ - d->bd_sel.si_thread = 0; + d->bd_sel.si_pid = 0; +#endif #else if (d->bd_selproc) { - thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); selwakeup(d->bd_selproc, (int)d->bd_selcoll); - thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); d->bd_selcoll = 0; d->bd_selproc = 0; } #endif } +/* keep in sync with bpf_movein above: */ +#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) + int bpfwrite(dev, uio, ioflag) dev_t dev; @@ -644,17 +812,16 @@ bpfwrite(dev, uio, ioflag) int ioflag; { register struct bpf_d *d; - struct ifnet *ifp; struct mbuf *m; int error, s; - static struct sockaddr dst; + char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; int datlen; - + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; + if (d->bd_bif == 0) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (ENXIO); @@ -666,8 +833,9 @@ bpfwrite(dev, uio, ioflag) thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (0); } - - error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); + ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); + error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, + (struct sockaddr *)dst_buf, &datlen); if (error) { thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (error); @@ -678,18 +846,17 @@ bpfwrite(dev, uio, ioflag) return (EMSGSIZE); } - s = splnet(); + if (d->bd_hdrcmplt) { + ((struct sockaddr *)dst_buf)->sa_family = pseudo_AF_HDRCMPLT; + } - error = dlil_output((u_long) ifp, m, - (caddr_t) 0, &dst, 0); + s = splnet(); - /* - error = dlil_inject_if_output(m, DLIL_NULL_FILTER); - */ + error = dlil_output(ifptodlt(ifp, PF_INET), m, + (caddr_t) 0, (struct sockaddr *)dst_buf, 0); splx(s); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); - /* * The driver frees the mbuf. */ @@ -730,9 +897,13 @@ reset_d(d) * BIOCGSTATS Get packet stats. * BIOCIMMEDIATE Set immediate mode. * BIOCVERSION Get filter language version. + * BIOCGHDRCMPLT Get "header already complete" flag + * BIOCSHDRCMPLT Set "header already complete" flag + * BIOCGSEESENT Get "see packets sent" flag + * BIOCSSEESENT Set "see packets sent" flag */ /* ARGSUSED */ - int +int bpfioctl(dev, cmd, addr, flags, p) dev_t dev; u_long cmd; @@ -743,9 +914,9 @@ bpfioctl(dev, cmd, addr, flags, p) register struct bpf_d *d; int s, error = 0; + d = bpf_dtab[minor(dev)]; thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - d = &bpf_dtab[minor(dev)]; switch (cmd) { @@ -802,8 +973,8 @@ bpfioctl(dev, cmd, addr, flags, p) else { register u_int size = *(u_int *)addr; - if (size > BPF_MAXBUFSIZE) - *(u_int *)addr = size = BPF_MAXBUFSIZE; + if (size > bpf_maxbufsize) + *(u_int *)addr = size = bpf_maxbufsize; else if (size < BPF_MINBUFSIZE) *(u_int *)addr = size = BPF_MINBUFSIZE; d->bd_bufsize = size; @@ -858,13 +1029,18 @@ bpfioctl(dev, cmd, addr, flags, p) break; /* - * Set interface name. + * Get interface name. 
*/ case BIOCGETIF: if (d->bd_bif == 0) error = EINVAL; - else - bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); + else { + struct ifnet *const ifp = d->bd_bif->bif_ifp; + struct ifreq *const ifr = (struct ifreq *)addr; + + snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), + "%s%d", ifp->if_name, ifp->if_unit); + } break; /* @@ -930,13 +1106,41 @@ bpfioctl(dev, cmd, addr, flags, p) break; } + /* + * Get "header already complete" flag + */ + case BIOCGHDRCMPLT: + *(u_int *)addr = d->bd_hdrcmplt; + break; + + /* + * Set "header already complete" flag + */ + case BIOCSHDRCMPLT: + d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; + break; + + /* + * Get "see sent packets" flag + */ + case BIOCGSEESENT: + *(u_int *)addr = d->bd_seesent; + break; + + /* + * Set "see sent packets" flag + */ + case BIOCSSEESENT: + d->bd_seesent = *(u_int *)addr; + break; + case FIONBIO: /* Non-blocking I/O */ break; case FIOASYNC: /* Send signal on receive packets */ d->bd_async = *(int *)addr; break; -#if ISFB31 +#ifndef __APPLE__ case FIOSETOWN: error = fsetown(*(int *)addr, &d->bd_sigio); break; @@ -1006,6 +1210,10 @@ bpf_setf(d, fp) size = flen * sizeof(*fp->bf_insns); fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); +#ifdef __APPLE__ + if (fcode == NULL) + return (ENOBUFS); +#endif if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && bpf_validate(fcode, (int)flen)) { s = splimp(); @@ -1080,28 +1288,6 @@ bpf_setif(d, ifr) return (ENXIO); } -/* - * Convert an interface name plus unit number of an ifp to a single - * name which is returned in the ifr. - */ -static void -bpf_ifname(ifp, ifr) - struct ifnet *ifp; - struct ifreq *ifr; -{ - char *s = ifp->if_name; - char *d = ifr->ifr_name; - - while (*d++ = *s++) - continue; - d--; /* back to the null */ - /* XXX Assume that unit number is less than 10. */ - *d++ = ifp->if_unit + '0'; - *d = '\0'; -} - - - /* * Support for select() and poll() system calls * @@ -1109,28 +1295,34 @@ bpf_ifname(ifp, ifr) * Otherwise, return false but make a note that a selwakeup() must be done. */ int -bpfpoll(dev, events, p) +bpfpoll(dev, events, wql, p) register dev_t dev; int events; + void * wql; struct proc *p; { register struct bpf_d *d; register int s; int revents = 0; + d = bpf_dtab[minor(dev)]; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); /* * An imitation of the FIONREAD ioctl code. */ - d = &bpf_dtab[minor(dev)]; + if (d->bd_bif == NULL) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (ENXIO); + } s = splimp(); - if (events & (POLLIN | POLLRDNORM)) + if (events & (POLLIN | POLLRDNORM)) { if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) revents |= events & (POLLIN | POLLRDNORM); else - selrecord(p, &d->bd_sel); - + selrecord(p, &d->bd_sel, wql); + } splx(s); thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); return (revents); @@ -1157,15 +1349,20 @@ bpf_tap(ifp, pkt, pktlen) * interfaces shared any data. This is not the case. 
*/ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); - if ((bp = ifp->if_bpf)) { - for (d = bp->bif_dlist; d != 0; d = d->bd_next) { - ++d->bd_rcount; - slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); - if (slen != 0) - catchpacket(d, pkt, pktlen, slen, bcopy); - } + bp = ifp->if_bpf; +#ifdef __APPLE__ + if (bp) { +#endif + for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); + if (slen != 0) + catchpacket(d, pkt, pktlen, slen, bcopy); + } +#ifdef __APPLE__ } thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +#endif } /* @@ -1188,7 +1385,7 @@ bpf_mcopy(src_arg, dst_arg, len) if (m == 0) panic("bpf_mcopy"); count = min(m->m_len, len); - bcopy(mtod(m, void *), dst, count); + bcopy(mtod((struct mbuf *)m, void *), dst, count); m = m->m_next; dst += count; len -= count; @@ -1213,6 +1410,8 @@ bpf_mtap(ifp, m) pktlen += m0->m_len; for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) + continue; ++d->bd_rcount; slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); if (slen != 0) @@ -1356,7 +1555,7 @@ bpfattach(ifp, dlt, hdrlen) { struct bpf_if *bp; int i; - bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_DONTWAIT); + bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT); if (bp == 0) panic("bpfattach"); @@ -1377,50 +1576,146 @@ bpfattach(ifp, dlt, hdrlen) */ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; - /* - * Mark all the descriptors free if this hasn't been done. - */ - if (!bpf_dtab_init) { - for (i = 0; i < nbpfilter; ++i) - D_MARKFREE(&bpf_dtab[i]); - bpf_dtab_init = 1; - } -#if 0 +#ifndef __APPLE__ if (bootverbose) printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); #endif } -static void *bpf_devfs_token[NBPFILTER]; +/* + * Detach bpf from an interface. This involves detaching each descriptor + * associated with the interface, and leaving bd_bif NULL. Notify each + * descriptor as it's detached so that any sleepers wake up and get + * ENXIO. 
+ */ +void +bpfdetach(ifp) + struct ifnet *ifp; +{ + struct bpf_if *bp, *bp_prev; + struct bpf_d *d; + int s; -static int bpf_devsw_installed; + s = splimp(); + + /* Locate BPF interface information */ + bp_prev = NULL; + for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { + if (ifp == bp->bif_ifp) + break; + bp_prev = bp; + } + +#ifdef __APPLE__ + /* Check for no BPF interface information */ + if (bp == NULL) { + return; + } +#endif + + /* Interface wasn't attached */ + if (bp->bif_ifp == NULL) { + splx(s); +#ifndef __APPLE__ + printf("bpfdetach: %s%d was not attached\n", ifp->if_name, + ifp->if_unit); +#endif + return; + } + + while ((d = bp->bif_dlist) != NULL) { + bpf_detachd(d); + bpf_wakeup(d); + } + + if (bp_prev) { + bp_prev->bif_next = bp->bif_next; + } else { + bpf_iflist = bp->bif_next; + } + + FREE(bp, M_DEVBUF); + + splx(s); +} -void bpf_init __P((void *unused)); void bpf_init(unused) void *unused; { +#ifdef __APPLE__ int i; - int maj; + int maj; if (!bpf_devsw_installed ) { - bpf_devsw_installed = 1; - maj = cdevsw_add(BPF_MAJOR, &bpf_cdevsw); - if (maj == -1) { - printf("bpf_init: failed to allocate a major number!\n"); - nbpfilter = 0; - return; - } - for (i = 0 ; i < nbpfilter; i++) { - bpf_devfs_token[i] = devfs_make_node(makedev(maj, i), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, - "bpf%x", i); - } - } + bpf_devsw_installed = 1; + maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw); + if (maj == -1) { + printf("bpf_init: failed to allocate a major number!\n"); + nbpfilter = 0; + return; + } + if (bpf_dtab_grow(NBPFILTER) == 0) { + printf("bpf_init: failed to allocate bpf_dtab\n"); + return; + } + for (i = 0 ; i < NBPFILTER; i++) + bpf_make_dev_t(maj); + } +#else + cdevsw_add(&bpf_cdevsw); +#endif } -/* +#ifndef __APPLE__ SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) -*/ - #endif + +#else /* !BPF */ +#ifndef __APPLE__ +/* + * NOP stubs to allow bpf-using drivers to load and function. + * + * A 'better' implementation would allow the core bpf functionality + * to be loaded at runtime. + */ + +void +bpf_tap(ifp, pkt, pktlen) + struct ifnet *ifp; + register u_char *pkt; + register u_int pktlen; +{ +} + +void +bpf_mtap(ifp, m) + struct ifnet *ifp; + struct mbuf *m; +{ +} + +void +bpfattach(ifp, dlt, hdrlen) + struct ifnet *ifp; + u_int dlt, hdrlen; +{ +} + +void +bpfdetach(ifp) + struct ifnet *ifp; +{ +} + +u_int +bpf_filter(pc, p, wirelen, buflen) + register const struct bpf_insn *pc; + register u_char *p; + u_int wirelen; + register u_int buflen; +{ + return -1; /* "no filter" behaviour */ +} +#endif /* !defined(__APPLE__) */ +#endif /* NBPFILTER > 0 */