]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/bpf.c
xnu-344.21.73.tar.gz
[apple/xnu.git] / bsd / net / bpf.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (c) 1990, 1991, 1993
27 * The Regents of the University of California. All rights reserved.
28 *
29 * This code is derived from the Stanford/CMU enet packet filter,
30 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
31 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
32 * Berkeley Laboratory.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement:
44 * This product includes software developed by the University of
45 * California, Berkeley and its contributors.
46 * 4. Neither the name of the University nor the names of its contributors
47 * may be used to endorse or promote products derived from this software
48 * without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
52 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
53 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
54 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
55 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
56 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
57 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
58 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
59 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 * SUCH DAMAGE.
61 *
62 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
63 *
64 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
65 */
66
67 #include "bpf.h"
68
69 #ifndef __GNUC__
70 #define inline
71 #else
72 #define inline __inline
73 #endif
74
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/conf.h>
78 #include <sys/malloc.h>
79 #include <sys/mbuf.h>
80 #include <sys/time.h>
81 #include <sys/proc.h>
82 #include <sys/signalvar.h>
83 #include <sys/filio.h>
84 #include <sys/sockio.h>
85 #include <sys/ttycom.h>
86 #include <sys/filedesc.h>
87
88 #if defined(sparc) && BSD < 199103
89 #include <sys/stream.h>
90 #endif
91 #include <sys/poll.h>
92
93 #include <sys/socket.h>
94 #include <sys/vnode.h>
95
96 #include <net/if.h>
97 #include <net/bpf.h>
98 #include <net/bpfdesc.h>
99
100 #include <netinet/in.h>
101 #include <netinet/if_ether.h>
102 #include <sys/kernel.h>
103 #include <sys/sysctl.h>
104
105
106 #include <miscfs/devfs/devfs.h>
107 #include <net/dlil.h>
108
109 #if NBPFILTER > 0
110
111 /*
112 * Older BSDs don't have kernel malloc.
113 */
114 #if BSD < 199103
115 extern bcopy();
116 static caddr_t bpf_alloc();
117 #include <net/bpf_compat.h>
118 #define BPF_BUFSIZE (MCLBYTES-8)
119 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
120 #else
121 #define BPF_BUFSIZE 4096
122 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
123 #endif
124
#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
/* Upper bound that a BIOCSBLEN request is clamped to. */
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
#ifdef __APPLE__
/*
 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
 */
static struct bpf_d	bpf_dtab[NBPFILTER];
static int		bpf_dtab_init;
static int		nbpfilter = NBPFILTER;
#endif

/* Forward declarations for the file-local helpers defined below. */
static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

/*
 * Darwin differs from BSD here, the following are static
 * on BSD and not static on Darwin.
 */
d_open_t	bpfopen;
d_close_t	bpfclose;
d_read_t	bpfread;
d_write_t	bpfwrite;
d_ioctl_t	bpfioctl;
select_fcn_t	bpfpoll;

#ifdef __APPLE__
void bpf_mtap(struct ifnet *, struct mbuf *);

/* Old-style (unprototyped) declarations used by the cdevsw below. */
int	bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
	bpfpoll();
#endif

/* Darwin's cdevsw struct differs slightly from BSDs */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* stop */	nulldev,
	/* reset */	nulldev,
	/* tty */	NULL,
	/* select */	bpfpoll,
	/* mmap */	eno_mmap,
	/* strategy*/	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};
204
205
206 static int
207 bpf_movein(uio, linktype, mp, sockp, datlen)
208 register struct uio *uio;
209 int linktype, *datlen;
210 register struct mbuf **mp;
211 register struct sockaddr *sockp;
212 {
213 struct mbuf *m;
214 int error;
215 int len;
216 int hlen;
217
218 /*
219 * Build a sockaddr based on the data link layer type.
220 * We do this at this level because the ethernet header
221 * is copied directly into the data field of the sockaddr.
222 * In the case of SLIP, there is no header and the packet
223 * is forwarded as is.
224 * Also, we are careful to leave room at the front of the mbuf
225 * for the link level header.
226 */
227 switch (linktype) {
228
229 case DLT_SLIP:
230 sockp->sa_family = AF_INET;
231 hlen = 0;
232 break;
233
234 case DLT_EN10MB:
235 sockp->sa_family = AF_UNSPEC;
236 /* XXX Would MAXLINKHDR be better? */
237 hlen = sizeof(struct ether_header);
238 break;
239
240 case DLT_FDDI:
241 #if defined(__FreeBSD__) || defined(__bsdi__)
242 sockp->sa_family = AF_IMPLINK;
243 hlen = 0;
244 #else
245 sockp->sa_family = AF_UNSPEC;
246 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
247 hlen = 24;
248 #endif
249 break;
250
251 case DLT_RAW:
252 case DLT_NULL:
253 sockp->sa_family = AF_UNSPEC;
254 hlen = 0;
255 break;
256
257 #ifdef __FreeBSD__
258 case DLT_ATM_RFC1483:
259 /*
260 * en atm driver requires 4-byte atm pseudo header.
261 * though it isn't standard, vpi:vci needs to be
262 * specified anyway.
263 */
264 sockp->sa_family = AF_UNSPEC;
265 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
266 break;
267 #endif
268 case DLT_PPP:
269 sockp->sa_family = AF_UNSPEC;
270 hlen = 4; /* This should match PPP_HDRLEN */
271 break;
272
273 default:
274 return (EIO);
275 }
276
277 len = uio->uio_resid;
278 *datlen = len - hlen;
279 if ((unsigned)len > MCLBYTES)
280 return (EIO);
281
282 MGETHDR(m, M_WAIT, MT_DATA);
283 if (m == 0)
284 return (ENOBUFS);
285 if (len > MHLEN) {
286 #if BSD >= 199103
287 MCLGET(m, M_WAIT);
288 if ((m->m_flags & M_EXT) == 0) {
289 #else
290 MCLGET(m);
291 if (m->m_len != MCLBYTES) {
292 #endif
293 error = ENOBUFS;
294 goto bad;
295 }
296 }
297 m->m_pkthdr.len = m->m_len = len;
298 m->m_pkthdr.rcvif = NULL;
299 *mp = m;
300 /*
301 * Make room for link header.
302 */
303 if (hlen != 0) {
304 m->m_pkthdr.len -= hlen;
305 m->m_len -= hlen;
306 #if BSD >= 199103
307 m->m_data += hlen; /* XXX */
308 #else
309 m->m_off += hlen;
310 #endif
311 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
312 if (error)
313 goto bad;
314 }
315 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
316 if (!error)
317 return (0);
318 bad:
319 m_freem(m);
320 return (error);
321 }
322
323 #ifdef __APPLE__
324 /* Callback registered with Ethernet driver. */
325 int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
326 {
327 boolean_t funnel_state;
328
329 funnel_state = thread_funnel_set(network_flock, TRUE);
330
331 /*
332 * Do nothing if the BPF tap has been turned off.
333 * This is to protect from a potential race where this
334 * call blocks on the funnel lock. And in the meantime
335 * BPF is turned off, which will clear if_bpf.
336 */
337 if (ifp->if_bpf)
338 bpf_mtap(ifp, m);
339
340 thread_funnel_set(network_flock, funnel_state);
341 return 0;
342 }
343 #endif
344
345 /*
346 * Attach file to the bpf interface, i.e. make d listen on bp.
347 * Must be called at splimp.
348 */
349 static void
350 bpf_attachd(d, bp)
351 struct bpf_d *d;
352 struct bpf_if *bp;
353 {
354 /*
355 * Point d at bp, and add d to the interface's list of listeners.
356 * Finally, point the driver's bpf cookie at the interface so
357 * it will divert packets to bpf.
358 */
359 d->bd_bif = bp;
360 d->bd_next = bp->bif_dlist;
361 bp->bif_dlist = d;
362
363 bp->bif_ifp->if_bpf = bp;
364
365 #ifdef __APPLE__
366 if (bp->bif_ifp->if_set_bpf_tap)
367 (*bp->bif_ifp->if_set_bpf_tap)(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
368 #endif
369 }
370
/*
 * Detach a file from its interface.
 *
 * Turns promiscuous mode back off if this descriptor had requested it,
 * unlinks d from the interface's listener list, and disables the
 * driver's tap when the last listener goes away.
 * NOTE(review): visible callers (bpfclose, bpf_setif) run this at
 * splimp -- confirm before adding new call sites.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;
#ifdef __APPLE__
	struct ifnet *ifp;

	ifp = d->bd_bif->bif_ifp;

#endif

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 * Most likely the network interface is gone.
			 */
			printf("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		if (ifp->if_set_bpf_tap)
			(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);
		d->bd_bif->bif_ifp->if_bpf = 0;
	}
	d->bd_bif = 0;
}
421
422
#ifdef __APPLE__
/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 * (A freshly bzero'd descriptor has bd_next == 0, i.e. D_MARKUSED.)
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)
#endif
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 *
 * On Darwin the descriptor lives in bpf_dtab[minor] and the whole body
 * (after the minor check) runs under the network funnel.
 */
/* ARGSUSED */
int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

#ifdef __APPLE__
	if (minor(dev) >= nbpfilter)
		return (ENXIO);

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	d = &bpf_dtab[minor(dev)];
#else
	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
#endif
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
#ifdef __APPLE__
	if (!D_ISFREE(d)) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EBUSY);
	}

	/* Mark "free" and do most initialization. */
	/* NOTE(review): bzero leaves bd_next == 0 (D_MARKUSED), which is
	 * what claims the slot; the slot is presumably released again by
	 * bpf_freed() at close -- bpf_freed is defined elsewhere. */
	bzero((char *)d, sizeof(*d));
#else
	if (d)
		return (EBUSY);
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));
	dev->si_drv1 = d;
#endif
	/* Defaults: patchable buffer size, SIGIO for async I/O, and
	 * "see sent packets" enabled. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}
486
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

#ifndef __APPLE__
	/* NOTE(review): on the non-Apple path d is used here before it
	 * is ever assigned -- this branch looks like unported code. */
	funsetown(d->bd_sigio);
#endif
	s = splimp();
#ifdef __APPLE__
	d = &bpf_dtab[minor(dev)];
#endif
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
#ifdef __APPLE__
	/* Clear out any threads still selecting on this descriptor
	 * before its state is torn down. */
	selthreadclear(&d->bd_sel);
#endif
	/* bpf_freed() (defined elsewhere) releases buffers/filter and
	 * presumably marks the dtab slot free again -- confirm there. */
	bpf_freed(d);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}
521
/*
 * Support for SunOS, which does not have tsleep.
 * (Only compiled when BSD < 199103; modern builds map BPF_SLEEP to
 * tsleep below.)
 */
#if BSD < 199103
/* timeout() callback: flag the descriptor as timed out and wake the
 * sleeper, under the network funnel. */
static
bpf_timeout(arg)
	caddr_t arg;
{
	boolean_t funnel_state;
	struct bpf_d *d = (struct bpf_d *)arg;
	funnel_state = thread_funnel_set(network_flock, TRUE);
	d->bd_timedout = 1;
	wakeup(arg);
	(void) thread_funnel_set(network_flock, FALSE);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep(): sleep on d, bounded by bd_rtout ticks when nonzero.
 * Returns 0 on normal wakeup, EWOULDBLOCK on timeout, EINTR on signal.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif
563
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 * NOTE(review): the visible callers (bpfread) only invoke this when
 * bd_hbuf is empty and at splimp -- keep those invariants.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 *
 * Blocks (subject to the BIOCSRTIMEOUT timeout or non-blocking mode)
 * until the hold buffer has packets, copies the whole hold buffer to
 * user space, and recycles it as the new free buffer.  The caller's
 * buffer must be exactly bd_bufsize bytes.
 */
int
bpfread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d;
	int error;
	int s;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EINVAL);
	}

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	/* Recycle the emptied hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
682
683
/*
 * If there are processes sleeping on this descriptor, wake them up.
 * Also posts the descriptor's signal (default SIGIO) to the async
 * owner when FIOASYNC is set, and wakes any select/poll waiters.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
#ifndef __APPLE__
	/* XXX */
	d->bd_sel.si_pid = 0;
#endif
#else
	/* Pre-199103 select emulation: wake and clear the recorded
	 * selecting process. */
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
709
710 int
711 bpfwrite(dev, uio, ioflag)
712 dev_t dev;
713 struct uio *uio;
714 int ioflag;
715 {
716 register struct bpf_d *d;
717 struct ifnet *ifp;
718 struct mbuf *m;
719 int error, s;
720 static struct sockaddr dst;
721 int datlen;
722
723 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
724 d = &bpf_dtab[minor(dev)];
725
726 if (d->bd_bif == 0) {
727 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
728 return (ENXIO);
729 }
730
731 ifp = d->bd_bif->bif_ifp;
732
733 if (uio->uio_resid == 0) {
734 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
735 return (0);
736 }
737
738 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
739 if (error) {
740 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
741 return (error);
742 }
743
744 if (datlen > ifp->if_mtu) {
745 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
746 return (EMSGSIZE);
747 }
748
749 if (d->bd_hdrcmplt)
750 dst.sa_family = pseudo_AF_HDRCMPLT;
751
752 s = splnet();
753
754 error = dlil_output(ifp->if_data.default_proto, m,
755 (caddr_t) 0, &dst, 0);
756
757 splx(s);
758 thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
759 /*
760 * The driver frees the mbuf.
761 */
762 return (error);
763 }
764
765 /*
766 * Reset a descriptor by flushing its packet buffer and clearing the
767 * receive and drop counts. Should be called at splimp.
768 */
769 static void
770 reset_d(d)
771 struct bpf_d *d;
772 {
773 if (d->bd_hbuf) {
774 /* Free the hold buffer. */
775 d->bd_fbuf = d->bd_hbuf;
776 d->bd_hbuf = 0;
777 }
778 d->bd_slen = 0;
779 d->bd_hlen = 0;
780 d->bd_rcount = 0;
781 d->bd_dcount = 0;
782 }
783
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *
 * The whole switch runs under the network funnel; individual cases
 * raise splimp around buffer/stat manipulation.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d;
	int s, error = 0;


	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			/* Hold buffer counts too: it is unread data. */
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Forward straight to the driver. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		/* Only before an interface is attached -- buffers are
		 * allocated at attach time (bpf_setif). */
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			/* Clamp to [BPF_MINBUFSIZE, bpf_maxbufsize] and
			 * write the clamped value back to the caller. */
			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		/* Only the first request per descriptor hits the driver;
		 * bpf_detachd undoes it at detach. */
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert the stored tick count back to a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;
#ifndef __APPLE__
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif
	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}
1080
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * The program is copied in from user space and checked with
 * bpf_validate() before install; the pointer swap happens at splimp so
 * the tap never observes a half-installed filter, and the old program
 * is freed only after the swap.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	/* A null instruction pointer means "remove current filter". */
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
#ifdef __APPLE__
	if (fcode == NULL)
		return (ENOBUFS);
#endif
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	/* Copyin fault or invalid program: discard the new copy. */
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
1130
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 *
 * NOTE(review): runs under the network funnel via bpfioctl; buffer
 * allocation happens lazily here on the first attach.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		/* First attach on this descriptor: allocate the
		 * store/hold/free buffers. */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
1189
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 * NOTE(review): returns ENXIO (a positive errno) rather than a revents
 * mask when no interface is attached -- callers must tolerate this.
 */
int
bpfpoll(dev, events, wql, p)
	register dev_t dev;
	int events;
	void * wql;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	if (d->bd_bif == NULL) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (ENXIO);
	}

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		/* Readable if the hold buffer has data or, in immediate
		 * mode, if any bytes sit in the store buffer. */
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel, wql);
	}
	splx(s);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (revents);
}
1229
1230 /*
1231 * Incoming linkage from device drivers. Process the packet pkt, of length
1232 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1233 * by each process' filter, and if accepted, stashed into the corresponding
1234 * buffer.
1235 */
1236 void
1237 bpf_tap(ifp, pkt, pktlen)
1238 struct ifnet *ifp;
1239 register u_char *pkt;
1240 register u_int pktlen;
1241 {
1242 struct bpf_if *bp;
1243 register struct bpf_d *d;
1244 register u_int slen;
1245 /*
1246 * Note that the ipl does not have to be raised at this point.
1247 * The only problem that could arise here is that if two different
1248 * interfaces shared any data. This is not the case.
1249 */
1250 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
1251 bp = ifp->if_bpf;
1252 #ifdef __APPLE__
1253 if (bp) {
1254 #endif
1255 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1256 ++d->bd_rcount;
1257 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1258 if (slen != 0)
1259 catchpacket(d, pkt, pktlen, slen, bcopy);
1260 }
1261 #ifdef __APPLE__
1262 }
1263 thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
1264 #endif
1265 }
1266
1267 /*
1268 * Copy data from an mbuf chain into a buffer. This code is derived
1269 * from m_copydata in sys/uipc_mbuf.c.
1270 */
1271 static void
1272 bpf_mcopy(src_arg, dst_arg, len)
1273 const void *src_arg;
1274 void *dst_arg;
1275 register size_t len;
1276 {
1277 register const struct mbuf *m;
1278 register u_int count;
1279 u_char *dst;
1280
1281 m = src_arg;
1282 dst = dst_arg;
1283 while (len > 0) {
1284 if (m == 0)
1285 panic("bpf_mcopy");
1286 count = min(m->m_len, len);
1287 bcopy(mtod(m, void *), dst, count);
1288 m = m->m_next;
1289 dst += count;
1290 len -= count;
1291 }
1292 }
1293
1294 /*
1295 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1296 */
1297 void
1298 bpf_mtap(ifp, m)
1299 struct ifnet *ifp;
1300 struct mbuf *m;
1301 {
1302 struct bpf_if *bp = ifp->if_bpf;
1303 struct bpf_d *d;
1304 u_int pktlen, slen;
1305 struct mbuf *m0;
1306
1307 pktlen = 0;
1308 for (m0 = m; m0 != 0; m0 = m0->m_next)
1309 pktlen += m0->m_len;
1310
1311 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1312 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1313 continue;
1314 ++d->bd_rcount;
1315 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1316 if (slen != 0)
1317 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1318 }
1319 }
1320
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 *
 * d       - descriptor receiving the packet
 * pkt     - packet bytes (or an mbuf chain when cpfn == bpf_mcopy)
 * pktlen  - full on-the-wire length of the packet
 * snaplen - bytes the filter asked to capture (may exceed pktlen)
 * cpfn    - copy routine matching pkt's representation
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.  bd_dcount is the
			 * descriptor's drop statistic.
			 */
			++d->bd_dcount;
			return;
		}
		/* Store buffer becomes hold buffer; free buffer becomes
		 * the new (empty) store buffer. */
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.  The timestamp source depends on the
	 * BSD vintage this file is built against.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * bh_caplen is the number of packet bytes actually captured
	 * (totlen minus the bpf header).
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1397
1398 /*
1399 * Initialize all nonzero fields of a descriptor.
1400 */
1401 static int
1402 bpf_allocbufs(d)
1403 register struct bpf_d *d;
1404 {
1405 d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1406 if (d->bd_fbuf == 0)
1407 return (ENOBUFS);
1408
1409 d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1410 if (d->bd_sbuf == 0) {
1411 FREE(d->bd_fbuf, M_DEVBUF);
1412 return (ENOBUFS);
1413 }
1414 d->bd_slen = 0;
1415 d->bd_hlen = 0;
1416 return (0);
1417 }
1418
1419 /*
1420 * Free buffers currently in use by a descriptor.
1421 * Called on close.
1422 */
1423 static void
1424 bpf_freed(d)
1425 register struct bpf_d *d;
1426 {
1427 /*
1428 * We don't need to lock out interrupts since this descriptor has
1429 * been detached from its interface and it yet hasn't been marked
1430 * free.
1431 */
1432 if (d->bd_sbuf != 0) {
1433 FREE(d->bd_sbuf, M_DEVBUF);
1434 if (d->bd_hbuf != 0)
1435 FREE(d->bd_hbuf, M_DEVBUF);
1436 if (d->bd_fbuf != 0)
1437 FREE(d->bd_fbuf, M_DEVBUF);
1438 }
1439 if (d->bd_filter)
1440 FREE((caddr_t)d->bd_filter, M_DEVBUF);
1441
1442 D_MARKFREE(d);
1443 }
1444
1445 /*
1446 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1447 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1448 * size of the link header (variable length headers not yet supported).
1449 */
1450 void
1451 bpfattach(ifp, dlt, hdrlen)
1452 struct ifnet *ifp;
1453 u_int dlt, hdrlen;
1454 {
1455 struct bpf_if *bp;
1456 int i;
1457 bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT);
1458 if (bp == 0)
1459 panic("bpfattach");
1460
1461 bp->bif_dlist = 0;
1462 bp->bif_ifp = ifp;
1463 bp->bif_dlt = dlt;
1464
1465 bp->bif_next = bpf_iflist;
1466 bpf_iflist = bp;
1467
1468 bp->bif_ifp->if_bpf = 0;
1469
1470 /*
1471 * Compute the length of the bpf header. This is not necessarily
1472 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1473 * that the network layer header begins on a longword boundary (for
1474 * performance reasons and to alleviate alignment restrictions).
1475 */
1476 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1477
1478 #ifdef __APPLE__
1479 /*
1480 * Mark all the descriptors free if this hasn't been done.
1481 */
1482 if (!bpf_dtab_init) {
1483 for (i = 0; i < nbpfilter; ++i)
1484 D_MARKFREE(&bpf_dtab[i]);
1485 bpf_dtab_init = 1;
1486 }
1487 #else
1488 if (bootverbose)
1489 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1490 #endif
1491 }
1492
1493 /*
1494 * Detach bpf from an interface. This involves detaching each descriptor
1495 * associated with the interface, and leaving bd_bif NULL. Notify each
1496 * descriptor as it's detached so that any sleepers wake up and get
1497 * ENXIO.
1498 */
1499 void
1500 bpfdetach(ifp)
1501 struct ifnet *ifp;
1502 {
1503 struct bpf_if *bp, *bp_prev;
1504 struct bpf_d *d;
1505 int s;
1506
1507 s = splimp();
1508
1509 /* Locate BPF interface information */
1510 bp_prev = NULL;
1511 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1512 if (ifp == bp->bif_ifp)
1513 break;
1514 bp_prev = bp;
1515 }
1516
1517 #ifdef __APPLE__
1518 /* Check for no BPF interface information */
1519 if (bp == NULL) {
1520 return;
1521 }
1522 #endif
1523
1524 /* Interface wasn't attached */
1525 if (bp->bif_ifp == NULL) {
1526 splx(s);
1527 #ifndef __APPLE__
1528 printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1529 ifp->if_unit);
1530 #endif
1531 return;
1532 }
1533
1534 while ((d = bp->bif_dlist) != NULL) {
1535 bpf_detachd(d);
1536 bpf_wakeup(d);
1537 }
1538
1539 if (bp_prev) {
1540 bp_prev->bif_next = bp->bif_next;
1541 } else {
1542 bpf_iflist = bp->bif_next;
1543 }
1544
1545 FREE(bp, M_DEVBUF);
1546
1547 splx(s);
1548 }
1549
1550 static void *bpf_devfs_token[NBPFILTER];
1551
1552 static int bpf_devsw_installed;
1553
1554 void bpf_init __P((void *unused));
1555
1556 void
1557 bpf_init(unused)
1558 void *unused;
1559 {
1560 #ifdef __APPLE__
1561 int i;
1562 int maj;
1563
1564 if (!bpf_devsw_installed ) {
1565 bpf_devsw_installed = 1;
1566 maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
1567 if (maj == -1) {
1568 printf("bpf_init: failed to allocate a major number!\n");
1569 nbpfilter = 0;
1570 return;
1571 }
1572 for (i = 0 ; i < nbpfilter; i++) {
1573 bpf_devfs_token[i] = devfs_make_node(makedev(maj, i),
1574 DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
1575 "bpf%x", i);
1576 }
1577 }
1578 #else
1579 cdevsw_add(&bpf_cdevsw);
1580 #endif
1581 }
1582
1583 #ifndef __APPLE__
1584 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1585 #endif
1586
1587 #else /* !BPF */
1588 #ifndef __APPLE__
1589 /*
1590 * NOP stubs to allow bpf-using drivers to load and function.
1591 *
1592 * A 'better' implementation would allow the core bpf functionality
1593 * to be loaded at runtime.
1594 */
1595
/* NOP stub: with BPF compiled out, contiguous-buffer taps are ignored. */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}
1603
/* NOP stub: with BPF compiled out, mbuf-chain taps are ignored. */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}
1610
/* NOP stub: with BPF compiled out, interface attach requests are ignored. */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}
1617
/* NOP stub: with BPF compiled out, interface detach requests are ignored. */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}
1623
/*
 * Stub filter: always returns (u_int)-1, i.e. the maximum snap length,
 * which callers treat as "accept the entire packet" (no-filter behaviour).
 */
u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}
1633 #endif /* !defined(__APPLE__) */
1634 #endif /* NBPFILTER > 0 */