]> git.saurik.com Git - apple/xnu.git/blob - bsd/net/bpf.c
a4f6c67d21fa71e0af05f53875485b18b7c43e82
[apple/xnu.git] / bsd / net / bpf.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright (c) 1990, 1991, 1993
25 * The Regents of the University of California. All rights reserved.
26 *
27 * This code is derived from the Stanford/CMU enet packet filter,
28 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
29 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
30 * Berkeley Laboratory.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
61 *
62 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
63 */
64
65 #include "bpf.h"
66
67 #ifndef __GNUC__
68 #define inline
69 #else
70 #define inline __inline
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/conf.h>
76 #include <sys/malloc.h>
77 #include <sys/mbuf.h>
78 #include <sys/time.h>
79 #include <sys/proc.h>
80 #include <sys/signalvar.h>
81 #include <sys/filio.h>
82 #include <sys/sockio.h>
83 #include <sys/ttycom.h>
84 #include <sys/filedesc.h>
85 #include <sys/uio_internal.h>
86
87 #if defined(sparc) && BSD < 199103
88 #include <sys/stream.h>
89 #endif
90 #include <sys/poll.h>
91
92 #include <sys/socket.h>
93 #include <sys/vnode.h>
94
95 #include <net/if.h>
96 #include <net/bpf.h>
97 #include <net/bpfdesc.h>
98
99 #include <netinet/in.h>
100 #include <netinet/if_ether.h>
101 #include <sys/kernel.h>
102 #include <sys/sysctl.h>
103 #include <net/firewire.h>
104
105 #include <machine/spl.h>
106 #include <miscfs/devfs/devfs.h>
107 #include <net/dlil.h>
108
109 #include <kern/locks.h>
110
111 extern int tvtohz(struct timeval *);
112
113 #if NBPFILTER > 0
114
115 /*
116 * Older BSDs don't have kernel malloc.
117 */
118 #if BSD < 199103
119 extern bcopy();
120 static caddr_t bpf_alloc();
121 #include <net/bpf_compat.h>
122 #define BPF_BUFSIZE (MCLBYTES-8)
123 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
124 #else
125 #define BPF_BUFSIZE 4096
126 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
127 #endif
128
129
#define PRINET  26			/* interruptible sleep priority used for bpf reads */

/*
 * The default read buffer size is patchable.
 */
static unsigned int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
/* Upper bound enforced by BIOCSBLEN when a process asks for a bigger buffer. */
static unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");
/* Cap on how many /dev/bpfN nodes bpf_make_dev_t() will ever create. */
static unsigned int bpf_maxdevices = 256;
SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW,
	&bpf_maxdevices, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds pointer to the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
#ifdef __APPLE__
/*
 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
 *
 * The value stored in bpf_dtab[n] represent three states:
 *	0: device not opened
 *	1: device opening or closing
 *	other: device <n> opened with pointer to storage
 *
 * bpfopen()/bpfclose() transition entries through these states without
 * holding bpf_mlock; see the comments in those functions for why that
 * is considered safe here.
 */
static struct bpf_d	**bpf_dtab = NULL;
static unsigned int bpf_dtab_size = 0;	/* allocated slots in bpf_dtab */
static unsigned int	nbpfilter = 0;	/* number of /dev nodes created so far */

/*
 * Single global mutex serializing access to all BPF descriptors;
 * taken by read/write/ioctl/poll/close and by bpf_tap.
 */
static lck_mtx_t		*bpf_mlock;
static lck_grp_t		*bpf_mlock_grp;
static lck_grp_attr_t	*bpf_mlock_grp_attr;
static lck_attr_t		*bpf_mlock_attr;

/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#endif /* __APPLE__ */

/* Forward declarations for the static helpers defined below. */
static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t));
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct user_bpf_program *);

/*static void *bpf_devfs_token[MAXBPFILTER];*/

static int bpf_devsw_installed;

void bpf_init(void *unused);
int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m);

/*
 * Darwin differs from BSD here, the following are static
 * on BSD and not static on Darwin.
 */
	d_open_t	bpfopen;
	d_close_t	bpfclose;
	d_read_t	bpfread;
	d_write_t	bpfwrite;
	ioctl_fcn_t	bpfioctl;
	select_fcn_t	bpfpoll;


/* Darwin's cdevsw struct differs slightly from BSDs */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* stop */	eno_stop,
	/* reset */	eno_reset,
	/* tty */	NULL,
	/* select */	bpfpoll,
	/* mmap */	eno_mmap,
	/* strategy*/	eno_strat,
	/* getc */	eno_getc,
	/* putc */	eno_putc,
	/* type */	0
};
228
229 #define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)
230
231 static int
232 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, struct sockaddr *sockp, int *datlen)
233 {
234 struct mbuf *m;
235 int error;
236 int len;
237 int hlen;
238
239 if (sockp) {
240 /*
241 * Build a sockaddr based on the data link layer type.
242 * We do this at this level because the ethernet header
243 * is copied directly into the data field of the sockaddr.
244 * In the case of SLIP, there is no header and the packet
245 * is forwarded as is.
246 * Also, we are careful to leave room at the front of the mbuf
247 * for the link level header.
248 */
249 switch (linktype) {
250
251 case DLT_SLIP:
252 sockp->sa_family = AF_INET;
253 hlen = 0;
254 break;
255
256 case DLT_EN10MB:
257 sockp->sa_family = AF_UNSPEC;
258 /* XXX Would MAXLINKHDR be better? */
259 hlen = sizeof(struct ether_header);
260 break;
261
262 case DLT_FDDI:
263 #if defined(__FreeBSD__) || defined(__bsdi__)
264 sockp->sa_family = AF_IMPLINK;
265 hlen = 0;
266 #else
267 sockp->sa_family = AF_UNSPEC;
268 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
269 hlen = 24;
270 #endif
271 break;
272
273 case DLT_RAW:
274 case DLT_NULL:
275 sockp->sa_family = AF_UNSPEC;
276 hlen = 0;
277 break;
278
279 #ifdef __FreeBSD__
280 case DLT_ATM_RFC1483:
281 /*
282 * en atm driver requires 4-byte atm pseudo header.
283 * though it isn't standard, vpi:vci needs to be
284 * specified anyway.
285 */
286 sockp->sa_family = AF_UNSPEC;
287 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
288 break;
289 #endif
290 case DLT_PPP:
291 sockp->sa_family = AF_UNSPEC;
292 hlen = 4; /* This should match PPP_HDRLEN */
293 break;
294
295 case DLT_APPLE_IP_OVER_IEEE1394:
296 sockp->sa_family = AF_UNSPEC;
297 hlen = sizeof(struct firewire_header);
298 break;
299
300 default:
301 return (EIO);
302 }
303 if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
304 return (EIO);
305 }
306 }
307 else {
308 hlen = 0;
309 }
310
311 // LP64todo - fix this!
312 len = uio_resid(uio);
313 *datlen = len - hlen;
314 if ((unsigned)len > MCLBYTES)
315 return (EIO);
316
317 MGETHDR(m, M_WAIT, MT_DATA);
318 if (m == 0)
319 return (ENOBUFS);
320 if ((unsigned)len > MHLEN) {
321 #if BSD >= 199103
322 MCLGET(m, M_WAIT);
323 if ((m->m_flags & M_EXT) == 0) {
324 #else
325 MCLGET(m);
326 if (m->m_len != MCLBYTES) {
327 #endif
328 error = ENOBUFS;
329 goto bad;
330 }
331 }
332 m->m_pkthdr.len = m->m_len = len;
333 m->m_pkthdr.rcvif = NULL;
334 *mp = m;
335 /*
336 * Make room for link header.
337 */
338 if (hlen != 0) {
339 m->m_pkthdr.len -= hlen;
340 m->m_len -= hlen;
341 #if BSD >= 199103
342 m->m_data += hlen; /* XXX */
343 #else
344 m->m_off += hlen;
345 #endif
346 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
347 if (error)
348 goto bad;
349 }
350 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
351 if (!error)
352 return (0);
353 bad:
354 m_freem(m);
355 return (error);
356 }
357
358 #ifdef __APPLE__
359 /* Callback registered with Ethernet driver. */
360 int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
361 {
362 /*
363 * Do nothing if the BPF tap has been turned off.
364 * This is to protect from a potential race where this
365 * call blocks on the lock. And in the meantime
366 * BPF is turned off, which will clear if_bpf.
367 */
368 if (ifp->if_bpf)
369 bpf_mtap(ifp, m);
370 return 0;
371 }
372
/*
 * The dynamic addition of a new device node must block all processes that are opening
 * the last device so that no process will get an unexpected ENOENT
 */
static void
bpf_make_dev_t(int maj)
{
	/* Single-flight flag: only one thread grows the table at a time. */
	static int bpf_growing = 0;
	/* Snapshot taken BEFORE possibly sleeping, so we can detect that
	 * another thread already created the node while we slept. */
	unsigned int cur_size = nbpfilter, i;

	/* Hard cap on the number of /dev/bpfN nodes. */
	if (nbpfilter >= bpf_maxdevices)
		return;

	while (bpf_growing) {
		/* Wait until new device has been created */
		(void)tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0);
	}
	if (nbpfilter > cur_size) {
		/* other thread grew it already */
		return;
	}
	bpf_growing = 1;

	/* need to grow bpf_dtab first */
	if (nbpfilter == bpf_dtab_size) {
		int new_dtab_size;
		struct bpf_d **new_dtab = NULL;
		struct bpf_d **old_dtab = NULL;

		/* Grow the descriptor table in NBPFILTER-sized steps. */
		new_dtab_size = bpf_dtab_size + NBPFILTER;
		new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT);
		if (new_dtab == 0) {
			printf("bpf_make_dev_t: malloc bpf_dtab failed\n");
			goto done;
		}
		if (bpf_dtab) {
			bcopy(bpf_dtab, new_dtab,
				  sizeof(struct bpf_d *) * bpf_dtab_size);
		}
		/* Newly added slots start in the "not opened" (0) state. */
		bzero(new_dtab + bpf_dtab_size,
			  sizeof(struct bpf_d *) * NBPFILTER);
		old_dtab = bpf_dtab;
		bpf_dtab = new_dtab;
		bpf_dtab_size = new_dtab_size;
		if (old_dtab != NULL)
			_FREE(old_dtab, M_DEVBUF);
	}
	i = nbpfilter++;
	(void) devfs_make_node(makedev(maj, i),
				DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
				"bpf%d", i);
done:
	/* Release the single-flight flag and wake any waiters above. */
	bpf_growing = 0;
	wakeup((caddr_t)&bpf_growing);
}
428
429 #endif
430
431 /*
432 * Attach file to the bpf interface, i.e. make d listen on bp.
433 * Must be called at splimp.
434 */
435 static void
436 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
437 {
438 /*
439 * Point d at bp, and add d to the interface's list of listeners.
440 * Finally, point the driver's bpf cookie at the interface so
441 * it will divert packets to bpf.
442 */
443 d->bd_bif = bp;
444 d->bd_next = bp->bif_dlist;
445 bp->bif_dlist = d;
446
447 bp->bif_ifp->if_bpf = bp;
448
449 #ifdef __APPLE__
450 dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
451 #endif
452 }
453
/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;
#ifdef __APPLE__
	struct ifnet  *ifp;

	/* NOTE(review): assumes d->bd_bif is non-NULL on entry — callers
	 * (bpfclose, bpf_setif) check this before calling. */
	ifp = d->bd_bif->bif_ifp;

#endif

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifnet_set_promiscuous(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 * Most likely the network interface is gone.
			 */
			printf("bpf: ifnet_set_promiscuous failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0) {
		/*
		 * Let the driver know that there are no more listeners:
		 * disable the tap and clear the ifnet's bpf cookie.
		 */
		if (ifp->if_set_bpf_tap)
			(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);
		d->bd_bif->bif_ifp->if_bpf = 0;
	}
	d->bd_bif = 0;
}
503
504
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, __unused int flags, __unused int fmt, __unused struct proc *p)
{
	register struct bpf_d *d;

	if ((unsigned int) minor(dev) >= nbpfilter)
		return (ENXIO);

	/*
	 * New device nodes are created on demand when opening the last one.
	 * The programming model is for processes to loop on the minor starting at 0
	 * as long as EBUSY is returned. The loop stops when either the open succeeds or
	 * an error other than EBUSY is returned. That means that bpf_make_dev_t() must
	 * block all processes that are opening the last node. If not all
	 * processes are blocked, they could unexpectedly get ENOENT and abort their
	 * opening loop.
	 */
	if ((unsigned int) minor(dev) == (nbpfilter - 1))
		bpf_make_dev_t(major(dev));

	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 *
	 * Important: bpfopen() and bpfclose() have to check and set the status of a device
	 * in the same locking context otherwise the device may be leaked because the vnode use count
	 * will be unexpectedly greater than 1 when close() is called.
	 */
	if (bpf_dtab[minor(dev)] == 0)
		bpf_dtab[minor(dev)] = (void *)1;	/* Mark opening */
	else
		return (EBUSY);

	d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
	if (d == NULL) {
		/* this really is a catastrophic failure */
		printf("bpfopen: malloc bpf_d failed\n");
		/* Roll the slot back to "not opened" so a retry can succeed. */
		bpf_dtab[minor(dev)] = 0;
		return ENOMEM;
	}
	bzero(d, sizeof(struct bpf_d));

	/*
	 * It is not necessary to take the BPF lock here because no other
	 * thread can access the device until it is marked opened...
	 */

	/* Mark "in use" and do most initialization. */
	d->bd_bufsize = bpf_bufsize;	/* patchable via debug.bpf_bufsize */
	d->bd_sig = SIGIO;		/* default FIOASYNC signal */
	d->bd_seesent = 1;		/* see outbound packets by default */
	bpf_dtab[minor(dev)] = d;	/* Mark opened */

	return (0);
}
565
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev_t dev, __unused int flags, __unused int fmt, __unused struct proc *p)
{
	register struct bpf_d *d;

	/* Slot must hold a real descriptor: 0 = never opened,
	 * 1 = another open/close is in flight. */
	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1)
		return (ENXIO);

	bpf_dtab[minor(dev)] = (void *)1;	/* Mark closing */

	/* Take BPF lock to ensure no other thread is using the device */
	lck_mtx_lock(bpf_mlock);

	if (d->bd_bif)
		bpf_detachd(d);
	/* Drop any select()/poll() threads still parked on this descriptor
	 * before freeing its buffers. */
	selthreadclear(&d->bd_sel);
	bpf_freed(d);

	lck_mtx_unlock(bpf_mlock);

	/* Mark free in same context as bpfopen comes to check */
	bpf_dtab[minor(dev)] = 0;			/* Mark closed */
	_FREE(d, M_DEVBUF);

	return (0);
}
598
599
600 #define BPF_SLEEP bpf_sleep
601
602 static int
603 bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
604 {
605 register int st;
606
607 lck_mtx_unlock(bpf_mlock);
608
609 st = tsleep((caddr_t)d, pri, wmesg, timo);
610
611 lck_mtx_lock(bpf_mlock);
612
613 return st;
614 }
615
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so this multi-statement macro expands as
 * exactly one statement; the bare statement list it used to expand to
 * would silently misbehave under an unbraced if/else (CERT PRE10-C).
 */
#define ROTATE_BUFFERS(d) \
	do { \
		(d)->bd_hbuf = (d)->bd_sbuf; \
		(d)->bd_hlen = (d)->bd_slen; \
		(d)->bd_sbuf = (d)->bd_fbuf; \
		(d)->bd_slen = 0; \
		(d)->bd_fbuf = 0; \
	} while (0)
/*
 * bpfread - read next chunk of packets from buffers
 *
 * Delivers the contents of the hold buffer to userland, blocking (unless
 * IO_NDELAY) until a full store buffer rotates into the hold slot or the
 * read timeout (BIOCSRTIMEOUT) expires.  The caller must supply a buffer
 * exactly d->bd_bufsize bytes long.
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	register struct bpf_d *d;
	int error;
	int s;

	/* 0 = not open, 1 = open/close in progress: reject both. */
	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1)
		return (ENXIO);

	lck_mtx_lock(bpf_mlock);


	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	// LP64todo - fix this
	if (uio->uio_resid != d->bd_bufsize) {
		lck_mtx_unlock(bpf_mlock);
		return (EINVAL);
	}

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			lck_mtx_unlock(bpf_mlock);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			/* BPF_SLEEP drops bpf_mlock while asleep, so buffer
			 * state may have changed by the time it returns. */
			error = BPF_SLEEP(d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			lck_mtx_unlock(bpf_mlock);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				lck_mtx_unlock(bpf_mlock);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	/* Recycle the drained hold buffer as the new free buffer. */
	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);
	lck_mtx_unlock(bpf_mlock);
	return (error);
}
736
737
/*
 * If there are processes sleeping on this descriptor, wake them up.
 * Also delivers the async signal (FIOASYNC/BIOCSRSIG) and wakes any
 * select()/poll() waiters registered on bd_sel.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	/* Wake threads blocked in bpf_sleep() (they sleep on d itself). */
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
#ifndef __APPLE__
	/* XXX */
	d->bd_sel.si_pid = 0;
#endif
#else
	/* Pre-4.3-Reno select plumbing: single recorded process. */
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}
762
763 /* keep in sync with bpf_movein above: */
764 #define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
765
766 int
767 bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag)
768 {
769 register struct bpf_d *d;
770 struct ifnet *ifp;
771 struct mbuf *m;
772 int error;
773 char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
774 int datlen;
775
776 d = bpf_dtab[minor(dev)];
777 if (d == 0 || d == (void *)1)
778 return (ENXIO);
779
780 lck_mtx_lock(bpf_mlock);
781
782 if (d->bd_bif == 0) {
783 lck_mtx_unlock(bpf_mlock);
784 return (ENXIO);
785 }
786
787 ifp = d->bd_bif->bif_ifp;
788
789 if (uio->uio_resid == 0) {
790 lck_mtx_unlock(bpf_mlock);
791 return (0);
792 }
793 ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
794 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m,
795 d->bd_hdrcmplt ? 0 : (struct sockaddr *)dst_buf, &datlen);
796 if (error) {
797 lck_mtx_unlock(bpf_mlock);
798 return (error);
799 }
800
801 if ((unsigned)datlen > ifp->if_mtu) {
802 lck_mtx_unlock(bpf_mlock);
803 return (EMSGSIZE);
804 }
805
806 lck_mtx_unlock(bpf_mlock);
807
808 if (d->bd_hdrcmplt) {
809 error = dlil_output(ifp, 0, m, NULL, NULL, 1);
810 }
811 else {
812 error = dlil_output(ifp, PF_INET, m, NULL, (struct sockaddr *)dst_buf, 0);
813 }
814
815 /*
816 * The driver frees the mbuf.
817 */
818 return (error);
819 }
820
821 /*
822 * Reset a descriptor by flushing its packet buffer and clearing the
823 * receive and drop counts. Should be called at splimp.
824 */
825 static void
826 reset_d(struct bpf_d *d)
827 {
828 if (d->bd_hbuf) {
829 /* Free the hold buffer. */
830 d->bd_fbuf = d->bd_hbuf;
831 d->bd_hbuf = 0;
832 }
833 d->bd_slen = 0;
834 d->bd_hlen = 0;
835 d->bd_rcount = 0;
836 d->bd_dcount = 0;
837 }
838
/*
 * bpfioctl - dispatch all BPF ioctl commands for one descriptor.
 * The whole switch runs under bpf_mlock; individual cases additionally
 * raise splimp() around buffer-state changes.
 *
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 */
/* ARGSUSED */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, struct proc *p)
{
	register struct bpf_d *d;
	int s, error = 0;

	/* 0 = not open, 1 = open/close in progress: reject both. */
	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1)
		return (ENXIO);

	lck_mtx_lock(bpf_mlock);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Bytes pending = store buffer + hold buffer (if any). */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Forward the query to the bound interface's driver. */
				ifp = d->bd_bif->bif_ifp;
				error = dlil_ioctl(0, ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is bound
	 * (the buffers are allocated at BIOCSETIF time); the requested
	 * size is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the
	 * clamped value is written back to the caller.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		if (proc_is64bit(p)) {
			error = bpf_setf(d, (struct user_bpf_program *)addr);
		}
		else {
			/* 32-bit caller: widen the program pointer to the
			 * user_bpf_program layout bpf_setf expects. */
			struct bpf_program * tmpp;
			struct user_bpf_program tmp;

			tmpp = (struct bpf_program *)addr;
			tmp.bf_len = tmpp->bf_len;
			tmp.bf_insns = CAST_USER_ADDR_T(tmpp->bf_insns);
			error = bpf_setf(d, &tmp);
		}
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		/* Only the first request per descriptor hits the driver;
		 * bd_promisc remembers that we did it so close undoes it. */
		if (d->bd_promisc == 0) {
			error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
				 "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert bd_rtout (ticks) back into a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;
#ifndef __APPLE__
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif
	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}

	lck_mtx_unlock(bpf_mlock);

	return (error);
}
1145
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * A NULL bf_insns with bf_len == 0 removes the current filter.  The old
 * program is freed only after the new one is installed under splimp(),
 * and the buffers are flushed so no packets matched by the old filter
 * remain visible.
 */
static int
bpf_setf(struct bpf_d *d, struct user_bpf_program *fp)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == USER_ADDR_NULL) {
		/* Remove-filter request: length must also be zero. */
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	/* Bound the program size before trusting the user-supplied length. */
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(struct bpf_insn);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
#ifdef __APPLE__
	if (fcode == NULL)
		return (ENOBUFS);
#endif
	/* Install only programs that copy in cleanly AND pass the
	 * in-kernel verifier; anything else is rejected as EINVAL. */
	if (copyin(fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
1193
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	/* Resolve the interface name ("en0" etc.) to an ifnet. */
	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		/* First bind on this descriptor: allocate its buffers now
		 * (size was fixed by BIOCSBLEN or the default). */
		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}
1250
1251 /*
1252 * Support for select() and poll() system calls
1253 *
1254 * Return true iff the specific operation will not block indefinitely.
1255 * Otherwise, return false but make a note that a selwakeup() must be done.
1256 */
1257 int
1258 bpfpoll(dev_t dev, int events, void * wql, struct proc *p)
1259 {
1260 register struct bpf_d *d;
1261 register int s;
1262 int revents = 0;
1263
1264 d = bpf_dtab[minor(dev)];
1265 if (d == 0 || d == (void *)1)
1266 return (ENXIO);
1267
1268 lck_mtx_lock(bpf_mlock);
1269
1270 /*
1271 * An imitation of the FIONREAD ioctl code.
1272 */
1273 if (d->bd_bif == NULL) {
1274 lck_mtx_unlock(bpf_mlock);
1275 return (ENXIO);
1276 }
1277
1278 s = splimp();
1279 if (events & (POLLIN | POLLRDNORM)) {
1280 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
1281 revents |= events & (POLLIN | POLLRDNORM);
1282 else
1283 selrecord(p, &d->bd_sel, wql);
1284 }
1285 splx(s);
1286
1287 lck_mtx_unlock(bpf_mlock);
1288 return (revents);
1289 }
1290
1291 /*
1292 * Incoming linkage from device drivers. Process the packet pkt, of length
1293 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1294 * by each process' filter, and if accepted, stashed into the corresponding
1295 * buffer.
1296 */
void
bpf_tap(struct ifnet *ifp, u_char *pkt, u_int pktlen)
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data. This is not the case.
	 */
	lck_mtx_lock(bpf_mlock);

	bp = ifp->if_bpf;
#ifdef __APPLE__
	if (bp) {
#endif
	/* Run every listener's filter over the contiguous packet. */
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen = bytes the filter wants captured; 0 rejects the packet. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
#ifdef __APPLE__
	}
	lck_mtx_unlock(bpf_mlock);
#endif
}
1325
1326 /*
1327 * Copy data from an mbuf chain into a buffer. This code is derived
1328 * from m_copydata in sys/uipc_mbuf.c.
1329 */
1330 static void
1331 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1332 {
1333 const struct mbuf *m;
1334 u_int count;
1335 u_char *dst;
1336
1337 m = src_arg;
1338 dst = dst_arg;
1339 while (len > 0) {
1340 if (m == 0)
1341 panic("bpf_mcopy");
1342 count = min(m->m_len, len);
1343 bcopy(mtod(m, const void *), dst, count);
1344 m = m->m_next;
1345 dst += count;
1346 len -= count;
1347 }
1348 }
1349
1350 /*
1351 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1352 */
1353 void
1354 bpf_mtap(struct ifnet *ifp, struct mbuf *m)
1355 {
1356 struct bpf_if *bp;
1357 struct bpf_d *d;
1358 u_int pktlen, slen;
1359 struct mbuf *m0;
1360
1361 lck_mtx_lock(bpf_mlock);
1362
1363 bp = ifp->if_bpf;
1364 if (bp) {
1365 pktlen = 0;
1366 for (m0 = m; m0 != 0; m0 = m0->m_next)
1367 pktlen += m0->m_len;
1368
1369 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1370 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1371 continue;
1372 ++d->bd_rcount;
1373 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1374 if (slen != 0)
1375 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
1376 }
1377 }
1378
1379 lck_mtx_unlock(bpf_mlock);
1380 }
1381
1382 /*
1383 * Move the packet data from interface memory (pkt) into the
1384 * store buffer. Return 1 if it's time to wakeup a listener (buffer full),
1385 * otherwise 0. "copy" is the routine called to do the actual data
1386 * transfer. bcopy is passed in to copy contiguous chunks, while
1387 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1388 * pkt is really an mbuf.
1389 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	    void (*cpfn)(const void *, void *, size_t))
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	/* Per-interface header length, padded for alignment at attach time. */
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move. If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much. Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;	/* count the drop for statistics */
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set. A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	/* Timestamp with whichever clock API this BSD flavor provides. */
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * bh_caplen is set inline to the payload bytes actually captured.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1455
1456 /*
1457 * Initialize all nonzero fields of a descriptor.
1458 */
1459 static int
1460 bpf_allocbufs(struct bpf_d *d)
1461 {
1462 d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1463 if (d->bd_fbuf == 0)
1464 return (ENOBUFS);
1465
1466 d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
1467 if (d->bd_sbuf == 0) {
1468 FREE(d->bd_fbuf, M_DEVBUF);
1469 return (ENOBUFS);
1470 }
1471 d->bd_slen = 0;
1472 d->bd_hlen = 0;
1473 return (0);
1474 }
1475
1476 /*
1477 * Free buffers currently in use by a descriptor.
1478 * Called on close.
1479 */
1480 static void
1481 bpf_freed(struct bpf_d *d)
1482 {
1483 /*
1484 * We don't need to lock out interrupts since this descriptor has
1485 * been detached from its interface and it yet hasn't been marked
1486 * free.
1487 */
1488 if (d->bd_sbuf != 0) {
1489 FREE(d->bd_sbuf, M_DEVBUF);
1490 if (d->bd_hbuf != 0)
1491 FREE(d->bd_hbuf, M_DEVBUF);
1492 if (d->bd_fbuf != 0)
1493 FREE(d->bd_fbuf, M_DEVBUF);
1494 }
1495 if (d->bd_filter)
1496 FREE((caddr_t)d->bd_filter, M_DEVBUF);
1497 }
1498
1499 /*
1500 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *)
1501 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
1502 * size of the link header (variable length headers not yet supported).
1503 */
1504 void
1505 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1506 {
1507 struct bpf_if *bp;
1508 bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_WAIT);
1509 if (bp == 0)
1510 panic("bpfattach");
1511
1512 lck_mtx_lock(bpf_mlock);
1513
1514 bp->bif_dlist = 0;
1515 bp->bif_ifp = ifp;
1516 bp->bif_dlt = dlt;
1517
1518 bp->bif_next = bpf_iflist;
1519 bpf_iflist = bp;
1520
1521 bp->bif_ifp->if_bpf = 0;
1522
1523 /*
1524 * Compute the length of the bpf header. This is not necessarily
1525 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1526 * that the network layer header begins on a longword boundary (for
1527 * performance reasons and to alleviate alignment restrictions).
1528 */
1529 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1530
1531 /* Take a reference on the interface */
1532 ifp_reference(ifp);
1533
1534 lck_mtx_unlock(bpf_mlock);
1535
1536 #ifndef __APPLE__
1537 if (bootverbose)
1538 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
1539 #endif
1540 }
1541
1542 /*
1543 * Detach bpf from an interface. This involves detaching each descriptor
1544 * associated with the interface, and leaving bd_bif NULL. Notify each
1545 * descriptor as it's detached so that any sleepers wake up and get
1546 * ENXIO.
1547 */
1548 void
1549 bpfdetach(struct ifnet *ifp)
1550 {
1551 struct bpf_if *bp, *bp_prev;
1552 struct bpf_d *d;
1553 int s;
1554
1555 s = splimp();
1556
1557 lck_mtx_lock(bpf_mlock);
1558
1559 /* Locate BPF interface information */
1560 bp_prev = NULL;
1561 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1562 if (ifp == bp->bif_ifp)
1563 break;
1564 bp_prev = bp;
1565 }
1566
1567 #ifdef __APPLE__
1568 /* Check for no BPF interface information */
1569 if (bp == NULL) {
1570 return;
1571 }
1572 #endif
1573
1574 /* Interface wasn't attached */
1575 if (bp->bif_ifp == NULL) {
1576 splx(s);
1577 #ifndef __APPLE__
1578 printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
1579 ifp->if_unit);
1580 #endif
1581 return;
1582 }
1583
1584 while ((d = bp->bif_dlist) != NULL) {
1585 bpf_detachd(d);
1586 bpf_wakeup(d);
1587 }
1588
1589 if (bp_prev) {
1590 bp_prev->bif_next = bp->bif_next;
1591 } else {
1592 bpf_iflist = bp->bif_next;
1593 }
1594
1595 ifp_release(ifp);
1596
1597 lck_mtx_unlock(bpf_mlock);
1598
1599 FREE(bp, M_DEVBUF);
1600
1601 splx(s);
1602 }
1603
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int i;
	int maj;

	/* One-shot setup of the bpf lock and character device. */
	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;

		/* Build the lock group/attributes, then the global bpf mutex. */
		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
		lck_grp_attr_setdefault(bpf_mlock_grp_attr);

		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

		bpf_mlock_attr = lck_attr_alloc_init();
		lck_attr_setdefault(bpf_mlock_attr);

		bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

		if (bpf_mlock == 0) {
			/*
			 * NOTE(review): the grp/attr objects allocated above
			 * are not released on this path -- looks like a leak;
			 * confirm and fix separately.
			 */
			printf("bpf_init: failed to allocate bpf_mlock\n");
			bpf_devsw_installed = 0;
			return;
		}

		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			/* Tear down everything allocated above before failing. */
			if (bpf_mlock)
				lck_mtx_free(bpf_mlock, bpf_mlock_grp);
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);

			bpf_mlock = 0;
			bpf_mlock_attr = 0;
			bpf_mlock_grp = 0;
			bpf_mlock_grp_attr = 0;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		/* Create one device node per supported bpf unit. */
		for (i = 0 ; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}
1657
#ifndef __APPLE__
/* FreeBSD-style hookup: run bpf_drvinit during driver initialization. */
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
#endif
1661
1662 #else /* !BPF */
1663 #ifndef __APPLE__
1664 /*
1665 * NOP stubs to allow bpf-using drivers to load and function.
1666 *
1667 * A 'better' implementation would allow the core bpf functionality
1668 * to be loaded at runtime.
1669 */
1670
1671 void
1672 bpf_tap(ifp, pkt, pktlen)
1673 struct ifnet *ifp;
1674 register u_char *pkt;
1675 register u_int pktlen;
1676 {
1677 }
1678
/* NOP stub: bpf support is compiled out (NBPFILTER == 0). */
void
bpf_mtap(struct ifnet *ifp, struct mbuf *m)
{
}
1685
1686 void
1687 bpfattach(ifp, dlt, hdrlen)
1688 struct ifnet *ifp;
1689 u_int dlt, hdrlen;
1690 {
1691 }
1692
/* NOP stub: bpf support is compiled out (NBPFILTER == 0). */
void
bpfdetach(struct ifnet *ifp)
{
}
1698
1699 u_int
1700 bpf_filter(pc, p, wirelen, buflen)
1701 register const struct bpf_insn *pc;
1702 register u_char *p;
1703 u_int wirelen;
1704 register u_int buflen;
1705 {
1706 return -1; /* "no filter" behaviour */
1707 }
1708 #endif /* !defined(__APPLE__) */
1709 #endif /* NBPFILTER > 0 */