/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include "bpf.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>
#include <sys/uio_internal.h>
#include <sys/file_internal.h>
#include <sys/event.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <net/firewire.h>

#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>

#include <kern/locks.h>

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* MAC_NET */

extern int tvtohz(struct timeval *);

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif


#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static unsigned int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");
static unsigned int bpf_maxdevices = 256;
SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW,
	&bpf_maxdevices, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet.
 * bpf_dtab holds pointers to the descriptors, indexed by minor device #.
 */
static struct bpf_if	*bpf_iflist;
#ifdef __APPLE__
/*
 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
 *
 * The value stored in bpf_dtab[n] represents three states:
 *  0: device not opened
 *  1: device opening or closing
 * other: device <n> opened with pointer to storage
 */
static struct bpf_d	**bpf_dtab = NULL;
static unsigned int bpf_dtab_size = 0;
static unsigned int	nbpfilter = 0;

static lck_mtx_t		*bpf_mlock;
static lck_grp_t		*bpf_mlock_grp;
static lck_grp_attr_t	*bpf_mlock_grp_attr;
static lck_attr_t		*bpf_mlock_attr;

/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#endif /* __APPLE__ */

static int	bpf_allocbufs(struct bpf_d *);
static errno_t	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *);
static int	bpf_setif(struct bpf_d *, ifnet_t ifp, u_int32_t dlt);
static void	bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t));
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, u_int bf_len, user_addr_t bf_insns);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *,
		    struct proc *);
static int	bpf_setdlt(struct bpf_d *, u_int);

/*static void *bpf_devfs_token[MAXBPFILTER];*/

static int bpf_devsw_installed;

void bpf_init(void *unused);
static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m);

/*
 * Darwin differs from BSD here; the following are static
 * on BSD and not static on Darwin.
 */
	d_open_t	    bpfopen;
	d_close_t	    bpfclose;
	d_read_t	    bpfread;
	d_write_t	    bpfwrite;
	ioctl_fcn_t	    bpfioctl;
	select_fcn_t	    bpfpoll;


/* Darwin's cdevsw struct differs slightly from BSD's */
#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	    bpfopen,
	/* close */	    bpfclose,
	/* read */	    bpfread,
	/* write */	    bpfwrite,
	/* ioctl */	    bpfioctl,
	/* stop */	    eno_stop,
	/* reset */	    eno_reset,
	/* tty */	    NULL,
	/* select */	    bpfpoll,
	/* mmap */	    eno_mmap,
	/* strategy */	    eno_strat,
	/* getc */	    eno_getc,
	/* putc */	    eno_putc,
	/* type */	    0
};

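/* Number of bytes in a sockaddr before the start of its sa_data field. */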
#define SOCKADDR_HDR_LEN	   offsetof(struct sockaddr, sa_data)

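/*
 * bpf_movein - copy a packet supplied by userland (in uio) into a freshly
 * allocated mbuf, choosing the link-header length from the data link type.
 * When sockp is non-NULL, the link header is copied into its sa_data field
 * so the caller can hand it to the interface output routine; *datlen is set
 * to the length of the payload that follows the link header.
 */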
static int
bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, struct sockaddr *sockp, int *datlen)
{
	struct mbuf *m;
	int error;
	int len;
	uint8_t sa_family;
	int hlen;

	switch (linktype) {

#if SLIP
	case DLT_SLIP:
		sa_family = AF_INET;
		hlen = 0;
		break;
#endif /* SLIP */

	case DLT_EN10MB:
		sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

#if FDDI
	case DLT_FDDI:
	#if defined(__FreeBSD__) || defined(__bsdi__)
		sa_family = AF_IMPLINK;
		hlen = 0;
	#else
		sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
	#endif
		break;
#endif /* FDDI */

	case DLT_RAW:
	case DLT_NULL:
		sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
	#endif

	case DLT_PPP:
		sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_APPLE_IP_OVER_IEEE1394:
		sa_family = AF_UNSPEC;
		hlen = sizeof(struct firewire_header);
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	default:
		return (EIO);
	}

	// LP64todo - fix this!
	len = uio_resid(uio);
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	if (sockp) {
		/*
		 * Build a sockaddr based on the data link layer type.
		 * We do this at this level because the ethernet header
		 * is copied directly into the data field of the sockaddr.
		 * In the case of SLIP, there is no header and the packet
		 * is forwarded as is.
		 * Also, we are careful to leave room at the front of the mbuf
		 * for the link level header.
		 */
		if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
			return (EIO);
		}
		sockp->sa_family = sa_family;
	} else {
		/*
		 * We're directly sending the packet data supplied by
		 * the user; we don't need to make room for the link
		 * header, and don't need the header length value any
		 * more, so set it to 0.
		 */
		hlen = 0;
	}

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if ((unsigned)len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

#ifdef __APPLE__

/*
 * The dynamic addition of a new device node must block all processes that
 * are opening the last device, so that no process gets an unexpected ENOENT.
 */
static void
bpf_make_dev_t(int maj)
{
	static int		bpf_growing = 0;
	unsigned int	cur_size = nbpfilter, i;

	if (nbpfilter >= bpf_maxdevices)
		return;

	while (bpf_growing) {
		/* Wait until new device has been created */
		(void)tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0);
	}
	if (nbpfilter > cur_size) {
		/* other thread grew it already */
		return;
	}
	bpf_growing = 1;

	/* need to grow bpf_dtab first */
	if (nbpfilter == bpf_dtab_size) {
		int new_dtab_size;
		struct bpf_d **new_dtab = NULL;
		struct bpf_d **old_dtab = NULL;

		new_dtab_size = bpf_dtab_size + NBPFILTER;
		new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT);
		if (new_dtab == 0) {
			printf("bpf_make_dev_t: malloc bpf_dtab failed\n");
			goto done;
		}
		if (bpf_dtab) {
			bcopy(bpf_dtab, new_dtab,
				  sizeof(struct bpf_d *) * bpf_dtab_size);
		}
		bzero(new_dtab + bpf_dtab_size,
			  sizeof(struct bpf_d *) * NBPFILTER);
		old_dtab = bpf_dtab;
		bpf_dtab = new_dtab;
		bpf_dtab_size = new_dtab_size;
		if (old_dtab != NULL)
			_FREE(old_dtab, M_DEVBUF);
	}
	i = nbpfilter++;
	(void) devfs_make_node(makedev(maj, i),
				DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
				"bpf%d", i);
done:
	bpf_growing = 0;
	wakeup((caddr_t)&bpf_growing);
}

#endif

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static errno_t
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int first = bp->bif_dlist == NULL;
	int	error = 0;

	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	if (first) {
		/* Find the default bpf entry for this ifp */
		if (bp->bif_ifp->if_bpf == NULL) {
			struct bpf_if	*primary;

			for (primary = bpf_iflist; primary && primary->bif_ifp != bp->bif_ifp;
				 primary = primary->bif_next)
				;

			bp->bif_ifp->if_bpf = primary;
		}

		/* Only call dlil_set_bpf_tap for primary dlt */
		if (bp->bif_ifp->if_bpf == bp)
			dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);

		if (bp->bif_tap)
			error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT);
	}

	return error;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;
	struct ifnet  *ifp;

	ifp = d->bd_bif->bif_ifp;
	bp = d->bd_bif;

	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		/* Only call dlil_set_bpf_tap for primary dlt */
		if (bp->bif_ifp->if_bpf == bp)
			dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL);
		if (bp->bif_tap)
			bp->bif_tap(ifp, bp->bif_dlt, BPF_TAP_DISABLE);

		for (bp = bpf_iflist; bp; bp = bp->bif_next)
			if (bp->bif_ifp == ifp && bp->bif_dlist != 0)
				break;
		if (bp == NULL)
			ifp->if_bpf = NULL;
	}
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		lck_mtx_unlock(bpf_mlock);
		if (ifnet_set_promiscuous(ifp, 0)) {
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 * Most likely the network interface is gone.
			 */
			printf("bpf: ifnet_set_promiscuous failed");
		}
		lck_mtx_lock(bpf_mlock);
	}
}


/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flags, __unused int fmt,
	__unused struct proc *p)
{
	struct bpf_d *d;

	lck_mtx_lock(bpf_mlock);
	if ((unsigned int) minor(dev) >= nbpfilter) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}
	/*
	 * New device nodes are created on demand when opening the last one.
	 * The programming model is for processes to loop on the minor starting at 0
	 * as long as EBUSY is returned. The loop stops when either the open succeeds or
	 * an error other than EBUSY is returned. That means that bpf_make_dev_t() must
	 * block all processes that are opening the last node. If not all
	 * processes are blocked, they could unexpectedly get ENOENT and abort their
	 * opening loop.
	 */
	if ((unsigned int) minor(dev) == (nbpfilter - 1))
		bpf_make_dev_t(major(dev));

	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 *
	 * Important: bpfopen() and bpfclose() have to check and set the status of a device
	 * in the same locking context, otherwise the device may be leaked because the vnode
	 * use count will be unexpectedly greater than 1 when close() is called.
	 */
	if (bpf_dtab[minor(dev)] == 0) {
		bpf_dtab[minor(dev)] = (void *)1;	/* Mark opening */
	} else {
		lck_mtx_unlock(bpf_mlock);
		return (EBUSY);
	}
	d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT);
	if (d == NULL) {
		/* this really is a catastrophic failure */
		printf("bpfopen: malloc bpf_d failed\n");
		bpf_dtab[minor(dev)] = NULL;
		lck_mtx_unlock(bpf_mlock);
		return ENOMEM;
	}
	bzero(d, sizeof(struct bpf_d));

	/*
	 * It is not necessary to take the BPF lock here because no other
	 * thread can access the device until it is marked opened...
	 */

	/* Mark "in use" and do most initialization. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
	d->bd_oflags = flags;
#if CONFIG_MACF_NET
	mac_bpfdesc_label_init(d);
	mac_bpfdesc_label_associate(kauth_cred_get(), d);
#endif
	bpf_dtab[minor(dev)] = d;				/* Mark opened */
	lck_mtx_unlock(bpf_mlock);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev_t dev, __unused int flags, __unused int fmt,
	 __unused struct proc *p)
{
	struct bpf_d *d;

	/* Take BPF lock to ensure no other thread is using the device */
	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}
	bpf_dtab[minor(dev)] = (void *)1;		/* Mark closing */

	if (d->bd_bif)
		bpf_detachd(d);
	selthreadclear(&d->bd_sel);
#if CONFIG_MACF_NET
	mac_bpfdesc_label_destroy(d);
#endif
	bpf_freed(d);

	/* Mark free in same context as bpfopen comes to check */
	bpf_dtab[minor(dev)] = NULL;			/* Mark closed */
	lck_mtx_unlock(bpf_mlock);

	_FREE(d, M_DEVBUF);

	return (0);
}


#define BPF_SLEEP bpf_sleep

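/*
 * bpf_sleep - wait for activity on the descriptor.  bpf_mlock cannot be
 * held across the sleep, so it is dropped around the tsleep() and
 * re-acquired before returning.
 */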
static int
bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
{
	int st;

	lck_mtx_unlock(bpf_mlock);

	st = tsleep((caddr_t)d, pri, wmesg, timo);

	lck_mtx_lock(bpf_mlock);

	return st;
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}


	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio_resid(uio) != d->bd_bufsize) {
		lck_mtx_unlock(bpf_mlock);
		return (EINVAL);
	}

	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			lck_mtx_unlock(bpf_mlock);
			return (ENXIO);
		}
		if (ioflag & IO_NDELAY) {
			lck_mtx_unlock(bpf_mlock);
			return (EWOULDBLOCK);
		}
		error = BPF_SLEEP(d, PRINET|PCATCH, "bpf",
				  d->bd_rtout);
		/*
		 * Make sure device is still opened
		 */
		d = bpf_dtab[minor(dev)];
		if (d == 0 || d == (void *)1) {
			lck_mtx_unlock(bpf_mlock);
			return (ENXIO);
		}
		if (error == EINTR || error == ERESTART) {
			lck_mtx_unlock(bpf_mlock);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				lck_mtx_unlock(bpf_mlock);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	lck_mtx_unlock(bpf_mlock);
	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	KNOTE(&d->bd_sel.si_note, 1);
#ifndef __APPLE__
	/* XXX */
	d->bd_sel.si_pid = 0;
#endif
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

/* keep in sync with bpf_movein above: */
#define MAX_DATALINK_HDR_LEN	(sizeof(struct firewire_header))

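/*
 * bpfwrite - inject a packet built from the user-supplied buffer onto the
 * interface this descriptor is bound to.  Unless the "header complete"
 * flag is set, a link-level sockaddr built by bpf_movein() is passed down
 * to the interface output path.
 */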
int
bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m = NULL;
	int error;
	char 		  dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
	int datlen = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}
	if (d->bd_bif == 0) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if (uio_resid(uio) == 0) {
		lck_mtx_unlock(bpf_mlock);
		return (0);
	}
	((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m,
			   d->bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf,
			   &datlen);
	if (error) {
		lck_mtx_unlock(bpf_mlock);
		return (error);
	}

	if ((unsigned)datlen > ifp->if_mtu) {
		lck_mtx_unlock(bpf_mlock);
		m_freem(m);
		return (EMSGSIZE);
	}

	if ((error = ifp_use(ifp, kIfNetUseCount_MustNotBeZero)) != 0) {
		lck_mtx_unlock(bpf_mlock);
		m_freem(m);
		return (error);
	}

#if CONFIG_MACF_NET
	mac_mbuf_label_associate_bpfdesc(d, m);
#endif
	lck_mtx_unlock(bpf_mlock);

	if (d->bd_hdrcmplt) {
		if (d->bd_bif->bif_send)
			error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m);
		else
			error = dlil_output(ifp, 0, m, NULL, NULL, 1);
	}
	else {
		error = dlil_output(ifp, PF_INET, m, NULL, (struct sockaddr *)dst_buf, 0);
	}

	if (ifp_unuse(ifp) != 0)
		ifp_use_reached_zero(ifp);

	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(struct bpf_d *d)
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 */
/* ARGSUSED */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags,
	 struct proc *p)
{
	struct bpf_d *d;
	int error = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = ifnet_ioctl(ifp, 0, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF32: {
		struct bpf_program32 *prg32 = (struct bpf_program32 *)addr;
		error = bpf_setf(d, prg32->bf_len,
		    CAST_USER_ADDR_T(prg32->bf_insns));
		break;
	}

	case BIOCSETF64: {
		struct bpf_program64 *prg64 = (struct bpf_program64 *)addr;
		error = bpf_setf(d, prg64->bf_len, prg64->bf_insns);
		break;
	}

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		reset_d(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
			    (struct bpf_dltlist *)addr, p);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF: {
		ifnet_t	ifp;
		ifp = ifunit(((struct ifreq *)addr)->ifr_name);
		if (ifp == NULL)
			error = ENXIO;
		else
			error = bpf_setif(d, ifp, 0);
		break;
	}

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct BPF_TIMEVAL *_tv = (struct BPF_TIMEVAL *)addr;
			struct timeval tv;

			tv.tv_sec  = _tv->tv_sec;
			tv.tv_usec = _tv->tv_usec;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(&tv)) == 0)
				d->bd_rtout = tvtohz(&tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct BPF_TIMEVAL *tv = (struct BPF_TIMEVAL *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;
#ifndef __APPLE__
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif
	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}

	lck_mtx_unlock(bpf_mlock);

	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	old = d->bd_filter;
	if (bf_insns == USER_ADDR_NULL) {
		if (bf_len != 0)
			return (EINVAL);
		d->bd_filter = NULL;
		reset_d(d);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(struct bpf_insn);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
#ifdef __APPLE__
	if (fcode == NULL)
		return (ENOBUFS);
#endif
	if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		d->bd_filter = fcode;
		reset_d(d);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, ifnet_t theywant, u_int32_t dlt)
{
	struct bpf_if *bp;
	int error;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant || (dlt != 0 && dlt != bp->bif_dlt))
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			if (bpf_attachd(d, bp) != 0) {
				return ENXIO;
			}
		}
		reset_d(d);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}



/*
 * Get a list of the available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl, struct proc *p)
{
	u_int		n;
	int		error;
	struct ifnet	*ifp;
	struct bpf_if	*bp;
	user_addr_t	dlist;

	if (proc_is64bit(p)) {
		dlist = (user_addr_t)bfl->bfl_u.bflu_pad;
	} else {
		dlist = CAST_USER_ADDR_T(bfl->bfl_u.bflu_list);
	}

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (dlist != USER_ADDR_NULL) {
			if (n >= bfl->bfl_len) {
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt, dlist,
			    sizeof (bp->bif_dlt));
			dlist += sizeof (bp->bif_dlt);
		}
		n++;
	}
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, uint32_t dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		error = bpf_attachd(d, bp);
		if (error) {
			printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n",
				ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), error);
			return error;
		}
		reset_d(d);
		if (opromisc) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(bp->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error)
				printf("bpf_setdlt: ifpromisc %s%d failed (%d)\n",
					   ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

/*
 * Support for select()
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev_t dev, int events, void * wql, struct proc *p)
{
	struct bpf_d *d;
	int revents = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	if (d->bd_bif == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel, wql);
	}

	lck_mtx_unlock(bpf_mlock);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int bpfkqfilter(dev_t dev, struct knote *kn);
static void filt_bpfdetach(struct knote *);
static int filt_bpfread(struct knote *, long);

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;

	/*
	 * Is this device a bpf?
	 */
	if (major(dev) != CDEV_MAJOR) {
		return (EINVAL);
	}

	if (kn->kn_filter != EVFILT_READ) {
		return (EINVAL);
	}

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	if (d->bd_bif == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	kn->kn_hook = d;
	kn->kn_fop = &bpfread_filtops;
	KNOTE_ATTACH(&d->bd_sel.si_note, kn);
	lck_mtx_unlock(bpf_mlock);
	return 0;
}

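/*
 * filt_bpfdetach - remove a knote from the descriptor's note list when the
 * kevent is deleted or the file is closed.
 */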
static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	lck_mtx_lock(bpf_mlock);
	KNOTE_DETACH(&d->bd_sel.si_note, kn);
	lck_mtx_unlock(bpf_mlock);
}

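/*
 * filt_bpfread - report how much captured data a read would return.  In
 * immediate mode the store buffer counts as readable data; otherwise only
 * a completed (hold) buffer does.
 */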
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready = 0;

	if (hint == 0)
		lck_mtx_lock(bpf_mlock);

	if (d->bd_immediate) {
		kn->kn_data = (d->bd_hlen == 0 ? d->bd_slen : d->bd_hlen);
		ready = (kn->kn_data >= ((kn->kn_sfflags & NOTE_LOWAT) ?
		    kn->kn_sdata : 1));
	} else {
		kn->kn_data = d->bd_hlen;
		ready = (kn->kn_data > 0);
	}

	if (hint == 0)
		lck_mtx_unlock(bpf_mlock);
	return (ready);
}

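/*
 * Strip the const qualifier from a pointer without triggering compiler
 * warnings; used by bpf_mcopy() below to treat its const source argument
 * as an mbuf chain.
 */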
static inline void*
_cast_non_const(const void * ptr) {
	union {
		const void*		cval;
		void*			val;
	} ret;

	ret.cval = ptr;
	return (ret.val);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	struct mbuf *m = _cast_non_const(src_arg);
	u_int count;
	u_char *dst;

	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mbuf_data(m), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

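/*
 * bpf_tap_imp - common entry point for tapping inbound and outbound
 * packets.  Runs each attached descriptor's filter over the packet
 * (optionally prepending a caller-supplied header) and hands matching
 * packets to catchpacket().
 */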
static inline void
bpf_tap_imp(
	ifnet_t		ifp,
	u_int32_t	dlt,
	mbuf_t		m,
	void*		hdr,
	size_t		hlen,
	int		outbound)
{
	struct bpf_if *bp;

	/*
	 * It's possible that we get here after the bpf descriptor has been
	 * detached from the interface; in such a case we simply return.
	 * Lock ordering is important since we can be called asynchronously
	 * (from the IOKit) to process an inbound packet; when that happens
	 * we would have been holding its "gateLock" and will be acquiring
	 * "bpf_mlock" upon entering this routine.  Due to that, we release
	 * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
	 * acquire "gateLock" in the IOKit), in order to avoid a deadlock
	 * when an ifnet_set_promiscuous request simultaneously collides with
	 * an inbound packet being passed into the tap callback.
	 */
	lck_mtx_lock(bpf_mlock);
	if (ifp->if_bpf == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return;
	}
	bp = ifp->if_bpf;
	for (bp = ifp->if_bpf; bp && bp->bif_ifp == ifp &&
		 (dlt != 0 && bp->bif_dlt != dlt); bp = bp->bif_next)
		;
	if (bp && bp->bif_ifp == ifp && bp->bif_dlist != NULL) {
		struct bpf_d	*d;
		struct m_hdr	hack_hdr;
		u_int	pktlen = 0;
		u_int	slen = 0;
		struct mbuf *m0;

		if (hdr) {
			/*
			 * This is gross. We mock up an mbuf that points to the
			 * header buffer. This means we don't have to copy the
			 * header. A number of interfaces prepended headers just
			 * for bpf by allocating an mbuf on the stack. We want to
			 * give developers an easy way to prepend a header for bpf.
			 * Since a developer allocating an mbuf on the stack is bad,
			 * we do even worse here, allocating only a header to point
			 * to a buffer the developer supplied. This makes assumptions
			 * that bpf_filter and catchpacket will not look at anything
			 * in the mbuf other than the header. This was true at the
			 * time this code was written.
			 */
			hack_hdr.mh_next = m;
			hack_hdr.mh_nextpkt = NULL;
			hack_hdr.mh_len = hlen;
			hack_hdr.mh_data = hdr;
			hack_hdr.mh_type = m->m_type;
			hack_hdr.mh_flags = 0;

			m = (mbuf_t)&hack_hdr;
		}

		for (m0 = m; m0 != 0; m0 = m0->m_next)
			pktlen += m0->m_len;

		for (d = bp->bif_dlist; d; d = d->bd_next) {
			if (outbound && !d->bd_seesent)
				continue;
			++d->bd_rcount;
			slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
			if (slen != 0) {
#if CONFIG_MACF_NET
				if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0)
					continue;
#endif
				catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
			}
		}
	}
	lck_mtx_unlock(bpf_mlock);
}

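/* Tap a packet that the interface is about to transmit. */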
void
bpf_tap_out(
	ifnet_t		ifp,
	u_int32_t	dlt,
	mbuf_t		m,
	void*		hdr,
	size_t		hlen)
{
	bpf_tap_imp(ifp, dlt, m, hdr, hlen, 1);
}

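/* Tap a packet that the interface has received. */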
void
bpf_tap_in(
	ifnet_t		ifp,
	u_int32_t	dlt,
	mbuf_t		m,
	void*		hdr,
	size_t		hlen)
{
	bpf_tap_imp(ifp, dlt, m, hdr, hlen, 0);
}

/* Callback registered with Ethernet driver. */
static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	bpf_tap_imp(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL);

	return 0;
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reads once the buffer fills.
 * "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
	void (*cpfn)(const void *, void *, size_t))
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	struct timeval tv;
	microtime(&tv);
	hp->bh_tstamp.tv_sec = tv.tv_sec;
	hp->bh_tstamp.tv_usec = tv.tv_usec;
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_sbuf == 0) {
		FREE(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		FREE(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			FREE(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			FREE(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		FREE((caddr_t)d->bd_filter, M_DEVBUF);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpf_attach(ifp, dlt, hdrlen, NULL, NULL);
}

errno_t
bpf_attach(
	ifnet_t			ifp,
	u_int32_t		dlt,
	u_int32_t		hdrlen,
	bpf_send_func	send,
	bpf_tap_func	tap)
{
	struct bpf_if *bp_new;
	struct bpf_if *bp_temp;
	struct bpf_if *bp_first = NULL;

	bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF, M_WAIT);
	if (bp_new == 0)
		panic("bpfattach");

	lck_mtx_lock(bpf_mlock);

	/*
	 * Check if this interface/dlt is already attached, record first
	 * attachment for this interface.
	 */
	for (bp_temp = bpf_iflist; bp_temp && (bp_temp->bif_ifp != ifp ||
		 bp_temp->bif_dlt != dlt); bp_temp = bp_temp->bif_next) {
		if (bp_temp->bif_ifp == ifp && bp_first == NULL)
			bp_first = bp_temp;
	}

	if (bp_temp != NULL) {
		printf("bpfattach - %s%d with dlt %d is already attached\n",
			ifp->if_name, ifp->if_unit, dlt);
		FREE(bp_new, M_DEVBUF);
		lck_mtx_unlock(bpf_mlock);
		return EEXIST;
	}

	bzero(bp_new, sizeof(*bp_new));
	bp_new->bif_ifp = ifp;
	bp_new->bif_dlt = dlt;
	bp_new->bif_send = send;
	bp_new->bif_tap = tap;

	if (bp_first == NULL) {
		/* No other entries for this ifp */
		bp_new->bif_next = bpf_iflist;
		bpf_iflist = bp_new;
	}
	else {
		/* Add this after the first entry for this interface */
		bp_new->bif_next = bp_first->bif_next;
		bp_first->bif_next = bp_new;
	}

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	/* Take a reference on the interface */
	ifnet_reference(ifp);

	lck_mtx_unlock(bpf_mlock);

#ifndef __APPLE__
	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
#endif

	return 0;
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if	*bp, *bp_prev, *bp_next;
	struct bpf_if	*bp_free = NULL;
	struct bpf_d	*d;


	lck_mtx_lock(bpf_mlock);

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
		bp_next = bp->bif_next;
		if (ifp != bp->bif_ifp) {
			bp_prev = bp;
			continue;
		}

		while ((d = bp->bif_dlist) != NULL) {
			bpf_detachd(d);
			bpf_wakeup(d);
		}

		if (bp_prev) {
			bp_prev->bif_next = bp->bif_next;
		} else {
			bpf_iflist = bp->bif_next;
		}

		bp->bif_next = bp_free;
		bp_free = bp;

		ifnet_release(ifp);
	}

	lck_mtx_unlock(bpf_mlock);

	FREE(bp, M_DEVBUF);

}

1c79356b 1956void
91447636 1957bpf_init(__unused void *unused)
1c79356b 1958{
9bccf70c 1959#ifdef __APPLE__
1c79356b 1960 int i;
9bccf70c 1961 int maj;
1c79356b 1962
91447636 1963 if (bpf_devsw_installed == 0) {
9bccf70c 1964 bpf_devsw_installed = 1;
91447636
A
1965
1966 bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
91447636
A
1967
1968 bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
1969
1970 bpf_mlock_attr = lck_attr_alloc_init();
91447636
A
1971
1972 bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);
1973
1974 if (bpf_mlock == 0) {
1975 printf("bpf_init: failed to allocate bpf_mlock\n");
1976 bpf_devsw_installed = 0;
1977 return;
1978 }
1979
9bccf70c
A
1980 maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
1981 if (maj == -1) {
91447636
A
1982 if (bpf_mlock)
1983 lck_mtx_free(bpf_mlock, bpf_mlock_grp);
1984 if (bpf_mlock_attr)
1985 lck_attr_free(bpf_mlock_attr);
1986 if (bpf_mlock_grp)
1987 lck_grp_free(bpf_mlock_grp);
1988 if (bpf_mlock_grp_attr)
1989 lck_grp_attr_free(bpf_mlock_grp_attr);
1990
2d21ac55
A
1991 bpf_mlock = NULL;
1992 bpf_mlock_attr = NULL;
1993 bpf_mlock_grp = NULL;
1994 bpf_mlock_grp_attr = NULL;
91447636 1995 bpf_devsw_installed = 0;
9bccf70c 1996 printf("bpf_init: failed to allocate a major number!\n");
55e303ae 1997 return;
9bccf70c 1998 }
91447636 1999
55e303ae
A
2000 for (i = 0 ; i < NBPFILTER; i++)
2001 bpf_make_dev_t(maj);
9bccf70c
A
2002 }
2003#else
2004 cdevsw_add(&bpf_cdevsw);
2005#endif
1c79356b
A
2006}

#ifndef __APPLE__
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
#endif

#if CONFIG_MACF_NET
struct label *
mac_bpfdesc_label_get(struct bpf_d *d)
{

	return (d->bd_label);
}

void
mac_bpfdesc_label_set(struct bpf_d *d, struct label *label)
{

	d->bd_label = label;
}
#endif