[apple/xnu.git] / bsd / net / bpf.c (blame view, release xnu-4570.20.62)
1c79356b 1/*
813fb2f6 2 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (c) 1990, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from the Stanford/CMU enet packet filter,
33 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
34 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
35 * Berkeley Laboratory.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
66 *
9bccf70c 67 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
1c79356b 68 */
2d21ac55
A
69/*
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
73 * Version 2.0.
74 */
1c79356b 75
9bccf70c 76#include "bpf.h"
1c79356b
A
77
78#ifndef __GNUC__
79#define inline
80#else
81#define inline __inline
82#endif
83
84#include <sys/param.h>
85#include <sys/systm.h>
86#include <sys/conf.h>
87#include <sys/malloc.h>
88#include <sys/mbuf.h>
89#include <sys/time.h>
90#include <sys/proc.h>
1c79356b
A
91#include <sys/signalvar.h>
92#include <sys/filio.h>
93#include <sys/sockio.h>
94#include <sys/ttycom.h>
95#include <sys/filedesc.h>
91447636 96#include <sys/uio_internal.h>
b0d623f7
A
97#include <sys/file_internal.h>
98#include <sys/event.h>
1c79356b 99
9bccf70c
A
100#include <sys/poll.h>
101
1c79356b 102#include <sys/socket.h>
316670eb 103#include <sys/socketvar.h>
1c79356b
A
104#include <sys/vnode.h>
105
106#include <net/if.h>
107#include <net/bpf.h>
108#include <net/bpfdesc.h>
109
110#include <netinet/in.h>
316670eb
A
111#include <netinet/in_pcb.h>
112#include <netinet/in_var.h>
113#include <netinet/ip_var.h>
114#include <netinet/tcp.h>
115#include <netinet/tcp_var.h>
116#include <netinet/udp.h>
117#include <netinet/udp_var.h>
1c79356b
A
118#include <netinet/if_ether.h>
119#include <sys/kernel.h>
120#include <sys/sysctl.h>
55e303ae 121#include <net/firewire.h>
1c79356b 122
1c79356b
A
123#include <miscfs/devfs/devfs.h>
124#include <net/dlil.h>
fe8ab488 125#include <net/pktap.h>
1c79356b 126
91447636 127#include <kern/locks.h>
6d2010ae 128#include <kern/thread_call.h>
5ba3f43e 129#include <libkern/section_keywords.h>
91447636 130
2d21ac55
A
131#if CONFIG_MACF_NET
132#include <security/mac_framework.h>
 133#endif /* CONFIG_MACF_NET */
91447636 134
2d21ac55 135extern int tvtohz(struct timeval *);
9bccf70c 136
1c79356b
A
137#define BPF_BUFSIZE 4096
138#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
1c79356b 139
55e303ae 140
1c79356b
A
141#define PRINET 26 /* interruptible */
142
5ba3f43e
A
143typedef void (*pktcopyfunc_t)(const void *, void *, size_t);
144
1c79356b
A
145/*
146 * The default read buffer size is patchable.
147 */
91447636 148static unsigned int bpf_bufsize = BPF_BUFSIZE;
6d2010ae 149SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
1c79356b 150 &bpf_bufsize, 0, "");
6d2010ae
A
151__private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
152SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
9bccf70c 153 &bpf_maxbufsize, 0, "");
91447636 154static unsigned int bpf_maxdevices = 256;
6d2010ae 155SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED,
91447636 156 &bpf_maxdevices, 0, "");
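/*
 * Commentary (not in the original source): these knobs surface as
 * sysctl debug.bpf_bufsize, debug.bpf_maxbufsize and debug.bpf_maxdevices,
 * so a privileged user could presumably raise the capture buffer ceiling
 * before a reader negotiates a bigger buffer with BIOCSBLEN, e.g.:
 *
 *     sysctl -w debug.bpf_maxbufsize=1048576
 */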
fe8ab488
A
157/*
 158 * bpf_wantpktap controls the default visibility of DLT_PKTAP.
 159 * On OS X it is off by default, so a process needs to issue the BIOCSWANTPKTAP
 160 * ioctl explicitly to be able to use DLT_PKTAP.
161 */
5ba3f43e
A
162#if CONFIG_EMBEDDED
163static unsigned int bpf_wantpktap = 1;
164#else
fe8ab488 165static unsigned int bpf_wantpktap = 0;
5ba3f43e 166#endif
fe8ab488
A
167SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED,
168 &bpf_wantpktap, 0, "");
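/*
 * Illustrative sketch (assumption, not original code): a process that
 * wants DLT_PKTAP where it is off by default would opt in before binding
 * an interface, using the BIOCSWANTPKTAP ioctl handled in bpfioctl():
 *
 *     u_int want = 1;
 *     if (ioctl(fd, BIOCSWANTPKTAP, &want) == -1)
 *         err(1, "BIOCSWANTPKTAP");
 */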
1c79356b 169
3e170ce0
A
170static int bpf_debug = 0;
171SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
172 &bpf_debug, 0, "");
173
1c79356b
A
174/*
175 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
55e303ae 176 * bpf_dtab holds pointers to the descriptors, indexed by minor device #
1c79356b
A
177 */
178static struct bpf_if *bpf_iflist;
9bccf70c
A
179#ifdef __APPLE__
180/*
181 * BSD now stores the bpf_d in the dev_t which is a struct
182 * on their system. Our dev_t is an int, so we still store
183 * the bpf_d in a separate table indexed by minor device #.
91447636
A
184 *
 185 * The value stored in bpf_dtab[n] represents one of three states:
186 * 0: device not opened
187 * 1: device opening or closing
188 * other: device <n> opened with pointer to storage
9bccf70c 189 */
55e303ae 190static struct bpf_d **bpf_dtab = NULL;
91447636
A
191static unsigned int bpf_dtab_size = 0;
192static unsigned int nbpfilter = 0;
193
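/*
 * Sketch (commentary, not original code) of how the sentinel states above
 * are tested before a slot is touched:
 *
 *     struct bpf_d *d = bpf_dtab[minor(dev)];
 *     if (d == NULL)              // not opened: open may claim the slot
 *         ...
 *     else if (d == (void *)1)    // opening or closing: treat as busy
 *         ...
 *     else                        // opened: d points to the descriptor
 *         ...
 */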
316670eb
A
194decl_lck_mtx_data(static, bpf_mlock_data);
195static lck_mtx_t *bpf_mlock = &bpf_mlock_data;
91447636
A
196static lck_grp_t *bpf_mlock_grp;
197static lck_grp_attr_t *bpf_mlock_grp_attr;
198static lck_attr_t *bpf_mlock_attr;
55e303ae 199
55e303ae 200#endif /* __APPLE__ */
1c79356b 201
91447636 202static int bpf_allocbufs(struct bpf_d *);
2d21ac55 203static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
3e170ce0 204static int bpf_detachd(struct bpf_d *d, int);
91447636 205static void bpf_freed(struct bpf_d *);
91447636
A
206static int bpf_movein(struct uio *, int,
207 struct mbuf **, struct sockaddr *, int *);
5ba3f43e 208static int bpf_setif(struct bpf_d *, ifnet_t ifp);
39236c6e
A
209static void bpf_timed_out(void *, void *);
210static void bpf_wakeup(struct bpf_d *);
5ba3f43e 211static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int);
91447636 212static void reset_d(struct bpf_d *);
3e170ce0 213static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long);
316670eb 214static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *);
3e170ce0 215static int bpf_setdlt(struct bpf_d *, u_int);
316670eb
A
216static int bpf_set_traffic_class(struct bpf_d *, int);
217static void bpf_set_packet_service_class(struct mbuf *, int);
1c79356b 218
3e170ce0
A
219static void bpf_acquire_d(struct bpf_d *);
220static void bpf_release_d(struct bpf_d *);
55e303ae
A
221
222static int bpf_devsw_installed;
223
91447636 224void bpf_init(void *unused);
2d21ac55 225static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m);
55e303ae 226
9bccf70c
A
227/*
 228 * Darwin differs from BSD here: the following are static
229 * on BSD and not static on Darwin.
230 */
6d2010ae
A
231 d_open_t bpfopen;
232 d_close_t bpfclose;
233 d_read_t bpfread;
234 d_write_t bpfwrite;
316670eb
A
235 ioctl_fcn_t bpfioctl;
236 select_fcn_t bpfselect;
1c79356b 237
1c79356b 238
9bccf70c
A
239/* Darwin's cdevsw struct differs slightly from BSDs */
240#define CDEV_MAJOR 23
1c79356b 241static struct cdevsw bpf_cdevsw = {
6d2010ae
A
242 /* open */ bpfopen,
243 /* close */ bpfclose,
244 /* read */ bpfread,
245 /* write */ bpfwrite,
246 /* ioctl */ bpfioctl,
316670eb
A
247 /* stop */ eno_stop,
248 /* reset */ eno_reset,
249 /* tty */ NULL,
250 /* select */ bpfselect,
251 /* mmap */ eno_mmap,
252 /* strategy*/ eno_strat,
253 /* getc */ eno_getc,
254 /* putc */ eno_putc,
255 /* type */ 0
1c79356b
A
256};
257
55e303ae 258#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)
9bccf70c 259
1c79356b 260static int
91447636 261bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, struct sockaddr *sockp, int *datlen)
1c79356b
A
262{
263 struct mbuf *m;
264 int error;
265 int len;
2d21ac55 266 uint8_t sa_family;
1c79356b
A
267 int hlen;
268
2d21ac55 269 switch (linktype) {
91447636 270
2d21ac55
A
271#if SLIP
272 case DLT_SLIP:
273 sa_family = AF_INET;
274 hlen = 0;
275 break;
276#endif /* SLIP */
91447636 277
2d21ac55
A
278 case DLT_EN10MB:
279 sa_family = AF_UNSPEC;
280 /* XXX Would MAXLINKHDR be better? */
281 hlen = sizeof(struct ether_header);
282 break;
91447636 283
2d21ac55
A
284#if FDDI
285 case DLT_FDDI:
91447636 286 #if defined(__FreeBSD__) || defined(__bsdi__)
2d21ac55
A
287 sa_family = AF_IMPLINK;
288 hlen = 0;
91447636 289 #else
2d21ac55
A
290 sa_family = AF_UNSPEC;
291 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
292 hlen = 24;
91447636 293 #endif
2d21ac55
A
294 break;
295#endif /* FDDI */
91447636 296
2d21ac55
A
297 case DLT_RAW:
298 case DLT_NULL:
299 sa_family = AF_UNSPEC;
300 hlen = 0;
301 break;
91447636
A
302
303 #ifdef __FreeBSD__
2d21ac55
A
304 case DLT_ATM_RFC1483:
305 /*
306 * en atm driver requires 4-byte atm pseudo header.
307 * though it isn't standard, vpi:vci needs to be
308 * specified anyway.
309 */
310 sa_family = AF_UNSPEC;
311 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
312 break;
91447636 313 #endif
2d21ac55
A
314
315 case DLT_PPP:
316 sa_family = AF_UNSPEC;
317 hlen = 4; /* This should match PPP_HDRLEN */
318 break;
91447636 319
2d21ac55
A
320 case DLT_APPLE_IP_OVER_IEEE1394:
321 sa_family = AF_UNSPEC;
322 hlen = sizeof(struct firewire_header);
323 break;
b0d623f7
A
324
325 case DLT_IEEE802_11: /* IEEE 802.11 wireless */
326 sa_family = AF_IEEE80211;
327 hlen = 0;
328 break;
316670eb 329
6d2010ae
A
330 case DLT_IEEE802_11_RADIO:
331 sa_family = AF_IEEE80211;
332 hlen = 0;
333 break;
b0d623f7 334
2d21ac55
A
335 default:
336 return (EIO);
55e303ae 337 }
2d21ac55 338
91447636
A
339 // LP64todo - fix this!
340 len = uio_resid(uio);
1c79356b
A
341 *datlen = len - hlen;
342 if ((unsigned)len > MCLBYTES)
343 return (EIO);
344
2d21ac55
A
345 if (sockp) {
346 /*
347 * Build a sockaddr based on the data link layer type.
348 * We do this at this level because the ethernet header
349 * is copied directly into the data field of the sockaddr.
350 * In the case of SLIP, there is no header and the packet
351 * is forwarded as is.
352 * Also, we are careful to leave room at the front of the mbuf
353 * for the link level header.
354 */
355 if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
356 return (EIO);
357 }
358 sockp->sa_family = sa_family;
359 } else {
360 /*
361 * We're directly sending the packet data supplied by
362 * the user; we don't need to make room for the link
363 * header, and don't need the header length value any
364 * more, so set it to 0.
365 */
366 hlen = 0;
367 }
368
1c79356b
A
369 MGETHDR(m, M_WAIT, MT_DATA);
370 if (m == 0)
371 return (ENOBUFS);
91447636 372 if ((unsigned)len > MHLEN) {
1c79356b
A
373 MCLGET(m, M_WAIT);
374 if ((m->m_flags & M_EXT) == 0) {
1c79356b
A
375 error = ENOBUFS;
376 goto bad;
377 }
378 }
379 m->m_pkthdr.len = m->m_len = len;
380 m->m_pkthdr.rcvif = NULL;
381 *mp = m;
6d2010ae 382
1c79356b
A
383 /*
384 * Make room for link header.
385 */
386 if (hlen != 0) {
387 m->m_pkthdr.len -= hlen;
388 m->m_len -= hlen;
1c79356b 389 m->m_data += hlen; /* XXX */
1c79356b
A
390 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
391 if (error)
392 goto bad;
393 }
394 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
6d2010ae
A
395 if (error)
396 goto bad;
397
398 /* Check for multicast destination */
399 switch (linktype) {
400 case DLT_EN10MB: {
401 struct ether_header *eh = mtod(m, struct ether_header *);
402
403 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
404 if (_ether_cmp(etherbroadcastaddr, eh->ether_dhost) == 0)
405 m->m_flags |= M_BCAST;
406 else
407 m->m_flags |= M_MCAST;
408 }
409 break;
410 }
411 }
412
413 return 0;
1c79356b
A
414 bad:
415 m_freem(m);
416 return (error);
417}
418
9bccf70c 419#ifdef __APPLE__
55e303ae
A
420
421/*
39236c6e
A
422 * The dynamic addition of a new device node must block all processes that
423 * are opening the last device so that no process will get an unexpected
424 * ENOENT
55e303ae 425 */
91447636
A
426static void
427bpf_make_dev_t(int maj)
55e303ae 428{
91447636
A
429 static int bpf_growing = 0;
430 unsigned int cur_size = nbpfilter, i;
55e303ae 431
91447636
A
432 if (nbpfilter >= bpf_maxdevices)
433 return;
55e303ae 434
91447636
A
435 while (bpf_growing) {
436 /* Wait until new device has been created */
437 (void)tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0);
438 }
439 if (nbpfilter > cur_size) {
440 /* other thread grew it already */
441 return;
442 }
443 bpf_growing = 1;
55e303ae 444
91447636
A
445 /* need to grow bpf_dtab first */
446 if (nbpfilter == bpf_dtab_size) {
447 int new_dtab_size;
448 struct bpf_d **new_dtab = NULL;
449 struct bpf_d **old_dtab = NULL;
450
451 new_dtab_size = bpf_dtab_size + NBPFILTER;
452 new_dtab = (struct bpf_d **)_MALLOC(sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT);
453 if (new_dtab == 0) {
454 printf("bpf_make_dev_t: malloc bpf_dtab failed\n");
455 goto done;
456 }
457 if (bpf_dtab) {
458 bcopy(bpf_dtab, new_dtab,
459 sizeof(struct bpf_d *) * bpf_dtab_size);
460 }
461 bzero(new_dtab + bpf_dtab_size,
462 sizeof(struct bpf_d *) * NBPFILTER);
463 old_dtab = bpf_dtab;
464 bpf_dtab = new_dtab;
465 bpf_dtab_size = new_dtab_size;
466 if (old_dtab != NULL)
467 _FREE(old_dtab, M_DEVBUF);
55e303ae 468 }
91447636
A
469 i = nbpfilter++;
470 (void) devfs_make_node(makedev(maj, i),
471 DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
472 "bpf%d", i);
473done:
474 bpf_growing = 0;
475 wakeup((caddr_t)&bpf_growing);
55e303ae
A
476}
477
9bccf70c 478#endif
1c79356b
A
479
480/*
481 * Attach file to the bpf interface, i.e. make d listen on bp.
1c79356b 482 */
2d21ac55 483static errno_t
91447636 484bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
1c79356b 485{
2d21ac55
A
486 int first = bp->bif_dlist == NULL;
487 int error = 0;
488
1c79356b
A
489 /*
490 * Point d at bp, and add d to the interface's list of listeners.
491 * Finally, point the driver's bpf cookie at the interface so
492 * it will divert packets to bpf.
493 */
494 d->bd_bif = bp;
495 d->bd_next = bp->bif_dlist;
496 bp->bif_dlist = d;
3e170ce0
A
497
498 /*
499 * Take a reference on the device even if an error is returned
500 * because we keep the device in the interface's list of listeners
501 */
502 bpf_acquire_d(d);
503
2d21ac55
A
504 if (first) {
505 /* Find the default bpf entry for this ifp */
506 if (bp->bif_ifp->if_bpf == NULL) {
fe8ab488 507 struct bpf_if *tmp, *primary = NULL;
2d21ac55 508
fe8ab488 509 for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) {
5ba3f43e
A
510 if (tmp->bif_ifp == bp->bif_ifp) {
511 primary = tmp;
512 break;
513 }
fe8ab488 514 }
2d21ac55
A
515 bp->bif_ifp->if_bpf = primary;
516 }
2d21ac55
A
517 /* Only call dlil_set_bpf_tap for primary dlt */
518 if (bp->bif_ifp->if_bpf == bp)
5ba3f43e
A
519 dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
520
521 if (bp->bif_tap != NULL)
7e4a7d39 522 error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT);
2d21ac55 523 }
1c79356b 524
3e170ce0
A
525 /*
526 * Reset the detach flags in case we previously detached an interface
527 */
528 d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED);
529
5ba3f43e 530 if (bp->bif_dlt == DLT_PKTAP) {
fe8ab488 531 d->bd_flags |= BPF_FINALIZE_PKTAP;
5ba3f43e 532 } else {
fe8ab488 533 d->bd_flags &= ~BPF_FINALIZE_PKTAP;
5ba3f43e 534 }
2d21ac55 535 return error;
1c79356b
A
536}
537
538/*
539 * Detach a file from its interface.
3e170ce0
A
540 *
541 * Return 1 if was closed by some thread, 0 otherwise
1c79356b 542 */
3e170ce0
A
543static int
544bpf_detachd(struct bpf_d *d, int closing)
1c79356b
A
545{
546 struct bpf_d **p;
547 struct bpf_if *bp;
548 struct ifnet *ifp;
549
3e170ce0
A
550 /*
551 * Some other thread already detached
552 */
553 if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0)
554 goto done;
555 /*
556 * This thread is doing the detach
557 */
558 d->bd_flags |= BPF_DETACHING;
559
1c79356b 560 ifp = d->bd_bif->bif_ifp;
1c79356b 561 bp = d->bd_bif;
3e170ce0
A
562
563 if (bpf_debug != 0)
564 printf("%s: %llx %s%s\n",
565 __func__, (uint64_t)VM_KERNEL_ADDRPERM(d),
566 if_name(ifp), closing ? " closing" : "");
567
2d21ac55
A
568 /* Remove d from the interface's descriptor list. */
569 p = &bp->bif_dlist;
570 while (*p != d) {
571 p = &(*p)->bd_next;
572 if (*p == 0)
573 panic("bpf_detachd: descriptor not in list");
574 }
575 *p = (*p)->bd_next;
576 if (bp->bif_dlist == 0) {
577 /*
578 * Let the driver know that there are no more listeners.
579 */
580 /* Only call dlil_set_bpf_tap for primary dlt */
581 if (bp->bif_ifp->if_bpf == bp)
582 dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL);
583 if (bp->bif_tap)
584 bp->bif_tap(ifp, bp->bif_dlt, BPF_TAP_DISABLE);
585
586 for (bp = bpf_iflist; bp; bp = bp->bif_next)
587 if (bp->bif_ifp == ifp && bp->bif_dlist != 0)
588 break;
589 if (bp == NULL)
590 ifp->if_bpf = NULL;
591 }
592 d->bd_bif = NULL;
1c79356b
A
593 /*
594 * Check if this descriptor had requested promiscuous mode.
595 * If so, turn it off.
596 */
597 if (d->bd_promisc) {
598 d->bd_promisc = 0;
2d21ac55
A
599 lck_mtx_unlock(bpf_mlock);
600 if (ifnet_set_promiscuous(ifp, 0)) {
1c79356b
A
601 /*
602 * Something is really wrong if we were able to put
603 * the driver into promiscuous mode, but can't
604 * take it out.
9bccf70c 605 * Most likely the network interface is gone.
1c79356b 606 */
3e170ce0 607 printf("%s: ifnet_set_promiscuous failed\n", __func__);
2d21ac55
A
608 }
609 lck_mtx_lock(bpf_mlock);
1c79356b 610 }
3e170ce0
A
611
612 /*
 613 * Wake up other threads that are waiting for this thread to finish
614 * detaching
615 */
616 d->bd_flags &= ~BPF_DETACHING;
617 d->bd_flags |= BPF_DETACHED;
618 /*
 619 * Note that we've kept the reference because we may have dropped
620 * the lock when turning off promiscuous mode
621 */
622 bpf_release_d(d);
623
624done:
625 /*
 626 * When closing, make sure no other thread refers to the bpf_d
627 */
628 if (bpf_debug != 0)
629 printf("%s: %llx done\n",
630 __func__, (uint64_t)VM_KERNEL_ADDRPERM(d));
631 /*
632 * Let the caller know the bpf_d is closed
633 */
634 if ((d->bd_flags & BPF_CLOSING))
635 return (1);
636 else
637 return (0);
1c79356b
A
638}
639
640
6d2010ae
A
641/*
642 * Start asynchronous timer, if necessary.
643 * Must be called with bpf_mlock held.
644 */
645static void
646bpf_start_timer(struct bpf_d *d)
647{
648 uint64_t deadline;
649 struct timeval tv;
650
651 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
652 tv.tv_sec = d->bd_rtout / hz;
653 tv.tv_usec = (d->bd_rtout % hz) * tick;
654
39236c6e
A
655 clock_interval_to_deadline(
656 (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec,
657 NSEC_PER_USEC, &deadline);
6d2010ae
A
658 /*
659 * The state is BPF_IDLE, so the timer hasn't
660 * been started yet, and hasn't gone off yet;
661 * there is no thread call scheduled, so this
662 * won't change the schedule.
663 *
664 * XXX - what if, by the time it gets entered,
665 * the deadline has already passed?
666 */
667 thread_call_enter_delayed(d->bd_thread_call, deadline);
668 d->bd_state = BPF_WAITING;
669 }
670}
671
672/*
673 * Cancel asynchronous timer.
674 * Must be called with bpf_mlock held.
675 */
676static boolean_t
677bpf_stop_timer(struct bpf_d *d)
678{
679 /*
680 * If the timer has already gone off, this does nothing.
681 * Our caller is expected to set d->bd_state to BPF_IDLE,
682 * with the bpf_mlock, after we are called. bpf_timed_out()
683 * also grabs bpf_mlock, so, if the timer has gone off and
684 * bpf_timed_out() hasn't finished, it's waiting for the
685 * lock; when this thread releases the lock, it will
686 * find the state is BPF_IDLE, and just release the
687 * lock and return.
688 */
689 return (thread_call_cancel(d->bd_thread_call));
690}
691
3e170ce0
A
692void
693bpf_acquire_d(struct bpf_d *d)
694{
695 void *lr_saved = __builtin_return_address(0);
696
5ba3f43e 697 LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);
3e170ce0
A
698
699 d->bd_refcnt += 1;
700
701 d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved;
702 d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST;
703}
704
705void
706bpf_release_d(struct bpf_d *d)
707{
708 void *lr_saved = __builtin_return_address(0);
709
5ba3f43e 710 LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);
3e170ce0
A
711
712 if (d->bd_refcnt <= 0)
713 panic("%s: %p refcnt <= 0", __func__, d);
714
715 d->bd_refcnt -= 1;
6d2010ae 716
3e170ce0
A
717 d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved;
718 d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST;
719
720 if (d->bd_refcnt == 0) {
721 /* Assert the device is detached */
722 if ((d->bd_flags & BPF_DETACHED) == 0)
723 panic("%s: %p BPF_DETACHED not set", __func__, d);
724
725 _FREE(d, M_DEVBUF);
726 }
727}
6d2010ae 728
1c79356b
A
729/*
730 * Open ethernet device. Returns ENXIO for illegal minor device number,
731 * EBUSY if file is open by another process.
732 */
733/* ARGSUSED */
2d21ac55 734int
b0d623f7 735bpfopen(dev_t dev, int flags, __unused int fmt,
2d21ac55 736 __unused struct proc *p)
1c79356b 737{
2d21ac55 738 struct bpf_d *d;
1c79356b 739
2d21ac55
A
740 lck_mtx_lock(bpf_mlock);
741 if ((unsigned int) minor(dev) >= nbpfilter) {
742 lck_mtx_unlock(bpf_mlock);
1c79356b 743 return (ENXIO);
2d21ac55 744 }
91447636
A
745 /*
746 * New device nodes are created on demand when opening the last one.
747 * The programming model is for processes to loop on the minor starting at 0
748 * as long as EBUSY is returned. The loop stops when either the open succeeds or
 749 * an error other than EBUSY is returned. That means that bpf_make_dev_t() must
750 * block all processes that are opening the last node. If not all
751 * processes are blocked, they could unexpectedly get ENOENT and abort their
752 * opening loop.
753 */
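/*
 * Userland sketch of that opening loop (illustrative assumption, error
 * handling trimmed):
 *
 *     int fd = -1;
 *     for (int i = 0; fd < 0; i++) {
 *         char dev[16];
 *         snprintf(dev, sizeof (dev), "/dev/bpf%d", i);
 *         fd = open(dev, O_RDWR);
 *         if (fd < 0 && errno != EBUSY)
 *             break;      // a real failure, stop probing minors
 *     }
 */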
754 if ((unsigned int) minor(dev) == (nbpfilter - 1))
755 bpf_make_dev_t(major(dev));
9bccf70c 756
1c79356b 757 /*
9bccf70c 758 * Each minor can be opened by only one process. If the requested
1c79356b 759 * minor is in use, return EBUSY.
91447636
A
760 *
761 * Important: bpfopen() and bpfclose() have to check and set the status of a device
 762 * in the same locking context, otherwise the device may be leaked because the vnode use count
 763 * will be unexpectedly greater than 1 when close() is called.
1c79356b 764 */
2d21ac55 765 if (bpf_dtab[minor(dev)] == 0) {
91447636 766 bpf_dtab[minor(dev)] = (void *)1; /* Mark opening */
2d21ac55
A
767 } else {
768 lck_mtx_unlock(bpf_mlock);
91447636 769 return (EBUSY);
2d21ac55 770 }
3e170ce0
A
771 d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF,
772 M_WAIT | M_ZERO);
91447636
A
773 if (d == NULL) {
774 /* this really is a catastrophic failure */
775 printf("bpfopen: malloc bpf_d failed\n");
2d21ac55
A
776 bpf_dtab[minor(dev)] = NULL;
777 lck_mtx_unlock(bpf_mlock);
91447636 778 return ENOMEM;
1c79356b 779 }
3e170ce0 780
91447636 781 /* Mark "in use" and do most initialization. */
3e170ce0 782 bpf_acquire_d(d);
1c79356b
A
783 d->bd_bufsize = bpf_bufsize;
784 d->bd_sig = SIGIO;
9bccf70c 785 d->bd_seesent = 1;
b0d623f7 786 d->bd_oflags = flags;
6d2010ae 787 d->bd_state = BPF_IDLE;
316670eb 788 d->bd_traffic_class = SO_TC_BE;
3e170ce0 789 d->bd_flags |= BPF_DETACHED;
fe8ab488
A
790 if (bpf_wantpktap)
791 d->bd_flags |= BPF_WANT_PKTAP;
792 else
793 d->bd_flags &= ~BPF_WANT_PKTAP;
3e170ce0 794 d->bd_thread_call = thread_call_allocate(bpf_timed_out, d);
6d2010ae
A
795 if (d->bd_thread_call == NULL) {
796 printf("bpfopen: malloc thread call failed\n");
797 bpf_dtab[minor(dev)] = NULL;
3e170ce0 798 bpf_release_d(d);
6d2010ae 799 lck_mtx_unlock(bpf_mlock);
3e170ce0
A
800
801 return (ENOMEM);
6d2010ae 802 }
2d21ac55
A
803#if CONFIG_MACF_NET
804 mac_bpfdesc_label_init(d);
805 mac_bpfdesc_label_associate(kauth_cred_get(), d);
806#endif
91447636 807 bpf_dtab[minor(dev)] = d; /* Mark opened */
2d21ac55 808 lck_mtx_unlock(bpf_mlock);
55e303ae 809
1c79356b
A
810 return (0);
811}
812
813/*
814 * Close the descriptor by detaching it from its interface,
815 * deallocating its buffers, and marking it free.
816 */
817/* ARGSUSED */
2d21ac55
A
818int
819bpfclose(dev_t dev, __unused int flags, __unused int fmt,
820 __unused struct proc *p)
1c79356b 821{
2d21ac55
A
822 struct bpf_d *d;
823
824 /* Take BPF lock to ensure no other thread is using the device */
825 lck_mtx_lock(bpf_mlock);
1c79356b 826
55e303ae 827 d = bpf_dtab[minor(dev)];
2d21ac55
A
828 if (d == 0 || d == (void *)1) {
829 lck_mtx_unlock(bpf_mlock);
91447636 830 return (ENXIO);
3e170ce0
A
831 }
832
833 /*
 834 * Other threads may call bpf_detachd() if we drop the bpf_mlock
835 */
836 d->bd_flags |= BPF_CLOSING;
837
838 if (bpf_debug != 0)
839 printf("%s: %llx\n",
840 __func__, (uint64_t)VM_KERNEL_ADDRPERM(d));
841
91447636 842 bpf_dtab[minor(dev)] = (void *)1; /* Mark closing */
55e303ae 843
6d2010ae
A
844 /*
845 * Deal with any in-progress timeouts.
846 */
847 switch (d->bd_state) {
848 case BPF_IDLE:
849 /*
850 * Not waiting for a timeout, and no timeout happened.
851 */
852 break;
853
854 case BPF_WAITING:
855 /*
856 * Waiting for a timeout.
857 * Cancel any timer that has yet to go off,
858 * and mark the state as "closing".
859 * Then drop the lock to allow any timers that
860 * *have* gone off to run to completion, and wait
861 * for them to finish.
862 */
863 if (!bpf_stop_timer(d)) {
864 /*
865 * There was no pending call, so the call must
866 * have been in progress. Wait for the call to
867 * complete; we have to drop the lock while
 868 * waiting, to let the in-progress call complete.
869 */
870 d->bd_state = BPF_DRAINING;
871 while (d->bd_state == BPF_DRAINING)
872 msleep((caddr_t)d, bpf_mlock, PRINET,
873 "bpfdraining", NULL);
874 }
875 d->bd_state = BPF_IDLE;
876 break;
877
878 case BPF_TIMED_OUT:
879 /*
880 * Timer went off, and the timeout routine finished.
881 */
882 d->bd_state = BPF_IDLE;
883 break;
884
885 case BPF_DRAINING:
886 /*
887 * Another thread is blocked on a close waiting for
888 * a timeout to finish.
889 * This "shouldn't happen", as the first thread to enter
890 * bpfclose() will set bpf_dtab[minor(dev)] to 1, and
891 * all subsequent threads should see that and fail with
892 * ENXIO.
893 */
894 panic("Two threads blocked in a BPF close");
895 break;
896 }
897
1c79356b 898 if (d->bd_bif)
3e170ce0 899 bpf_detachd(d, 1);
0b4e3aa0 900 selthreadclear(&d->bd_sel);
2d21ac55
A
901#if CONFIG_MACF_NET
902 mac_bpfdesc_label_destroy(d);
903#endif
6d2010ae 904 thread_call_free(d->bd_thread_call);
39236c6e
A
905
906 while (d->bd_hbuf_read)
907 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
908
1c79356b 909 bpf_freed(d);
91447636 910
2d21ac55
A
911 /* Mark free in same context as bpfopen comes to check */
912 bpf_dtab[minor(dev)] = NULL; /* Mark closed */
3e170ce0
A
913
914 bpf_release_d(d);
915
91447636 916 lck_mtx_unlock(bpf_mlock);
3e170ce0 917
1c79356b
A
918 return (0);
919}
920
1c79356b 921
91447636 922#define BPF_SLEEP bpf_sleep
1c79356b 923
91447636
A
924static int
925bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
1c79356b 926{
6d2010ae 927 u_int64_t abstime = 0;
1c79356b 928
6d2010ae
A
 929 if (timo)
930 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
91447636 931
6d2010ae 932 return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime);
1c79356b 933}
1c79356b
A
934
935/*
936 * Rotate the packet buffers in descriptor d. Move the store buffer
937 * into the hold slot, and the free buffer into the store slot.
938 * Zero the length of the new store buffer.
939 */
940#define ROTATE_BUFFERS(d) \
39236c6e
A
941 if (d->bd_hbuf_read) \
942 panic("rotating bpf buffers during read"); \
1c79356b
A
943 (d)->bd_hbuf = (d)->bd_sbuf; \
944 (d)->bd_hlen = (d)->bd_slen; \
3e170ce0 945 (d)->bd_hcnt = (d)->bd_scnt; \
1c79356b
A
946 (d)->bd_sbuf = (d)->bd_fbuf; \
947 (d)->bd_slen = 0; \
3e170ce0 948 (d)->bd_scnt = 0; \
2d21ac55 949 (d)->bd_fbuf = NULL;
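/*
 * Rotation at a glance (commentary, not original code):
 *
 *     before: sbuf = filling, hbuf = NULL, fbuf = spare
 *     after:  sbuf = old fbuf (empty), hbuf = old sbuf (full), fbuf = NULL
 *
 * bpfread() returns the hold buffer to the pool as the new free buffer
 * once it has been copied out to userland.
 */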
1c79356b
A
950/*
951 * bpfread - read next chunk of packets from buffers
952 */
2d21ac55 953int
91447636 954bpfread(dev_t dev, struct uio *uio, int ioflag)
1c79356b 955{
2d21ac55 956 struct bpf_d *d;
39236c6e
A
957 caddr_t hbuf;
958 int timed_out, hbuf_len;
1c79356b 959 int error;
fe8ab488 960 int flags;
2d21ac55
A
961
962 lck_mtx_lock(bpf_mlock);
1c79356b 963
55e303ae 964 d = bpf_dtab[minor(dev)];
3e170ce0 965 if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
2d21ac55 966 lck_mtx_unlock(bpf_mlock);
91447636 967 return (ENXIO);
2d21ac55 968 }
55e303ae 969
3e170ce0
A
970 bpf_acquire_d(d);
971
1c79356b
A
972 /*
 973 * Restrict application to use a buffer the same size as
 974 * the kernel buffers.
975 */
b0d623f7 976 if (uio_resid(uio) != d->bd_bufsize) {
3e170ce0 977 bpf_release_d(d);
91447636 978 lck_mtx_unlock(bpf_mlock);
1c79356b
A
979 return (EINVAL);
980 }
6d2010ae
A
981
982 if (d->bd_state == BPF_WAITING)
983 bpf_stop_timer(d);
984
985 timed_out = (d->bd_state == BPF_TIMED_OUT);
986 d->bd_state = BPF_IDLE;
1c79356b 987
39236c6e
A
988 while (d->bd_hbuf_read)
989 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
3e170ce0
A
990
991 if ((d->bd_flags & BPF_CLOSING) != 0) {
992 bpf_release_d(d);
39236c6e
A
993 lck_mtx_unlock(bpf_mlock);
994 return (ENXIO);
995 }
1c79356b
A
996 /*
997 * If the hold buffer is empty, then do a timed sleep, which
998 * ends when the timeout expires or when enough packets
999 * have arrived to fill the store buffer.
1000 */
1001 while (d->bd_hbuf == 0) {
6d2010ae
A
1002 if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY))
1003 && d->bd_slen != 0) {
1c79356b 1004 /*
6d2010ae
A
1005 * We're in immediate mode, or are reading
1006 * in non-blocking mode, or a timer was
1007 * started before the read (e.g., by select()
1008 * or poll()) and has expired and a packet(s)
1009 * either arrived since the previous
1c79356b
A
1010 * read or arrived while we were asleep.
1011 * Rotate the buffers and return what's here.
1012 */
1013 ROTATE_BUFFERS(d);
1014 break;
1015 }
9bccf70c
A
1016
1017 /*
1018 * No data is available, check to see if the bpf device
1019 * is still pointed at a real interface. If not, return
1020 * ENXIO so that the userland process knows to rebind
1021 * it before using it again.
1022 */
1023 if (d->bd_bif == NULL) {
3e170ce0 1024 bpf_release_d(d);
91447636 1025 lck_mtx_unlock(bpf_mlock);
9bccf70c
A
1026 return (ENXIO);
1027 }
b0d623f7 1028 if (ioflag & IO_NDELAY) {
3e170ce0 1029 bpf_release_d(d);
b0d623f7
A
1030 lck_mtx_unlock(bpf_mlock);
1031 return (EWOULDBLOCK);
1032 }
1033 error = BPF_SLEEP(d, PRINET|PCATCH, "bpf",
1034 d->bd_rtout);
2d21ac55
A
1035 /*
1036 * Make sure device is still opened
1037 */
3e170ce0
A
1038 if ((d->bd_flags & BPF_CLOSING) != 0) {
1039 bpf_release_d(d);
2d21ac55
A
1040 lck_mtx_unlock(bpf_mlock);
1041 return (ENXIO);
1042 }
39236c6e
A
1043
1044 while (d->bd_hbuf_read)
1045 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
1046
3e170ce0
A
1047 if ((d->bd_flags & BPF_CLOSING) != 0) {
1048 bpf_release_d(d);
39236c6e
A
1049 lck_mtx_unlock(bpf_mlock);
1050 return (ENXIO);
1051 }
fe8ab488 1052
1c79356b 1053 if (error == EINTR || error == ERESTART) {
5ba3f43e 1054 if (d->bd_hbuf != NULL) {
fe8ab488
A
1055 /*
1056 * Because we msleep, the hold buffer might
1057 * be filled when we wake up. Avoid rotating
1058 * in this case.
1059 */
1060 break;
1061 }
5ba3f43e 1062 if (d->bd_slen != 0) {
39236c6e
A
1063 /*
1064 * Sometimes we may be interrupted often and
1065 * the sleep above will not timeout.
1066 * Regardless, we should rotate the buffers
1067 * if there's any new data pending and
1068 * return it.
1069 */
1070 ROTATE_BUFFERS(d);
1071 break;
1072 }
3e170ce0 1073 bpf_release_d(d);
91447636 1074 lck_mtx_unlock(bpf_mlock);
5ba3f43e
A
1075 if (error == ERESTART) {
1076 printf("%s: %llx ERESTART to EINTR\n",
1077 __func__, (uint64_t)VM_KERNEL_ADDRPERM(d));
1078 error = EINTR;
1079 }
1c79356b
A
1080 return (error);
1081 }
1082 if (error == EWOULDBLOCK) {
1083 /*
1084 * On a timeout, return what's in the buffer,
1085 * which may be nothing. If there is something
1086 * in the store buffer, we can rotate the buffers.
1087 */
1088 if (d->bd_hbuf)
1089 /*
1090 * We filled up the buffer in between
1091 * getting the timeout and arriving
1092 * here, so we don't need to rotate.
1093 */
1094 break;
1095
1096 if (d->bd_slen == 0) {
3e170ce0 1097 bpf_release_d(d);
91447636 1098 lck_mtx_unlock(bpf_mlock);
1c79356b
A
1099 return (0);
1100 }
1101 ROTATE_BUFFERS(d);
1102 break;
1103 }
1104 }
1105 /*
1106 * At this point, we know we have something in the hold slot.
1107 */
1c79356b 1108
fe8ab488
A
1109 /*
 1110 * Mark the hold buffer as being read, so we do not
 1111 * rotate the buffers until the hold buffer
 1112 * read is complete; this also avoids issues resulting
 1113 * from page faults during disk sleep (<rdar://problem/13436396>).
1114 */
1115 d->bd_hbuf_read = 1;
1116 hbuf = d->bd_hbuf;
1117 hbuf_len = d->bd_hlen;
1118 flags = d->bd_flags;
1119 lck_mtx_unlock(bpf_mlock);
1120
39236c6e 1121#ifdef __APPLE__
316670eb
A
1122 /*
1123 * Before we move data to userland, we fill out the extended
1124 * header fields.
1125 */
fe8ab488 1126 if (flags & BPF_EXTENDED_HDR) {
316670eb
A
1127 char *p;
1128
fe8ab488
A
1129 p = hbuf;
1130 while (p < hbuf + hbuf_len) {
316670eb 1131 struct bpf_hdr_ext *ehp;
39236c6e
A
1132 uint32_t flowid;
1133 struct so_procinfo soprocinfo;
1134 int found = 0;
316670eb
A
1135
1136 ehp = (struct bpf_hdr_ext *)(void *)p;
39236c6e
A
1137 if ((flowid = ehp->bh_flowid)) {
1138 if (ehp->bh_proto == IPPROTO_TCP)
1139 found = inp_findinpcb_procinfo(&tcbinfo,
1140 flowid, &soprocinfo);
1141 else if (ehp->bh_proto == IPPROTO_UDP)
1142 found = inp_findinpcb_procinfo(&udbinfo,
1143 flowid, &soprocinfo);
fe8ab488 1144 if (found == 1) {
39236c6e
A
1145 ehp->bh_pid = soprocinfo.spi_pid;
1146 proc_name(ehp->bh_pid, ehp->bh_comm, MAXCOMLEN);
316670eb 1147 }
39236c6e 1148 ehp->bh_flowid = 0;
316670eb 1149 }
5ba3f43e 1150
fe8ab488
A
1151 if (flags & BPF_FINALIZE_PKTAP) {
1152 struct pktap_header *pktaphdr;
1153
1154 pktaphdr = (struct pktap_header *)(void *)
1155 (p + BPF_WORDALIGN(ehp->bh_hdrlen));
1156
1157 if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP)
1158 pktap_finalize_proc_info(pktaphdr);
1159
1160 if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
1161 ehp->bh_tstamp.tv_sec =
1162 pktaphdr->pth_tstamp.tv_sec;
1163 ehp->bh_tstamp.tv_usec =
1164 pktaphdr->pth_tstamp.tv_usec;
1165 }
1166 }
316670eb
A
1167 p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen);
1168 }
fe8ab488
A
1169 } else if (flags & BPF_FINALIZE_PKTAP) {
1170 char *p;
1171
1172 p = hbuf;
1173 while (p < hbuf + hbuf_len) {
1174 struct bpf_hdr *hp;
1175 struct pktap_header *pktaphdr;
1176
1177 hp = (struct bpf_hdr *)(void *)p;
1178 pktaphdr = (struct pktap_header *)(void *)
1179 (p + BPF_WORDALIGN(hp->bh_hdrlen));
1180
1181 if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP)
1182 pktap_finalize_proc_info(pktaphdr);
1183
1184 if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
1185 hp->bh_tstamp.tv_sec =
1186 pktaphdr->pth_tstamp.tv_sec;
1187 hp->bh_tstamp.tv_usec =
1188 pktaphdr->pth_tstamp.tv_usec;
1189 }
1190
1191 p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
1192 }
316670eb 1193 }
39236c6e 1194#endif
39236c6e 1195
1c79356b
A
1196 /*
1197 * Move data from hold buffer into user space.
1198 * We know the entire buffer is transferred since
1199 * we checked above that the read buffer is bpf_bufsize bytes.
1200 */
39236c6e
A
1201 error = UIOMOVE(hbuf, hbuf_len, UIO_READ, uio);
1202
1203 lck_mtx_lock(bpf_mlock);
1204 /*
1205 * Make sure device is still opened
1206 */
3e170ce0
A
1207 if ((d->bd_flags & BPF_CLOSING) != 0) {
1208 bpf_release_d(d);
39236c6e
A
1209 lck_mtx_unlock(bpf_mlock);
1210 return (ENXIO);
1211 }
1212
1213 d->bd_hbuf_read = 0;
1c79356b 1214 d->bd_fbuf = d->bd_hbuf;
2d21ac55 1215 d->bd_hbuf = NULL;
1c79356b 1216 d->bd_hlen = 0;
3e170ce0 1217 d->bd_hcnt = 0;
39236c6e 1218 wakeup((caddr_t)d);
3e170ce0
A
1219
1220 bpf_release_d(d);
91447636 1221 lck_mtx_unlock(bpf_mlock);
1c79356b 1222 return (error);
39236c6e 1223
1c79356b
A
1224}
1225
1226
1227/*
1228 * If there are processes sleeping on this descriptor, wake them up.
1229 */
91447636
A
1230static void
1231bpf_wakeup(struct bpf_d *d)
1c79356b 1232{
6d2010ae
A
1233 if (d->bd_state == BPF_WAITING) {
1234 bpf_stop_timer(d);
1235 d->bd_state = BPF_IDLE;
1236 }
1c79356b
A
1237 wakeup((caddr_t)d);
1238 if (d->bd_async && d->bd_sig && d->bd_sigio)
2d21ac55 1239 pgsigio(d->bd_sigio, d->bd_sig);
1c79356b 1240
1c79356b 1241 selwakeup(&d->bd_sel);
3e170ce0
A
1242 if ((d->bd_flags & BPF_KNOTE))
1243 KNOTE(&d->bd_sel.si_note, 1);
1c79356b
A
1244}
1245
6d2010ae
A
1246
1247static void
1248bpf_timed_out(void *arg, __unused void *dummy)
1249{
1250 struct bpf_d *d = (struct bpf_d *)arg;
1251
1252 lck_mtx_lock(bpf_mlock);
1253 if (d->bd_state == BPF_WAITING) {
1254 /*
1255 * There's a select or kqueue waiting for this; if there's
1256 * now stuff to read, wake it up.
1257 */
1258 d->bd_state = BPF_TIMED_OUT;
1259 if (d->bd_slen != 0)
1260 bpf_wakeup(d);
1261 } else if (d->bd_state == BPF_DRAINING) {
1262 /*
1263 * A close is waiting for this to finish.
1264 * Mark it as finished, and wake the close up.
1265 */
1266 d->bd_state = BPF_IDLE;
1267 bpf_wakeup(d);
1268 }
1269 lck_mtx_unlock(bpf_mlock);
1270}
1271
1272
1273
1274
1275
55e303ae
A
1276/* keep in sync with bpf_movein above: */
1277#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
1278
2d21ac55 1279int
91447636 1280bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag)
1c79356b 1281{
2d21ac55 1282 struct bpf_d *d;
1c79356b 1283 struct ifnet *ifp;
2d21ac55 1284 struct mbuf *m = NULL;
91447636 1285 int error;
55e303ae 1286 char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
b0d623f7 1287 int datlen = 0;
39236c6e
A
1288 int bif_dlt;
1289 int bd_hdrcmplt;
1c79356b 1290
2d21ac55
A
1291 lck_mtx_lock(bpf_mlock);
1292
55e303ae 1293 d = bpf_dtab[minor(dev)];
3e170ce0 1294 if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
2d21ac55 1295 lck_mtx_unlock(bpf_mlock);
91447636 1296 return (ENXIO);
2d21ac55 1297 }
3e170ce0
A
1298
1299 bpf_acquire_d(d);
1300
1c79356b 1301 if (d->bd_bif == 0) {
3e170ce0 1302 bpf_release_d(d);
91447636 1303 lck_mtx_unlock(bpf_mlock);
2d21ac55 1304 return (ENXIO);
1c79356b
A
1305 }
1306
1307 ifp = d->bd_bif->bif_ifp;
1308
6d2010ae 1309 if ((ifp->if_flags & IFF_UP) == 0) {
3e170ce0 1310 bpf_release_d(d);
6d2010ae
A
1311 lck_mtx_unlock(bpf_mlock);
1312 return (ENETDOWN);
1313 }
b0d623f7 1314 if (uio_resid(uio) == 0) {
3e170ce0 1315 bpf_release_d(d);
91447636 1316 lck_mtx_unlock(bpf_mlock);
2d21ac55 1317 return (0);
1c79356b 1318 }
55e303ae 1319 ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf);
6d2010ae 1320
316670eb
A
1321 /*
1322 * fix for PR-6849527
 1323 * getting variables onto the stack before dropping the lock for bpf_movein()
1324 */
1325 bif_dlt = (int)d->bd_bif->bif_dlt;
1326 bd_hdrcmplt = d->bd_hdrcmplt;
1327
6d2010ae 1328 /* bpf_movein allocating mbufs; drop lock */
316670eb 1329 lck_mtx_unlock(bpf_mlock);
6d2010ae
A
1330
1331 error = bpf_movein(uio, bif_dlt, &m,
316670eb
A
1332 bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf,
1333 &datlen);
1334
3e170ce0
A
1335 /* take the lock again */
1336 lck_mtx_lock(bpf_mlock);
316670eb 1337 if (error) {
3e170ce0
A
1338 bpf_release_d(d);
1339 lck_mtx_unlock(bpf_mlock);
2d21ac55 1340 return (error);
1c79356b
A
1341 }
1342
3e170ce0
A
1343 /* verify the device is still open */
1344 if ((d->bd_flags & BPF_CLOSING) != 0) {
1345 bpf_release_d(d);
91447636 1346 lck_mtx_unlock(bpf_mlock);
2d21ac55 1347 m_freem(m);
6d2010ae 1348 return (ENXIO);
2d21ac55 1349 }
6d2010ae
A
1350
1351 if (d->bd_bif == NULL) {
3e170ce0 1352 bpf_release_d(d);
6d2010ae
A
1353 lck_mtx_unlock(bpf_mlock);
1354 m_free(m);
1355 return (ENXIO);
1356 }
1357
1358 if ((unsigned)datlen > ifp->if_mtu) {
3e170ce0 1359 bpf_release_d(d);
2d21ac55
A
1360 lck_mtx_unlock(bpf_mlock);
1361 m_freem(m);
6d2010ae 1362 return (EMSGSIZE);
1c79356b
A
1363 }
1364
6d2010ae 1365
2d21ac55
A
1366#if CONFIG_MACF_NET
1367 mac_mbuf_label_associate_bpfdesc(d, m);
1368#endif
316670eb
A
1369
1370 bpf_set_packet_service_class(m, d->bd_traffic_class);
1371
91447636
A
1372 lck_mtx_unlock(bpf_mlock);
1373
3e170ce0
A
1374 /*
1375 * The driver frees the mbuf.
1376 */
55e303ae 1377 if (d->bd_hdrcmplt) {
2d21ac55
A
1378 if (d->bd_bif->bif_send)
1379 error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m);
1380 else
316670eb
A
1381 error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL);
1382 } else {
1383 error = dlil_output(ifp, PF_INET, m, NULL,
1384 (struct sockaddr *)dst_buf, 0, NULL);
91447636 1385 }
6d2010ae 1386
3e170ce0
A
1387 lck_mtx_lock(bpf_mlock);
1388 bpf_release_d(d);
1389 lck_mtx_unlock(bpf_mlock);
1390
1c79356b
A
1391 return (error);
1392}
1393
1394/*
1395 * Reset a descriptor by flushing its packet buffer and clearing the
2d21ac55 1396 * receive and drop counts.
1c79356b
A
1397 */
1398static void
91447636 1399reset_d(struct bpf_d *d)
1c79356b 1400{
39236c6e
A
1401 if (d->bd_hbuf_read)
1402 panic("resetting buffers during read");
1403
1c79356b
A
1404 if (d->bd_hbuf) {
1405 /* Free the hold buffer. */
1406 d->bd_fbuf = d->bd_hbuf;
2d21ac55 1407 d->bd_hbuf = NULL;
1c79356b
A
1408 }
1409 d->bd_slen = 0;
1410 d->bd_hlen = 0;
3e170ce0
A
1411 d->bd_scnt = 0;
1412 d->bd_hcnt = 0;
1c79356b
A
1413 d->bd_rcount = 0;
1414 d->bd_dcount = 0;
1415}
1416
1417/*
1418 * FIONREAD Check for read packet available.
1419 * SIOCGIFADDR Get interface address - convenient hook to driver.
1420 * BIOCGBLEN Get buffer len [for read()].
1421 * BIOCSETF Set ethernet read filter.
1422 * BIOCFLUSH Flush read packet buffer.
1423 * BIOCPROMISC Put interface into promiscuous mode.
1424 * BIOCGDLT Get link layer type.
1425 * BIOCGETIF Get interface name.
1426 * BIOCSETIF Set interface.
1427 * BIOCSRTIMEOUT Set read timeout.
1428 * BIOCGRTIMEOUT Get read timeout.
1429 * BIOCGSTATS Get packet stats.
1430 * BIOCIMMEDIATE Set immediate mode.
1431 * BIOCVERSION Get filter language version.
9bccf70c
A
1432 * BIOCGHDRCMPLT Get "header already complete" flag
1433 * BIOCSHDRCMPLT Set "header already complete" flag
1434 * BIOCGSEESENT Get "see packets sent" flag
1435 * BIOCSSEESENT Set "see packets sent" flag
316670eb
A
1436 * BIOCSETTC Set traffic class.
1437 * BIOCGETTC Get traffic class.
1438 * BIOCSEXTHDR Set "extended header" flag
3e170ce0
A
1439 * BIOCSHEADDROP Drop head of the buffer if user is not reading
1440 * BIOCGHEADDROP Get "head-drop" flag
1c79356b
A
1441 */
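/*
 * Minimal userland capture sketch tying these ioctls together
 * (illustrative assumption; "en0" is a hypothetical interface and error
 * handling is trimmed). Note bpfread() requires read() to use exactly
 * the buffer size reported by BIOCGBLEN:
 *
 *     int fd = open("/dev/bpf0", O_RDWR);
 *     struct ifreq ifr;
 *     strlcpy(ifr.ifr_name, "en0", sizeof (ifr.ifr_name));
 *     ioctl(fd, BIOCSETIF, &ifr);
 *     u_int on = 1, blen;
 *     ioctl(fd, BIOCIMMEDIATE, &on);
 *     ioctl(fd, BIOCGBLEN, &blen);
 *     char *buf = malloc(blen);
 *     ssize_t n = read(fd, buf, blen);
 *     for (char *p = buf; p < buf + n; ) {
 *         struct bpf_hdr *hp = (struct bpf_hdr *)(void *)p;
 *         // captured bytes start at p + hp->bh_hdrlen, len hp->bh_caplen
 *         p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
 *     }
 */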
1442/* ARGSUSED */
9bccf70c 1443int
2d21ac55 1444bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags,
b0d623f7 1445 struct proc *p)
1c79356b 1446{
2d21ac55 1447 struct bpf_d *d;
fe8ab488
A
1448 int error = 0;
1449 u_int int_arg;
316670eb 1450 struct ifreq ifr;
2d21ac55
A
1451
1452 lck_mtx_lock(bpf_mlock);
1c79356b 1453
55e303ae 1454 d = bpf_dtab[minor(dev)];
3e170ce0 1455 if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
2d21ac55 1456 lck_mtx_unlock(bpf_mlock);
91447636 1457 return (ENXIO);
2d21ac55 1458 }
1c79356b 1459
3e170ce0
A
1460 bpf_acquire_d(d);
1461
6d2010ae
A
1462 if (d->bd_state == BPF_WAITING)
1463 bpf_stop_timer(d);
1464 d->bd_state = BPF_IDLE;
1465
1c79356b
A
1466 switch (cmd) {
1467
1468 default:
1469 error = EINVAL;
1470 break;
1471
1472 /*
1473 * Check for read packet available.
1474 */
316670eb 1475 case FIONREAD: /* int */
1c79356b
A
1476 {
1477 int n;
1478
1c79356b 1479 n = d->bd_slen;
39236c6e 1480 if (d->bd_hbuf && d->bd_hbuf_read == 0)
1c79356b 1481 n += d->bd_hlen;
1c79356b 1482
316670eb 1483 bcopy(&n, addr, sizeof (n));
1c79356b
A
1484 break;
1485 }
1486
316670eb 1487 case SIOCGIFADDR: /* struct ifreq */
1c79356b
A
1488 {
1489 struct ifnet *ifp;
1490
1491 if (d->bd_bif == 0)
1492 error = EINVAL;
1493 else {
1494 ifp = d->bd_bif->bif_ifp;
2d21ac55 1495 error = ifnet_ioctl(ifp, 0, cmd, addr);
1c79356b
A
1496 }
1497 break;
1498 }
1499
1500 /*
1501 * Get buffer len [for read()].
1502 */
316670eb
A
1503 case BIOCGBLEN: /* u_int */
1504 bcopy(&d->bd_bufsize, addr, sizeof (u_int));
1c79356b
A
1505 break;
1506
1507 /*
1508 * Set buffer length.
1509 */
316670eb 1510 case BIOCSBLEN: /* u_int */
1c79356b
A
1511 if (d->bd_bif != 0)
1512 error = EINVAL;
1513 else {
316670eb
A
1514 u_int size;
1515
1516 bcopy(addr, &size, sizeof (size));
1c79356b 1517
813fb2f6
A
1518 /*
 1519 * Allow a larger buffer in head-drop mode, on the
 1520 * assumption that the capture is in standby mode to
 1521 * keep a cache of recent traffic
1522 */
1523 if (d->bd_headdrop != 0 && size > 2 * bpf_maxbufsize)
1524 size = 2 * bpf_maxbufsize;
1525 else if (size > bpf_maxbufsize)
316670eb 1526 size = bpf_maxbufsize;
1c79356b 1527 else if (size < BPF_MINBUFSIZE)
316670eb
A
1528 size = BPF_MINBUFSIZE;
1529 bcopy(&size, addr, sizeof (size));
1c79356b
A
1530 d->bd_bufsize = size;
1531 }
1c79356b
A
1532 break;
1533
1534 /*
1535 * Set link layer read filter.
1536 */
39236c6e
A
1537 case BIOCSETF32:
1538 case BIOCSETFNR32: { /* struct bpf_program32 */
316670eb
A
1539 struct bpf_program32 prg32;
1540
1541 bcopy(addr, &prg32, sizeof (prg32));
1542 error = bpf_setf(d, prg32.bf_len,
3e170ce0 1543 CAST_USER_ADDR_T(prg32.bf_insns), cmd);
1c79356b 1544 break;
2d21ac55 1545 }
b0d623f7 1546
39236c6e
A
1547 case BIOCSETF64:
1548 case BIOCSETFNR64: { /* struct bpf_program64 */
316670eb
A
1549 struct bpf_program64 prg64;
1550
1551 bcopy(addr, &prg64, sizeof (prg64));
3e170ce0 1552 error = bpf_setf(d, prg64.bf_len, prg64.bf_insns, cmd);
b0d623f7
A
1553 break;
1554 }
1555
1c79356b
A
1556 /*
1557 * Flush read packet buffer.
1558 */
1559 case BIOCFLUSH:
39236c6e
A
1560 while (d->bd_hbuf_read) {
1561 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
1562 }
3e170ce0
A
1563 if ((d->bd_flags & BPF_CLOSING) != 0) {
1564 error = ENXIO;
1565 break;
1566 }
1c79356b 1567 reset_d(d);
1c79356b
A
1568 break;
1569
1570 /*
1571 * Put interface into promiscuous mode.
1572 */
1573 case BIOCPROMISC:
1574 if (d->bd_bif == 0) {
1575 /*
1576 * No interface attached yet.
1577 */
1578 error = EINVAL;
1579 break;
1580 }
1c79356b 1581 if (d->bd_promisc == 0) {
2d21ac55 1582 lck_mtx_unlock(bpf_mlock);
91447636 1583 error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
2d21ac55 1584 lck_mtx_lock(bpf_mlock);
1c79356b
A
1585 if (error == 0)
1586 d->bd_promisc = 1;
1587 }
1c79356b
A
1588 break;
1589
1590 /*
1591 * Get device parameters.
1592 */
316670eb 1593 case BIOCGDLT: /* u_int */
1c79356b
A
1594 if (d->bd_bif == 0)
1595 error = EINVAL;
1596 else
316670eb 1597 bcopy(&d->bd_bif->bif_dlt, addr, sizeof (u_int));
1c79356b
A
1598 break;
1599
2d21ac55
A
1600 /*
1601 * Get a list of supported data link types.
1602 */
316670eb 1603 case BIOCGDLTLIST: /* struct bpf_dltlist */
b0d623f7
A
1604 if (d->bd_bif == NULL) {
1605 error = EINVAL;
1606 } else {
316670eb 1607 error = bpf_getdltlist(d, addr, p);
b0d623f7
A
1608 }
1609 break;
2d21ac55
A
1610
1611 /*
1612 * Set data link type.
1613 */
316670eb
A
1614 case BIOCSDLT: /* u_int */
1615 if (d->bd_bif == NULL) {
1616 error = EINVAL;
1617 } else {
1618 u_int dlt;
1619
1620 bcopy(addr, &dlt, sizeof (dlt));
5ba3f43e
A
1621
1622 if (dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
1623 printf("BIOCSDLT downgrade DLT_PKTAP to DLT_RAW\n");
1624 dlt = DLT_RAW;
1625 }
3e170ce0 1626 error = bpf_setdlt(d, dlt);
316670eb
A
1627 }
1628 break;
2d21ac55 1629
1c79356b 1630 /*
9bccf70c 1631 * Get interface name.
1c79356b 1632 */
316670eb 1633 case BIOCGETIF: /* struct ifreq */
1c79356b
A
1634 if (d->bd_bif == 0)
1635 error = EINVAL;
9bccf70c
A
1636 else {
1637 struct ifnet *const ifp = d->bd_bif->bif_ifp;
9bccf70c 1638
316670eb 1639 snprintf(((struct ifreq *)(void *)addr)->ifr_name,
39236c6e 1640 sizeof (ifr.ifr_name), "%s", if_name(ifp));
9bccf70c 1641 }
1c79356b
A
1642 break;
1643
1644 /*
1645 * Set interface.
1646 */
316670eb 1647 case BIOCSETIF: { /* struct ifreq */
2d21ac55 1648 ifnet_t ifp;
316670eb
A
1649
1650 bcopy(addr, &ifr, sizeof (ifr));
1651 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
1652 ifp = ifunit(ifr.ifr_name);
2d21ac55
A
1653 if (ifp == NULL)
1654 error = ENXIO;
1655 else
5ba3f43e 1656 error = bpf_setif(d, ifp);
1c79356b 1657 break;
2d21ac55 1658 }
1c79356b
A
1659
1660 /*
1661 * Set read timeout.
1662 */
39236c6e 1663 case BIOCSRTIMEOUT32: { /* struct user32_timeval */
316670eb
A
1664 struct user32_timeval _tv;
1665 struct timeval tv;
b0d623f7 1666
316670eb
A
1667 bcopy(addr, &_tv, sizeof (_tv));
1668 tv.tv_sec = _tv.tv_sec;
1669 tv.tv_usec = _tv.tv_usec;
1670
1671 /*
1672 * Subtract 1 tick from tvtohz() since this isn't
1673 * a one-shot timer.
1674 */
1675 if ((error = itimerfix(&tv)) == 0)
1676 d->bd_rtout = tvtohz(&tv) - 1;
1677 break;
1678 }
1679
39236c6e 1680 case BIOCSRTIMEOUT64: { /* struct user64_timeval */
316670eb
A
1681 struct user64_timeval _tv;
1682 struct timeval tv;
1683
1684 bcopy(addr, &_tv, sizeof (_tv));
1685 tv.tv_sec = _tv.tv_sec;
1686 tv.tv_usec = _tv.tv_usec;
1687
1688 /*
1689 * Subtract 1 tick from tvtohz() since this isn't
1690 * a one-shot timer.
1691 */
1692 if ((error = itimerfix(&tv)) == 0)
1693 d->bd_rtout = tvtohz(&tv) - 1;
1694 break;
1695 }
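/*
 * Worked example (commentary): with hz = 100, a requested timeout of
 * one second makes tvtohz() return roughly 101 ticks (it rounds up and
 * adds a tick so a one-shot sleep lasts at least the interval);
 * subtracting 1 leaves the ~100 ticks appropriate for this repeating
 * timer.
 */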
1c79356b 1696
39236c6e 1697 /*
1c79356b
A
1698 * Get read timeout.
1699 */
316670eb
A
1700 case BIOCGRTIMEOUT32: { /* struct user32_timeval */
1701 struct user32_timeval tv;
1c79356b 1702
316670eb
A
1703 bzero(&tv, sizeof (tv));
1704 tv.tv_sec = d->bd_rtout / hz;
1705 tv.tv_usec = (d->bd_rtout % hz) * tick;
1706 bcopy(&tv, addr, sizeof (tv));
1707 break;
1708 }
6d2010ae 1709
316670eb
A
1710 case BIOCGRTIMEOUT64: { /* struct user64_timeval */
1711 struct user64_timeval tv;
6d2010ae 1712
316670eb
A
1713 bzero(&tv, sizeof (tv));
1714 tv.tv_sec = d->bd_rtout / hz;
1715 tv.tv_usec = (d->bd_rtout % hz) * tick;
1716 bcopy(&tv, addr, sizeof (tv));
1717 break;
1718 }
1c79356b
A
1719
1720 /*
1721 * Get packet stats.
1722 */
316670eb
A
1723 case BIOCGSTATS: { /* struct bpf_stat */
1724 struct bpf_stat bs;
1c79356b 1725
316670eb
A
1726 bzero(&bs, sizeof (bs));
1727 bs.bs_recv = d->bd_rcount;
1728 bs.bs_drop = d->bd_dcount;
1729 bcopy(&bs, addr, sizeof (bs));
1730 break;
1731 }
1c79356b
A
1732
1733 /*
1734 * Set immediate mode.
1735 */
316670eb 1736 case BIOCIMMEDIATE: /* u_int */
3e170ce0 1737 d->bd_immediate = *(u_int *)(void *)addr;
1c79356b
A
1738 break;
1739
316670eb
A
1740 case BIOCVERSION: { /* struct bpf_version */
1741 struct bpf_version bv;
1c79356b 1742
316670eb
A
1743 bzero(&bv, sizeof (bv));
1744 bv.bv_major = BPF_MAJOR_VERSION;
1745 bv.bv_minor = BPF_MINOR_VERSION;
1746 bcopy(&bv, addr, sizeof (bv));
1747 break;
1748 }
1c79356b 1749
9bccf70c
A
1750 /*
1751 * Get "header already complete" flag
1752 */
316670eb
A
1753 case BIOCGHDRCMPLT: /* u_int */
1754 bcopy(&d->bd_hdrcmplt, addr, sizeof (u_int));
9bccf70c
A
1755 break;
1756
1757 /*
1758 * Set "header already complete" flag
1759 */
316670eb
A
1760 case BIOCSHDRCMPLT: /* u_int */
1761 bcopy(addr, &int_arg, sizeof (int_arg));
1762 d->bd_hdrcmplt = int_arg ? 1 : 0;
9bccf70c
A
1763 break;
1764
1765 /*
1766 * Get "see sent packets" flag
1767 */
316670eb
A
1768 case BIOCGSEESENT: /* u_int */
1769 bcopy(&d->bd_seesent, addr, sizeof (u_int));
9bccf70c
A
1770 break;
1771
1772 /*
1773 * Set "see sent packets" flag
1774 */
316670eb
A
1775 case BIOCSSEESENT: /* u_int */
1776 bcopy(addr, &d->bd_seesent, sizeof (u_int));
1777 break;
1778
1779 /*
1780 * Set traffic service class
1781 */
1782 case BIOCSETTC: { /* int */
1783 int tc;
1784
1785 bcopy(addr, &tc, sizeof (int));
1786 error = bpf_set_traffic_class(d, tc);
9bccf70c 1787 break;
316670eb 1788 }
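/*
 * Illustrative use (assumption, not original code): a writer can tag
 * the packets it injects with a service class, validated by
 * bpf_set_traffic_class() (SO_TC_BE is the default):
 *
 *     int tc = SO_TC_VO;      // voice class, from <sys/socket.h>
 *     ioctl(fd, BIOCSETTC, &tc);
 */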
9bccf70c 1789
316670eb
A
1790 /*
1791 * Get traffic service class
1792 */
1793 case BIOCGETTC: /* int */
1794 bcopy(&d->bd_traffic_class, addr, sizeof (int));
1c79356b
A
1795 break;
1796
316670eb
A
1797 case FIONBIO: /* Non-blocking I/O; int */
1798 break;
1799
1800 case FIOASYNC: /* Send signal on receive packets; int */
1801 bcopy(addr, &d->bd_async, sizeof (int));
1c79356b 1802 break;
9bccf70c 1803#ifndef __APPLE__
1c79356b
A
1804 case FIOSETOWN:
1805 error = fsetown(*(int *)addr, &d->bd_sigio);
1806 break;
1807
1808 case FIOGETOWN:
1809 *(int *)addr = fgetown(d->bd_sigio);
1810 break;
1811
1812 /* This is deprecated, FIOSETOWN should be used instead. */
1813 case TIOCSPGRP:
1814 error = fsetown(-(*(int *)addr), &d->bd_sigio);
1815 break;
1816
1817 /* This is deprecated, FIOGETOWN should be used instead. */
1818 case TIOCGPGRP:
1819 *(int *)addr = -fgetown(d->bd_sigio);
1820 break;
1821#endif
316670eb
A
1822 case BIOCSRSIG: { /* Set receive signal; u_int */
1823 u_int sig;
1c79356b 1824
316670eb 1825 bcopy(addr, &sig, sizeof (u_int));
1c79356b 1826
316670eb
A
1827 if (sig >= NSIG)
1828 error = EINVAL;
1829 else
1830 d->bd_sig = sig;
1c79356b
A
1831 break;
1832 }
316670eb
A
1833 case BIOCGRSIG: /* u_int */
1834 bcopy(&d->bd_sig, addr, sizeof (u_int));
1835 break;
39236c6e 1836#ifdef __APPLE__
fe8ab488
A
1837 case BIOCSEXTHDR: /* u_int */
1838 bcopy(addr, &int_arg, sizeof (int_arg));
1839 if (int_arg)
1840 d->bd_flags |= BPF_EXTENDED_HDR;
1841 else
1842 d->bd_flags &= ~BPF_EXTENDED_HDR;
316670eb 1843 break;
39236c6e
A
1844
1845 case BIOCGIFATTACHCOUNT: { /* struct ifreq */
1846 ifnet_t ifp;
1847 struct bpf_if *bp;
1848
1849 bcopy(addr, &ifr, sizeof (ifr));
1850 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
1851 ifp = ifunit(ifr.ifr_name);
1852 if (ifp == NULL) {
1853 error = ENXIO;
1854 break;
1855 }
1856 ifr.ifr_intval = 0;
1857 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
1858 struct bpf_d *bpf_d;
1859
1860 if (bp->bif_ifp == NULL || bp->bif_ifp != ifp)
1861 continue;
1862 for (bpf_d = bp->bif_dlist; bpf_d; bpf_d = bpf_d->bd_next) {
1863 ifr.ifr_intval += 1;
1864 }
1865 }
1866 bcopy(&ifr, addr, sizeof (ifr));
1867 break;
1868 }
fe8ab488
A
1869 case BIOCGWANTPKTAP: /* u_int */
1870 int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0;
1871 bcopy(&int_arg, addr, sizeof (int_arg));
1872 break;
1873
1874 case BIOCSWANTPKTAP: /* u_int */
1875 bcopy(addr, &int_arg, sizeof (int_arg));
1876 if (int_arg)
1877 d->bd_flags |= BPF_WANT_PKTAP;
1878 else
1879 d->bd_flags &= ~BPF_WANT_PKTAP;
1880 break;
39236c6e 1881#endif
3e170ce0
A
1882
1883 case BIOCSHEADDROP:
1884 bcopy(addr, &int_arg, sizeof (int_arg));
1885 d->bd_headdrop = int_arg ? 1 : 0;
1886 break;
1887
1888 case BIOCGHEADDROP:
1889 bcopy(&d->bd_headdrop, addr, sizeof (int));
1890 break;
316670eb
A
1891 }
1892
3e170ce0 1893 bpf_release_d(d);
91447636 1894 lck_mtx_unlock(bpf_mlock);
b0d623f7 1895
1c79356b
A
1896 return (error);
1897}
1898
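
/*
 * Illustrative only: a minimal userland sketch of driving the flag
 * ioctls handled above.  "fd" is a hypothetical descriptor obtained by
 * opening /dev/bpfN; error handling is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *
 *	u_int on = 1;
 *	(void) ioctl(fd, BIOCSHDRCMPLT, &on);	caller fills the link header
 *	(void) ioctl(fd, BIOCSSEESENT, &on);	also tap outbound packets
 *	(void) ioctl(fd, BIOCGHDRCMPLT, &on);	read the flag back
 */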

/*
 * Set d's packet filter program.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns,
    u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	while (d->bd_hbuf_read)
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);

	if ((d->bd_flags & BPF_CLOSING) != 0)
		return (ENXIO);

	old = d->bd_filter;
	if (bf_insns == USER_ADDR_NULL) {
		if (bf_len != 0)
			return (EINVAL);
		d->bd_filter = NULL;
		reset_d(d);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(struct bpf_insn);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
#ifdef __APPLE__
	if (fcode == NULL)
		return (ENOBUFS);
#endif
	if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		d->bd_filter = fcode;

		if (cmd == BIOCSETF32 || cmd == BIOCSETF64)
			reset_d(d);

		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}
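
/*
 * Illustrative only: a hedged userland sketch of installing a filter
 * through BIOCSETF, which lands in bpf_setf() above.  The
 * single-instruction program accepts every packet, truncated to 256
 * bytes; "fd" is a hypothetical open /dev/bpfN descriptor.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET + BPF_K, 256),
 *	};
 *	struct bpf_program prog = {
 *		.bf_len = sizeof (insns) / sizeof (insns[0]),
 *		.bf_insns = insns,
 *	};
 *	if (ioctl(fd, BIOCSETF, &prog) == -1)
 *		err(1, "BIOCSETF");
 */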
1952/*
1953 * Detach a file from its current interface (if attached at all) and attach
1954 * to the interface indicated by the name stored in ifr.
1955 * Return an errno or 0.
1956 */
1957static int
5ba3f43e 1958bpf_setif(struct bpf_d *d, ifnet_t theywant)
1c79356b
A
1959{
1960 struct bpf_if *bp;
2d21ac55 1961 int error;
39236c6e
A
1962
1963 while (d->bd_hbuf_read)
1964 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
1965
3e170ce0 1966 if ((d->bd_flags & BPF_CLOSING) != 0)
39236c6e
A
1967 return (ENXIO);
1968
1c79356b
A
1969 /*
1970 * Look through attached interfaces for the named one.
1971 */
1972 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
1973 struct ifnet *ifp = bp->bif_ifp;
1974
5ba3f43e 1975 if (ifp == 0 || ifp != theywant)
1c79356b 1976 continue;
fe8ab488 1977 /*
5ba3f43e 1978 * Do not use DLT_PKTAP, unless requested explicitly
fe8ab488 1979 */
5ba3f43e 1980 if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP))
fe8ab488 1981 continue;
1c79356b
A
1982 /*
1983 * We found the requested interface.
813fb2f6
A
1984 * Allocate the packet buffers.
1985 */
1986 error = bpf_allocbufs(d);
1987 if (error != 0)
1988 return (error);
1989 /*
1990 * Detach if attached to something else.
1c79356b 1991 */
1c79356b 1992 if (bp != d->bd_bif) {
813fb2f6 1993 if (d->bd_bif != NULL) {
3e170ce0
A
1994 if (bpf_detachd(d, 0) != 0)
1995 return (ENXIO);
2d21ac55 1996 }
3e170ce0
A
1997 if (bpf_attachd(d, bp) != 0)
1998 return (ENXIO);
1c79356b
A
1999 }
2000 reset_d(d);
1c79356b
A
2001 return (0);
2002 }
2003 /* Not found. */
2004 return (ENXIO);
2005}
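
/*
 * Illustrative only: the userland side of the attach path above is the
 * BIOCSETIF ioctl, sketched here with a hypothetical descriptor "fd"
 * and interface name "en0".
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof (ifr));
 *	strlcpy(ifr.ifr_name, "en0", sizeof (ifr.ifr_name));
 *	if (ioctl(fd, BIOCSETIF, &ifr) == -1)
 *		err(1, "BIOCSETIF");
 */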

/*
 * Get a list of the available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p)
{
	u_int n;
	int error;
	struct ifnet *ifp;
	struct bpf_if *bp;
	user_addr_t dlist;
	struct bpf_dltlist bfl;

	bcopy(addr, &bfl, sizeof (bfl));
	if (proc_is64bit(p)) {
		dlist = (user_addr_t)bfl.bfl_u.bflu_pad;
	} else {
		dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list);
	}

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;

	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		/*
		 * Do not use DLT_PKTAP unless requested explicitly.
		 */
		if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP))
			continue;
		if (dlist != USER_ADDR_NULL) {
			if (n >= bfl.bfl_len) {
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt, dlist,
			    sizeof (bp->bif_dlt));
			if (error != 0)
				break;
			dlist += sizeof (bp->bif_dlt);
		}
		n++;
	}
	bfl.bfl_len = n;
	bcopy(&bfl, addr, sizeof (bfl));

	return (error);
}
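
/*
 * Illustrative only: the conventional two-call pattern for the
 * BIOCGDLTLIST ioctl served by bpf_getdltlist().  The first call
 * passes a NULL list so that only the count comes back; the second
 * supplies a buffer of that size.  "fd" is a hypothetical open
 * /dev/bpfN descriptor already attached to an interface.
 *
 *	struct bpf_dltlist bfl;
 *
 *	memset(&bfl, 0, sizeof (bfl));
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)	get the count
 *		err(1, "BIOCGDLTLIST");
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof (u_int));
 *	if (ioctl(fd, BIOCGDLTLIST, &bfl) == -1)	fill the list
 *		err(1, "BIOCGDLTLIST");
 */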

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, uint32_t dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);

	while (d->bd_hbuf_read)
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);

	if ((d->bd_flags & BPF_CLOSING) != 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) {
			/*
			 * Do not use DLT_PKTAP unless requested explicitly.
			 */
			if (bp->bif_dlt == DLT_PKTAP &&
			    !(d->bd_flags & BPF_WANT_PKTAP)) {
				continue;
			}
			break;
		}
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		if (bpf_detachd(d, 0) != 0)
			return (ENXIO);
		error = bpf_attachd(d, bp);
		if (error) {
			printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n",
			    ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp),
			    error);
			return (error);
		}
		reset_d(d);
		if (opromisc) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(bp->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error) {
				printf("%s: ifpromisc %s%d failed (%d)\n",
				    __func__, ifnet_name(bp->bif_ifp),
				    ifnet_unit(bp->bif_ifp), error);
			} else {
				d->bd_promisc = 1;
			}
		}
	}
	return (bp == NULL ? EINVAL : 0);
}
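
/*
 * Illustrative only: switching the link-layer type from userland goes
 * through the BIOCSDLT ioctl, which ends up in bpf_setdlt() above.
 * "fd" is a hypothetical open /dev/bpfN descriptor already attached
 * to an interface.
 *
 *	u_int dlt = DLT_RAW;
 *	if (ioctl(fd, BIOCSDLT, &dlt) == -1)
 *		err(1, "BIOCSDLT");
 */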

static int
bpf_set_traffic_class(struct bpf_d *d, int tc)
{
	int error = 0;

	if (!SO_VALID_TC(tc))
		error = EINVAL;
	else
		d->bd_traffic_class = tc;

	return (error);
}

static void
bpf_set_packet_service_class(struct mbuf *m, int tc)
{
	if (!(m->m_flags & M_PKTHDR))
		return;

	VERIFY(SO_VALID_TC(tc));
	(void) m_set_service_class(m, so_tc2msc(tc));
}
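
/*
 * Illustrative only: a hypothetical userland snippet selecting a
 * traffic class for packets written to the device, via the BIOCSETTC
 * ioctl that calls bpf_set_traffic_class() above.  SO_TC_VI (video)
 * is one of the SO_TC_* values accepted by SO_VALID_TC().
 *
 *	int tc = SO_TC_VI;
 *	if (ioctl(fd, BIOCSETTC, &tc) == -1)
 *		err(1, "BIOCSETTC");
 */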

/*
 * Support for select()
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev_t dev, int which, void * wql, struct proc *p)
{
	struct bpf_d *d;
	int ret = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1 || (d->bd_flags & BPF_CLOSING) != 0) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	bpf_acquire_d(d);

	if (d->bd_bif == NULL) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	while (d->bd_hbuf_read)
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	switch (which) {
	case FREAD:
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
		    d->bd_slen != 0))
			ret = 1; /* read has data to return */
		else {
			/*
			 * Read has no data to return.
			 * Make the select wait, and start a timer if
			 * necessary.
			 */
			selrecord(p, &d->bd_sel, wql);
			bpf_start_timer(d);
		}
		break;

	case FWRITE:
		ret = 1; /* can't determine whether a write would block */
		break;
	}

	bpf_release_d(d);
	lck_mtx_unlock(bpf_mlock);

	return (ret);
}
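
/*
 * Illustrative only: a hypothetical userland read loop multiplexing on
 * the device with select(2), which exercises bpfselect() above.  "fd",
 * "buf" and "buflen" are assumptions.
 *
 *	fd_set rfds;
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 &&
 *	    FD_ISSET(fd, &rfds)) {
 *		ssize_t n = read(fd, buf, buflen);
 *		... walk the bpf_hdr records in buf ...
 *	}
 */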

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int bpfkqfilter(dev_t dev, struct knote *kn);
static void filt_bpfdetach(struct knote *);
static int filt_bpfread(struct knote *, long);
static int filt_bpftouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_bpfprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);

SECURITY_READ_ONLY_EARLY(struct filterops) bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
	.f_touch = filt_bpftouch,
	.f_process = filt_bpfprocess,
};

static int
filt_bpfread_common(struct knote *kn, struct bpf_d *d)
{
	int ready = 0;

	if (d->bd_immediate) {
		/*
		 * If there's data in the hold buffer, it's the
		 * amount of data a read will return.
		 *
		 * If there's no data in the hold buffer, but
		 * there's data in the store buffer, a read will
		 * immediately rotate the store buffer to the
		 * hold buffer, so the amount of data in the store
		 * buffer is the amount of data a read will
		 * return.
		 *
		 * If there's no data in either buffer, we're not
		 * ready to read.
		 */
		kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read)
		    ? d->bd_slen : d->bd_hlen);
		int64_t lowwat = 1;
		if (kn->kn_sfflags & NOTE_LOWAT) {
			if (kn->kn_sdata > d->bd_bufsize)
				lowwat = d->bd_bufsize;
			else if (kn->kn_sdata > lowwat)
				lowwat = kn->kn_sdata;
		}
		ready = (kn->kn_data >= lowwat);
	} else {
		/*
		 * If there's data in the hold buffer, it's the
		 * amount of data a read will return.
		 *
		 * If there's no data in the hold buffer, but
		 * there's data in the store buffer, if the
		 * timer has expired a read will immediately
		 * rotate the store buffer to the hold buffer,
		 * so the amount of data in the store buffer is
		 * the amount of data a read will return.
		 *
		 * If there's no data in either buffer, or there's
		 * no data in the hold buffer and the timer hasn't
		 * expired, we're not ready to read.
		 */
		kn->kn_data = ((d->bd_hlen == 0 || d->bd_hbuf_read) &&
		    d->bd_state == BPF_TIMED_OUT ? d->bd_slen : d->bd_hlen);
		ready = (kn->kn_data > 0);
	}
	if (!ready)
		bpf_start_timer(d);

	return (ready);
}

int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;
	int res;

	/*
	 * Is this device a bpf?
	 */
	if (major(dev) != CDEV_MAJOR ||
	    kn->kn_filter != EVFILT_READ) {
		kn->kn_flags = EV_ERROR;
		kn->kn_data = EINVAL;
		return 0;
	}

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];

	if (d == 0 ||
	    d == (void *)1 ||
	    d->bd_bif == NULL ||
	    (d->bd_flags & BPF_CLOSING) != 0) {
		lck_mtx_unlock(bpf_mlock);
		kn->kn_flags = EV_ERROR;
		kn->kn_data = ENXIO;
		return 0;
	}

	kn->kn_hook = d;
	kn->kn_filtid = EVFILTID_BPFREAD;
	KNOTE_ATTACH(&d->bd_sel.si_note, kn);
	d->bd_flags |= BPF_KNOTE;

	/* capture the current state */
	res = filt_bpfread_common(kn, d);

	lck_mtx_unlock(bpf_mlock);

	return (res);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	lck_mtx_lock(bpf_mlock);
	if (d->bd_flags & BPF_KNOTE) {
		KNOTE_DETACH(&d->bd_sel.si_note, kn);
		d->bd_flags &= ~BPF_KNOTE;
	}
	lck_mtx_unlock(bpf_mlock);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
#pragma unused(hint)
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	return filt_bpfread_common(kn, d);
}

static int
filt_bpftouch(struct knote *kn, struct kevent_internal_s *kev)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int res;

	lck_mtx_lock(bpf_mlock);

	/* save off the lowat threshold and flag */
	kn->kn_sdata = kev->data;
	kn->kn_sfflags = kev->fflags;
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
		kn->kn_udata = kev->udata;

	/* output data will be re-generated here */
	res = filt_bpfread_common(kn, d);

	lck_mtx_unlock(bpf_mlock);

	return res;
}

static int
filt_bpfprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev)
{
#pragma unused(data)
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int res;

	lck_mtx_lock(bpf_mlock);
	res = filt_bpfread_common(kn, d);
	if (res) {
		*kev = kn->kn_kevent;
	}
	lck_mtx_unlock(bpf_mlock);

	return res;
}
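
/*
 * Illustrative only: a hypothetical userland sketch registering the
 * EVFILT_READ filter implemented above, with NOTE_LOWAT requesting at
 * least 128 bytes buffered before the knote fires.  "fd" is an open
 * /dev/bpfN descriptor.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 128, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */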

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in kern/uipc_mbuf.c.
 */
static void
bpf_mcopy(struct mbuf * m, void *dst_arg, size_t len)
{
	u_int count;
	u_char *dst;

	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mbuf_data(m), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

static inline void
bpf_tap_imp(
	ifnet_t ifp,
	u_int32_t dlt,
	struct bpf_packet *bpf_pkt,
	int outbound)
{
	struct bpf_d *d;
	u_int slen;
	struct bpf_if *bp;

	/*
	 * It's possible that we get here after the bpf descriptor has been
	 * detached from the interface; in such a case we simply return.
	 * Lock ordering is important since we can be called asynchronously
	 * (from IOKit) to process an inbound packet; when that happens
	 * we would have been holding its "gateLock" and will be acquiring
	 * "bpf_mlock" upon entering this routine.  Due to that, we release
	 * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
	 * acquire "gateLock" in the IOKit), in order to avoid a deadlock
	 * when an ifnet_set_promiscuous request simultaneously collides with
	 * an inbound packet being passed into the tap callback.
	 */
	lck_mtx_lock(bpf_mlock);
	if (ifp->if_bpf == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return;
	}
	for (bp = ifp->if_bpf; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			/* wrong interface */
			bp = NULL;
			break;
		}
		if (dlt == 0 || bp->bif_dlt == dlt) {
			/* tapping default DLT or DLT matches */
			break;
		}
	}
	if (bp == NULL) {
		goto done;
	}
	for (d = bp->bif_dlist; d; d = d->bd_next) {
		if (outbound && !d->bd_seesent)
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt,
		    bpf_pkt->bpfp_total_length, 0);
		if (slen != 0) {
#if CONFIG_MACF_NET
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0)
				continue;
#endif
			catchpacket(d, bpf_pkt, slen, outbound);
		}
	}

done:
	lck_mtx_unlock(bpf_mlock);
}

static inline void
bpf_tap_mbuf(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void* hdr,
	size_t hlen,
	int outbound)
{
	struct bpf_packet bpf_pkt;
	struct mbuf *m0;

	if (ifp->if_bpf == NULL) {
		/* quickly check without taking lock */
		return;
	}
	bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
	bpf_pkt.bpfp_mbuf = m;
	bpf_pkt.bpfp_total_length = 0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		bpf_pkt.bpfp_total_length += m0->m_len;
	bpf_pkt.bpfp_header = hdr;
	if (hdr != NULL) {
		bpf_pkt.bpfp_total_length += hlen;
		bpf_pkt.bpfp_header_length = hlen;
	} else {
		bpf_pkt.bpfp_header_length = 0;
	}
	bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
}

void
bpf_tap_out(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void* hdr,
	size_t hlen)
{
	bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1);
}

void
bpf_tap_in(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void* hdr,
	size_t hlen)
{
	bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0);
}

/* Callback registered with Ethernet driver. */
static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL);

	return 0;
}
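
/*
 * Illustrative only: a hypothetical driver input path handing a
 * received mbuf to the tap before passing it up the stack.  "sc",
 * "sc_ifp" and mydrv_input are assumptions, and for ordinary
 * interfaces the DLIL input path normally performs this call itself.
 *
 *	static void
 *	mydrv_input(struct mydrv_softc *sc, mbuf_t m)
 *	{
 *		bpf_tap_in(sc->sc_ifp, DLT_EN10MB, m, NULL, 0);
 *		(void) ifnet_input(sc->sc_ifp, m, NULL);
 *	}
 */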

static void
copy_bpf_packet(struct bpf_packet * pkt, void * dst, size_t len)
{
	/* copy the optional header */
	if (pkt->bpfp_header_length != 0) {
		size_t count = min(len, pkt->bpfp_header_length);

		bcopy(pkt->bpfp_header, dst, count);
		len -= count;
		dst += count;
	}
	if (len == 0) {
		/* nothing past the header */
		return;
	}
	/* copy the packet */
	switch (pkt->bpfp_type) {
	case BPF_PACKET_TYPE_MBUF:
		bpf_mcopy(pkt->bpfp_mbuf, dst, len);
		break;
	default:
		break;
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up any pending reads when the packet fills
 * the store buffer, or when the descriptor is in immediate mode
 * or its read timeout has already expired.
 */
static void
catchpacket(struct bpf_d *d, struct bpf_packet * pkt,
    u_int snaplen, int outbound)
{
	struct bpf_hdr *hp;
	struct bpf_hdr_ext *ehp;
	int totlen, curlen;
	int hdrlen, caplen;
	int do_wakeup = 0;
	u_char *payload;
	struct timeval tv;

	hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen :
	    d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pkt->bpfp_total_length);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wake up any
		 * pending reads.
		 *
		 * We cannot rotate buffers if a read is in progress,
		 * so drop the packet.
		 */
		if (d->bd_hbuf_read) {
			++d->bd_dcount;
			return;
		}

		if (d->bd_fbuf == NULL) {
			if (d->bd_headdrop == 0) {
				/*
				 * We haven't completed the previous read yet,
				 * so drop the packet.
				 */
				++d->bd_dcount;
				return;
			}
			/*
			 * Drop the hold buffer as it contains older packets.
			 */
			d->bd_dcount += d->bd_hcnt;
			d->bd_fbuf = d->bd_hbuf;
			ROTATE_BUFFERS(d);
		} else {
			ROTATE_BUFFERS(d);
		}
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	microtime(&tv);
	if (d->bd_flags & BPF_EXTENDED_HDR) {
		struct mbuf *m;

		m = (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF)
		    ? pkt->bpfp_mbuf : NULL;
		ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen);
		memset(ehp, 0, sizeof(*ehp));
		ehp->bh_tstamp.tv_sec = tv.tv_sec;
		ehp->bh_tstamp.tv_usec = tv.tv_usec;

		ehp->bh_datalen = pkt->bpfp_total_length;
		ehp->bh_hdrlen = hdrlen;
		caplen = ehp->bh_caplen = totlen - hdrlen;
		if (m == NULL) {
			if (outbound) {
				ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;
			} else {
				ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
			}
		} else if (outbound) {
			ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;

			/* only do lookups on non-raw INPCB */
			if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID|
			    PKTF_FLOW_LOCALSRC|PKTF_FLOW_RAWSOCK)) ==
			    (PKTF_FLOW_ID|PKTF_FLOW_LOCALSRC) &&
			    m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) {
				ehp->bh_flowid = m->m_pkthdr.pkt_flowid;
				ehp->bh_proto = m->m_pkthdr.pkt_proto;
			}
			ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc);
			if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT)
				ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT;
			if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ)
				ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ;
			if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT)
				ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT;
			if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) {
				ehp->bh_unsent_bytes =
				    m->m_pkthdr.bufstatus_if;
				ehp->bh_unsent_snd =
				    m->m_pkthdr.bufstatus_sndbuf;
			}
		} else {
			ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
		}
		payload = (u_char *)ehp + hdrlen;
	} else {
		hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen);
		hp->bh_tstamp.tv_sec = tv.tv_sec;
		hp->bh_tstamp.tv_usec = tv.tv_usec;
		hp->bh_datalen = pkt->bpfp_total_length;
		hp->bh_hdrlen = hdrlen;
		caplen = hp->bh_caplen = totlen - hdrlen;
		payload = (u_char *)hp + hdrlen;
	}
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	copy_bpf_packet(pkt, payload, caplen);
	d->bd_slen = curlen + totlen;
	d->bd_scnt += 1;

	if (do_wakeup)
		bpf_wakeup(d);
}
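
/*
 * Illustrative only: the record layout written above is what a
 * userland reader sees.  A hedged sketch of walking the records
 * returned by a single read(2) of n bytes into "buf", using the same
 * BPF_WORDALIGN rounding as catchpacket; handle_packet is a
 * hypothetical consumer.
 *
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)(void *)p;
 *
 *		handle_packet((u_char *)p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */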

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	if (d->bd_sbuf != NULL) {
		FREE(d->bd_sbuf, M_DEVBUF);
		d->bd_sbuf = NULL;
	}
	if (d->bd_hbuf != NULL) {
		FREE(d->bd_hbuf, M_DEVBUF);
		d->bd_hbuf = NULL;
	}
	if (d->bd_fbuf != NULL) {
		FREE(d->bd_fbuf, M_DEVBUF);
		d->bd_fbuf = NULL;
	}

	d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_fbuf == NULL)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT);
	if (d->bd_sbuf == NULL) {
		FREE(d->bd_fbuf, M_DEVBUF);
		d->bd_fbuf = NULL;
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_scnt = 0;
	d->bd_hcnt = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_hbuf_read)
		panic("bpf buffer freed during read");

	if (d->bd_sbuf != 0) {
		FREE(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			FREE(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			FREE(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		FREE((caddr_t)d->bd_filter, M_DEVBUF);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type;
 * hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpf_attach(ifp, dlt, hdrlen, NULL, NULL);
}

errno_t
bpf_attach(
	ifnet_t ifp,
	u_int32_t dlt,
	u_int32_t hdrlen,
	bpf_send_func send,
	bpf_tap_func tap)
{
	struct bpf_if *bp;
	struct bpf_if *bp_new;
	struct bpf_if *bp_before_first = NULL;
	struct bpf_if *bp_first = NULL;
	struct bpf_if *bp_last = NULL;
	boolean_t found;

	bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF,
	    M_WAIT | M_ZERO);
	if (bp_new == 0)
		panic("bpfattach");

	lck_mtx_lock(bpf_mlock);

	/*
	 * Check if this interface/dlt is already attached.  Remember the
	 * first and last attachment for this interface, as well as the
	 * element before the first attachment.
	 */
	found = FALSE;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			if (bp_first != NULL) {
				/* no more elements for this interface */
				break;
			}
			bp_before_first = bp;
		} else {
			if (bp->bif_dlt == dlt) {
				found = TRUE;
				break;
			}
			if (bp_first == NULL) {
				bp_first = bp;
			}
			bp_last = bp;
		}
	}
	if (found) {
		lck_mtx_unlock(bpf_mlock);
		printf("bpfattach - %s with dlt %d is already attached\n",
		    if_name(ifp), dlt);
		FREE(bp_new, M_DEVBUF);
		return EEXIST;
	}

	bp_new->bif_ifp = ifp;
	bp_new->bif_dlt = dlt;
	bp_new->bif_send = send;
	bp_new->bif_tap = tap;

	if (bp_first == NULL) {
		/* No other entries for this ifp */
		bp_new->bif_next = bpf_iflist;
		bpf_iflist = bp_new;
	} else {
		if (ifnet_type(ifp) == IFT_ETHER && dlt == DLT_EN10MB) {
			/* Make this the first entry for this interface */
			if (bp_before_first != NULL) {
				/* point the previous to us */
				bp_before_first->bif_next = bp_new;
			} else {
				/* we're the new head */
				bpf_iflist = bp_new;
			}
			bp_new->bif_next = bp_first;
		} else {
			/* Add this after the last entry for this interface */
			bp_new->bif_next = bp_last->bif_next;
			bp_last->bif_next = bp_new;
		}
	}

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
	bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen +
	    sizeof(struct bpf_hdr_ext)) - hdrlen;

	/* Take a reference on the interface */
	ifnet_reference(ifp);

	lck_mtx_unlock(bpf_mlock);

#ifndef __APPLE__
	if (bootverbose)
		printf("bpf: %s attached\n", if_name(ifp));
#endif

	return 0;
}
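
/*
 * Illustrative only: a hypothetical Ethernet driver registering with
 * bpf at attach time.  Working the header math above with hdrlen = 14
 * and SIZEOF_BPF_HDR = 18 (an assumption that holds when struct
 * bpf_hdr pads to 20 bytes): bif_hdrlen = BPF_WORDALIGN(14 + 18) - 14
 * = 18, so the captured network-layer header starts 18 + 14 = 32
 * bytes into the record, on a longword boundary.
 *
 *	bpfattach(ifp, DLT_EN10MB, sizeof (struct ether_header));
 */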

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_prev, *bp_next;
	struct bpf_d *d;

	if (bpf_debug != 0)
		printf("%s: %s\n", __func__, if_name(ifp));

	lck_mtx_lock(bpf_mlock);

	/*
	 * Build the list of devices attached to that interface
	 * that we need to free while keeping the lock to maintain
	 * the integrity of the interface list.
	 */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
		bp_next = bp->bif_next;

		if (ifp != bp->bif_ifp) {
			bp_prev = bp;
			continue;
		}
		/* Unlink from the interface list */
		if (bp_prev)
			bp_prev->bif_next = bp->bif_next;
		else
			bpf_iflist = bp->bif_next;

		/* Detach the devices attached to the interface */
		while ((d = bp->bif_dlist) != NULL) {
			/*
			 * Take an extra reference to prevent the device
			 * from being freed when bpf_detachd() releases
			 * the reference for the interface list.
			 */
			bpf_acquire_d(d);
			bpf_detachd(d, 0);
			bpf_wakeup(d);
			bpf_release_d(d);
		}
		ifnet_release(ifp);
	}

	lck_mtx_unlock(bpf_mlock);
}

void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
	int i;
	int maj;

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;
		bpf_mlock_grp_attr = lck_grp_attr_alloc_init();
		bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);
		bpf_mlock_attr = lck_attr_alloc_init();
		lck_mtx_init(bpf_mlock, bpf_mlock_grp, bpf_mlock_attr);
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			if (bpf_mlock_attr)
				lck_attr_free(bpf_mlock_attr);
			if (bpf_mlock_grp)
				lck_grp_free(bpf_mlock_grp);
			if (bpf_mlock_grp_attr)
				lck_grp_attr_free(bpf_mlock_grp_attr);

			bpf_mlock = NULL;
			bpf_mlock_attr = NULL;
			bpf_mlock_grp = NULL;
			bpf_mlock_grp_attr = NULL;
			bpf_devsw_installed = 0;
			printf("bpf_init: failed to allocate a major number!\n");
			return;
		}

		for (i = 0; i < NBPFILTER; i++)
			bpf_make_dev_t(maj);
	}
#else
	cdevsw_add(&bpf_cdevsw);
#endif
}

#ifndef __APPLE__
SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL)
#endif

#if CONFIG_MACF_NET
struct label *
mac_bpfdesc_label_get(struct bpf_d *d)
{
	return (d->bd_label);
}

void
mac_bpfdesc_label_set(struct bpf_d *d, struct label *label)
{
	d->bd_label = label;
}
#endif