+
+ error = bpf_setup(d, bsa.bsa_uuid, ifp);
+ break;
+ }
+ case BIOCSPKTHDRV2:
+ bcopy(addr, &int_arg, sizeof(int_arg));
+ if (int_arg != 0) {
+ d->bd_flags |= BPF_PKTHDRV2;
+ } else {
+ d->bd_flags &= ~BPF_PKTHDRV2;
+ }
+ break;
+
+ case BIOCGPKTHDRV2:
+ int_arg = d->bd_flags & BPF_PKTHDRV2 ? 1 : 0;
+ bcopy(&int_arg, addr, sizeof(int));
+ break;
+ }
+
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+
+ return error;
+}
+
+/*
+ * Set d's packet filter program to the one described by bf_insns and bf_len.
+ * If this file already has a filter, free it and replace it.
+ * Returns EINVAL for bogus requests.
+ */
+static int
+bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns,
+ u_long cmd)
+{
+ struct bpf_insn *fcode, *old;
+ u_int flen, size;
+
+ while (d->bd_hbuf_read != 0) {
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ }
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ return ENXIO;
+ }
+
+ old = d->bd_filter;
+ if (bf_insns == USER_ADDR_NULL) {
+ if (bf_len != 0) {
+ return EINVAL;
+ }
+ d->bd_filter = NULL;
+ reset_d(d);
+ if (old != 0) {
+ FREE(old, M_DEVBUF);
+ }
+ return 0;
+ }
+ flen = bf_len;
+ if (flen > BPF_MAXINSNS) {
+ return EINVAL;
+ }
+
+ size = flen * sizeof(struct bpf_insn);
+ fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
+#ifdef __APPLE__
+ if (fcode == NULL) {
+ return ENOBUFS;
+ }
+#endif
+ if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
+ bpf_validate(fcode, (int)flen)) {
+ d->bd_filter = fcode;
+
+ if (cmd == BIOCSETF32 || cmd == BIOCSETF64) {
+ reset_d(d);
+ }
+
+ if (old != 0) {
+ FREE(old, M_DEVBUF);
+ }
+
+ return 0;
+ }
+ FREE(fcode, M_DEVBUF);
+ return EINVAL;
+}
+
+/*
+ * Detach a file from its current interface (if attached at all) and attach
+ * to the interface indicated by theywant.
+ * Return an errno or 0.
+ */
+static int
+bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read)
+{
+ struct bpf_if *bp;
+ int error;
+
+ while (d->bd_hbuf_read != 0 && !has_hbuf_read) {
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ }
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ return ENXIO;
+ }
+
+ /*
+ * Look through attached interfaces for the requested one.
+ */
+ for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
+ struct ifnet *ifp = bp->bif_ifp;
+
+ if (ifp == 0 || ifp != theywant) {
+ continue;
+ }
+ /*
+ * Do not use DLT_PKTAP, unless requested explicitly
+ */
+ if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
+ continue;
+ }
+ /*
+ * Skip the coprocessor interface
+ */
+ if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) {
+ continue;
+ }
+ /*
+ * We found the requested interface.
+ * Allocate the packet buffers.
+ */
+ error = bpf_allocbufs(d);
+ if (error != 0) {
+ return error;
+ }
+ /*
+ * Detach if attached to something else.
+ */
+ if (bp != d->bd_bif) {
+ if (d->bd_bif != NULL) {
+ if (bpf_detachd(d, 0) != 0) {
+ return ENXIO;
+ }
+ }
+ if (bpf_attachd(d, bp) != 0) {
+ return ENXIO;
+ }
+ }
+ if (do_reset) {
+ reset_d(d);
+ }
+ return 0;
+ }
+ /* Not found. */
+ return ENXIO;
+}
+
+/*
+ * Get the list of available data link types of the interface.
+ */
+static int
+bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p)
+{
+ u_int n;
+ int error;
+ struct ifnet *ifp;
+ struct bpf_if *bp;
+ user_addr_t dlist;
+ struct bpf_dltlist bfl;
+
+ bcopy(addr, &bfl, sizeof(bfl));
+ if (proc_is64bit(p)) {
+ dlist = (user_addr_t)bfl.bfl_u.bflu_pad;
+ } else {
+ dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list);
+ }
+
+ ifp = d->bd_bif->bif_ifp;
+ n = 0;
+ error = 0;
+
+ for (bp = bpf_iflist; bp; bp = bp->bif_next) {
+ if (bp->bif_ifp != ifp) {
+ continue;
+ }
+ /*
+ * Do not use DLT_PKTAP, unless requested explicitly
+ */
+ if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
+ continue;
+ }
+ if (dlist != USER_ADDR_NULL) {
+ if (n >= bfl.bfl_len) {
+ return ENOMEM;
+ }
+ error = copyout(&bp->bif_dlt, dlist,
+ sizeof(bp->bif_dlt));
+ if (error != 0) {
+ break;
+ }
+ dlist += sizeof(bp->bif_dlt);
+ }
+ n++;
+ }
+ bfl.bfl_len = n;
+ bcopy(&bfl, addr, sizeof(bfl));
+
+ return error;
+}
+
+/*
+ * Set the data link type of a BPF instance.
+ */
+static int
+bpf_setdlt(struct bpf_d *d, uint32_t dlt)
+{
+ int error, opromisc;
+ struct ifnet *ifp;
+ struct bpf_if *bp;
+
+ if (d->bd_bif->bif_dlt == dlt) {
+ return 0;
+ }
+
+ while (d->bd_hbuf_read != 0) {
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ }
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ return ENXIO;
+ }
+
+ ifp = d->bd_bif->bif_ifp;
+ for (bp = bpf_iflist; bp; bp = bp->bif_next) {
+ if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) {
+ /*
+ * Do not use DLT_PKTAP, unless requested explicitly
+ */
+ if (bp->bif_dlt == DLT_PKTAP &&
+ !(d->bd_flags & BPF_WANT_PKTAP)) {
+ continue;
+ }
+ break;
+ }
+ }
+ if (bp != NULL) {
+ opromisc = d->bd_promisc;
+ if (bpf_detachd(d, 0) != 0) {
+ return ENXIO;
+ }
+ error = bpf_attachd(d, bp);
+ if (error) {
+ printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n",
+ ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp),
+ error);
+ return error;
+ }
+ reset_d(d);
+ if (opromisc) {
+ lck_mtx_unlock(bpf_mlock);
+ error = ifnet_set_promiscuous(bp->bif_ifp, 1);
+ lck_mtx_lock(bpf_mlock);
+ if (error) {
+ printf("%s: ifpromisc %s%d failed (%d)\n",
+ __func__, ifnet_name(bp->bif_ifp),
+ ifnet_unit(bp->bif_ifp), error);
+ } else {
+ d->bd_promisc = 1;
+ }
+ }
+ }
+ return bp == NULL ? EINVAL : 0;
+}
+
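+/*
+ * Set the traffic class used for packets written to this descriptor;
+ * reject values that are not valid socket traffic classes.
+ */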
+static int
+bpf_set_traffic_class(struct bpf_d *d, int tc)
+{
+ int error = 0;
+
+ if (!SO_VALID_TC(tc)) {
+ error = EINVAL;
+ } else {
+ d->bd_traffic_class = tc;
+ }
+
+ return error;
+}
+
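+/*
+ * Map a traffic class to an mbuf service class and apply it to an outgoing
+ * packet; a no-op for mbufs without a packet header.
+ */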
+static void
+bpf_set_packet_service_class(struct mbuf *m, int tc)
+{
+ if (!(m->m_flags & M_PKTHDR)) {
+ return;
+ }
+
+ VERIFY(SO_VALID_TC(tc));
+ (void) m_set_service_class(m, so_tc2msc(tc));
+}
+
+/*
+ * Support for select()
+ *
+ * Return true iff the specific operation will not block indefinitely.
+ * Otherwise, return false but make a note that a selwakeup() must be done.
+ */
+int
+bpfselect(dev_t dev, int which, void * wql, struct proc *p)
+{
+ struct bpf_d *d;
+ int ret = 0;
+
+ lck_mtx_lock(bpf_mlock);
+
+ d = bpf_dtab[minor(dev)];
+ if (d == NULL || d == BPF_DEV_RESERVED ||
+ (d->bd_flags & BPF_CLOSING) != 0) {
+ lck_mtx_unlock(bpf_mlock);
+ return ENXIO;
+ }
+
+ bpf_acquire_d(d);
+
+ if (d->bd_bif == NULL) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return ENXIO;
+ }
+
+ while (d->bd_hbuf_read != 0) {
+ msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL);
+ }
+
+ if ((d->bd_flags & BPF_CLOSING) != 0) {
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+ return ENXIO;
+ }
+
+ switch (which) {
+ case FREAD:
+ if (d->bd_hlen != 0 ||
+ ((d->bd_immediate ||
+ d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) {
+ ret = 1; /* read has data to return */
+ } else {
+ /*
+ * Read has no data to return.
+ * Make the select wait, and start a timer if
+ * necessary.
+ */
+ selrecord(p, &d->bd_sel, wql);
+ bpf_start_timer(d);
+ }
+ break;
+
+ case FWRITE:
+ /* can't determine whether a write would block */
+ ret = 1;
+ break;
+ }
+
+ bpf_release_d(d);
+ lck_mtx_unlock(bpf_mlock);
+
+ return ret;
+}
+
+/*
+ * Support for kevent() system call. Register EVFILT_READ filters and
+ * reject all others.
+ */
+int bpfkqfilter(dev_t dev, struct knote *kn);
+static void filt_bpfdetach(struct knote *);
+static int filt_bpfread(struct knote *, long);
+static int filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev);
+static int filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev);
+
+SECURITY_READ_ONLY_EARLY(struct filterops) bpfread_filtops = {
+ .f_isfd = 1,
+ .f_detach = filt_bpfdetach,
+ .f_event = filt_bpfread,
+ .f_touch = filt_bpftouch,
+ .f_process = filt_bpfprocess,
+};
+
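+/*
+ * Common EVFILT_READ readiness check: compute how much data a read would
+ * return (hold buffer, or store buffer in immediate mode or after the read
+ * timeout has fired), start the timer if nothing is ready, and fill in the
+ * kevent when one is supplied.
+ */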
+static int
+filt_bpfread_common(struct knote *kn, struct kevent_qos_s *kev, struct bpf_d *d)
+{
+ int ready = 0;
+ int64_t data = 0;
+
+ if (d->bd_immediate) {
+ /*
+ * If there's data in the hold buffer, it's the
+ * amount of data a read will return.
+ *
+ * If there's no data in the hold buffer, but
+ * there's data in the store buffer, a read will
+ * immediately rotate the store buffer to the
+ * hold buffer, the amount of data in the store
+ * buffer is the amount of data a read will
+ * return.
+ *
+ * If there's no data in either buffer, we're not
+ * ready to read.
+ */
+ data = (d->bd_hlen == 0 || d->bd_hbuf_read != 0 ?
+ d->bd_slen : d->bd_hlen);
+ int64_t lowwat = knote_low_watermark(kn);
+ if (lowwat > d->bd_bufsize) {
+ lowwat = d->bd_bufsize;
+ }
+ ready = (data >= lowwat);
+ } else {
+ /*
+ * If there's data in the hold buffer, it's the
+ * amount of data a read will return.
+ *
+ * If there's no data in the hold buffer, but
+ * there's data in the store buffer, if the
+ * timer has expired a read will immediately
+ * rotate the store buffer to the hold buffer,
+ * so the amount of data in the store buffer is
+ * the amount of data a read will return.
+ *
+ * If there's no data in either buffer, or there's
+ * no data in the hold buffer and the timer hasn't
+ * expired, we're not ready to read.
+ */
+ data = ((d->bd_hlen == 0 || d->bd_hbuf_read != 0) &&
+ d->bd_state == BPF_TIMED_OUT ? d->bd_slen : d->bd_hlen);
+ ready = (data > 0);
+ }
+ if (!ready) {
+ bpf_start_timer(d);
+ } else if (kev) {
+ knote_fill_kevent(kn, kev, data);
+ }
+
+ return ready;
+}
+
+int
+bpfkqfilter(dev_t dev, struct knote *kn)
+{
+ struct bpf_d *d;
+ int res;
+
+ /*
+ * Is this device a bpf?
+ */
+ if (major(dev) != CDEV_MAJOR || kn->kn_filter != EVFILT_READ) {
+ knote_set_error(kn, EINVAL);
+ return 0;
+ }
+
+ lck_mtx_lock(bpf_mlock);
+
+ d = bpf_dtab[minor(dev)];
+
+ if (d == NULL || d == BPF_DEV_RESERVED ||
+ (d->bd_flags & BPF_CLOSING) != 0 ||
+ d->bd_bif == NULL) {
+ lck_mtx_unlock(bpf_mlock);
+ knote_set_error(kn, ENXIO);
+ return 0;
+ }
+
+ kn->kn_hook = d;
+ kn->kn_filtid = EVFILTID_BPFREAD;
+ KNOTE_ATTACH(&d->bd_sel.si_note, kn);
+ d->bd_flags |= BPF_KNOTE;
+
+ /* capture the current state */
+ res = filt_bpfread_common(kn, NULL, d);
+
+ lck_mtx_unlock(bpf_mlock);
+
+ return res;
+}
+
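+/*
+ * Detach the knote from the descriptor's selinfo when the filter is removed.
+ */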
+static void
+filt_bpfdetach(struct knote *kn)
+{
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+
+ lck_mtx_lock(bpf_mlock);
+ if (d->bd_flags & BPF_KNOTE) {
+ KNOTE_DETACH(&d->bd_sel.si_note, kn);
+ d->bd_flags &= ~BPF_KNOTE;
+ }
+ lck_mtx_unlock(bpf_mlock);
+}
+
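+/*
+ * f_event callback: re-evaluate read readiness; the hint is unused.
+ */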
+static int
+filt_bpfread(struct knote *kn, long hint)
+{
+#pragma unused(hint)
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+
+ return filt_bpfread_common(kn, NULL, d);
+}
+
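+/*
+ * f_touch callback: pick up the updated low watermark and flags from
+ * userspace, then re-evaluate readiness.
+ */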
+static int
+filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev)
+{
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+ int res;
+
+ lck_mtx_lock(bpf_mlock);
+
+ /* save off the lowat threshold and flag */
+ kn->kn_sdata = kev->data;
+ kn->kn_sfflags = kev->fflags;
+
+ /* output data will be re-generated here */
+ res = filt_bpfread_common(kn, NULL, d);
+
+ lck_mtx_unlock(bpf_mlock);
+
+ return res;
+}
+
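+/*
+ * f_process callback: evaluate readiness and, if ready, fill in the kevent
+ * to deliver.
+ */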
+static int
+filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev)
+{
+ struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
+ int res;
+
+ lck_mtx_lock(bpf_mlock);
+ res = filt_bpfread_common(kn, kev, d);
+ lck_mtx_unlock(bpf_mlock);
+
+ return res;
+}
+
+/*
+ * Copy data from an mbuf chain into a buffer. This code is derived
+ * from m_copydata in kern/uipc_mbuf.c.
+ */
+static void
+bpf_mcopy(struct mbuf * m, void *dst_arg, size_t len)
+{
+ u_int count;
+ u_char *dst;
+
+ dst = dst_arg;
+ while (len > 0) {
+ if (m == 0) {
+ panic("bpf_mcopy");
+ }
+ count = min(m->m_len, len);
+ bcopy(mbuf_data(m), dst, count);
+ m = m->m_next;
+ dst += count;
+ len -= count;
+ }
+}
+
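+/*
+ * Common tap path: find the bpf_if matching the interface and DLT, then run
+ * each attached descriptor's filter over the packet and capture the portion
+ * that matches. For DLT_PKTAP, the header may be converted to the v2 format
+ * and the capture length may be truncated.
+ */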
+static inline void
+bpf_tap_imp(
+ ifnet_t ifp,
+ u_int32_t dlt,
+ struct bpf_packet *bpf_pkt,
+ int outbound)
+{
+ struct bpf_d *d;
+ u_int slen;
+ struct bpf_if *bp;
+
+ /*
+ * It's possible that we get here after the bpf descriptor has been
+ * detached from the interface; in such a case we simply return.
+ * Lock ordering is important since we can be called asynchronously
+ * (from IOKit) to process an inbound packet; when that happens
+ * we would have been holding its "gateLock" and will be acquiring
+ * "bpf_mlock" upon entering this routine. Due to that, we release
+ * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
+ * acquire "gateLock" in the IOKit), in order to avoid a deadlock
+ * when an ifnet_set_promiscuous request simultaneously collides with
+ * an inbound packet being passed into the tap callback.
+ */
+ lck_mtx_lock(bpf_mlock);
+ if (ifp->if_bpf == NULL) {
+ lck_mtx_unlock(bpf_mlock);
+ return;
+ }
+ for (bp = ifp->if_bpf; bp != NULL; bp = bp->bif_next) {
+ if (bp->bif_ifp != ifp) {
+ /* wrong interface */
+ bp = NULL;
+ break;
+ }
+ if (dlt == 0 || bp->bif_dlt == dlt) {
+ /* tapping default DLT or DLT matches */
+ break;
+ }
+ }
+ if (bp == NULL) {
+ goto done;
+ }
+ for (d = bp->bif_dlist; d; d = d->bd_next) {
+ struct bpf_packet *bpf_pkt_saved = bpf_pkt;
+ struct bpf_packet bpf_pkt_tmp;
+ struct pktap_header_buffer bpfp_header_tmp;
+
+ if (outbound && !d->bd_seesent) {
+ continue;
+ }
+
+ ++d->bd_rcount;
+ slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt,
+ bpf_pkt->bpfp_total_length, 0);
+ if (bp->bif_ifp->if_type == IFT_PKTAP &&
+ bp->bif_dlt == DLT_PKTAP) {
+ /*
+ * Need to copy the bpf_pkt because the conversion
+ * to the v2 pktap header modifies the content of
+ * bpfp_header.
+ */
+ if ((d->bd_flags & BPF_PKTHDRV2) &&
+ bpf_pkt->bpfp_header_length <= sizeof(bpfp_header_tmp)) {
+ bpf_pkt_tmp = *bpf_pkt;
+
+ bpf_pkt = &bpf_pkt_tmp;
+
+ memcpy(&bpfp_header_tmp, bpf_pkt->bpfp_header,
+ bpf_pkt->bpfp_header_length);
+
+ bpf_pkt->bpfp_header = &bpfp_header_tmp;
+
+ convert_to_pktap_header_to_v2(bpf_pkt,
+ !!(d->bd_flags & BPF_TRUNCATE));
+ }
+
+ if (d->bd_flags & BPF_TRUNCATE) {
+ slen = min(slen,
+ get_pkt_trunc_len((u_char *)bpf_pkt,
+ bpf_pkt->bpfp_total_length));
+ }
+ }
+ if (slen != 0) {
+ catchpacket(d, bpf_pkt, slen, outbound);
+ }
+ bpf_pkt = bpf_pkt_saved;
+ }
+
+done:
+ lck_mtx_unlock(bpf_mlock);
+}
+
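+/*
+ * Wrap an mbuf chain (plus an optional link-layer header) in a bpf_packet
+ * and pass it to bpf_tap_imp.
+ */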
+static inline void
+bpf_tap_mbuf(
+ ifnet_t ifp,
+ u_int32_t dlt,
+ mbuf_t m,
+ void* hdr,
+ size_t hlen,
+ int outbound)
+{
+ struct bpf_packet bpf_pkt;
+ struct mbuf *m0;
+
+ if (ifp->if_bpf == NULL) {
+ /* quickly check without taking lock */
+ return;
+ }
+ bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
+ bpf_pkt.bpfp_mbuf = m;
+ bpf_pkt.bpfp_total_length = 0;
+ for (m0 = m; m0 != NULL; m0 = m0->m_next) {
+ bpf_pkt.bpfp_total_length += m0->m_len;
+ }
+ bpf_pkt.bpfp_header = hdr;
+ if (hdr != NULL) {
+ bpf_pkt.bpfp_total_length += hlen;
+ bpf_pkt.bpfp_header_length = hlen;
+ } else {
+ bpf_pkt.bpfp_header_length = 0;
+ }
+ bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
+}
+
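+/* Tap a packet on the outbound path. */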
+void
+bpf_tap_out(
+ ifnet_t ifp,
+ u_int32_t dlt,
+ mbuf_t m,
+ void* hdr,
+ size_t hlen)
+{
+ bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1);
+}
+
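+/* Tap a packet on the inbound path. */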
+void
+bpf_tap_in(
+ ifnet_t ifp,
+ u_int32_t dlt,
+ mbuf_t m,
+ void* hdr,
+ size_t hlen)
+{
+ bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0);
+}
+
+/* Callback registered with Ethernet driver. */
+static int
+bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
+{
+ bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL);
+
+ return 0;
+}
+
+
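+/*
+ * Copy len bytes starting at offset off out of a bpf_packet; only
+ * mbuf-backed packets are supported.
+ */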
+static errno_t
+bpf_copydata(struct bpf_packet *pkt, size_t off, size_t len, void* out_data)
+{
+ errno_t err = 0;
+ if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) {
+ err = mbuf_copydata(pkt->bpfp_mbuf, off, len, out_data);
+ } else {
+ err = EINVAL;
+ }
+
+ return err;
+}
+
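+/*
+ * Copy up to len bytes of a bpf_packet, optional header first, followed by
+ * the packet data, into dst.
+ */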
+static void
+copy_bpf_packet(struct bpf_packet * pkt, void * dst, size_t len)
+{
+ /* copy the optional header */
+ if (pkt->bpfp_header_length != 0) {
+ size_t count = min(len, pkt->bpfp_header_length);
+ bcopy(pkt->bpfp_header, dst, count);
+ len -= count;
+ dst += count;
+ }
+ if (len == 0) {
+ /* nothing past the header */
+ return;
+ }
+ /* copy the packet */
+ switch (pkt->bpfp_type) {
+ case BPF_PACKET_TYPE_MBUF:
+ bpf_mcopy(pkt->bpfp_mbuf, dst, len);
+ break;
+ default:
+ break;
+ }
+}
+
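+/*
+ * Truncation helpers: when BPF_TRUNCATE is set, compute how many bytes of
+ * each protocol to retain, keeping headers and dropping payloads except for
+ * protocols whose payload is needed (e.g. DNS, BOOTP/DHCP, ISAKMP).
+ */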
+static uint16_t
+get_esp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off,
+ const uint16_t remaining_caplen)
+{
+ /*
+ * For some reason tcpdump expects to have one byte beyond the ESP header
+ */
+ uint16_t trunc_len = ESP_HDR_SIZE + 1;
+
+ if (trunc_len > remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ return trunc_len;
+}
+
+static uint16_t
+get_isakmp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off,
+ const uint16_t remaining_caplen)
+{
+ /*
+ * Include the generic payload header
+ */
+ uint16_t trunc_len = ISAKMP_HDR_SIZE;
+
+ if (trunc_len > remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ return trunc_len;
+}
+
+static uint16_t
+get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint16_t off,
+ const uint16_t remaining_caplen)
+{
+ int err = 0;
+ uint16_t trunc_len = 0;
+ char payload[remaining_caplen];
+
+ err = bpf_copydata(pkt, off, remaining_caplen, payload);
+ if (err != 0) {
+ return remaining_caplen;
+ }
+ /*
+ * There are three cases:
+ * - IKE: the payload starts with a 4-byte header set to zero before the ISAKMP header
+ * - keep-alive: 1-byte payload
+ * - otherwise it's ESP
+ */
+ if (remaining_caplen >= 4 &&
+ payload[0] == 0 && payload[1] == 0 &&
+ payload[2] == 0 && payload[3] == 0) {
+ trunc_len = 4 + get_isakmp_trunc_len(pkt, off + 4, remaining_caplen - 4);
+ } else if (remaining_caplen == 1) {
+ trunc_len = 1;
+ } else {
+ trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);
+ }
+
+ if (trunc_len > remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ return trunc_len;
+}
+
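+/*
+ * Retain the UDP header, plus the full payload for DNS and BOOTP/DHCP and
+ * the ISAKMP/ISAKMP NAT-T headers.
+ */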
+static uint16_t
+get_udp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen)
+{
+ int err = 0;
+ uint16_t trunc_len = sizeof(struct udphdr); /* By default no UDP payload */
+
+ if (trunc_len >= remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ struct udphdr udphdr;
+ err = bpf_copydata(pkt, off, sizeof(struct udphdr), &udphdr);
+ if (err != 0) {
+ return remaining_caplen;
+ }
+
+ u_short sport, dport;
+
+ sport = EXTRACT_SHORT(&udphdr.uh_sport);
+ dport = EXTRACT_SHORT(&udphdr.uh_dport);
+
+ if (dport == PORT_DNS || sport == PORT_DNS) {
+ /*
+ * Full UDP payload for DNS
+ */
+ trunc_len = remaining_caplen;
+ } else if ((sport == PORT_BOOTPS && dport == PORT_BOOTPC) ||
+ (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) {
+ /*
+ * Full UDP payload for BOOTP and DHCP
+ */
+ trunc_len = remaining_caplen;
+ } else if (dport == PORT_ISAKMP && sport == PORT_ISAKMP) {
+ /*
+ * Return the ISAKMP header
+ */
+ trunc_len += get_isakmp_trunc_len(pkt, off + sizeof(struct udphdr),
+ remaining_caplen - sizeof(struct udphdr));
+ } else if (dport == PORT_ISAKMP_NATT && sport == PORT_ISAKMP_NATT) {
+ trunc_len += get_isakmp_natt_trunc_len(pkt, off + sizeof(struct udphdr),
+ remaining_caplen - sizeof(struct udphdr));
+ }
+ if (trunc_len >= remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ return trunc_len;
+}
+
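+/*
+ * Retain the TCP header including options; keep the full payload for DNS.
+ */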
+static uint16_t
+get_tcp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen)
+{
+ int err = 0;
+ uint16_t trunc_len = sizeof(struct tcphdr); /* By default no TCP payload */
+ if (trunc_len >= remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ struct tcphdr tcphdr;
+ err = bpf_copydata(pkt, off, sizeof(struct tcphdr), &tcphdr);
+ if (err != 0) {
+ return remaining_caplen;
+ }
+
+ u_short sport, dport;
+ sport = EXTRACT_SHORT(&tcphdr.th_sport);
+ dport = EXTRACT_SHORT(&tcphdr.th_dport);
+
+ if (dport == PORT_DNS || sport == PORT_DNS) {
+ /*
+ * Full TCP payload for DNS
+ */
+ trunc_len = remaining_caplen;
+ } else {
+ trunc_len = tcphdr.th_off << 2;
+ }
+ if (trunc_len >= remaining_caplen) {
+ return remaining_caplen;
+ }
+
+ return trunc_len;
+}
+
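+/*
+ * Dispatch on the IP protocol number to the per-protocol truncation helper.
+ */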
+static uint16_t
+get_proto_trunc_len(uint8_t proto, struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_caplen)
+{
+ uint16_t trunc_len;
+
+ switch (proto) {
+ case IPPROTO_ICMP: {
+ /*
+ * Full ICMP payload
+ */
+ trunc_len = remaining_caplen;
+ break;
+ }
+ case IPPROTO_ICMPV6: {
+ /*
+ * Full ICMPv6 payload
+ */
+ trunc_len = remaining_caplen;
+ break;
+ }
+ case IPPROTO_IGMP: {
+ /*
+ * Full IGMP payload
+ */
+ trunc_len = remaining_caplen;
+ break;
+ }
+ case IPPROTO_UDP: {
+ trunc_len = get_udp_trunc_len(pkt, off, remaining_caplen);
+ break;
+ }
+ case IPPROTO_TCP: {
+ trunc_len = get_tcp_trunc_len(pkt, off, remaining_caplen);
+ break;
+ }
+ case IPPROTO_ESP: {
+ trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);