+/*
+ * Byte offset of a queued fragment's payload within the reassembled
+ * datagram.  The fragment-offset field occupies the upper 13 bits of
+ * ip6f_offlg in 8-octet units, so masking off the flag bits with
+ * IP6F_OFF_MASK and byte-swapping yields the offset in bytes directly.
+ */
+#define FR_IP6_OFF(fr) \
+ (ntohs((fr)->fr_ip6f_opt.ip6f_offlg & IP6F_OFF_MASK))
+/* Host-order IPv6 payload length (ip6_plen) of a queued fragment. */
+#define FR_IP6_PLEN(fr) (ntohs((fr)->fr_ip6->ip6_plen))
+/*
+ * pf_reassemble6 -- queue an IPv6 fragment and, once every piece of the
+ * datagram has arrived, splice the queued mbufs back into one packet.
+ *
+ * *m0:   mbuf carrying this fragment (freed on drop).
+ * *frag: reassembly descriptor for this datagram, or NULL when this is
+ *        the first fragment seen (a descriptor is allocated here in
+ *        that case).  Cleared to NULL once the datagram completes or
+ *        the descriptor is torn down.
+ * frent: caller-built fragment entry describing this mbuf.
+ * mff:   non-zero while "more fragments" is set in the fragment header.
+ *
+ * Returns the reassembled packet when complete, otherwise NULL (more
+ * fragments outstanding, or this fragment/datagram was dropped).
+ */
+struct mbuf *
+pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag,
+ struct pf_frent *frent, int mff)
+{
+ struct mbuf *m, *m2;
+ struct pf_frent *frea, *frep, *next;
+ struct ip6_hdr *ip6;
+ struct ip6_frag *ip6f;
+ int plen, off, fr_max;
+ uint32_t uoff, csum, csum_flags;
+
+ VERIFY(*frag == NULL || BUFFER_FRAGMENTS(*frag));
+ m = *m0;
+ frep = NULL;
+ ip6 = frent->fr_ip6;
+ ip6f = &frent->fr_ip6f_opt;
+ /*
+  * off:    byte offset of this fragment's payload in the datagram;
+  * uoff:   bytes of headers preceding the payload in this mbuf;
+  * plen:   this fragment's ip6_plen (includes extension headers);
+  * fr_max: offset just past the last payload byte contributed here
+  *         (extension-header bytes beyond the base header excluded).
+  */
+ off = FR_IP6_OFF(frent);
+ uoff = frent->fr_ip6f_hlen;
+ plen = FR_IP6_PLEN(frent);
+ fr_max = off + plen - (frent->fr_ip6f_hlen - sizeof *ip6);
+
+ DPFPRINTF(("0x%llx IPv6 frag plen %u off %u fr_ip6f_hlen %u "
+ "fr_max %u m_len %u\n", (uint64_t)VM_KERNEL_ADDRPERM(m), plen, off,
+ frent->fr_ip6f_hlen, fr_max, m->m_len));
+
+ /*
+  * Leverage partial checksum offload for simple UDP/IP fragments,
+  * as that is the most common case.
+  *
+  * Perform 1's complement adjustment of octets that got included/
+  * excluded in the hardware-calculated checksum value. Also take
+  * care of any trailing bytes and subtract out their partial sum.
+  */
+ if (ip6f->ip6f_nxt == IPPROTO_UDP &&
+ uoff == (sizeof (*ip6) + sizeof (*ip6f)) &&
+ (m->m_pkthdr.csum_flags &
+ (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
+ (CSUM_DATA_VALID | CSUM_PARTIAL)) {
+ uint32_t start = m->m_pkthdr.csum_rx_start;
+ uint32_t ip_len = (sizeof (*ip6) + ntohs(ip6->ip6_plen));
+ /* trailer: bytes beyond the IPv6 datagram (link padding etc.) */
+ int32_t trailer = (m_pktlen(m) - ip_len);
+ uint32_t swbytes = (uint32_t)trailer;
+
+ csum = m->m_pkthdr.csum_rx_val;
+
+ ASSERT(trailer >= 0);
+ if (start != uoff || trailer != 0) {
+ uint16_t s = 0, d = 0;
+
+ /*
+  * Temporarily clear KAME-style embedded scope IDs
+  * (stored in s6_addr16[1] of link-local addresses)
+  * while adjusting the sum -- presumably so the
+  * adjustment sees on-wire address bytes; they are
+  * restored below.  TODO(review): confirm rationale.
+  */
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) {
+ s = ip6->ip6_src.s6_addr16[1];
+ ip6->ip6_src.s6_addr16[1] = 0 ;
+ }
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
+ d = ip6->ip6_dst.s6_addr16[1];
+ ip6->ip6_dst.s6_addr16[1] = 0;
+ }
+
+ /* callee folds in sum */
+ csum = m_adj_sum16(m, start, uoff,
+ (ip_len - uoff), csum);
+ /* account for bytes checksummed in software */
+ if (uoff > start)
+ swbytes += (uoff - start);
+ else
+ swbytes += (start - uoff);
+
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src))
+ ip6->ip6_src.s6_addr16[1] = s;
+ if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst))
+ ip6->ip6_dst.s6_addr16[1] = d;
+
+ }
+ csum_flags = m->m_pkthdr.csum_flags;
+
+ if (swbytes != 0)
+ udp_in6_cksum_stats(swbytes);
+ if (trailer != 0)
+ m_adj(m, -trailer);
+ } else {
+ /* not a simple UDP fragment: no checksum accumulation */
+ csum = 0;
+ csum_flags = 0;
+ }
+
+ /* Invalidate checksum */
+ m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
+
+ /* strip off headers up to the fragment payload */
+ m->m_data += frent->fr_ip6f_hlen;
+ m->m_len -= frent->fr_ip6f_hlen;
+
+ /* Create a new reassembly queue for this packet */
+ if (*frag == NULL) {
+ *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
+ if (*frag == NULL) {
+ /* under pressure: reclaim old fragments, then retry once */
+ pf_flush_fragments();
+ *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
+ if (*frag == NULL)
+ goto drop_fragment;
+ }
+
+ (*frag)->fr_flags = 0;
+ (*frag)->fr_max = 0;
+ (*frag)->fr_af = AF_INET6;
+ (*frag)->fr_srcx.v6addr = frent->fr_ip6->ip6_src;
+ (*frag)->fr_dstx.v6addr = frent->fr_ip6->ip6_dst;
+ (*frag)->fr_p = frent->fr_ip6f_opt.ip6f_nxt;
+ (*frag)->fr_id6 = frent->fr_ip6f_opt.ip6f_ident;
+ (*frag)->fr_timeout = pf_time_second();
+ if (csum_flags != 0) {
+ (*frag)->fr_csum_flags = csum_flags;
+ (*frag)->fr_csum = csum;
+ }
+ LIST_INIT(&(*frag)->fr_queue);
+
+ RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
+ TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
+
+ /* We do not have a previous fragment */
+ frep = NULL;
+ goto insert;
+ }
+
+ /*
+  * If this fragment contains similar checksum offload info
+  * as that of the existing ones, accumulate checksum. Otherwise,
+  * invalidate checksum offload info for the entire datagram.
+  */
+ if (csum_flags != 0 && csum_flags == (*frag)->fr_csum_flags)
+ (*frag)->fr_csum += csum;
+ else if ((*frag)->fr_csum_flags != 0)
+ (*frag)->fr_csum_flags = 0;
+
+ /*
+  * Find a fragment after the current one:
+  * - off contains the real shifted offset.
+  */
+ LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
+ if (FR_IP6_OFF(frea) > off)
+ break;
+ frep = frea;
+ }
+
+ /* an existing descriptor always has at least one queued entry */
+ VERIFY(frep != NULL || frea != NULL);
+
+ /*
+  * Trim any overlap with the preceding fragment off the front of
+  * this one.
+  * NOTE(review): the end-of-frep computation subtracts the full
+  * frep->fr_ip6f_hlen, whereas fr_max above subtracts
+  * (hlen - sizeof *ip6) -- the two differ by sizeof(struct ip6_hdr);
+  * confirm which is intended.
+  */
+ if (frep != NULL &&
+ FR_IP6_OFF(frep) + FR_IP6_PLEN(frep) - frep->fr_ip6f_hlen > off)
+ {
+ u_int16_t precut;
+
+ precut = FR_IP6_OFF(frep) + FR_IP6_PLEN(frep) -
+ frep->fr_ip6f_hlen - off;
+ if (precut >= plen)
+ goto drop_fragment;
+ m_adj(frent->fr_m, precut);
+ DPFPRINTF(("overlap -%d\n", precut));
+ /* Enforce 8 byte boundaries */
+ /*
+  * NOTE(review): ntohs(ip6f_offlg) holds the offset in bytes
+  * (8-octet units << 3), so adding (precut >> 3) advances the
+  * stored offset by precut/8 bytes rather than precut bytes --
+  * confirm the units here are as intended.
+  */
+ frent->fr_ip6f_opt.ip6f_offlg =
+ htons(ntohs(frent->fr_ip6f_opt.ip6f_offlg) +
+ (precut >> 3));
+ off = FR_IP6_OFF(frent);
+ plen -= precut;
+ ip6->ip6_plen = htons(plen);
+ }
+
+ /* Trim or discard any following fragments this one overlaps. */
+ for (; frea != NULL && plen + off > FR_IP6_OFF(frea); frea = next) {
+ u_int16_t aftercut;
+
+ aftercut = plen + off - FR_IP6_OFF(frea);
+ DPFPRINTF(("adjust overlap %d\n", aftercut));
+ if (aftercut < FR_IP6_PLEN(frea) - frea->fr_ip6f_hlen) {
+ /* partial overlap: chop the front off of frea */
+ frea->fr_ip6->ip6_plen = htons(FR_IP6_PLEN(frea) -
+ aftercut);
+ frea->fr_ip6f_opt.ip6f_offlg =
+ htons(ntohs(frea->fr_ip6f_opt.ip6f_offlg) +
+ (aftercut >> 3));
+ m_adj(frea->fr_m, aftercut);
+ break;
+ }
+
+ /* This fragment is completely overlapped, lose it */
+ next = LIST_NEXT(frea, fr_next);
+ m_freem(frea->fr_m);
+ LIST_REMOVE(frea, fr_next);
+ pool_put(&pf_frent_pl, frea);
+ pf_nfrents--;
+ }
+
+ insert:
+ /* Update maximum data size */
+ if ((*frag)->fr_max < fr_max)
+ (*frag)->fr_max = fr_max;
+ /* This is the last segment */
+ if (!mff)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ /* link frent into the queue in offset order */
+ if (frep == NULL)
+ LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
+ else
+ LIST_INSERT_AFTER(frep, frent, fr_next);
+
+ /* Check if we are completely reassembled */
+ if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
+ return (NULL);
+
+ /* Check if we have all the data */
+ off = 0;
+ for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
+ next = LIST_NEXT(frep, fr_next);
+ /*
+  * NOTE(review): this subtracts frent->fr_ip6f_hlen (the
+  * fragment just inserted) rather than frep's own header
+  * length while walking the whole queue -- only equivalent
+  * when every fragment carries the same extension-header
+  * length; confirm intent.
+  */
+ off += FR_IP6_PLEN(frep) - (frent->fr_ip6f_hlen - sizeof *ip6);
+ DPFPRINTF(("frep at %d, next %d, max %d\n",
+ off, next == NULL ? -1 : FR_IP6_OFF(next),
+ (*frag)->fr_max));
+ if (off < (*frag)->fr_max &&
+ (next == NULL || FR_IP6_OFF(next) != off)) {
+ DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
+ off, next == NULL ? -1 : FR_IP6_OFF(next),
+ (*frag)->fr_max));
+ return (NULL);
+ }
+ }
+ DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
+ if (off < (*frag)->fr_max)
+ return (NULL);
+
+ /* We have all the data */
+ frent = LIST_FIRST(&(*frag)->fr_queue);
+ VERIFY(frent != NULL);
+ if (frent->fr_ip6f_hlen + off > IP_MAXPACKET) {
+ DPFPRINTF(("drop: too big: %d\n", off));
+ pf_free_fragment(*frag);
+ *frag = NULL;
+ return (NULL);
+ }
+
+ /* rebuild the IPv6 header of the first fragment for the result */
+ ip6 = frent->fr_ip6;
+ ip6->ip6_nxt = (*frag)->fr_p;
+ ip6->ip6_plen = htons(off);
+ ip6->ip6_src = (*frag)->fr_srcx.v6addr;
+ ip6->ip6_dst = (*frag)->fr_dstx.v6addr;
+
+ if ((*frag)->fr_csum_flags != 0) {
+ /* hand the accumulated partial sum back to the stack */
+ csum = (*frag)->fr_csum;
+
+ ADDCARRY(csum);
+
+ m->m_pkthdr.csum_rx_val = csum;
+ m->m_pkthdr.csum_rx_start = sizeof (struct ip6_hdr);
+ m->m_pkthdr.csum_flags = (*frag)->fr_csum_flags;
+ } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
+ (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
+ /* loopback checksums are always OK */
+ m->m_pkthdr.csum_data = 0xffff;
+ m->m_pkthdr.csum_flags &= ~CSUM_PARTIAL;
+ m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ }
+
+ /* Remove from fragment queue */
+ pf_remove_fragment(*frag);
+ *frag = NULL;
+
+ /*
+  * Re-expose sizeof(ip6_hdr) bytes of the previously stripped
+  * header area in the first fragment's mbuf and copy the rebuilt
+  * base header there (the fragment header itself is dropped).
+  */
+ m = frent->fr_m;
+ m->m_len += sizeof(struct ip6_hdr);
+ m->m_data -= sizeof(struct ip6_hdr);
+ memmove(m->m_data, ip6, sizeof(struct ip6_hdr));
+
+ /* concatenate the remaining fragments' payloads */
+ next = LIST_NEXT(frent, fr_next);
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+ for (frent = next; next != NULL; frent = next) {
+ m2 = frent->fr_m;
+
+ m_cat(m, m2);
+ next = LIST_NEXT(frent, fr_next);
+ pool_put(&pf_frent_pl, frent);
+ pf_nfrents--;
+ }
+
+ /* XXX this should be done elsewhere */
+ if (m->m_flags & M_PKTHDR) {
+ int pktlen = 0;
+ for (m2 = m; m2; m2 = m2->m_next)
+ pktlen += m2->m_len;
+ m->m_pkthdr.len = pktlen;
+ }
+
+ DPFPRINTF(("complete: 0x%llx ip6_plen %d m_pkthdr.len %d\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(m), ntohs(ip6->ip6_plen),
+ m->m_pkthdr.len));
+
+ return m;
+
+ drop_fragment:
+ /* Oops - fail safe - drop packet */
+ pool_put(&pf_frent_pl, frent);
+ --pf_nfrents;
+ m_freem(m);
+ return NULL;
+}
+
+/*
+ * pf_frag6cache -- track an IPv6 datagram whose fragments pass through
+ * unbuffered: each fragment is forwarded immediately while a list of
+ * pf_frcache ranges records which octets have been seen, so duplicates
+ * can be dropped and overlaps trimmed.
+ *
+ * *m0:   mbuf carrying this fragment (may be replaced when the front of
+ *        the payload must be trimmed; freed on drop / out-of-memory).
+ * h/fh:  pointers to the IPv6 header and fragment header in the mbuf.
+ * *frag: cache descriptor for this datagram, NULL on first fragment.
+ * hlen:  offset from the start of the packet to the payload data.
+ * mff:   non-zero while "more fragments" is set.
+ * drop:  caller already deemed this fragment bad; keep the range
+ *        bookkeeping but mark the whole datagram PFFRAG_DROP.
+ * *nomem: set to 1 when an allocation failed and the packet was freed.
+ *
+ * Returns the (possibly trimmed) mbuf to keep processing, or NULL when
+ * the fragment was dropped or memory ran out.
+ */
+static struct mbuf *
+pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh,
+ struct pf_fragment **frag, int hlen, int mff, int drop, int *nomem)
+{
+ struct mbuf *m = *m0;
+ u_int16_t plen, off, fr_max;
+ struct pf_frcache *frp, *fra, *cur = NULL;
+ int hosed = 0;
+
+ VERIFY(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
+ m = *m0;
+ off = ntohs(fh->ip6f_offlg & IP6F_OFF_MASK);
+ plen = ntohs(h->ip6_plen) - (hlen - sizeof *h);
+
+ /*
+  * Apple Modification: dimambro@apple.com. The hlen being passed
+  * into this function includes all the headers associated with
+  * the packet, and may include routing headers, so to get to
+  * the data payload as stored in the original IPv6 header we need
+  * to subtract all those headers and the IP header.
+  *
+  * The 'fr_max' local variable should also contain the offset from the
+  * start of the reassembled packet to the octet just past the end of
+  * the octets in the current fragment where:
+  * - 'off' is the offset from the start of the reassembled packet to
+  *   the first octet in the fragment,
+  * - 'plen' is the length of the "payload data length" excluding all
+  *   the IPv6 headers of the fragment.
+  * - 'hlen' is computed in pf_normalize_ip6() as the offset from the
+  *   start of the IPv6 packet to the beginning of the data.
+  */
+ fr_max = off + plen;
+
+ DPFPRINTF(("0x%llx plen %u off %u fr_max %u\n",
+ (uint64_t)VM_KERNEL_ADDRPERM(m), plen, off, fr_max));
+
+ /* Create a new range queue for this packet */
+ if (*frag == NULL) {
+ *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
+ if (*frag == NULL) {
+ /* under pressure: reclaim old state, then retry once */
+ pf_flush_fragments();
+ *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
+ if (*frag == NULL)
+ goto no_mem;
+ }
+
+ /* Get an entry for the queue */
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL) {
+ pool_put(&pf_cache_pl, *frag);
+ *frag = NULL;
+ goto no_mem;
+ }
+ pf_ncache++;
+
+ (*frag)->fr_flags = PFFRAG_NOBUFFER;
+ (*frag)->fr_max = 0;
+ (*frag)->fr_af = AF_INET6;
+ (*frag)->fr_srcx.v6addr = h->ip6_src;
+ (*frag)->fr_dstx.v6addr = h->ip6_dst;
+ (*frag)->fr_p = fh->ip6f_nxt;
+ (*frag)->fr_id6 = fh->ip6f_ident;
+ (*frag)->fr_timeout = pf_time_second();
+
+ /* first seen range covers exactly this fragment */
+ cur->fr_off = off;
+ cur->fr_end = fr_max;
+ LIST_INIT(&(*frag)->fr_cache);
+ LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
+
+ RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
+ TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
+
+ DPFPRINTF(("frag6cache[%d]: new %d-%d\n", ntohl(fh->ip6f_ident),
+ off, fr_max));
+
+ goto pass;
+ }
+
+ /*
+  * Find a fragment after the current one:
+  * - off contains the real shifted offset.
+  */
+ frp = NULL;
+ LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
+ if (fra->fr_off > off)
+ break;
+ frp = fra;
+ }
+
+ /* an existing descriptor always has at least one cached range */
+ VERIFY(frp != NULL || fra != NULL);
+
+ if (frp != NULL) {
+ int precut;
+
+ /* overlap between the end of frp and the start of this one */
+ precut = frp->fr_end - off;
+ if (precut >= plen) {
+ /* Fragment is entirely a duplicate */
+ DPFPRINTF(("frag6cache[%u]: dead (%d-%d) %d-%d\n",
+ ntohl(fh->ip6f_ident), frp->fr_off, frp->fr_end,
+ off, fr_max));
+ goto drop_fragment;
+ }
+ if (precut == 0) {
+ /* They are adjacent. Fixup cache entry */
+ DPFPRINTF(("frag6cache[%u]: adjacent (%d-%d) %d-%d\n",
+ ntohl(fh->ip6f_ident), frp->fr_off, frp->fr_end,
+ off, fr_max));
+ frp->fr_end = fr_max;
+ } else if (precut > 0) {
+ /* The first part of this payload overlaps with a
+  * fragment that has already been passed.
+  * Need to trim off the first part of the payload.
+  * But to do so easily, we need to create another
+  * mbuf to throw the original header into.
+  */
+
+ DPFPRINTF(("frag6cache[%u]: chop %d (%d-%d) %d-%d\n",
+ ntohl(fh->ip6f_ident), precut, frp->fr_off,
+ frp->fr_end, off, fr_max));
+
+ /*
+  * NOTE(review): trimming precut bytes off the front
+  * advances 'off' but should leave the end of the
+  * fragment where it was; shrinking fr_max here (and
+  * then widening frp to it) looks suspicious --
+  * verify against the IPv4 pf_fragcache() logic.
+  */
+ off += precut;
+ fr_max -= precut;
+ /* Update the previous frag to encompass this one */
+ frp->fr_end = fr_max;
+
+ if (!drop) {
+ /* XXX Optimization opportunity
+  * This is a very heavy way to trim the payload.
+  * we could do it much faster by diddling mbuf
+  * internals but that would be even less legible
+  * than this mbuf magic. For my next trick,
+  * I'll pull a rabbit out of my laptop.
+  */
+ *m0 = m_copym(m, 0, hlen, M_NOWAIT);
+ if (*m0 == NULL)
+ goto no_mem;
+ VERIFY((*m0)->m_next == NULL);
+ m_adj(m, precut + hlen);
+ m_cat(*m0, m);
+ m = *m0;
+ if (m->m_flags & M_PKTHDR) {
+ int pktlen = 0;
+ struct mbuf *t;
+ for (t = m; t; t = t->m_next)
+ pktlen += t->m_len;
+ m->m_pkthdr.len = pktlen;
+ }
+
+ h = mtod(m, struct ip6_hdr *);
+
+ VERIFY((int)m->m_len ==
+ ntohs(h->ip6_plen) - precut);
+ /*
+  * NOTE(review): ip6f_offlg is cleared of
+  * IP6F_OFF_MASK on the line below, so the
+  * ntohs(fh->ip6f_offlg & IP6F_OFF_MASK) term
+  * in the following statement reads back zero
+  * and the stored offset becomes (precut >> 3)
+  * alone -- the old offset appears lost, and the
+  * byte-vs-8-octet units also deserve a check.
+  */
+ fh->ip6f_offlg &= ~IP6F_OFF_MASK;
+ fh->ip6f_offlg |=
+ htons(ntohs(fh->ip6f_offlg & IP6F_OFF_MASK)
+ + (precut >> 3));
+ h->ip6_plen = htons(ntohs(h->ip6_plen) -
+ precut);
+ } else {
+ /* fragment will be dropped; mark state dirty */
+ hosed++;
+ }
+ } else {
+ /* There is a gap between fragments */
+
+ DPFPRINTF(("frag6cache[%u]: gap %d (%d-%d) %d-%d\n",
+ ntohl(fh->ip6f_ident), -precut, frp->fr_off,
+ frp->fr_end, off, fr_max));
+
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL)
+ goto no_mem;
+ pf_ncache++;
+
+ cur->fr_off = off;
+ cur->fr_end = fr_max;
+ LIST_INSERT_AFTER(frp, cur, fr_next);
+ }
+ }
+
+ if (fra != NULL) {
+ int aftercut;
+ int merge = 0;
+
+ /* overlap between the end of this fragment and fra */
+ aftercut = fr_max - fra->fr_off;
+ if (aftercut == 0) {
+ /* Adjacent fragments */
+ DPFPRINTF(("frag6cache[%u]: adjacent %d-%d (%d-%d)\n",
+ ntohl(fh->ip6f_ident), off, fr_max, fra->fr_off,
+ fra->fr_end));
+ fra->fr_off = off;
+ merge = 1;
+ } else if (aftercut > 0) {
+ /* Need to chop off the tail of this fragment */
+ DPFPRINTF(("frag6cache[%u]: chop %d %d-%d (%d-%d)\n",
+ ntohl(fh->ip6f_ident), aftercut, off, fr_max,
+ fra->fr_off, fra->fr_end));
+ fra->fr_off = off;
+ fr_max -= aftercut;
+
+ merge = 1;
+
+ if (!drop) {
+ m_adj(m, -aftercut);
+ if (m->m_flags & M_PKTHDR) {
+ int pktlen = 0;
+ struct mbuf *t;
+ for (t = m; t; t = t->m_next)
+ pktlen += t->m_len;
+ m->m_pkthdr.len = pktlen;
+ }
+ h = mtod(m, struct ip6_hdr *);
+ VERIFY((int)m->m_len ==
+ ntohs(h->ip6_plen) - aftercut);
+ h->ip6_plen =
+ htons(ntohs(h->ip6_plen) - aftercut);
+ } else {
+ hosed++;
+ }
+ } else if (frp == NULL) {
+ /* There is a gap between fragments */
+ DPFPRINTF(("frag6cache[%u]: gap %d %d-%d (%d-%d)\n",
+ ntohl(fh->ip6f_ident), -aftercut, off, fr_max,
+ fra->fr_off, fra->fr_end));
+
+ cur = pool_get(&pf_cent_pl, PR_NOWAIT);
+ if (cur == NULL)
+ goto no_mem;
+ pf_ncache++;
+
+ cur->fr_off = off;
+ cur->fr_end = fr_max;
+ LIST_INSERT_BEFORE(fra, cur, fr_next);
+ }
+
+ /* Need to glue together two separate fragment descriptors */
+ if (merge) {
+ if (cur && fra->fr_off <= cur->fr_end) {
+ /* Need to merge in a previous 'cur' */
+ DPFPRINTF(("frag6cache[%u]: adjacent(merge "
+ "%d-%d) %d-%d (%d-%d)\n",
+ ntohl(fh->ip6f_ident), cur->fr_off,
+ cur->fr_end, off, fr_max, fra->fr_off,
+ fra->fr_end));
+ fra->fr_off = cur->fr_off;
+ LIST_REMOVE(cur, fr_next);
+ pool_put(&pf_cent_pl, cur);
+ pf_ncache--;
+ cur = NULL;
+ } else if (frp && fra->fr_off <= frp->fr_end) {
+ /* Need to merge in a modified 'frp' */
+ VERIFY(cur == NULL);
+ DPFPRINTF(("frag6cache[%u]: adjacent(merge "
+ "%d-%d) %d-%d (%d-%d)\n",
+ ntohl(fh->ip6f_ident), frp->fr_off,
+ frp->fr_end, off, fr_max, fra->fr_off,
+ fra->fr_end));
+ fra->fr_off = frp->fr_off;
+ LIST_REMOVE(frp, fr_next);
+ pool_put(&pf_cent_pl, frp);
+ pf_ncache--;
+ frp = NULL;
+ }
+ }
+ }
+
+ if (hosed) {
+ /*
+  * We must keep tracking the overall fragment even when
+  * we're going to drop it anyway so that we know when to
+  * free the overall descriptor. Thus we drop the frag late.
+  */
+ goto drop_fragment;
+ }
+
+ pass:
+ /* Update maximum data size */
+ if ((*frag)->fr_max < fr_max)
+ (*frag)->fr_max = fr_max;
+
+ /* This is the last segment */
+ if (!mff)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ /* Check if we are completely reassembled */
+ if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
+ LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
+ LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
+ /* Remove from fragment queue */
+ DPFPRINTF(("frag6cache[%u]: done 0-%d\n",
+ ntohl(fh->ip6f_ident), (*frag)->fr_max));
+ pf_free_fragment(*frag);
+ *frag = NULL;
+ }
+
+ return (m);
+
+ no_mem:
+ *nomem = 1;
+
+ /* Still need to pay attention to !IP_MF */
+ if (!mff && *frag != NULL)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ m_freem(m);
+ return (NULL);
+
+ drop_fragment:
+
+ /* Still need to pay attention to !IP_MF */
+ if (!mff && *frag != NULL)
+ (*frag)->fr_flags |= PFFRAG_SEENLAST;
+
+ if (drop) {
+ /* This fragment has been deemed bad. Don't reass */
+ if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
+ DPFPRINTF(("frag6cache[%u]: dropping overall fragment\n",
+ ntohl(fh->ip6f_ident)));
+ (*frag)->fr_flags |= PFFRAG_DROP;
+ }
+
+ m_freem(m);
+ return (NULL);
+}
+