+ m_tag_delete(m, p);
+}
+
+/* Find a tag, starting from a given position. */
+struct m_tag *
+m_tag_locate(struct mbuf *m, u_int32_t id, u_int16_t type, struct m_tag *t)
+{
+ struct m_tag *p;
+
+ VERIFY(m->m_flags & M_PKTHDR);
+
+ if (t == NULL) {
+ p = SLIST_FIRST(&m->m_pkthdr.tags);
+ } else {
+ VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN);
+ p = SLIST_NEXT(t, m_tag_link);
+ }
+ while (p != NULL) {
+ VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN);
+ if (p->m_tag_id == id && p->m_tag_type == type) {
+ return p;
+ }
+ p = SLIST_NEXT(p, m_tag_link);
+ }
+ return NULL;
+}
+
+/* Copy a single tag. */
+struct m_tag *
+m_tag_copy(struct m_tag *t, int how)
+{
+ struct m_tag *p;
+
+ VERIFY(t != NULL);
+
+ p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how);
+ if (p == NULL) {
+ return NULL;
+ }
+#if CONFIG_MACF_NET
+ /*
+ * XXXMAC: we should probably pass off the initialization, and
+ * copying here? can we hid that KERNEL_TAG_TYPE_MACLABEL is
+ * special from the mbuf code?
+ */
+ if (t != NULL &&
+ t->m_tag_id == KERNEL_MODULE_TAG_ID &&
+ t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) {
+ if (mac_mbuf_tag_init(p, how) != 0) {
+ m_tag_free(p);
+ return NULL;
+ }
+ mac_mbuf_tag_copy(t, p);
+ } else
+#endif
+ bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */
+ return p;
+}
+
+/*
+ * Copy two tag chains. The destination mbuf (to) loses any attached
+ * tags even if the operation fails. This should not be a problem, as
+ * m_tag_copy_chain() is typically called with a newly-allocated
+ * destination mbuf.
+ */
+int
+m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how)
+{
+ struct m_tag *p, *t, *tprev = NULL;
+
+ VERIFY((to->m_flags & M_PKTHDR) && (from->m_flags & M_PKTHDR));
+
+ m_tag_delete_chain(to, NULL);
+ SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) {
+ VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN);
+ t = m_tag_copy(p, how);
+ if (t == NULL) {
+ m_tag_delete_chain(to, NULL);
+ return 0;
+ }
+ if (tprev == NULL) {
+ SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link);
+ } else {
+ SLIST_INSERT_AFTER(tprev, t, m_tag_link);
+ tprev = t;
+ }
+ }
+ return 1;
+}
+
+/* Initialize dynamic and static tags on an mbuf. */
+void
+m_tag_init(struct mbuf *m, int all)
+{
+ VERIFY(m->m_flags & M_PKTHDR);
+
+ SLIST_INIT(&m->m_pkthdr.tags);
+ /*
+ * If the caller wants to preserve static mbuf tags
+ * (e.g. m_dup_pkthdr), don't zero them out.
+ */
+ if (all) {
+ bzero(&m->m_pkthdr.builtin_mtag._net_mtag,
+ sizeof(m->m_pkthdr.builtin_mtag._net_mtag));
+ }
+}
+
+/* Get first tag in chain. */
+struct m_tag *
+m_tag_first(struct mbuf *m)
+{
+ VERIFY(m->m_flags & M_PKTHDR);
+
+ return SLIST_FIRST(&m->m_pkthdr.tags);
+}
+
+/* Get next tag in chain. */
+struct m_tag *
+m_tag_next(struct mbuf *m, struct m_tag *t)
+{
+#pragma unused(m)
+ VERIFY(t != NULL);
+ VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN);
+
+ return SLIST_NEXT(t, m_tag_link);
+}
+
+int
+m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc)
+{
+ u_int32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */
+
+ return m_set_service_class(m, m_service_class_from_val(val));
+}
+
+mbuf_traffic_class_t
+m_get_traffic_class(struct mbuf *m)
+{
+ return MBUF_SC2TC(m_get_service_class(m));
+}
+
+int
+m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc)
+{
+ int error = 0;
+
+ VERIFY(m->m_flags & M_PKTHDR);
+
+ if (MBUF_VALID_SC(sc)) {
+ m->m_pkthdr.pkt_svc = sc;
+ } else {
+ error = EINVAL;
+ }
+
+ return error;
+}
+
+mbuf_svc_class_t
+m_get_service_class(struct mbuf *m)
+{
+ mbuf_svc_class_t sc;
+
+ VERIFY(m->m_flags & M_PKTHDR);
+
+ if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
+ sc = m->m_pkthdr.pkt_svc;
+ } else {
+ sc = MBUF_SC_BE;
+ }
+
+ return sc;
+}
+
+mbuf_svc_class_t
+m_service_class_from_idx(u_int32_t i)
+{
+ mbuf_svc_class_t sc = MBUF_SC_BE;
+
+ switch (i) {
+ case SCIDX_BK_SYS:
+ return MBUF_SC_BK_SYS;
+
+ case SCIDX_BK:
+ return MBUF_SC_BK;
+
+ case SCIDX_BE:
+ return MBUF_SC_BE;
+
+ case SCIDX_RD:
+ return MBUF_SC_RD;
+
+ case SCIDX_OAM:
+ return MBUF_SC_OAM;
+
+ case SCIDX_AV:
+ return MBUF_SC_AV;
+
+ case SCIDX_RV:
+ return MBUF_SC_RV;
+
+ case SCIDX_VI:
+ return MBUF_SC_VI;
+
+ case SCIDX_VO:
+ return MBUF_SC_VO;
+
+ case SCIDX_CTL:
+ return MBUF_SC_CTL;
+
+ default:
+ break;
+ }
+
+ VERIFY(0);
+ /* NOTREACHED */
+ return sc;
+}
+
+mbuf_svc_class_t
+m_service_class_from_val(u_int32_t v)
+{
+ mbuf_svc_class_t sc = MBUF_SC_BE;
+
+ switch (v) {
+ case SCVAL_BK_SYS:
+ return MBUF_SC_BK_SYS;
+
+ case SCVAL_BK:
+ return MBUF_SC_BK;
+
+ case SCVAL_BE:
+ return MBUF_SC_BE;
+
+ case SCVAL_RD:
+ return MBUF_SC_RD;
+
+ case SCVAL_OAM:
+ return MBUF_SC_OAM;
+
+ case SCVAL_AV:
+ return MBUF_SC_AV;
+
+ case SCVAL_RV:
+ return MBUF_SC_RV;
+
+ case SCVAL_VI:
+ return MBUF_SC_VI;
+
+ case SCVAL_VO:
+ return MBUF_SC_VO;
+
+ case SCVAL_CTL:
+ return MBUF_SC_CTL;
+
+ default:
+ break;
+ }
+
+ VERIFY(0);
+ /* NOTREACHED */
+ return sc;
+}
+
/*
 * Adjust a 16-bit 1's complement checksum "sum" (computed starting at
 * packet offset "start") so that it covers exactly the region
 * [dataoff, dataoff + datalen): leading octets before dataoff and any
 * trailing octets past the covered region are subtracted back out; if
 * dataoff precedes start, the missing leading octets are added in.
 * Returns the folded 16-bit result.
 *
 * NOTE(review): offsets appear to be absolute within the packet --
 * confirm against callers.
 */
uint16_t
m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff,
    uint32_t datalen, uint32_t sum)
{
	uint32_t total_sub = 0;			/* total to subtract */
	uint32_t mlen = m_pktlen(m);		/* frame length */
	uint32_t bytes = (dataoff + datalen);	/* bytes covered by sum */
	int len;

	ASSERT(bytes <= mlen);

	/*
	 * Take care of excluding (len > 0) or including (len < 0)
	 * extraneous octets at the beginning of the packet, taking
	 * into account the start offset.
	 */
	len = (dataoff - start);
	if (len > 0) {
		/* checksum began before the data: subtract the prefix */
		total_sub = m_sum16(m, start, len);
	} else if (len < 0) {
		/* checksum began inside the data: add the missing prefix */
		sum += m_sum16(m, dataoff, -len);
	}

	/*
	 * Take care of excluding any postpended extraneous octets.
	 */
	len = (mlen - bytes);
	if (len > 0) {
		struct mbuf *m0 = m;
		uint32_t extra = m_sum16(m, bytes, len);
		uint32_t off = bytes, off0 = off;

		/* walk the chain to the mbuf containing offset "bytes" */
		while (off > 0) {
			if (__improbable(m == NULL)) {
				panic("%s: invalid mbuf chain %p [off %u, "
				    "len %u]", __func__, m0, off0, len);
				/* NOTREACHED */
			}
			if (off < m->m_len) {
				break;
			}
			off -= m->m_len;
			m = m->m_next;
		}

		/*
		 * if we started on odd-alignment, swap the value: the
		 * trailing partial sum is byte-swapped relative to the
		 * full sum when its first octet sits at an odd address.
		 */
		if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) {
			total_sub += ((extra << 8) & 0xffff) | (extra >> 8);
		} else {
			total_sub += extra;
		}

		/* partial fold so total_sub stays within 17 bits */
		total_sub = (total_sub >> 16) + (total_sub & 0xffff);
	}

	/*
	 * 1's complement subtract any extraneous octets.
	 */
	if (total_sub != 0) {
		if (total_sub >= sum) {
			sum = ~(total_sub - sum) & 0xffff;
		} else {
			sum -= total_sub;
		}
	}

	/* fold 32-bit to 16-bit */
	sum = (sum >> 16) + (sum & 0xffff);	/* 17-bit */
	sum = (sum >> 16) + (sum & 0xffff);	/* 16-bit + carry */
	sum = (sum >> 16) + (sum & 0xffff);	/* final carry */

	return sum & 0xffff;
}
+
+uint16_t
+m_sum16(struct mbuf *m, uint32_t off, uint32_t len)
+{
+ int mlen;
+
+ /*
+ * Sanity check
+ *
+ * Use m_length2() instead of m_length(), as we cannot rely on
+ * the caller setting m_pkthdr.len correctly, if the mbuf is
+ * a M_PKTHDR one.
+ */
+ if ((mlen = m_length2(m, NULL)) < (off + len)) {
+ panic("%s: mbuf %p len (%d) < off+len (%d+%d)\n", __func__,
+ m, mlen, off, len);
+ /* NOTREACHED */
+ }
+
+ return os_cpu_in_cksum_mbuf(m, len, off, 0);