X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/91447636331957f3d9b5ca5b508f07c526b0074d..a39ff7e25e19b3a8c3020042a3872ca9ec9659f1:/bsd/kern/uipc_mbuf2.c diff --git a/bsd/kern/uipc_mbuf2.c b/bsd/kern/uipc_mbuf2.c index a8c8652b2..31edbc0ce 100644 --- a/bsd/kern/uipc_mbuf2.c +++ b/bsd/kern/uipc_mbuf2.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */ @@ -84,6 +90,12 @@ * * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95 */ +/* + * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce + * support for mandatory and extensible security protections. This notice + * is included in support of clause 2.2 (b) of the Apple Public License, + * Version 2.0. 
+ */ /*#define PULLDOWN_DEBUG*/ @@ -93,10 +105,16 @@ #include #include #include -#if defined(PULLDOWN_STAT) && defined(INET6) +#include #include +#include +#if INET6 #include #include +#endif /* INET6 */ + +#if CONFIG_MACF_NET +#include #endif /* @@ -110,15 +128,12 @@ * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster) */ struct mbuf * -m_pulldown(m, off, len, offp) - struct mbuf *m; - int off, len; - int *offp; +m_pulldown(struct mbuf *m, int off, int len, int *offp) { - struct mbuf *n, *o; - int hlen, tlen, olen; - int sharedcluster; -#if defined(PULLDOWN_STAT) && defined(INET6) + struct mbuf *n = NULL, *o = NULL; + int hlen = 0, tlen = 0, olen = 0; + int sharedcluster = 0; +#if defined(PULLDOWN_STAT) && INET6 static struct mbuf *prev = NULL; int prevlen = 0, prevmlen = 0; #endif @@ -131,11 +146,11 @@ m_pulldown(m, off, len, offp) return NULL; /* impossible */ } -#if defined(PULLDOWN_STAT) && defined(INET6) +#if defined(PULLDOWN_STAT) && INET6 ip6stat.ip6s_pulldown++; #endif -#if defined(PULLDOWN_STAT) && defined(INET6) +#if defined(PULLDOWN_STAT) && INET6 /* statistics for m_pullup */ ip6stat.ip6s_pullup++; if (off + len > MHLEN) @@ -207,15 +222,24 @@ m_pulldown(m, off, len, offp) } #endif n = m; + + /* + * Iterate and make n point to the mbuf + * within which the first byte at length + * offset is contained from the start of + * mbuf chain. + */ while (n != NULL && off > 0) { if (n->m_len > off) break; off -= n->m_len; n = n->m_next; } + /* be sure to point non-empty mbuf */ while (n != NULL && n->m_len == 0) n = n->m_next; + if (!n) { m_freem(m); return NULL; /* mbuf chain too short */ @@ -224,21 +248,31 @@ m_pulldown(m, off, len, offp) /* * the target data is on . * if we got enough data on the mbuf "n", we're done. + * + * It should be noted, that we should only do this either + * when offset is 0, i.e. data is pointing to the start + * or when the caller specifies an out argument to get + * the offset value in the mbuf to work with data pointer + * correctly. + * + * If offset is not 0 and caller did not provide out-argument + * to get offset, we should split the mbuf even when the length + * is contained in current mbuf. */ if ((off == 0 || offp) && len <= n->m_len - off) goto ok; -#if defined(PULLDOWN_STAT) && defined(INET6) +#if defined(PULLDOWN_STAT) && INET6 ip6stat.ip6s_pulldown_copy++; #endif /* - * when len < n->m_len - off and off != 0, it is a special case. + * when len <= n->m_len - off and off != 0, it is a special case. * len bytes from sits in single mbuf, but the caller does * not like the starting position (off). * chop the current mbuf into two pieces, set off to 0. */ - if (len < n->m_len - off) { + if (len <= n->m_len - off) { o = m_copym(n, off, n->m_len - off, M_DONTWAIT); if (o == NULL) { m_freem(m); @@ -256,6 +290,8 @@ m_pulldown(m, off, len, offp) * we need to take hlen from and tlen from m_next, 0>, * and construct contiguous mbuf with m_len == len. * note that hlen + tlen == len, and tlen > 0. 
+ * + * Read these variables as head length and tail length */ hlen = n->m_len - off; tlen = len - hlen; @@ -279,13 +315,19 @@ m_pulldown(m, off, len, offp) if ((n->m_flags & M_EXT) == 0) sharedcluster = 0; else { - if (n->m_ext.ext_free) + if (m_get_ext_free(n) != NULL) sharedcluster = 1; else if (m_mclhasreference(n)) sharedcluster = 1; else sharedcluster = 0; } + + /* + * If we have enough space left in current mbuf to accomodate + * tail length, copy tail length worth of data starting with next mbuf + * and adjust the length of next one accordingly. + */ if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen && !sharedcluster) { m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len); @@ -293,8 +335,15 @@ m_pulldown(m, off, len, offp) m_adj(n->m_next, tlen); goto ok; } - if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen - && !sharedcluster) { + + /* + * If have enough leading space in next mbuf to accomodate head length + * of current mbuf, and total resulting length of next mbuf is greater + * than or equal to requested len bytes, then just copy hlen from + * current to the next one and adjust sizes accordingly. + */ + if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen && + (n->m_next->m_len + hlen) >= len && !sharedcluster) { n->m_next->m_data -= hlen; n->m_next->m_len += hlen; bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen); @@ -308,7 +357,7 @@ m_pulldown(m, off, len, offp) * now, we need to do the hard way. don't m_copy as there's no room * on both end. */ -#if defined(PULLDOWN_STAT) && defined(INET6) +#if defined(PULLDOWN_STAT) && INET6 ip6stat.ip6s_pulldown_alloc++; #endif MGET(o, M_DONTWAIT, m->m_type); @@ -353,84 +402,66 @@ ok: } /* - * pkthdr.aux chain manipulation. - * we don't allow clusters at this moment. + * Create and return an m_tag, either by re-using space in a previous tag + * or by allocating a new mbuf/cluster */ -struct mbuf * -m_aux_add(m, af, type) - struct mbuf *m; - int af, type; +struct m_tag * +m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) { - struct mbuf *n; - struct mauxtag *t; - - if ((m->m_flags & M_PKTHDR) == 0) - return NULL; - - n = m_aux_find(m, af, type); - if (n) - return n; - - MGET(n, M_DONTWAIT, m->m_type); - if (n == NULL) - return NULL; - - t = mtod(n, struct mauxtag *); - t->af = af; - t->type = type; - n->m_data += sizeof(struct mauxtag); - n->m_len = 0; - n->m_next = m->m_pkthdr.aux; - m->m_pkthdr.aux = n; - return n; -} + struct m_tag *t = NULL; + struct m_tag *p; -struct mbuf * -m_aux_find(m, af, type) - struct mbuf *m; - int af, type; -{ - struct mbuf *n; - struct mauxtag *t; + if (len < 0) + return (NULL); - if ((m->m_flags & M_PKTHDR) == 0) - return NULL; + if (len + sizeof (struct m_tag) + sizeof (struct m_taghdr) > MLEN) + return (m_tag_alloc(id, type, len, wait)); - for (n = m->m_pkthdr.aux; n; n = n->m_next) { - t = (struct mauxtag *)n->m_dat; - if (t->af == af && t->type == type) - return n; - } - return NULL; -} + /* + * We've exhausted all external cases. Now, go through the m_tag + * chain and see if we can fit it in any of them. + * If not (t == NULL), call m_tag_alloc to store it in a new mbuf. 
+ */ + p = SLIST_FIRST(&buf->m_pkthdr.tags); + while(p != NULL) { + /* 2KCL m_tag */ + if (M_TAG_ALIGN(p->m_tag_len) + + sizeof (struct m_taghdr) > MLEN) { + p = SLIST_NEXT(p, m_tag_link); + continue; + } -void -m_aux_delete(m, victim) - struct mbuf *m; - struct mbuf *victim; -{ - struct mbuf *n, *prev, *next; - struct mauxtag *t; + VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); - if ((m->m_flags & M_PKTHDR) == 0) - return; + struct mbuf *m = m_dtom(p); + struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data; - prev = NULL; - n = m->m_pkthdr.aux; - while (n) { - t = (struct mauxtag *)n->m_dat; - next = n->m_next; - if (n == victim) { - if (prev) - prev->m_next = n->m_next; - else - m->m_pkthdr.aux = n->m_next; - n->m_next = NULL; - m_free(n); - } else - prev = n; - n = next; + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + VERIFY(m->m_flags & M_TAGHDR && !(m->m_flags & M_EXT)); + + /* The mbuf can store this m_tag */ + if (M_TAG_ALIGN(len) <= MLEN - m->m_len) { + t = (struct m_tag *)(void *)(m->m_data + m->m_len); + VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + hdr->refcnt++; + m->m_len += M_TAG_ALIGN(len); + VERIFY(m->m_len <= MLEN); + break; + } + + p = SLIST_NEXT(p, m_tag_link); } + + if (t == NULL) + return (m_tag_alloc(id, type, len, wait)); + + t->m_tag_cookie = M_TAG_VALID_PATTERN; + t->m_tag_type = type; + t->m_tag_len = len; + t->m_tag_id = id; + if (len > 0) + bzero(t + 1, len); + return (t); } /* Get a packet tag structure along with specified data following. */ @@ -440,27 +471,42 @@ m_tag_alloc(u_int32_t id, u_int16_t type, int len, int wait) struct m_tag *t; if (len < 0) - return NULL; -#ifndef __APPLE__ - t = malloc(len + sizeof(struct m_tag), M_PACKET_TAGS, wait); -#else - /*MALLOC(t, struct m_tag *, len + sizeof(struct m_tag), M_TEMP, M_WAITOK);*/ - if (len + sizeof(struct m_tag) <= MLEN) { + return (NULL); + + if (M_TAG_ALIGN(len) + sizeof (struct m_taghdr) <= MLEN) { struct mbuf *m = m_get(wait, MT_TAG); + struct m_taghdr *hdr; + if (m == NULL) - return NULL; - t = (struct m_tag *) m->m_dat; - } else if (len + sizeof(struct m_tag) <= MCLBYTES) { - MCLALLOC((caddr_t)t, wait); - } else + return (NULL); + + m->m_flags |= M_TAGHDR; + + hdr = (struct m_taghdr *)(void *)m->m_data; + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + hdr->refcnt = 1; + m->m_len += sizeof (struct m_taghdr); + t = (struct m_tag *)(void *)(m->m_data + m->m_len); + VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + m->m_len += M_TAG_ALIGN(len); + VERIFY(m->m_len <= MLEN); + } else if (len + sizeof (struct m_tag) <= MCLBYTES) { + t = (struct m_tag *)(void *)m_mclalloc(wait); + } else { t = NULL; -#endif + } + if (t == NULL) - return NULL; + return (NULL); + + VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + t->m_tag_cookie = M_TAG_VALID_PATTERN; t->m_tag_type = type; t->m_tag_len = len; t->m_tag_id = id; - return t; + if (len > 0) + bzero(t + 1, len); + return (t); } @@ -468,26 +514,48 @@ m_tag_alloc(u_int32_t id, u_int16_t type, int len, int wait) void m_tag_free(struct m_tag *t) { -#ifndef __APPLE__ - free(t, M_PACKET_TAGS); -#else - /* FREE(t, M_TEMP); */ +#if CONFIG_MACF_NET + if (t != NULL && + t->m_tag_id == KERNEL_MODULE_TAG_ID && + t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) + mac_mbuf_tag_destroy(t); +#endif if (t == NULL) return; - if (t->m_tag_len <= MLEN) { + + VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN); + + if (M_TAG_ALIGN(t->m_tag_len) + sizeof (struct m_taghdr) <= MLEN) { struct mbuf * m = m_dtom(t); - m_free(m); + VERIFY(m->m_flags & M_TAGHDR); + struct m_taghdr *hdr = 
(struct m_taghdr *)(void *)m->m_data; + + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + + /* No other tags in this mbuf */ + if(--hdr->refcnt == 0) { + m_free(m); + return; + } + + /* Pattern-fill the header */ + u_int64_t *fill_ptr = (u_int64_t *)t; + u_int64_t *end_ptr = (u_int64_t *)(t + 1); + while (fill_ptr < end_ptr) { + *fill_ptr = M_TAG_FREE_PATTERN; + fill_ptr++; + } } else { - MCLFREE((caddr_t)t); + m_mclfree((caddr_t)t); } -#endif } /* Prepend a packet tag. */ void m_tag_prepend(struct mbuf *m, struct m_tag *t) { - KASSERT(m && t, ("m_tag_prepend: null argument, m %p t %p", m, t)); + VERIFY(m != NULL && t != NULL); + SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link); } @@ -495,7 +563,9 @@ m_tag_prepend(struct mbuf *m, struct m_tag *t) void m_tag_unlink(struct mbuf *m, struct m_tag *t) { - KASSERT(m && t, ("m_tag_unlink: null argument, m %p t %p", m, t)); + VERIFY(m->m_flags & M_PKTHDR); + VERIFY(t != NULL && t->m_tag_cookie == M_TAG_VALID_PATTERN); + SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link); } @@ -503,7 +573,6 @@ m_tag_unlink(struct mbuf *m, struct m_tag *t) void m_tag_delete(struct mbuf *m, struct m_tag *t) { - KASSERT(m && t, ("m_tag_delete: null argument, m %p t %p", m, t)); m_tag_unlink(m, t); m_tag_free(t); } @@ -514,15 +583,21 @@ m_tag_delete_chain(struct mbuf *m, struct m_tag *t) { struct m_tag *p, *q; - KASSERT(m, ("m_tag_delete_chain: null mbuf")); - if (t != NULL) + VERIFY(m->m_flags & M_PKTHDR); + + if (t != NULL) { p = t; - else + } else { p = SLIST_FIRST(&m->m_pkthdr.tags); + } if (p == NULL) return; - while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) + + VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); + while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) { + VERIFY(q->m_tag_cookie == M_TAG_VALID_PATTERN); m_tag_delete(m, q); + } m_tag_delete(m, p); } @@ -532,17 +607,21 @@ m_tag_locate(struct mbuf *m, u_int32_t id, u_int16_t type, struct m_tag *t) { struct m_tag *p; - KASSERT(m, ("m_tag_find: null mbuf")); - if (t == NULL) + VERIFY(m->m_flags & M_PKTHDR); + + if (t == NULL) { p = SLIST_FIRST(&m->m_pkthdr.tags); - else + } else { + VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN); p = SLIST_NEXT(t, m_tag_link); + } while (p != NULL) { + VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); if (p->m_tag_id == id && p->m_tag_type == type) - return p; + return (p); p = SLIST_NEXT(p, m_tag_link); } - return NULL; + return (NULL); } /* Copy a single tag. */ @@ -551,12 +630,29 @@ m_tag_copy(struct m_tag *t, int how) { struct m_tag *p; - KASSERT(t, ("m_tag_copy: null tag")); - p = m_tag_alloc(t->m_tag_type, t->m_tag_id, t->m_tag_len, how); + VERIFY(t != NULL); + + p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how); if (p == NULL) return (NULL); +#if CONFIG_MACF_NET + /* + * XXXMAC: we should probably pass off the initialization, and + * copying here? can we hid that KERNEL_TAG_TYPE_MACLABEL is + * special from the mbuf code? 
+ */ + if (t != NULL && + t->m_tag_id == KERNEL_MODULE_TAG_ID && + t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) { + if (mac_mbuf_tag_init(p, how) != 0) { + m_tag_free(p); + return (NULL); + } + mac_mbuf_tag_copy(t, p); + } else +#endif bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */ - return p; + return (p); } /* @@ -570,42 +666,284 @@ m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how) { struct m_tag *p, *t, *tprev = NULL; - KASSERT(to && from, - ("m_tag_copy: null argument, to %p from %p", to, from)); + VERIFY((to->m_flags & M_PKTHDR) && (from->m_flags & M_PKTHDR)); + m_tag_delete_chain(to, NULL); SLIST_FOREACH(p, &from->m_pkthdr.tags, m_tag_link) { + VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); t = m_tag_copy(p, how); if (t == NULL) { m_tag_delete_chain(to, NULL); - return 0; + return (0); } - if (tprev == NULL) + if (tprev == NULL) { SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link); - else { + } else { SLIST_INSERT_AFTER(tprev, t, m_tag_link); tprev = t; } } - return 1; + return (1); } -/* Initialize tags on an mbuf. */ +/* Initialize dynamic and static tags on an mbuf. */ void -m_tag_init(struct mbuf *m) +m_tag_init(struct mbuf *m, int all) { + VERIFY(m->m_flags & M_PKTHDR); + SLIST_INIT(&m->m_pkthdr.tags); + /* + * If the caller wants to preserve static mbuf tags + * (e.g. m_dup_pkthdr), don't zero them out. + */ + if (all) { + bzero(&m->m_pkthdr.builtin_mtag._net_mtag, + sizeof (m->m_pkthdr.builtin_mtag._net_mtag)); + } } /* Get first tag in chain. */ struct m_tag * m_tag_first(struct mbuf *m) { - return SLIST_FIRST(&m->m_pkthdr.tags); + VERIFY(m->m_flags & M_PKTHDR); + + return (SLIST_FIRST(&m->m_pkthdr.tags)); } /* Get next tag in chain. */ struct m_tag * -m_tag_next(__unused struct mbuf *m, struct m_tag *t) +m_tag_next(struct mbuf *m, struct m_tag *t) { - return SLIST_NEXT(t, m_tag_link); +#pragma unused(m) + VERIFY(t != NULL); + VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN); + + return (SLIST_NEXT(t, m_tag_link)); +} + +int +m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc) +{ + u_int32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */ + + return (m_set_service_class(m, m_service_class_from_val(val))); +} + +mbuf_traffic_class_t +m_get_traffic_class(struct mbuf *m) +{ + return (MBUF_SC2TC(m_get_service_class(m))); +} + +int +m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc) +{ + int error = 0; + + VERIFY(m->m_flags & M_PKTHDR); + + if (MBUF_VALID_SC(sc)) + m->m_pkthdr.pkt_svc = sc; + else + error = EINVAL; + + return (error); +} + +mbuf_svc_class_t +m_get_service_class(struct mbuf *m) +{ + mbuf_svc_class_t sc; + + VERIFY(m->m_flags & M_PKTHDR); + + if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) + sc = m->m_pkthdr.pkt_svc; + else + sc = MBUF_SC_BE; + + return (sc); +} + +mbuf_svc_class_t +m_service_class_from_idx(u_int32_t i) +{ + mbuf_svc_class_t sc = MBUF_SC_BE; + + switch (i) { + case SCIDX_BK_SYS: + return (MBUF_SC_BK_SYS); + + case SCIDX_BK: + return (MBUF_SC_BK); + + case SCIDX_BE: + return (MBUF_SC_BE); + + case SCIDX_RD: + return (MBUF_SC_RD); + + case SCIDX_OAM: + return (MBUF_SC_OAM); + + case SCIDX_AV: + return (MBUF_SC_AV); + + case SCIDX_RV: + return (MBUF_SC_RV); + + case SCIDX_VI: + return (MBUF_SC_VI); + + case SCIDX_VO: + return (MBUF_SC_VO); + + case SCIDX_CTL: + return (MBUF_SC_CTL); + + default: + break; + } + + VERIFY(0); + /* NOTREACHED */ + return (sc); +} + +mbuf_svc_class_t +m_service_class_from_val(u_int32_t v) +{ + mbuf_svc_class_t sc = MBUF_SC_BE; + + switch (v) { + case SCVAL_BK_SYS: + return (MBUF_SC_BK_SYS); + + 
case SCVAL_BK: + return (MBUF_SC_BK); + + case SCVAL_BE: + return (MBUF_SC_BE); + + case SCVAL_RD: + return (MBUF_SC_RD); + + case SCVAL_OAM: + return (MBUF_SC_OAM); + + case SCVAL_AV: + return (MBUF_SC_AV); + + case SCVAL_RV: + return (MBUF_SC_RV); + + case SCVAL_VI: + return (MBUF_SC_VI); + + case SCVAL_VO: + return (MBUF_SC_VO); + + case SCVAL_CTL: + return (MBUF_SC_CTL); + + default: + break; + } + + VERIFY(0); + /* NOTREACHED */ + return (sc); +} + +uint16_t +m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff, + uint32_t datalen, uint32_t sum) +{ + uint32_t total_sub = 0; /* total to subtract */ + uint32_t mlen = m_pktlen(m); /* frame length */ + uint32_t bytes = (dataoff + datalen); /* bytes covered by sum */ + int len; + + ASSERT(bytes <= mlen); + + /* + * Take care of excluding (len > 0) or including (len < 0) + * extraneous octets at the beginning of the packet, taking + * into account the start offset. + */ + len = (dataoff - start); + if (len > 0) + total_sub = m_sum16(m, start, len); + else if (len < 0) + sum += m_sum16(m, dataoff, -len); + + /* + * Take care of excluding any postpended extraneous octets. + */ + len = (mlen - bytes); + if (len > 0) { + struct mbuf *m0 = m; + uint32_t extra = m_sum16(m, bytes, len); + uint32_t off = bytes, off0 = off; + + while (off > 0) { + if (__improbable(m == NULL)) { + panic("%s: invalid mbuf chain %p [off %u, " + "len %u]", __func__, m0, off0, len); + /* NOTREACHED */ + } + if (off < m->m_len) + break; + off -= m->m_len; + m = m->m_next; + } + + /* if we started on odd-alignment, swap the value */ + if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) + total_sub += ((extra << 8) & 0xffff) | (extra >> 8); + else + total_sub += extra; + + total_sub = (total_sub >> 16) + (total_sub & 0xffff); + } + + /* + * 1's complement subtract any extraneous octets. + */ + if (total_sub != 0) { + if (total_sub >= sum) + sum = ~(total_sub - sum) & 0xffff; + else + sum -= total_sub; + } + + /* fold 32-bit to 16-bit */ + sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */ + sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ + sum = (sum >> 16) + (sum & 0xffff); /* final carry */ + + return (sum & 0xffff); +} + +uint16_t +m_sum16(struct mbuf *m, uint32_t off, uint32_t len) +{ + int mlen; + + /* + * Sanity check + * + * Use m_length2() instead of m_length(), as we cannot rely on + * the caller setting m_pkthdr.len correctly, if the mbuf is + * a M_PKTHDR one. + */ + if ((mlen = m_length2(m, NULL)) < (off + len)) { + panic("%s: mbuf %p len (%d) < off+len (%d+%d)\n", __func__, + m, mlen, off, len); + /* NOTREACHED */ + } + + return (os_cpu_in_cksum_mbuf(m, len, off, 0)); }
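
The reworked m_pulldown() above keeps its long-standing contract: it returns the mbuf that holds len contiguous bytes at chain offset off, stores the in-mbuf offset through offp, and frees the whole chain before returning NULL on failure. As a hedged, kernel-context sketch (sys/mbuf.h and netinet/ip6.h assumed; get_ext_hdr is a hypothetical helper, not an XNU function), a typical caller in the KAME-style IPv6 extension-header path looks roughly like this:

#include <sys/mbuf.h>
#include <netinet/ip6.h>

static struct ip6_ext *
get_ext_hdr(struct mbuf **mp, int off)
{
    struct mbuf *n;
    int off2;

    n = m_pulldown(*mp, off, sizeof (struct ip6_ext), &off2);
    if (n == NULL) {
        *mp = NULL;     /* m_pulldown() already freed the chain */
        return (NULL);
    }
    /* the extension header is now contiguous at off2 within n */
    return ((struct ip6_ext *)(void *)(mtod(n, caddr_t) + off2));
}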
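
The two walk loops that the new comments in m_pulldown() describe ("iterate and make n point to the mbuf ..." plus the empty-mbuf skip) reduce to a small amount of pointer arithmetic. Below is a minimal standalone sketch of just that locate step, using a plain struct seg chain as a stand-in for mbufs; seg_locate, struct seg and contig are illustrative names, not kernel API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct seg {                    /* stand-in for an mbuf */
    const char *data;
    size_t      len;
    struct seg *next;
};

/*
 * Return the segment containing byte `off` of the chain and store the
 * in-segment offset in *offp; return NULL if the chain is too short.
 * `contig` reports whether [off, off + len) already sits in one segment,
 * i.e. whether a real m_pulldown() could return without any copying.
 */
static struct seg *
seg_locate(struct seg *s, size_t off, size_t len, size_t *offp, int *contig)
{
    /* walk until the segment holding byte `off` (first loop in the diff) */
    while (s != NULL && off > 0) {
        if (s->len > off)
            break;
        off -= s->len;
        s = s->next;
    }
    /* never hand back an empty segment (second loop in the diff) */
    while (s != NULL && s->len == 0)
        s = s->next;
    if (s == NULL)
        return NULL;            /* chain too short */
    *offp = off;
    *contig = (len <= s->len - off);
    return s;
}

int
main(void)
{
    struct seg c = { "payload", 7, NULL };
    struct seg b = { "hdr2", 4, &c };
    struct seg a = { "h1", 2, &b };
    size_t off;
    int contig;

    struct seg *s = seg_locate(&a, 3, 3, &off, &contig);
    assert(s == &b && off == 1 && contig);      /* "dr2" sits inside b */

    s = seg_locate(&a, 5, 4, &off, &contig);
    assert(s == &b && off == 3 && !contig);     /* spans b and c */

    printf("locate step ok\n");
    return 0;
}

When contig is false, a real m_pulldown() falls through to the split/copy cases shown in the diff: chopping the mbuf in two, copying the tail into trailing space, copying the head into the next mbuf's leading space, or allocating a fresh mbuf.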
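
m_tag_create(), m_tag_alloc() and m_tag_free() above pack several small tags into one MT_TAG mbuf: a struct m_taghdr holding a reference count sits at m_data, each tag is appended on an 8-byte-aligned boundary, and the mbuf is released only when the last tag is freed (otherwise the freed tag header is pattern-filled). Here is a minimal standalone model of that accounting; SLAB_LEN, TAG_ALIGN, FREE_PATTERN, struct tag and struct taghdr are illustrative stand-ins for MLEN, M_TAG_ALIGN, M_TAG_FREE_PATTERN and the kernel structures, with assumed rather than actual XNU sizes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SLAB_LEN        224u                    /* stand-in for MLEN */
#define TAG_ALIGN(x)    (((x) + 7u) & ~7u)      /* stand-in for M_TAG_ALIGN */
#define FREE_PATTERN    0xde                    /* stand-in for M_TAG_FREE_PATTERN */

struct taghdr {                 /* stand-in for struct m_taghdr */
    uint64_t refcnt;
};

struct tag {                    /* stand-in for struct m_tag */
    uint32_t id;
    uint16_t type;
    uint16_t len;
};

struct slab {                   /* stand-in for an MT_TAG mbuf */
    _Alignas(uint64_t) uint8_t data[SLAB_LEN];
    uint32_t used;
};

/* Append a tag plus payload if it fits, bumping the shared refcount. */
static struct tag *
tag_create(struct slab *s, uint32_t id, uint16_t type, uint16_t len)
{
    uint32_t need = TAG_ALIGN(sizeof(struct tag) + len);
    struct taghdr *hdr = (struct taghdr *)s->data;

    if (s->used == 0) {                 /* first tag: lay down the header */
        hdr->refcnt = 0;
        s->used = sizeof(*hdr);
    }
    if (need > SLAB_LEN - s->used)
        return NULL;                    /* caller would allocate elsewhere */

    struct tag *t = (struct tag *)(s->data + s->used);
    hdr->refcnt++;
    s->used += need;
    t->id = id;
    t->type = type;
    t->len = len;
    memset(t + 1, 0, len);              /* zero the payload */
    return t;
}

/* Drop one tag: pattern-fill its header, release the slab on last reference. */
static void
tag_free(struct slab *s, struct tag *t)
{
    struct taghdr *hdr = (struct taghdr *)s->data;

    if (--hdr->refcnt == 0) {
        s->used = 0;                    /* whole slab is reusable again */
        return;
    }
    memset(t, FREE_PATTERN, sizeof(*t));/* header only, as in m_tag_free() */
}

int
main(void)
{
    struct slab s = { .used = 0 };
    struct tag *tags[16];
    int n = 0;

    /* 24-byte payloads: several of them share one slab before it spills */
    while (n < 16 && (tags[n] = tag_create(&s, 42, 1, 24)) != NULL)
        n++;
    printf("packed %d tags of 24 bytes into a %u-byte slab\n", n, SLAB_LEN);
    assert(n > 1);

    while (n > 0)
        tag_free(&s, tags[--n]);
    assert(s.used == 0);                /* last free released the slab */
    return 0;
}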
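
m_adj_sum16() trims a ones'-complement partial checksum so that it covers only [dataoff, dataoff + datalen): it subtracts the sum of any leading and trailing extraneous bytes, byte-swapping the trailing sum when that region begins on an odd boundary (byte-swapping a 16-bit word is multiplication by 256 modulo 0xffff, so it distributes over the sum). The following standalone sketch demonstrates the trailing-bytes case only; csum16, swap16 and csum_sub are local helpers rather than XNU functions, and the model keys the swap off the data length's parity because its sum starts at offset 0, whereas the kernel keys it off the address parity within the mbuf.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Folded (not complemented) 16-bit ones'-complement sum of buf[0..len) */
static uint16_t
csum16(const uint8_t *buf, size_t len)
{
    uint32_t sum = 0;
    size_t i;

    for (i = 0; i + 1 < len; i += 2)
        sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
    if (len & 1)
        sum += (uint32_t)buf[len - 1] << 8;     /* pad the odd tail byte */
    while (sum > 0xffff)
        sum = (sum >> 16) + (sum & 0xffff);
    return (uint16_t)sum;
}

static uint16_t
swap16(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));
}

/* Ones'-complement subtraction, mirroring the tail of m_adj_sum16() */
static uint16_t
csum_sub(uint16_t sum, uint16_t sub)
{
    if (sub >= sum)
        return (uint16_t)(~(sub - sum) & 0xffff);
    return (uint16_t)(sum - sub);
}

int
main(void)
{
    uint8_t frame[64];
    size_t i, datalen = 37;             /* odd on purpose */

    for (i = 0; i < sizeof(frame); i++)
        frame[i] = (uint8_t)(i * 7 + 3);

    uint16_t full = csum16(frame, sizeof(frame));       /* whole frame */
    uint16_t want = csum16(frame, datalen);             /* data only */
    uint16_t extra = csum16(frame + datalen, sizeof(frame) - datalen);

    /* trailing bytes start at an odd offset: swap before subtracting */
    if (datalen & 1)
        extra = swap16(extra);

    uint16_t got = csum_sub(full, extra);

    /* 0x0000 and 0xffff are the same value in ones'-complement */
    assert(got % 0xffff == want % 0xffff);
    printf("full 0x%04x  trailing 0x%04x  adjusted 0x%04x  direct 0x%04x\n",
        full, extra, got, want);
    return 0;
}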