/*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
struct mbuf *
m_pulldown(struct mbuf *m, int off, int len, int *offp)
{
- struct mbuf *n, *o;
- int hlen, tlen, olen;
- int sharedcluster;
+ struct mbuf *n = NULL, *o = NULL;
+ int hlen = 0, tlen = 0, olen = 0;
+ int sharedcluster = 0;
#if defined(PULLDOWN_STAT) && INET6
static struct mbuf *prev = NULL;
int prevlen = 0, prevmlen = 0;
#endif
n = m;
+
+ /*
+ * Iterate and make n point to the mbuf that
+ * contains the byte at offset "off" from the
+ * start of the mbuf chain.
+ */
while (n != NULL && off > 0) {
if (n->m_len > off)
break;
off -= n->m_len;
n = n->m_next;
}
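/*
 * For illustration (arbitrary values): given a chain with m_len values
 * 10, 20 and 30 and off = 25, the loop above skips the first mbuf
 * (off becomes 15) and stops at the second one since 20 > 15, leaving
 * the search point at <second mbuf, 15>.
 */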
+
/* be sure to point to a non-empty mbuf */
while (n != NULL && n->m_len == 0)
n = n->m_next;
+
if (!n) {
m_freem(m);
return NULL; /* mbuf chain too short */
/*
* the target data is on <n, off>.
* if we got enough data on the mbuf "n", we're done.
+ *
+ * It should be noted that we should only do this either
+ * when the offset is 0, i.e. the data points to the start,
+ * or when the caller specifies an out argument to receive
+ * the offset within the mbuf so it can adjust its data
+ * pointer correctly.
+ *
+ * If the offset is not 0 and the caller did not provide an
+ * out argument for the offset, we should split the mbuf even
+ * when the requested length is contained in the current mbuf.
*/
if ((off == 0 || offp) && len <= n->m_len - off)
goto ok;
#endif
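/*
 * Illustrative caller sketch (not part of this change); "struct foo_hdr"
 * and "skip" are hypothetical stand-ins for a protocol header type and
 * its byte offset within the chain:
 *
 *	n = m_pulldown(m, skip, sizeof (struct foo_hdr), &off);
 *	if (n == NULL)
 *		return;		(m_pulldown already freed the chain)
 *	fh = (struct foo_hdr *)(void *)(mtod(n, caddr_t) + off);
 *
 * With NULL instead of &off, the header is guaranteed to start at
 * offset 0 of the returned mbuf, so fh = mtod(n, struct foo_hdr *)
 * would do.
 */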
/*
- * when len < n->m_len - off and off != 0, it is a special case.
+ * when len <= n->m_len - off and off != 0, it is a special case.
* len bytes from <n, off> sit in a single mbuf, but the caller does
* not like the starting position (off).
* chop the current mbuf into two pieces, set off to 0.
*/
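/*
 * For illustration (arbitrary values): with off = 10, n->m_len = 40 and
 * len = 20, the 30 bytes at <n, 10> are copied into a new mbuf o via
 * m_copym() below, n keeps only its first 10 bytes, o is spliced in
 * right after n, and the search point becomes <o, 0>, so the requested
 * 20 bytes now start at offset 0.
 */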
- if (len < n->m_len - off) {
+ if (len <= n->m_len - off) {
o = m_copym(n, off, n->m_len - off, M_DONTWAIT);
if (o == NULL) {
m_freem(m);
* we need to take hlen from <n, off> and tlen from <n->m_next, 0>,
* and construct contiguous mbuf with m_len == len.
* note that hlen + tlen == len, and tlen > 0.
+ *
+ * Read hlen and tlen below as head length and tail length, respectively.
*/
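/*
 * For illustration (arbitrary values): with off = 2, n->m_len = 5 and
 * len = 8, the assignments below give hlen = 3 (bytes taken from
 * <n, 2>) and tlen = 5 (bytes taken from <n->m_next, 0>), and indeed
 * hlen + tlen == len.
 */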
hlen = n->m_len - off;
tlen = len - hlen;
if ((n->m_flags & M_EXT) == 0)
sharedcluster = 0;
else {
- if (n->m_ext.ext_free)
+ if (m_get_ext_free(n) != NULL)
sharedcluster = 1;
else if (m_mclhasreference(n))
sharedcluster = 1;
else
sharedcluster = 0;
}
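/*
 * A cluster with an external free routine or an outstanding reference
 * may be shared with other mbufs, so its contents must not be modified
 * in place; when sharedcluster is set, both in-place fast paths below
 * are skipped.
 */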
+
+ /*
+ * If we have enough space left in the current mbuf to accommodate the
+ * tail length, copy tlen bytes of data starting from the next mbuf
+ * and adjust the length of the next mbuf accordingly.
+ */
if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen
&& !sharedcluster) {
m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len);
n->m_len += tlen;
m_adj(n->m_next, tlen);
goto ok;
}
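/*
 * For illustration (arbitrary values): with hlen = 3 and tlen = 5, the
 * 5 bytes at <n->m_next, 0> are copied to the end of n, so the full 8
 * requested bytes now sit contiguously at <n, off>, and m_adj() trims
 * those 5 bytes off the front of n->m_next.
 */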
- if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen
- && !sharedcluster) {
+
+ /*
+ * If the next mbuf has enough leading space to accommodate the head
+ * length of the current mbuf, and the resulting length of the next
+ * mbuf is greater than or equal to the requested len bytes, then just
+ * copy hlen bytes from the current mbuf to the next one and adjust
+ * sizes accordingly.
+ */
+ if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen &&
+ (n->m_next->m_len + hlen) >= len && !sharedcluster) {
n->m_next->m_data -= hlen;
n->m_next->m_len += hlen;
bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen);
* (e.g. m_dup_pkthdr), don't zero them out.
*/
if (all) {
- bzero(&m->m_pkthdr.pf_mtag, sizeof (m->m_pkthdr.pf_mtag));
- bzero(&m->m_pkthdr.proto_mtag, sizeof (m->m_pkthdr.proto_mtag));
- bzero(&m->m_pkthdr.necp_mtag, sizeof (m->m_pkthdr.necp_mtag));
+ bzero(&m->m_pkthdr.builtin_mtag._net_mtag,
+ sizeof (m->m_pkthdr.builtin_mtag._net_mtag));
}
}
}
uint16_t
-m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t ulpoff, uint32_t sum)
+m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff,
+ uint32_t datalen, uint32_t sum)
{
- int len = (ulpoff - start);
+ uint32_t total_sub = 0; /* total to subtract */
+ uint32_t mlen = m_pktlen(m); /* frame length */
+ uint32_t bytes = (dataoff + datalen); /* bytes covered by sum */
+ int len;
+
+ ASSERT(bytes <= mlen);
+ /*
+ * Take care of excluding (len > 0) or including (len < 0)
+ * extraneous octets at the beginning of the packet, taking
+ * into account the start offset.
+ */
+ len = (dataoff - start);
+ if (len > 0)
+ total_sub = m_sum16(m, start, len);
+ else if (len < 0)
+ sum += m_sum16(m, dataoff, -len);
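/*
 * For illustration (arbitrary values): with start = 0 and dataoff = 20,
 * e.g. a sum that begins at the IP header while the data of interest
 * begins at the TCP header, len = 20 > 0 and total_sub records the
 * one's complement sum of those 20 leading bytes so they can be
 * subtracted from "sum" below; if start were instead past dataoff
 * (len < 0), the missing bytes would be summed and added into "sum".
 */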
+
+ /*
+ * Take care of excluding any postpended extraneous octets.
+ */
+ len = (mlen - bytes);
if (len > 0) {
- uint32_t adj = m_sum16(m, start, len);
- if (adj >= sum)
- sum = ~(adj - sum) & 0xffff;
+ struct mbuf *m0 = m;
+ uint32_t extra = m_sum16(m, bytes, len);
+ uint32_t off = bytes, off0 = off;
+
+ while (off > 0) {
+ if (__improbable(m == NULL)) {
+ panic("%s: invalid mbuf chain %p [off %u, "
+ "len %u]", __func__, m0, off0, len);
+ /* NOTREACHED */
+ }
+ if (off < m->m_len)
+ break;
+ off -= m->m_len;
+ m = m->m_next;
+ }
+
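/*
 * Note on the swap below (explanatory, not from the original source):
 * the partial sum of a region that begins at an odd address comes back
 * with its two byte lanes exchanged relative to the running sum, e.g.
 * the bytes 0x12 0x34 contribute 0x1234 in one lane ordering and 0x3412
 * in the other, hence the byte swap before folding into total_sub.
 */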
+ /* if we started on odd-alignment, swap the value */
+ if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1)
+ total_sub += ((extra << 8) & 0xffff) | (extra >> 8);
+ else
+ total_sub += extra;
+
+ total_sub = (total_sub >> 16) + (total_sub & 0xffff);
+ }
+
+ /*
+ * 1's complement subtract any extraneous octets.
+ */
+ if (total_sub != 0) {
+ if (total_sub >= sum)
+ sum = ~(total_sub - sum) & 0xffff;
else
- sum -= adj;
- } else if (len < 0) {
- sum += m_sum16(m, ulpoff, -len);
+ sum -= total_sub;
}
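/*
 * Worked example (arbitrary values): with sum = 0x1234 and
 * total_sub = 0x4000, total_sub >= sum, so the result is
 * ~(0x4000 - 0x1234) & 0xffff = ~0x2dcc & 0xffff = 0xd233; with the
 * operands reversed (sum = 0x4000, total_sub = 0x1234), the plain
 * subtraction yields 0x2dcc.
 */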
- ADDCARRY(sum);
+ /* fold 32-bit to 16-bit */
+ sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */
+ sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */
+ sum = (sum >> 16) + (sum & 0xffff); /* final carry */
- return (sum);
+ return (sum & 0xffff);
}
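/*
 * Illustrative call (not part of this change): adjust a partial one's
 * complement sum "csum_rx", computed starting at frame offset
 * "csum_start", so that it covers only the "datalen" bytes of
 * upper-layer payload beginning at "ulp_off"; the variable names here
 * are hypothetical.
 *
 *	sum = m_adj_sum16(m, csum_start, ulp_off, datalen, csum_rx);
 */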
-extern int cpu_in_cksum(struct mbuf *m, int len, int off, uint32_t initial_sum);
-
uint16_t
m_sum16(struct mbuf *m, uint32_t off, uint32_t len)
{
* a M_PKTHDR one.
*/
if ((mlen = m_length2(m, NULL)) < (off + len)) {
- panic("%s: mbuf len (%d) < off+len (%d+%d)\n", __func__,
- mlen, off, len);
+ panic("%s: mbuf %p len (%d) < off+len (%d+%d)\n", __func__,
+ m, mlen, off, len);
+ /* NOTREACHED */
}
- return (~cpu_in_cksum(m, len, off, 0) & 0xffff);
+ return (os_cpu_in_cksum_mbuf(m, len, off, 0));
}
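/*
 * Illustrative call (not part of this change): the 16-bit one's
 * complement sum over the first "hlen" bytes of the chain, e.g. an IP
 * header of length hlen, would be m_sum16(m, 0, hlen); "hlen" is a
 * hypothetical variable here.
 */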