+/* m_mclget() adds an mbuf cluster to a normal mbuf */
+struct mbuf *
+m_mclget(
+ struct mbuf *m,
+ int nowait)
+{
+ MCLALLOC(m->m_ext.ext_buf, nowait);
+ if (m->m_ext.ext_buf) {
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_size = MCLBYTES;
+ m->m_ext.ext_free = 0;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
+ &m->m_ext.ext_refs;
+ }
+
+ return m;
+}
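+
+/*
+ * Illustrative sketch, not part of this change: the typical caller pattern
+ * for m_mclget(). The mbuf is returned even when no cluster could be
+ * attached, so the caller must test M_EXT before touching the buffer.
+ * The function name below is hypothetical.
+ */
+static struct mbuf *
+example_mget_with_cluster(int how)
+{
+    struct mbuf *m;
+
+    MGETHDR(m, how, MT_DATA);
+    if (m == NULL)
+        return NULL;
+    m = m_mclget(m, how);
+    if ((m->m_flags & M_EXT) == 0) {
+        m_free(m); /* mbuf came back without a cluster attached */
+        return NULL;
+    }
+    /* m->m_data now points at MCLBYTES of cluster storage */
+    return m;
+}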
+
+/* m_mclalloc() allocates an mbuf cluster */
+caddr_t
+m_mclalloc(
+ int nowait)
+{
+ caddr_t p;
+
+ (void)m_clalloc(1, nowait, MCLBYTES, 0); /* takes the MBUF_LOCK, but doesn't release it... */
+ if ((p = (caddr_t)mclfree)) {
+ ++mclrefcnt[mtocl(p)];
+ mbstat.m_clfree--;
+ mclfree = ((union mcluster *)p)->mcl_next;
+ } else {
+ mbstat.m_drops++;
+ }
+ MBUF_UNLOCK();
+
+ return p;
+}
+
+/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
+ * freeing the cluster if the reference count has reached 0. */
+void
+m_mclfree(
+ caddr_t p)
+{
+ MBUF_LOCK();
+
+ m_range_check(p);
+
+ if (--mclrefcnt[mtocl(p)] == 0) {
+ ((union mcluster *)(p))->mcl_next = mclfree;
+ mclfree = (union mcluster *)(p);
+ mbstat.m_clfree++;
+ }
+ MBUF_UNLOCK();
+}
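+
+/*
+ * Illustrative sketch, not part of this change: a bare cluster from
+ * m_mclalloc() carries one reference and must eventually be handed back
+ * to m_mclfree(), which returns it to the mclfree list once the count
+ * reaches zero. The function name below is hypothetical.
+ */
+static void
+example_cluster_roundtrip(int how)
+{
+    caddr_t p;
+
+    p = m_mclalloc(how);
+    if (p == NULL)
+        return; /* pool was empty; mbstat.m_drops was bumped */
+    /* ... fill up to MCLBYTES bytes at p ... */
+    m_mclfree(p);
+}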
+
+/* m_mclhasreference() checks whether the cluster of an mbuf is referenced by another mbuf */
+int
+m_mclhasreference(
+ struct mbuf *m)
+{
+ return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
+}
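+
+/*
+ * Illustrative sketch, not part of this change: before modifying cluster
+ * data in place, a writer should check m_mclhasreference(); a shared
+ * cluster must be deep-copied first. This assumes m_dup(), the deep-copy
+ * routine defined elsewhere in this file; the function name below is
+ * hypothetical.
+ */
+static struct mbuf *
+example_make_writable(struct mbuf *m, int how)
+{
+    struct mbuf *n;
+
+    if ((m->m_flags & M_EXT) == 0 || !m_mclhasreference(m))
+        return m; /* sole owner; safe to write in place */
+    n = m_dup(m, how); /* copy the data out of the shared cluster */
+    m_freem(m); /* drop our reference to the original chain */
+    return n; /* NULL if the copy failed */
+}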
+
+__private_extern__ caddr_t
+m_bigalloc(int nowait)
+{
+ caddr_t p;
+
+ (void)m_clalloc(1, nowait, NBPG, 0); /* takes the MBUF_LOCK, but doesn't release it... */
+ if ((p = (caddr_t)mbigfree)) {
+ if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
+ panic("m_bigalloc mclrefcnt %x mismatch %d != %d",
+ p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
+ if (mclrefcnt[mtocl(p)] || mclrefcnt[mtocl(p) + 1])
+ panic("m_bigalloc mclrefcnt %x not null %d != %d",
+ p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
+ ++mclrefcnt[mtocl(p)];
+ ++mclrefcnt[mtocl(p) + 1];
+ mbstat.m_bigclfree--;
+ mbigfree = ((union mbigcluster *)p)->mbc_next;
+ } else {
+ mbstat.m_drops++;
+ }
+ MBUF_UNLOCK();
+ return p;
+}
+
+__private_extern__ void
+m_bigfree(caddr_t p, __unused u_int size, __unused caddr_t arg)
+{
+ m_range_check(p);
+
+ if (mclrefcnt[mtocl(p)] != mclrefcnt[mtocl(p) + 1])
+ panic("m_bigfree mclrefcnt %x mismatch %d != %d",
+ p, mclrefcnt[mtocl(p)], mclrefcnt[mtocl(p) + 1]);
+ --mclrefcnt[mtocl(p)];
+ --mclrefcnt[mtocl(p) + 1];
+ if (mclrefcnt[mtocl(p)] == 0) {
+ ((union mbigcluster *)(p))->mbc_next = mbigfree;
+ mbigfree = (union mbigcluster *)(p);
+ mbstat.m_bigclfree++;
+ }
+}
+
+/* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
+__private_extern__ struct mbuf *
+m_mbigget(struct mbuf *m, int nowait)
+{
+ m->m_ext.ext_buf = m_bigalloc(nowait);
+ if (m->m_ext.ext_buf) {
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_size = NBPG;
+ m->m_ext.ext_free = m_bigfree;
+ m->m_ext.ext_arg = 0;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
+ &m->m_ext.ext_refs;
+ }
+
+ return m;
+}
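+
+/*
+ * Illustrative sketch, not part of this change: m_mbigget() follows the
+ * same pattern as m_mclget(), so the same M_EXT test applies; on success
+ * the attached buffer is NBPG (one page) instead of MCLBYTES. The function
+ * name below is hypothetical.
+ */
+static struct mbuf *
+example_mget_with_bigcluster(int how)
+{
+    struct mbuf *m;
+
+    MGETHDR(m, how, MT_DATA);
+    if (m == NULL)
+        return NULL;
+    m = m_mbigget(m, how);
+    if ((m->m_flags & M_EXT) == 0) {
+        m_free(m); /* no page-sized cluster was available */
+        return NULL;
+    }
+    return m;
+}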
+
+
+/* m_copy_pkthdr() copies the pkthdr from "from" to "to", taking ownership of any m_tag chain */
+void
+m_copy_pkthdr(
+ struct mbuf *to,
+ struct mbuf *from)
+{
+ to->m_pkthdr = from->m_pkthdr;
+ from->m_pkthdr.aux = (struct mbuf *)NULL;
+ SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
+ to->m_flags = from->m_flags & M_COPYFLAGS;
+ to->m_data = to->m_pktdat;
+}
+
+/*
+ * "Move" mbuf pkthdr from "from" to "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ */
+#ifndef __APPLE__
+void
+m_move_pkthdr(struct mbuf *to, struct mbuf *from)
+{
+ KASSERT((to->m_flags & M_EXT) == 0, ("m_move_pkthdr: to has cluster"));
+
+ to->m_flags = from->m_flags & M_COPYFLAGS;
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr; /* especially tags */
+ SLIST_INIT(&from->m_pkthdr.tags); /* purge tags from src */
+ from->m_flags &= ~M_PKTHDR;
+}
+#endif
+
+/*
+ * Duplicate "from"'s mbuf pkthdr in "to".
+ * "from" must have M_PKTHDR set, and "to" must be empty.
+ * In particular, this does a deep copy of the packet tags.
+ */
+static int
+m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
+{
+ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
+ if ((to->m_flags & M_EXT) == 0)
+ to->m_data = to->m_pktdat;
+ to->m_pkthdr = from->m_pkthdr;
+ SLIST_INIT(&to->m_pkthdr.tags);
+ return (m_tag_copy_chain(to, from, how));
+}
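+
+/*
+ * Illustrative sketch, not part of this change: unlike m_copy_pkthdr(),
+ * which takes ownership of the tag chain, m_dup_pkthdr() leaves "from"
+ * intact and deep-copies the tags. This assumes the KAME convention that
+ * m_tag_copy_chain() returns nonzero on success; the function name below
+ * is hypothetical.
+ */
+static struct mbuf *
+example_dup_header(struct mbuf *from, int how)
+{
+    struct mbuf *to;
+
+    MGETHDR(to, how, MT_DATA);
+    if (to == NULL)
+        return NULL;
+    if (m_dup_pkthdr(to, from, how) == 0) {
+        m_free(to); /* tag allocation failed */
+        return NULL;
+    }
+    return to;
+}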
+
+/*
+ * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
+ * if wantall is not set, return whatever number were available. Set up
+ * the first num_with_pkthdrs with mbuf hdrs configured as packet headers;
+ * these are chained on the m_nextpkt field. Any packets requested beyond
+ * this are chained onto the last packet header's m_next field.
+ * The size of the cluster is controlled by the parameter bufsize.
+ */
+__private_extern__ struct mbuf *
+m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, int how, int wantall, size_t bufsize)
+{
+ struct mbuf *m;
+ struct mbuf **np, *top;
+ unsigned int num, needed = *num_needed;
+
+ if (bufsize != MCLBYTES && bufsize != NBPG)
+ return 0;
+
+ top = NULL;
+ np = &top;
+
+ (void)m_clalloc(needed, how, bufsize, 0); /* takes the MBUF_LOCK, but doesn't release it... */
+
+ for (num = 0; num < needed; num++) {
+ m_range_check(mfree);
+ m_range_check(mclfree);
+ m_range_check(mbigfree);
+
+ if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
+ /* mbuf + cluster are available */
+ m = mfree;
+ MCHECK(m);
+ mfree = m->m_next;
+ ++mclrefcnt[mtocl(m)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[MT_DATA]++;
+ if (bufsize == NBPG) {
+ m->m_ext.ext_buf = (caddr_t)mbigfree; /* get the big cluster */
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
+ mbstat.m_bigclfree--;
+ mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
+ m->m_ext.ext_free = m_bigfree;
+ m->m_ext.ext_size = NBPG;
+ } else {
+ m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
+ mbstat.m_clfree--;
+ mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
+ m->m_ext.ext_free = 0;
+ m->m_ext.ext_size = MCLBYTES;
+ }
+ m->m_ext.ext_arg = 0;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = MT_DATA;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_len = 0;
+
+ if (num_with_pkthdrs == 0)
+ m->m_flags = M_EXT;
+ else {
+ m->m_flags = M_PKTHDR | M_EXT;
+ _M_CLEAR_PKTHDR(m);
+
+ num_with_pkthdrs--;
+ }
+ } else {
+ MBUF_UNLOCK();
+
+ if (num_with_pkthdrs == 0) {
+ MGET(m, how, MT_DATA);
+ } else {
+ MGETHDR(m, how, MT_DATA);
+
+ num_with_pkthdrs--;
+ }
+ if (m == 0)
+ goto fail;
+
+ if (bufsize == NBPG)
+ m = m_mbigget(m, how);
+ else
+ m = m_mclget(m, how);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ goto fail;
+ }
+ MBUF_LOCK();
+ }
+ *np = m;
+
+ if (num_with_pkthdrs)
+ np = &m->m_nextpkt;
+ else
+ np = &m->m_next;
+ }
+ MBUF_UNLOCK();
+
+ *num_needed = num;
+ return (top);
+fail:
+ if (wantall && top) {
+ m_freem(top);
+ return 0;
+ }
+ *num_needed = num; /* report how many packets were actually built */
+ return top;
+}
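+
+/*
+ * Illustrative sketch, not part of this change: how a driver might bulk
+ * allocate receive buffers. With wantall == 0 the call returns as many
+ * packets as it could build and rewrites the count through cnt; the
+ * function name below is hypothetical.
+ */
+static struct mbuf *
+example_fill_rx_ring(unsigned int *cnt)
+{
+    /* every mbuf gets a pkthdr and a single MCLBYTES cluster */
+    return m_getpackets_internal(cnt, *cnt, M_DONTWAIT, 0, MCLBYTES);
+}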
+
+
+/*
+ * Return a list of mbufs linked by m_nextpkt.
+ * Try for num_needed; if wantall is not set, return whatever
+ * number were available.
+ * The size of each mbuf in the list is controlled by the parameter packetlen.
+ * Each mbuf of the list may have a chain of mbufs linked by m_next; each mbuf
+ * in the chain is called a segment.
+ * If maxsegments is not NULL and the value pointed to is not zero, it
+ * specifies the maximum number of segments for a chain of mbufs.
+ * If maxsegments is NULL or the value pointed to is zero, the caller
+ * does not have any restriction on the number of segments.
+ * The actual number of segments of an mbuf chain is returned in the value
+ * pointed to by maxsegments.
+ * When possible the allocation is done under a single lock.
+ */
+
+__private_extern__ struct mbuf *
+m_allocpacket_internal(unsigned int *num_needed, size_t packetlen, unsigned int *maxsegments,
+ int how, int wantall, size_t wantsize)
+{
+ struct mbuf **np, *top;
+ size_t bufsize;
+ unsigned int num;
+ unsigned int numchunks = 0;
+
+ top = NULL;
+ np = &top;
+
+ if (wantsize == 0) {
+ if (packetlen <= MINCLSIZE)
+ bufsize = packetlen;
+ else if (packetlen > MCLBYTES)
+ bufsize = NBPG;
+ else
+ bufsize = MCLBYTES;
+ } else if (wantsize == MCLBYTES || wantsize == NBPG)
+ bufsize = wantsize;
+ else
+ return 0;
+
+ if (bufsize <= MHLEN) {
+ numchunks = 1;
+ } else if (bufsize <= MINCLSIZE) {
+ if (maxsegments != NULL && *maxsegments == 1) {
+ bufsize = MCLBYTES;
+ numchunks = 1;
+ } else {
+ numchunks = 2;
+ }
+ } else if (bufsize == NBPG) {
+ numchunks = ((packetlen - 1) >> PGSHIFT) + 1;
+ } else {
+ numchunks = ((packetlen - 1) >> MCLSHIFT) + 1;
+ }
+ if (maxsegments != NULL) {
+ if (*maxsegments && numchunks > *maxsegments) {
+ *maxsegments = numchunks;
+ return 0;
+ }
+ *maxsegments = numchunks;
+ }
+ /* m_clalloc() takes the MBUF_LOCK, but does not release it */
+ (void)m_clalloc(numchunks, how, (bufsize == NBPG) ? NBPG : MCLBYTES, 0);
+ for (num = 0; num < *num_needed; num++) {
+ struct mbuf **nm, *pkt = 0;
+ size_t len;
+
+ nm = &pkt;
+
+ m_range_check(mfree);
+ m_range_check(mclfree);
+ m_range_check(mbigfree);
+
+ for (len = 0; len < packetlen; ) {
+ struct mbuf *m = NULL;
+
+ if (wantsize == 0 && packetlen > MINCLSIZE) {
+ if (packetlen - len > MCLBYTES)
+ bufsize = NBPG;
+ else
+ bufsize = MCLBYTES;
+ }
+ len += bufsize;
+
+ if (mfree && ((bufsize == NBPG && mbigfree) || (bufsize == MCLBYTES && mclfree))) {
+ /* mbuf + cluster are available */
+ m = mfree;
+ MCHECK(m);
+ mfree = m->m_next;
+ ++mclrefcnt[mtocl(m)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[MT_DATA]++;
+ if (bufsize == NBPG) {
+ m->m_ext.ext_buf = (caddr_t)mbigfree; /* get the big cluster */
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf) + 1];
+ mbstat.m_bigclfree--;
+ mbigfree = ((union mbigcluster *)(m->m_ext.ext_buf))->mbc_next;
+ m->m_ext.ext_free = m_bigfree;
+ m->m_ext.ext_size = NBPG;
+ } else {
+ m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
+ mbstat.m_clfree--;
+ mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
+ m->m_ext.ext_free = 0;
+ m->m_ext.ext_size = MCLBYTES;
+ }
+ m->m_ext.ext_arg = 0;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = MT_DATA;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_len = 0;
+
+ if (pkt == 0) {
+ pkt = m;
+ m->m_flags = M_PKTHDR | M_EXT;
+ _M_CLEAR_PKTHDR(m);
+ } else {
+ m->m_flags = M_EXT;
+ }
+ } else {
+ MBUF_UNLOCK();
+
+ if (pkt == 0) {
+ MGETHDR(m, how, MT_DATA);
+ } else {
+ MGET(m, how, MT_DATA);
+ }
+ if (m == 0) {
+ m_freem(pkt);
+ goto fail;
+ }
+ if (bufsize <= MINCLSIZE) {
+ if (bufsize > MHLEN) {
+ MGET(m->m_next, how, MT_DATA);
+ if (m->m_next == 0) {
+ m_free(m);
+ m_freem(pkt);
+ goto fail;
+ }
+ }
+ } else {
+ if (bufsize == NBPG)
+ m = m_mbigget(m, how);
+ else
+ m = m_mclget(m, how);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ m_freem(pkt);
+ goto fail;
+ }
+ }
+ MBUF_LOCK();
+ }
+ *nm = m;
+ nm = &m->m_next;
+ }
+ *np = pkt;
+ np = &pkt->m_nextpkt;
+ }
+ MBUF_UNLOCK();
+ *num_needed = num;
+
+ return top;
+fail:
+ if (wantall && top) {
+ m_freem(top);
+ return 0;
+ }
+ *num_needed = num;
+
+ return top;
+}
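+
+/*
+ * Illustrative sketch, not part of this change: requesting ten 3KB packets
+ * with no segment limit. packetlen exceeds MCLBYTES, so each chain is built
+ * from NBPG clusters (here a single one per packet); on return num holds
+ * the packet count actually delivered and maxseg the segments per chain.
+ * The function name below is hypothetical.
+ */
+static struct mbuf *
+example_alloc_packets(void)
+{
+    unsigned int num = 10;
+    unsigned int maxseg = 0; /* zero means no caller restriction */
+
+    return m_allocpacket_internal(&num, 3072, &maxseg, M_DONTWAIT, 0, 0);
+}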
+
+
+/* Best effort to get an mbuf cluster + pkthdr under one lock.
+ * If we don't have them available, just bail out and use the regular
+ * path.
+ * Used by drivers to allocate packets on the receive ring.
+ */
+__private_extern__ struct mbuf *
+m_getpacket_how(int how)
+{
+ unsigned int num_needed = 1;
+
+ return m_getpackets_internal(&num_needed, 1, how, 1, MCLBYTES);
+}
+