+#endif /* CONFIG_MBUF_NOEXPAND */
+
+ /*
+ * 1/2 of the map is reserved for 2K clusters. Out of this, 1/16th
+ * of the total number of 2K clusters allocated is reserved and cannot
+ * be turned into mbufs. It can only be used for pure cluster objects.
+ */
+ m_minlimit(MC_CL) = (nclusters >> 5);
+ m_maxlimit(MC_CL) = (nclusters >> 1);
+ m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES;
+ (void) snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl");
+
+ /*
+ * The remaining (15/16th) can be turned into mbufs.
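+ * Each such 2K cluster yields NMBPCL mbufs when converted.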
+ */
+ m_minlimit(MC_MBUF) = 0;
+ m_maxlimit(MC_MBUF) = (m_maxlimit(MC_CL) - m_minlimit(MC_CL)) * NMBPCL;
+ m_maxsize(MC_MBUF) = m_size(MC_MBUF) = MSIZE;
+ (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
+
+ /*
+ * The other 1/2 of the map is reserved for 4K clusters.
+ */
+ m_minlimit(MC_BIGCL) = 0;
+ m_maxlimit(MC_BIGCL) = m_maxlimit(MC_CL) >> 1;
+ m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = NBPG;
+ (void) snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl");
+
+ /*
+ * Set limits for the composite classes.
+ */
+ m_minlimit(MC_MBUF_CL) = 0;
+ m_maxlimit(MC_MBUF_CL) = m_maxlimit(MC_CL) - m_minlimit(MC_CL);
+ m_maxsize(MC_MBUF_CL) = MCLBYTES;
+ m_size(MC_MBUF_CL) = m_size(MC_MBUF) + m_size(MC_CL);
+ (void) snprintf(m_cname(MC_MBUF_CL), MAX_MBUF_CNAME, "mbuf_cl");
+
+ m_minlimit(MC_MBUF_BIGCL) = 0;
+ m_maxlimit(MC_MBUF_BIGCL) = m_maxlimit(MC_BIGCL);
+ m_maxsize(MC_MBUF_BIGCL) = NBPG;
+ m_size(MC_MBUF_BIGCL) = m_size(MC_MBUF) + m_size(MC_BIGCL);
+ (void) snprintf(m_cname(MC_MBUF_BIGCL), MAX_MBUF_CNAME, "mbuf_bigcl");
+
+ /*
+ * And for jumbo classes.
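+ * A 16K cluster spans M16KCLBYTES/MCLBYTES (8) 2K-cluster slabs,
+ * hence the njcl >> 3 limit below.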
+ */
+ m_minlimit(MC_16KCL) = 0;
+ m_maxlimit(MC_16KCL) = (njcl >> 3);
+ m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES;
+ (void) snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl");
+
+ m_minlimit(MC_MBUF_16KCL) = 0;
+ m_maxlimit(MC_MBUF_16KCL) = m_maxlimit(MC_16KCL);
+ m_maxsize(MC_MBUF_16KCL) = M16KCLBYTES;
+ m_size(MC_MBUF_16KCL) = m_size(MC_MBUF) + m_size(MC_16KCL);
+ (void) snprintf(m_cname(MC_MBUF_16KCL), MAX_MBUF_CNAME, "mbuf_16kcl");
+
+ /*
+ * Initialize the legacy mbstat structure.
+ */
+ bzero(&mbstat, sizeof (mbstat));
+ mbstat.m_msize = m_maxsize(MC_MBUF);
+ mbstat.m_mclbytes = m_maxsize(MC_CL);
+ mbstat.m_minclsize = MINCLSIZE;
+ mbstat.m_mlen = MLEN;
+ mbstat.m_mhlen = MHLEN;
+ mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL);
+}
+
+__private_extern__ void
+mbinit(void)
+{
+ unsigned int m;
+ int initmcl = MINCL;
+ int mcl_pages;
+ void *buf;
+
+ if (nmbclusters == 0)
+ nmbclusters = NMBCLUSTERS;
+
+ /* Setup the mbuf table */
+ mbuf_table_init();
+
+ /* Global lock for common layer */
+ mbuf_mlock_grp_attr = lck_grp_attr_alloc_init();
+ mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
+ mbuf_mlock_attr = lck_attr_alloc_init();
+ mbuf_mlock = lck_mtx_alloc_init(mbuf_mlock_grp, mbuf_mlock_attr);
+
+ /* Allocate cluster slabs table */
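+ /* One slab group covers NSLABSPMB slabs; round up so all clusters are covered */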
+ maxslabgrp = P2ROUNDUP(nmbclusters, NSLABSPMB) / NSLABSPMB;
+ MALLOC(slabstbl, mcl_slabg_t **, maxslabgrp * sizeof (mcl_slabg_t *),
+ M_TEMP, M_WAITOK | M_ZERO);
+ VERIFY(slabstbl != NULL);
+
+ /* Allocate audit structures if needed */
+ PE_parse_boot_arg("mbuf_debug", &mbuf_debug);
+ mbuf_debug |= mcache_getflags();
+ if (mbuf_debug & MCF_AUDIT) {
+ MALLOC(mclaudit, mcl_audit_t *,
+ nmbclusters * sizeof (*mclaudit), M_TEMP,
+ M_WAITOK | M_ZERO);
+ VERIFY(mclaudit != NULL);
+
+ mcl_audit_con_cache = mcache_create("mcl_audit_contents",
+ AUDIT_CONTENTS_SIZE, 0, 0, MCR_SLEEP);
+ VERIFY(mcl_audit_con_cache != NULL);
+ }
+
+ /* Calculate the number of pages assigned to the cluster pool */
+ mcl_pages = nmbclusters/(NBPG/CLBYTES);
+ MALLOC(mcl_paddr, int *, mcl_pages * sizeof (int), M_TEMP, M_WAITOK);
+ VERIFY(mcl_paddr != NULL);
+
+ /* Register with the I/O Bus mapper */
+ mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
+ bzero((char *)mcl_paddr, mcl_pages * sizeof (int));
+
+ embutl = (union mcluster *)
+ ((unsigned char *)mbutl + (nmbclusters * MCLBYTES));
+
+ PE_parse_boot_arg("initmcl", &initmcl);
+
+ lck_mtx_lock(mbuf_mlock);
+
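+ /* Prime the 2K cluster pool; initmcl is scaled by the number of 2K clusters per page */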
+ if (m_clalloc(MAX(NBPG/CLBYTES, 1) * initmcl, M_WAIT, MCLBYTES) == 0)
+ panic("mbinit: m_clalloc failed\n");
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ (void) kernel_thread(kernel_task, mbuf_worker_thread_init);
+
+ ref_cache = mcache_create("mext_ref", sizeof (struct ext_ref),
+ 0, 0, MCR_SLEEP);
+
+ /* Create the cache for each class */
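+ /* Composite (mbuf+cluster) classes use the cslab callbacks; the rest use the slab callbacks */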
+ for (m = 0; m < NELEM(mbuf_table); m++) {
+ void *allocfunc, *freefunc, *auditfunc;
+ u_int32_t flags;
+
+ flags = mbuf_debug;
+ if (m_class(m) == MC_MBUF_CL || m_class(m) == MC_MBUF_BIGCL ||
+ m_class(m) == MC_MBUF_16KCL) {
+ allocfunc = mbuf_cslab_alloc;
+ freefunc = mbuf_cslab_free;
+ auditfunc = mbuf_cslab_audit;
+ } else {
+ allocfunc = mbuf_slab_alloc;
+ freefunc = mbuf_slab_free;
+ auditfunc = mbuf_slab_audit;
+ }
+
+ /*
+ * Disable per-CPU caches for jumbo classes if there
+ * is no jumbo cluster pool available in the system.
+ * The cache itself is still created (but will never
+ * be populated) since it simplifies the code.
+ */
+ if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) &&
+ njcl == 0)
+ flags |= MCF_NOCPUCACHE;
+
+ m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m),
+ allocfunc, freefunc, auditfunc, mbuf_slab_notify,
+ (void *)m, flags, MCR_SLEEP);
+ }
+
+ /*
+ * Allocate structure for per-CPU statistics that's aligned
+ * on the CPU cache boundary; this code assumes that we never
+ * uninitialize this framework, since the original address
+ * before alignment is not saved.
+ */
+ ncpu = ml_get_max_cpus();
+ MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_SIZE,
+ M_TEMP, M_WAITOK);
+ VERIFY(buf != NULL);
+
+ mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf, CPU_CACHE_SIZE);
+ bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));
+
+ printf("mbinit: done\n");
+}
+
+/*
+ * Obtain a slab of object(s) from the class's freelist.
+ */
+static mcache_obj_t *
+slab_alloc(mbuf_class_t class, int wait)
+{
+ mcl_slab_t *sp;
+ mcache_obj_t *buf;
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ VERIFY(class != MC_16KCL || njcl > 0);
+
+ /* This should always be NULL for us */
+ VERIFY(m_cobjlist(class) == NULL);
+
+ /*
+ * Treat composite objects as having a longer lifespan by taking a
+ * slab from the reverse direction, in the hope that this reduces
+ * the probability of fragmentation for slabs that hold more than
+ * one buffer chunk (e.g. mbuf slabs). For other slabs, this
+ * probably doesn't make much of a difference.
+ */
+ if (class == MC_MBUF && (wait & MCR_COMP))
+ sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
+ else
+ sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));
+
+ if (sp == NULL) {
+ VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0);
+ /* The slab list for this class is empty */
+ return (NULL);
+ }
+
+ VERIFY(m_infree(class) > 0);
+ VERIFY(!slab_is_detached(sp));
+ VERIFY(sp->sl_class == class &&
+ (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
+ buf = sp->sl_head;
+ VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
+
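+ /*
+ * mbuf slabs carry multiple objects chained via obj_next; all
+ * other slabs hold a single object, so taking it empties the slab.
+ */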
+ if (class == MC_MBUF) {
+ sp->sl_head = buf->obj_next;
+ VERIFY(sp->sl_head != NULL || sp->sl_refcnt == (NMBPCL - 1));
+ } else {
+ sp->sl_head = NULL;
+ }
+ if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
+ slab_nextptr_panic(sp, sp->sl_head);
+ /* In case sl_head is in the map but not in the slab */
+ VERIFY(slab_inrange(sp, sp->sl_head));
+ /* NOTREACHED */
+ }
+
+ /* Increment slab reference */
+ sp->sl_refcnt++;
+
+ if (mclaudit != NULL) {
+ mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
+ mca->mca_uflags = 0;
+ /* Save contents on mbuf objects only */
+ if (class == MC_MBUF)
+ mca->mca_uflags |= MB_SCVALID;
+ }
+
+ if (class == MC_CL) {
+ mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
+ /*
+ * A 2K cluster slab can have at most 1 reference.
+ */
+ VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
+ sp->sl_len == m_maxsize(MC_CL) && sp->sl_head == NULL);
+ } else if (class == MC_BIGCL) {
+ mcl_slab_t *nsp = sp->sl_next;
+ mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
+ m_infree(MC_MBUF_BIGCL);
+ /*
+ * Increment 2nd slab. A 4K big cluster takes
+ * 2 slabs, each having at most 1 reference.
+ */
+ VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
+ sp->sl_len == m_maxsize(MC_BIGCL) && sp->sl_head == NULL);
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ nsp->sl_refcnt++;
+ VERIFY(!slab_is_detached(nsp));
+ VERIFY(nsp->sl_class == MC_BIGCL &&
+ nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
+ nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
+ nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
+ nsp->sl_head == NULL);
+ } else if (class == MC_16KCL) {
+ mcl_slab_t *nsp;
+ int k;
+
+ --m_infree(MC_16KCL);
+ VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
+ sp->sl_len == m_maxsize(MC_16KCL) && sp->sl_head == NULL);
+ /*
+ * Increment 2nd-8th slab. A 16K big cluster takes
+ * 8 cluster slabs, each having at most 1 reference.
+ */
+ for (nsp = sp, k = 1; k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = nsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ nsp->sl_refcnt++;
+ VERIFY(!slab_is_detached(nsp));
+ VERIFY(nsp->sl_class == MC_16KCL &&
+ nsp->sl_flags == (SLF_MAPPED | SLF_PARTIAL) &&
+ nsp->sl_refcnt == 1 && nsp->sl_chunks == 0 &&
+ nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
+ nsp->sl_head == NULL);
+ }
+ } else {
+ ASSERT(class == MC_MBUF);
+ --m_infree(MC_MBUF);
+ /*
+ * If auditing is turned on, this check is
+ * deferred until later in mbuf_slab_audit().
+ */
+ if (mclaudit == NULL)
+ _MCHECK((struct mbuf *)buf);
+ /*
+ * Since we have incremented the reference count above,
+ * an mbuf slab (formerly a 2K cluster slab that was cut
+ * up into mbufs) must have a reference count between 1
+ * and NMBPCL at this point.
+ */
+ VERIFY(sp->sl_refcnt >= 1 &&
+ (unsigned short)sp->sl_refcnt <= NMBPCL &&
+ sp->sl_chunks == NMBPCL && sp->sl_len == m_maxsize(MC_CL));
+ VERIFY((unsigned short)sp->sl_refcnt < NMBPCL ||
+ sp->sl_head == NULL);
+ }
+
+ /* If empty, remove this slab from the class's freelist */
+ if (sp->sl_head == NULL) {
+ VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPCL);
+ slab_remove(sp, class);
+ }
+
+ return (buf);
+}
+
+/*
+ * Place a slab of object(s) back into a class's slab list.
+ */
+static void
+slab_free(mbuf_class_t class, mcache_obj_t *buf)
+{
+ mcl_slab_t *sp;
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ VERIFY(class != MC_16KCL || njcl > 0);
+ VERIFY(buf->obj_next == NULL);
+ sp = slab_get(buf);
+ VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
+ (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
+
+ /* Decrement slab reference */
+ sp->sl_refcnt--;
+
+ if (class == MC_CL || class == MC_BIGCL) {
+ VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
+ /*
+ * A 2K cluster slab can have at most 1 reference
+ * which must be 0 at this point.
+ */
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
+ sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
+ VERIFY(slab_is_detached(sp));
+ if (class == MC_BIGCL) {
+ mcl_slab_t *nsp = sp->sl_next;
+ VERIFY(IS_P2ALIGNED(buf, NBPG));
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ /* Decrement 2nd slab reference */
+ nsp->sl_refcnt--;
+ /*
+ * A 4K big cluster takes 2 slabs, both
+ * must now have 0 reference.
+ */
+ VERIFY(slab_is_detached(nsp));
+ VERIFY(nsp->sl_class == MC_BIGCL &&
+ (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
+ nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
+ nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
+ nsp->sl_head == NULL);
+ }
+ } else if (class == MC_16KCL) {
+ mcl_slab_t *nsp;
+ int k;
+ /*
+ * A 16K cluster takes 8 cluster slabs, all must
+ * now have 0 reference.
+ */
+ VERIFY(IS_P2ALIGNED(buf, NBPG));
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
+ sp->sl_len == m_maxsize(MC_16KCL) && sp->sl_head == NULL);
+ VERIFY(slab_is_detached(sp));
+ for (nsp = sp, k = 1; k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = nsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ nsp->sl_refcnt--;
+ VERIFY(slab_is_detached(nsp));
+ VERIFY(nsp->sl_class == MC_16KCL &&
+ (nsp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) &&
+ nsp->sl_refcnt == 0 && nsp->sl_chunks == 0 &&
+ nsp->sl_len == 0 && nsp->sl_base == sp->sl_base &&
+ nsp->sl_head == NULL);
+ }
+ } else {
+ /*
+ * An mbuf slab has a total of NMBPCL reference counts.
+ * Since we have decremented the reference above, it
+ * must now be between 0 and NMBPCL-1.
+ */
+ VERIFY(sp->sl_refcnt >= 0 &&
+ (unsigned short)sp->sl_refcnt <= (NMBPCL - 1) &&
+ sp->sl_chunks == NMBPCL && sp->sl_len == m_maxsize(MC_CL));
+ VERIFY(sp->sl_refcnt < (NMBPCL - 1) ||
+ (slab_is_detached(sp) && sp->sl_head == NULL));
+ }
+
+ /*
+ * When auditing is enabled, ensure that the buffer still
+ * contains the free pattern. Otherwise it got corrupted
+ * while at the CPU cache layer.
+ */
+ if (mclaudit != NULL) {
+ mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
+ mcache_audit_free_verify(mca, buf, 0, m_maxsize(class));
+ mca->mca_uflags &= ~MB_SCVALID;
+ }
+
+ if (class == MC_CL) {
+ mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
+ } else if (class == MC_BIGCL) {
+ mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
+ m_infree(MC_MBUF_BIGCL);
+ } else if (class == MC_16KCL) {
+ ++m_infree(MC_16KCL);
+ } else {
+ ++m_infree(MC_MBUF);
+ buf->obj_next = sp->sl_head;
+ }
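+ /* Return the object to the head of this slab's freelist */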
+ sp->sl_head = buf;
+
+ /* If all mbufs in the slab are now free, return the 2K cluster we stole earlier */
+ if (sp->sl_refcnt == 0 && class == MC_MBUF) {
+ int i = NMBPCL;
+
+ m_total(MC_MBUF) -= NMBPCL;
+ mbstat.m_mbufs = m_total(MC_MBUF);
+ m_infree(MC_MBUF) -= NMBPCL;
+ mtype_stat_add(MT_FREE, -NMBPCL);
+
+ while (i--) {
+ struct mbuf *m = sp->sl_head;
+ VERIFY(m != NULL);
+ sp->sl_head = m->m_next;
+ m->m_next = NULL;
+ }
+ VERIFY(sp->sl_head == NULL);
+
+ /* Remove the slab from the mbuf class's slab list */
+ slab_remove(sp, class);
+
+ /* Reinitialize it as a 2K cluster slab */
+ slab_init(sp, MC_CL, sp->sl_flags, sp->sl_base, sp->sl_base,
+ sp->sl_len, 0, 1);
+
+ if (mclaudit != NULL)
+ mcache_set_pattern(MCACHE_FREE_PATTERN,
+ (caddr_t)sp->sl_head, m_maxsize(MC_CL));
+
+ mbstat.m_clfree = (++m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
+
+ VERIFY(slab_is_detached(sp));
+ /* And finally switch class */
+ class = MC_CL;
+ }
+
+ /* Reinsert the slab to the class's slab list */
+ if (slab_is_detached(sp))
+ slab_insert(sp, class);
+}
+
+/*
+ * Common allocator for rudimentary objects called by the CPU cache layer
+ * during an allocation request whenever there is no available element in the
+ * bucket layer. It returns one or more elements from the appropriate global
+ * freelist. If the freelist is empty, it will attempt to populate it and
+ * retry the allocation.
+ */
+static unsigned int
+mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ unsigned int need = num;
+ mcache_obj_t **list = *plist;
+
+ ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
+ ASSERT(need > 0);
+
+ lck_mtx_lock(mbuf_mlock);
+
+ for (;;) {
+ if ((*list = slab_alloc(class, wait)) != NULL) {
+ (*list)->obj_next = NULL;
+ list = *plist = &(*list)->obj_next;
+
+ if (--need == 0) {
+ /*
+ * If the number of elements in the freelist has
+ * dropped below the low watermark, asynchronously
+ * populate the freelist now rather than doing
+ * it later when we run out of elements.
+ */
+ if (!mbuf_cached_above(class, wait) &&
+ m_infree(class) < m_total(class) >> 5) {
+ (void) freelist_populate(class, 1,
+ M_DONTWAIT);
+ }
+ break;
+ }
+ } else {
+ VERIFY(m_infree(class) == 0 || class == MC_CL);
+
+ (void) freelist_populate(class, 1,
+ (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT);
+
+ if (m_infree(class) > 0)
+ continue;
+
+ /* Check if there's anything at the cache layer */
+ if (mbuf_cached_above(class, wait))
+ break;
+
+ /* We have nothing and cannot block; give up */
+ if (wait & MCR_NOSLEEP) {
+ if (!(wait & MCR_TRYHARD)) {
+ m_fail_cnt(class)++;
+ mbstat.m_drops++;
+ break;
+ }
+ }
+
+ /*
+ * If the freelist is still empty and the caller is
+ * willing to be blocked, sleep on the wait channel
+ * until an element is available. Otherwise, if
+ * MCR_TRYHARD is set, do our best to satisfy the
+ * request without having to go to sleep.
+ */
+ if (mbuf_worker_ready &&
+ mbuf_sleep(class, need, wait))
+ break;
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ }
+ }
+
+ m_alloc_cnt(class) += num - need;
+ lck_mtx_unlock(mbuf_mlock);
+
+ return (num - need);
+}
+
+/*
+ * Common de-allocator for rudimentary objects called by the CPU cache
+ * layer when one or more elements need to be returned to the appropriate
+ * global freelist.
+ */
+static void
+mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ mcache_obj_t *nlist;
+ unsigned int num = 0;
+ int w;
+
+ ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
+
+ lck_mtx_lock(mbuf_mlock);
+
+ for (;;) {
+ nlist = list->obj_next;
+ list->obj_next = NULL;
+ slab_free(class, list);
+ ++num;
+ if ((list = nlist) == NULL)
+ break;
+ }
+ m_free_cnt(class) += num;
+
+ if ((w = mb_waiters) > 0)
+ mb_waiters = 0;
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ if (w != 0)
+ wakeup(mb_waitchan);
+}
+
+/*
+ * Common auditor for rudimentary objects called by the CPU cache layer
+ * during an allocation or free request. For the former, this is called
+ * after the objects are obtained from either the bucket or slab layer
+ * and before they are returned to the caller. For the latter, this is
+ * called immediately during free and before placing the objects into
+ * the bucket or slab layer.
+ */
+static void
+mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ mcache_audit_t *mca;
+
+ ASSERT(MBUF_CLASS_VALID(class) && !MBUF_CLASS_COMPOSITE(class));
+
+ while (list != NULL) {
+ lck_mtx_lock(mbuf_mlock);
+ mca = mcl_audit_buf2mca(class, list);
+
+ /* Do the sanity checks */
+ if (class == MC_MBUF) {
+ mcl_audit_mbuf(mca, list, FALSE, alloc);
+ ASSERT(mca->mca_uflags & MB_SCVALID);
+ } else {
+ mcl_audit_cluster(mca, list, m_maxsize(class),
+ alloc, TRUE);
+ ASSERT(!(mca->mca_uflags & MB_SCVALID));
+ }
+ /* Record this transaction */
+ mcache_buffer_log(mca, list, m_cache(class));
+ if (alloc)
+ mca->mca_uflags |= MB_INUSE;
+ else
+ mca->mca_uflags &= ~MB_INUSE;
+ /* Unpair the object (unconditionally) */
+ mca->mca_uptr = NULL;
+ lck_mtx_unlock(mbuf_mlock);
+
+ list = list->obj_next;
+ }
+}
+
+/*
+ * Common notify routine for all caches. It is called by mcache when
+ * one or more objects get freed. We use this indication to trigger
+ * the wakeup of any sleeping threads so that they can retry their
+ * allocation requests.
+ */
+static void
+mbuf_slab_notify(void *arg, u_int32_t reason)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ int w;
+
+ ASSERT(MBUF_CLASS_VALID(class));
+
+ if (reason != MCN_RETRYALLOC)
+ return;
+
+ lck_mtx_lock(mbuf_mlock);
+ if ((w = mb_waiters) > 0) {
+ m_notified(class)++;
+ mb_waiters = 0;
+ }
+ lck_mtx_unlock(mbuf_mlock);
+
+ if (w != 0)
+ wakeup(mb_waitchan);
+}
+
+/*
+ * Obtain object(s) from the composite class's freelist.
+ */
+static unsigned int
+cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
+{
+ unsigned int need = num;
+ mcl_slab_t *sp, *clsp, *nsp;
+ struct mbuf *m;
+ mcache_obj_t **list = *plist;
+ void *cl;
+
+ VERIFY(need > 0);
+ VERIFY(class != MC_MBUF_16KCL || njcl > 0);
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ /* Get what we can from the freelist */
+ while ((*list = m_cobjlist(class)) != NULL) {
+ MRANGE(*list);
+
+ m = (struct mbuf *)*list;
+ sp = slab_get(m);
+ cl = m->m_ext.ext_buf;
+ clsp = slab_get(cl);
+ VERIFY(m->m_flags == M_EXT && cl != NULL);
+ VERIFY(MEXT_RFA(m) != NULL && MBUF_IS_COMPOSITE(m));
+ VERIFY(clsp->sl_refcnt == 1);
+ if (class == MC_MBUF_BIGCL) {
+ nsp = clsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ } else if (class == MC_MBUF_16KCL) {
+ int k;
+ for (nsp = clsp, k = 1;
+ k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = nsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ }
+ }
+
+ if ((m_cobjlist(class) = (*list)->obj_next) != NULL &&
+ !MBUF_IN_MAP(m_cobjlist(class))) {
+ slab_nextptr_panic(sp, m_cobjlist(class));
+ /* NOTREACHED */
+ }
+ (*list)->obj_next = NULL;
+ list = *plist = &(*list)->obj_next;
+
+ if (--need == 0)
+ break;
+ }
+ m_infree(class) -= (num - need);
+
+ return (num - need);
+}
+
+/*
+ * Place object(s) back into a composite class's freelist.
+ */
+static unsigned int
+cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
+{
+ mcache_obj_t *o, *tail;
+ unsigned int num = 0;
+ struct mbuf *m, *ms;
+ mcache_audit_t *mca = NULL;
+ mcache_obj_t *ref_list = NULL;
+ mcl_slab_t *clsp, *nsp;
+ void *cl;
+
+ ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
+ VERIFY(class != MC_MBUF_16KCL || njcl > 0);
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ o = tail = list;
+
+ while ((m = ms = (struct mbuf *)o) != NULL) {
+ mcache_obj_t *rfa, *nexto = o->obj_next;
+
+ /* Do the mbuf sanity checks */
+ if (mclaudit != NULL) {
+ mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
+ mcache_audit_free_verify(mca, m, 0, m_maxsize(MC_MBUF));
+ ms = (struct mbuf *)mca->mca_contents;
+ }
+
+ /* Do the cluster sanity checks */
+ cl = ms->m_ext.ext_buf;
+ clsp = slab_get(cl);
+ if (mclaudit != NULL) {
+ size_t size;
+ if (class == MC_MBUF_CL)
+ size = m_maxsize(MC_CL);
+ else if (class == MC_MBUF_BIGCL)
+ size = m_maxsize(MC_BIGCL);
+ else
+ size = m_maxsize(MC_16KCL);
+ mcache_audit_free_verify(mcl_audit_buf2mca(MC_CL,
+ (mcache_obj_t *)cl), cl, 0, size);
+ }
+ VERIFY(ms->m_type == MT_FREE);
+ VERIFY(ms->m_flags == M_EXT);
+ VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+ VERIFY(clsp->sl_refcnt == 1);
+ if (class == MC_MBUF_BIGCL) {
+ nsp = clsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ } else if (class == MC_MBUF_16KCL) {
+ int k;
+ for (nsp = clsp, k = 1;
+ k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = nsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ }
+ }
+
+ /*
+ * If we're asked to purge, restore the actual mbuf using the
+ * contents of the shadow structure (if auditing is enabled)
+ * and clear the EXTF_COMPOSITE flag from the mbuf, as we are
+ * about to free it and the attached cluster back into their caches.
+ */
+ if (purged) {
+ /* Restore constructed mbuf fields */
+ if (mclaudit != NULL)
+ mcl_audit_restore_mbuf(m, mca, TRUE);
+
+ MEXT_REF(m) = 0;
+ MEXT_FLAGS(m) = 0;
+
+ rfa = (mcache_obj_t *)MEXT_RFA(m);
+ rfa->obj_next = ref_list;
+ ref_list = rfa;
+ MEXT_RFA(m) = NULL;
+
+ m->m_type = MT_FREE;
+ m->m_flags = m->m_len = 0;
+ m->m_next = m->m_nextpkt = NULL;
+
+ /* Save mbuf fields and make auditing happy */
+ if (mclaudit != NULL)
+ mcl_audit_mbuf(mca, o, FALSE, FALSE);
+
+ VERIFY(m_total(class) > 0);
+ m_total(class)--;
+
+ /* Free the mbuf */
+ o->obj_next = NULL;
+ slab_free(MC_MBUF, o);
+
+ /* And free the cluster */
+ ((mcache_obj_t *)cl)->obj_next = NULL;
+ if (class == MC_MBUF_CL)
+ slab_free(MC_CL, cl);
+ else if (class == MC_MBUF_BIGCL)
+ slab_free(MC_BIGCL, cl);
+ else
+ slab_free(MC_16KCL, cl);
+ }
+
+ ++num;
+ tail = o;
+ o = nexto;
+ }
+
+ if (!purged) {
+ tail->obj_next = m_cobjlist(class);
+ m_cobjlist(class) = list;
+ m_infree(class) += num;
+ } else if (ref_list != NULL) {
+ mcache_free_ext(ref_cache, ref_list);
+ }
+
+ return (num);
+}
+
+/*
+ * Common allocator for composite objects called by the CPU cache layer
+ * during an allocation request whenever there is no available element in
+ * the bucket layer. It returns one or more composite elements from the
+ * appropriate global freelist. If the freelist is empty, it will attempt
+ * to obtain the rudimentary objects from their caches and construct them
+ * into composite mbuf + cluster objects.
+ */
+static unsigned int
+mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed,
+ int wait)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ mcache_t *cp = NULL;
+ unsigned int num = 0, cnum = 0, want = needed;
+ mcache_obj_t *ref_list = NULL;
+ mcache_obj_t *mp_list = NULL;
+ mcache_obj_t *clp_list = NULL;
+ mcache_obj_t **list;
+ struct ext_ref *rfa;
+ struct mbuf *m;
+ void *cl;
+
+ ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
+ ASSERT(needed > 0);
+
+ VERIFY(class != MC_MBUF_16KCL || njcl > 0);
+
+ /* There should not be any slab for this class */
+ VERIFY(m_slab_cnt(class) == 0 &&
+ m_slablist(class).tqh_first == NULL &&
+ m_slablist(class).tqh_last == NULL);
+
+ lck_mtx_lock(mbuf_mlock);
+
+ /* Try using the freelist first */
+ num = cslab_alloc(class, plist, needed);
+ list = *plist;
+ if (num == needed) {
+ m_alloc_cnt(class) += num;
+ lck_mtx_unlock(mbuf_mlock);
+ return (needed);
+ }
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ /*
+ * We could not satisfy the request using the freelist alone;
+ * allocate from the appropriate rudimentary caches and use
+ * whatever we can get to construct the composite objects.
+ */
+ needed -= num;
+
+ /*
+ * Mark these allocation requests as coming from a composite cache.
+ * Also, if the caller is willing to be blocked, mark the request
+ * with MCR_FAILOK such that we don't end up sleeping at the mbuf
+ * slab layer waiting for the individual object when one or more
+ * of the already-constructed composite objects are available.
+ */
+ wait |= MCR_COMP;
+ if (!(wait & MCR_NOSLEEP))
+ wait |= MCR_FAILOK;
+
+ needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait);
+ if (needed == 0) {
+ ASSERT(mp_list == NULL);
+ goto fail;
+ }
+ if (class == MC_MBUF_CL)
+ cp = m_cache(MC_CL);
+ else if (class == MC_MBUF_BIGCL)
+ cp = m_cache(MC_BIGCL);
+ else
+ cp = m_cache(MC_16KCL);
+ needed = mcache_alloc_ext(cp, &clp_list, needed, wait);
+ if (needed == 0) {
+ ASSERT(clp_list == NULL);
+ goto fail;
+ }
+ needed = mcache_alloc_ext(ref_cache, &ref_list, needed, wait);
+ if (needed == 0) {
+ ASSERT(ref_list == NULL);
+ goto fail;
+ }
+
+ /*
+ * By this time "needed" is MIN(mbuf, cluster, ref). Any leftovers
+ * will be freed accordingly before we return to the caller.
+ */
+ for (cnum = 0; cnum < needed; cnum++) {
+ struct mbuf *ms;
+
+ m = ms = (struct mbuf *)mp_list;
+ mp_list = mp_list->obj_next;
+
+ cl = clp_list;
+ clp_list = clp_list->obj_next;
+ ((mcache_obj_t *)cl)->obj_next = NULL;
+
+ rfa = (struct ext_ref *)ref_list;
+ ref_list = ref_list->obj_next;
+ ((mcache_obj_t *)rfa)->obj_next = NULL;
+
+ /*
+ * If auditing is enabled, construct the shadow mbuf
+ * in the audit structure instead of in the actual one.
+ * mbuf_cslab_audit() will take care of restoring the
+ * contents after the integrity check.
+ */
+ if (mclaudit != NULL) {
+ mcache_audit_t *mca, *cl_mca;
+ size_t size;
+
+ lck_mtx_lock(mbuf_mlock);
+ mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
+ ms = ((struct mbuf *)mca->mca_contents);
+ cl_mca = mcl_audit_buf2mca(MC_CL, (mcache_obj_t *)cl);
+
+ /*
+ * Pair them up. Note that this is done at the time
+ * the mbuf+cluster objects are constructed. This
+ * information should be treated as a "best effort"
+ * debugging hint since more than one mbuf can refer
+ * to a cluster. In that case, the cluster might not
+ * be freed along with the mbuf it was paired with.
+ */
+ mca->mca_uptr = cl_mca;
+ cl_mca->mca_uptr = mca;
+
+ ASSERT(mca->mca_uflags & MB_SCVALID);
+ ASSERT(!(cl_mca->mca_uflags & MB_SCVALID));
+ lck_mtx_unlock(mbuf_mlock);
+
+ /* Technically, they are in the freelist */
+ mcache_set_pattern(MCACHE_FREE_PATTERN, m,
+ m_maxsize(MC_MBUF));
+ if (class == MC_MBUF_CL)
+ size = m_maxsize(MC_CL);
+ else if (class == MC_MBUF_BIGCL)
+ size = m_maxsize(MC_BIGCL);
+ else
+ size = m_maxsize(MC_16KCL);
+ mcache_set_pattern(MCACHE_FREE_PATTERN, cl, size);
+ }
+
+ MBUF_INIT(ms, 0, MT_FREE);
+ if (class == MC_MBUF_16KCL) {
+ MBUF_16KCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
+ } else if (class == MC_MBUF_BIGCL) {
+ MBUF_BIGCL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
+ } else {
+ MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
+ }
+ VERIFY(ms->m_flags == M_EXT);
+ VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+
+ *list = (mcache_obj_t *)m;
+ (*list)->obj_next = NULL;
+ list = *plist = &(*list)->obj_next;
+ }
+
+fail:
+ /*
+ * Free up what's left of the above.
+ */
+ if (mp_list != NULL)
+ mcache_free_ext(m_cache(MC_MBUF), mp_list);
+ if (clp_list != NULL)
+ mcache_free_ext(cp, clp_list);
+ if (ref_list != NULL)
+ mcache_free_ext(ref_cache, ref_list);
+
+ lck_mtx_lock(mbuf_mlock);
+ if (num > 0 || cnum > 0) {
+ m_total(class) += cnum;
+ VERIFY(m_total(class) <= m_maxlimit(class));
+ m_alloc_cnt(class) += num + cnum;
+ }
+ if ((num + cnum) < want)
+ m_fail_cnt(class) += (want - (num + cnum));
+ lck_mtx_unlock(mbuf_mlock);
+
+ return (num + cnum);
+}
+
+/*
+ * Common de-allocator for composite objects called by the CPU cache
+ * layer when one or more elements need to be returned to the appropriate
+ * global freelist.
+ */
+static void
+mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ unsigned int num;
+ int w;
+
+ ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
+
+ lck_mtx_lock(mbuf_mlock);
+
+ num = cslab_free(class, list, purged);
+ m_free_cnt(class) += num;
+
+ if ((w = mb_waiters) > 0)
+ mb_waiters = 0;
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ if (w != 0)
+ wakeup(mb_waitchan);
+}
+
+/*
+ * Common auditor for composite objects called by the CPU cache layer
+ * during an allocation or free request. For the former, this is called
+ * after the objects are obtained from either the bucket or slab layer
+ * and before they are returned to the caller. For the latter, this is
+ * called immediately during free and before placing the objects into
+ * the bucket or slab layer.
+ */
+static void
+mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
+{
+ mbuf_class_t class = (mbuf_class_t)arg;
+ mcache_audit_t *mca;
+ struct mbuf *m, *ms;
+ mcl_slab_t *clsp, *nsp;
+ size_t size;
+ void *cl;
+
+ ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
+
+ while ((m = ms = (struct mbuf *)list) != NULL) {
+ lck_mtx_lock(mbuf_mlock);
+ /* Do the mbuf sanity checks and record its transaction */
+ mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
+ mcl_audit_mbuf(mca, m, TRUE, alloc);
+ mcache_buffer_log(mca, m, m_cache(class));
+ if (alloc)
+ mca->mca_uflags |= MB_COMP_INUSE;
+ else
+ mca->mca_uflags &= ~MB_COMP_INUSE;
+
+ /*
+ * Use the shadow mbuf in the audit structure if we are
+ * freeing, since the contents of the actual mbuf have been
+ * pattern-filled by the above call to mcl_audit_mbuf().
+ */
+ if (!alloc)
+ ms = (struct mbuf *)mca->mca_contents;
+
+ /* Do the cluster sanity checks and record its transaction */
+ cl = ms->m_ext.ext_buf;
+ clsp = slab_get(cl);
+ VERIFY(ms->m_flags == M_EXT && cl != NULL);
+ VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+ VERIFY(clsp->sl_refcnt == 1);
+ if (class == MC_MBUF_BIGCL) {
+ nsp = clsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ } else if (class == MC_MBUF_16KCL) {
+ int k;
+ for (nsp = clsp, k = 1;
+ k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = nsp->sl_next;
+ /* Next slab must already be present */
+ VERIFY(nsp != NULL);
+ VERIFY(nsp->sl_refcnt == 1);
+ }
+ }
+
+ mca = mcl_audit_buf2mca(MC_CL, cl);
+ if (class == MC_MBUF_CL)
+ size = m_maxsize(MC_CL);
+ else if (class == MC_MBUF_BIGCL)
+ size = m_maxsize(MC_BIGCL);
+ else
+ size = m_maxsize(MC_16KCL);
+ mcl_audit_cluster(mca, cl, size, alloc, FALSE);
+ mcache_buffer_log(mca, cl, m_cache(class));
+ if (alloc)
+ mca->mca_uflags |= MB_COMP_INUSE;
+ else
+ mca->mca_uflags &= ~MB_COMP_INUSE;
+ lck_mtx_unlock(mbuf_mlock);
+
+ list = list->obj_next;
+ }
+}
+
+/*
+ * Allocate some number of mbuf clusters and place on cluster freelist.
+ */
+static int
+m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
+{
+ int i;
+ vm_size_t size = 0;
+ int numpages = 0;
+ vm_offset_t page = 0;
+ mcache_audit_t *mca_list = NULL;
+ mcache_obj_t *con_list = NULL;
+ mcl_slab_t *sp;
+
+ VERIFY(bufsize == m_maxsize(MC_CL) ||
+ bufsize == m_maxsize(MC_BIGCL) || bufsize == m_maxsize(MC_16KCL));
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ /*
+ * Multiple threads may attempt to populate the cluster map one
+ * after another. Since we drop the lock below prior to acquiring
+ * the physical page(s), our view of the cluster map may no longer
+ * be accurate, and we could end up over-committing pages beyond
+ * the maximum allowed for each class. To prevent this, the entire
+ * operation (including the page mapping) is serialized.
+ */
+ while (mb_clalloc_busy) {
+ mb_clalloc_waiters++;
+ (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
+ (PZERO-1), "m_clalloc", NULL);
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ }
+
+ /* We are busy now; tell everyone else to go away */
+ mb_clalloc_busy = TRUE;
+
+ /*
+ * Honor the caller's wish to block or not block. We have a way
+ * to grow the pool asynchronously using the mbuf worker thread.
+ */
+ i = m_howmany(num, bufsize);
+ if (i == 0 || (wait & M_DONTWAIT))
+ goto out;
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ size = round_page_32(i * bufsize);
+ page = kmem_mb_alloc(mb_map, size);
+
+ if (page == 0) {
+ if (bufsize <= m_maxsize(MC_BIGCL)) {
+ /* If that failed, retry with a single page (2KB/4KB requests only) */
+ size = NBPG;
+ page = kmem_mb_alloc(mb_map, size);
+ }
+
+ if (page == 0) {
+ lck_mtx_lock(mbuf_mlock);
+ goto out;
+ }
+ }
+
+ VERIFY(IS_P2ALIGNED(page, NBPG));
+ numpages = size / NBPG;
+
+ /* If auditing is enabled, allocate the audit structures now */
+ if (mclaudit != NULL) {
+ int needed;
+
+ /*
+ * Yes, I realize this is a waste of memory for clusters
+ * that never get transformed into mbufs, as we may end
+ * up with NMBPCL-1 unused audit structures per cluster.
+ * But doing so tremendously simplifies the allocation
+ * strategy, since at this point we are not holding the
+ * mbuf lock and the caller is willing to be blocked. For
+ * the case of big clusters, we allocate one structure
+ * for each as we never turn them into mbufs.
+ */
+ if (bufsize == m_maxsize(MC_CL)) {
+ needed = numpages * 2 * NMBPCL;
+
+ i = mcache_alloc_ext(mcl_audit_con_cache,
+ &con_list, needed, MCR_SLEEP);
+
+ VERIFY(con_list != NULL && i == needed);
+ } else if (bufsize == m_maxsize(MC_BIGCL)) {
+ needed = numpages;
+ } else {
+ needed = numpages / (M16KCLBYTES / NBPG);
+ }
+
+ i = mcache_alloc_ext(mcache_audit_cache,
+ (mcache_obj_t **)&mca_list, needed, MCR_SLEEP);
+
+ VERIFY(mca_list != NULL && i == needed);
+ }
+
+ lck_mtx_lock(mbuf_mlock);
+
+ for (i = 0; i < numpages; i++, page += NBPG) {
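+ /* Index of this page within the cluster map; used to index mcl_paddr[] */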
+ ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
+ ppnum_t new_page = pmap_find_phys(kernel_pmap,
+ (vm_address_t)page);
+
+ /*
+ * If no mapper is available, the following call is a no-op
+ * and returns the input page; if there is a mapper, the
+ * appropriate I/O page is returned.
+ */
+ new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+ mcl_paddr[offset] = new_page << PGSHIFT;
+
+ /* Pattern-fill this fresh page */
+ if (mclaudit != NULL)
+ mcache_set_pattern(MCACHE_FREE_PATTERN,
+ (caddr_t)page, NBPG);
+
+ if (bufsize == m_maxsize(MC_CL)) {
+ union mcluster *mcl = (union mcluster *)page;
+
+ /* 1st cluster in the page */
+ sp = slab_get(mcl);
+ if (mclaudit != NULL)
+ mcl_audit_init(mcl, &mca_list, &con_list,
+ AUDIT_CONTENTS_SIZE, NMBPCL);
+
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
+ slab_init(sp, MC_CL, SLF_MAPPED,
+ mcl, mcl, bufsize, 0, 1);
+
+ /* Insert this slab */
+ slab_insert(sp, MC_CL);
+
+ /* Update stats now since slab_get() drops the lock */
+ mbstat.m_clfree = ++m_infree(MC_CL) +
+ m_infree(MC_MBUF_CL);
+ mbstat.m_clusters = ++m_total(MC_CL);
+ VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
+
+ /* 2nd cluster in the page */
+ sp = slab_get(++mcl);
+ if (mclaudit != NULL)
+ mcl_audit_init(mcl, &mca_list, &con_list,
+ AUDIT_CONTENTS_SIZE, NMBPCL);
+
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
+ slab_init(sp, MC_CL, SLF_MAPPED,
+ mcl, mcl, bufsize, 0, 1);
+
+ /* Insert this slab */
+ slab_insert(sp, MC_CL);
+
+ /* Update stats now since slab_get() drops the lock */
+ mbstat.m_clfree = ++m_infree(MC_CL) +
+ m_infree(MC_MBUF_CL);
+ mbstat.m_clusters = ++m_total(MC_CL);
+ VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
+ } else if (bufsize == m_maxsize(MC_BIGCL)) {
+ union mbigcluster *mbc = (union mbigcluster *)page;
+ mcl_slab_t *nsp;
+
+ /* One for the entire page */
+ sp = slab_get(mbc);
+ if (mclaudit != NULL)
+ mcl_audit_init(mbc, &mca_list, NULL, 0, 1);
+
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
+ slab_init(sp, MC_BIGCL, SLF_MAPPED,
+ mbc, mbc, bufsize, 0, 1);
+
+ /* 2nd cluster's slab is part of the previous one */
+ nsp = slab_get(((union mcluster *)page) + 1);
+ slab_init(nsp, MC_BIGCL, SLF_MAPPED | SLF_PARTIAL,
+ mbc, NULL, 0, 0, 0);
+
+ /* Insert this slab */
+ slab_insert(sp, MC_BIGCL);
+
+ /* Update stats now since slab_get() drops the lock */
+ mbstat.m_bigclfree = ++m_infree(MC_BIGCL) +
+ m_infree(MC_MBUF_BIGCL);
+ mbstat.m_bigclusters = ++m_total(MC_BIGCL);
+ VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
+ } else if ((i % (M16KCLBYTES / NBPG)) == 0) {
+ union m16kcluster *m16kcl = (union m16kcluster *)page;
+ mcl_slab_t *nsp;
+ int k;
+
+ VERIFY(njcl > 0);
+ /* One for the entire 16KB */
+ sp = slab_get(m16kcl);
+ if (mclaudit != NULL)
+ mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1);
+
+ VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
+ slab_init(sp, MC_16KCL, SLF_MAPPED,
+ m16kcl, m16kcl, bufsize, 0, 1);
+
+ /* 2nd-8th cluster's slab is part of the first one */
+ for (k = 1; k < (M16KCLBYTES / MCLBYTES); k++) {
+ nsp = slab_get(((union mcluster *)page) + k);
+ VERIFY(nsp->sl_refcnt == 0 &&
+ nsp->sl_flags == 0);
+ slab_init(nsp, MC_16KCL,
+ SLF_MAPPED | SLF_PARTIAL,
+ m16kcl, NULL, 0, 0, 0);
+ }
+
+ /* Insert this slab */
+ slab_insert(sp, MC_16KCL);
+
+ /* Update stats now since slab_get() drops the lock */
+ m_infree(MC_16KCL)++;
+ m_total(MC_16KCL)++;
+ VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
+ }
+ }
+ VERIFY(mca_list == NULL && con_list == NULL);
+
+ /* We're done; let others enter */
+ mb_clalloc_busy = FALSE;
+ if (mb_clalloc_waiters > 0) {
+ mb_clalloc_waiters = 0;
+ wakeup(mb_clalloc_waitchan);
+ }
+
+ if (bufsize == m_maxsize(MC_CL))
+ return (numpages << 1);
+ else if (bufsize == m_maxsize(MC_BIGCL))
+ return (numpages);
+
+ VERIFY(bufsize == m_maxsize(MC_16KCL));
+ return (numpages / (M16KCLBYTES / NBPG));
+
+out:
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ /* We're done; let others enter */
+ mb_clalloc_busy = FALSE;
+ if (mb_clalloc_waiters > 0) {
+ mb_clalloc_waiters = 0;
+ wakeup(mb_clalloc_waitchan);
+ }
+
+ /*
+ * When non-blocking, we kick the worker thread if we have to grow
+ * the pool or if the number of free clusters is less than requested.
+ */
+ if (bufsize == m_maxsize(MC_CL)) {
+ if (i > 0) {
+ /*
+ * Remember total number of clusters needed
+ * at this time.
+ */
+ i += m_total(MC_CL);
+ if (i > mbuf_expand_mcl) {
+ mbuf_expand_mcl = i;
+ if (mbuf_worker_ready)
+ wakeup((caddr_t)&mbuf_worker_run);
+ }
+ }
+
+ if (m_infree(MC_CL) >= num)
+ return (1);
+ } else if (bufsize == m_maxsize(MC_BIGCL)) {
+ if (i > 0) {
+ /*
+ * Remember total number of 4KB clusters needed
+ * at this time.
+ */
+ i += m_total(MC_BIGCL);
+ if (i > mbuf_expand_big) {
+ mbuf_expand_big = i;
+ if (mbuf_worker_ready)
+ wakeup((caddr_t)&mbuf_worker_run);
+ }
+ }
+
+ if (m_infree(MC_BIGCL) >= num)
+ return (1);
+ } else {
+ if (i > 0) {
+ /*
+ * Remember total number of 16KB clusters needed
+ * at this time.
+ */
+ i += m_total(MC_16KCL);
+ if (i > mbuf_expand_16k) {
+ mbuf_expand_16k = i;
+ if (mbuf_worker_ready)
+ wakeup((caddr_t)&mbuf_worker_run);
+ }
+ }
+
+ if (m_infree(MC_16KCL) >= num)
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Populate the global freelist of the corresponding buffer class.
+ */
+static int
+freelist_populate(mbuf_class_t class, unsigned int num, int wait)
+{
+ mcache_obj_t *o = NULL;
+ int i;
+
+ VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
+ class == MC_16KCL);
+
+#if CONFIG_MBUF_NOEXPAND
+ if ((mbstat.m_mbufs / NMBPCL) >= maxmbufcl) {
+#if DEBUG
+ static int printonce = 1;
+ if (printonce == 1) {
+ printonce = 0;
+ printf("m_expand failed, allocated %ld out of %d "
+ "clusters\n", mbstat.m_mbufs / NMBPCL,
+ nmbclusters);
+ }
+#endif /* DEBUG */
+ return (0);
+ }
+#endif /* CONFIG_MBUF_NOEXPAND */
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ switch (class) {
+ case MC_MBUF:
+ case MC_CL:
+ i = m_clalloc(num, wait, m_maxsize(MC_CL));
+
+ /* Respect the 2K clusters minimum limit */
+ if (m_total(MC_CL) == m_maxlimit(MC_CL) &&
+ m_infree(MC_CL) <= m_minlimit(MC_CL)) {
+ if (class != MC_CL || (wait & MCR_COMP))
+ return (0);
+ }
+ if (class == MC_CL)
+ return (i != 0);
+ break;
+
+ case MC_BIGCL:
+ case MC_16KCL:
+ return (m_clalloc(num, wait, m_maxsize(class)) != 0);
+ /* NOTREACHED */
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ /* Steal a cluster and cut it up to create NMBPCL mbufs */
+ if ((o = slab_alloc(MC_CL, wait)) != NULL) {
+ struct mbuf *m = (struct mbuf *)o;
+ mcache_audit_t *mca = NULL;
+ mcl_slab_t *sp = slab_get(o);
+
+ VERIFY(slab_is_detached(sp) &&
+ (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
+
+ /* Make sure that the cluster is unmolested while in freelist */
+ if (mclaudit != NULL) {
+ mca = mcl_audit_buf2mca(MC_CL, o);
+ mcache_audit_free_verify(mca, o, 0, m_maxsize(MC_CL));
+ }
+
+ /* Reinitialize it as an mbuf slab */
+ slab_init(sp, MC_MBUF, sp->sl_flags, sp->sl_base, NULL,
+ sp->sl_len, 0, NMBPCL);
+
+ VERIFY(m == (struct mbuf *)sp->sl_base);
+ VERIFY(sp->sl_head == NULL);
+
+ m_total(MC_MBUF) += NMBPCL;
+ mbstat.m_mbufs = m_total(MC_MBUF);
+ m_infree(MC_MBUF) += NMBPCL;
+ mtype_stat_add(MT_FREE, NMBPCL);
+
+ i = NMBPCL;
+ while (i--) {
+ /*
+ * If auditing is enabled, construct the shadow mbuf
+ * in the audit structure instead of the actual one.
+ * mbuf_slab_audit() will take care of restoring the
+ * contents after the integrity check.
+ */
+ if (mclaudit != NULL) {
+ struct mbuf *ms;
+ mca = mcl_audit_buf2mca(MC_MBUF,
+ (mcache_obj_t *)m);
+ ms = ((struct mbuf *)mca->mca_contents);
+ ms->m_type = MT_FREE;
+ } else {
+ m->m_type = MT_FREE;
+ }
+ m->m_next = sp->sl_head;
+ sp->sl_head = (void *)m++;
+ }
+
+ /* Insert it into the mbuf class's slab list */
+ slab_insert(sp, MC_MBUF);
+
+ if ((i = mb_waiters) > 0)
+ mb_waiters = 0;
+ if (i != 0)
+ wakeup(mb_waitchan);
+
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * (Inaccurately) check if it might be worth a trip back to the
+ * mcache layer due the availability of objects there. We'll
+ * end up back here if there's nothing up there.
+ */
+static boolean_t
+mbuf_cached_above(mbuf_class_t class, int wait)
+{
+ switch (class) {
+ case MC_MBUF:
+ if (wait & MCR_COMP)
+ return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)) ||
+ !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)));
+ break;
+
+ case MC_CL:
+ if (wait & MCR_COMP)
+ return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)));
+ break;
+
+ case MC_BIGCL:
+ if (wait & MCR_COMP)
+ return (!mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)));
+ break;
+
+ case MC_16KCL:
+ if (wait & MCR_COMP)
+ return (!mcache_bkt_isempty(m_cache(MC_MBUF_16KCL)));
+ break;
+
+ case MC_MBUF_CL:
+ case MC_MBUF_BIGCL:
+ case MC_MBUF_16KCL:
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return (!mcache_bkt_isempty(m_cache(class)));
+}
+
+/*
+ * If possible, convert constructed objects to raw ones.
+ */
+static boolean_t
+mbuf_steal(mbuf_class_t class, unsigned int num)
+{
+ mcache_obj_t *top = NULL;
+ mcache_obj_t **list = ⊤
+ unsigned int tot = 0;
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ switch (class) {
+ case MC_MBUF:
+ case MC_CL:
+ case MC_BIGCL:
+ case MC_16KCL:
+ return (FALSE);
+
+ case MC_MBUF_CL:
+ case MC_MBUF_BIGCL:
+ case MC_MBUF_16KCL:
+ /* Get the required number of constructed objects if possible */
+ if (m_infree(class) > m_minlimit(class)) {
+ tot = cslab_alloc(class, &list,
+ MIN(num, m_infree(class)));
+ }
+
+ /* And destroy them to get back the raw objects */
+ if (top != NULL)
+ (void) cslab_free(class, top, 1);
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ return (tot == num);
+}
+
+static void
+m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
+{
+ int m, bmap = 0;
+
+ lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+
+ VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
+ VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
+ VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
+
+ /*
+ * This logic can be made smarter; for now, simply mark
+ * all other related classes as potential victims.
+ */
+ switch (class) {
+ case MC_MBUF:
+ m_wantpurge(MC_CL)++;
+ m_wantpurge(MC_MBUF_CL)++;
+ m_wantpurge(MC_MBUF_BIGCL)++;
+ break;
+
+ case MC_CL:
+ m_wantpurge(MC_MBUF)++;
+ if (!comp)
+ m_wantpurge(MC_MBUF_CL)++;
+ break;
+
+ case MC_BIGCL:
+ if (!comp)
+ m_wantpurge(MC_MBUF_BIGCL)++;
+ break;
+
+ case MC_16KCL:
+ if (!comp)
+ m_wantpurge(MC_MBUF_16KCL)++;
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+
+ /*
+ * Run through each marked class and check if we really need to
+ * purge (and therefore temporarily disable) the per-CPU caches
+ * layer used by the class. If so, remember the classes since
+ * we are going to drop the lock below prior to purging.
+ */
+ for (m = 0; m < NELEM(mbuf_table); m++) {
+ if (m_wantpurge(m) > 0) {
+ m_wantpurge(m) = 0;
+ /*
+ * Try hard to steal the required number of objects
+ * from the freelist of other mbuf classes. Only
+ * purge and disable the per-CPU caches layer when
+ * we don't have enough; it's the last resort.
+ */
+ if (!mbuf_steal(m, num))
+ bmap |= (1 << m);
+ }
+ }
+
+ lck_mtx_unlock(mbuf_mlock);
+
+ if (bmap != 0) {
+ /* drain is performed in pfslowtimo(), to avoid deadlocks */
+ do_reclaim = 1;
+
+ /* Sigh; we have no other choices but to ask mcache to purge */
+ for (m = 0; m < NELEM(mbuf_table); m++) {
+ if ((bmap & (1 << m)) &&
+ mcache_purge_cache(m_cache(m))) {
+ lck_mtx_lock(mbuf_mlock);
+ m_purge_cnt(m)++;
+ mbstat.m_drain++;
+ lck_mtx_unlock(mbuf_mlock);
+ }
+ }
+ } else {
+ /*
+ * Request mcache to reap extra elements from all of its caches;
+ * note that all reaps are serialized and happen only at a fixed
+ * interval.
+ */
+ mcache_reap();
+ }
+ lck_mtx_lock(mbuf_mlock);
+}
+
+static inline struct mbuf *
+m_get_common(int wait, short type, int hdr)
+{
+ struct mbuf *m;
+ int mcflags = MSLEEPF(wait);
+
+ /* Is this due to a non-blocking retry? If so, then try harder */
+ if (mcflags & MCR_NOSLEEP)
+ mcflags |= MCR_TRYHARD;
+
+ m = mcache_alloc(m_cache(MC_MBUF), mcflags);
+ if (m != NULL) {
+ MBUF_INIT(m, hdr, type);
+ mtype_stat_inc(type);
+ mtype_stat_dec(MT_FREE);
+#if CONFIG_MACF_NET
+ if (hdr && mac_init_mbuf(m, wait) != 0) {
+ m_free(m);
+ return (NULL);
+ }
+#endif /* CONFIG_MACF_NET */
+ }
+ return (m);
+}
+
+/*
+ * Space allocation routines; these are also available as macros
+ * for critical paths.
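+ * The retry variants simply map to the regular allocators here.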
+ */
+#define _M_GET(wait, type) m_get_common(wait, type, 0)
+#define _M_GETHDR(wait, type) m_get_common(wait, type, 1)
+#define _M_RETRY(wait, type) _M_GET(wait, type)
+#define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type)
+#define _MGET(m, how, type) ((m) = _M_GET(how, type))
+#define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type))
+
+struct mbuf *
+m_get(int wait, int type)
+{
+ return (_M_GET(wait, type));
+}
+
+struct mbuf *
+m_gethdr(int wait, int type)
+{
+ return (_M_GETHDR(wait, type));
+}
+
+struct mbuf *
+m_retry(int wait, int type)
+{
+ return (_M_RETRY(wait, type));
+}
+
+struct mbuf *
+m_retryhdr(int wait, int type)
+{
+ return (_M_RETRYHDR(wait, type));
+}
+
+struct mbuf *
+m_getclr(int wait, int type)
+{
+ struct mbuf *m;
+
+ _MGET(m, wait, type);
+ if (m != NULL)
+ bzero(MTOD(m, caddr_t), MLEN);
+ return (m);
+}
+
+struct mbuf *
+m_free(struct mbuf *m)
+{
+ struct mbuf *n = m->m_next;
+
+ if (m->m_type == MT_FREE)
+ panic("m_free: freeing an already freed mbuf");
+
+ /* Free the aux data and tags if there are any */
+ if (m->m_flags & M_PKTHDR) {
+ m_tag_delete_chain(m, NULL);
+ }
+
+ if (m->m_flags & M_EXT) {
+ u_int32_t refcnt;
+ u_int32_t flags;
+
+ refcnt = m_decref(m);
+ flags = MEXT_FLAGS(m);
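+ /* Last reference and not a composite: free the external buffer itself */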
+ if (refcnt == 0 && flags == 0) {
+ if (m->m_ext.ext_free == NULL) {
+ mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
+ } else if (m->m_ext.ext_free == m_bigfree) {
+ mcache_free(m_cache(MC_BIGCL),
+ m->m_ext.ext_buf);
+ } else if (m->m_ext.ext_free == m_16kfree) {
+ mcache_free(m_cache(MC_16KCL),
+ m->m_ext.ext_buf);
+ } else {
+ (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+ m->m_ext.ext_size, m->m_ext.ext_arg);
+ }
+ mcache_free(ref_cache, MEXT_RFA(m));
+ MEXT_RFA(m) = NULL;
+ } else if (refcnt == 0 && (flags & EXTF_COMPOSITE)) {
+ VERIFY(m->m_type != MT_FREE);
+
+ mtype_stat_dec(m->m_type);
+ mtype_stat_inc(MT_FREE);
+
+ m->m_type = MT_FREE;
+ m->m_flags = M_EXT;
+ m->m_len = 0;
+ m->m_next = m->m_nextpkt = NULL;
+
+ /* "Free" into the intermediate cache */
+ if (m->m_ext.ext_free == NULL) {
+ mcache_free(m_cache(MC_MBUF_CL), m);
+ } else if (m->m_ext.ext_free == m_bigfree) {
+ mcache_free(m_cache(MC_MBUF_BIGCL), m);
+ } else {
+ VERIFY(m->m_ext.ext_free == m_16kfree);
+ mcache_free(m_cache(MC_MBUF_16KCL), m);
+ }
+ return (n);
+ }
+ }
+
+ if (m->m_type != MT_FREE) {
+ mtype_stat_dec(m->m_type);
+ mtype_stat_inc(MT_FREE);
+ }
+
+ m->m_type = MT_FREE;
+ m->m_flags = m->m_len = 0;
+ m->m_next = m->m_nextpkt = NULL;
+
+ mcache_free(m_cache(MC_MBUF), m);
+
+ return (n);
+}
+
+__private_extern__ struct mbuf *
+m_clattach(struct mbuf *m, int type, caddr_t extbuf,
+ void (*extfree)(caddr_t, u_int, caddr_t), u_int extsize, caddr_t extarg,
+ int wait)
+{
+ struct ext_ref *rfa = NULL;
+
+ if (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)
+ return (NULL);
+
+ if (m->m_flags & M_EXT) {
+ u_int32_t refcnt;
+ u_int32_t flags;
+
+ refcnt = m_decref(m);
+ flags = MEXT_FLAGS(m);
+ if (refcnt == 0 && flags == 0) {
+ if (m->m_ext.ext_free == NULL) {
+ mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
+ } else if (m->m_ext.ext_free == m_bigfree) {
+ mcache_free(m_cache(MC_BIGCL),
+ m->m_ext.ext_buf);
+ } else if (m->m_ext.ext_free == m_16kfree) {
+ mcache_free(m_cache(MC_16KCL),
+ m->m_ext.ext_buf);
+ } else {
+ (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
+ m->m_ext.ext_size, m->m_ext.ext_arg);
+ }
+ /* Re-use the reference structure */
+ rfa = MEXT_RFA(m);
+ } else if (refcnt == 0 && (flags & EXTF_COMPOSITE)) {
+ VERIFY(m->m_type != MT_FREE);
+
+ mtype_stat_dec(m->m_type);
+ mtype_stat_inc(MT_FREE);
+
+ m->m_type = MT_FREE;
+ m->m_flags = M_EXT;
+ m->m_len = 0;
+ m->m_next = m->m_nextpkt = NULL;
+ /* "Free" into the intermediate cache */
+ if (m->m_ext.ext_free == NULL) {
+ mcache_free(m_cache(MC_MBUF_CL), m);
+ } else if (m->m_ext.ext_free == m_bigfree) {
+ mcache_free(m_cache(MC_MBUF_BIGCL), m);
+ } else {
+ VERIFY(m->m_ext.ext_free == m_16kfree);
+ mcache_free(m_cache(MC_MBUF_16KCL), m);
+ }
+ /*
+ * Allocate a new mbuf, since we didn't divorce
+ * the composite mbuf + cluster pair above.
+ */
+ if ((m = _M_GETHDR(wait, type)) == NULL)
+ return (NULL);
+ }
+ }
+
+ if (rfa == NULL &&
+ (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) {
+ m_free(m);
+ return (NULL);
+ }
+
+ MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa, 1, 0);
+
+ return (m);
+}
+
+/* m_mclget() adds an mbuf cluster to a normal mbuf */
+struct mbuf *
+m_mclget(struct mbuf *m, int wait)
+{
+ struct ext_ref *rfa;
+
+ if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL)
+ return (m);
+
+ m->m_ext.ext_buf = m_mclalloc(wait);
+ if (m->m_ext.ext_buf != NULL) {
+ MBUF_CL_INIT(m, m->m_ext.ext_buf, rfa, 1, 0);
+ } else {
+ mcache_free(ref_cache, rfa);
+ }
+ return (m);
+}
+
+/* Allocate an mbuf cluster */
+caddr_t
+m_mclalloc(int wait)
+{
+ int mcflags = MSLEEPF(wait);
+
+ /* Is this due to a non-blocking retry? If so, then try harder */
+ if (mcflags & MCR_NOSLEEP)
+ mcflags |= MCR_TRYHARD;
+
+ return (mcache_alloc(m_cache(MC_CL), mcflags));
+}
+
+/* Free an mbuf cluster */
+void
+m_mclfree(caddr_t p)
+{
+ mcache_free(m_cache(MC_CL), p);
+}
+
+/*
+ * m_mclhasreference() checks if a cluster attached to an mbuf is
+ * referenced by another mbuf.
+ */
+int
+m_mclhasreference(struct mbuf *m)
+{
+ if (!(m->m_flags & M_EXT))
+ return (0);