/*
- * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/simple_lock.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
+#include <kern/backtrace.h>
#include <kern/cpu_number.h>
#include <kern/zalloc.h>
* | |
* v |
* [freelist] ----------->>------------+
- * (objects never get purged to VM)
+ * (objects get purged to VM only on demand)
*
* b. Composite object:
*
*
* The mclaudit[] array is allocated at initialization time, but its contents
* get populated when the corresponding cluster is created. Because a page
- * can be turned into NMBPBG number of mbufs, we preserve enough space for the
+ * can be turned into NMBPG number of mbufs, we preserve enough space for the
* mbufs so that there is a 1-to-1 mapping between them. A page that never
* gets (or has not yet) turned into mbufs will use only cl_audit[0] with the
* remaining entries unused. For 16KB cluster, only one entry from the first
/* TODO: should be in header file */
/* kernel translater */
-extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int);
+extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int, kern_return_t *);
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern vm_map_t mb_map; /* special map */
+static uint32_t mb_kmem_contig_failed;
+static uint32_t mb_kmem_failed;
+static uint32_t mb_kmem_one_failed;
+/* Timestamp of allocation failures. */
+static uint64_t mb_kmem_contig_failed_ts;
+static uint64_t mb_kmem_failed_ts;
+static uint64_t mb_kmem_one_failed_ts;
+static uint64_t mb_kmem_contig_failed_size;
+static uint64_t mb_kmem_failed_size;
+static uint32_t mb_kmem_stats[6];
+static const char *mb_kmem_stats_labels[] = { "INVALID_ARGUMENT",
+ "INVALID_ADDRESS",
+ "RESOURCE_SHORTAGE",
+ "NO_SPACE",
+ "KERN_FAILURE",
+ "OTHERS" };
+
/* Global lock */
decl_lck_mtx_data(static, mbuf_mlock_data);
static lck_mtx_t *mbuf_mlock = &mbuf_mlock_data;
static lck_grp_attr_t *mbuf_mlock_grp_attr;
/* Back-end (common) layer */
-static void *mbuf_worker_run; /* wait channel for worker thread */
+static uint64_t mb_expand_cnt;
+static uint64_t mb_expand_cl_cnt;
+static uint64_t mb_expand_cl_total;
+static uint64_t mb_expand_bigcl_cnt;
+static uint64_t mb_expand_bigcl_total;
+static uint64_t mb_expand_16kcl_cnt;
+static uint64_t mb_expand_16kcl_total;
+static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
+static uint32_t mbuf_worker_run_cnt;
+static uint64_t mbuf_worker_last_runtime;
static int mbuf_worker_ready; /* worker thread is runnable */
-static int mbuf_expand_mcl; /* number of cluster creation requets */
-static int mbuf_expand_big; /* number of big cluster creation requests */
-static int mbuf_expand_16k; /* number of 16KB cluster creation requests */
static int ncpu; /* number of CPUs */
static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */
static ppnum_t mcl_pages; /* Size of array (# physical pages) */
* whenever a new piece of memory mapped in from the VM crosses the 1MB
* boundary.
*/
-#define NSLABSPMB ((1 << MBSHIFT) >> PGSHIFT) /* 256 slabs/grp */
+#define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)
typedef struct mcl_slabg {
- mcl_slab_t slg_slab[NSLABSPMB]; /* group of slabs */
+ mcl_slab_t *slg_slab; /* group of slabs */
} mcl_slabg_t;
/*
* Number of slabs needed to control a 16KB cluster object.
*/
-#define NSLABSP16KB (M16KCLBYTES >> PGSHIFT)
+#define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)
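+/*
+ * For illustration: with 4KB pages (PAGE_SHIFT == 12) a 16KB cluster
+ * is controlled by NSLABSP16KB == 4 slabs (one head slab plus three
+ * SLF_PARTIAL slabs), while with 16KB pages a single slab suffices.
+ */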
/*
* Per-cluster audit structure.
*/
typedef struct {
- mcache_audit_t *cl_audit[NMBPBG]; /* array of audits */
+ mcache_audit_t **cl_audit; /* array of audits */
} mcl_audit_t;
typedef struct {
int nclusters; /* # of clusters for non-jumbo (legacy) sizes */
int njcl; /* # of clusters for jumbo sizes */
int njclbytes; /* size of a jumbo cluster */
-union mbigcluster *mbutl; /* first mapped cluster address */
-union mbigcluster *embutl; /* ending virtual address of mclusters */
+unsigned char *mbutl; /* first mapped cluster address */
+unsigned char *embutl; /* ending virtual address of mclusters */
int _max_linkhdr; /* largest link-level header */
int _max_protohdr; /* largest protocol header */
int max_hdr; /* largest link+protocol header */
static mleak_stat_t *mleak_stat;
#define MLEAK_STAT_SIZE(n) \
- ((size_t)(&((mleak_stat_t *)0)->ml_trace[n]))
+ __builtin_offsetof(mleak_stat_t, ml_trace[n])
struct mallocation {
mcache_obj_t *element; /* the alloc'ed element, NULL if unused */
static lck_grp_t *mleak_lock_grp;
static lck_grp_attr_t *mleak_lock_grp_attr;
+/* Lock to protect the completion callback table */
+static lck_grp_attr_t *mbuf_tx_compl_tbl_lck_grp_attr = NULL;
+static lck_attr_t *mbuf_tx_compl_tbl_lck_attr = NULL;
+static lck_grp_t *mbuf_tx_compl_tbl_lck_grp = NULL;
+decl_lck_rw_data(, mbuf_tx_compl_tbl_lck_rw_data);
+lck_rw_t *mbuf_tx_compl_tbl_lock = &mbuf_tx_compl_tbl_lck_rw_data;
+
extern u_int32_t high_sb_max;
/* The minimum number of objects that are allocated, to start. */
int mtbl_maxlimit; /* maximum allowed */
u_int32_t mtbl_wantpurge; /* purge during next reclaim */
uint32_t mtbl_avgtotal; /* average total on iOS */
+ u_int32_t mtbl_expand; /* worker should expand the class */
} mbuf_table_t;
#define m_class(c) mbuf_table[c].mtbl_class
#define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal
#define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported
#define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
+#define m_region_expand(c) mbuf_table[c].mtbl_expand
static mbuf_table_t mbuf_table[] = {
/*
* usage patterns on iOS.
*/
{ MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
- NULL, NULL, 0, 0, 0, 0, 3000 },
+ NULL, NULL, 0, 0, 0, 0, 3000, 0 },
{ MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
- NULL, NULL, 0, 0, 0, 0, 2000 },
+ NULL, NULL, 0, 0, 0, 0, 2000, 0 },
{ MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
- NULL, NULL, 0, 0, 0, 0, 1000 },
+ NULL, NULL, 0, 0, 0, 0, 1000, 0 },
{ MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
- NULL, NULL, 0, 0, 0, 0, 1000 },
+ NULL, NULL, 0, 0, 0, 0, 200, 0 },
/*
* The following are special caches; they serve as intermediate
* caches backed by the above rudimentary caches. Each object
* deal with the slab structures; instead, the constructed
* cached elements are simply stored in the freelists.
*/
- { MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000 },
- { MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
- { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
+ { MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000, 0 },
+ { MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000, 0 },
+ { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 },
};
#define NELEM(a) (sizeof (a) / sizeof ((a)[0]))
static struct timeval mb_wdtstart; /* watchdog start timestamp */
static char *mbuf_dump_buf;
-#define MBUF_DUMP_BUF_SIZE 2048
+#define MBUF_DUMP_BUF_SIZE 3072
/*
* mbuf watchdog is enabled by default on embedded platforms. It is
* mb_drain_maxint controls the amount of time to wait (in seconds) before
* consecutive calls to m_drain().
*/
+#if CONFIG_EMBEDDED
+static unsigned int mb_watchdog = 1;
+static unsigned int mb_drain_maxint = 60;
+#else
static unsigned int mb_watchdog = 0;
static unsigned int mb_drain_maxint = 0;
+#endif /* CONFIG_EMBEDDED */
+
+uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
+uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));
/* Red zone */
static u_int32_t mb_redzone_cookie;
static char *mbuf_dump(void);
static void mbuf_table_init(void);
static inline void m_incref(struct mbuf *);
-static inline u_int32_t m_decref(struct mbuf *);
+static inline u_int16_t m_decref(struct mbuf *);
static int m_clalloc(const u_int32_t, const int, const u_int32_t);
static void mbuf_worker_thread_init(void);
static mcache_obj_t *slab_alloc(mbuf_class_t, int);
* cleared.
*/
#define EXTF_READONLY 0x2
-#define EXTF_MASK (EXTF_COMPOSITE | EXTF_READONLY)
-
-#define MEXT_RFA(m) ((m)->m_ext.ext_refflags)
-#define MEXT_REF(m) (MEXT_RFA(m)->refcnt)
-#define MEXT_FLAGS(m) (MEXT_RFA(m)->flags)
-#define MBUF_IS_COMPOSITE(m) \
- (MEXT_REF(m) == 0 && (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
+/*
+ * This flag indicates that the external cluster is paired with the mbuf.
+ * Pairing implies an external free routine is defined, which will be
+ * invoked when the reference count drops to the minimum at m_free()
+ * time. This flag is never cleared.
+ */
+#define EXTF_PAIRED 0x4
+
+#define EXTF_MASK \
+ (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED)
+
+#define MEXT_MINREF(m) ((m_get_rfa(m))->minref)
+#define MEXT_REF(m) ((m_get_rfa(m))->refcnt)
+#define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt)
+#define MEXT_FLAGS(m) ((m_get_rfa(m))->flags)
+#define MEXT_PRIV(m) ((m_get_rfa(m))->priv)
+#define MEXT_PMBUF(m) ((m_get_rfa(m))->paired)
+#define MEXT_TOKEN(m) ((m_get_rfa(m))->ext_token)
+#define MBUF_IS_COMPOSITE(m) \
+ (MEXT_REF(m) == MEXT_MINREF(m) && \
+ (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE)
+/*
+ * This macro can be used to test if the mbuf is paired to an external
+ * cluster. The test for MEXT_PMBUF being equal to the mbuf in question
+ * is important, as EXTF_PAIRED alone is insufficient since it is immutable,
+ * and thus survives calls to m_free_paired.
+ */
+#define MBUF_IS_PAIRED(m) \
+ (((m)->m_flags & M_EXT) && \
+ (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \
+ MEXT_PMBUF(m) == (m))
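+/*
+ * Illustrative sketch (see m_clattach below): a paired attachment is
+ * created with MEXT_INIT(m, buf, size, free, m, rfa, 1, 1, 1,
+ * EXTF_PAIRED, 0, m), i.e. minref, refcnt and prefcnt all start at 1
+ * and MEXT_PMBUF points back at the mbuf, whereas an ordinary
+ * attachment starts with minref 0, refcnt 1, prefcnt 0 and a NULL
+ * paired pointer.
+ */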
/*
* Macros used to verify the integrity of the mbuf.
*/
#define _MCHECK(m) { \
- if ((m)->m_type != MT_FREE) { \
+ if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \
if (mclaudit == NULL) \
panic("MCHECK: m_type=%d m=%p", \
(u_int16_t)(m)->m_type, m); \
}
#define MBUF_IN_MAP(addr) \
- ((void *)(addr) >= (void *)mbutl && (void *)(addr) < (void *)embutl)
+ ((unsigned char *)(addr) >= mbutl && \
+ (unsigned char *)(addr) < embutl)
#define MRANGE(addr) { \
if (!MBUF_IN_MAP(addr)) \
#define MTOD(m, t) ((t)((m)->m_data))
/*
- * Macros to obtain (4KB) cluster index and base cluster address.
+ * Macros to convert between a page index and the page base address.
*/
-
-#define MTOBG(x) (((char *)(x) - (char *)mbutl) >> MBIGCLSHIFT)
-#define BGTOM(x) ((union mbigcluster *)(mbutl + (x)))
+#define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT)
+#define PGTOM(x) (mbutl + (x << PAGE_SHIFT))
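+/*
+ * Example: with 4KB pages an object at mbutl + 0x5000 lies on page
+ * index MTOPG == 5, and PGTOM(5) recovers the page base mbutl + 0x5000.
+ */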
/*
* Macro to find the mbuf index relative to a base.
*/
-#define MCLIDX(c, m) (((char *)(m) - (char *)(c)) >> MSIZESHIFT)
+#define MBPAGEIDX(c, m) \
+ (((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT)
/*
* Same thing for 2KB cluster index.
*/
-#define CLBGIDX(c, m) (((char *)(m) - (char *)(c)) >> MCLSHIFT)
+#define CLPAGEIDX(c, m) \
+ (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT)
+
+/*
+ * Macro to find 4KB cluster index relative to a base
+ */
+#define BCLPAGEIDX(c, m) \
+ (((unsigned char *)(m) - (unsigned char *)(c)) >> MBIGCLSHIFT)
/*
* Macros used during mbuf and cluster initialization.
} \
}
-#define MEXT_INIT(m, buf, size, free, arg, rfa, ref, flag) { \
+#define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag, \
+ priv, pm) { \
(m)->m_data = (m)->m_ext.ext_buf = (buf); \
(m)->m_flags |= M_EXT; \
+ m_set_ext((m), (rfa), (free), (arg)); \
(m)->m_ext.ext_size = (size); \
- (m)->m_ext.ext_free = (free); \
- (m)->m_ext.ext_arg = (arg); \
- (m)->m_ext.ext_refs.forward = (m)->m_ext.ext_refs.backward = \
- &(m)->m_ext.ext_refs; \
- MEXT_RFA(m) = (rfa); \
+ MEXT_MINREF(m) = (min); \
MEXT_REF(m) = (ref); \
+ MEXT_PREF(m) = (pref); \
MEXT_FLAGS(m) = (flag); \
+ MEXT_PRIV(m) = (priv); \
+ MEXT_PMBUF(m) = (pm); \
}
#define MBUF_CL_INIT(m, buf, rfa, ref, flag) \
- MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, ref, flag)
+ MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \
+ ref, 0, flag, 0, NULL)
#define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
- MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, ref, flag)
+ MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \
+ ref, 0, flag, 0, NULL)
#define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
- MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, ref, flag)
+ MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \
+ ref, 0, flag, 0, NULL)
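+/*
+ * Note that the three initializers above pass minref 0, prefcnt 0,
+ * priv 0 and a NULL paired mbuf, i.e. they never create EXTF_PAIRED
+ * attachments; pairing is set up separately (see m_clattach).
+ */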
/*
* Macro to convert BSD malloc sleep flag to mcache's
struct omb_stat *omb_stat; /* For backwards compatibility */
#define MB_STAT_SIZE(n) \
- ((size_t)(&((mb_stat_t *)0)->mbs_class[n]))
+ __builtin_offsetof(mb_stat_t, mbs_class[n])
#define OMB_STAT_SIZE(n) \
((size_t)(&((struct omb_stat *)0)->mbs_class[n]))
mtypes_cpu_t mtc;
if (locked)
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
bzero(&mtc, sizeof (mtc));
for (m = 0; m < ncpu; m++) {
mcache_t *cp;
int k, m, bktsize;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
for (k = 0; k < NELEM(mbuf_table); k++) {
cp = m_cache(k);
static inline void
m_incref(struct mbuf *m)
{
- UInt32 old, new;
- volatile UInt32 *addr = (volatile UInt32 *)&MEXT_REF(m);
+ UInt16 old, new;
+ volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);
do {
old = *addr;
new = old + 1;
ASSERT(new != 0);
- } while (!OSCompareAndSwap(old, new, addr));
+ } while (!OSCompareAndSwap16(old, new, addr));
/*
* If cluster is shared, mark it with (sticky) EXTF_READONLY;
- * we don't clear the flag when the refcount goes back to 1
- * to simplify code calling m_mclhasreference().
+ * we don't clear the flag when the refcount goes back to the
+ * minimum, to simplify code calling m_mclhasreference().
*/
- if (new > 1 && !(MEXT_FLAGS(m) & EXTF_READONLY))
- (void) OSBitOrAtomic(EXTF_READONLY, &MEXT_FLAGS(m));
+ if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY))
+ (void) OSBitOrAtomic16(EXTF_READONLY, &MEXT_FLAGS(m));
}
-static inline u_int32_t
+static inline u_int16_t
m_decref(struct mbuf *m)
{
- UInt32 old, new;
- volatile UInt32 *addr = (volatile UInt32 *)&MEXT_REF(m);
+ UInt16 old, new;
+ volatile UInt16 *addr = (volatile UInt16 *)&MEXT_REF(m);
do {
old = *addr;
new = old - 1;
ASSERT(old != 0);
- } while (!OSCompareAndSwap(old, new, addr));
+ } while (!OSCompareAndSwap16(old, new, addr));
return (new);
}
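+/*
+ * Callers compare the value returned by m_decref() against
+ * MEXT_MINREF(m) rather than against zero, since paired clusters keep
+ * a floor of one reference (see m_free below).
+ */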
mbuf_table_init(void)
{
unsigned int b, c, s;
- int m;
+ int m, config_mbuf_jumbo = 0;
MALLOC(omb_stat, struct omb_stat *, OMB_STAT_SIZE(NELEM(mbuf_table)),
M_TEMP, M_WAITOK | M_ZERO);
mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m];
#if CONFIG_MBUF_JUMBO
- /*
- * Set aside 1/3 of the mbuf cluster map for jumbo clusters; we do
- * this only on platforms where jumbo cluster pool is enabled.
- */
- njcl = nmbclusters / 3;
- njclbytes = M16KCLBYTES;
+ config_mbuf_jumbo = 1;
#endif /* CONFIG_MBUF_JUMBO */
+ if (config_mbuf_jumbo == 1 || PAGE_SIZE == M16KCLBYTES) {
+ /*
+ * Set aside 1/3 of the mbuf cluster map for jumbo
+ * clusters; we do this only on platforms where jumbo
+ * cluster pool is enabled.
+ */
+ njcl = nmbclusters / 3;
+ njclbytes = M16KCLBYTES;
+ }
+
/*
* nclusters holds both the 2KB and 4KB pools, so ensure it's
* a multiple of 4KB clusters.
*/
- nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPBG);
+ nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
if (njcl > 0) {
/*
* Each jumbo cluster takes 8 2KB clusters, so make
* sure that the pool size is evenly divisible by 8;
* njcl is in 2KB unit, hence treated as such.
*/
- njcl = P2ROUNDDOWN(nmbclusters - nclusters, 8);
+ njcl = P2ROUNDDOWN(nmbclusters - nclusters, NCLPJCL);
/* Update nclusters with rounded down value of njcl */
- nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPBG);
+ nclusters = P2ROUNDDOWN(nmbclusters - njcl, NCLPG);
}
/*
- * njcl is valid only on platforms with 16KB jumbo clusters, where
- * it is configured to 1/3 of the pool size. On these platforms,
- * the remaining is used for 2KB and 4KB clusters. On platforms
- * without 16KB jumbo clusters, the entire pool is used for both
- * 2KB and 4KB clusters. A 4KB cluster can either be splitted into
- * 16 mbufs, or into 2 2KB clusters.
+ * njcl is valid only on platforms with 16KB jumbo clusters or
+ * with 16KB pages, where it is configured to 1/3 of the pool
+ * size. On these platforms, the remaining is used for 2KB
+ * and 4KB clusters. On platforms without 16KB jumbo clusters,
+ * the entire pool is used for both 2KB and 4KB clusters. A 4KB
+ * cluster can either be split into 16 mbufs, or into 2 2KB
+ * clusters.
*
* +---+---+------------ ... -----------+------- ... -------+
* | c | b | s | njcl |
* 1/32th of the shared region is reserved for pure 2KB and 4KB
* clusters (1/64th each.)
*/
- c = P2ROUNDDOWN((nclusters >> 6), 2); /* in 2KB unit */
- b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), 2); /* in 4KB unit */
+ c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */
+ b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */
s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */
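+ /*
+ * Worked example, assuming 4KB pages (NCLPG == 2, NBCLPG == 1,
+ * NCLPBGSHIFT == 1): with nclusters == 32768, c == 512 2KB clusters
+ * and b == 256 4KB clusters are reserved (1/64th of the pool each),
+ * leaving s == 31744 2KB units for the rest of the pool.
+ */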
/*
_CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6);
_CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL);
_CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16);
+ _CASSERT(MBUF_CSUM_REQ_ZERO_INVERT == CSUM_ZERO_INVERT);
_CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP);
_CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP);
_CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP);
_CASSERT(sizeof (mb_redzone_cookie) ==
sizeof (((struct pkthdr *)0)->redzone));
read_random(&mb_redzone_cookie, sizeof (mb_redzone_cookie));
+ read_random(&mb_obscure_extref, sizeof (mb_obscure_extref));
+ read_random(&mb_obscure_extfree, sizeof (mb_obscure_extfree));
+ mb_obscure_extref |= 0x3;
+ mb_obscure_extfree |= 0x3;
/* Make sure we don't save more than we should */
_CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof (struct mbuf));
* mcl_slab_g_t units, each one representing a MB of memory.
*/
maxslabgrp =
- (P2ROUNDUP(nmbclusters, (MBSIZE >> 11)) << MCLSHIFT) >> MBSHIFT;
+ (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT;
MALLOC(slabstbl, mcl_slabg_t **, maxslabgrp * sizeof (mcl_slabg_t *),
M_TEMP, M_WAITOK | M_ZERO);
VERIFY(slabstbl != NULL);
/*
* Allocate audit structures, if needed:
*
- * maxclaudit = (maxslabgrp * 1024 * 1024) / 4096
+ * maxclaudit = (maxslabgrp * 1024 * 1024) / PAGE_SIZE
*
* This yields mcl_audit_t units, each one representing a page.
*/
PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof (mbuf_debug));
mbuf_debug |= mcache_getflags();
if (mbuf_debug & MCF_DEBUG) {
- maxclaudit = ((maxslabgrp << MBSHIFT) >> PGSHIFT);
+ int l;
+ mcl_audit_t *mclad;
+ maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT);
MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof (*mclaudit),
M_TEMP, M_WAITOK | M_ZERO);
VERIFY(mclaudit != NULL);
+ for (l = 0, mclad = mclaudit; l < maxclaudit; l++) {
+ MALLOC(mclad[l].cl_audit, mcache_audit_t **,
+ NMBPG * sizeof(mcache_audit_t *),
+ M_TEMP, M_WAITOK | M_ZERO);
+ VERIFY(mclad[l].cl_audit != NULL);
+ }
mcl_audit_con_cache = mcache_create("mcl_audit_contents",
AUDIT_CONTENTS_SIZE, sizeof (u_int64_t), 0, MCR_SLEEP);
mleak_activate();
+ /*
+ * Allocate structure for per-CPU statistics that's aligned
+ * on the CPU cache boundary; this code assumes that we never
+ * uninitialize this framework, since the original address
+ * before alignment is not saved.
+ */
+ ncpu = ml_get_max_cpus();
+ MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE,
+ M_TEMP, M_WAITOK);
+ VERIFY(buf != NULL);
+
+ mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf,
+ CPU_CACHE_LINE_SIZE);
+ bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));
+
/* Calculate the number of pages assigned to the cluster pool */
- mcl_pages = (nmbclusters * MCLBYTES) / CLBYTES;
+ mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t),
M_TEMP, M_WAITOK);
VERIFY(mcl_paddr != NULL);
mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
bzero((char *)mcl_paddr, mcl_pages * sizeof (ppnum_t));
- embutl = (union mbigcluster *)
- ((void *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES)));
- VERIFY((((char *)embutl - (char *)mbutl) % MBIGCLBYTES) == 0);
+ embutl = (mbutl + (nmbclusters * MCLBYTES));
+ VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0);
/* Prime up the freelist */
PE_parse_boot_argn("initmcl", &initmcl, sizeof (initmcl));
(void *)(uintptr_t)m, flags, MCR_SLEEP);
}
- /*
- * Allocate structure for per-CPU statistics that's aligned
- * on the CPU cache boundary; this code assumes that we never
- * uninitialize this framework, since the original address
- * before alignment is not saved.
- */
- ncpu = ml_get_max_cpus();
- MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE,
- M_TEMP, M_WAITOK);
- VERIFY(buf != NULL);
-
- mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf,
- CPU_CACHE_LINE_SIZE);
- bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));
-
/*
* Set the max limit on sb_max to be 1/16 th of the size of
* memory allocated for mbuf clusters.
(nmbclusters << MCLSHIFT) >> MBSHIFT,
(nclusters << MCLSHIFT) >> MBSHIFT,
(njcl << MCLSHIFT) >> MBSHIFT);
+
+ /* initialize lock for tx completion callback table */
+ mbuf_tx_compl_tbl_lck_grp_attr = lck_grp_attr_alloc_init();
+ if (mbuf_tx_compl_tbl_lck_grp_attr == NULL) {
+ panic("%s: lck_grp_attr_alloc_init failed", __func__);
+ /* NOTREACHED */
+ }
+ mbuf_tx_compl_tbl_lck_grp = lck_grp_alloc_init("mbuf_tx_compl_tbl",
+ mbuf_tx_compl_tbl_lck_grp_attr);
+ if (mbuf_tx_compl_tbl_lck_grp == NULL) {
+ panic("%s: lck_grp_alloc_init failed", __func__);
+ /* NOTREACHED */
+ }
+ mbuf_tx_compl_tbl_lck_attr = lck_attr_alloc_init();
+ if (mbuf_tx_compl_tbl_lck_attr == NULL) {
+ panic("%s: lck_attr_alloc_init failed", __func__);
+ /* NOTREACHED */
+ }
+ lck_rw_init(mbuf_tx_compl_tbl_lock, mbuf_tx_compl_tbl_lck_grp,
+ mbuf_tx_compl_tbl_lck_attr);
+
}
/*
mcl_slab_t *sp;
mcache_obj_t *buf;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
-
- VERIFY(class != MC_16KCL || njcl > 0);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/* This should always be NULL for us */
VERIFY(m_cobjlist(class) == NULL);
* more than one buffer chunks (e.g. mbuf slabs). For other
* slabs, this probably doesn't make much of a difference.
*/
- if ((class == MC_MBUF || class == MC_CL) && (wait & MCR_COMP))
+ if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL)
+ && (wait & MCR_COMP))
sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead);
else
sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class));
(sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
buf = sp->sl_head;
VERIFY(slab_inrange(sp, buf) && sp == slab_get(buf));
+ sp->sl_head = buf->obj_next;
+ /* Increment slab reference */
+ sp->sl_refcnt++;
+
+ VERIFY(sp->sl_head != NULL || sp->sl_refcnt == sp->sl_chunks);
- if (class == MC_MBUF) {
- sp->sl_head = buf->obj_next;
- VERIFY(sp->sl_head != NULL || sp->sl_refcnt == (NMBPBG - 1));
- } else if (class == MC_CL) {
- sp->sl_head = buf->obj_next;
- VERIFY(sp->sl_head != NULL || sp->sl_refcnt == (NCLPBG - 1));
- } else {
- sp->sl_head = NULL;
- }
if (sp->sl_head != NULL && !slab_inrange(sp, sp->sl_head)) {
slab_nextptr_panic(sp, sp->sl_head);
/* In case sl_head is in the map but not in the slab */
/* NOTREACHED */
}
- /* Increment slab reference */
- sp->sl_refcnt++;
-
if (mclaudit != NULL) {
mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
mca->mca_uflags = 0;
if (class == MC_CL) {
mbstat.m_clfree = (--m_infree(MC_CL)) + m_infree(MC_MBUF_CL);
/*
- * A 2K cluster slab can have at most NCLPBG references.
+ * A 2K cluster slab can have at most NCLPG references.
*/
- VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPBG &&
- sp->sl_chunks == NCLPBG &&
- sp->sl_len == m_maxsize(MC_BIGCL));
- VERIFY(sp->sl_refcnt < NCLPBG || sp->sl_head == NULL);
+ VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NCLPG &&
+ sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
+ VERIFY(sp->sl_refcnt < NCLPG || sp->sl_head == NULL);
} else if (class == MC_BIGCL) {
mbstat.m_bigclfree = (--m_infree(MC_BIGCL)) +
m_infree(MC_MBUF_BIGCL);
/*
- * A 4K cluster slab can have at most 1 reference.
+ * A 4K cluster slab can have NBCLPG references.
*/
- VERIFY(sp->sl_refcnt == 1 && sp->sl_chunks == 1 &&
- sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
+ VERIFY(sp->sl_refcnt >= 1 && sp->sl_chunks == NBCLPG &&
+ sp->sl_len == PAGE_SIZE &&
+ (sp->sl_refcnt < NBCLPG || sp->sl_head == NULL));
} else if (class == MC_16KCL) {
mcl_slab_t *nsp;
int k;
* Since we have incremented the reference count above,
* an mbuf slab (formerly a 4KB cluster slab that was cut
* up into mbufs) must have a reference count between 1
- * and NMBPBG at this point.
+ * and NMBPG at this point.
*/
- VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPBG &&
- sp->sl_chunks == NMBPBG &&
- sp->sl_len == m_maxsize(MC_BIGCL));
- VERIFY(sp->sl_refcnt < NMBPBG || sp->sl_head == NULL);
+ VERIFY(sp->sl_refcnt >= 1 && sp->sl_refcnt <= NMBPG &&
+ sp->sl_chunks == NMBPG &&
+ sp->sl_len == PAGE_SIZE);
+ VERIFY(sp->sl_refcnt < NMBPG || sp->sl_head == NULL);
}
/* If empty, remove this slab from the class's freelist */
if (sp->sl_head == NULL) {
- VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPBG);
- VERIFY(class != MC_CL || sp->sl_refcnt == NCLPBG);
+ VERIFY(class != MC_MBUF || sp->sl_refcnt == NMBPG);
+ VERIFY(class != MC_CL || sp->sl_refcnt == NCLPG);
+ VERIFY(class != MC_BIGCL || sp->sl_refcnt == NBCLPG);
slab_remove(sp, class);
}
slab_free(mbuf_class_t class, mcache_obj_t *buf)
{
mcl_slab_t *sp;
+ boolean_t reinit_supercl = false;
+ mbuf_class_t super_class;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
VERIFY(class != MC_16KCL || njcl > 0);
VERIFY(buf->obj_next == NULL);
+
+ /*
+ * Synchronize with m_clalloc, as it reads m_total while we are
+ * modifying it here.
+ */
+ while (mb_clalloc_busy) {
+ mb_clalloc_waiters++;
+ (void) msleep(mb_clalloc_waitchan, mbuf_mlock,
+ (PZERO-1), "m_clalloc", NULL);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ }
+
+ /* We are busy now; tell everyone else to go away */
+ mb_clalloc_busy = TRUE;
+
sp = slab_get(buf);
VERIFY(sp->sl_class == class && slab_inrange(sp, buf) &&
(sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
* A slab that has been splitted for 2KB clusters can have
* at most 1 outstanding reference at this point.
*/
- VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPBG - 1) &&
- sp->sl_chunks == NCLPBG &&
- sp->sl_len == m_maxsize(MC_BIGCL));
- VERIFY(sp->sl_refcnt < (NCLPBG - 1) ||
+ VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NCLPG - 1) &&
+ sp->sl_chunks == NCLPG && sp->sl_len == PAGE_SIZE);
+ VERIFY(sp->sl_refcnt < (NCLPG - 1) ||
(slab_is_detached(sp) && sp->sl_head == NULL));
} else if (class == MC_BIGCL) {
- VERIFY(IS_P2ALIGNED(buf, MCLBYTES));
- /*
- * A 4KB cluster slab can have at most 1 reference
- * which must be 0 at this point.
- */
- VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
- sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
- VERIFY(slab_is_detached(sp));
+ VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));
+
+ /* A 4KB cluster slab can have NBCLPG references at most */
+ VERIFY(sp->sl_refcnt >= 0 && sp->sl_chunks == NBCLPG);
+ VERIFY(sp->sl_refcnt < (NBCLPG - 1) ||
+ (slab_is_detached(sp) && sp->sl_head == NULL));
} else if (class == MC_16KCL) {
mcl_slab_t *nsp;
int k;
* A 16KB cluster takes NSLABSP16KB slabs, all must
* now have 0 reference.
*/
- VERIFY(IS_P2ALIGNED(buf, MBIGCLBYTES));
+ VERIFY(IS_P2ALIGNED(buf, PAGE_SIZE));
VERIFY(sp->sl_refcnt == 0 && sp->sl_chunks == 1 &&
sp->sl_len == m_maxsize(class) && sp->sl_head == NULL);
VERIFY(slab_is_detached(sp));
}
} else {
/*
- * A slab that has been splitted for mbufs has at most NMBPBG
- * reference counts. Since we have decremented one reference
- * above, it must now be between 0 and NMBPBG-1.
+ * A slab that has been split for mbufs has at most
+ * NMBPG reference counts. Since we have decremented
+ * one reference above, it must now be between 0 and
+ * NMBPG-1.
*/
VERIFY(class == MC_MBUF);
- VERIFY(sp->sl_refcnt >= 0 && sp->sl_refcnt <= (NMBPBG - 1) &&
- sp->sl_chunks == NMBPBG &&
- sp->sl_len == m_maxsize(MC_BIGCL));
- VERIFY(sp->sl_refcnt < (NMBPBG - 1) ||
+ VERIFY(sp->sl_refcnt >= 0 &&
+ sp->sl_refcnt <= (NMBPG - 1) &&
+ sp->sl_chunks == NMBPG &&
+ sp->sl_len == PAGE_SIZE);
+ VERIFY(sp->sl_refcnt < (NMBPG - 1) ||
(slab_is_detached(sp) && sp->sl_head == NULL));
}
if (mclaudit != NULL) {
mcache_audit_t *mca = mcl_audit_buf2mca(class, buf);
if (mclverify) {
- mcache_audit_free_verify(mca, buf, 0, m_maxsize(class));
+ mcache_audit_free_verify(mca, buf, 0,
+ m_maxsize(class));
}
mca->mca_uflags &= ~MB_SCVALID;
}
} else if (class == MC_BIGCL) {
mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
m_infree(MC_MBUF_BIGCL);
+ buf->obj_next = sp->sl_head;
} else if (class == MC_16KCL) {
++m_infree(MC_16KCL);
} else {
sp->sl_head = buf;
/*
- * If a slab has been splitted to either one which holds 2KB clusters,
- * or one which holds mbufs, turn it back to one which holds a 4KB
- * cluster.
+ * If a slab has been split to either one which holds 2KB clusters,
+ * or one which holds mbufs, turn it back to one which holds a
+ * 4 or 16 KB cluster depending on the page size.
*/
+ if (m_maxsize(MC_BIGCL) == PAGE_SIZE) {
+ super_class = MC_BIGCL;
+ } else {
+ VERIFY(PAGE_SIZE == m_maxsize(MC_16KCL));
+ super_class = MC_16KCL;
+ }
if (class == MC_MBUF && sp->sl_refcnt == 0 &&
- m_total(class) > m_minlimit(class) &&
- m_total(MC_BIGCL) < m_maxlimit(MC_BIGCL)) {
- int i = NMBPBG;
+ m_total(class) >= (m_minlimit(class) + NMBPG) &&
+ m_total(super_class) < m_maxlimit(super_class)) {
+ int i = NMBPG;
- m_total(MC_BIGCL)++;
- mbstat.m_bigclusters = m_total(MC_BIGCL);
- m_total(MC_MBUF) -= NMBPBG;
+ m_total(MC_MBUF) -= NMBPG;
mbstat.m_mbufs = m_total(MC_MBUF);
- m_infree(MC_MBUF) -= NMBPBG;
- mtype_stat_add(MT_FREE, -((unsigned)NMBPBG));
-
- VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
- VERIFY(m_total(MC_MBUF) >= m_minlimit(MC_MBUF));
+ m_infree(MC_MBUF) -= NMBPG;
+ mtype_stat_add(MT_FREE, -((unsigned)NMBPG));
while (i--) {
struct mbuf *m = sp->sl_head;
sp->sl_head = m->m_next;
m->m_next = NULL;
}
- VERIFY(sp->sl_head == NULL);
-
- /* Remove the slab from the mbuf class's slab list */
- slab_remove(sp, class);
-
- /* Reinitialize it as a 4KB cluster slab */
- slab_init(sp, MC_BIGCL, sp->sl_flags, sp->sl_base, sp->sl_base,
- sp->sl_len, 0, 1);
-
- if (mclverify) {
- mcache_set_pattern(MCACHE_FREE_PATTERN,
- (caddr_t)sp->sl_head, m_maxsize(MC_BIGCL));
- }
- mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
- m_infree(MC_MBUF_BIGCL);
-
- VERIFY(slab_is_detached(sp));
- /* And finally switch class */
- class = MC_BIGCL;
+ reinit_supercl = true;
} else if (class == MC_CL && sp->sl_refcnt == 0 &&
- m_total(class) > m_minlimit(class) &&
- m_total(MC_BIGCL) < m_maxlimit(MC_BIGCL)) {
- int i = NCLPBG;
+ m_total(class) >= (m_minlimit(class) + NCLPG) &&
+ m_total(super_class) < m_maxlimit(super_class)) {
+ int i = NCLPG;
- m_total(MC_BIGCL)++;
- mbstat.m_bigclusters = m_total(MC_BIGCL);
- m_total(MC_CL) -= NCLPBG;
+ m_total(MC_CL) -= NCLPG;
mbstat.m_clusters = m_total(MC_CL);
- m_infree(MC_CL) -= NCLPBG;
- VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
- VERIFY(m_total(MC_CL) >= m_minlimit(MC_CL));
+ m_infree(MC_CL) -= NCLPG;
while (i--) {
union mcluster *c = sp->sl_head;
sp->sl_head = c->mcl_next;
c->mcl_next = NULL;
}
- VERIFY(sp->sl_head == NULL);
+ reinit_supercl = true;
+ } else if (class == MC_BIGCL && super_class != MC_BIGCL &&
+ sp->sl_refcnt == 0 &&
+ m_total(class) >= (m_minlimit(class) + NBCLPG) &&
+ m_total(super_class) < m_maxlimit(super_class)) {
+ int i = NBCLPG;
+
+ VERIFY(super_class == MC_16KCL);
+ m_total(MC_BIGCL) -= NBCLPG;
+ mbstat.m_bigclusters = m_total(MC_BIGCL);
+ m_infree(MC_BIGCL) -= NBCLPG;
- /* Remove the slab from the 2KB cluster class's slab list */
+ while (i--) {
+ union mbigcluster *bc = sp->sl_head;
+ VERIFY(bc != NULL);
+ sp->sl_head = bc->mbc_next;
+ bc->mbc_next = NULL;
+ }
+ reinit_supercl = true;
+ }
+
+ if (reinit_supercl) {
+ VERIFY(sp->sl_head == NULL);
+ VERIFY(m_total(class) >= m_minlimit(class));
slab_remove(sp, class);
- /* Reinitialize it as a 4KB cluster slab */
- slab_init(sp, MC_BIGCL, sp->sl_flags, sp->sl_base, sp->sl_base,
- sp->sl_len, 0, 1);
+ /* Reinitialize it as a cluster for the super class */
+ m_total(super_class)++;
+ m_infree(super_class)++;
+ VERIFY(sp->sl_flags == (SLF_MAPPED | SLF_DETACHED) &&
+ sp->sl_len == PAGE_SIZE && sp->sl_refcnt == 0);
- if (mclverify) {
+ slab_init(sp, super_class, SLF_MAPPED, sp->sl_base,
+ sp->sl_base, PAGE_SIZE, 0, 1);
+ if (mclverify)
mcache_set_pattern(MCACHE_FREE_PATTERN,
- (caddr_t)sp->sl_head, m_maxsize(MC_BIGCL));
+ (caddr_t)sp->sl_base, sp->sl_len);
+ ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL;
+
+ if (super_class == MC_BIGCL) {
+ mbstat.m_bigclusters = m_total(MC_BIGCL);
+ mbstat.m_bigclfree = m_infree(MC_BIGCL) +
+ m_infree(MC_MBUF_BIGCL);
}
- mbstat.m_bigclfree = (++m_infree(MC_BIGCL)) +
- m_infree(MC_MBUF_BIGCL);
VERIFY(slab_is_detached(sp));
+ VERIFY(m_total(super_class) <= m_maxlimit(super_class));
+
/* And finally switch class */
- class = MC_BIGCL;
+ class = super_class;
}
/* Reinsert the slab to the class's slab list */
if (slab_is_detached(sp))
slab_insert(sp, class);
+
+ /* We're done; let others enter */
+ mb_clalloc_busy = FALSE;
+ if (mb_clalloc_waiters > 0) {
+ mb_clalloc_waiters = 0;
+ wakeup(mb_clalloc_waitchan);
+ }
}
/*
* it later when we run out of elements.
*/
if (!mbuf_cached_above(class, wait) &&
- m_infree(class) < m_total(class) >> 5) {
+ m_infree(class) < (m_total(class) >> 5)) {
(void) freelist_populate(class, 1,
M_DONTWAIT);
}
mbuf_sleep(class, need, wait))
break;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
}
}
VERIFY(need > 0);
VERIFY(class != MC_MBUF_16KCL || njcl > 0);
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/* Get what we can from the freelist */
while ((*list = m_cobjlist(class)) != NULL) {
cl = m->m_ext.ext_buf;
clsp = slab_get(cl);
VERIFY(m->m_flags == M_EXT && cl != NULL);
- VERIFY(MEXT_RFA(m) != NULL && MBUF_IS_COMPOSITE(m));
+ VERIFY(m_get_rfa(m) != NULL && MBUF_IS_COMPOSITE(m));
if (class == MC_MBUF_CL) {
VERIFY(clsp->sl_refcnt >= 1 &&
- clsp->sl_refcnt <= NCLPBG);
+ clsp->sl_refcnt <= NCLPG);
} else {
- VERIFY(clsp->sl_refcnt == 1);
+ VERIFY(clsp->sl_refcnt >= 1 &&
+ clsp->sl_refcnt <= NBCLPG);
}
if (class == MC_MBUF_16KCL) {
ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
VERIFY(class != MC_MBUF_16KCL || njcl > 0);
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
if (class == MC_MBUF_CL) {
cl_class = MC_CL;
}
VERIFY(ms->m_type == MT_FREE);
VERIFY(ms->m_flags == M_EXT);
- VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+ VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
if (cl_class == MC_CL) {
VERIFY(clsp->sl_refcnt >= 1 &&
- clsp->sl_refcnt <= NCLPBG);
+ clsp->sl_refcnt <= NCLPG);
} else {
- VERIFY(clsp->sl_refcnt == 1);
+ VERIFY(clsp->sl_refcnt >= 1 &&
+ clsp->sl_refcnt <= NBCLPG);
}
if (cl_class == MC_16KCL) {
int k;
if (mclaudit != NULL)
mcl_audit_restore_mbuf(m, mca, TRUE);
+ MEXT_MINREF(m) = 0;
MEXT_REF(m) = 0;
+ MEXT_PREF(m) = 0;
MEXT_FLAGS(m) = 0;
+ MEXT_PRIV(m) = 0;
+ MEXT_PMBUF(m) = NULL;
+ MEXT_TOKEN(m) = 0;
- rfa = (mcache_obj_t *)(void *)MEXT_RFA(m);
+ rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
+ m_set_ext(m, NULL, NULL, NULL);
rfa->obj_next = ref_list;
ref_list = rfa;
- MEXT_RFA(m) = NULL;
m->m_type = MT_FREE;
m->m_flags = m->m_len = 0;
lck_mtx_lock(mbuf_mlock);
mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m);
ms = MCA_SAVED_MBUF_PTR(mca);
- cl_mca = mcl_audit_buf2mca(MC_CL, (mcache_obj_t *)cl);
+ cl_mca = mcl_audit_buf2mca(cl_class,
+ (mcache_obj_t *)cl);
/*
* Pair them up. Note that this is done at the time
MBUF_CL_INIT(ms, cl, rfa, 0, EXTF_COMPOSITE);
}
VERIFY(ms->m_flags == M_EXT);
- VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+ VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
*list = (mcache_obj_t *)m;
(*list)->obj_next = NULL;
static void
mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
- mbuf_class_t class = (mbuf_class_t)arg;
+ mbuf_class_t class = (mbuf_class_t)arg, cl_class;
mcache_audit_t *mca;
struct mbuf *m, *ms;
mcl_slab_t *clsp, *nsp;
- size_t size;
+ size_t cl_size;
void *cl;
ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
+ if (class == MC_MBUF_CL)
+ cl_class = MC_CL;
+ else if (class == MC_MBUF_BIGCL)
+ cl_class = MC_BIGCL;
+ else
+ cl_class = MC_16KCL;
+ cl_size = m_maxsize(cl_class);
while ((m = ms = (struct mbuf *)list) != NULL) {
lck_mtx_lock(mbuf_mlock);
cl = ms->m_ext.ext_buf;
clsp = slab_get(cl);
VERIFY(ms->m_flags == M_EXT && cl != NULL);
- VERIFY(MEXT_RFA(ms) != NULL && MBUF_IS_COMPOSITE(ms));
+ VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms));
if (class == MC_MBUF_CL)
VERIFY(clsp->sl_refcnt >= 1 &&
- clsp->sl_refcnt <= NCLPBG);
+ clsp->sl_refcnt <= NCLPG);
else
- VERIFY(clsp->sl_refcnt == 1);
+ VERIFY(clsp->sl_refcnt >= 1 &&
+ clsp->sl_refcnt <= NBCLPG);
if (class == MC_MBUF_16KCL) {
int k;
}
}
- mca = mcl_audit_buf2mca(MC_CL, cl);
- if (class == MC_MBUF_CL)
- size = m_maxsize(MC_CL);
- else if (class == MC_MBUF_BIGCL)
- size = m_maxsize(MC_BIGCL);
- else
- size = m_maxsize(MC_16KCL);
- mcl_audit_cluster(mca, cl, size, alloc, FALSE);
+
+ mca = mcl_audit_buf2mca(cl_class, cl);
+ mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE);
if (mcltrace)
mcache_buffer_log(mca, cl, m_cache(class), &mb_start);
}
}
+static void
+m_vm_error_stats(uint32_t *cnt, uint64_t *ts, uint64_t *size,
+ uint64_t alloc_size, kern_return_t error)
+{
+
+ *cnt = *cnt + 1;
+ *ts = net_uptime();
+ if (size) {
+ *size = alloc_size;
+ }
+ _CASSERT(sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]) ==
+ sizeof(mb_kmem_stats_labels) / sizeof(mb_kmem_stats_labels[0]));
+ switch (error) {
+ case KERN_SUCCESS:
+ break;
+ case KERN_INVALID_ARGUMENT:
+ mb_kmem_stats[0]++;
+ break;
+ case KERN_INVALID_ADDRESS:
+ mb_kmem_stats[1]++;
+ break;
+ case KERN_RESOURCE_SHORTAGE:
+ mb_kmem_stats[2]++;
+ break;
+ case KERN_NO_SPACE:
+ mb_kmem_stats[3]++;
+ break;
+ case KERN_FAILURE:
+ mb_kmem_stats[4]++;
+ break;
+ default:
+ mb_kmem_stats[5]++;
+ break;
+ }
+}
+
/*
* Allocate some number of mbuf clusters and place on cluster freelist.
*/
static int
m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
{
- int i;
+ int i, count = 0;
vm_size_t size = 0;
- int numpages = 0, large_buffer = (bufsize == m_maxsize(MC_16KCL));
+ int numpages = 0, large_buffer;
vm_offset_t page = 0;
mcache_audit_t *mca_list = NULL;
mcache_obj_t *con_list = NULL;
mcl_slab_t *sp;
+ mbuf_class_t class;
+ kern_return_t error;
+ /* Set if a single buffer allocation spans multiple pages */
+ large_buffer = ((bufsize == m_maxsize(MC_16KCL)) &&
+ PAGE_SIZE < M16KCLBYTES);
VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
bufsize == m_maxsize(MC_16KCL));
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ VERIFY((bufsize == PAGE_SIZE) ||
+ (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL)));
+
+ if (bufsize == m_size(MC_BIGCL))
+ class = MC_BIGCL;
+ else
+ class = MC_16KCL;
+
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/*
* Multiple threads may attempt to populate the cluster map one
mb_clalloc_waiters++;
(void) msleep(mb_clalloc_waitchan, mbuf_mlock,
(PZERO-1), "m_clalloc", NULL);
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
}
/* We are busy now; tell everyone else to go away */
* to grow the pool asynchronously using the mbuf worker thread.
*/
i = m_howmany(num, bufsize);
- if (i == 0 || (wait & M_DONTWAIT))
+ if (i <= 0 || (wait & M_DONTWAIT))
goto out;
lck_mtx_unlock(mbuf_mlock);
size = round_page(i * bufsize);
- page = kmem_mb_alloc(mb_map, size, large_buffer);
+ page = kmem_mb_alloc(mb_map, size, large_buffer, &error);
/*
* If we did ask for "n" 16KB physically contiguous chunks
* and didn't get them, then please try again without this
* restriction.
*/
- if (large_buffer && page == 0)
- page = kmem_mb_alloc(mb_map, size, 0);
+ net_update_uptime();
+ if (large_buffer && page == 0) {
+ m_vm_error_stats(&mb_kmem_contig_failed,
+ &mb_kmem_contig_failed_ts,
+ &mb_kmem_contig_failed_size,
+ size, error);
+ page = kmem_mb_alloc(mb_map, size, 0, &error);
+ }
if (page == 0) {
+ m_vm_error_stats(&mb_kmem_failed,
+ &mb_kmem_failed_ts,
+ &mb_kmem_failed_size,
+ size, error);
+#if PAGE_SIZE == 4096
if (bufsize == m_maxsize(MC_BIGCL)) {
- /* Try for 1 page if failed, only 4KB request */
- size = NBPG;
- page = kmem_mb_alloc(mb_map, size, 0);
+#else
+ if (bufsize >= m_maxsize(MC_BIGCL)) {
+#endif
+ /* Try for 1 page if failed */
+ size = PAGE_SIZE;
+ page = kmem_mb_alloc(mb_map, size, 0, &error);
}
if (page == 0) {
+ m_vm_error_stats(&mb_kmem_one_failed,
+ &mb_kmem_one_failed_ts,
+ NULL, size, error);
lck_mtx_lock(mbuf_mlock);
goto out;
}
}
- VERIFY(IS_P2ALIGNED(page, NBPG));
- numpages = size / NBPG;
+ VERIFY(IS_P2ALIGNED(page, PAGE_SIZE));
+ numpages = size / PAGE_SIZE;
/* If auditing is enabled, allocate the audit structures now */
if (mclaudit != NULL) {
/*
* Yes, I realize this is a waste of memory for clusters
* that never get transformed into mbufs, as we may end
- * up with NMBPBG-1 unused audit structures per cluster.
+ * up with NMBPG-1 unused audit structures per cluster.
* But doing so tremendously simplifies the allocation
* strategy, since at this point we are not holding the
* mbuf lock and the caller is okay to be blocked.
*/
- if (bufsize == m_maxsize(MC_BIGCL)) {
- needed = numpages * NMBPBG;
+ if (bufsize == PAGE_SIZE) {
+ needed = numpages * NMBPG;
i = mcache_alloc_ext(mcl_audit_con_cache,
&con_list, needed, MCR_SLEEP);
VERIFY(con_list != NULL && i == needed);
} else {
+ /*
+ * if multiple 4K pages are being used for a
+ * 16K cluster
+ */
needed = numpages / NSLABSP16KB;
}
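+ /*
+ * For the 16KB case above: with 4KB pages (NSLABSP16KB == 4), a
+ * 64KB allocation (numpages == 16) yields needed == 4 audit
+ * structures, one per 16KB cluster rather than one per page.
+ */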
lck_mtx_lock(mbuf_mlock);
- for (i = 0; i < numpages; i++, page += NBPG) {
- ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
+ for (i = 0; i < numpages; i++, page += PAGE_SIZE) {
+ ppnum_t offset =
+ ((unsigned char *)page - mbutl) >> PAGE_SHIFT;
ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
- mbuf_class_t class = MC_BIGCL;
/*
- * If there is a mapper the appropriate I/O page is returned;
- * zero out the page to discard its past contents to prevent
- * exposing leftover kernel memory.
+ * If there is a mapper the appropriate I/O page is
+ * returned; zero out the page to discard its past
+ * contents to prevent exposing leftover kernel memory.
*/
VERIFY(offset < mcl_pages);
if (mcl_paddr_base != 0) {
- bzero((void *)(uintptr_t) page, page_size);
+ bzero((void *)(uintptr_t) page, PAGE_SIZE);
new_page = IOMapperInsertPage(mcl_paddr_base,
offset, new_page);
}
/* Pattern-fill this fresh page */
if (mclverify) {
mcache_set_pattern(MCACHE_FREE_PATTERN,
- (caddr_t)page, NBPG);
+ (caddr_t)page, PAGE_SIZE);
}
- if (bufsize == m_maxsize(MC_BIGCL)) {
- union mbigcluster *mbc = (union mbigcluster *)page;
-
+ if (bufsize == PAGE_SIZE) {
+ mcache_obj_t *buf;
/* One for the entire page */
- sp = slab_get(mbc);
+ sp = slab_get((void *)page);
if (mclaudit != NULL) {
- mcl_audit_init(mbc, &mca_list, &con_list,
- AUDIT_CONTENTS_SIZE, NMBPBG);
+ mcl_audit_init((void *)page,
+ &mca_list, &con_list,
+ AUDIT_CONTENTS_SIZE, NMBPG);
}
VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
- slab_init(sp, MC_BIGCL, SLF_MAPPED,
- mbc, mbc, bufsize, 0, 1);
+ slab_init(sp, class, SLF_MAPPED, (void *)page,
+ (void *)page, PAGE_SIZE, 0, 1);
+ buf = (mcache_obj_t *)page;
+ buf->obj_next = NULL;
/* Insert this slab */
- slab_insert(sp, MC_BIGCL);
-
- /* Update stats now since slab_get() drops the lock */
- mbstat.m_bigclfree = ++m_infree(MC_BIGCL) +
- m_infree(MC_MBUF_BIGCL);
- mbstat.m_bigclusters = ++m_total(MC_BIGCL);
- VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
- class = MC_BIGCL;
- } else if ((i % NSLABSP16KB) == 0) {
+ slab_insert(sp, class);
+
+ /* Update stats now since slab_get drops the lock */
+ ++m_infree(class);
+ ++m_total(class);
+ VERIFY(m_total(class) <= m_maxlimit(class));
+ if (class == MC_BIGCL) {
+ mbstat.m_bigclfree = m_infree(MC_BIGCL) +
+ m_infree(MC_MBUF_BIGCL);
+ mbstat.m_bigclusters = m_total(MC_BIGCL);
+ }
+ ++count;
+ } else if ((bufsize > PAGE_SIZE) &&
+ (i % NSLABSP16KB) == 0) {
union m16kcluster *m16kcl = (union m16kcluster *)page;
mcl_slab_t *nsp;
int k;
- VERIFY(njcl > 0);
/* One for the entire 16KB */
sp = slab_get(m16kcl);
if (mclaudit != NULL)
VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0);
slab_init(sp, MC_16KCL, SLF_MAPPED,
m16kcl, m16kcl, bufsize, 0, 1);
+ m16kcl->m16kcl_next = NULL;
/*
* 2nd-Nth page's slab is part of the first one,
SLF_MAPPED | SLF_PARTIAL,
m16kcl, NULL, 0, 0, 0);
}
-
/* Insert this slab */
slab_insert(sp, MC_16KCL);
- /* Update stats now since slab_get() drops the lock */
- m_infree(MC_16KCL)++;
- m_total(MC_16KCL)++;
+ /* Update stats now since slab_get drops the lock */
+ ++m_infree(MC_16KCL);
+ ++m_total(MC_16KCL);
VERIFY(m_total(MC_16KCL) <= m_maxlimit(MC_16KCL));
- class = MC_16KCL;
+ ++count;
}
- if (!mb_peak_newreport && mbuf_report_usage(class))
- mb_peak_newreport = TRUE;
}
VERIFY(mca_list == NULL && con_list == NULL);
+ if (!mb_peak_newreport && mbuf_report_usage(class))
+ mb_peak_newreport = TRUE;
+
/* We're done; let others enter */
mb_clalloc_busy = FALSE;
if (mb_clalloc_waiters > 0) {
wakeup(mb_clalloc_waitchan);
}
- if (bufsize == m_maxsize(MC_BIGCL))
- return (numpages);
-
- VERIFY(bufsize == m_maxsize(MC_16KCL));
- return (numpages / NSLABSP16KB);
-
+ return (count);
out:
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/* We're done; let others enter */
mb_clalloc_busy = FALSE;
* When non-blocking we kick a thread if we have to grow the
* pool or if the number of free clusters is less than requested.
*/
- if (bufsize == m_maxsize(MC_BIGCL)) {
+ if (i > 0 && mbuf_worker_ready && mbuf_worker_needs_wakeup) {
+ wakeup((caddr_t)&mbuf_worker_needs_wakeup);
+ mbuf_worker_needs_wakeup = FALSE;
+ }
+ if (class == MC_BIGCL) {
if (i > 0) {
/*
* Remember total number of 4KB clusters needed
* at this time.
*/
i += m_total(MC_BIGCL);
- if (i > mbuf_expand_big) {
- mbuf_expand_big = i;
- if (mbuf_worker_ready)
- wakeup((caddr_t)&mbuf_worker_run);
+ if (i > m_region_expand(MC_BIGCL)) {
+ m_region_expand(MC_BIGCL) = i;
}
}
-
if (m_infree(MC_BIGCL) >= num)
return (1);
} else {
* at this time.
*/
i += m_total(MC_16KCL);
- if (i > mbuf_expand_16k) {
- mbuf_expand_16k = i;
- if (mbuf_worker_ready)
- wakeup((caddr_t)&mbuf_worker_run);
+ if (i > m_region_expand(MC_16KCL)) {
+ m_region_expand(MC_16KCL) = i;
}
}
-
if (m_infree(MC_16KCL) >= num)
return (1);
}
{
mcache_obj_t *o = NULL;
int i, numpages = 0, count;
+ mbuf_class_t super_class;
VERIFY(class == MC_MBUF || class == MC_CL || class == MC_BIGCL ||
class == MC_16KCL);
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
- switch (class) {
- case MC_MBUF:
- case MC_CL:
- case MC_BIGCL:
- numpages = (num * m_size(class) + NBPG - 1) / NBPG;
- i = m_clalloc(numpages, wait, m_maxsize(MC_BIGCL));
+ VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
+ PAGE_SIZE == m_maxsize(MC_16KCL));
- /* Respect the 4KB clusters minimum limit */
- if (m_total(MC_BIGCL) == m_maxlimit(MC_BIGCL) &&
- m_infree(MC_BIGCL) <= m_minlimit(MC_BIGCL)) {
- if (class != MC_BIGCL || (wait & MCR_COMP))
- return (0);
- }
- if (class == MC_BIGCL)
- return (i != 0);
- break;
+ if (m_maxsize(class) >= PAGE_SIZE)
+ return(m_clalloc(num, wait, m_maxsize(class)) != 0);
- case MC_16KCL:
- return (m_clalloc(num, wait, m_maxsize(class)) != 0);
- /* NOTREACHED */
+ /*
+ * The rest of the function allocates pages and slices them up
+ * into objects of the right size.
+ */
- default:
- VERIFY(0);
- /* NOTREACHED */
- }
+ numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ /* Currently assume that pages are 4K or 16K */
+ if (PAGE_SIZE == m_maxsize(MC_BIGCL))
+ super_class = MC_BIGCL;
+ else
+ super_class = MC_16KCL;
- VERIFY(class == MC_MBUF || class == MC_CL);
+ i = m_clalloc(numpages, wait, m_maxsize(super_class));
/* how many objects will we cut the page into? */
- int numobj = (class == MC_MBUF ? NMBPBG : NCLPBG);
+ int numobj = PAGE_SIZE / m_maxsize(class);
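+ /*
+ * For example, assuming 4KB pages and MSIZE == 256, a page is cut
+ * into NMBPG == 16 mbufs or NCLPG == 2 2KB clusters; with 16KB
+ * pages it could instead yield NBCLPG == 4 4KB clusters.
+ */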
for (count = 0; count < numpages; count++) {
-
/* respect totals, minlimit, maxlimit */
- if (m_total(MC_BIGCL) <= m_minlimit(MC_BIGCL) ||
+ if (m_total(super_class) <= m_minlimit(super_class) ||
m_total(class) >= m_maxlimit(class))
break;
- if ((o = slab_alloc(MC_BIGCL, wait)) == NULL)
+ if ((o = slab_alloc(super_class, wait)) == NULL)
break;
struct mbuf *m = (struct mbuf *)o;
union mcluster *c = (union mcluster *)o;
+ union mbigcluster *mbc = (union mbigcluster *)o;
mcl_slab_t *sp = slab_get(o);
mcache_audit_t *mca = NULL;
- VERIFY(slab_is_detached(sp) &&
- (sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
-
+ /*
+ * Since one full page will be converted to MC_MBUF, MC_CL or
+ * MC_BIGCL, verify that the reference count will match that
+ * assumption.
+ */
+ VERIFY(sp->sl_refcnt == 1 && slab_is_detached(sp));
+ VERIFY((sp->sl_flags & (SLF_MAPPED | SLF_PARTIAL)) == SLF_MAPPED);
/*
* Make sure that the cluster is unmolested
* while in freelist
*/
if (mclverify) {
- mca = mcl_audit_buf2mca(MC_BIGCL, o);
- mcache_audit_free_verify(mca, o, 0,
- m_maxsize(MC_BIGCL));
+ mca = mcl_audit_buf2mca(super_class,
+ (mcache_obj_t *)o);
+ mcache_audit_free_verify(mca,
+ (mcache_obj_t *)o, 0, m_maxsize(super_class));
}
- /* Reinitialize it as an mbuf or 2K slab */
+ /* Reinitialize it as an mbuf or 2K or 4K slab */
slab_init(sp, class, sp->sl_flags,
- sp->sl_base, NULL, sp->sl_len, 0, numobj);
+ sp->sl_base, NULL, PAGE_SIZE, 0, numobj);
- VERIFY(o == (mcache_obj_t *)sp->sl_base);
VERIFY(sp->sl_head == NULL);
- VERIFY(m_total(MC_BIGCL) > 0);
- m_total(MC_BIGCL)--;
- mbstat.m_bigclusters = m_total(MC_BIGCL);
+ VERIFY(m_total(super_class) >= 1);
+ m_total(super_class)--;
+
+ if (super_class == MC_BIGCL)
+ mbstat.m_bigclusters = m_total(MC_BIGCL);
m_total(class) += numobj;
+ VERIFY(m_total(class) <= m_maxlimit(class));
m_infree(class) += numobj;
- VERIFY(m_total(MC_BIGCL) >= m_minlimit(MC_BIGCL));
- VERIFY(m_total(class) <= m_maxlimit(class));
if (!mb_peak_newreport && mbuf_report_usage(class))
mb_peak_newreport = TRUE;
i = numobj;
if (class == MC_MBUF) {
mbstat.m_mbufs = m_total(MC_MBUF);
- mtype_stat_add(MT_FREE, NMBPBG);
+ mtype_stat_add(MT_FREE, NMBPG);
while (i--) {
/*
* If auditing is enabled, construct the
m->m_next = sp->sl_head;
sp->sl_head = (void *)m++;
}
- } else { /* MC_CL */
+ } else if (class == MC_CL) { /* MC_CL */
mbstat.m_clfree =
m_infree(MC_CL) + m_infree(MC_MBUF_CL);
mbstat.m_clusters = m_total(MC_CL);
c->mcl_next = sp->sl_head;
sp->sl_head = (void *)c++;
}
+ } else {
+ VERIFY(class == MC_BIGCL);
+ mbstat.m_bigclusters = m_total(MC_BIGCL);
+ mbstat.m_bigclfree = m_infree(MC_BIGCL) +
+ m_infree(MC_MBUF_BIGCL);
+ while (i--) {
+ mbc->mbc_next = sp->sl_head;
+ sp->sl_head = (void *)mbc++;
+ }
}
- /* Insert into the mbuf or 2k slab list */
+ /* Insert into the mbuf or 2k or 4k slab list */
slab_insert(sp, class);
if ((i = mb_waiters) > 0)
static void
freelist_init(mbuf_class_t class)
{
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
VERIFY(class == MC_CL || class == MC_BIGCL);
VERIFY(m_total(class) == 0);
mcache_obj_t **list = ⊤
unsigned int tot = 0;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
switch (class) {
case MC_MBUF:
{
int m, bmap = 0;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
return (m);
}
+static int
+m_free_paired(struct mbuf *m)
+{
+ VERIFY((m->m_flags & M_EXT) && (MEXT_FLAGS(m) & EXTF_PAIRED));
+
+ membar_sync();
+ if (MEXT_PMBUF(m) == m) {
+ volatile UInt16 *addr = (volatile UInt16 *)&MEXT_PREF(m);
+ int16_t oprefcnt, prefcnt;
+
+ /*
+ * Paired ref count might be negative in case we lose
+ * against another thread clearing MEXT_PMBUF, in the
+ * event it occurs after the above memory barrier sync.
+ * In that case just ignore as things have been unpaired.
+ */
+ do {
+ oprefcnt = *addr;
+ prefcnt = oprefcnt - 1;
+ } while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr));
+
+ if (prefcnt > 1) {
+ return (1);
+ } else if (prefcnt == 1) {
+ (*(m_get_ext_free(m)))(m->m_ext.ext_buf,
+ m->m_ext.ext_size, m_get_ext_arg(m));
+ return (1);
+ } else if (prefcnt == 0) {
+ VERIFY(MBUF_IS_PAIRED(m));
+
+ /*
+ * Restore minref to its natural value, so that
+ * the caller will be able to free the cluster
+ * as appropriate.
+ */
+ MEXT_MINREF(m) = 0;
+
+ /*
+ * Clear MEXT_PMBUF, but leave EXTF_PAIRED intact
+ * as it is immutable. atomic_set_ptr also causes
+ * memory barrier sync.
+ */
+ atomic_set_ptr(&MEXT_PMBUF(m), NULL);
+
+ switch (m->m_ext.ext_size) {
+ case MCLBYTES:
+ m_set_ext(m, m_get_rfa(m), NULL, NULL);
+ break;
+
+ case MBIGCLBYTES:
+ m_set_ext(m, m_get_rfa(m), m_bigfree, NULL);
+ break;
+
+ case M16KCLBYTES:
+ m_set_ext(m, m_get_rfa(m), m_16kfree, NULL);
+ break;
+
+ default:
+ VERIFY(0);
+ /* NOTREACHED */
+ }
+ }
+ }
+
+ /*
+ * Tell caller the unpair has occurred, and that the reference
+ * count on the external cluster held for the paired mbuf should
+ * now be dropped.
+ */
+ return (0);
+}
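+/*
+ * Sketch of the caller contract (see m_free below):
+ *     if (MBUF_IS_PAIRED(m) && m_free_paired(m))
+ *         return (n);
+ * i.e. a non-zero return means the mbuf is still owned by the pairing
+ * and must not be freed here, while a zero return means the unpair has
+ * occurred and the normal m_decref()/free path proceeds.
+ */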
+
struct mbuf *
m_free(struct mbuf *m)
{
m_redzone_verify(m);
/* Free the aux data and tags if there is any */
m_tag_delete_chain(m, NULL);
+
+ m_do_tx_compl_callback(m, NULL);
}
if (m->m_flags & M_EXT) {
- u_int32_t refcnt;
+ u_int16_t refcnt;
u_int32_t composite;
+ m_ext_free_func_t m_free_func;
+
+ if (MBUF_IS_PAIRED(m) && m_free_paired(m))
+ return (n);
refcnt = m_decref(m);
composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
- if (refcnt == 0 && !composite) {
- if (m->m_ext.ext_free == NULL) {
+ m_free_func = m_get_ext_free(m);
+
+ if (refcnt == MEXT_MINREF(m) && !composite) {
+ if (m_free_func == NULL) {
mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
mcache_free(m_cache(MC_BIGCL),
m->m_ext.ext_buf);
- } else if (m->m_ext.ext_free == m_16kfree) {
+ } else if (m_free_func == m_16kfree) {
mcache_free(m_cache(MC_16KCL),
m->m_ext.ext_buf);
} else {
- (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
- m->m_ext.ext_size, m->m_ext.ext_arg);
+ (*m_free_func)(m->m_ext.ext_buf,
+ m->m_ext.ext_size, m_get_ext_arg(m));
}
- mcache_free(ref_cache, MEXT_RFA(m));
- MEXT_RFA(m) = NULL;
- } else if (refcnt == 0 && composite) {
+ mcache_free(ref_cache, m_get_rfa(m));
+ m_set_ext(m, NULL, NULL, NULL);
+ } else if (refcnt == MEXT_MINREF(m) && composite) {
+ VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
VERIFY(m->m_type != MT_FREE);
mtype_stat_dec(m->m_type);
MEXT_FLAGS(m) &= ~EXTF_READONLY;
/* "Free" into the intermediate cache */
- if (m->m_ext.ext_free == NULL) {
+ if (m_free_func == NULL) {
mcache_free(m_cache(MC_MBUF_CL), m);
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
mcache_free(m_cache(MC_MBUF_BIGCL), m);
} else {
- VERIFY(m->m_ext.ext_free == m_16kfree);
+ VERIFY(m_free_func == m_16kfree);
mcache_free(m_cache(MC_MBUF_16KCL), m);
}
return (n);
__private_extern__ struct mbuf *
m_clattach(struct mbuf *m, int type, caddr_t extbuf,
void (*extfree)(caddr_t, u_int, caddr_t), u_int extsize, caddr_t extarg,
- int wait)
+ int wait, int pair)
{
struct ext_ref *rfa = NULL;
- if (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)
+ /*
+ * If pairing is requested and an existing mbuf is provided, reject
+ * it if it's already been paired to another cluster. Otherwise,
+ * allocate a new one or free any existing below.
+ */
+ if ((m != NULL && MBUF_IS_PAIRED(m)) ||
+ (m == NULL && (m = _M_GETHDR(wait, type)) == NULL))
return (NULL);
if (m->m_flags & M_EXT) {
- u_int32_t refcnt;
+ u_int16_t refcnt;
u_int32_t composite;
+ m_ext_free_func_t m_free_func;
refcnt = m_decref(m);
composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
- if (refcnt == 0 && !composite) {
- if (m->m_ext.ext_free == NULL) {
+ VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED) && MEXT_PMBUF(m) == NULL);
+ m_free_func = m_get_ext_free(m);
+ if (refcnt == MEXT_MINREF(m) && !composite) {
+ if (m_free_func == NULL) {
mcache_free(m_cache(MC_CL), m->m_ext.ext_buf);
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
mcache_free(m_cache(MC_BIGCL),
m->m_ext.ext_buf);
- } else if (m->m_ext.ext_free == m_16kfree) {
+ } else if (m_free_func == m_16kfree) {
mcache_free(m_cache(MC_16KCL),
m->m_ext.ext_buf);
} else {
- (*(m->m_ext.ext_free))(m->m_ext.ext_buf,
- m->m_ext.ext_size, m->m_ext.ext_arg);
+ (*m_free_func)(m->m_ext.ext_buf,
+ m->m_ext.ext_size, m_get_ext_arg(m));
}
/* Re-use the reference structure */
- rfa = MEXT_RFA(m);
- } else if (refcnt == 0 && composite) {
+ rfa = m_get_rfa(m);
+ } else if (refcnt == MEXT_MINREF(m) && composite) {
VERIFY(m->m_type != MT_FREE);
mtype_stat_dec(m->m_type);
MEXT_FLAGS(m) &= ~EXTF_READONLY;
/* "Free" into the intermediate cache */
- if (m->m_ext.ext_free == NULL) {
+ if (m_free_func == NULL) {
mcache_free(m_cache(MC_MBUF_CL), m);
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
mcache_free(m_cache(MC_MBUF_BIGCL), m);
} else {
- VERIFY(m->m_ext.ext_free == m_16kfree);
+ VERIFY(m_free_func == m_16kfree);
mcache_free(m_cache(MC_MBUF_16KCL), m);
}
/*
return (NULL);
}
- MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa, 1, 0);
+ if (!pair) {
+ MEXT_INIT(m, extbuf, extsize, extfree, extarg, rfa,
+ 0, 1, 0, 0, 0, NULL);
+ } else {
+ MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
+ 1, 1, 1, EXTF_PAIRED, 0, m);
+ }
return (m);
}
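+/*
+ * Usage sketch for m_clattach() (illustrative only; drv_buf, drv_buf_size,
+ * drv_buf_free and drv_softc are hypothetical caller-supplied names):
+ *
+ *    m = m_clattach(NULL, MT_DATA, (caddr_t)drv_buf, drv_buf_free,
+ *        drv_buf_size, (caddr_t)drv_softc, M_WAIT, 0);
+ *
+ * Passing 1 for the new pair argument instead stores the mbuf itself as
+ * ext_arg and as the paired mbuf and sets EXTF_PAIRED, so the mbuf and
+ * its cluster are torn down together through m_free_paired().
+ */
+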
m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags);
if (m != NULL) {
- u_int32_t flag;
+ u_int16_t flag;
struct ext_ref *rfa;
void *cl;
VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
cl = m->m_ext.ext_buf;
- rfa = MEXT_RFA(m);
+ rfa = m_get_rfa(m);
ASSERT(cl != NULL && rfa != NULL);
- VERIFY(MBUF_IS_COMPOSITE(m) && m->m_ext.ext_free == NULL);
+ VERIFY(MBUF_IS_COMPOSITE(m) && m_get_ext_free(m) == NULL);
flag = MEXT_FLAGS(m);
if (!(m->m_flags & M_EXT))
return (0);
- ASSERT(MEXT_RFA(m) != NULL);
+ ASSERT(m_get_rfa(m) != NULL);
return ((MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0);
}
void
m_copy_pftag(struct mbuf *to, struct mbuf *from)
{
- to->m_pkthdr.pf_mtag = from->m_pkthdr.pf_mtag;
+ memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag));
#if PF_ECN
- to->m_pkthdr.pf_mtag.pftag_hdr = NULL;
- to->m_pkthdr.pf_mtag.pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6);
+ m_pftag(to)->pftag_hdr = NULL;
+ m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6);
#endif /* PF_ECN */
}
(void) m_set_service_class(m, MBUF_SC_BE);
if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO))
m->m_pkthdr.pkt_ifainfo = 0;
-#if MEASURE_BW
- m->m_pkthdr.pkt_bwseq = 0;
-#endif /* MEASURE_BW */
+ /*
+ * Preserve timestamp if requested
+ */
+ if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID))
+ m->m_pkthdr.pkt_timestamp = 0;
}
void
to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
(void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
-#if MEASURE_BW
- to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq;
-#endif /* MEASURE_BW */
}
/*
unsigned int pnum, needed = *num_needed;
mcache_obj_t *mp_list = NULL;
int mcflags = MSLEEPF(wait);
- u_int32_t flag;
+ u_int16_t flag;
struct ext_ref *rfa;
mcache_t *cp;
void *cl;
VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
cl = m->m_ext.ext_buf;
- rfa = MEXT_RFA(m);
+ rfa = m_get_rfa(m);
ASSERT(cl != NULL && rfa != NULL);
VERIFY(MBUF_IS_COMPOSITE(m));
printf("%s: File a radar related to <rdar://10146739>. \
needed = %u, pnum = %u, num_needed = %u \n",
__func__, needed, pnum, *num_needed);
- }
+ }
*num_needed = pnum;
return (top);
}
} else if (bufsize == m_maxsize(MC_16KCL)) {
VERIFY(njcl > 0);
- nsegs = ((packetlen - 1) >> (PGSHIFT + 2)) + 1;
+ nsegs = ((packetlen - 1) >> M16KCLSHIFT) + 1;
} else if (bufsize == m_maxsize(MC_BIGCL)) {
- nsegs = ((packetlen - 1) >> PGSHIFT) + 1;
+ nsegs = ((packetlen - 1) >> MBIGCLSHIFT) + 1;
} else {
nsegs = ((packetlen - 1) >> MCLSHIFT) + 1;
}
for (;;) {
struct mbuf *m;
- u_int32_t flag;
+ u_int16_t flag;
struct ext_ref *rfa;
void *cl;
int pkthdr;
+ m_ext_free_func_t m_free_func;
++num;
if (nsegs == 1 || (num % nsegs) != 0 || resid == 0) {
m = (struct mbuf *)rmp_list;
rmp_list = rmp_list->obj_next;
}
+ m_free_func = m_get_ext_free(m);
ASSERT(m != NULL);
VERIFY(m->m_type == MT_FREE && m->m_flags == M_EXT);
- VERIFY(m->m_ext.ext_free == NULL ||
- m->m_ext.ext_free == m_bigfree ||
- m->m_ext.ext_free == m_16kfree);
+ VERIFY(m_free_func == NULL || m_free_func == m_bigfree ||
+ m_free_func == m_16kfree);
cl = m->m_ext.ext_buf;
- rfa = MEXT_RFA(m);
+ rfa = m_get_rfa(m);
ASSERT(cl != NULL && rfa != NULL);
VERIFY(MBUF_IS_COMPOSITE(m));
if (pkthdr)
first = m;
MBUF_INIT(m, pkthdr, MT_DATA);
- if (m->m_ext.ext_free == m_16kfree) {
+ if (m_free_func == m_16kfree) {
MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
} else {
MBUF_CL_INIT(m, cl, rfa, 1, flag);
while (m != NULL) {
struct mbuf *next = m->m_next;
mcache_obj_t *o, *rfa;
- u_int32_t refcnt, composite;
+ u_int32_t composite;
+ u_int16_t refcnt;
+ m_ext_free_func_t m_free_func;
if (m->m_type == MT_FREE)
panic("m_free: freeing an already freed mbuf");
- if (m->m_type != MT_FREE)
- mt_free++;
-
if (m->m_flags & M_PKTHDR) {
/* Check for scratch area overflow */
m_redzone_verify(m);
m_tag_delete_chain(m, NULL);
}
- if (!(m->m_flags & M_EXT))
+ if (!(m->m_flags & M_EXT)) {
+ mt_free++;
goto simple_free;
+ }
+
+ if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
+ m = next;
+ continue;
+ }
+
+ mt_free++;
o = (mcache_obj_t *)(void *)m->m_ext.ext_buf;
refcnt = m_decref(m);
composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
- if (refcnt == 0 && !composite) {
- if (m->m_ext.ext_free == NULL) {
+ m_free_func = m_get_ext_free(m);
+ if (refcnt == MEXT_MINREF(m) && !composite) {
+ if (m_free_func == NULL) {
o->obj_next = mcl_list;
mcl_list = o;
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
o->obj_next = mbc_list;
mbc_list = o;
- } else if (m->m_ext.ext_free == m_16kfree) {
+ } else if (m_free_func == m_16kfree) {
o->obj_next = m16k_list;
m16k_list = o;
} else {
- (*(m->m_ext.ext_free))((caddr_t)o,
+ (*(m_free_func))((caddr_t)o,
m->m_ext.ext_size,
- m->m_ext.ext_arg);
+ m_get_ext_arg(m));
}
- rfa = (mcache_obj_t *)(void *)MEXT_RFA(m);
+ rfa = (mcache_obj_t *)(void *)m_get_rfa(m);
rfa->obj_next = ref_list;
ref_list = rfa;
- MEXT_RFA(m) = NULL;
- } else if (refcnt == 0 && composite) {
+ m_set_ext(m, NULL, NULL, NULL);
+ } else if (refcnt == MEXT_MINREF(m) && composite) {
+ VERIFY(!(MEXT_FLAGS(m) & EXTF_PAIRED));
VERIFY(m->m_type != MT_FREE);
/*
* Amortize the costs of atomic operations
/* "Free" into the intermediate cache */
o = (mcache_obj_t *)m;
- if (m->m_ext.ext_free == NULL) {
+ if (m_free_func == NULL) {
o->obj_next = m_mcl_list;
m_mcl_list = o;
- } else if (m->m_ext.ext_free == m_bigfree) {
+ } else if (m_free_func == m_bigfree) {
o->obj_next = m_mbc_list;
m_mbc_list = o;
} else {
- VERIFY(m->m_ext.ext_free == m_16kfree);
+ VERIFY(m_free_func == m_16kfree);
o->obj_next = m_m16k_list;
m_m16k_list = o;
}
}
mn->m_next = m;
m = mn;
- if (len < MHLEN)
+ if (m->m_flags & M_PKTHDR) {
+ VERIFY(len <= MHLEN);
MH_ALIGN(m, len);
+ } else {
+ VERIFY(len <= MLEN);
+ M_ALIGN(m, len);
+ }
m->m_len = len;
return (m);
}
* chain, copy junk along, and adjust length.
*/
struct mbuf *
-m_prepend_2(struct mbuf *m, int len, int how)
+m_prepend_2(struct mbuf *m, int len, int how, int align)
{
- if (M_LEADINGSPACE(m) >= len) {
+ if (M_LEADINGSPACE(m) >= len &&
+ (!align || IS_P2ALIGNED((m->m_data - len), sizeof(u_int32_t)))) {
m->m_data -= len;
m->m_len += len;
} else {
n->m_len = MIN(n->m_len, MLEN);
if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE)
- panic("%s n %p copy overflow",
+ panic("%s n %p copy overflow",
__func__, n);
bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t),
if (off == 0 && (m->m_flags & M_PKTHDR))
copyhdr = 1;
-
+
if (m_lastm != NULL && *m_lastm != NULL) {
m = *m_lastm;
off = *m_off;
n->m_flags |= M_EXT;
} else {
if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE)
- panic("%s n %p copy overflow",
+ panic("%s n %p copy overflow",
__func__, n);
bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t),
void
m_copydata(struct mbuf *m, int off, int len, void *vp)
{
+ int off0 = off, len0 = len;
+ struct mbuf *m0 = m;
unsigned count;
char *cp = vp;
- if (off < 0 || len < 0)
- panic("m_copydata: invalid offset %d or len %d", off, len);
+ if (__improbable(off < 0 || len < 0)) {
+ panic("%s: invalid offset %d or len %d", __func__, off, len);
+ /* NOTREACHED */
+ }
while (off > 0) {
- if (m == NULL)
- panic("m_copydata: invalid mbuf chain");
+ if (__improbable(m == NULL)) {
+ panic("%s: invalid mbuf chain %p [off %d, len %d]",
+ __func__, m0, off0, len0);
+ /* NOTREACHED */
+ }
if (off < m->m_len)
break;
off -= m->m_len;
m = m->m_next;
}
while (len > 0) {
- if (m == NULL)
- panic("m_copydata: invalid mbuf chain");
+ if (__improbable(m == NULL)) {
+ panic("%s: invalid mbuf chain %p [off %d, len %d]",
+ __func__, m0, off0, len0);
+ /* NOTREACHED */
+ }
count = MIN(m->m_len - off, len);
bcopy(MTOD(m, caddr_t) + off, cp, count);
len -= count;
VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
bufsize == m_maxsize(MC_16KCL));
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/* Numbers in 2K cluster units */
m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
} else { /* 16K CL */
VERIFY(njcl > 0);
- /* Under minimum */
- if (m_16kclusters < MIN16KCL)
- return (MIN16KCL - m_16kclusters);
- if (m_16kclfree >= M16KCL_LOWAT)
- return (0);
-
/* Ensure at least num clusters are available */
if (num >= m_16kclfree)
i = num - m_16kclfree;
i = MAX(i, j);
/* Check to ensure we don't go over limit */
- if (i + m_16kclusters >= m_maxlimit(MC_16KCL))
- i = m_maxlimit(MC_16KCL) - m_16kclusters;
- VERIFY((m_total(MC_16KCL) + i) <= m_maxlimit(MC_16KCL));
+ if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL))
+ i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
}
return (i);
}
}
while (m != NULL) {
#if BLUE_DEBUG
- kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
+ printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len,
m->m_data);
#endif
if (copyhdr)
m = m->m_next;
np = &n->m_next;
#if BLUE_DEBUG
- kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
+ printf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len,
n->m_data);
#endif
}
#define MBUF_MULTIPAGES(m) \
(((m)->m_flags & M_EXT) && \
- ((IS_P2ALIGNED((m)->m_data, NBPG) && (m)->m_len > NBPG) || \
- (!IS_P2ALIGNED((m)->m_data, NBPG) && \
- P2ROUNDUP((m)->m_data, NBPG) < ((uintptr_t)(m)->m_data + (m)->m_len))))
+ ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \
+ && (m)->m_len > PAGE_SIZE) || \
+ (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \
+ P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len))))
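+/*
+ * Worked example for MBUF_MULTIPAGES() assuming 4KB pages: it is true for
+ * a cluster whose data pointer is page aligned with m_len of, say, 6KB
+ * (first clause), or whose data starts 3KB into a page with m_len of 2KB
+ * so that it runs past the page boundary (second clause).  m_expand()
+ * below then splits such an mbuf at page boundaries.
+ */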
static struct mbuf *
m_expand(struct mbuf *m, struct mbuf **last)
struct mbuf *n;
data = data0;
- if (IS_P2ALIGNED(data, NBPG) && len0 > NBPG)
- len = NBPG;
- else if (!IS_P2ALIGNED(data, NBPG) &&
- P2ROUNDUP(data, NBPG) < (data + len0))
- len = P2ROUNDUP(data, NBPG) - data;
+ if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE)
+ len = PAGE_SIZE;
+ else if (!IS_P2ALIGNED(data, PAGE_SIZE) &&
+ P2ROUNDUP(data, PAGE_SIZE) < (data + len0))
+ len = P2ROUNDUP(data, PAGE_SIZE) - data;
else
len = len0;
space = remainder;
bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
m->m_len += space;
- cp += space, remainder -= space;
+ cp += space;
+ remainder -= space;
}
while (remainder > 0) {
/*
if (length > MCLBYTES)
length = MCLBYTES;
length -= ((m_new == m_final) ? off : 0);
+ if (length < 0)
+ goto nospace;
if (m_new == NULL) {
if (length > MLEN)
{
boolean_t mcache_retry = FALSE;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
/* Check if there's anything at the cache layer */
if (mbuf_cached_above(class, wait)) {
mbuf_watchdog();
mb_waiters++;
+ m_region_expand(class) += m_total(class) + num;
+ /* wake up the worker thread */
+ if (class > MC_MBUF && mbuf_worker_ready &&
+ mbuf_worker_needs_wakeup) {
+ wakeup((caddr_t)&mbuf_worker_needs_wakeup);
+ mbuf_worker_needs_wakeup = FALSE;
+ }
+
(void) msleep(mb_waitchan, mbuf_mlock, (PZERO-1), m_cname(class), NULL);
/* We are now up; stop getting notified until next round */
return (mcache_retry);
}
+__attribute__((noreturn))
static void
mbuf_worker_thread(void)
{
while (1) {
lck_mtx_lock(mbuf_mlock);
-
+ mbuf_worker_run_cnt++;
mbuf_expand = 0;
- if (mbuf_expand_mcl) {
+ if (m_region_expand(MC_CL) > 0) {
int n;
-
+ mb_expand_cl_cnt++;
/* Adjust to current number of cluster in use */
- n = mbuf_expand_mcl -
+ n = m_region_expand(MC_CL) -
(m_total(MC_CL) - m_infree(MC_CL));
if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL))
n = m_maxlimit(MC_CL) - m_total(MC_CL);
- mbuf_expand_mcl = 0;
+ if (n > 0) {
+ mb_expand_cl_total += n;
+ }
+ m_region_expand(MC_CL) = 0;
if (n > 0 && freelist_populate(MC_CL, n, M_WAIT) > 0)
mbuf_expand++;
}
- if (mbuf_expand_big) {
+ if (m_region_expand(MC_BIGCL) > 0) {
int n;
-
+ mb_expand_bigcl_cnt++;
/* Adjust to current number of 4 KB cluster in use */
- n = mbuf_expand_big -
+ n = m_region_expand(MC_BIGCL) -
(m_total(MC_BIGCL) - m_infree(MC_BIGCL));
if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL))
n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL);
- mbuf_expand_big = 0;
+ if (n > 0) {
+ mb_expand_bigcl_total += n;
+ }
+ m_region_expand(MC_BIGCL) = 0;
if (n > 0 && freelist_populate(MC_BIGCL, n, M_WAIT) > 0)
mbuf_expand++;
}
- if (mbuf_expand_16k) {
+ if (m_region_expand(MC_16KCL) > 0) {
int n;
-
+ mb_expand_16kcl_cnt++;
/* Adjust to current number of 16 KB cluster in use */
- n = mbuf_expand_16k -
+ n = m_region_expand(MC_16KCL) -
(m_total(MC_16KCL) - m_infree(MC_16KCL));
if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL))
n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
- mbuf_expand_16k = 0;
+ if (n > 0) {
+ mb_expand_16kcl_total += n;
+ }
+ m_region_expand(MC_16KCL) = 0;
if (n > 0)
(void) freelist_populate(MC_16KCL, n, M_WAIT);
if (mbuf_expand) {
while (m_total(MC_MBUF) <
(m_total(MC_BIGCL) + m_total(MC_CL))) {
+ mb_expand_cnt++;
if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0)
break;
}
}
+ mbuf_worker_needs_wakeup = TRUE;
+ /*
+ * If there's a deadlock and we're not sending / receiving
+ * packets, net_uptime() won't be updated. Update it here
+ * so we are sure it's correct.
+ */
+ net_update_uptime();
+ mbuf_worker_last_runtime = net_uptime();
+ assert_wait((caddr_t)&mbuf_worker_needs_wakeup,
+ THREAD_UNINT);
lck_mtx_unlock(mbuf_mlock);
-
- assert_wait(&mbuf_worker_run, THREAD_UNINT);
(void) thread_block((thread_continue_t)mbuf_worker_thread);
}
}
+__attribute__((noreturn))
static void
mbuf_worker_thread_init(void)
{
mcl_slabg_t *slg;
unsigned int ix, k;
- lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
VERIFY(MBUF_IN_MAP(buf));
- ix = ((char *)buf - (char *)mbutl) >> MBSHIFT;
+ ix = ((unsigned char *)buf - mbutl) >> MBSHIFT;
VERIFY(ix < maxslabgrp);
if ((slg = slabstbl[ix]) == NULL) {
/*
- * In the current implementation, we never shrink the slabs
+ * In the current implementation, we never shrink the slabs
* table; if we attempt to reallocate a cluster group when
* it's already allocated, panic since this is a sign of a
* memory corruption (slabstbl[ix] got nullified).
/* This is a new buffer; create the slabs group for it */
MALLOC(slg, mcl_slabg_t *, sizeof (*slg), M_TEMP,
M_WAITOK | M_ZERO);
- VERIFY(slg != NULL);
+ MALLOC(slg->slg_slab, mcl_slab_t *, sizeof(mcl_slab_t) * NSLABSPMB,
+ M_TEMP, M_WAITOK | M_ZERO);
+ VERIFY(slg != NULL && slg->slg_slab != NULL);
lck_mtx_lock(mbuf_mlock);
/*
}
}
- ix = MTOBG(buf) % NSLABSPMB;
+ ix = MTOPG(buf) % NSLABSPMB;
VERIFY(ix < NSLABSPMB);
return (&slg->slg_slab[ix]);
m_slab_cnt(class)++;
TAILQ_INSERT_TAIL(&m_slablist(class), sp, sl_link);
sp->sl_flags &= ~SLF_DETACHED;
+
+	/*
+	 * If a buffer spans multiple contiguous pages then clear the
+	 * detached flag on the trailing slabs as well
+	 */
if (class == MC_16KCL) {
int k;
for (k = 1; k < NSLABSP16KB; k++) {
sp = sp->sl_next;
/* Next slab must already be present */
- VERIFY(sp != NULL);
- VERIFY(slab_is_detached(sp));
+ VERIFY(sp != NULL && slab_is_detached(sp));
sp->sl_flags &= ~SLF_DETACHED;
}
}
static void
slab_remove(mcl_slab_t *sp, mbuf_class_t class)
{
+ int k;
VERIFY(!slab_is_detached(sp));
VERIFY(m_slab_cnt(class) > 0);
m_slab_cnt(class)--;
TAILQ_REMOVE(&m_slablist(class), sp, sl_link);
slab_detach(sp);
if (class == MC_16KCL) {
- int k;
for (k = 1; k < NSLABSP16KB; k++) {
sp = sp->sl_next;
/* Next slab must already be present */
boolean_t save_contents = (con_list != NULL);
unsigned int i, ix;
- ASSERT(num <= NMBPBG);
+ ASSERT(num <= NMBPG);
ASSERT(con_list == NULL || con_size != 0);
- ix = MTOBG(buf);
+ ix = MTOPG(buf);
VERIFY(ix < maxclaudit);
/* Make sure we haven't been here before */
- for (i = 0; i < NMBPBG; i++)
+ for (i = 0; i < num; i++)
VERIFY(mclaudit[ix].cl_audit[i] == NULL);
mca = mca_tail = *mca_list;
unsigned int i, ix;
mcache_audit_t *mca, *mca_list;
- ix = MTOBG(buf);
+ ix = MTOPG(buf);
VERIFY(ix < maxclaudit);
-
+
if (mclaudit[ix].cl_audit[0] != NULL) {
mca_list = mclaudit[ix].cl_audit[0];
for (i = 0; i < num; i++) {
* the corresponding audit structure for that buffer.
*/
static mcache_audit_t *
-mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *o)
+mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj)
{
mcache_audit_t *mca = NULL;
- int ix = MTOBG(o);
+ int ix = MTOPG(mobj), m_idx = 0;
+ unsigned char *page_addr;
VERIFY(ix < maxclaudit);
- VERIFY(IS_P2ALIGNED(o, MIN(m_maxsize(class), NBPG)));
+ VERIFY(IS_P2ALIGNED(mobj, MIN(m_maxsize(class), PAGE_SIZE)));
+
+ page_addr = PGTOM(ix);
switch (class) {
case MC_MBUF:
* mbuf index relative to the page base and use
* it to locate the audit structure.
*/
- VERIFY(MCLIDX(BGTOM(ix), o) < (int)NMBPBG);
- mca = mclaudit[ix].cl_audit[MCLIDX(BGTOM(ix), o)];
+ m_idx = MBPAGEIDX(page_addr, mobj);
+ VERIFY(m_idx < (int)NMBPG);
+ mca = mclaudit[ix].cl_audit[m_idx];
break;
case MC_CL:
/*
* Same thing as above, but for 2KB clusters in a page.
*/
- VERIFY(CLBGIDX(BGTOM(ix), o) < (int)NCLPBG);
- mca = mclaudit[ix].cl_audit[CLBGIDX(BGTOM(ix), o)];
+ m_idx = CLPAGEIDX(page_addr, mobj);
+ VERIFY(m_idx < (int)NCLPG);
+ mca = mclaudit[ix].cl_audit[m_idx];
break;
case MC_BIGCL:
+ m_idx = BCLPAGEIDX(page_addr, mobj);
+ VERIFY(m_idx < (int)NBCLPG);
+ mca = mclaudit[ix].cl_audit[m_idx];
+ break;
case MC_16KCL:
/*
* Same as above, but only return the first element.
if (composite) {
struct mbuf *next = m->m_next;
- VERIFY(ms->m_flags == M_EXT && MEXT_RFA(ms) != NULL &&
+ VERIFY(ms->m_flags == M_EXT && m_get_rfa(ms) != NULL &&
MBUF_IS_COMPOSITE(ms));
VERIFY(mca->mca_contents_size == AUDIT_CONTENTS_SIZE);
/*
if ((temp % mleak_table.mleak_sample_factor) == 0 && addr != NULL) {
uintptr_t bt[MLEAK_STACK_DEPTH];
- int logged = fastbacktrace(bt, MLEAK_STACK_DEPTH);
+ int logged = backtrace(bt, MLEAK_STACK_DEPTH);
mleak_log(bt, addr, logged, num);
}
}
static char *
mbuf_dump(void)
{
- unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct;
+ unsigned long totmem = 0, totfree = 0, totmbufs, totused, totpct,
+ totreturned = 0;
u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0;
u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0;
u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0;
totmem += mem;
totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) *
sp->mbcl_size;
+ totreturned += sp->mbcl_release_cnt;
}
k = snprintf(c, clen, "%lu KB allocated to network (approx. %lu%% "
"in use)\n", totmem / 1024, totpct);
MBUF_DUMP_BUF_CHK();
+ k = snprintf(c, clen, "%lu KB returned to the system\n",
+ totreturned / 1024);
+ MBUF_DUMP_BUF_CHK();
+
+ net_update_uptime();
+ k = snprintf(c, clen,
+ "VM allocation failures: contiguous %u, normal %u, one page %u\n",
+ mb_kmem_contig_failed, mb_kmem_failed, mb_kmem_one_failed);
+ MBUF_DUMP_BUF_CHK();
+ if (mb_kmem_contig_failed_ts || mb_kmem_failed_ts ||
+ mb_kmem_one_failed_ts) {
+ k = snprintf(c, clen,
+ "VM allocation failure timestamps: contiguous %llu "
+ "(size %llu), normal %llu (size %llu), one page %llu "
+ "(now %llu)\n",
+ mb_kmem_contig_failed_ts, mb_kmem_contig_failed_size,
+ mb_kmem_failed_ts, mb_kmem_failed_size,
+ mb_kmem_one_failed_ts, net_uptime());
+ MBUF_DUMP_BUF_CHK();
+ k = snprintf(c, clen,
+ "VM return codes: ");
+ MBUF_DUMP_BUF_CHK();
+ for (i = 0;
+ i < sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]);
+ i++) {
+ k = snprintf(c, clen, "%s: %u ", mb_kmem_stats_labels[i],
+ mb_kmem_stats[i]);
+ MBUF_DUMP_BUF_CHK();
+ }
+ k = snprintf(c, clen, "\n");
+ MBUF_DUMP_BUF_CHK();
+ }
+ k = snprintf(c, clen,
+ "worker thread runs: %u, expansions: %llu, cl %llu/%llu, "
+ "bigcl %llu/%llu, 16k %llu/%llu\n", mbuf_worker_run_cnt,
+ mb_expand_cnt, mb_expand_cl_cnt, mb_expand_cl_total,
+ mb_expand_bigcl_cnt, mb_expand_bigcl_total, mb_expand_16kcl_cnt,
+ mb_expand_16kcl_total);
+ MBUF_DUMP_BUF_CHK();
+ if (mbuf_worker_last_runtime != 0) {
+ k = snprintf(c, clen, "worker thread last run time: "
+ "%llu (%llu seconds ago)\n",
+ mbuf_worker_last_runtime,
+ net_uptime() - mbuf_worker_last_runtime);
+ MBUF_DUMP_BUF_CHK();
+ }
/* mbuf leak detection statistics */
mleak_update_stats();
return (ret);
}
+int
+m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n)
+{
+ ASSERT(m->m_flags & M_EXT);
+ return (atomic_test_set_32(&MEXT_PRIV(m), o, n));
+}
+
+uint32_t
+m_ext_get_prop(struct mbuf *m)
+{
+ ASSERT(m->m_flags & M_EXT);
+ return (MEXT_PRIV(m));
+}
+
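+/*
+ * m_ext_set_prop() above is a compare-and-swap on the cluster's private
+ * word: the new value is stored only if the word still equals the
+ * expected old value.  A lock-free read-modify-write would therefore
+ * retry on failure; a minimal sketch (MY_FLAG is a hypothetical
+ * caller-defined bit, and a non-zero return is assumed to mean the swap
+ * succeeded):
+ *
+ *    uint32_t prev;
+ *    do {
+ *            prev = m_ext_get_prop(m);
+ *    } while (!m_ext_set_prop(m, prev, prev | MY_FLAG));
+ */
+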
+int
+m_ext_paired_is_active(struct mbuf *m)
+{
+ return (MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1);
+}
+
+void
+m_ext_paired_activate(struct mbuf *m)
+{
+ struct ext_ref *rfa;
+ int hdr, type;
+ caddr_t extbuf;
+ m_ext_free_func_t extfree;
+ u_int extsize;
+
+ VERIFY(MBUF_IS_PAIRED(m));
+ VERIFY(MEXT_REF(m) == MEXT_MINREF(m));
+ VERIFY(MEXT_PREF(m) == MEXT_MINREF(m));
+
+ hdr = (m->m_flags & M_PKTHDR);
+ type = m->m_type;
+ extbuf = m->m_ext.ext_buf;
+ extfree = m_get_ext_free(m);
+ extsize = m->m_ext.ext_size;
+ rfa = m_get_rfa(m);
+
+ VERIFY(extbuf != NULL && rfa != NULL);
+
+ /*
+	 * Safe to reinitialize packet header tags, since their deletion
+	 * was already taken care of at m_free() time. Similar to
+ * what's done in m_clattach() for the cluster. Bump
+ * up MEXT_PREF to indicate activation.
+ */
+ MBUF_INIT(m, hdr, type);
+ MEXT_INIT(m, extbuf, extsize, extfree, (caddr_t)m, rfa,
+ 1, 1, 2, EXTF_PAIRED, MEXT_PRIV(m), m);
+}
+
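+/*
+ * Sketch of how a hypothetical owner of paired clusters (for instance a
+ * driver-managed pool) might recycle a pair using the two routines above.
+ * Once the stack has dropped its references, m_ext_paired_is_active()
+ * returns 0 and the pair can be re-armed before being handed out again;
+ * after activation MEXT_PREF() equals MEXT_MINREF() + 1.
+ *
+ *    if (!m_ext_paired_is_active(m)) {
+ *            m_ext_paired_activate(m);
+ *            ...hand m back to the stack...
+ *    }
+ */
+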
void
m_scratch_init(struct mbuf *m)
{
}
}
+__private_extern__ inline void
+m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
+ caddr_t ext_arg)
+{
+ VERIFY(m->m_flags & M_EXT);
+ if (rfa != NULL) {
+ m->m_ext.ext_refflags =
+ (struct ext_ref *)(((uintptr_t)rfa) ^ mb_obscure_extref);
+ if (ext_free != NULL) {
+ rfa->ext_token = ((uintptr_t)&rfa->ext_token) ^
+ mb_obscure_extfree;
+ m->m_ext.ext_free = (m_ext_free_func_t)
+ (((uintptr_t)ext_free) ^ rfa->ext_token);
+ if (ext_arg != NULL) {
+ m->m_ext.ext_arg =
+ (caddr_t)(((uintptr_t)ext_arg) ^ rfa->ext_token);
+ } else {
+ m->m_ext.ext_arg = NULL;
+ }
+ } else {
+ rfa->ext_token = 0;
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_arg = NULL;
+ }
+ } else {
+ /*
+	 * If we are going to lose the cookie in ext_token by
+ * resetting the rfa, we should use the global cookie
+ * to obscure the ext_free and ext_arg pointers.
+ */
+ if (ext_free != NULL) {
+ m->m_ext.ext_free =
+ (m_ext_free_func_t)((uintptr_t)ext_free ^
+ mb_obscure_extfree);
+ if (ext_arg != NULL) {
+ m->m_ext.ext_arg =
+ (caddr_t)((uintptr_t)ext_arg ^
+ mb_obscure_extfree);
+ } else {
+ m->m_ext.ext_arg = NULL;
+ }
+ } else {
+ m->m_ext.ext_free = NULL;
+ m->m_ext.ext_arg = NULL;
+ }
+ m->m_ext.ext_refflags = NULL;
+ }
+}
+
+__private_extern__ inline struct ext_ref *
+m_get_rfa(struct mbuf *m)
+{
+ if (m->m_ext.ext_refflags == NULL)
+ return (NULL);
+ else
+ return ((struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref));
+}
+
+__private_extern__ inline m_ext_free_func_t
+m_get_ext_free(struct mbuf *m)
+{
+ struct ext_ref *rfa;
+ if (m->m_ext.ext_free == NULL)
+ return (NULL);
+
+ rfa = m_get_rfa(m);
+ if (rfa == NULL)
+ return ((m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree));
+ else
+ return ((m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free)
+ ^ rfa->ext_token));
+}
+
+__private_extern__ inline caddr_t
+m_get_ext_arg(struct mbuf *m)
+{
+ struct ext_ref *rfa;
+ if (m->m_ext.ext_arg == NULL)
+ return (NULL);
+
+ rfa = m_get_rfa(m);
+ if (rfa == NULL) {
+ return ((caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree));
+ } else {
+ return ((caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
+ rfa->ext_token));
+ }
+}
+
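+/*
+ * The obscuring scheme above is a plain XOR round trip: m_set_ext()
+ * stores the pointers XOR'ed with the per-ext_ref token (or with the
+ * global cookie when no rfa is supplied) and the m_get_*() accessors
+ * recover the originals by XOR'ing with the same value, so for instance
+ * (sketch only):
+ *
+ *    m_set_ext(m, rfa, m_bigfree, NULL);
+ *    VERIFY(m_get_ext_free(m) == m_bigfree);
+ */
+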
/*
* Send a report of mbuf usage if the usage is at least 6% of max limit
* or if there has been at least 3% increase since the last report.
*
* The values 6% and 3% are chosen so that we can do simple arithmetic
* with shift operations.
- */
+ */
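+/*
+ * For example, a right shift by 4 yields 6.25% of a value and a shift by
+ * 5 yields 3.125%, so the 6% and 3% thresholds above can presumably be
+ * checked as (m_maxlimit(cl) >> 4) and (m_peak(cl) >> 5) without any
+ * division.
+ */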
static boolean_t
mbuf_report_usage(mbuf_class_t cl)
{
__private_extern__ void
mbuf_report_peak_usage(void)
{
- int i = 0;
+ int i = 0;
u_int64_t uptime;
struct nstat_sysinfo_data ns_data;
uint32_t memreleased = 0;
+ static uint32_t prevmemreleased;
uptime = net_uptime();
lck_mtx_lock(mbuf_mlock);
/* Generate an initial report after 1 week of uptime */
- if (!mb_peak_firstreport &&
+ if (!mb_peak_firstreport &&
uptime > MBUF_PEAK_FIRST_REPORT_THRESHOLD) {
mb_peak_newreport = TRUE;
mb_peak_firstreport = TRUE;
}
/*
- * Since a report is being generated before 1 week,
+ * Since a report is being generated before 1 week,
* we do not need to force another one later
*/
if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD)
m_peak(m_class(i)) = m_total(m_class(i));
memreleased += m_release_cnt(i);
}
+ memreleased = memreleased - prevmemreleased;
+ prevmemreleased = memreleased;
mb_peak_newreport = FALSE;
lck_mtx_unlock(mbuf_mlock);
ns_data.u.mb_stats.total_256b = m_peak(MC_MBUF);
ns_data.u.mb_stats.total_2kb = m_peak(MC_CL);
ns_data.u.mb_stats.total_4kb = m_peak(MC_BIGCL);
+ ns_data.u.mb_stats.total_16kb = m_peak(MC_16KCL);
ns_data.u.mb_stats.sbmb_total = total_sbmb_cnt_peak;
ns_data.u.mb_stats.sb_atmbuflimit = sbmb_limreached;
ns_data.u.mb_stats.draincnt = mbstat.m_drain;
ns_data.u.mb_stats.memreleased = memreleased;
+ ns_data.u.mb_stats.sbmb_floor = total_sbmb_cnt_floor;
nstat_sysinfo_send_data(&ns_data);
+
+ /*
+ * Reset the floor whenever we report a new
+	 * peak to track the trend (increased peak usage
+ * is not a leak if mbufs get released
+ * between reports and the floor stays low)
+ */
+ total_sbmb_cnt_floor = total_sbmb_cnt_peak;
}
/*
mbuf_class_t mc;
mcl_slab_t *sp, *sp_tmp, *nsp;
unsigned int num, k, interval, released = 0;
- unsigned int total_mem = 0, use_mem = 0;
+ unsigned long total_mem = 0, use_mem = 0;
boolean_t ret, purge_caches = FALSE;
ppnum_t offset;
mcache_obj_t *obj;
- float per;
+ unsigned long per;
static uint64_t last_drain = 0;
static unsigned char scratch[32];
static ppnum_t scratch_pa = 0;
lck_mtx_unlock(mbuf_mlock);
return;
}
- interval = net_uptime() - last_drain;
+ interval = net_uptime() - last_drain;
if (interval <= mb_drain_maxint) {
lck_mtx_unlock(mbuf_mlock);
return;
- }
+ }
if (interval <= mb_drain_maxint * 5)
purge_caches = TRUE;
last_drain = net_uptime();
total_mem += m_total(mc) * m_maxsize(mc);
use_mem += m_active(mc) * m_maxsize(mc);
}
- per = (float)use_mem / (float)total_mem;
- if (per >= 0.6) {
+ per = (use_mem * 100) / total_mem;
+ if (per >= 60) {
lck_mtx_unlock(mbuf_mlock);
return;
}
* total in the freelist.
*/
for (mc = 0; mc < NELEM(mbuf_table); mc++) {
- while (m_cobjlist(mc) &&
+ while (m_cobjlist(mc) &&
m_total(mc) < m_avgtotal(mc) &&
m_infree(mc) > 0.1 * m_avgtotal(mc) + m_minlimit(mc)) {
obj = m_cobjlist(mc);
slab_remove(sp, mc);
switch (mc) {
case MC_MBUF:
- m_infree(mc) -= NMBPBG;
- m_total(mc) -= NMBPBG;
+ m_infree(mc) -= NMBPG;
+ m_total(mc) -= NMBPG;
if (mclaudit != NULL)
- mcl_audit_free(sp->sl_base, NMBPBG);
+ mcl_audit_free(sp->sl_base, NMBPG);
break;
case MC_CL:
- m_infree(mc) -= NCLPBG;
- m_total(mc) -= NCLPBG;
+ m_infree(mc) -= NCLPG;
+ m_total(mc) -= NCLPG;
if (mclaudit != NULL)
- mcl_audit_free(sp->sl_base, NMBPBG);
+ mcl_audit_free(sp->sl_base, NMBPG);
break;
case MC_BIGCL:
- m_infree(mc)--;
- m_total(mc)--;
+ {
+ m_infree(mc) -= NBCLPG;
+ m_total(mc) -= NBCLPG;
if (mclaudit != NULL)
- mcl_audit_free(sp->sl_base, NMBPBG);
+ mcl_audit_free(sp->sl_base, NMBPG);
break;
+ }
case MC_16KCL:
m_infree(mc)--;
m_total(mc)--;
for (nsp = sp, k = 1; k < NSLABSP16KB; k++) {
nsp = nsp->sl_next;
- VERIFY(nsp->sl_refcnt == 0 &&
+ VERIFY(nsp->sl_refcnt == 0 &&
nsp->sl_base != NULL &&
nsp->sl_len == 0);
slab_init(nsp, 0, 0, NULL, NULL, 0, 0,
0);
nsp->sl_flags = 0;
}
- if (mclaudit != NULL)
- mcl_audit_free(sp->sl_base, 1);
+ if (mclaudit != NULL) {
+ if (sp->sl_len == PAGE_SIZE) {
+ mcl_audit_free(sp->sl_base,
+ NMBPG);
+ } else {
+ mcl_audit_free(sp->sl_base, 1);
+ }
+ }
break;
default:
/*
}
m_release_cnt(mc) += m_size(mc);
released += m_size(mc);
- offset = ((char *)sp->sl_base - (char *)mbutl) / NBPG;
+ VERIFY(sp->sl_base != NULL &&
+ sp->sl_len >= PAGE_SIZE);
+ offset = MTOPG(sp->sl_base);
/*
* Make sure the IOMapper points to a valid, but
* bogus, address. This should prevent further DMA
*/
IOMapperInsertPage(mcl_paddr_base, offset, scratch_pa);
mcl_paddr[offset] = 0;
- kmem_free(mb_map, (vm_offset_t)sp->sl_base,
+ kmem_free(mb_map, (vm_offset_t)sp->sl_base,
sp->sl_len);
slab_init(sp, 0, 0, NULL, NULL, 0, 0, 0);
sp->sl_flags = 0;
{
#pragma unused(arg1, arg2)
int val = 0, err;
-
+
err = sysctl_handle_int(oidp, &val, 0, req);
if (err != 0 || req->newptr == USER_ADDR_NULL)
return (err);
return (err);
}
+#if DEBUG || DEVELOPMENT
+
+static int mbtest_val;
+static int mbtest_running;
+
+static void
+mbtest_thread(__unused void *arg)
+{
+ int i;
+
+ printf("%s thread starting\n", __func__);
+
+ for (i = 0; i < 1000; i++) {
+ unsigned int needed = 100000;
+ struct mbuf *m1, *m2, *m3;
+
+ if (njcl > 0) {
+ needed = 100000;
+ m3 = m_getpackets_internal(&needed, 0, M_DONTWAIT, 0, M16KCLBYTES);
+ m_freem_list(m3);
+ }
+
+ needed = 100000;
+ m2 = m_getpackets_internal(&needed, 0, M_DONTWAIT, 0, MBIGCLBYTES);
+ m_freem_list(m2);
+
+ m1 = m_getpackets_internal(&needed, 0, M_DONTWAIT, 0, MCLBYTES);
+ m_freem_list(m1);
+ }
+
+ printf("%s thread ending\n", __func__);
+
+ OSDecrementAtomic(&mbtest_running);
+ wakeup_one((caddr_t)&mbtest_running);
+}
+
+static void
+sysctl_mbtest(void)
+{
+ /* We launch three threads - wait for all of them */
+ OSIncrementAtomic(&mbtest_running);
+ OSIncrementAtomic(&mbtest_running);
+ OSIncrementAtomic(&mbtest_running);
+
+ thread_call_func_delayed((thread_call_func_t)mbtest_thread, NULL, 10);
+ thread_call_func_delayed((thread_call_func_t)mbtest_thread, NULL, 10);
+ thread_call_func_delayed((thread_call_func_t)mbtest_thread, NULL, 10);
+
+ while (mbtest_running) {
+ msleep((caddr_t)&mbtest_running, NULL, PUSER, "mbtest_running", NULL);
+ }
+}
+
+static int
+mbtest SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int error = 0, val, oldval = mbtest_val;
+
+ val = oldval;
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+
+ if (val != oldval)
+ sysctl_mbtest();
+
+ mbtest_val = val;
+
+ return (error);
+}
+#endif
+
SYSCTL_DECL(_kern_ipc);
+#if DEBUG || DEVELOPMENT
+SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtest,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &mbtest_val, 0, &mbtest, "I",
+ "Toggle to test mbufs");
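+/*
+ * On a DEBUG or DEVELOPMENT kernel the stress test above is driven from
+ * user space by writing a new value to the knob, e.g. (illustrative):
+ *
+ *    sysctl kern.ipc.mbtest=1
+ *
+ * Any value different from the previous one launches the three
+ * allocation/free threads and the write blocks until they finish.
+ */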
+#endif
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat,
CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
0, 0, mbstat_sysctl, "S,mbstat", "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_watchdog,
CTLFLAG_RW | CTLFLAG_LOCKED, &mb_watchdog, 0, "");
SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
m_drain_force_sysctl, "I",
"Forces the mbuf garbage collection to run");
SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,