#include <sys/mcache.h>
#define MCACHE_SIZE(n) \
- ((size_t)(&((mcache_t *)0)->mc_cpu[n]))
+ __builtin_offsetof(mcache_t, mc_cpu[n])
/* Allocate extra in case we need to manually align the pointer */
#define MCACHE_ALLOC_SIZE \
static void mcache_bkt_destroy(mcache_t *, mcache_bkttype_t *,
mcache_bkt_t *, int);
static void mcache_bkt_ws_update(mcache_t *);
+static void mcache_bkt_ws_zero(mcache_t *);
static void mcache_bkt_ws_reap(mcache_t *);
static void mcache_dispatch(void (*)(void *), void *);
static void mcache_cache_reap(mcache_t *);
return (NULL);
}
- if (!(wait & MCR_NOSLEEP))
- buf = zalloc(mcache_zone);
- else
- buf = zalloc_noblock(mcache_zone);
-
+ buf = zalloc(mcache_zone);
if (buf == NULL)
goto fail;
* Guaranteed alignment is valid only when we use the internal
* slab allocator (currently set to use the zone allocator).
*/
- if (!need_zone)
+ if (!need_zone) {
align = 1;
- else if (align == 0)
- align = MCACHE_ALIGN;
+ } else {
+ /* Enforce 64-bit minimum alignment for zone-based buffers */
+ if (align == 0)
+ align = MCACHE_ALIGN;
+ align = P2ROUNDUP(align, MCACHE_ALIGN);
+ }
if ((align & (align - 1)) != 0)
panic("mcache_create: bad alignment %lu", align);
*/
chunksize = MAX(bufsize, sizeof (u_int64_t));
if (need_zone) {
- /* Enforce 64-bit minimum alignment for zone-based buffers */
- align = MAX(align, sizeof (u_int64_t));
- chunksize += sizeof (void *) + align;
+ VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0);
+ chunksize += sizeof (uint64_t) + align;
chunksize = P2ROUNDUP(chunksize, align);
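+ /*
+ * Example: a 100-byte buffer with 16-byte alignment yields
+ * chunksize = P2ROUNDUP(100 + 8 + 16, 16) = 128, which leaves
+ * room for the saved backing pointer and the worst-case
+ * alignment skew.
+ */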
if ((cp->mc_slab_zone = zinit(chunksize, 64 * 1024 * ncpu,
PAGE_SIZE, cp->mc_name)) == NULL)
* implementation uses the zone allocator for simplicity reasons.
*/
static unsigned int
-mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
+mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
+ int wait)
{
+#pragma unused(wait)
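+ /*
+ * The slab layer always allocates from the zone in blocking
+ * mode; the MCR_NOSLEEP hint in wait is therefore ignored.
+ */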
mcache_t *cp = arg;
unsigned int need = num;
- size_t offset = 0;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
u_int32_t flags = cp->mc_flags;
void *buf, *base, **pbuf;
*list = NULL;
- /*
- * The address of the object returned to the caller is an
- * offset from the 64-bit aligned base address only if the
- * cache's alignment requirement is neither 1 nor 8 bytes.
- */
- if (cp->mc_align != 1 && cp->mc_align != sizeof (u_int64_t))
- offset = cp->mc_align;
-
for (;;) {
- if (!(wait & MCR_NOSLEEP))
- buf = zalloc(cp->mc_slab_zone);
- else
- buf = zalloc_noblock(cp->mc_slab_zone);
-
+ buf = zalloc(cp->mc_slab_zone);
if (buf == NULL)
break;
- /* Get the 64-bit aligned base address for this object */
+ /* Get the aligned base address for this object */
base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t),
- sizeof (u_int64_t));
+ cp->mc_align);
/*
* Wind back a pointer size from the aligned base and
pbuf = (void **)((intptr_t)base - sizeof (void *));
*pbuf = buf;
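+ /* The aligned object must fit entirely within the chunk */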
+ VERIFY(((intptr_t)base + cp->mc_bufsize) <=
+ ((intptr_t)buf + cp->mc_chunksize));
+
/*
* If auditing is enabled, patternize the contents of
* the buffer starting from the 64-bit aligned base to
mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
}
- /*
- * Fix up the object's address to fulfill the cache's
- * alignment requirement (if needed) and return this
- * to the caller.
- */
- VERIFY(((intptr_t)base + offset + cp->mc_bufsize) <=
- ((intptr_t)buf + cp->mc_chunksize));
- *list = (mcache_obj_t *)((intptr_t)base + offset);
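+ /* The object handed back to the caller is the aligned base itself */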
+ VERIFY(IS_P2ALIGNED(base, cp->mc_align));
+ *list = (mcache_obj_t *)base;
(*list)->obj_next = NULL;
list = *plist = &(*list)->obj_next;
{
mcache_t *cp = arg;
mcache_obj_t *nlist;
- size_t offset = 0;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
u_int32_t flags = cp->mc_flags;
void *base;
void **pbuf;
- /*
- * The address of the object is an offset from a 64-bit
- * aligned base address only if the cache's alignment
- * requirement is neither 1 nor 8 bytes.
- */
- if (cp->mc_align != 1 && cp->mc_align != sizeof (u_int64_t))
- offset = cp->mc_align;
-
for (;;) {
nlist = list->obj_next;
list->obj_next = NULL;
- /* Get the 64-bit aligned base address of this object */
- base = (void *)((intptr_t)list - offset);
- VERIFY(IS_P2ALIGNED(base, sizeof (u_int64_t)));
+ base = list;
+ VERIFY(IS_P2ALIGNED(base, cp->mc_align));
/* Get the original address since we're about to free it */
pbuf = (void **)((intptr_t)base - sizeof (void *));
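+ /* The object must lie entirely within the chunk being freed */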
+ VERIFY(((intptr_t)base + cp->mc_bufsize) <=
+ ((intptr_t)*pbuf + cp->mc_chunksize));
+
if (flags & MCF_DEBUG) {
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)*pbuf + cp->mc_chunksize));
- mcache_audit_free_verify(NULL, base, offset, rsize);
+ mcache_audit_free_verify(NULL, base, 0, rsize);
}
/* Free it to zone */
- VERIFY(((intptr_t)base + offset + cp->mc_bufsize) <=
- ((intptr_t)*pbuf + cp->mc_chunksize));
zfree(cp->mc_slab_zone, *pbuf);
/* No more objects to free; return to mcache */
mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
mcache_t *cp = arg;
- size_t offset = 0;
size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t));
void *base, **pbuf;
- /*
- * The address of the object returned to the caller is an
- * offset from the 64-bit aligned base address only if the
- * cache's alignment requirement is neither 1 nor 8 bytes.
- */
- if (cp->mc_align != 1 && cp->mc_align != sizeof (u_int64_t))
- offset = cp->mc_align;
-
while (list != NULL) {
mcache_obj_t *next = list->obj_next;
- /* Get the 64-bit aligned base address of this object */
- base = (void *)((intptr_t)list - offset);
- VERIFY(IS_P2ALIGNED(base, sizeof (u_int64_t)));
+ base = list;
+ VERIFY(IS_P2ALIGNED(base, cp->mc_align));
/* Get the original address */
pbuf = (void **)((intptr_t)base - sizeof (void *));
if (!alloc)
mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
else
- mcache_audit_free_verify_set(NULL, base, offset, rsize);
+ mcache_audit_free_verify_set(NULL, base, 0, rsize);
list = list->obj_next = next;
}
mcache_bkt_destroy(cp, btp, pbp, pobjs);
}
- /*
- * Updating the working set back to back essentially sets
- * the working set size to zero, so everything is reapable.
- */
- mcache_bkt_ws_update(cp);
- mcache_bkt_ws_update(cp);
-
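+ /* Zero the working set so that the reap below frees everything */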
+ mcache_bkt_ws_zero(cp);
mcache_bkt_ws_reap(cp);
}
MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
+/*
+ * Mark everything as eligible for reaping (working set is zero).
+ */
+static void
+mcache_bkt_ws_zero(mcache_t *cp)
+{
+ MCACHE_LOCK(&cp->mc_bkt_lock);
+
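+ /*
+ * With the reap limits and working-set minimums set to the
+ * current bucket totals, every full and empty bucket falls
+ * outside the working set and can be freed by a subsequent
+ * call to mcache_bkt_ws_reap().
+ */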
+ cp->mc_full.bl_reaplimit = cp->mc_full.bl_total;
+ cp->mc_full.bl_min = cp->mc_full.bl_total;
+ cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_total;
+ cp->mc_empty.bl_min = cp->mc_empty.bl_total;
+
+ MCACHE_UNLOCK(&cp->mc_bkt_lock);
+}
+
/*
* Reap all buckets that are beyond the working set.
*/
mcache_dispatch(mcache_reap_start, flag);
}
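+/*
+ * Synchronously reap the given cache: either purge all of its
+ * buckets and re-enable the CPU layer, or zero the working set
+ * and reap everything beyond it.
+ */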
+__private_extern__ void
+mcache_reap_now(mcache_t *cp, boolean_t purge)
+{
+ if (purge) {
+ mcache_bkt_purge(cp);
+ mcache_cache_bkt_enable(cp);
+ } else {
+ mcache_bkt_ws_zero(cp);
+ mcache_bkt_ws_reap(cp);
+ }
+}
+
static void
mcache_cache_reap(mcache_t *cp)
{