/*
- * Copyright (c) 2006-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2006-2011 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
(sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_SIZE)
#define MCACHE_CPU(c) \
- (mcache_cpu_t *)((char *)(c) + MCACHE_SIZE(cpu_number()))
+ (mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number())))
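The (void *) hop added above is the standard idiom for quieting cast-alignment warnings: casting a char * directly to a more strictly aligned pointer type can trip -Wcast-align, while the compiler does not track alignment through an intermediate void *. A minimal standalone sketch of the idiom (not xnu code):

	struct item { long x; };

	static void
	cast_example(char *base)
	{
		/* may warn under -Wcast-align: char * has alignment 1 */
		struct item *a = (struct item *)(base + sizeof (void *));

		/* quiet: alignment is not tracked through void * */
		struct item *b = (struct item *)(void *)(base + sizeof (void *));

		(void)a; (void)b;
	}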
/*
* MCACHE_LIST_LOCK() and MCACHE_LIST_UNLOCK() are macros used
#define MCACHE_UNLOCK(l) lck_mtx_unlock(l)
#define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l)
-/* This should be in a header file */
-#define atomic_add_32(a, n) ((void) OSAddAtomic(n, (volatile SInt32 *)a))
-
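With the private atomic_add_32() wrapper removed, call sites are expected to use the libkern primitive directly; OSAddAtomic() (declared in <libkern/OSAtomic.h>) atomically adds a 32-bit amount and returns the previous value, which the old macro discarded. Roughly (the counter name here is hypothetical):

	#include <libkern/OSAtomic.h>

	static volatile SInt32 mcache_reap_count;	/* hypothetical counter */

	static void
	bump_reap_count(void)
	{
		/* was: atomic_add_32(&mcache_reap_count, 1); */
		(void) OSAddAtomic(1, &mcache_reap_count);
	}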
static int ncpu;
static lck_mtx_t *mcache_llock;
static struct thread *mcache_llock_owner;
};
static mcache_t *mcache_create_common(const char *, size_t, size_t,
- mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_notifyfn_t,
- void *, u_int32_t, int, int);
+ mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
+ mcache_notifyfn_t, void *, u_int32_t, int, int);
static unsigned int mcache_slab_alloc(void *, mcache_obj_t ***,
unsigned int, int);
static void mcache_slab_free(void *, mcache_obj_t *, boolean_t);
PAGE_SIZE, "mcache");
if (mcache_zone == NULL)
panic("mcache_init: failed to allocate mcache zone\n");
+ zone_change(mcache_zone, Z_CALLERACCT, FALSE);
LIST_INIT(&mcache_head);
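The new zone_change() call opts mcache_zone out of per-caller accounting: with Z_CALLERACCT set to FALSE, memory drawn from this zone is billed to the kernel at large rather than to whichever task happened to trigger the allocation, which appears to be the usual setting for long-lived, globally shared zones.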
u_int32_t flags, int wait)
{
return (mcache_create_common(name, bufsize, align, mcache_slab_alloc,
- mcache_slab_free, mcache_slab_audit, NULL, NULL, flags, 1, wait));
+ mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1,
+ wait));
}
/*
__private_extern__ mcache_t *
mcache_create_ext(const char *name, size_t bufsize,
mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
- mcache_notifyfn_t notifyfn, void *arg, u_int32_t flags, int wait)
+ mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
+ u_int32_t flags, int wait)
{
return (mcache_create_common(name, bufsize, 0, allocfn,
- freefn, auditfn, notifyfn, arg, flags, 0, wait));
+ freefn, auditfn, logfn, notifyfn, arg, flags, 0, wait));
}
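A caller of the extended constructor now threads the log hook in between the audit and notify callbacks. A hedged sketch follows (my_alloc, my_free, my_log, my_cache_create, and struct my_obj are all hypothetical; the mcache_logfn_t signature is inferred from the invocations further down, which pass an object count, an object list, and an alloc/free flag):

	struct my_obj {
		int	mo_data[4];	/* arbitrary payload; free buffers double
					 * as mcache_obj_t freelist links */
	};

	static unsigned int my_alloc(void *, mcache_obj_t ***, unsigned int, int);
	static void my_free(void *, mcache_obj_t *, boolean_t);

	static void
	my_log(u_int32_t num, mcache_obj_t *list, boolean_t alloc)
	{
		/* record `num' objects on allocation; forget `list' on free */
	}

	static mcache_t *
	my_cache_create(void)
	{
		return (mcache_create_ext("my_cache", sizeof (struct my_obj),
		    my_alloc, my_free, NULL /* auditfn */, my_log,
		    NULL /* notifyfn */, NULL /* arg */, 0 /* flags */,
		    MCR_NOSLEEP));
	}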
/*
static mcache_t *
mcache_create_common(const char *name, size_t bufsize, size_t align,
mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
- mcache_notifyfn_t notifyfn, void *arg, u_int32_t flags, int need_zone,
- int wait)
+ mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
+ u_int32_t flags, int need_zone, int wait)
{
mcache_bkttype_t *btp;
mcache_t *cp = NULL;
char lck_name[64];
- /* If auditing is on and print buffer is NULL, allocate it now */
- if ((flags & MCF_AUDIT) && mca_dump_buf == NULL) {
+ /* If debugging is on and print buffer is NULL, allocate it now */
+ if ((flags & MCF_DEBUG) && mca_dump_buf == NULL) {
int malloc_wait = (wait & MCR_NOSLEEP) ? M_NOWAIT : M_WAITOK;
MALLOC(mca_dump_buf, char *, DUMP_MCA_BUF_SIZE, M_TEMP,
malloc_wait | M_ZERO);
cp->mc_slab_alloc = allocfn;
cp->mc_slab_free = freefn;
cp->mc_slab_audit = auditfn;
+ cp->mc_slab_log = logfn;
cp->mc_slab_notify = notifyfn;
cp->mc_private = need_zone ? cp : arg;
cp->mc_bufsize = bufsize;
/* If we got them all, return to caller */
if ((need -= objs) == 0) {
MCACHE_UNLOCK(&ccp->cc_lock);
+
+ if (!(cp->mc_flags & MCF_NOLEAKLOG) &&
+ cp->mc_slab_log != NULL)
+ (*cp->mc_slab_log)(num, *top, TRUE);
+
if (cp->mc_flags & MCF_DEBUG)
goto debug_alloc;
}
}
+ if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL)
+ (*cp->mc_slab_log)((num - need), *top, TRUE);
+
if (!(cp->mc_flags & MCF_DEBUG))
return (num - need);
debug_alloc:
- if (cp->mc_flags & MCF_VERIFY) {
+ if (cp->mc_flags & MCF_DEBUG) {
mcache_obj_t **o = top;
unsigned int n;
}
- /* Invoke the slab layer audit callback if auditing is enabled */
- if ((cp->mc_flags & MCF_AUDIT) && cp->mc_slab_audit != NULL)
+ /* Invoke the slab layer audit callback if debugging is enabled */
+ if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL)
(*cp->mc_slab_audit)(cp->mc_private, *top, TRUE);
return (num - need);
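Taken together, the two log calls in the allocation path report how many objects were actually handed out (num on the full path, num - need on the partial path) with alloc == TRUE, while the free path below passes the whole chain with alloc == FALSE; MCF_NOLEAKLOG suppresses both. A logger can therefore track outstanding objects. A hypothetical sketch, not the actual consumer of this hook:

	static void leak_remember(mcache_obj_t *);	/* hypothetical helper */
	static void leak_forget(mcache_obj_t *);	/* hypothetical helper */

	static void
	leak_log(u_int32_t num, mcache_obj_t *list, boolean_t alloc)
	{
		mcache_obj_t *o;
		u_int32_t n = 0;

		if (alloc) {
			/* remember each object handed out */
			for (o = list; o != NULL && n < num; o = o->obj_next, n++)
				leak_remember(o);
		} else {
			/* forget every object on the freed chain; num is 0 here */
			for (o = list; o != NULL; o = o->obj_next)
				leak_forget(o);
		}
	}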
mcache_obj_t *nlist;
mcache_bkt_t *bkt;
+ if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL)
+ (*cp->mc_slab_log)(0, list, FALSE);
+
- /* Invoke the slab layer audit callback if auditing is enabled */
- if ((cp->mc_flags & MCF_AUDIT) && cp->mc_slab_audit != NULL)
+ /* Invoke the slab layer audit callback if debugging is enabled */
+ if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL)
(*cp->mc_slab_audit)(cp->mc_private, list, FALSE);
MCACHE_LOCK(&ccp->cc_lock);
* the nearest 64-bit multiply; this is because we use
* 64-bit memory access to set/check the pattern.
*/
- if (flags & MCF_AUDIT) {
+ if (flags & MCF_DEBUG) {
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)buf + cp->mc_chunksize));
mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
/* Get the original address since we're about to free it */
pbuf = (void **)((intptr_t)base - sizeof (void *));
- if (flags & MCF_AUDIT) {
+ if (flags & MCF_DEBUG) {
VERIFY(((intptr_t)base + rsize) <=
((intptr_t)*pbuf + cp->mc_chunksize));
mcache_audit_free_verify(NULL, base, offset, rsize);
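The pbuf recovery works because, at allocation time, the slab layer stashes the original (unaligned) buffer address in the pointer-sized slot immediately below the aligned base it returns. A standalone sketch of that idiom in plain C, assuming a power-of-two align (names hypothetical):

	#include <stdint.h>
	#include <stdlib.h>

	void *
	alloc_aligned_stashed(size_t size, size_t align)
	{
		/* room for the stashed pointer plus alignment slack */
		char *raw = malloc(size + sizeof (void *) + align - 1);
		if (raw == NULL)
			return (NULL);
		uintptr_t base = ((uintptr_t)raw + sizeof (void *) + align - 1) &
		    ~((uintptr_t)align - 1);
		((void **)base)[-1] = raw;	/* original address just below base */
		return ((void *)base);
	}

	void
	free_aligned_stashed(void *base)
	{
		free(((void **)base)[-1]);	/* recover the original address */
	}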
if (nobjs > 0) {
mcache_obj_t *top = bkt->bkt_obj[nobjs - 1];
- if (cp->mc_flags & MCF_VERIFY) {
+ if (cp->mc_flags & MCF_DEBUG) {
mcache_obj_t *o = top;
int cnt = 0;
__private_extern__ void
mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size)
{
- u_int64_t *buf_end = (u_int64_t *)((char *)buf_arg + size);
+ u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf = (u_int64_t *)buf_arg;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
__private_extern__ void *
mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size)
{
- u_int64_t *buf_end = (u_int64_t *)((char *)buf_arg + size);
+ u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg,
size_t size)
{
- u_int64_t *buf_end = (u_int64_t *)((char *)buf_arg + size);
+ u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
u_int64_t *buf;
VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t)));
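All three pattern helpers walk the buffer in u_int64_t strides, which is why callers must hand them 8-byte-aligned, 8-byte-rounded ranges (hence the VERIFY checks and the 64-bit rounding noted earlier). A usage sketch, assuming the contract implied by the callers in this file, namely that verify returns NULL when the pattern is intact and otherwise the first mismatching word:

	static void
	pattern_example(void)
	{
		u_int64_t buf[8];	/* suitably aligned and sized by type */

		/* poison the area while it sits on a freelist */
		mcache_set_pattern(MCACHE_FREE_PATTERN, buf, sizeof (buf));

		/* before reuse, check that nothing scribbled on it */
		if (mcache_verify_pattern(MCACHE_FREE_PATTERN, buf,
		    sizeof (buf)) != NULL)
			panic("freed memory was modified");
	}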
((mcache_obj_t *)addr)->obj_next = next;
}
-#undef panic(...)
+#undef panic
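The old form was ill-formed: #undef takes a bare macro name, and any trailing tokens after the identifier draw "extra tokens at end of #undef directive". The parameter list belongs only to the definition, as in this hypothetical pairing:

	#define panic(...)	my_panic(__VA_ARGS__)	/* hypothetical redefinition */
	/* ... */
	#undef panic	/* removal names the macro only, with no parameter list */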
__private_extern__ char *
mcache_dump_mca(mcache_audit_t *mca)