/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _SYS_MCACHE_H
#define _SYS_MCACHE_H

#ifdef KERNEL_PRIVATE

#include <sys/types.h>
#include <sys/queue.h>
#include <mach/boolean.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>
/*
 * Unlike VERIFY(), ASSERT() is evaluated only in the DEBUG build.
 */
#define VERIFY(EX)	\
	((void)((EX) || assfail(#EX, __FILE__, __LINE__)))
#if DEBUG
#define ASSERT(EX)	VERIFY(EX)
#else
#define ASSERT(EX)	((void)0)
#endif /* DEBUG */
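/*
 * Usage sketch (illustrative only; the variables are hypothetical):
 * VERIFY() always evaluates its expression, so it may carry side effects;
 * ASSERT() compiles to nothing outside DEBUG, so its expression must not.
 *
 *	VERIFY(buf != NULL);		checked in all builds
 *	ASSERT(cp->mc_bufsize != 0);	checked only in DEBUG builds
 */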
/*
 * Compile time assert; this should be on its own someday.
 */
#define _CASSERT(x)	\
	switch (0) { case 0: case (x): ; }
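/*
 * Example (illustrative): when the argument evaluates to 0 at compile
 * time, the two case labels collide and compilation fails; a nonzero
 * argument compiles cleanly.
 *
 *	_CASSERT(sizeof (mcache_obj_t) == sizeof (void *));
 */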
/*
 * Atomic macros; these should be on their own someday.
 */
#define atomic_add_16_ov(a, n)						\
	((u_int16_t) OSAddAtomic16(n, (volatile SInt16 *)a))

#define atomic_add_16(a, n)						\
	((void) atomic_add_16_ov(a, n))

#define atomic_add_32_ov(a, n)						\
	((u_int32_t) OSAddAtomic(n, (volatile SInt32 *)a))

#define atomic_add_32(a, n)						\
	((void) atomic_add_32_ov(a, n))

#define atomic_add_64_ov(a, n)						\
	((u_int64_t) OSAddAtomic64(n, (volatile SInt64 *)a))

#define atomic_add_64(a, n)						\
	((void) atomic_add_64_ov(a, n))
#define atomic_set_64(a, n) do {					\
	while (!OSCompareAndSwap64(*a, n, (volatile UInt64 *)a))	\
		;							\
} while (0)

#if defined(__LP64__)
/* on LP64, a 64-bit load is a single atomic access */
#define atomic_get_64(n, a) do {					\
	(n) = *(a);							\
} while (0)
#else
#define atomic_get_64(n, a) do {					\
	(n) = atomic_add_64_ov(a, 0);					\
} while (0)
#endif /* __LP64__ */
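/*
 * Example (illustrative; "cnt" is a hypothetical counter): the *_ov
 * variants return the value the counter held before the addition, per
 * OSAddAtomic() semantics; the plain variants discard that result.
 *
 *	u_int32_t prev = atomic_add_32_ov(&cnt, 1);
 *	atomic_add_32(&cnt, -1);
 */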
#define CPU_CACHE_SIZE	64	/* cache line size; pads per-CPU structures */
#ifndef IS_P2ALIGNED
#define IS_P2ALIGNED(v, a)						\
	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif /* IS_P2ALIGNED */

#ifndef P2ROUNDUP
#define P2ROUNDUP(x, align)						\
	(-(-((uintptr_t)(x)) & -(align)))
#endif /* P2ROUNDUP */

#ifndef P2ROUNDDOWN
#define P2ROUNDDOWN(x, align)						\
	(((uintptr_t)(x)) & ~((uintptr_t)(align) - 1))
#endif /* P2ROUNDDOWN */
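/*
 * Examples (illustrative; the alignment must be a power of 2):
 *
 *	P2ROUNDUP(13, 8)	== 16
 *	P2ROUNDDOWN(13, 8)	== 8
 *	IS_P2ALIGNED(16, 8)	nonzero (aligned)
 *	IS_P2ALIGNED(13, 8)	== 0 (not aligned)
 */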
#define MCACHE_FREE_PATTERN		0xdeadbeefdeadbeefULL
#define MCACHE_UNINITIALIZED_PATTERN	0xbaddcafebaddcafeULL
/*
 * mcache allocation request flags.
 *
 * MCR_NOSLEEP and MCR_FAILOK are mutually exclusive.  The latter is used
 * by the mbuf allocator to handle the implementation of several caches
 * that involve multiple layers of mcache.  It implies a best-effort
 * blocking allocation request; if the request cannot be satisfied, the
 * caller will be blocked until further notice, similar to MCR_SLEEP,
 * except that upon a wakeup it will return immediately to the caller
 * regardless of whether the request has been fulfilled.
 *
 * MCR_TRYHARD implies a non-blocking allocation request, regardless of
 * whether MCR_NOSLEEP is set.  It informs the allocator that the request
 * must not cause the calling thread to block, and that the allocator must
 * have exhausted all possible schemes to fulfill the request, including
 * reclaims and/or purges, before returning to the caller.
 *
 * Regular mcache clients should only use MCR_SLEEP or MCR_NOSLEEP.
 */
#define MCR_SLEEP	0x0000		/* same as M_WAITOK */
#define MCR_NOSLEEP	0x0001		/* same as M_NOWAIT */
#define MCR_FAILOK	0x0100		/* private, for internal use only */
#define MCR_TRYHARD	0x0200		/* private, for internal use only */
#define MCR_USR1	0x1000		/* private, for internal use only */

#define MCR_NONBLOCKING	(MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD)
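/*
 * Example (illustrative; "cp" is a hypothetical cache): regular clients
 * pick exactly one of the two public modes.
 *
 *	buf = mcache_alloc(cp, MCR_SLEEP);	may block until satisfied
 *	buf = mcache_alloc(cp, MCR_NOSLEEP);	may fail and return NULL
 */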
/*
 * Generic one-way linked list element structure.  This is used to handle
 * mcache_alloc_ext() requests in order to chain the allocated objects
 * together before returning them to the caller.
 */
typedef struct mcache_obj {
	struct mcache_obj	*obj_next;
} mcache_obj_t;
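/*
 * Sketch (illustrative; "cp" is a hypothetical cache): mcache_alloc_ext()
 * chains the objects it returns through obj_next, and its return value is
 * the number of objects actually allocated; the caller walks the list to
 * recover the individual buffers.
 *
 *	mcache_obj_t *list, *o;
 *	unsigned int got = mcache_alloc_ext(cp, &list, 16, MCR_SLEEP);
 *	for (o = list; o != NULL; o = o->obj_next)
 *		... use the object ...
 */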
typedef struct mcache_bkt {
	void		*bkt_next;	/* next bucket in list */
	void		*bkt_obj[1];	/* one or more objects */
} mcache_bkt_t;
typedef struct mcache_bktlist {
	mcache_bkt_t	*bl_list;	/* bucket list */
	u_int32_t	bl_total;	/* number of buckets */
	u_int32_t	bl_min;		/* min since last update */
	u_int32_t	bl_reaplimit;	/* max reapable buckets */
	u_int64_t	bl_alloc;	/* allocations from this list */
} mcache_bktlist_t;
typedef struct mcache_bkttype {
	int		bt_bktsize;	/* bucket size (number of elements) */
	size_t		bt_minbuf;	/* all smaller buffers qualify */
	size_t		bt_maxbuf;	/* no larger buffers qualify */
	struct mcache	*bt_cache;	/* bucket cache */
} mcache_bkttype_t;
typedef struct mcache_cpu {
	decl_lck_mtx_data(, cc_lock);
	mcache_bkt_t	*cc_filled;	/* the currently filled bucket */
	mcache_bkt_t	*cc_pfilled;	/* the previously filled bucket */
	u_int64_t	cc_alloc;	/* allocations from this cpu */
	u_int64_t	cc_free;	/* frees to this cpu */
	int		cc_objs;	/* number of objects in filled bkt */
	int		cc_pobjs;	/* number of objects in previous bkt */
	int		cc_bktsize;	/* number of elements in a full bkt */
} __attribute__((aligned(CPU_CACHE_SIZE), packed)) mcache_cpu_t;
typedef unsigned int (*mcache_allocfn_t)(void *, mcache_obj_t ***,
    unsigned int, int);
typedef void (*mcache_freefn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_auditfn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_logfn_t)(u_int32_t, mcache_obj_t *, boolean_t);
typedef void (*mcache_notifyfn_t)(void *, u_int32_t);
typedef struct mcache {
	LIST_ENTRY(mcache) mc_list;	/* cache linkage */
	char		mc_name[32];	/* cache name */
	struct zone	*mc_slab_zone;	/* backend zone allocator */
	mcache_allocfn_t mc_slab_alloc;	/* slab layer allocate callback */
	mcache_freefn_t	mc_slab_free;	/* slab layer free callback */
	mcache_auditfn_t mc_slab_audit;	/* slab layer audit callback */
	mcache_logfn_t	mc_slab_log;	/* slab layer log callback */
	mcache_notifyfn_t mc_slab_notify; /* slab layer notify callback */
	void		*mc_private;	/* opaque arg to callbacks */
	size_t		mc_bufsize;	/* object size */
	size_t		mc_align;	/* object alignment */
	u_int32_t	mc_flags;	/* cache creation flags */
	u_int32_t	mc_purge_cnt;	/* # of purges requested by slab */
	u_int32_t	mc_enable_cnt;	/* # of reenables due to purges */
	u_int32_t	mc_waiter_cnt;	/* # of slab layer waiters */
	u_int32_t	mc_wretry_cnt;	/* # of wait retries */
	u_int32_t	mc_nwretry_cnt;	/* # of no-wait retry attempts */
	u_int32_t	mc_nwfail_cnt;	/* # of no-wait retries that failed */
	decl_lck_mtx_data(, mc_sync_lock); /* protects purges and reenables */
	lck_attr_t	*mc_sync_lock_attr;
	lck_grp_t	*mc_sync_lock_grp;
	lck_grp_attr_t	*mc_sync_lock_grp_attr;

	/*
	 * Keep CPU and bucket layer lock statistics separate.
	 */
	lck_attr_t	*mc_cpu_lock_attr;
	lck_grp_t	*mc_cpu_lock_grp;
	lck_grp_attr_t	*mc_cpu_lock_grp_attr;

	/*
	 * Bucket layer common to all CPUs.
	 */
	decl_lck_mtx_data(, mc_bkt_lock);
	lck_attr_t	*mc_bkt_lock_attr;
	lck_grp_t	*mc_bkt_lock_grp;
	lck_grp_attr_t	*mc_bkt_lock_grp_attr;
	mcache_bkttype_t *cache_bkttype;	/* bucket type */
	mcache_bktlist_t mc_full;	/* full buckets */
	mcache_bktlist_t mc_empty;	/* empty buckets */
	size_t		mc_chunksize;	/* bufsize + alignment */
	u_int32_t	mc_bkt_contention;	/* lock contention count */
	u_int32_t	mc_bkt_contention_prev;	/* previous snapshot */

	/*
	 * Per-CPU layer, aligned at cache line boundary.
	 */
	mcache_cpu_t	mc_cpu[1];
} mcache_t;
#define MCACHE_ALIGN	8	/* default guaranteed alignment */

/* Valid values for mc_flags */
#define MCF_VERIFY	0x00000001	/* enable verification */
#define MCF_TRACE	0x00000002	/* enable transaction auditing */
#define MCF_NOCPUCACHE	0x00000010	/* disable CPU layer caching */
#define MCF_NOLEAKLOG	0x00000100	/* disable leak logging */
#define MCF_EXPLEAKLOG	0x00000200	/* expose leak info to user space */

#define MCF_DEBUG	(MCF_VERIFY | MCF_TRACE)
#define MCF_FLAGS_MASK	\
	(MCF_DEBUG | MCF_NOCPUCACHE | MCF_NOLEAKLOG | MCF_EXPLEAKLOG)

/* Valid values for notify callback */
#define MCN_RETRYALLOC	0x00000001	/* Allocation should be retried */

#define MCACHE_STACK_DEPTH	16
typedef struct mcache_audit {
	struct mcache_audit *mca_next;	/* next audit struct */
	void		*mca_addr;	/* address of buffer */
	mcache_t	*mca_cache;	/* parent cache of the buffer */
	struct thread	*mca_thread;	/* thread doing transaction */
	struct thread	*mca_pthread;	/* previous transaction thread */
	size_t		mca_contents_size; /* size of contents */
	void		*mca_contents;	/* contents at last free */
	uint16_t	mca_depth;	/* pc stack depth */
	uint16_t	mca_pdepth;	/* previous transaction pc stack depth */
	void		*mca_stack[MCACHE_STACK_DEPTH];
	void		*mca_pstack[MCACHE_STACK_DEPTH];
	void		*mca_uptr;	/* user-specific pointer */
	uint32_t	mca_uflags;	/* user-specific flags */
} mcache_audit_t;
__private_extern__ int assfail(const char *, const char *, int);
__private_extern__ void mcache_init(void);
__private_extern__ unsigned int mcache_getflags(void);
__private_extern__ mcache_t *mcache_create(const char *, size_t,
    size_t, u_int32_t, int);
__private_extern__ void *mcache_alloc(mcache_t *, int);
__private_extern__ void mcache_free(mcache_t *, void *);
__private_extern__ mcache_t *mcache_create_ext(const char *, size_t,
    mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
    mcache_notifyfn_t, void *, u_int32_t, int);
__private_extern__ void mcache_destroy(mcache_t *);
__private_extern__ unsigned int mcache_alloc_ext(mcache_t *, mcache_obj_t **,
    unsigned int, int);
__private_extern__ void mcache_free_ext(mcache_t *, mcache_obj_t *);
__private_extern__ void mcache_reap(void);
__private_extern__ boolean_t mcache_purge_cache(mcache_t *);
__private_extern__ void mcache_waiter_inc(mcache_t *);
__private_extern__ void mcache_waiter_dec(mcache_t *);
__private_extern__ boolean_t mcache_bkt_isempty(mcache_t *);
__private_extern__ void mcache_buffer_log(mcache_audit_t *, void *,
    mcache_t *);
__private_extern__ void mcache_set_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_set_pattern(u_int64_t, u_int64_t,
    void *, size_t);
__private_extern__ void mcache_audit_free_verify(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ void mcache_audit_free_verify_set(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ char *mcache_dump_mca(mcache_audit_t *);
__private_extern__ void mcache_audit_panic(mcache_audit_t *, void *, size_t,
    int64_t, int64_t);

__private_extern__ mcache_t *mcache_audit_cache;
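/*
 * Typical client usage (a minimal sketch; the names are hypothetical and
 * error handling is elided):
 *
 *	mcache_t *cp;
 *	void *buf;
 *
 *	cp = mcache_create("my_cache", 128, MCACHE_ALIGN, 0, MCR_SLEEP);
 *	buf = mcache_alloc(cp, MCR_SLEEP);
 *	... use buf ...
 *	mcache_free(cp, buf);
 *	mcache_destroy(cp);
 */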
#endif /* KERNEL_PRIVATE */

#endif /* _SYS_MCACHE_H */