/*
 * Copyright (c) 2006-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SYS_MCACHE_H
#define _SYS_MCACHE_H

#ifdef KERNEL_PRIVATE

#include <sys/types.h>
#include <sys/queue.h>
#include <mach/boolean.h>
#include <kern/locks.h>

/*
 * Unlike VERIFY(), ASSERT() is evaluated only in DEBUG build.
 */
#define VERIFY(EX)      ((void)((EX) || assfail(#EX, __FILE__, __LINE__)))
#if DEBUG
#define ASSERT(EX)      VERIFY(EX)
#else
#define ASSERT(EX)      ((void)0)
#endif

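/*
 * For example, VERIFY(buf != NULL) always evaluates the expression and
 * calls assfail() on failure, whereas ASSERT(buf != NULL) compiles to a
 * no-op outside of DEBUG builds.  ("buf" is an illustrative variable.)
 */
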
#if defined(__ppc__)
#define CPU_CACHE_SIZE  128
#elif defined(__arm__)
#define CPU_CACHE_SIZE  32
#else
#define CPU_CACHE_SIZE  64
#endif

#ifndef IS_P2ALIGNED
#define IS_P2ALIGNED(v, a) \
        ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif /* IS_P2ALIGNED */

#ifndef P2ROUNDUP
#define P2ROUNDUP(x, align) \
        (-(-((uintptr_t)(x)) & -(align)))
#endif /* P2ROUNDUP */

#ifndef P2ROUNDDOWN
#define P2ROUNDDOWN(x, align) \
        (((uintptr_t)(x)) & ~((uintptr_t)(align) - 1))
#endif /* P2ROUNDDOWN */

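/*
 * Illustrative results for the power-of-2 helpers above; the alignment
 * argument must be a power of 2:
 *
 *      IS_P2ALIGNED(0x1000, 16)  -> true
 *      P2ROUNDUP(13, 8)          -> 16
 *      P2ROUNDDOWN(13, 8)        -> 8
 */
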
#define MCACHE_FREE_PATTERN             0xdeadbeefdeadbeefULL
#define MCACHE_UNINITIALIZED_PATTERN    0xbaddcafebaddcafeULL

/*
 * mcache allocation request flags.
 *
 * MCR_NOSLEEP and MCR_FAILOK are mutually exclusive.  The latter is used
 * by the mbuf allocator to handle the implementation of several caches that
 * involve multiple layers of mcache.  It implies a best effort blocking
 * allocation request; if the request cannot be satisfied, the caller will
 * be blocked until further notice, similar to MCR_SLEEP, except that upon
 * a wake up it will return immediately to the caller regardless of whether
 * the request has been fulfilled.
 *
 * MCR_TRYHARD implies a non-blocking allocation request, regardless of
 * whether MCR_NOSLEEP is set.  It informs the allocator that the request
 * should not cause the calling thread to block, and that it must have
 * exhausted all possible schemes to fulfill the request, including doing
 * reclaims and/or purges, before returning to the caller.
 *
 * Regular mcache clients should only use MCR_SLEEP or MCR_NOSLEEP.
 */
#define MCR_SLEEP       0x0000          /* same as M_WAITOK */
#define MCR_NOSLEEP     0x0001          /* same as M_NOWAIT */
#define MCR_FAILOK      0x0100          /* private, for internal use only */
#define MCR_TRYHARD     0x0200          /* private, for internal use only */
#define MCR_USR1        0x1000          /* private, for internal use only */

#define MCR_NONBLOCKING (MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD)

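/*
 * Illustrative use of the request flags with mcache_alloc(), declared
 * later in this header (the cache pointer is hypothetical).  A caller
 * that can block simply passes MCR_SLEEP; a caller that must not block
 * passes MCR_NOSLEEP and handles failure:
 *
 *      void *buf = mcache_alloc(my_cache, MCR_NOSLEEP);
 *      if (buf == NULL)
 *              ... the request could not be satisfied without blocking;
 *                  retry later ...
 */
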
/*
 * Generic one-way linked list element structure.  This is used to handle
 * mcache_alloc_ext() requests in order to chain the allocated objects
 * together before returning them to the caller.
 */
typedef struct mcache_obj {
        struct mcache_obj       *obj_next;
} mcache_obj_t;

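/*
 * Objects returned by mcache_alloc_ext(), declared later in this header,
 * are chained through obj_next.  An illustrative walk of such a chain
 * (my_cache and the requested count are hypothetical; the returned count
 * may be smaller than requested for non-blocking requests):
 *
 *      mcache_obj_t *list = NULL, *o;
 *      unsigned int n = mcache_alloc_ext(my_cache, &list, 16, MCR_NOSLEEP);
 *      for (o = list; o != NULL; o = o->obj_next)
 *              ... use the object ...
 *      mcache_free_ext(my_cache, list);
 */
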
typedef struct mcache_bkt {
        void            *bkt_next;      /* next bucket in list */
        void            *bkt_obj[1];    /* one or more objects */
} mcache_bkt_t;

typedef struct mcache_bktlist {
        mcache_bkt_t    *bl_list;       /* bucket list */
        u_int32_t       bl_total;       /* number of buckets */
        u_int32_t       bl_min;         /* min since last update */
        u_int32_t       bl_reaplimit;   /* max reapable buckets */
        u_int64_t       bl_alloc;       /* allocations from this list */
} mcache_bktlist_t;

typedef struct mcache_bkttype {
        int             bt_bktsize;     /* bucket size (number of elements) */
        size_t          bt_minbuf;      /* all smaller buffers qualify */
        size_t          bt_maxbuf;      /* no larger buffers qualify */
        struct mcache   *bt_cache;      /* bucket cache */
} mcache_bkttype_t;

typedef struct mcache_cpu {
        decl_lck_mtx_data(, cc_lock);
        mcache_bkt_t    *cc_filled;     /* the currently filled bucket */
        mcache_bkt_t    *cc_pfilled;    /* the previously filled bucket */
        u_int64_t       cc_alloc;       /* allocations from this cpu */
        u_int64_t       cc_free;        /* frees to this cpu */
        int             cc_objs;        /* number of objects in filled bkt */
        int             cc_pobjs;       /* number of objects in previous bkt */
        int             cc_bktsize;     /* number of elements in a full bkt */
} __attribute__((aligned(CPU_CACHE_SIZE), packed)) mcache_cpu_t;

typedef unsigned int (*mcache_allocfn_t)(void *, mcache_obj_t ***,
    unsigned int, int);
typedef void (*mcache_freefn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_auditfn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_notifyfn_t)(void *, u_int32_t);

typedef struct mcache {
        LIST_ENTRY(mcache) mc_list;     /* cache linkage */
        char            mc_name[32];    /* cache name */
        struct zone     *mc_slab_zone;  /* backend zone allocator */
        mcache_allocfn_t mc_slab_alloc; /* slab layer allocate callback */
        mcache_freefn_t mc_slab_free;   /* slab layer free callback */
        mcache_auditfn_t mc_slab_audit; /* slab layer audit callback */
        mcache_notifyfn_t mc_slab_notify; /* slab layer notify callback */
        void            *mc_private;    /* opaque arg to callbacks */
        size_t          mc_bufsize;     /* object size */
        size_t          mc_align;       /* object alignment */
        u_int32_t       mc_flags;       /* cache creation flags */
        u_int32_t       mc_purge_cnt;   /* # of purges requested by slab */
        u_int32_t       mc_enable_cnt;  /* # of reenables due to purges */
        u_int32_t       mc_waiter_cnt;  /* # of slab layer waiters */
        u_int32_t       mc_wretry_cnt;  /* # of wait retries */
        u_int32_t       mc_nwretry_cnt; /* # of no-wait retry attempts */
        u_int32_t       mc_nwfail_cnt;  /* # of no-wait retries that failed */
        decl_lck_mtx_data(, mc_sync_lock); /* protects purges and reenables */
        lck_attr_t      *mc_sync_lock_attr;
        lck_grp_t       *mc_sync_lock_grp;
        lck_grp_attr_t  *mc_sync_lock_grp_attr;

        /*
         * Keep CPU and buckets layers lock statistics separate.
         */
        lck_attr_t      *mc_cpu_lock_attr;
        lck_grp_t       *mc_cpu_lock_grp;
        lck_grp_attr_t  *mc_cpu_lock_grp_attr;

        /*
         * Bucket layer common to all CPUs
         */
        decl_lck_mtx_data(, mc_bkt_lock);
        lck_attr_t      *mc_bkt_lock_attr;
        lck_grp_t       *mc_bkt_lock_grp;
        lck_grp_attr_t  *mc_bkt_lock_grp_attr;
        mcache_bkttype_t *cache_bkttype; /* bucket type */
        mcache_bktlist_t mc_full;       /* full buckets */
        mcache_bktlist_t mc_empty;      /* empty buckets */
        size_t          mc_chunksize;   /* bufsize + alignment */
        u_int32_t       mc_bkt_contention; /* lock contention count */
        u_int32_t       mc_bkt_contention_prev; /* previous snapshot */

        /*
         * Per-CPU layer, aligned at cache line boundary
         */
        mcache_cpu_t    mc_cpu[1];
} mcache_t;

#define MCACHE_ALIGN    8               /* default guaranteed alignment */

/* Valid values for mc_flags */
#define MCF_VERIFY      0x00000001      /* enable verification */
#define MCF_AUDIT       0x00000002      /* enable transaction auditing */
#define MCF_NOCPUCACHE  0x00000010      /* disable CPU layer caching */

#define MCF_DEBUG       (MCF_VERIFY | MCF_AUDIT)
#define MCF_FLAGS_MASK  (MCF_DEBUG | MCF_NOCPUCACHE)

/* Valid values for notify callback */
#define MCN_RETRYALLOC  0x00000001      /* Allocation should be retried */

#define MCACHE_STACK_DEPTH 16

typedef struct mcache_audit {
        struct mcache_audit *mca_next;  /* next audit struct */
        void            *mca_addr;      /* address of buffer */
        mcache_t        *mca_cache;     /* parent cache of the buffer */
        struct thread   *mca_thread;    /* thread doing transaction */
        struct thread   *mca_pthread;   /* previous transaction thread */
        size_t          mca_contents_size; /* size of contents */
        void            *mca_contents;  /* contents at last free */
        uint16_t        mca_depth;      /* pc stack depth */
        uint16_t        mca_pdepth;     /* previous transaction pc stack */
        void            *mca_stack[MCACHE_STACK_DEPTH];
        void            *mca_pstack[MCACHE_STACK_DEPTH];
        void            *mca_uptr;      /* user-specific pointer */
        uint32_t        mca_uflags;     /* user-specific flags */
} mcache_audit_t;

__private_extern__ int assfail(const char *, const char *, int);
__private_extern__ void mcache_init(void);
__private_extern__ unsigned int mcache_getflags(void);
__private_extern__ mcache_t *mcache_create(const char *, size_t,
    size_t, u_int32_t, int);
__private_extern__ void *mcache_alloc(mcache_t *, int);
__private_extern__ void mcache_free(mcache_t *, void *);

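/*
 * A minimal usage sketch of mcache_create()/mcache_alloc()/mcache_free();
 * names, sizes and flag choices are illustrative only, and mcache_destroy()
 * is declared further below:
 *
 *      mcache_t *mcp;
 *
 *      mcp = mcache_create("my_cache", sizeof (struct my_obj),
 *          MCACHE_ALIGN, MCF_VERIFY, MCR_SLEEP);
 *
 *      struct my_obj *p = mcache_alloc(mcp, MCR_SLEEP);
 *      ... use the object ...
 *      mcache_free(mcp, p);
 *      ...
 *      mcache_destroy(mcp);
 */
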
__private_extern__ mcache_t *mcache_create_ext(const char *, size_t,
    mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_notifyfn_t,
    void *, u_int32_t, int);

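/*
 * mcache_create_ext() lets a client supply its own slab-layer callbacks
 * in place of the zone backend.  A sketch of the wiring; the callback and
 * argument names are hypothetical, and the audit and notify callbacks are
 * simply omitted here:
 *
 *      static unsigned int my_slab_alloc(void *, mcache_obj_t ***,
 *          unsigned int, int);
 *      static void my_slab_free(void *, mcache_obj_t *, boolean_t);
 *
 *      mcp = mcache_create_ext("my_ext_cache", sizeof (struct my_obj),
 *          my_slab_alloc, my_slab_free, NULL, NULL, my_arg, 0, MCR_SLEEP);
 */
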
__private_extern__ void mcache_destroy(mcache_t *);
__private_extern__ unsigned int mcache_alloc_ext(mcache_t *, mcache_obj_t **,
    unsigned int, int);
__private_extern__ void mcache_free_ext(mcache_t *, mcache_obj_t *);
__private_extern__ void mcache_reap(void);
__private_extern__ boolean_t mcache_purge_cache(mcache_t *);
__private_extern__ void mcache_waiter_inc(mcache_t *);
__private_extern__ void mcache_waiter_dec(mcache_t *);
__private_extern__ boolean_t mcache_bkt_isempty(mcache_t *);
__private_extern__ void mcache_buffer_log(mcache_audit_t *, void *, mcache_t *);
__private_extern__ void mcache_set_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_set_pattern(u_int64_t, u_int64_t,
    void *, size_t);
__private_extern__ void mcache_audit_free_verify(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ void mcache_audit_free_verify_set(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ char *mcache_dump_mca(mcache_audit_t *);
__private_extern__ void mcache_audit_panic(mcache_audit_t *, void *, size_t,
    int64_t, int64_t);

__private_extern__ mcache_t *mcache_audit_cache;

#endif /* KERNEL_PRIVATE */

#endif /* _SYS_MCACHE_H */