/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 */
#ifdef	KERNEL_PRIVATE

#ifndef	_KERN_ZALLOC_H_
#define	_KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>
#ifdef	MACH_KERNEL_PRIVATE

#include <zone_debug.h>
#include <kern/locks.h>
#include <kern/queue.h>
#include <kern/thread_call.h>
#include <kern/btlog.h>

#include <sys/queue.h>
#include <san/kasan.h>
typedef struct gzalloc_data {
	uint32_t	gzfc_index;
	vm_offset_t	*gzfc;
} gzalloc_data_t;
/*
 *	A zone is a collection of fixed size blocks for which there
 *	is fast allocation/deallocation access.  Kernel routines can
 *	use zones to manage data structures dynamically, creating a zone
 *	for each type of data structure to be managed.
 */
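/*
 * Usage sketch (illustrative, not part of the original header): a subsystem
 * that manages many fixed-size "widget" records would create one zone for
 * that type at init time and then allocate and free elements from it with
 * zalloc()/zfree(). The struct widget type, the sizing limits, and the zone
 * name below are hypothetical.
 *
 *	static zone_t widget_zone;
 *
 *	void
 *	widget_init(void)
 *	{
 *		widget_zone = zinit(sizeof(struct widget),
 *		    1024 * sizeof(struct widget), PAGE_SIZE, "widgets");
 *	}
 *
 *	struct widget *
 *	widget_create(void)
 *	{
 *		return (struct widget *)zalloc(widget_zone);
 *	}
 *
 *	void
 *	widget_destroy(struct widget *w)
 *	{
 *		zfree(widget_zone, w);
 *	}
 */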

struct zone_free_element;
struct zone_page_metadata;

struct zone {
	struct zone_free_element *free_elements;	/* free elements directly linked */
	struct {
		queue_head_t	any_free_foreign;	/* foreign pages crammed into zone */
		queue_head_t	all_free;
		queue_head_t	intermediate;
		queue_head_t	all_used;
	} pages;			/* list of zone_page_metadata structs, which maintain per-page free element lists */
	int		count;		/* Number of elements used now */
	int		countfree;	/* Number of free elements */
	int		count_all_free_pages;	/* Number of pages collectable by GC */
	lck_attr_t	lock_attr;	/* zone lock attribute */
	decl_lck_mtx_data(,lock)	/* zone lock */
	lck_mtx_ext_t	lock_ext;	/* placeholder for indirect mutex */
	vm_size_t	cur_size;	/* current memory utilization */
	vm_size_t	max_size;	/* how large can this zone grow */
	vm_size_t	elem_size;	/* size of an element */
	vm_size_t	alloc_size;	/* size used for more memory */
	uint64_t	page_count __attribute__((aligned(8)));	/* number of pages used by this zone */
	uint64_t	sum_count;	/* count of allocs (life of zone) */
	uint64_t
	/* boolean_t */	exhaustible        :1,	/* (F) merely return if empty? */
	/* boolean_t */	collectable        :1,	/* (F) garbage collect empty pages */
	/* boolean_t */	expandable         :1,	/* (T) expand zone (with message)? */
	/* boolean_t */	allows_foreign     :1,	/* (F) allow non-zalloc space */
	/* boolean_t */	doing_alloc_without_vm_priv :1,	/* is zone expanding now via a non-vm_privileged thread? */
	/* boolean_t */	doing_alloc_with_vm_priv :1,	/* is zone expanding now via a vm_privileged thread? */
	/* boolean_t */	waiting            :1,	/* is thread waiting for expansion? */
	/* boolean_t */	async_pending      :1,	/* asynchronous allocation pending? */
	/* boolean_t */	zleak_on           :1,	/* Are we collecting allocation information? */
	/* boolean_t */	caller_acct        :1,	/* do we account allocation/free to the caller? */
	/* boolean_t */	noencrypt          :1,
	/* boolean_t */	no_callout         :1,
	/* boolean_t */	async_prio_refill  :1,
	/* boolean_t */	gzalloc_exempt     :1,
	/* boolean_t */	alignment_required :1,
	/* boolean_t */	zone_logging       :1,	/* Enable zone logging for this zone. */
	/* boolean_t */	zone_replenishing  :1,
	/* boolean_t */	kasan_quarantine   :1,
	/* boolean_t */	tags               :1,
	/* boolean_t */	tags_inline        :1,
	/* future    */	tag_zone_index     :6,
	/* boolean_t */	zone_valid         :1,
	/* future    */	_reserved          :5;

	int		index;		/* index into zone_info arrays for this zone */
	const char	*zone_name;	/* a name for the zone */

#if CONFIG_ZLEAKS
	uint32_t	zleak_capture;	/* per-zone counter for capturing every N allocations */
#endif /* CONFIG_ZLEAKS */
	uint32_t	zp_count;	/* counter for poisoning every N frees */
	vm_size_t	prio_refill_watermark;
	thread_t	zone_replenish_thread;
#if CONFIG_GZALLOC
	gzalloc_data_t	gz;
#endif /* CONFIG_GZALLOC */
	vm_size_t	kasan_redzone;
	btlog_t		*zlog_btlog;	/* zone logging structure to hold stacks and element references to those stacks. */
};

/*
 *	structure for tracking zone usage
 *	Used either one per task/thread for all zones or <per-task,per-zone>.
 */
typedef struct zinfo_usage_store_t {
	/* These fields may be updated atomically, and so must be 8 byte aligned */
	uint64_t	alloc __attribute__((aligned(8)));	/* allocation counter */
	uint64_t	free __attribute__((aligned(8)));	/* free counter */
} zinfo_usage_store_t;
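/*
 * Sketch of how a caller might maintain these counters (illustrative; the
 * zinfo_usage_store_t instance and the use of OSAddAtomic64() here are
 * assumptions, not something this header mandates):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static zinfo_usage_store_t zinfo;
 *
 *	static void
 *	zinfo_note_alloc(vm_size_t size)
 *	{
 *		OSAddAtomic64((SInt64)size, (volatile SInt64 *)&zinfo.alloc);
 *	}
 *
 *	static void
 *	zinfo_note_free(vm_size_t size)
 *	{
 *		OSAddAtomic64((SInt64)size, (volatile SInt64 *)&zinfo.free);
 *	}
 */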
/*
 * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a
 * userspace reboot is needed. The only other way to query for this information
 * is via mach_memory_info() which is unavailable on release kernels.
 */
extern uint64_t get_zones_collectable_bytes(void);
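/*
 * From user space the same figure is exposed through the sysctl named above;
 * a daemon could read it roughly as follows (illustrative sketch, not part of
 * this header):
 *
 *	#include <sys/sysctl.h>
 *
 *	uint64_t collectable = 0;
 *	size_t len = sizeof(collectable);
 *	if (sysctlbyname("kern.zones_collectable_bytes", &collectable, &len,
 *	    NULL, 0) == 0) {
 *		// decide whether a userspace reboot is warranted
 *	}
 */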
/*
 * zone_gc also checks if the zone_map is getting close to full and triggers jetsams if needed, provided
 * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the
 * vm_pageout_garbage_collect thread.
 */
extern void	zone_gc(boolean_t consider_jetsams);
extern void	consider_zone_gc(boolean_t consider_jetsams);
extern void	drop_free_elements(zone_t z);
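/*
 * Call-site sketch (illustrative): ordinary contexts request a collection
 * with consider_jetsams == FALSE; only the vm_pageout_garbage_collect thread
 * may pass TRUE, which avoids deadlocking while triggering jetsams.
 *
 *	consider_zone_gc(FALSE);
 *
 * and, only from vm_pageout_garbage_collect():
 *
 *	consider_zone_gc(TRUE);
 */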
/* Debug logging for zone-map-exhaustion jetsams. */
extern void	get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
extern void	get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);
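/*
 * Sketch of how zone-map-exhaustion logging might use these (illustrative;
 * the local variable names and the 32-byte name buffer are assumptions):
 *
 *	uint64_t zmap_size, zmap_capacity, largest_size;
 *	char largest_name[32];
 *
 *	get_zone_map_size(&zmap_size, &zmap_capacity);
 *	get_largest_zone_info(largest_name, sizeof(largest_name), &largest_size);
 *	printf("zone map %llu/%llu bytes, largest zone %s (%llu bytes)\n",
 *	    zmap_size, zmap_capacity, largest_name, largest_size);
 */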
/* Bootstrap zone module (create zone zone) */
extern void		zone_bootstrap(void);

/* Init zone module */
extern void		zone_init(
					vm_size_t	map_size);	/* size of zone map */

/* Stack use statistics */
extern void		stack_fake_zone_init(int zone_index);
extern void		stack_fake_zone_info(
					int		*count,
					vm_size_t	*cur_size,
					vm_size_t	*max_size,
					vm_size_t	*elem_size,
					vm_size_t	*alloc_size,
					uint64_t	*sum_size,
					int		*collectable,
					int		*exhaustable,
					int		*caller_acct);
#if ZONE_DEBUG

extern void		zone_debug_enable(
					zone_t		z);

extern void		zone_debug_disable(
					zone_t		z);

#define zone_debug_enabled(z)	z->active_zones.next
#define ROUNDUP(x,y)		((((x)+(y)-1)/(y))*(y))
#define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
#endif	/* ZONE_DEBUG */
extern unsigned int	num_zones;
extern struct zone	zone_array[];
/*
 * zindex and page_count must pack into 16 bits
 * update tools/lldbmacros/memory.py:GetRealMetadata
 * when these values change
 */

#define ZINDEX_BITS              (10U)
#define PAGECOUNT_BITS           (16U - ZINDEX_BITS)
#define MULTIPAGE_METADATA_MAGIC ((1UL << ZINDEX_BITS) - 1)
#define ZONE_CHUNK_MAXPAGES      ((1UL << PAGECOUNT_BITS) - 1)
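/*
 * Packing sketch (illustrative only; the helpers below are hypothetical and
 * the real layout lives in zone_page_metadata in zalloc.c): a zone index and
 * a chunk page count share 16 bits, 10 bits for the index and 6 for the
 * count, with the all-ones index reserved as MULTIPAGE_METADATA_MAGIC.
 *
 *	static inline uint16_t
 *	zmeta_pack(uint16_t zindex, uint16_t page_count)
 *	{
 *		return (uint16_t)((page_count << ZINDEX_BITS) |
 *		    (zindex & ((1U << ZINDEX_BITS) - 1)));
 *	}
 *
 *	static inline uint16_t
 *	zmeta_zindex(uint16_t packed)
 *	{
 *		return packed & ((1U << ZINDEX_BITS) - 1);
 *	}
 *
 *	static inline uint16_t
 *	zmeta_page_count(uint16_t packed)
 *	{
 *		return packed >> ZINDEX_BITS;
 *	}
 */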
/*
 * The max # of elements in a chunk should fit into zone_page_metadata.free_count (uint16_t).
 * Update this if the type of free_count changes.
 */
#define ZONE_CHUNK_MAXELEMENTS   (UINT16_MAX)
#endif	/* MACH_KERNEL_PRIVATE */
/* Item definitions for zalloc/zinit/zone_change */
#define Z_EXHAUST	1	/* Make zone exhaustible	*/
#define Z_COLLECT	2	/* Make zone collectable	*/
#define Z_EXPAND	3	/* Make zone expandable		*/
#define Z_FOREIGN	4	/* Allow collectable zone to contain foreign elements */
#define Z_CALLERACCT	5	/* Account alloc/free against the caller */
#define Z_NOENCRYPT	6	/* Don't encrypt zone during hibernation */
#define Z_NOCALLOUT	7	/* Don't asynchronously replenish the zone via callouts */
#define Z_ALIGNMENT_REQUIRED	8
#define Z_GZALLOC_EXEMPT	9	/* Not tracked in guard allocation mode */
#define Z_KASAN_QUARANTINE	10	/* Allow zone elements to be quarantined on free */
#ifdef	XNU_KERNEL_PRIVATE
#define Z_TAGS_ENABLED	11	/* Store tags */
#endif	/* XNU_KERNEL_PRIVATE */
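/*
 * Typical use (illustrative sketch; my_zone is hypothetical): these items
 * are passed to zone_change(), declared later in this header, usually right
 * after a zone has been created with zinit(), e.g.
 *
 *	zone_change(my_zone, Z_NOENCRYPT, TRUE);
 *	zone_change(my_zone, Z_CALLERACCT, FALSE);
 */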
#ifdef	XNU_KERNEL_PRIVATE

extern vm_offset_t	zone_map_min_address;
extern vm_offset_t	zone_map_max_address;
/* Non-waiting for memory version of zalloc */
extern void *	zalloc_nopagewait(
					zone_t		zone);

/* selective version of zalloc */
extern void *	zalloc_canblock(
					zone_t		zone,
					boolean_t	canblock);

/* selective version of zalloc */
extern void *	zalloc_canblock_tag(
					zone_t		zone,
					boolean_t	canblock,
					vm_size_t	reqsize,
					vm_tag_t	tag);
/* Get from zone free list */
extern void *	zget(
					zone_t		zone);

/* Fill zone with memory */
extern void		zcram(
					zone_t		zone,
					vm_offset_t	newmem,
					vm_size_t	size);

/* Initially fill zone with specified number of elements */
extern int		zfill(
					zone_t		zone,
					int		nelem);

extern void		zone_prio_refill_configure(zone_t, vm_size_t);
/* See above/top of file. Z_* definitions moved so they would be usable by kexts */

/* Preallocate space for zone from zone map */
extern void		zprealloc(
					zone_t		zone,
					vm_size_t	size);

extern integer_t	zone_free_count(
					zone_t		zone);

extern vm_size_t	zone_element_size(
					void		*addr,
					zone_t		*z);
/*
 * MAX_ZTRACE_DEPTH configures how deep of a stack trace is taken on each zalloc in the zone of interest.  15
 * levels is usually enough to get past all the layers of code in kalloc and IOKit and see who the actual
 * caller is up above these lower levels.
 *
 * This is used both for the zone leak detector and the zone corruption log.
 */
#define MAX_ZTRACE_DEPTH	15
/*
 * Structure for keeping track of a backtrace, used for leak detection.
 * This is in the .h file because it is used during panic, see kern/debug.c
 * A non-zero size indicates that the trace is in use.
 */
struct ztrace {
	vm_size_t	zt_size;			/* How much memory are all the allocations referring to this trace taking up? */
	uint32_t	zt_depth;			/* depth of stack (0 to MAX_ZTRACE_DEPTH) */
	void*		zt_stack[MAX_ZTRACE_DEPTH];	/* series of return addresses from OSBacktrace */
	uint32_t	zt_collisions;			/* How many times did a different stack land here while it was occupied? */
	uint32_t	zt_hit_count;			/* for determining effectiveness of hash function */
};
#if CONFIG_ZLEAKS

/* support for the kern.zleak.* sysctls */

extern kern_return_t zleak_activate(void);
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern int get_zleak_state(void);

#endif	/* CONFIG_ZLEAKS */
#ifndef VM_MAX_TAG_ZONES
#error MAX_TAG_ZONES
#endif

#if VM_MAX_TAG_ZONES

extern boolean_t zone_tagging_on;
extern uint32_t  zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size);

#endif /* VM_MAX_TAG_ZONES */
/* These functions used for leak detection both in zalloc.c and mbuf.c */
extern uintptr_t hash_mix(uintptr_t);
extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
extern uint32_t hashaddr(uintptr_t, uint32_t);
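/*
 * Sketch of how the leak detector might bucket a captured stack (illustrative;
 * the table, its size, and the use of OSBacktrace() as the capture primitive
 * are assumptions based on the zt_stack comment above):
 *
 *	#define ZTRACE_TABLE_SIZE	8192
 *	static struct ztrace ztraces[ZTRACE_TABLE_SIZE];
 *
 *	void *bt[MAX_ZTRACE_DEPTH];
 *	uint32_t depth = OSBacktrace(bt, MAX_ZTRACE_DEPTH);
 *	uint32_t slot = hashbacktrace((uintptr_t *)bt, depth, ZTRACE_TABLE_SIZE);
 *	struct ztrace *trace = &ztraces[slot];
 *	if (trace->zt_size != 0 &&
 *	    memcmp(trace->zt_stack, bt, depth * sizeof(void *)) != 0) {
 *		trace->zt_collisions++;
 *	}
 */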
#define lock_zone(zone)					\
MACRO_BEGIN						\
	lck_mtx_lock_spin_always(&(zone)->lock);	\
MACRO_END

#define unlock_zone(zone)				\
MACRO_BEGIN						\
	lck_mtx_unlock(&(zone)->lock);			\
MACRO_END
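/*
 * Usage sketch (illustrative): fields of a struct zone are normally examined
 * or updated only while holding the zone lock, e.g.
 *
 *	lock_zone(z);
 *	vm_size_t in_use = z->cur_size;
 *	boolean_t valid = z->zone_valid;
 *	unlock_zone(z);
 */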
#if	CONFIG_GZALLOC
void gzalloc_init(vm_size_t);
void gzalloc_zone_init(zone_t);
void gzalloc_configure(void);
void gzalloc_reconfigure(zone_t);
void gzalloc_empty_free_cache(zone_t);
boolean_t gzalloc_enabled(void);

vm_offset_t gzalloc_alloc(zone_t, boolean_t);
boolean_t gzalloc_free(zone_t, void *);
boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *);
#endif /* CONFIG_GZALLOC */
/* Callbacks for btlog lock/unlock */
void zlog_btlog_lock(__unused void *);
void zlog_btlog_unlock(__unused void *);
#ifdef MACH_KERNEL_PRIVATE
#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */

int track_this_zone(const char *zonename, const char *logname);
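/*
 * Sketch (illustrative): zalloc.c compares each zone's name against the zone
 * name supplied via boot-args when deciding whether to enable logging for
 * that zone, roughly along these lines (the boot-arg buffer below is
 * hypothetical):
 *
 *	char zone_name_to_log[MAX_ZONE_NAME];
 *
 *	if (track_this_zone(z->zone_name, zone_name_to_log)) {
 *		z->zone_logging = TRUE;
 *	}
 */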
#endif	/* MACH_KERNEL_PRIVATE */

#if DEBUG || DEVELOPMENT
extern boolean_t run_zone_test(void);
extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag);
#endif /* DEBUG || DEVELOPMENT */
#endif	/* XNU_KERNEL_PRIVATE */
/* Allocate from zone */
extern void *	zalloc(
					zone_t		zone);

/* Non-blocking version of zalloc */
extern void *	zalloc_noblock(
					zone_t		zone);

/* Free zone element */
extern void		zfree(
					zone_t		zone,
					void		*elem);

/* Create zone */
extern zone_t	zinit(
					vm_size_t	size,		/* the size of an element */
					vm_size_t	maxmem,		/* maximum memory to use */
					vm_size_t	alloc,		/* allocation size */
					const char	*name);		/* a name for the zone */
/* Change zone parameters */
extern void		zone_change(
					zone_t		zone,
					unsigned int	item,
					boolean_t	value);

/* Destroy the zone */
extern void		zdestroy(
					zone_t		zone);

#endif	/* _KERN_ZALLOC_H_ */

#endif	/* KERNEL_PRIVATE */