/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Zone-based memory allocator.  A zone is a collection of fixed size
 *	data blocks for which quick allocation/deallocation is possible.
 */
#include <zone_debug.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/bits.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <prng/random.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>
#include <machine/machine_routines.h> /* ml_cpu_get_info */

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

#include <san/kasan.h>
/*
 *  ZONE_ALIAS_ADDR (deprecated)
 */

#define from_zone_map(addr, size) \
	((vm_offset_t)(addr) >= zone_map_min_address && \
	((vm_offset_t)(addr) + size - 1) <  zone_map_max_address)
/*
 * Zone Corruption Debugging
 *
 * We use three techniques to detect modification of a zone element
 * after it's been freed.
 *
 * (1) Check the freelist next pointer for sanity.
 * (2) Store a backup of the next pointer at the end of the element,
 *     and compare it to the primary next pointer when the element is allocated
 *     to detect corruption of the freelist due to use-after-free bugs.
 *     The backup pointer is also XORed with a per-boot random cookie.
 * (3) Poison the freed element by overwriting it with 0xdeadbeef,
 *     and check for that value when the element is being reused to make sure
 *     no part of the element has been modified while it was on the freelist.
 *     This will also help catch read-after-frees, as code will now dereference
 *     0xdeadbeef instead of a valid but freed pointer.
 *
 * (1) and (2) occur for every allocation and free to a zone.
 * This is done to make it slightly more difficult for an attacker to
 * manipulate the freelist to behave in a specific way.
 *
 * Poisoning (3) occurs periodically for every N frees (counted per-zone)
 * and on every free for zones smaller than a cacheline. If -zp
 * is passed as a boot arg, poisoning occurs for every free.
 *
 * Performance slowdown is inversely proportional to the frequency of poisoning,
 * with a 4-5% hit around N=1, down to ~0.3% at N=16 and just "noise" at N=32
 * and higher. You can expect to find a 100% reproducible bug in an average of
 * N tries, with a standard deviation of about N, but you will want to set
 * "-zp" to always poison every free if you are attempting to reproduce
 * a known bug.
 *
 * For a more heavyweight, but finer-grained method of detecting misuse
 * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c.
 *
 * Zone Corruption Logging
 *
 * You can also track where corruptions come from by using the boot-arguments
 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later
 * in this document for more implementation and usage information.
 *
 * Zone Leak Detection
 *
 * To debug leaks of zone memory, use the zone leak detection tool 'zleaks'
 * found later in this file via the showtopztrace and showz* macros in kgmacros,
 * or use zlog without the -zc argument.
 */
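/*
 * A minimal sketch of techniques (1)-(3), assuming a single hypothetical
 * per-boot 'cookie' and a word-sized element layout; the kernel's actual
 * checks (with separate poisoned/nopoison cookies) live in free_to_zone()
 * and try_alloc_from_zone() below.
 *
 *	static uintptr_t cookie;	// early_random() at boot
 *
 *	static void sketch_free(uintptr_t *elem, size_t words, uintptr_t next)
 *	{
 *		elem[0] = next ^ cookie;		// primary next pointer, obfuscated
 *		elem[words - 1] = next ^ cookie;	// (2) backup copy at element end
 *		for (size_t i = 1; i < words - 1; i++)
 *			elem[i] = 0xdeadbeef;		// (3) poison the body
 *	}
 *
 *	static uintptr_t sketch_alloc(uintptr_t *elem, size_t words)
 *	{
 *		uintptr_t next = elem[0] ^ cookie;
 *		// (1)/(2): primary and backup must still agree
 *		assert(next == (elem[words - 1] ^ cookie));
 *		for (size_t i = 1; i < words - 1; i++)
 *			assert(elem[i] == 0xdeadbeef);	// (3) poison intact
 *		return next;
 *	}
 */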
/* Returns TRUE if we rolled over the counter at factor */
static inline boolean_t
sample_counter(volatile uint32_t * count_p, uint32_t factor)
{
	uint32_t old_count, new_count;
	boolean_t rolled_over;

	do {
		new_count = old_count = *count_p;

		if (++new_count >= factor) {
			rolled_over = TRUE;
			new_count = 0;
		} else
			rolled_over = FALSE;

	} while (!OSCompareAndSwap(old_count, new_count, count_p));

	return rolled_over;
}
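/*
 * Typical use, assuming a hypothetical per-zone counter: callers poison an
 * element only when the shared counter rolls over, so on average one in
 * every 'factor' frees takes the expensive poisoning path:
 *
 *	if (zp_factor != 0 && sample_counter(&zone_counter, zp_factor))
 *		poison = TRUE;
 */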
#if defined(__LP64__)
#define ZP_POISON	0xdeadbeefdeadbeef
#else
#define ZP_POISON	0xdeadbeef
#endif

#define ZP_DEFAULT_SAMPLING_FACTOR 16
#define ZP_DEFAULT_SCALE_FACTOR 4

/*
 *  A zp_factor of 0 indicates zone poisoning is disabled,
 *  however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline).
 *  Passing the -no-zp boot-arg disables even this behavior.
 *  In all cases, we record and check the integrity of a backup pointer.
 */

/* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */
uint32_t	zp_factor		= 0;

/* set by zp-scale=N boot arg, scales zp_factor by zone size */
uint32_t	zp_scale		= 0;

/* set in zp_init, zero indicates -no-zp boot-arg */
vm_size_t	zp_tiny_zone_limit	= 0;

/* initialized to a per-boot random value in zp_init */
uintptr_t	zp_poisoned_cookie	= 0;
uintptr_t	zp_nopoison_cookie	= 0;

#if VM_MAX_TAG_ZONES
boolean_t	zone_tagging_on;
#endif /* VM_MAX_TAG_ZONES */

static struct bool_gen zone_bool_gen;
/*
 * initialize zone poisoning
 * called from zone_bootstrap before any allocations are made from zalloc
 */
static void
zp_init(void)
{
	char temp_buf[16];

	/*
	 * Initialize backup pointer random cookie for poisoned elements
	 * Try not to call early_random() back to back, it may return
	 * the same value if mach_absolute_time doesn't have sufficient time
	 * to tick over between calls.  <rdar://problem/11597395>
	 * (This is only a problem on embedded devices)
	 */
	zp_poisoned_cookie = (uintptr_t) early_random();

	/*
	 * Always poison zones smaller than a cacheline,
	 * because it's pretty close to free
	 */
	ml_cpu_info_t cpu_info;
	ml_cpu_get_info(&cpu_info);
	zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size;

	zp_factor = ZP_DEFAULT_SAMPLING_FACTOR;
	zp_scale  = ZP_DEFAULT_SCALE_FACTOR;

	//TODO: Bigger permutation?
	/*
	 * Permute the default factor +/- 1 to make it less predictable
	 * This adds or subtracts ~4 poisoned objects per 1000 frees.
	 */
	if (zp_factor != 0) {
		uint32_t rand_bits = early_random() & 0x3;

		if (rand_bits == 0x1)
			zp_factor += 1;
		else if (rand_bits == 0x2)
			zp_factor -= 1;
		/* if 0x0 or 0x3, leave it alone */
	}

	/* -zp: enable poisoning for every alloc and free */
	if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
		zp_factor = 1;
	}

	/* -no-zp: disable poisoning completely even for tiny zones */
	if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
		zp_factor          = 0;
		zp_tiny_zone_limit = 0;
		printf("Zone poisoning disabled\n");
	}

	/* zp-factor=XXXX: override how often to poison freed zone elements */
	if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) {
		printf("Zone poisoning factor override: %u\n", zp_factor);
	}

	/* zp-scale=XXXX: override how much zone size scales zp-factor by */
	if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) {
		printf("Zone poisoning scale factor override: %u\n", zp_scale);
	}

	/* Initialize backup pointer random cookie for unpoisoned elements */
	zp_nopoison_cookie = (uintptr_t) early_random();

	if (zp_poisoned_cookie == zp_nopoison_cookie)
		panic("early_random() is broken: %p and %p are not random\n",
		      (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie);

	/*
	 * Use the last bit in the backup pointer to hint poisoning state
	 * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
	 * the low bits are zero.
	 */
	zp_poisoned_cookie |=   (uintptr_t)0x1ULL;
	zp_nopoison_cookie &= ~((uintptr_t)0x1ULL);

#if defined(__LP64__)
	/*
	 * Make backup pointers more obvious in GDB for 64 bit
	 * by making OxFFFFFF... ^ cookie = 0xFACADE...
	 * (0xFACADE = 0xFFFFFF ^ 0x053521)
	 * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011)
	 * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
	 * by the sanity check, so it's OK for that part of the cookie to be predictable.
	 *
	 * TODO: Use #defines, xors, and shifts
	 */

	zp_poisoned_cookie &= 0x000000FFFFFFFFFF;
	zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */

	zp_nopoison_cookie &= 0x000000FFFFFFFFFF;
	zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */
#endif
}
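/*
 * Checking the arithmetic behind the cookie tags set up above:
 *	0xFFFFFF ^ 0x053521 = 0xFACADE
 *	0xFFFFFF ^ 0x3f0011 = 0xC0FFEE
 * so a backup pointer whose high three bytes are 0xFFFFFF reads in the
 * debugger as 0xFACADE... when xored with the poisoned cookie and as
 * 0xC0FFEE... when xored with the nopoison cookie.
 */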
/*
 * These macros are used to keep track of the number
 * of pages being used by the zone currently. The
 * z->page_count is not protected by the zone lock.
 */
#define ZONE_PAGE_COUNT_INCR(z, count)		\
{						\
	OSAddAtomic64(count, &(z->page_count));	\
}

#define ZONE_PAGE_COUNT_DECR(z, count)			\
{							\
	OSAddAtomic64(-count, &(z->page_count));	\
}

vm_map_t	zone_map = VM_MAP_NULL;

/* for is_sane_zone_element and garbage collection */

vm_offset_t	zone_map_min_address = 0;  /* initialized in zone_init */
vm_offset_t	zone_map_max_address = 0;

/* Globals for random boolean generator for elements in free list */
#define MAX_ENTROPY_PER_ZCRAM		4

/* VM region for all metadata structures */
vm_offset_t	zone_metadata_region_min = 0;
vm_offset_t	zone_metadata_region_max = 0;
decl_lck_mtx_data(static ,zone_metadata_region_lck)
lck_attr_t	zone_metadata_lock_attr;
lck_mtx_ext_t	zone_metadata_region_lck_ext;

/* Helpful for walking through a zone's free element list. */
struct zone_free_element {
	struct zone_free_element *next;
	/* ... */
	/* void *backup_ptr; */
};

/*
 * Protects zone_array, num_zones, num_zones_in_use, and zone_empty_bitmap
 */
decl_simple_lock_data(, all_zones_lock)
unsigned int		num_zones_in_use;
unsigned int		num_zones;

#define MAX_ZONES	320
struct zone		zone_array[MAX_ZONES];

/* Used to keep track of empty slots in the zone_array */
bitmap_t zone_empty_bitmap[BITMAP_LEN(MAX_ZONES)];
#if DEBUG || DEVELOPMENT
/*
 * Used for sysctl kern.run_zone_test which is not thread-safe. Ensure only one thread goes through at a time.
 * Or we can end up with multiple test zones (if a second zinit() comes through before zdestroy()), which could lead us to
 * run out of zones.
 */
decl_simple_lock_data(, zone_test_lock)
static boolean_t zone_test_running = FALSE;
static zone_t test_zone_ptr = NULL;
#endif /* DEBUG || DEVELOPMENT */

#define PAGE_METADATA_GET_ZINDEX(page_meta) \
	(page_meta->zindex)

#define PAGE_METADATA_GET_ZONE(page_meta) \
	(&(zone_array[page_meta->zindex]))

#define PAGE_METADATA_SET_ZINDEX(page_meta, index) \
	page_meta->zindex = (index);
struct zone_page_metadata {
	queue_chain_t		pages;	/* linkage pointer for metadata lists */

	/* Union for maintaining start of element free list and real metadata (for multipage allocations) */
	union {
		/*
		 * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because
		 * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset
		 * from start of the allocation chunk to free element list head.
		 */
		uint32_t	freelist_offset;

		/*
		 * This field is used to lookup the real metadata for multipage allocations, where we mark the
		 * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC.
		 * Offset from this fake metadata to real metadata of allocation chunk (-ve offset).
		 */
		uint32_t	real_metadata_offset;
	};

	/*
	 * For the first page in the allocation chunk, this represents the total number of free elements in
	 * the chunk.
	 */
	uint16_t		free_count;
	unsigned		zindex : ZINDEX_BITS;		/* Zone index within the zone_array */
	unsigned		page_count : PAGECOUNT_BITS;	/* Count of pages within the allocation chunk */
};
/* Macro to get page index (within zone_map) of page containing element */
#define PAGE_INDEX_FOR_ELEMENT(element) \
	(((vm_offset_t)trunc_page(element) - zone_map_min_address) / PAGE_SIZE)

/* Macro to get metadata structure given a page index in zone_map */
#define PAGE_METADATA_FOR_PAGE_INDEX(index) \
	(zone_metadata_region_min + ((index) * sizeof(struct zone_page_metadata)))

/* Macro to get index (within zone_map) for given metadata */
#define PAGE_INDEX_FOR_METADATA(page_meta) \
	(((vm_offset_t)page_meta - zone_metadata_region_min) / sizeof(struct zone_page_metadata))

/* Macro to get page for given page index in zone_map */
#define PAGE_FOR_PAGE_INDEX(index) \
	(zone_map_min_address + (PAGE_SIZE * (index)))

/* Macro to get the actual metadata for a given address */
#define PAGE_METADATA_FOR_ELEMENT(element) \
	(struct zone_page_metadata *)(PAGE_METADATA_FOR_PAGE_INDEX(PAGE_INDEX_FOR_ELEMENT(element)))

/* Magic value to indicate empty element free list */
#define PAGE_METADATA_EMPTY_FREELIST	((uint32_t)(~0))
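/*
 * A worked example of the lookup chain, assuming 4K pages and illustrative
 * addresses: for an element at zone_map_min_address + 0x5008,
 *	PAGE_INDEX_FOR_ELEMENT(element)	= 0x5000 / PAGE_SIZE = 5
 *	PAGE_METADATA_FOR_PAGE_INDEX(5)	= zone_metadata_region_min +
 *					  5 * sizeof(struct zone_page_metadata)
 * which is exactly the composition performed by PAGE_METADATA_FOR_ELEMENT.
 */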
boolean_t get_zone_info(zone_t z, mach_zone_name_t *zn, mach_zone_info_t *zi);
boolean_t is_zone_map_nearing_exhaustion(void);
extern void vm_pageout_garbage_collect(int collect);
static inline void *
page_metadata_get_freelist(struct zone_page_metadata *page_meta)
{
	assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
	if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST)
		return NULL;
	else {
		if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
			return (void *)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)) + page_meta->freelist_offset);
		else
			return (void *)((vm_offset_t)page_meta + page_meta->freelist_offset);
	}
}

static inline void
page_metadata_set_freelist(struct zone_page_metadata *page_meta, void *addr)
{
	assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
	if (addr == NULL)
		page_meta->freelist_offset = PAGE_METADATA_EMPTY_FREELIST;
	else {
		if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
			page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)));
		else
			page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - (vm_offset_t)page_meta);
	}
}
static inline struct zone_page_metadata *
page_metadata_get_realmeta(struct zone_page_metadata *page_meta)
{
	assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC);
	return (struct zone_page_metadata *)((vm_offset_t)page_meta - page_meta->real_metadata_offset);
}

static inline void
page_metadata_set_realmeta(struct zone_page_metadata *page_meta, struct zone_page_metadata *real_meta)
{
	assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC);
	assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC);
	assert((vm_offset_t)page_meta > (vm_offset_t)real_meta);
	vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta;
	assert(offset <= UINT32_MAX);
	page_meta->real_metadata_offset = (uint32_t)offset;
}

/* The backup pointer is stored in the last pointer-sized location in an element. */
static inline vm_offset_t *
get_backup_ptr(vm_size_t  elem_size,
               vm_offset_t *element)
{
	return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t));
}
/*
 * Routine to populate a page backing metadata in the zone_metadata_region.
 * Must be called without the zone lock held as it might potentially block.
 */
static inline void
zone_populate_metadata_page(struct zone_page_metadata *page_meta)
{
	vm_offset_t page_metadata_begin = trunc_page(page_meta);
	vm_offset_t page_metadata_end = trunc_page((vm_offset_t)page_meta + sizeof(struct zone_page_metadata));

	for(;page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) {
		if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin))
			continue;
		/* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */
		lck_mtx_lock(&zone_metadata_region_lck);
		if (0 == pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) {
			kern_return_t __unused ret = kernel_memory_populate(zone_map,
				       page_metadata_begin,
				       PAGE_SIZE,
				       KMA_KOBJECT,
				       VM_KERN_MEMORY_OSFMK);

			/* should not fail with the given arguments */
			assert(ret == KERN_SUCCESS);
		}
		lck_mtx_unlock(&zone_metadata_region_lck);
	}
	return;
}

static inline uint16_t
get_metadata_alloc_count(struct zone_page_metadata *page_meta)
{
	assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
	struct zone *z = PAGE_METADATA_GET_ZONE(page_meta);
	return ((page_meta->page_count * PAGE_SIZE) / z->elem_size);
}
/*
 * Routine to lookup metadata for any given address.
 * If init is marked as TRUE, this should be called without holding the zone lock
 * since the initialization might block.
 */
static inline struct zone_page_metadata *
get_zone_page_metadata(struct zone_free_element *element, boolean_t init)
{
	struct zone_page_metadata *page_meta = 0;

	if (from_zone_map(element, sizeof(struct zone_free_element))) {
		page_meta = (struct zone_page_metadata *)(PAGE_METADATA_FOR_ELEMENT(element));
		if (init)
			zone_populate_metadata_page(page_meta);
	} else {
		page_meta = (struct zone_page_metadata *)(trunc_page((vm_offset_t)element));
	}
	if (init)
		__nosan_bzero((char *)page_meta, sizeof(struct zone_page_metadata));
	return ((PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta));
}

/* Routine to get the page for a given metadata */
static inline vm_offset_t
get_zone_page(struct zone_page_metadata *page_meta)
{
	if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
		return (vm_offset_t)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)));
	else
		return (vm_offset_t)(trunc_page(page_meta));
}
#if VM_MAX_TAG_ZONES

// for zones with tagging enabled:

// calculate a pointer to the tag base entry,
// holding either a uint32_t the first tag offset for a page in the zone map,
// or two uint16_t tags if the page can only hold one or two elements

#define ZTAGBASE(zone, element) \
    (&((uint32_t *)zone_tagbase_min)[atop((element) - zone_map_min_address)])

// pointer to the tag for an element
#define ZTAG(zone, element)                                     \
    ({                                                          \
        vm_tag_t * result;                                      \
        if ((zone)->tags_inline) {                              \
            result = (vm_tag_t *) ZTAGBASE((zone), (element));  \
            if ((page_mask & element) >= (zone)->elem_size) result++;    \
        } else {                                                \
            result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size];   \
        }                                                       \
        result;                                                 \
    })


static vm_offset_t  zone_tagbase_min;
static vm_offset_t  zone_tagbase_max;
static vm_offset_t  zone_tagbase_map_size;
static vm_map_t     zone_tagbase_map;

static vm_offset_t  zone_tags_min;
static vm_offset_t  zone_tags_max;
static vm_offset_t  zone_tags_map_size;
static vm_map_t     zone_tags_map;

// simple heap allocator for allocating the tags for new memory

decl_lck_mtx_data(,ztLock)	/* heap lock */

enum
{
    ztFreeIndexCount = 8,
    ztFreeIndexMax   = (ztFreeIndexCount - 1),
    ztTagsPerBlock   = 4
};

struct ztBlock
{
#if __LITTLE_ENDIAN__
    uint64_t   free:1,
               next:21,
               prev:21,
               size:21;
#else
// ztBlock needs free bit least significant
#error !__LITTLE_ENDIAN__
#endif
};
typedef struct ztBlock ztBlock;

static ztBlock *   ztBlocks;
static uint32_t    ztBlocksCount;
static uint32_t    ztBlocksFree;
static uint32_t
ztLog2up(uint32_t size)
{
    if (1 == size) size = 0;
    else size = 32 - __builtin_clz(size - 1);
    return (size);
}

static uint32_t
ztLog2down(uint32_t size)
{
    size = 31 - __builtin_clz(size);
    return (size);
}
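// Example values: ztLog2up(1) == 0, ztLog2up(5) == 3 (next power of two, 8),
// ztLog2down(5) == 2 (previous power of two, 4). The tag heap below indexes
// its free lists by these size classes, clamped to ztFreeIndexMax.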
static void
ztFault(vm_map_t map, const void * address, size_t size, uint32_t flags)
{
    vm_map_offset_t addr = (vm_map_offset_t) address;
    vm_map_offset_t page, end;

    page = trunc_page(addr);
    end  = round_page(addr + size);

    for (; page < end; page += page_size)
    {
        if (!pmap_find_phys(kernel_pmap, page))
        {
            kern_return_t __unused
            ret = kernel_memory_populate(map, page, PAGE_SIZE,
                                         KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG);
            assert(ret == KERN_SUCCESS);
        }
    }
}

static boolean_t
ztPresent(const void * address, size_t size)
{
    vm_map_offset_t addr = (vm_map_offset_t) address;
    vm_map_offset_t page, end;
    boolean_t       result;

    page = trunc_page(addr);
    end  = round_page(addr + size);
    for (result = TRUE; (page < end); page += page_size)
    {
        result = pmap_find_phys(kernel_pmap, page);
        if (!result) break;
    }
    return (result);
}
void ztDump(boolean_t sanity);
void ztDump(boolean_t sanity)
{
    uint32_t q, cq, p;

    for (q = 0; q <= ztFreeIndexMax; q++)
    {
        p = q;
        do
        {
            if (sanity)
            {
                cq = ztLog2down(ztBlocks[p].size);
                if (cq > ztFreeIndexMax) cq = ztFreeIndexMax;
                if (!ztBlocks[p].free
                    || ((p != q) && (q != cq))
                    || (ztBlocks[ztBlocks[p].next].prev != p)
                    || (ztBlocks[ztBlocks[p].prev].next != p))
                {
                    kprintf("zterror at %d", p);
                    ztDump(FALSE);
                    kprintf("zterror at %d", p);
                }
                continue;
            }
            kprintf("zt[%03d]%c %d, %d, %d\n",
                    p, ztBlocks[p].free ? 'F' : 'A',
                    ztBlocks[p].next, ztBlocks[p].prev,
                    ztBlocks[p].size);
            p = ztBlocks[p].next;
        }
        while (p != q);
        if (!sanity) printf("\n");
    }
    if (!sanity) printf("-----------------------\n");
}

#define ZTBDEQ(idx)                                                 \
    ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next;     \
    ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev;
static void
ztFree(zone_t zone __unused, uint32_t index, uint32_t count)
{
    uint32_t q, w, p, size, merge;

    assert(count);
    ztBlocksFree += count;

    // merge with preceding
    merge = (index + count);
    if ((merge < ztBlocksCount)
        && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
        && ztBlocks[merge].free)
    {
        ZTBDEQ(merge);
        count += ztBlocks[merge].size;
    }

    // merge with following
    merge = (index - 1);
    if ((merge > ztFreeIndexMax)
        && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
        && ztBlocks[merge].free)
    {
        size = ztBlocks[merge].size;
        count += size;
        index -= size;
        ZTBDEQ(index);
    }

    q = ztLog2down(count);
    if (q > ztFreeIndexMax) q = ztFreeIndexMax;
    w = q;
    // queue in order of size
    while (TRUE)
    {
        p = ztBlocks[w].next;
        if (p == q) break;
        if (ztBlocks[p].size >= count) break;
        w = p;
    }
    ztBlocks[p].prev = index;
    ztBlocks[w].next = index;

    // fault in first
    ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);

    // mark first & last with free flag and size
    ztBlocks[index].free = TRUE;
    ztBlocks[index].size = count;
    ztBlocks[index].prev = w;
    ztBlocks[index].next = p;
    if (count > 1)
    {
        index += (count - 1);
        // fault in last
        ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
        ztBlocks[index].free = TRUE;
        ztBlocks[index].size = count;
    }
}
static uint32_t
ztAlloc(zone_t zone, uint32_t count)
{
    uint32_t q, w, p, leftover;

    assert(count);

    q = ztLog2up(count);
    if (q > ztFreeIndexMax) q = ztFreeIndexMax;
    do
    {
        w = q;
        while (TRUE)
        {
            p = ztBlocks[w].next;
            if (p == q) break;
            if (ztBlocks[p].size >= count)
            {
                // dequeue, mark both ends allocated
                ztBlocks[w].next = ztBlocks[p].next;
                ztBlocks[ztBlocks[p].next].prev = w;
                ztBlocks[p].free = FALSE;
                ztBlocksFree -= ztBlocks[p].size;
                if (ztBlocks[p].size > 1) ztBlocks[p + ztBlocks[p].size - 1].free = FALSE;

                // fault all the allocation
                ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0);
                // mark last as allocated
                if (count > 1) ztBlocks[p + count - 1].free = FALSE;
                // free any remainder
                leftover = ztBlocks[p].size - count;
                if (leftover) ztFree(zone, p + ztBlocks[p].size - leftover, leftover);

                return (p);
            }
            w = p;
        }
        q++;
    }
    while (q <= ztFreeIndexMax);

    return (-1U);
}
static void
ztInit(vm_size_t max_zonemap_size, lck_grp_t * group)
{
    kern_return_t	  ret;
    vm_map_kernel_flags_t vmk_flags;
    uint32_t		  idx;

    lck_mtx_init(&ztLock, group, LCK_ATTR_NULL);

    // allocate submaps VM_KERN_MEMORY_DIAG

    zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t);
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_permanent = TRUE;
    ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size,
                        FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
                        &zone_tagbase_map);

    if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed");
    zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size);

    zone_tags_map_size = 2048*1024 * sizeof(vm_tag_t);
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_permanent = TRUE;
    ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size,
                        FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
                        &zone_tags_map);

    if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed");
    zone_tags_max = zone_tags_min + round_page(zone_tags_map_size);

    ztBlocks = (ztBlock *) zone_tags_min;
    ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));

    // initialize the qheads
    lck_mtx_lock(&ztLock);

    ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0);
    for (idx = 0; idx < ztFreeIndexCount; idx++)
    {
        ztBlocks[idx].free = TRUE;
        ztBlocks[idx].next = idx;
        ztBlocks[idx].prev = idx;
        ztBlocks[idx].size = 0;
    }
    // free remaining space
    ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);

    lck_mtx_unlock(&ztLock);
}
void
ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
{
    uint32_t * tagbase;
    uint32_t   count, block, blocks, idx;
    size_t     pages;

    pages = atop(size);
    tagbase = ZTAGBASE(zone, mem);

    lck_mtx_lock(&ztLock);

    // fault the tagbase
    ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0);

    if (!zone->tags_inline)
    {
        // allocate tags
        count = (uint32_t)(size / zone->elem_size);
        blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
        block = ztAlloc(zone, blocks);
        if (-1U == block) ztDump(false);
        assert(-1U != block);
    }

    lck_mtx_unlock(&ztLock);

    if (!zone->tags_inline)
    {
        // set tag base for each page
        block *= ztTagsPerBlock;
        for (idx = 0; idx < pages; idx++)
        {
            tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size);
        }
    }
}

void
ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
{
    uint32_t * tagbase;
    uint32_t   count, block, blocks, idx;
    size_t     pages;

    // set tag base for each page
    pages = atop(size);
    tagbase = ZTAGBASE(zone, mem);
    block = tagbase[0];
    for (idx = 0; idx < pages; idx++)
    {
        tagbase[idx] = 0xFFFFFFFF;
    }

    lck_mtx_lock(&ztLock);
    if (!zone->tags_inline)
    {
        count = (uint32_t)(size / zone->elem_size);
        blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
        assert(block != 0xFFFFFFFF);
        block /= ztTagsPerBlock;
        ztFree(NULL /* zone is unlocked */, block, blocks);
    }

    lck_mtx_unlock(&ztLock);
}
uint32_t
zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size)
{
    zone_t   z;
    uint32_t idx;

    simple_lock(&all_zones_lock);

    for (idx = 0; idx < num_zones; idx++)
    {
        z = &(zone_array[idx]);
        if (!z->tags) continue;
        if (tag_zone_index != z->tag_zone_index) continue;
        *elem_size = z->elem_size;
        break;
    }

    simple_unlock(&all_zones_lock);

    if (idx == num_zones) idx = -1U;

    return (idx);
}

#endif /* VM_MAX_TAG_ZONES */
/* Routine to get the size of a zone allocated address. If the address doesnt belong to the
 * zone_map, returns 0.
 */
vm_size_t
zone_element_size(void *addr, zone_t *z)
{
	struct zone *src_zone;
	if (from_zone_map(addr, sizeof(void *))) {
		struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
		src_zone = PAGE_METADATA_GET_ZONE(page_meta);
		if (z) {
			*z = src_zone;
		}
		return (src_zone->elem_size);
	} else {
#if CONFIG_GZALLOC
		vm_size_t gzsize;
		if (gzalloc_element_size(addr, z, &gzsize)) {
			return gzsize;
		}
#endif /* CONFIG_GZALLOC */

		return 0;
	}
}

#if DEBUG || DEVELOPMENT

vm_size_t
zone_element_info(void *addr, vm_tag_t * ptag)
{
	vm_size_t     size = 0;
	vm_tag_t      tag = VM_KERN_MEMORY_NONE;
	struct zone * src_zone;

	if (from_zone_map(addr, sizeof(void *))) {
		struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
		src_zone = PAGE_METADATA_GET_ZONE(page_meta);
#if VM_MAX_TAG_ZONES
		if (__improbable(src_zone->tags)) {
			tag = (ZTAG(src_zone, (vm_offset_t) addr)[0] >> 1);
		}
#endif /* VM_MAX_TAG_ZONES */
		size = src_zone->elem_size;
	} else {
#if CONFIG_GZALLOC
		gzalloc_element_size(addr, NULL, &size);
#endif /* CONFIG_GZALLOC */
	}
	*ptag = tag;
	return size;
}

#endif /* DEBUG || DEVELOPMENT */
/*
 * Zone checking helper function.
 * A pointer that satisfies these conditions is OK to be a freelist next pointer
 * A pointer that doesn't satisfy these conditions indicates corruption
 */
static inline boolean_t
is_sane_zone_ptr(zone_t		zone,
                 vm_offset_t	addr,
                 size_t		obj_size)
{
	/*  Must be aligned to pointer boundary */
	if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0))
		return FALSE;

	/*  Must be a kernel address */
	if (__improbable(!pmap_kernel_va(addr)))
		return FALSE;

	/*  Must be from zone map if the zone only uses memory from the zone_map */
	/*
	 * TODO: Remove the zone->collectable check when every
	 * zone using foreign memory is properly tagged with allows_foreign
	 */
	if (zone->collectable && !zone->allows_foreign) {
		/*  check if addr is from zone map */
		if (addr >= zone_map_min_address &&
		   (addr + obj_size - 1) < zone_map_max_address)
			return TRUE;

		return FALSE;
	}

	return TRUE;
}

static inline boolean_t
is_sane_zone_page_metadata(zone_t	zone,
                           vm_offset_t	page_meta)
{
	/* NULL page metadata structures are invalid */
	if (page_meta == 0)
		return FALSE;
	return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata));
}

static inline boolean_t
is_sane_zone_element(zone_t	zone,
                     vm_offset_t addr)
{
	/*  NULL is OK because it indicates the tail of the list */
	if (addr == 0)
		return TRUE;
	return is_sane_zone_ptr(zone, addr, zone->elem_size);
}
/* Someone wrote to freed memory. */
static inline void /* noreturn */
zone_element_was_modified_panic(zone_t	      zone,
                                vm_offset_t   element,
                                vm_offset_t   found,
                                vm_offset_t   expected,
                                vm_offset_t   offset)
{
	panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p",
	      zone->zone_name,
	      (void *)   expected,
	      (void *)   found,
	      (void *)   (expected ^ found),
	      (uint32_t) offset,
	      (uint32_t) zone->elem_size,
	      (void *)   element,
	      (void *)   zp_nopoison_cookie,
	      (void *)   zp_poisoned_cookie);
}
/*
 * The primary and backup pointers don't match.
 * Determine which one was likely the corrupted pointer, find out what it
 * probably should have been, and panic.
 * I would like to mark this as noreturn, but panic() isn't marked noreturn.
 */
static void /* noreturn */
backup_ptr_mismatch_panic(zone_t	zone,
                          vm_offset_t	element,
                          vm_offset_t	primary,
                          vm_offset_t	backup)
{
	vm_offset_t likely_backup;
	vm_offset_t likely_primary;

	likely_primary = primary ^ zp_nopoison_cookie;
	boolean_t   sane_backup;
	boolean_t   sane_primary = is_sane_zone_element(zone, likely_primary);
	boolean_t   element_was_poisoned = (backup & 0x1) ? TRUE : FALSE;

#if defined(__LP64__)
	/* We can inspect the tag in the upper bits for additional confirmation */
	if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000)
		element_was_poisoned = TRUE;
	else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
		element_was_poisoned = FALSE;
#endif

	if (element_was_poisoned) {
		likely_backup = backup ^ zp_poisoned_cookie;
		sane_backup = is_sane_zone_element(zone, likely_backup);
	} else {
		likely_backup = backup ^ zp_nopoison_cookie;
		sane_backup = is_sane_zone_element(zone, likely_backup);
	}

	/* The primary is definitely the corrupted one */
	if (!sane_primary && sane_backup)
		zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);

	/* The backup is definitely the corrupted one */
	if (sane_primary && !sane_backup)
		zone_element_was_modified_panic(zone, element, backup,
		                                (likely_primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)),
		                                zone->elem_size - sizeof(vm_offset_t));

	/*
	 * Not sure which is the corrupted one.
	 * It's less likely that the backup pointer was overwritten with
	 * ( (sane address) ^ (valid cookie) ), so we'll guess that the
	 * primary pointer has been overwritten with a sane but incorrect address.
	 */
	if (sane_primary && sane_backup)
		zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);

	/* Neither are sane, so just guess. */
	zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);
}
/*
 * Adds the element to the head of the zone's free list
 * Keeps a backup next-pointer at the end of the element
 */
static inline void
free_to_zone(zone_t	 zone,
             vm_offset_t element,
             boolean_t	 poison)
{
	vm_offset_t old_head;
	struct zone_page_metadata *page_meta;

	vm_offset_t *primary  = (vm_offset_t *) element;
	vm_offset_t *backup   = get_backup_ptr(zone->elem_size, primary);

	page_meta = get_zone_page_metadata((struct zone_free_element *)element, FALSE);
	assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
	old_head = (vm_offset_t)page_metadata_get_freelist(page_meta);

	if (__improbable(!is_sane_zone_element(zone, old_head)))
		panic("zfree: invalid head pointer %p for freelist of zone %s\n",
		      (void *) old_head, zone->zone_name);

	if (__improbable(!is_sane_zone_element(zone, element)))
		panic("zfree: freeing invalid pointer %p to zone %s\n",
		      (void *) element, zone->zone_name);

	/*
	 * Always write a redundant next pointer
	 * So that it is more difficult to forge, xor it with a random cookie
	 * A poisoned element is indicated by using zp_poisoned_cookie
	 * instead of zp_nopoison_cookie
	 */

	*backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);

	/*
	 * Insert this element at the head of the free list. We also xor the
	 * primary pointer with the zp_nopoison_cookie to make sure a free
	 * element does not provide the location of the next free element directly.
	 */
	*primary = old_head ^ zp_nopoison_cookie;
	page_metadata_set_freelist(page_meta, (struct zone_free_element *)element);
	page_meta->free_count++;
	if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
		if (page_meta->free_count == 1) {
			/* first foreign element freed on page, move from all_used */
			re_queue_tail(&zone->pages.any_free_foreign, &(page_meta->pages));
		} else {
			/* no other list transitions */
		}
	} else if (page_meta->free_count == get_metadata_alloc_count(page_meta)) {
		/* whether the page was on the intermediate or all_used queue, move it to free */
		re_queue_tail(&zone->pages.all_free, &(page_meta->pages));
		zone->count_all_free_pages += page_meta->page_count;
	} else if (page_meta->free_count == 1) {
		/* first free element on page, move from all_used */
		re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
	}

#if KASAN_ZALLOC
	kasan_poison_range(element, zone->elem_size, ASAN_HEAP_FREED);
#endif
}
/*
 * Removes an element from the zone's free list, returning 0 if the free list is empty.
 * Verifies that the next-pointer and backup next-pointer are intact,
 * and verifies that a poisoned element hasn't been modified.
 */
static inline vm_offset_t
try_alloc_from_zone(zone_t zone,
                    vm_tag_t tag __unused,
                    boolean_t* check_poison)
{
	vm_offset_t  element;
	struct zone_page_metadata *page_meta;

	*check_poison = FALSE;

	/* if zone is empty, bail */
	if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
		page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
	else if (!queue_empty(&zone->pages.intermediate))
		page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
	else if (!queue_empty(&zone->pages.all_free)) {
		page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
		assert(zone->count_all_free_pages >= page_meta->page_count);
		zone->count_all_free_pages -= page_meta->page_count;
	} else {
		return 0;
	}

	/* Check if page_meta passes is_sane_zone_element */
	if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
		panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
			(void *) page_meta, zone->zone_name);
	assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
	element = (vm_offset_t)page_metadata_get_freelist(page_meta);

	if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size)))
		panic("zfree: invalid head pointer %p for freelist of zone %s\n",
		      (void *) element, zone->zone_name);

	vm_offset_t *primary = (vm_offset_t *) element;
	vm_offset_t *backup  = get_backup_ptr(zone->elem_size, primary);

	/*
	 * Since the primary next pointer is xor'ed with zp_nopoison_cookie
	 * for obfuscation, retrieve the original value back
	 */
	vm_offset_t  next_element          = *primary ^ zp_nopoison_cookie;
	vm_offset_t  next_element_primary  = *primary;
	vm_offset_t  next_element_backup   = *backup;

	/*
	 * backup_ptr_mismatch_panic will determine what next_element
	 * should have been, and print it appropriately
	 */
	if (__improbable(!is_sane_zone_element(zone, next_element)))
		backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);

	/* Check the backup pointer for the regular cookie */
	if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {

		/* Check for the poisoned cookie instead */
		if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie)))
			/* Neither cookie is valid, corruption has occurred */
			backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);

		/*
		 * Element was marked as poisoned, so check its integrity before using it.
		 */
		*check_poison = TRUE;
	}

	/* Make sure the page_meta is at the correct offset from the start of page */
	if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE)))
		panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n",
			page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE));

	/* Make sure next_element belongs to the same page as page_meta */
	if (next_element != 0) {
		if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE)))
			panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
				(void *)next_element, (void *)element, zone->zone_name);
	}

	/* Remove this element from the free list */
	page_metadata_set_freelist(page_meta, (struct zone_free_element *)next_element);
	page_meta->free_count--;

	if (page_meta->free_count == 0) {
		/* move to all used */
		re_queue_tail(&zone->pages.all_used, &(page_meta->pages));
	} else {
		if (!zone->allows_foreign || from_zone_map(element, zone->elem_size)) {
			if (get_metadata_alloc_count(page_meta) == page_meta->free_count + 1) {
				/* remove from free, move to intermediate */
				re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
			}
		}
	}

#if VM_MAX_TAG_ZONES
	if (__improbable(zone->tags)) {
		// set the tag with b0 clear so the block remains inuse
		ZTAG(zone, element)[0] = (tag << 1);
	}
#endif /* VM_MAX_TAG_ZONES */

#if KASAN_ZALLOC
	kasan_poison_range(element, zone->elem_size, ASAN_VALID);
#endif

	return element;
}
/*
 * End of zone poisoning
 */

#define ZINFO_SLOTS	MAX_ZONES	/* for now */

zone_t		zone_find_largest(void);

/*
 *	Async allocation of zones
 *	This mechanism allows for bootstrapping an empty zone which is setup with
 *	non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call
 *	to zalloc_async. We perform a zalloc() (which may block) and then an immediate free.
 *	This will prime the zone for the next use.
 *
 *	Currently the thread_callout function (zalloc_async) will loop through all zones
 *	looking for any zone with async_pending set and do the work for it.
 *
 *	NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call,
 *	then zalloc_noblock to an empty zone may succeed.
 */
void		zalloc_async(
				thread_call_param_t	p0,
				thread_call_param_t	p1);

static thread_call_data_t call_async_alloc;
/*
 * Align elements that use the zone page list to 32 byte boundaries.
 */
#define ZONE_ELEMENT_ALIGNMENT 32

#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone)				\
	(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN_ALWAYS, (event_t)(zone), THREAD_UNINT);

/*
 *	The zone_locks_grp allows for collecting lock statistics.
 *	All locks are associated to this group in zinit.
 *	Look at tools/lockstat for debugging lock contention.
 */

lck_grp_t	zone_locks_grp;
lck_grp_attr_t	zone_locks_grp_attr;

#define lock_zone_init(zone)					\
MACRO_BEGIN							\
	lck_attr_setdefault(&(zone)->lock_attr);		\
	lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext,	\
	    &zone_locks_grp, &(zone)->lock_attr);		\
MACRO_END

#define lock_try_zone(zone)	lck_mtx_try_lock_spin(&zone->lock)

/*
 *	Exclude more than one concurrent garbage collection
 */
decl_lck_mtx_data(, zone_gc_lock)

lck_attr_t	zone_gc_lck_attr;
lck_grp_t	zone_gc_lck_grp;
lck_grp_attr_t	zone_gc_lck_grp_attr;
lck_mtx_ext_t	zone_gc_lck_ext;

boolean_t zone_gc_allowed = TRUE;
boolean_t panic_include_zprint = FALSE;

mach_memory_info_t *panic_kext_memory_info = NULL;
vm_size_t panic_kext_memory_size = 0;

#define ZALLOC_DEBUG_ZONEGC	0x00000001
#define ZALLOC_DEBUG_ZCRAM	0x00000002
uint32_t zalloc_debug = 0;
/*
 * Zone leak debugging code
 *
 * When enabled, this code keeps a log to track allocations to a particular zone that have not
 * yet been freed.  Examining this log will reveal the source of a zone leak.  The log is allocated
 * only when logging is enabled, so there is no effect on the system when it's turned off.  Logging is
 * off by default.
 *
 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
 * is the name of the zone you wish to log.
 *
 * This code only tracks one zone, so you need to identify which one is leaking first.
 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
 * garbage collector.  Note that the zone name printed in the panic message is not necessarily the one
 * containing the leak.  So do a zprint from gdb and locate the zone with the bloated size.  This
 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test.  The
 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
 * See the help in the kgmacros for usage info.
 *
 *
 * Zone corruption logging
 *
 * Logging can also be used to help identify the source of a zone corruption.  First, identify the zone
 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args.  When -zc is used in conjunction
 * with zlog, it changes the logging style to track both allocations and frees to the zone.  So when the
 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
 * and freed any particular element in the zone.  Use the findelem kgmacro with the address of the element that's been
 * corrupted to examine its history.  This should lead to the source of the corruption.
 */
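/*
 * For example (hypothetical zone name), the following boot-args track both
 * allocations and frees in kalloc.64 with a 4000-record log:
 *
 *	zlog=kalloc.64 -zc zrecs=4000
 */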
static boolean_t log_records_init = FALSE;
static int log_records;	/* size of the log, expressed in number of records */

#define MAX_NUM_ZONES_ALLOWED_LOGGING	10 /* Maximum 10 zones can be logged at once */

static int  max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING;
static int  num_zones_logged = 0;

static char zone_name_to_log[MAX_ZONE_NAME] = "";	/* the zone name we're logging, if any */

/* Log allocations and frees to help debug a zone element corruption */
boolean_t	corruption_debug_flag	= FALSE;	/* enabled by "-zc" boot-arg */
/* Making pointer scanning leaks detection possible for all zones */

#if DEBUG || DEVELOPMENT
boolean_t	leak_scan_debug_flag	= FALSE;	/* enabled by "-zl" boot-arg */
#endif /* DEBUG || DEVELOPMENT */

/*
 * The number of records in the log is configurable via the zrecs parameter in boot-args.  Set this to
 * the number of records you want in the log.  For example, "zrecs=10" sets it to 10 records.  Since this
 * is the number of stacks suspected of leaking, we don't need many records.
 */

#if	defined(__LP64__)
#define ZRECORDS_MAX		2560		/* Max records allowed in the log */
#else
#define ZRECORDS_MAX		1536		/* Max records allowed in the log */
#endif
#define ZRECORDS_DEFAULT	1024		/* default records in log if zrecs is not specified in boot-args */

/*
 * Each record in the log contains a pointer to the zone element it refers to,
 * and a small array to hold the pc's from the stack trace.  A
 * record is added to the log each time a zalloc() is done in the zone_of_interest.  For leak debugging,
 * the record is cleared when a zfree() is done.  For corruption debugging, the log tracks both allocs and frees.
 * If the log fills, old records are replaced as if it were a circular buffer.
 */

/*
 * Opcodes for the btlog operation field:
 */
#define ZOP_ALLOC	1
#define ZOP_FREE	0
/*
 * Decide if we want to log this zone by doing a string compare between a zone name and the name
 * of the zone to log. Return true if the strings are equal, false otherwise.  Because it's not
 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
 * match a space in the zone name.
 */

boolean_t
track_this_zone(const char *zonename, const char *logname)
{
	unsigned int len;
	const char *zc = zonename;
	const char *lc = logname;

	/*
	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
	 */

	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {

		/*
		 * If the current characters don't match, check for a space in
		 * the zone name and a corresponding period in the log name.
		 * If that's not there, then the strings don't match.
		 */

		if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
			break;

		/*
		 * The strings are equal so far.  If we're at the end, then it's a match.
		 */

		if (*zc == '\0')
			return TRUE;
	}

	return FALSE;
}
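/*
 * For example, track_this_zone("vm objects", "vm.objects") returns TRUE:
 * the period in the boot-arg name stands in for the space, which cannot be
 * typed in boot-args.
 */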
/*
 * Test if we want to log this zalloc/zfree event.  We log if this is the zone we're interested in and
 * the buffer for the records has been allocated.
 */

#define DO_LOGGING(z)		(z->zone_logging == TRUE && z->zlog_btlog)

extern boolean_t kmem_alloc_ready;
#pragma mark Zone Leak Detection

/*
 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
 * allocations made by the zone allocator.  Every zleak_sample_factor allocations in each zone, we capture a
 * backtrace.  Every free, we examine the table and determine if the allocation was being tracked,
 * and stop tracking it if it was being tracked.
 *
 * We track the allocations in the zallocations hash table, which stores the address that was returned from
 * the zone allocator.  Each stored entry in the zallocations table points to an entry in the ztraces table, which
 * stores the backtrace associated with that allocation.  This provides uniquing for the relatively large
 * backtraces - we don't store them more than once.
 *
 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
 * a large amount of virtual space.
 */
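/*
 * A sketch of the resulting lookup flow (hashaddr() and hashbacktrace() are
 * the power-of-two masking hashes used by zleak_log() below):
 *
 *	alloc:	zallocations[hashaddr(addr, zleak_alloc_buckets)]
 *	          .za_trace_index --> ztraces[...]   (zt_size counts bytes)
 *	free:	look up the same zallocations bucket; if za_element == addr,
 *	        subtract the allocation's size from the owning trace and
 *	        release the bucket.
 */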
#define ZLEAK_STATE_ENABLED		0x01	/* Zone leak monitoring should be turned on if zone_map fills up. */
#define ZLEAK_STATE_ACTIVE		0x02	/* We are actively collecting traces. */
#define ZLEAK_STATE_ACTIVATING		0x04	/* Some thread is doing setup; others should move along. */
#define ZLEAK_STATE_FAILED		0x08	/* Attempt to allocate tables failed.  We will not try again. */
uint32_t	zleak_state = 0;		/* State of collection, as above */

boolean_t	panic_include_ztrace	= FALSE;	/* Enable zleak logging on panic */
vm_size_t	zleak_global_tracking_threshold;	/* Size of zone map at which to start collecting data */
vm_size_t	zleak_per_zone_tracking_threshold;	/* Size a zone will have before we will collect data on it */
unsigned int	zleak_sample_factor	= 1000;		/* Allocations per sample attempt */

/*
 * Counters for allocation statistics.
 */

/* Times two active records want to occupy the same spot */
unsigned int z_alloc_collisions = 0;
unsigned int z_trace_collisions = 0;

/* Times a new record lands on a spot previously occupied by a freed allocation */
unsigned int z_alloc_overwrites = 0;
unsigned int z_trace_overwrites = 0;

/* Times a new alloc or trace is put into the hash table */
unsigned int z_alloc_recorded	= 0;
unsigned int z_trace_recorded	= 0;

/* Times zleak_log returned false due to not being able to acquire the lock */
unsigned int z_total_conflicts	= 0;

#pragma mark struct zallocation
/*
 * Structure for keeping track of an allocation
 * An allocation bucket is in use if its element is not NULL
 */
struct zallocation {
	uintptr_t		za_element;	/* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
	vm_size_t		za_size;	/* how much memory did this allocation take up? */
	uint32_t		za_trace_index;	/* index into ztraces for backtrace associated with allocation */
	/* TODO: #if this out */
	uint32_t		za_hit_count;	/* for determining effectiveness of hash function */
};

/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;

vm_size_t zleak_max_zonemap_size;

/* Hashmaps of allocations and their corresponding traces */
static struct zallocation*	zallocations;
static struct ztrace*		ztraces;

/* not static so that panic can see this, see kern/debug.c */
struct ztrace*			top_ztrace;

/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
static lck_spin_t		zleak_lock;
static lck_attr_t		zleak_lock_attr;
static lck_grp_t		zleak_lock_grp;
static lck_grp_attr_t		zleak_lock_grp_attr;
/*
 * Initializes the zone leak monitor.  Called from zone_init()
 */
void
zleak_init(vm_size_t max_zonemap_size)
{
	char			scratch_buf[16];
	boolean_t		zleak_enable_flag = FALSE;

	zleak_max_zonemap_size = max_zonemap_size;
	zleak_global_tracking_threshold = max_zonemap_size / 2;
	zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;

#if CONFIG_EMBEDDED
	if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	} else {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	}
#else /* CONFIG_EMBEDDED */
	/* -zleakoff (flag to disable zone leak monitor) */
	if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	} else {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	}
#endif /* CONFIG_EMBEDDED */

	/* zfactor=XXXX (override how often to sample the zone allocator) */
	if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
		printf("Zone leak factor override: %u\n", zleak_sample_factor);
	}

	/* zleak-allocs=XXXX (override number of buckets in zallocations) */
	if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
		printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!\n");
		}
	}

	/* zleak-traces=XXXX (override number of buckets in ztraces) */
	if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
		printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!\n");
		}
	}

	/* allocate the zleak_lock */
	lck_grp_attr_setdefault(&zleak_lock_grp_attr);
	lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
	lck_attr_setdefault(&zleak_lock_attr);
	lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);

	if (zleak_enable_flag) {
		zleak_state = ZLEAK_STATE_ENABLED;
	}
}
/*
 * Support for kern.zleak.active sysctl - a simplified
 * version of the zleak_state variable.
 */
int
get_zleak_state(void)
{
	if (zleak_state & ZLEAK_STATE_FAILED)
		return (-1);
	if (zleak_state & ZLEAK_STATE_ACTIVE)
		return (1);
	return (0);
}
kern_return_t
zleak_activate(void)
{
	kern_return_t retval;
	vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
	vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
	void *allocations_ptr = NULL;
	void *traces_ptr = NULL;

	/* Only one thread attempts to activate at a time */
	if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
		return KERN_SUCCESS;
	}

	/* Indicate that we're doing the setup */
	lck_spin_lock(&zleak_lock);
	if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
		lck_spin_unlock(&zleak_lock);
		return KERN_SUCCESS;
	}

	zleak_state |= ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	/* Allocate and zero tables */
	retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_OSFMK);
	if (retval != KERN_SUCCESS) {
		goto fail;
	}

	retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_OSFMK);
	if (retval != KERN_SUCCESS) {
		goto fail;
	}

	bzero(allocations_ptr, z_alloc_size);
	bzero(traces_ptr, z_trace_size);

	/* Everything's set.  Install tables, mark active. */
	zallocations = allocations_ptr;
	ztraces = traces_ptr;

	/*
	 * Initialize the top_ztrace to the first entry in ztraces,
	 * so we don't have to check for null in zleak_log
	 */
	top_ztrace = &ztraces[0];

	/*
	 * Note that we do need a barrier between installing
	 * the tables and setting the active flag, because the zfree()
	 * path accesses the table without a lock if we're active.
	 */
	lck_spin_lock(&zleak_lock);
	zleak_state |= ZLEAK_STATE_ACTIVE;
	zleak_state &= ~ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	return KERN_SUCCESS;

fail:
	/*
	 * If we fail to allocate memory, don't further tax
	 * the system by trying again.
	 */
	lck_spin_lock(&zleak_lock);
	zleak_state |= ZLEAK_STATE_FAILED;
	zleak_state &= ~ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	if (allocations_ptr != NULL) {
		kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
	}

	if (traces_ptr != NULL) {
		kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
	}

	return retval;
}
/*
 * TODO: What about allocations that never get deallocated,
 * especially ones with unique backtraces? Should we wait to record
 * until after boot has completed?
 * (How many persistent zallocs are there?)
 */

/*
 * This function records the allocation in the allocations table,
 * and stores the associated backtrace in the traces table
 * (or just increments the refcount if the trace is already recorded)
 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
 * the associated trace's refcount is decremented.
 * If the trace slot is in use, it returns.
 * The refcount is incremented by the amount of memory the allocation consumes.
 * The return value indicates whether to try again next time.
 */
1840 zleak_log(uintptr_t* bt
,
1843 vm_size_t allocation_size
)
1845 /* Quit if there's someone else modifying the hash tables */
1846 if (!lck_spin_try_lock(&zleak_lock
)) {
1847 z_total_conflicts
++;
1851 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
1853 uint32_t trace_index
= hashbacktrace(bt
, depth
, zleak_trace_buckets
);
1854 struct ztrace
* trace
= &ztraces
[trace_index
];
1856 allocation
->za_hit_count
++;
1857 trace
->zt_hit_count
++;
1860 * If the allocation bucket we want to be in is occupied, and if the occupier
1861 * has the same trace as us, just bail.
1863 if (allocation
->za_element
!= (uintptr_t) 0 && trace_index
== allocation
->za_trace_index
) {
1864 z_alloc_collisions
++;
1866 lck_spin_unlock(&zleak_lock
);
1870 /* STEP 1: Store the backtrace in the traces array. */
1871 /* A size of zero indicates that the trace bucket is free. */
1873 if (trace
->zt_size
> 0 && bcmp(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t))) != 0 ) {
1875 * Different unique trace with same hash!
1876 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
1877 * and get out of the way for later chances
1879 trace
->zt_collisions
++;
1880 z_trace_collisions
++;
1882 lck_spin_unlock(&zleak_lock
);
1884 } else if (trace
->zt_size
> 0) {
1885 /* Same trace, already added, so increment refcount */
1886 trace
->zt_size
+= allocation_size
;
1888 /* Found an unused trace bucket, record the trace here! */
1889 if (trace
->zt_depth
!= 0) /* if this slot was previously used but not currently in use */
1890 z_trace_overwrites
++;
1893 trace
->zt_size
= allocation_size
;
1894 memcpy(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t)) );
1896 trace
->zt_depth
= depth
;
1897 trace
->zt_collisions
= 0;
1900 /* STEP 2: Store the allocation record in the allocations array. */
1902 if (allocation
->za_element
!= (uintptr_t) 0) {
1904 * Straight up replace any allocation record that was there. We don't want to do the work
1905 * to preserve the allocation entries that were there, because we only record a subset of the
1906 * allocations anyways.
1909 z_alloc_collisions
++;
1911 struct ztrace
* associated_trace
= &ztraces
[allocation
->za_trace_index
];
1912 /* Knock off old allocation's size, not the new allocation */
1913 associated_trace
->zt_size
-= allocation
->za_size
;
1914 } else if (allocation
->za_trace_index
!= 0) {
1915 /* Slot previously used but not currently in use */
1916 z_alloc_overwrites
++;
1919 allocation
->za_element
= addr
;
1920 allocation
->za_trace_index
= trace_index
;
1921 allocation
->za_size
= allocation_size
;
1925 if (top_ztrace
->zt_size
< trace
->zt_size
)
1928 lck_spin_unlock(&zleak_lock
);
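/*
 * Illustrative note (not in the original source): both tables are plain
 * open-addressed arrays with no chaining. A sampled allocation at address A
 * lives in exactly one slot, zallocations[hashaddr(A, zleak_alloc_buckets)],
 * and its backtrace in ztraces[hashbacktrace(bt, depth, zleak_trace_buckets)].
 * Collisions are only counted: an allocation collision overwrites the old
 * record, while a trace collision bails and waits for a later sample.
 */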
/*
 * Free the allocation record and release the stacktrace.
 * This should be as fast as possible because it will be called for every free.
 */
static void
zleak_free(uintptr_t addr,
	   vm_size_t allocation_size)
{
	if (addr == (uintptr_t) 0)
		return;

	struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];

	/* Double-checked locking: check to find out if we're interested, lock, check to make
	 * sure it hasn't changed, then modify it, and release the lock.
	 */

	if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
		/* if the allocation was the one, grab the lock, check again, then delete it */
		lck_spin_lock(&zleak_lock);

		if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
			struct ztrace *trace;

			/* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
			if (allocation->za_size != allocation_size) {
				panic("Freeing as size %lu memory that was allocated with size %lu\n",
						(uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
			}

			trace = &ztraces[allocation->za_trace_index];

			/* size of 0 indicates trace bucket is unused */
			if (trace->zt_size > 0) {
				trace->zt_size -= allocation_size;
			}

			/* A NULL element means the allocation bucket is unused */
			allocation->za_element = 0;
		}
		lck_spin_unlock(&zleak_lock);
	}
}

#endif /* CONFIG_ZLEAKS */
/* These functions outside of CONFIG_ZLEAKS because they are also used in
 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
 */

/* "Thomas Wang's 32/64 bit mix functions."  http://www.concentric.net/~Ttwang/tech/inthash.htm */
uintptr_t
hash_mix(uintptr_t x)
{
#ifndef __LP64__
	x += ~(x << 15);
	x ^=  (x >> 10);
	x +=  (x << 3);
	x ^=  (x >> 6);
	x += ~(x << 11);
	x ^=  (x >> 16);
#else
	x += ~(x << 32);
	x ^=  (x >> 22);
	x += ~(x << 13);
	x ^=  (x >> 8);
	x +=  (x << 3);
	x ^=  (x >> 15);
	x += ~(x << 27);
	x ^=  (x >> 31);
#endif
	return x;
}

uint32_t
hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
{
	uintptr_t hash = 0;
	uintptr_t mask = max_size - 1;

	while (depth) {
		hash += bt[--depth];
	}

	hash = hash_mix(hash) & mask;

	assert(hash < max_size);

	return (uint32_t) hash;
}

/*
 *  TODO: Determine how well distributed this is
 *      max_size must be a power of 2. i.e 0x10000 because 0x10000-1 is 0x0FFFF which is a great bitmask
 */
uint32_t
hashaddr(uintptr_t pt, uint32_t max_size)
{
	uintptr_t hash = 0;
	uintptr_t mask = max_size - 1;

	hash = hash_mix(pt) & mask;

	assert(hash < max_size);

	return (uint32_t) hash;
}
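/*
 * Worked example (illustrative, not in the original source): with
 * max_size = 0x1000 the mask is 0xFFF, so hashaddr() keeps only the low
 * 12 bits of the mixed hash:
 *
 *	uint32_t bucket = hashaddr((uintptr_t)elem, 0x1000);
 *	assert(bucket < 0x1000);
 *
 * This is why the zleak bucket-count boot-arg overrides must be powers of
 * two: for any other size, (max_size - 1) is not an all-ones bitmask and
 * part of the table would never be addressed.
 */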
/* End of all leak-detection code */

#define ZONE_MAX_ALLOC_SIZE	(32 * 1024)
#define ZONE_ALLOC_FRAG_PERCENT(alloc_size, ele_size) (((alloc_size % ele_size) * 100) / alloc_size)

/* Used to manage copying in of new zone names */
static vm_offset_t zone_names_start;
static vm_offset_t zone_names_next;

static vm_size_t
compute_element_size(vm_size_t requested_size)
{
	vm_size_t element_size = requested_size;

	/* Zone elements must fit both a next pointer and a backup pointer */
	vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2;
	if (element_size < minimum_element_size)
		element_size = minimum_element_size;

	/*
	 *  Round element size to a multiple of sizeof(pointer)
	 *  This also enforces that allocations will be aligned on pointer boundaries
	 */
	element_size = ((element_size - 1) + sizeof(vm_offset_t)) -
	       ((element_size - 1) % sizeof(vm_offset_t));

	return element_size;
}
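/*
 * Worked example (illustrative, not in the original source): on LP64,
 * sizeof(vm_offset_t) == 8, so compute_element_size(20) returns 24, and
 * compute_element_size(10) is first raised to the 16-byte minimum (room for
 * the next pointer plus the backup pointer). ZONE_ALLOC_FRAG_PERCENT uses
 * the same integer arithmetic: for a 24-byte element in a 4096-byte chunk,
 * 4096 % 24 = 16 wasted bytes, and (16 * 100) / 4096 truncates to 0 percent.
 */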
/*
 *	zinit initializes a new zone.  The zone data structures themselves
 *	are stored in a zone, which is initially a static structure that
 *	is initialized by zone_init.
 */

zone_t
zinit(
	vm_size_t	size,		/* the size of an element */
	vm_size_t	max,		/* maximum memory to use */
	vm_size_t	alloc,		/* allocation size */
	const char	*name)		/* a name for the zone */
{
	zone_t		z;

	size = compute_element_size(size);

	simple_lock(&all_zones_lock);

	assert(num_zones < MAX_ZONES);
	assert(num_zones_in_use <= num_zones);

	/* If possible, find a previously zdestroy'ed zone in the zone_array that we can reuse instead of initializing a new zone. */
	for (int index = bitmap_first(zone_empty_bitmap, MAX_ZONES);
			index >= 0 && index < (int)num_zones;
			index = bitmap_next(zone_empty_bitmap, index)) {
		z = &(zone_array[index]);

		/*
		 * If the zone name and the element size are the same, we can just reuse the old zone struct.
		 * Otherwise hand out a new zone from the zone_array.
		 */
		if (!strcmp(z->zone_name, name)) {
			vm_size_t old_size = z->elem_size;
#if KASAN_ZALLOC
			old_size -= z->kasan_redzone * 2;
#endif
			if (old_size == size) {
				/* Clear the empty bit for this zone, increment num_zones_in_use, and mark the zone as valid again. */
				bitmap_clear(zone_empty_bitmap, index);
				num_zones_in_use++;
				z->zone_valid = TRUE;

				/* All other state is already set up since the zone was previously in use. Return early. */
				simple_unlock(&all_zones_lock);
				return (z);
			}
		}
	}

	/* If we're here, it means we didn't find a zone above that we could simply reuse. Set up a new zone. */

	/* Clear the empty bit for the new zone */
	bitmap_clear(zone_empty_bitmap, num_zones);

	z = &(zone_array[num_zones]);
	z->index = num_zones;

	num_zones++;
	num_zones_in_use++;

	/*
	 * Initialize the zone lock here before dropping the all_zones_lock. Otherwise we could race with
	 * zalloc_async() and try to grab the zone lock before it has been initialized, causing a panic.
	 */
	lock_zone_init(z);

	simple_unlock(&all_zones_lock);

#if KASAN_ZALLOC
	/* Expand the zone allocation size to include the redzones. For page-multiple
	 * zones add a full guard page because they likely require alignment. kalloc
	 * and fakestack handles its own KASan state, so ignore those zones. */
	/* XXX: remove this when zinit_with_options() is a thing */
	const char *kalloc_name = "kalloc.";
	const char *fakestack_name = "fakestack.";
	if (strncmp(name, kalloc_name, strlen(kalloc_name)) == 0) {
		z->kasan_redzone = 0;
	} else if (strncmp(name, fakestack_name, strlen(fakestack_name)) == 0) {
		z->kasan_redzone = 0;
	} else {
		if ((size % PAGE_SIZE) != 0) {
			z->kasan_redzone = KASAN_GUARD_SIZE;
		} else {
			z->kasan_redzone = PAGE_SIZE;
		}
		max = (max / size) * (size + z->kasan_redzone * 2);
		size += z->kasan_redzone * 2;
	}
#endif

	max = round_page(max);

	vm_size_t best_alloc = PAGE_SIZE;

	if ((size % PAGE_SIZE) == 0) {
		/* zero fragmentation by definition */
		best_alloc = size;
	} else {
		vm_size_t alloc_size;
		for (alloc_size = (2 * PAGE_SIZE); alloc_size <= ZONE_MAX_ALLOC_SIZE; alloc_size += PAGE_SIZE) {
			if (ZONE_ALLOC_FRAG_PERCENT(alloc_size, size) < ZONE_ALLOC_FRAG_PERCENT(best_alloc, size)) {
				best_alloc = alloc_size;
			}
		}
	}

	alloc = best_alloc;
	if (max && (max < alloc))
		max = alloc;

	z->free_elements = NULL;
	queue_init(&z->pages.any_free_foreign);
	queue_init(&z->pages.all_free);
	queue_init(&z->pages.intermediate);
	queue_init(&z->pages.all_used);
	z->cur_size = 0;
	z->page_count = 0;
	z->max_size = max;
	z->elem_size = size;
	z->alloc_size = alloc;
	z->count = 0;
	z->countfree = 0;
	z->count_all_free_pages = 0;
	z->sum_count = 0LL;
	z->doing_alloc_without_vm_priv = FALSE;
	z->doing_alloc_with_vm_priv = FALSE;
	z->exhaustible = FALSE;
	z->collectable = TRUE;
	z->allows_foreign = FALSE;
	z->expandable = TRUE;
	z->waiting = FALSE;
	z->async_pending = FALSE;
	z->caller_acct = TRUE;
	z->noencrypt = FALSE;
	z->no_callout = FALSE;
	z->async_prio_refill = FALSE;
	z->gzalloc_exempt = FALSE;
	z->alignment_required = FALSE;
	z->zone_replenishing = FALSE;
	z->prio_refill_watermark = 0;
	z->zone_replenish_thread = NULL;
	z->zp_count = 0;
	z->kasan_quarantine = TRUE;
	z->zone_valid = TRUE;

#if CONFIG_ZLEAKS
	z->zleak_capture = 0;
	z->zleak_on = FALSE;
#endif /* CONFIG_ZLEAKS */

	/*
	 * If the VM is ready to handle kmem_alloc requests, copy the zone name passed in.
	 *
	 * Else simply maintain a pointer to the name string. The only zones we'll actually have
	 * to do this for would be the VM-related zones that are created very early on before any
	 * kexts can be loaded (unloaded). So we should be fine with just a pointer in this case.
	 */
	if (kmem_alloc_ready) {
		size_t len = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);

		if (zone_names_start == 0 || ((zone_names_next - zone_names_start) + len) > PAGE_SIZE) {
			printf("zalloc: allocating memory for zone names buffer\n");
			kern_return_t retval = kmem_alloc_kobject(kernel_map, &zone_names_start,
					PAGE_SIZE, VM_KERN_MEMORY_OSFMK);
			if (retval != KERN_SUCCESS) {
				panic("zalloc: zone_names memory allocation failed");
			}
			bzero((char *)zone_names_start, PAGE_SIZE);
			zone_names_next = zone_names_start;
		}

		strlcpy((char *)zone_names_next, name, len);
		z->zone_name = (char *)zone_names_next;
		zone_names_next += len;
	} else {
		z->zone_name = name;
	}

	/*
	 * Check for and set up zone leak detection if requested via boot-args.  We recognize two
	 * boot-args:
	 *
	 *	zlog=<zone_to_log>
	 *	zrecs=<num_records_in_log>
	 *
	 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
	 * control the size of the log.  If zrecs is not specified, a default value is used.
	 */

	if (num_zones_logged < max_num_zones_to_log) {

		int		i = 1; /* zlog0 isn't allowed. */
		boolean_t	zone_logging_enabled = FALSE;
		char		zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */

		while (i <= max_num_zones_to_log) {

			snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);

			if (PE_parse_boot_argn(zlog_name, zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
				if (track_this_zone(z->zone_name, zone_name_to_log)) {
					if (z->zone_valid) {
						z->zone_logging = TRUE;
						zone_logging_enabled = TRUE;
						num_zones_logged++;
						break;
					}
				}
			}
			i++;
		}

		if (zone_logging_enabled == FALSE) {
			/*
			 * Backwards compat. with the old boot-arg used to specify single zone logging i.e. zlog
			 * Needs to happen after the newer zlogn checks because the prefix will match all the zlogn
			 * boot-args.
			 */
			if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
				if (track_this_zone(z->zone_name, zone_name_to_log)) {
					if (z->zone_valid) {
						z->zone_logging = TRUE;
						zone_logging_enabled = TRUE;
						num_zones_logged++;
					}
				}
			}
		}

		if (log_records_init == FALSE && zone_logging_enabled == TRUE) {
			if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
				/*
				 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
				 *
				 * This prevents accidentally hogging too much kernel memory and making the system
				 * unusable.
				 */

				log_records = MIN(ZRECORDS_MAX, log_records);
				log_records_init = TRUE;
			} else {
				log_records = ZRECORDS_DEFAULT;
				log_records_init = TRUE;
			}
		}

		/*
		 * If we want to log a zone, see if we need to allocate buffer space for the log.  Some vm related zones are
		 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case.  kmem_alloc_ready is set to
		 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work.  If we want to log one
		 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
		 * later on some other zone.  So note we may be allocating a buffer to log a zone other than the one being initialized
		 * right now.
		 */
		if (kmem_alloc_ready) {

			zone_t curr_zone = NULL;
			unsigned int max_zones = 0, zone_idx = 0;

			simple_lock(&all_zones_lock);
			max_zones = num_zones;
			simple_unlock(&all_zones_lock);

			for (zone_idx = 0; zone_idx < max_zones; zone_idx++) {

				curr_zone = &(zone_array[zone_idx]);

				if (!curr_zone->zone_valid) {
					continue;
				}

				/*
				 * We work with the zone unlocked here because we could end up needing the zone lock to
				 * enable logging for this zone e.g. need a VM object to allocate memory to enable logging for the
				 * VM objects zone.
				 *
				 * We don't expect these zones to be needed at this early a time in boot and so take this chance.
				 */
				if (curr_zone->zone_logging && curr_zone->zlog_btlog == NULL) {

					curr_zone->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */);

					if (curr_zone->zlog_btlog) {

						printf("zone: logging started for zone %s\n", curr_zone->zone_name);
					} else {
						printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
						curr_zone->zone_logging = FALSE;
					}
				}

			}
		}
	}

#if	CONFIG_GZALLOC
	gzalloc_zone_init(z);
#endif

	return (z);
}
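/*
 * Usage sketch (illustrative, not part of the original source; the struct
 * and sizes are hypothetical):
 *
 *	static zone_t widget_zone;
 *	widget_zone = zinit(sizeof(struct widget), 128 * 1024, PAGE_SIZE, "widgets");
 *	zone_change(widget_zone, Z_NOENCRYPT, TRUE);
 *
 * Note that zinit() recomputes the allocation size: it scans chunk sizes up
 * to ZONE_MAX_ALLOC_SIZE and keeps the one with the lowest
 * ZONE_ALLOC_FRAG_PERCENT for the rounded element size, so the alloc
 * argument is effectively advisory.
 */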
unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count;

static void zone_replenish_thread(zone_t);

/* High priority VM privileged thread used to asynchronously refill a designated
 * zone, such as the reserved VM map entry zone.
 */
__attribute__((noreturn))
static void
zone_replenish_thread(zone_t z)
{
	vm_size_t free_size;
	current_thread()->options |= TH_OPT_VMPRIV;

	for (;;) {
		lock_zone(z);
		assert(z->zone_valid);
		z->zone_replenishing = TRUE;
		assert(z->prio_refill_watermark != 0);
		while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
			assert(z->doing_alloc_without_vm_priv == FALSE);
			assert(z->doing_alloc_with_vm_priv == FALSE);
			assert(z->async_prio_refill == TRUE);

			unlock_zone(z);
			int zflags = KMA_KOBJECT | KMA_NOPAGEWAIT;
			vm_offset_t space, alloc_size;
			kern_return_t kr;

			if (vm_pool_low())
				alloc_size = round_page(z->elem_size);
			else
				alloc_size = z->alloc_size;

			if (z->noencrypt)
				zflags |= KMA_NOENCRYPT;

			/* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */
			if (is_zone_map_nearing_exhaustion()) {
				thread_wakeup((event_t) &vm_pageout_garbage_collect);
			}

			kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);

			if (kr == KERN_SUCCESS) {
				zcram(z, space, alloc_size);
			} else if (kr == KERN_RESOURCE_SHORTAGE) {
				VM_PAGE_WAIT();
			} else if (kr == KERN_NO_SPACE) {
				kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
				if (kr == KERN_SUCCESS) {
					zcram(z, space, alloc_size);
				} else {
					assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
					thread_block(THREAD_CONTINUE_NULL);
				}
			}

			lock_zone(z);
			assert(z->zone_valid);
			zone_replenish_loops++;
		}

		z->zone_replenishing = FALSE;
		/* Signal any potential throttled consumers, terminating
		 * their timer-bounded waits.
		 */
		thread_wakeup(z);

		assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
		unlock_zone(z);
		thread_block(THREAD_CONTINUE_NULL);
		zone_replenish_wakeups++;
	}
}

void
zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
	z->prio_refill_watermark = low_water_mark;

	z->async_prio_refill = TRUE;
	OSMemoryBarrier();
	kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);

	if (tres != KERN_SUCCESS) {
		panic("zone_prio_refill_configure, thread create: 0x%x", tres);
	}

	thread_deallocate(z->zone_replenish_thread);
}
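/*
 * Usage sketch (illustrative; the zone name is an assumption based on how
 * the VM layer uses this hook): a subsystem configures a replenish thread
 * by passing a low-water mark in elements, e.g.
 *
 *	zone_prio_refill_configure(vm_map_entry_reserved_zone, refill_count);
 *
 * after which the thread above keeps at least
 * (low_water_mark * elem_size) bytes free in the zone.
 */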
void
zdestroy(zone_t z)
{
	unsigned int zindex;

	assert(z != NULL);

	lock_zone(z);
	assert(z->zone_valid);

	/* Assert that the zone does not have any allocations in flight */
	assert(z->doing_alloc_without_vm_priv == FALSE);
	assert(z->doing_alloc_with_vm_priv == FALSE);
	assert(z->async_pending == FALSE);
	assert(z->waiting == FALSE);
	assert(z->async_prio_refill == FALSE);

#if !KASAN_ZALLOC
	/*
	 * Unset the valid bit. We'll hit an assert failure on further operations on this zone, until zinit() is called again.
	 * Leave the zone valid for KASan as we will see zfree's on quarantined free elements even after the zone is destroyed.
	 */
	z->zone_valid = FALSE;
#endif
	unlock_zone(z);

	/* Dump all the free elements */
	drop_free_elements(z);

#if	CONFIG_GZALLOC
	/* If the zone is gzalloc managed dump all the elements in the free cache */
	gzalloc_empty_free_cache(z);
#endif

	lock_zone(z);

	/* Assert that all counts are zero */
	assert(z->count == 0);
	assert(z->countfree == 0);
	assert(z->cur_size == 0);
	assert(z->page_count == 0);
	assert(z->count_all_free_pages == 0);

	/* Assert that all queues except the foreign queue are empty. The zone allocator doesn't know how to free up foreign memory. */
	assert(queue_empty(&z->pages.all_used));
	assert(queue_empty(&z->pages.intermediate));
	assert(queue_empty(&z->pages.all_free));

	zindex = z->index;

	unlock_zone(z);

	simple_lock(&all_zones_lock);

	assert(!bitmap_test(zone_empty_bitmap, zindex));
	/* Mark the zone as empty in the bitmap */
	bitmap_set(zone_empty_bitmap, zindex);
	num_zones_in_use--;
	assert(num_zones_in_use > 0);

	simple_unlock(&all_zones_lock);
}
/* Initialize the metadata for an allocation chunk */
static inline void
zcram_metadata_init(vm_offset_t newmem, vm_size_t size, struct zone_page_metadata *chunk_metadata)
{
	struct zone_page_metadata *page_metadata;

	/* The first page is the real metadata for this allocation chunk. We mark the others as fake metadata */
	size -= PAGE_SIZE;
	newmem += PAGE_SIZE;

	for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
		page_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE);
		assert(page_metadata != chunk_metadata);
		PAGE_METADATA_SET_ZINDEX(page_metadata, MULTIPAGE_METADATA_MAGIC);
		page_metadata_set_realmeta(page_metadata, chunk_metadata);
		page_metadata->free_count = 0;
	}
	return;
}
static void
random_free_to_zone(
			zone_t		zone,
			vm_offset_t	newmem,
			vm_offset_t	first_element_offset,
			int		element_count,
			unsigned int	*entropy_buffer)
{
	vm_offset_t	last_element_offset;
	vm_offset_t	element_addr;
	vm_size_t	elem_size;
	int		index;

	assert(element_count <= ZONE_CHUNK_MAXELEMENTS);
	elem_size = zone->elem_size;
	last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size);
	for (index = 0; index < element_count; index++) {
		assert(first_element_offset <= last_element_offset);
		if (
#if DEBUG || DEVELOPMENT
		leak_scan_debug_flag || __improbable(zone->tags) ||
#endif /* DEBUG || DEVELOPMENT */
		random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) {
			element_addr = newmem + first_element_offset;
			first_element_offset += elem_size;
		} else {
			element_addr = newmem + last_element_offset;
			last_element_offset -= elem_size;
		}
		if (element_addr != (vm_offset_t)zone) {
			zone->count++;	/* compensate for free_to_zone */
			free_to_zone(zone, element_addr, FALSE);
		}
		zone->cur_size += elem_size;
	}
}
/*
 *	Cram the given memory into the specified zone. Update the zone page count accordingly.
 */
void
zcram(
	zone_t		zone,
	vm_offset_t	newmem,
	vm_size_t	size)
{
	vm_size_t	elem_size;
	boolean_t	from_zm = FALSE;
	int		element_count;
	unsigned int	entropy_buffer[MAX_ENTROPY_PER_ZCRAM] = { 0 };

	/* Basic sanity checks */
	assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
	assert(!zone->collectable || zone->allows_foreign
		|| (from_zone_map(newmem, size)));

	elem_size = zone->elem_size;

	KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, zone->index, size);

	if (from_zone_map(newmem, size))
		from_zm = TRUE;

	if (!from_zm) {
		/* We cannot support elements larger than page size for foreign memory because we
		 * put metadata on the page itself for each page of foreign memory. We need to do
		 * this in order to be able to reach the metadata when any element is freed
		 */
		assert((zone->allows_foreign == TRUE) && (zone->elem_size <= (PAGE_SIZE - sizeof(struct zone_page_metadata))));
	}

	if (zalloc_debug & ZALLOC_DEBUG_ZCRAM)
		kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name,
				(unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size);

	ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE));

	/*
	 * Initialize the metadata for all pages. We dont need the zone lock
	 * here because we are not manipulating any zone related state yet.
	 */

	struct zone_page_metadata *chunk_metadata;
	size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);

	assert((newmem & PAGE_MASK) == 0);
	assert((size & PAGE_MASK) == 0);

	chunk_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE);
	chunk_metadata->pages.next = NULL;
	chunk_metadata->pages.prev = NULL;
	page_metadata_set_freelist(chunk_metadata, 0);
	PAGE_METADATA_SET_ZINDEX(chunk_metadata, zone->index);
	chunk_metadata->free_count = 0;
	assert((size / PAGE_SIZE) <= ZONE_CHUNK_MAXPAGES);
	chunk_metadata->page_count = (unsigned)(size / PAGE_SIZE);

	zcram_metadata_init(newmem, size, chunk_metadata);

#if VM_MAX_TAG_ZONES
	if (__improbable(zone->tags)) {
		assert(from_zm);
		ztMemoryAdd(zone, newmem, size);
	}
#endif /* VM_MAX_TAG_ZONES */

	lock_zone(zone);
	assert(zone->zone_valid);
	enqueue_tail(&zone->pages.all_used, &(chunk_metadata->pages));

	if (!from_zm) {
		/* We cannot support elements larger than page size for foreign memory because we
		 * put metadata on the page itself for each page of foreign memory. We need to do
		 * this in order to be able to reach the metadata when any element is freed
		 */
		for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
			vm_offset_t first_element_offset = 0;
			if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) {
				first_element_offset = zone_page_metadata_size;
			} else {
				first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
			}
			element_count = (int)((PAGE_SIZE - first_element_offset) / elem_size);
			random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer);
		}
	} else {
		element_count = (int)(size / elem_size);
		random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer);
	}
	unlock_zone(zone);

	KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zone->index);
}
/*
 * Fill a zone with enough memory to contain at least nelem elements.
 * Return the number of elements actually put into the zone, which may
 * be more than the caller asked for since the memory allocation is
 * rounded up to the next zone allocation size.
 */
int
zfill(
	zone_t	zone,
	int	nelem)
{
	kern_return_t	kr;
	vm_offset_t	memory;

	vm_size_t alloc_size = zone->alloc_size;
	vm_size_t elem_per_alloc = alloc_size / zone->elem_size;
	vm_size_t nalloc = (nelem + elem_per_alloc - 1) / elem_per_alloc;

	/* Don't mix-and-match zfill with foreign memory */
	assert(!zone->allows_foreign);

	/* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */
	if (is_zone_map_nearing_exhaustion()) {
		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}

	kr = kernel_memory_allocate(zone_map, &memory, nalloc * alloc_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
	if (kr != KERN_SUCCESS) {
		printf("%s: kernel_memory_allocate() of %lu bytes failed\n",
				__func__, (unsigned long)(nalloc * alloc_size));
		return 0;
	}

	for (vm_size_t i = 0; i < nalloc; i++) {
		zcram(zone, memory + i * alloc_size, alloc_size);
	}

	return (int)(nalloc * elem_per_alloc);
}
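/*
 * Usage sketch (illustrative, not in the original source): pre-populate a
 * zone during subsystem init so early allocations never have to expand it:
 *
 *	int got = zfill(widget_zone, 512);	// widget_zone is hypothetical
 *	assert(got >= 512);
 *
 * The return value can exceed the request because memory is always added in
 * whole alloc_size chunks.
 */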
/*
 *	Initialize the "zone of zones" which uses fixed memory allocated
 *	earlier in memory initialization.  zone_bootstrap is called
 *	before zone_init.
 */
void
zone_bootstrap(void)
{
	char temp_buf[16];

	if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug)))
		zalloc_debug = 0;

	/* Set up zone element poisoning */
	zp_init();

	random_bool_init(&zone_bool_gen);

	/* should zlog log to debug zone corruption instead of leaks? */
	if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
		corruption_debug_flag = TRUE;
	}

#if DEBUG || DEVELOPMENT
#if VM_MAX_TAG_ZONES
	/* enable tags for zones that ask for it */
	if (PE_parse_boot_argn("-zt", temp_buf, sizeof(temp_buf))) {
		zone_tagging_on = TRUE;
	}
#endif /* VM_MAX_TAG_ZONES */
	/* disable element location randomization in a page */
	if (PE_parse_boot_argn("-zl", temp_buf, sizeof(temp_buf))) {
		leak_scan_debug_flag = TRUE;
	}
#endif

	simple_lock_init(&all_zones_lock, 0);

	num_zones_in_use = 0;
	num_zones = 0;
	/* Mark all zones as empty */
	bitmap_full(zone_empty_bitmap, BITMAP_LEN(MAX_ZONES));
	zone_names_next = zone_names_start = 0;

#if DEBUG || DEVELOPMENT
	simple_lock_init(&zone_test_lock, 0);
#endif /* DEBUG || DEVELOPMENT */

	thread_call_setup(&call_async_alloc, zalloc_async, NULL);

	/* initializing global lock group for zones */
	lck_grp_attr_setdefault(&zone_locks_grp_attr);
	lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr);

	lck_attr_setdefault(&zone_metadata_lock_attr);
	lck_mtx_init_ext(&zone_metadata_region_lck, &zone_metadata_region_lck_ext, &zone_locks_grp, &zone_metadata_lock_attr);
}
/*
 * We're being very conservative here and picking a value of 95%. We might need to lower this if
 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
 */
#define ZONE_MAP_JETSAM_LIMIT_DEFAULT	95

/*
 * Trigger zone-map-exhaustion jetsams if the zone map is X% full, where X=zone_map_jetsam_limit.
 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
 */
unsigned int zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
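/*
 * Illustrative note (not in the original source): the limit can be tuned
 * from the boot command line, e.g. "zone_map_jetsam_limit=90" to start
 * zone-map-exhaustion jetsams once the zone map is 90% full; zone_init()
 * below only accepts values in (0, 100].
 */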
/*
 * Returns pid of the task with the largest number of VM map entries.
 */
extern pid_t find_largest_process_vm_map_entries(void);

/*
 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
 * For any other pid we try to kill that process synchronously.
 */
boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);

void get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
{
	*current_size = zone_map->size;
	*capacity = vm_map_max(zone_map) - vm_map_min(zone_map);
}

void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
{
	zone_t largest_zone = zone_find_largest();
	strlcpy(zone_name, largest_zone->zone_name, zone_name_len);
	*zone_size = largest_zone->cur_size;
}

boolean_t is_zone_map_nearing_exhaustion(void)
{
	uint64_t size = zone_map->size;
	uint64_t capacity = vm_map_max(zone_map) - vm_map_min(zone_map);
	if (size > ((capacity * zone_map_jetsam_limit) / 100)) {
		return TRUE;
	}
	return FALSE;
}

extern zone_t vm_map_entry_zone;
extern zone_t vm_object_zone;

#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98

/*
 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
 * to walk through the jetsam priority bands and kill processes.
 */
static void kill_process_in_largest_zone(void)
{
	pid_t pid = -1;
	zone_t largest_zone = zone_find_largest();

	printf("zone_map_exhaustion: Zone map size %lld, capacity %lld [jetsam limit %d%%]\n", (uint64_t)zone_map->size,
			(uint64_t)(vm_map_max(zone_map) - vm_map_min(zone_map)), zone_map_jetsam_limit);
	printf("zone_map_exhaustion: Largest zone %s, size %lu\n", largest_zone->zone_name, (uintptr_t)largest_zone->cur_size);

	/*
	 * We want to make sure we don't call this function from userspace. Or we could end up trying to synchronously kill the process
	 * whose context we're in, causing the system to hang.
	 */
	assert(current_task() == kernel_task);

	/*
	 * If vm_object_zone is the largest, check to see if the number of elements in vm_map_entry_zone is comparable. If so, consider
	 * vm_map_entry_zone as the largest. This lets us target a specific process to jetsam to quickly recover from the zone map bloat.
	 */
	if (largest_zone == vm_object_zone) {
		int vm_object_zone_count = vm_object_zone->count;
		int vm_map_entry_zone_count = vm_map_entry_zone->count;
		/* Is the VM map entries zone count >= 98% of the VM objects zone count? */
		if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
			largest_zone = vm_map_entry_zone;
			printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n", (uintptr_t)largest_zone->cur_size);
		}
	}

	/* TODO: Extend this to check for the largest process in other zones as well. */
	if (largest_zone == vm_map_entry_zone) {
		pid = find_largest_process_vm_map_entries();
	} else {
		printf("zone_map_exhaustion: Nothing to do for the largest zone [%s]. Waking up memorystatus thread.\n", largest_zone->zone_name);
	}
	if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
		printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
	}
}
/*	Global initialization of Zone Allocator.
 *	Runs after zone_bootstrap.
 */
void
zone_init(
	vm_size_t max_zonemap_size)
{
	kern_return_t		retval;
	vm_offset_t		zone_min;
	vm_offset_t		zone_max;
	vm_offset_t		zone_metadata_space;
	unsigned int		zone_pages;
	vm_map_kernel_flags_t	vmk_flags;

#if VM_MAX_TAG_ZONES
	if (zone_tagging_on) ztInit(max_zonemap_size, &zone_locks_grp);
#endif

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;
	retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
			       FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE,
			       &zone_map);

	if (retval != KERN_SUCCESS)
		panic("zone_init: kmem_suballoc failed");
	zone_max = zone_min + round_page(max_zonemap_size);

#if	CONFIG_GZALLOC
	gzalloc_init(max_zonemap_size);
#endif

	/*
	 * Setup garbage collection information:
	 */
	zone_map_min_address = zone_min;
	zone_map_max_address = zone_max;

	zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
	zone_metadata_space = round_page(zone_pages * sizeof(struct zone_page_metadata));
	retval = kernel_memory_allocate(zone_map, &zone_metadata_region_min, zone_metadata_space,
					0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK);
	if (retval != KERN_SUCCESS)
		panic("zone_init: zone_metadata_region initialization failed!");
	zone_metadata_region_max = zone_metadata_region_min + zone_metadata_space;

#if defined(__LP64__)
	/*
	 * ensure that any vm_page_t that gets created from
	 * the vm_page zone can be packed properly (see vm_page.h
	 * for the packing requirements)
	 */
	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max)
		panic("VM_PAGE_PACK_PTR failed on zone_metadata_region_max - %p", (void *)zone_metadata_region_max);

	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address)
		panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);
#endif

	lck_grp_attr_setdefault(&zone_gc_lck_grp_attr);
	lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr);
	lck_attr_setdefault(&zone_gc_lck_attr);
	lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr);

#if CONFIG_ZLEAKS
	/*
	 * Initialize the zone leak monitor
	 */
	zleak_init(max_zonemap_size);
#endif /* CONFIG_ZLEAKS */

#if VM_MAX_TAG_ZONES
	if (zone_tagging_on) vm_allocation_zones_init();
#endif

	int jetsam_limit_temp = 0;
	if (PE_parse_boot_argn("zone_map_jetsam_limit", &jetsam_limit_temp, sizeof(jetsam_limit_temp)) &&
			jetsam_limit_temp > 0 && jetsam_limit_temp <= 100)
		zone_map_jetsam_limit = jetsam_limit_temp;
}
extern volatile SInt32 kfree_nop_count;

#pragma mark zalloc_canblock

extern boolean_t early_boot_complete;

/*
 *	zalloc returns an element from the specified zone.
 */
static void *
zalloc_internal(
	zone_t	zone,
	boolean_t canblock,
	boolean_t nopagewait,
	vm_size_t
#if !VM_MAX_TAG_ZONES
	__unused
#endif
	reqsize,
	vm_tag_t  tag)
{
	vm_offset_t	addr = 0;
	kern_return_t	retval;
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];	/* used in zone leak logging and zone leak detection */
	unsigned int	numsaved = 0;
	boolean_t	zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE;
	thread_t	thr = current_thread();
	boolean_t	check_poison = FALSE;
	boolean_t	set_doing_alloc_with_vm_priv = FALSE;

#if CONFIG_ZLEAKS
	uint32_t	zleak_tracedepth = 0;	/* log this allocation if nonzero */
#endif /* CONFIG_ZLEAKS */

#if KASAN
	/*
	 * KASan uses zalloc() for fakestack, which can be called anywhere. However,
	 * we make sure these calls can never block.
	 */
	boolean_t irq_safe = FALSE;
	const char *fakestack_name = "fakestack.";
	if (strncmp(zone->zone_name, fakestack_name, strlen(fakestack_name)) == 0) {
		irq_safe = TRUE;
	}
#elif MACH_ASSERT
	/* In every other case, zalloc() from interrupt context is unsafe. */
	const boolean_t irq_safe = FALSE;
#endif

	assert(zone != ZONE_NULL);
	assert(irq_safe || ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !early_boot_complete);

#if	CONFIG_GZALLOC
	addr = gzalloc_alloc(zone, canblock);
#endif
	/*
	 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
	 */
	if (__improbable(DO_LOGGING(zone)))
		numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);

#if CONFIG_ZLEAKS
	/*
	 * Zone leak detection: capture a backtrace every zleak_sample_factor
	 * allocations in this zone.
	 */
	if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) {
		/* Avoid backtracing twice if zone logging is on */
		if (numsaved == 0)
			zleak_tracedepth = backtrace(zbt, MAX_ZTRACE_DEPTH);
		else
			zleak_tracedepth = numsaved;
	}
#endif /* CONFIG_ZLEAKS */

#if VM_MAX_TAG_ZONES
	if (__improbable(zone->tags)) vm_tag_will_update_zone(tag, zone->tag_zone_index);
#endif /* VM_MAX_TAG_ZONES */

	lock_zone(zone);
	assert(zone->zone_valid);

	if (zone->async_prio_refill && zone->zone_replenish_thread) {
		vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size));
		vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size;
		zone_replenish_wakeup = (zfreec < zrefillwm);
		zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0));

		do {
			if (zone_replenish_wakeup) {
				zone_replenish_wakeups_initiated++;
				/* Signal the potentially waiting
				 * refill thread.
				 */
				thread_wakeup(&zone->zone_replenish_thread);

				/* We don't want to wait around for zone_replenish_thread to bump up the free count
				 * if we're in zone_gc(). This keeps us from deadlocking with zone_replenish_thread.
				 */
				if (thr->options & TH_OPT_ZONE_GC)
					break;

				unlock_zone(zone);
				/* Scheduling latencies etc. may prevent
				 * the refill thread from keeping up
				 * with demand. Throttle consumers
				 * when we fall below half the
				 * watermark, unless VM privileged
				 */
				if (zone_alloc_throttle) {
					zone_replenish_throttle_count++;
					assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC);
					thread_block(THREAD_CONTINUE_NULL);
				}
				lock_zone(zone);
				assert(zone->zone_valid);
			}

			zfreec = (zone->cur_size - (zone->count * zone->elem_size));
			zrefillwm = zone->prio_refill_watermark * zone->elem_size;
			zone_replenish_wakeup = (zfreec < zrefillwm);
			zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0));

		} while (zone_alloc_throttle == TRUE);
	}

	if (__probable(addr == 0))
		addr = try_alloc_from_zone(zone, tag, &check_poison);

	/* If we're here because of zone_gc(), we didn't wait for zone_replenish_thread to finish.
	 * So we need to ensure that we did successfully grab an element. And we only need to assert
	 * this for zones that have a replenish thread configured (in this case, the Reserved VM map
	 * entries zone).
	 */
	if (thr->options & TH_OPT_ZONE_GC && zone->async_prio_refill)
		assert(addr != 0);

	while ((addr == 0) && canblock) {
		/*
		 * zone is empty, try to expand it
		 *
		 * Note that we now allow up to 2 threads (1 vm_privliged and 1 non-vm_privliged)
		 * to expand the zone concurrently...  this is necessary to avoid stalling
		 * vm_privileged threads running critical code necessary to continue compressing/swapping
		 * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads
		 * waiting to acquire free pages when the vm_page_free_count is below the
		 * vm_page_free_reserved limit.
		 */
		if ((zone->doing_alloc_without_vm_priv || zone->doing_alloc_with_vm_priv) &&
		    (((thr->options & TH_OPT_VMPRIV) == 0) || zone->doing_alloc_with_vm_priv)) {
			/*
			 * This is a non-vm_privileged thread and a non-vm_privileged or
			 * a vm_privileged thread is already expanding the zone...
			 *    OR
			 * this is a vm_privileged thread and a vm_privileged thread is
			 * already expanding the zone...
			 *
			 * In either case wait for a thread to finish, then try again.
			 */
			zone->waiting = TRUE;
			zone_sleep(zone);
		} else {
			vm_offset_t space;
			vm_size_t alloc_size;
			int retry = 0;

			if ((zone->cur_size + zone->elem_size) >
			    zone->max_size) {
				if (zone->exhaustible)
					break;
				if (zone->expandable) {
					/*
					 * We're willing to overflow certain
					 * zones, but not without complaining.
					 *
					 * This is best used in conjunction
					 * with the collectable flag. What we
					 * want is an assurance we can get the
					 * memory back, assuming there's no
					 * leak.
					 */
					zone->max_size += (zone->max_size >> 1);
				} else {
					unlock_zone(zone);

					panic_include_zprint = TRUE;
#if CONFIG_ZLEAKS
					if (zleak_state & ZLEAK_STATE_ACTIVE)
						panic_include_ztrace = TRUE;
#endif /* CONFIG_ZLEAKS */
					panic("zalloc: zone \"%s\" empty.", zone->zone_name);
				}
			}
			/*
			 * It is possible that a BG thread is refilling/expanding the zone
			 * and gets pre-empted during that operation. That blocks all other
			 * threads from making progress leading to a watchdog timeout. To
			 * avoid that, boost the thread priority using the rwlock boost
			 */
			set_thread_rwlock_boost();

			if ((thr->options & TH_OPT_VMPRIV)) {
				zone->doing_alloc_with_vm_priv = TRUE;
				set_doing_alloc_with_vm_priv = TRUE;
			} else {
				zone->doing_alloc_without_vm_priv = TRUE;
			}
			unlock_zone(zone);

			for (;;) {
				int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;

				if (vm_pool_low() || retry >= 1)
					alloc_size =
						round_page(zone->elem_size);
				else
					alloc_size = zone->alloc_size;

				if (zone->noencrypt)
					zflags |= KMA_NOENCRYPT;

				/* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */
				if (is_zone_map_nearing_exhaustion()) {
					thread_wakeup((event_t) &vm_pageout_garbage_collect);
				}

				retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
				if (retval == KERN_SUCCESS) {
#if CONFIG_ZLEAKS
					if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
						if (zone_map->size >= zleak_global_tracking_threshold) {
							kern_return_t kr;

							kr = zleak_activate();
							if (kr != KERN_SUCCESS) {
								printf("Failed to activate live zone leak debugging (%d).\n", kr);
							}
						}
					}

					if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
						if (zone->cur_size > zleak_per_zone_tracking_threshold) {
							zone->zleak_on = TRUE;
						}
					}
#endif /* CONFIG_ZLEAKS */
					zcram(zone, space, alloc_size);

					break;
				} else if (retval != KERN_RESOURCE_SHORTAGE) {
					retry++;

					if (retry == 3) {
						panic_include_zprint = TRUE;
#if CONFIG_ZLEAKS
						if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
							panic_include_ztrace = TRUE;
						}
#endif /* CONFIG_ZLEAKS */
						if (retval == KERN_NO_SPACE) {
							zone_t zone_largest = zone_find_largest();
							panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)",
							zone->zone_name, zone_largest->zone_name,
							(unsigned long)zone_largest->cur_size, zone_largest->count);
						}
						panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
					}
				} else {
					break;
				}
			}
			lock_zone(zone);
			assert(zone->zone_valid);

			if (set_doing_alloc_with_vm_priv == TRUE)
				zone->doing_alloc_with_vm_priv = FALSE;
			else
				zone->doing_alloc_without_vm_priv = FALSE;

			if (zone->waiting) {
				zone->waiting = FALSE;
				zone_wakeup(zone);
			}
			clear_thread_rwlock_boost();

			addr = try_alloc_from_zone(zone, tag, &check_poison);
			if (addr == 0 &&
			    retval == KERN_RESOURCE_SHORTAGE) {
				if (nopagewait == TRUE)
					break;	/* out of the main while loop */
				unlock_zone(zone);

				VM_PAGE_WAIT();
				lock_zone(zone);
				assert(zone->zone_valid);
			}
		}
		if (addr == 0)
			addr = try_alloc_from_zone(zone, tag, &check_poison);
	}

#if CONFIG_ZLEAKS
	/* Zone leak detection:
	 * If we're sampling this allocation, add it to the zleaks hash table.
	 */
	if (addr && zleak_tracedepth > 0) {
		/* Sampling can fail if another sample is happening at the same time in a different zone. */
		if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
			/* If it failed, roll back the counter so we sample the next allocation instead. */
			zone->zleak_capture = zleak_sample_factor;
		}
	}
#endif /* CONFIG_ZLEAKS */

	if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
		zone->async_pending = TRUE;
		unlock_zone(zone);
		thread_call_enter(&call_async_alloc);
		lock_zone(zone);
		assert(zone->zone_valid);
		addr = try_alloc_from_zone(zone, tag, &check_poison);
	}

#if VM_MAX_TAG_ZONES
	if (__improbable(zone->tags) && addr) {
		if (reqsize) reqsize = zone->elem_size - reqsize;
		vm_tag_update_zone_size(tag, zone->tag_zone_index, zone->elem_size, reqsize);
	}
#endif /* VM_MAX_TAG_ZONES */

	unlock_zone(zone);

	vm_offset_t inner_size = zone->elem_size;

	if (__improbable(DO_LOGGING(zone) && addr)) {
		btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_ALLOC, (void **)zbt, numsaved);
	}

	if (__improbable(check_poison && addr)) {
		vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1;
		vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr);

		for ( ; element_cursor < backup; element_cursor++)
			if (__improbable(*element_cursor != ZP_POISON))
				zone_element_was_modified_panic(zone,
				                                addr,
				                                *element_cursor,
				                                ZP_POISON,
				                                ((vm_offset_t)element_cursor) - addr);
	}

	if (addr) {
		/*
		 * Clear out the old next pointer and backup to avoid leaking the cookie
		 * and so that only values on the freelist have a valid cookie
		 */

		vm_offset_t *primary = (vm_offset_t *) addr;
		vm_offset_t *backup = get_backup_ptr(inner_size, primary);

		*primary = ZP_POISON;
		*backup  = ZP_POISON;

#if DEBUG || DEVELOPMENT
		if (__improbable(leak_scan_debug_flag && !(zone->elem_size & (sizeof(uintptr_t) - 1)))) {
			int count, idx;
			/* Fill element, from tail, with backtrace in reverse order */
			if (numsaved == 0) numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH);
			count = (int) (zone->elem_size / sizeof(uintptr_t));
			if (count >= numsaved) count = numsaved - 1;
			for (idx = 0; idx < count; idx++) ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1];
		}
#endif /* DEBUG || DEVELOPMENT */
	}

	TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);

#if KASAN_ZALLOC
	/* Fixup the return address to skip the redzone */
	if (zone->kasan_redzone) {
		addr = kasan_alloc(addr, zone->elem_size,
				zone->elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone);
	}
#endif

	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);

	return ((void *)addr);
}
void *
zalloc(zone_t zone)
{
	return (zalloc_internal(zone, TRUE, FALSE, 0, VM_KERN_MEMORY_NONE));
}

void *
zalloc_noblock(zone_t zone)
{
	return (zalloc_internal(zone, FALSE, FALSE, 0, VM_KERN_MEMORY_NONE));
}

void *
zalloc_nopagewait(zone_t zone)
{
	return (zalloc_internal(zone, TRUE, TRUE, 0, VM_KERN_MEMORY_NONE));
}

void *
zalloc_canblock_tag(zone_t zone, boolean_t canblock, vm_size_t reqsize, vm_tag_t tag)
{
	return (zalloc_internal(zone, canblock, FALSE, reqsize, tag));
}

void *
zalloc_canblock(zone_t zone, boolean_t canblock)
{
	return (zalloc_internal(zone, canblock, FALSE, 0, VM_KERN_MEMORY_NONE));
}
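/*
 * Usage sketch (illustrative, not in the original source): the common
 * pattern pairs zalloc() with zfree() on the same zone:
 *
 *	struct widget *w = (struct widget *)zalloc(widget_zone);  // may block
 *	...
 *	zfree(widget_zone, w);
 *
 * zalloc_noblock() and zget() return NULL instead of blocking, so callers
 * in atomic context must handle allocation failure themselves.
 */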
void
zalloc_async(
	__unused thread_call_param_t	p0,
	__unused thread_call_param_t	p1)
{
	zone_t current_z = NULL;
	unsigned int max_zones, i;
	void *elt = NULL;
	boolean_t pending = FALSE;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);
	for (i = 0; i < max_zones; i++) {
		current_z = &(zone_array[i]);

		if (current_z->no_callout == TRUE) {
			/* async_pending will never be set */
			continue;
		}

		lock_zone(current_z);
		if (current_z->zone_valid && current_z->async_pending == TRUE) {
			current_z->async_pending = FALSE;
			pending = TRUE;
		}
		unlock_zone(current_z);

		if (pending == TRUE) {
			elt = zalloc_canblock_tag(current_z, TRUE, 0, VM_KERN_MEMORY_OSFMK);
			zfree(current_z, elt);
			pending = FALSE;
		}
	}
}
/*
 *	zget returns an element from the specified zone
 *	and immediately returns nothing if there is nothing there.
 */
void *
zget(zone_t zone)
{
	return zalloc_internal(zone, FALSE, TRUE, 0, VM_KERN_MEMORY_NONE);
}

/* Keep this FALSE by default.  Large memory machines run orders of magnitude
   slower in debug mode when true.  Use debugger to enable if needed */
/* static */ boolean_t zone_check = FALSE;
static void zone_check_freelist(zone_t zone, vm_offset_t elem)
{
	struct zone_free_element *this;
	struct zone_page_metadata *thispage;

	if (zone->allows_foreign) {
		for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
			 !queue_end(&zone->pages.any_free_foreign, &(thispage->pages));
			 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
			for (this = page_metadata_get_freelist(thispage);
				 this != NULL;
				 this = this->next) {
				if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
					panic("zone_check_freelist");
			}
		}
	}
	for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
		 !queue_end(&zone->pages.all_free, &(thispage->pages));
		 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
		for (this = page_metadata_get_freelist(thispage);
			 this != NULL;
			 this = this->next) {
			if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
				panic("zone_check_freelist");
		}
	}
	for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
		 !queue_end(&zone->pages.intermediate, &(thispage->pages));
		 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
		for (this = page_metadata_get_freelist(thispage);
			 this != NULL;
			 this = this->next) {
			if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
				panic("zone_check_freelist");
		}
	}
}
void
zfree(
	zone_t	zone,
	void	*addr)
{
	vm_offset_t	elem = (vm_offset_t) addr;
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];	/* only used if zone logging is enabled via boot-args */
	unsigned int	numsaved = 0;
	boolean_t	gzfreed = FALSE;
	boolean_t	poison = FALSE;
#if VM_MAX_TAG_ZONES
	vm_tag_t tag;
#endif /* VM_MAX_TAG_ZONES */

	assert(zone != ZONE_NULL);
	DTRACE_VM2(zfree, zone_t, zone, void*, addr);
#if KASAN_ZALLOC
	/*
	 * Resize back to the real allocation size and hand off to the KASan
	 * quarantine. `addr` may then point to a different allocation.
	 */
	vm_size_t usersz = zone->elem_size - 2 * zone->kasan_redzone;
	vm_size_t sz = usersz;
	if (addr && zone->kasan_redzone) {
		kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC);
		addr = (void *)kasan_dealloc((vm_address_t)addr, &sz);
		assert(sz == zone->elem_size);
	}
	if (addr && zone->kasan_quarantine) {
		kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, &zone, usersz, true);
		if (!addr) {
			return;
		}
	}
	elem = (vm_offset_t)addr;
#endif

	/*
	 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
	 */

	if (__improbable(DO_LOGGING(zone) && corruption_debug_flag))
		numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH);

	/* Basic sanity checks */
	if (zone == ZONE_NULL || elem == (vm_offset_t)0)
		panic("zfree: NULL");

#if	CONFIG_GZALLOC
	gzfreed = gzalloc_free(zone, addr);
#endif

	if (!gzfreed) {
		struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
		if (zone != PAGE_METADATA_GET_ZONE(page_meta)) {
			panic("Element %p from zone %s caught being freed to wrong zone %s\n", addr, PAGE_METADATA_GET_ZONE(page_meta)->zone_name, zone->zone_name);
		}
	}

	TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);

	if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
		!from_zone_map(elem, zone->elem_size))) {
		panic("zfree: non-allocated memory in collectable zone!");
	}

	if ((zp_factor != 0 || zp_tiny_zone_limit != 0) && !gzfreed) {
		/*
		 * Poison the memory before it ends up on the freelist to catch
		 * use-after-free and use of uninitialized memory
		 *
		 * Always poison tiny zones' elements (limit is 0 if -no-zp is set)
		 * Also poison larger elements periodically
		 */

		vm_offset_t inner_size = zone->elem_size;

		uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale);

		if (inner_size <= zp_tiny_zone_limit)
			poison = TRUE;
		else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE)
			poison = TRUE;

		if (__improbable(poison)) {

			/* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */
			/* Poison everything but primary and backup */
			vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1;
			vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem);

			for ( ; element_cursor < backup; element_cursor++)
				*element_cursor = ZP_POISON;
		}
	}

	/*
	 * See if we're doing logging on this zone.  There are two styles of logging used depending on
	 * whether we're trying to catch a leak or corruption.  See comments above in zalloc for details.
	 */

	if (__improbable(DO_LOGGING(zone))) {
		if (corruption_debug_flag) {
			/*
			 * We're logging to catch a corruption.  Add a record of this zfree operation
			 * to log.
			 */
			btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE, (void **)zbt, numsaved);
		} else {
			/*
			 * We're logging to catch a leak. Remove any record we might have for this
			 * element since it's being freed.  Note that we may not find it if the buffer
			 * overflowed and that's OK.  Since the log is of a limited size, old records
			 * get overwritten if there are more zallocs than zfrees.
			 */
			btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr);
		}
	}

	lock_zone(zone);
	assert(zone->zone_valid);

	if (zone_check) {
		zone_check_freelist(zone, elem);
	}

	if (__probable(!gzfreed)) {
#if VM_MAX_TAG_ZONES
		if (__improbable(zone->tags)) {
			tag = (ZTAG(zone, elem)[0] >> 1);
			// set the tag with b0 clear so the block remains inuse
			ZTAG(zone, elem)[0] = 0xFFFE;
		}
#endif /* VM_MAX_TAG_ZONES */
		free_to_zone(zone, elem, poison);
	}

#if MACH_ASSERT
	if (zone->count < 0)
		panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone",
		zone->zone_name, addr);
#endif

#if CONFIG_ZLEAKS
	/*
	 * Zone leak detection: un-track the allocation
	 */
	if (zone->zleak_on) {
		zleak_free(elem, zone->elem_size);
	}
#endif /* CONFIG_ZLEAKS */

#if VM_MAX_TAG_ZONES
	if (__improbable(zone->tags) && __probable(!gzfreed)) {
		vm_tag_update_zone_size(tag, zone->tag_zone_index, -((int64_t)zone->elem_size), 0);
	}
#endif /* VM_MAX_TAG_ZONES */

	unlock_zone(zone);
}
/*	Change a zone's flags.
 *	This routine must be called immediately after zinit.
 */
void
zone_change(
	zone_t		zone,
	unsigned int	item,
	boolean_t	value)
{
	assert( zone != ZONE_NULL );
	assert( value == TRUE || value == FALSE );

	switch(item){
		case Z_NOENCRYPT:
			zone->noencrypt = value;
			break;
		case Z_EXHAUST:
			zone->exhaustible = value;
			break;
		case Z_COLLECT:
			zone->collectable = value;
			break;
		case Z_EXPAND:
			zone->expandable = value;
			break;
		case Z_FOREIGN:
			zone->allows_foreign = value;
			break;
		case Z_CALLERACCT:
			zone->caller_acct = value;
			break;
		case Z_NOCALLOUT:
			zone->no_callout = value;
			break;
		case Z_TAGS_ENABLED:
#if VM_MAX_TAG_ZONES
		{
			static int tag_zone_index;
			zone->tags = TRUE;
			zone->tags_inline = (((page_size + zone->elem_size - 1) / zone->elem_size) <= (sizeof(uint32_t) / sizeof(uint16_t)));
			zone->tag_zone_index = OSAddAtomic(1, &tag_zone_index);
		}
#endif /* VM_MAX_TAG_ZONES */
			break;
		case Z_GZALLOC_EXEMPT:
			zone->gzalloc_exempt = value;
#if	CONFIG_GZALLOC
			gzalloc_reconfigure(zone);
#endif
			break;
		case Z_ALIGNMENT_REQUIRED:
			zone->alignment_required = value;
#if KASAN_ZALLOC
			if (zone->kasan_redzone == KASAN_GUARD_SIZE) {
				/* Don't disturb alignment with the redzone for zones with
				 * specific alignment requirements. */
				zone->elem_size -= zone->kasan_redzone * 2;
				zone->kasan_redzone = 0;
			}
#endif
#if	CONFIG_GZALLOC
			gzalloc_reconfigure(zone);
#endif
			break;
		case Z_KASAN_QUARANTINE:
			zone->kasan_quarantine = value;
			break;
		default:
			panic("Zone_change: Wrong Item Type!");
			break;
	}
}
/*
 * Return the expected number of free elements in the zone.
 * This calculation will be incorrect if items are zfree'd that
 * were never zalloc'd/zget'd. The correct way to stuff memory
 * into a zone is by zcram.
 */

integer_t
zone_free_count(zone_t zone)
{
	integer_t free_count;

	lock_zone(zone);
	free_count = zone->countfree;
	unlock_zone(zone);

	assert(free_count >= 0);

	return (free_count);
}
/* Drops the elements in the free queue of a zone. Called by zone_gc() on each zone, and when a zone is zdestroy'ed. */
static void
drop_free_elements(zone_t z)
{
	vm_size_t			elt_size, size_freed;
	int				total_freed_pages = 0;
	uint64_t			old_all_free_count;
	struct zone_page_metadata	*page_meta;
	queue_head_t			page_meta_head;

	lock_zone(z);
	if (queue_empty(&z->pages.all_free)) {
		unlock_zone(z);
		return;
	}

	/*
	 * Snatch all of the free elements away from the zone.
	 */
	elt_size = z->elem_size;
	old_all_free_count = z->count_all_free_pages;
	queue_new_head(&z->pages.all_free, &page_meta_head, struct zone_page_metadata *, pages);
	queue_init(&z->pages.all_free);
	z->count_all_free_pages = 0;
	unlock_zone(z);

	/* Iterate through all elements to find out size and count of elements we snatched */
	size_freed = 0;
	queue_iterate(&page_meta_head, page_meta, struct zone_page_metadata *, pages) {
		assert(from_zone_map((vm_address_t)page_meta, sizeof(*page_meta))); /* foreign elements should be in any_free_foreign */
		size_freed += elt_size * page_meta->free_count;
	}

	/* Update the zone size and free element count */
	lock_zone(z);
	z->cur_size -= size_freed;
	z->countfree -= size_freed / elt_size;
	unlock_zone(z);

	while ((page_meta = (struct zone_page_metadata *)dequeue_head(&page_meta_head)) != NULL) {
		vm_address_t free_page_address;
		/* Free the pages for metadata and account for them */
		free_page_address = get_zone_page(page_meta);
		ZONE_PAGE_COUNT_DECR(z, page_meta->page_count);
		total_freed_pages += page_meta->page_count;
		old_all_free_count -= page_meta->page_count;
#if KASAN_ZALLOC
		kasan_poison_range(free_page_address, page_meta->page_count * PAGE_SIZE, ASAN_VALID);
#endif
#if VM_MAX_TAG_ZONES
		if (z->tags) ztMemoryRemove(z, free_page_address, (page_meta->page_count * PAGE_SIZE));
#endif /* VM_MAX_TAG_ZONES */
		kmem_free(zone_map, free_page_address, (page_meta->page_count * PAGE_SIZE));
		if (current_thread()->options & TH_OPT_ZONE_GC) {
			thread_yield_to_preemption();
		}
	}

	/* We freed all the pages from the all_free list for this zone */
	assert(old_all_free_count == 0);

	if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
		kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed / elt_size, total_freed_pages);
}
/*	Zone garbage collection
 *
 *	zone_gc will walk through all the free elements in all the
 *	zones that are marked collectable looking for reclaimable
 *	pages.  zone_gc is called by consider_zone_gc when the system
 *	begins to run out of memory.
 *
 *	We should ensure that zone_gc never blocks.
 */
void
zone_gc(boolean_t consider_jetsams)
{
	unsigned int	max_zones;
	zone_t		z;
	unsigned int	i;

	if (consider_jetsams) {
		kill_process_in_largest_zone();
		/*
		 * If we do end up jetsamming something, we need to do a zone_gc so that
		 * we can reclaim free zone elements and update the zone map size.
		 * Fall through.
		 */
	}

	lck_mtx_lock(&zone_gc_lock);

	current_thread()->options |= TH_OPT_ZONE_GC;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
		kprintf("zone_gc() starting...\n");

	for (i = 0; i < max_zones; i++) {
		z = &(zone_array[i]);
		assert(z != ZONE_NULL);

		if (!z->collectable) {
			continue;
		}

		if (queue_empty(&z->pages.all_free)) {
			continue;
		}

		drop_free_elements(z);
	}

	current_thread()->options &= ~TH_OPT_ZONE_GC;

	lck_mtx_unlock(&zone_gc_lock);
}
extern vm_offset_t kmapoff_kaddr;
extern unsigned int kmapoff_pgcnt;

/*
 *	consider_zone_gc:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_zone_gc(boolean_t consider_jetsams)
{
	if (kmapoff_kaddr != 0) {
		/*
		 * One-time reclaim of kernel_map resources we allocated in
		 * early boot.
		 */
		(void) vm_deallocate(kernel_map,
		    kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
		kmapoff_kaddr = 0;
	}

	if (zone_gc_allowed)
		zone_gc(consider_jetsams);
}
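
/*
 * Fill in a mach_zone_name_t and/or mach_zone_info_t for one zone, from a
 * snapshot of the zone structure taken under the zone lock. Either output
 * pointer may be NULL. Returns FALSE if the zone has already been
 * destroyed (!zone_valid).
 */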
static boolean_t
get_zone_info(
	zone_t			z,
	mach_zone_name_t	*zn,
	mach_zone_info_t	*zi)
{
	struct zone zcopy;

	assert(z != ZONE_NULL);
	lock_zone(z);
	if (!z->zone_valid) {
		unlock_zone(z);
		return FALSE;
	}
	zcopy = *z;
	unlock_zone(z);

	if (zn != NULL) {
		/* assuming here the name data is static */
		(void) __nosan_strlcpy(zn->mzn_name, zcopy.zone_name,
		    strlen(zcopy.zone_name)+1);
	}

	if (zi != NULL) {
		zi->mzi_count = (uint64_t)zcopy.count;
		zi->mzi_cur_size = ptoa_64(zcopy.page_count);
		zi->mzi_max_size = (uint64_t)zcopy.max_size;
		zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
		zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
		zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
		zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
		zi->mzi_collectable = 0;
		if (zcopy.collectable) {
			SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable, ((uint64_t)zcopy.count_all_free_pages * PAGE_SIZE));
			SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
		}
	}

	return TRUE;
}
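
/*
 * Per-task zone info is not supported: every parameter is unused and the
 * routine fails unconditionally. Callers should use mach_zone_info().
 */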
kern_return_t
task_zone_info(
	__unused task_t			task,
	__unused mach_zone_name_array_t	*namesp,
	__unused mach_msg_type_number_t	*namesCntp,
	__unused task_zone_info_array_t	*infop,
	__unused mach_msg_type_number_t	*infoCntp)
{
	return KERN_FAILURE;
}
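
/*
 * mach_zone_info() is a convenience wrapper: it returns only the zone
 * name/info arrays by forwarding to mach_memory_info() with the two
 * memory-info out-parameters passed as NULL.
 */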
kern_return_t
mach_zone_info(
	host_priv_t		host,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	mach_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	return (mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL));
}
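
/*
 * mach_memory_info() returns snapshots of the zone name and zone info
 * tables (and, optionally, the vm_page_diagnose() memory counters) to
 * user space. Each table is built in pageable ipc_kernel_map memory,
 * trimmed to the used size, and handed to the caller as a vm_map_copy_t.
 */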
kern_return_t
mach_memory_info(
	host_priv_t		host,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	mach_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp,
	mach_memory_info_array_t *memoryInfop,
	mach_msg_type_number_t	*memoryInfoCntp)
{
	mach_zone_name_t	*names;
	vm_offset_t		names_addr;
	vm_size_t		names_size;

	mach_zone_info_t	*info;
	vm_offset_t		info_addr;
	vm_size_t		info_size;

	mach_memory_info_t	*memory_info;
	vm_offset_t		memory_info_addr;
	vm_size_t		memory_info_size;
	vm_size_t		memory_info_vmsize;
	unsigned int		num_info;

	unsigned int		max_zones, used_zones, i;
	mach_zone_name_t	*zn;
	mach_zone_info_t	*zi;
	kern_return_t		kr;

	vm_size_t		used;
	vm_map_copy_t		copy;
	uint64_t		zones_collectable_bytes = 0;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}
	info = (mach_zone_info_t *) info_addr;

	zn = &names[0];
	zi = &info[0];

	used_zones = max_zones;
	for (i = 0; i < max_zones; i++) {
		if (!get_zone_info(&(zone_array[i]), zn, zi)) {
			used_zones--;
			continue;
		}
		zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
		zn++;
		zi++;
	}

	used = used_zones * sizeof *names;
	if (used != names_size) {
		vm_offset_t names_addr_end = names_addr + used;
		vm_size_t free_size = names_size - (round_page(names_addr_end) - names_addr);

		if (free_size >= PAGE_SIZE) {
			kmem_free(ipc_kernel_map,
				  round_page(names_addr_end), free_size);
		}
		bzero((char *) names_addr_end, round_page(names_addr_end) - names_addr_end);
	}

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)used, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = used_zones;

	used = used_zones * sizeof *info;
	if (used != info_size) {
		vm_offset_t info_addr_end = info_addr + used;
		vm_size_t free_size = info_size - (round_page(info_addr_end) - info_addr);

		if (free_size >= PAGE_SIZE) {
			kmem_free(ipc_kernel_map,
				  round_page(info_addr_end), free_size);
		}
		bzero((char *) info_addr_end, round_page(info_addr_end) - info_addr_end);
	}

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)used, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (mach_zone_info_t *) copy;
	*infoCntp = used_zones;

	num_info = 0;
	memory_info_addr = 0;

	if (memoryInfop && memoryInfoCntp)
	{
		num_info = vm_page_diagnose_estimate();
		memory_info_size = num_info * sizeof(*memory_info);
		memory_info_vmsize = round_page(memory_info_size);
		kr = kmem_alloc_pageable(ipc_kernel_map,
					 &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
					VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
		assert(kr == KERN_SUCCESS);

		memory_info = (mach_memory_info_t *) memory_info_addr;
		vm_page_diagnose(memory_info, num_info, zones_collectable_bytes);

		kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
				   (vm_map_size_t)memory_info_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*memoryInfop = (mach_memory_info_t *) copy;
		*memoryInfoCntp = num_info;
	}

	return KERN_SUCCESS;
}
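
/*
 * Look up a single zone by name and return its mach_zone_info_t.
 * Returns KERN_INVALID_ARGUMENT if no zone matches, and KERN_FAILURE if
 * the matching zone has been destroyed.
 */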
kern_return_t
mach_zone_info_for_zone(
	host_priv_t		host,
	mach_zone_name_t	name,
	mach_zone_info_t	*infop)
{
	unsigned int max_zones, i;
	zone_t zone_ptr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	if (infop == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	zone_ptr = ZONE_NULL;
	for (i = 0; i < max_zones; i++) {
		zone_t z = &(zone_array[i]);
		assert(z != ZONE_NULL);

		/* Find the requested zone by name */
		if (!strncmp(name.mzn_name, z->zone_name, strlen(z->zone_name))) {
			zone_ptr = z;
			break;
		}
	}

	/* No zones found with the requested zone name */
	if (zone_ptr == ZONE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (get_zone_info(zone_ptr, NULL, infop)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
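
/*
 * Return the name and info of the zone with the largest current size,
 * as determined by zone_find_largest().
 */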
kern_return_t
mach_zone_info_for_largest_zone(
	host_priv_t		host,
	mach_zone_name_t	*namep,
	mach_zone_info_t	*infop)
{
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	if (namep == NULL || infop == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (get_zone_info(zone_find_largest(), namep, infop)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
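
/*
 * Sum the reclaimable (all-free page) bytes across every zone, using the
 * per-zone collectable byte counts reported by get_zone_info().
 */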
uint64_t
get_zones_collectable_bytes(void)
{
	unsigned int i, max_zones;
	uint64_t zones_collectable_bytes = 0;
	mach_zone_info_t zi;

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	for (i = 0; i < max_zones; i++) {
		if (get_zone_info(&(zone_array[i]), NULL, &zi)) {
			zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
		}
	}

	return zones_collectable_bytes;
}
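
/*
 * Sanity check for the wired-memory accounting: compares the total of the
 * wired vm_page_diagnose() sites plus all zone pages against the
 * VM_KERN_COUNT_WIRED top-level counter and prints any shortfall.
 */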
#if DEBUG || DEVELOPMENT

kern_return_t
mach_memory_info_check(void)
{
	mach_memory_info_t	*memory_info;
	mach_memory_info_t	*info;
	zone_t			zone;
	unsigned int		idx, num_info, max_zones;
	vm_offset_t		memory_info_addr;
	kern_return_t		kr;
	size_t			memory_info_size, memory_info_vmsize;
	uint64_t		top_wired, zonestotal, total;

	num_info = vm_page_diagnose_estimate();
	memory_info_size = num_info * sizeof(*memory_info);
	memory_info_vmsize = round_page(memory_info_size);
	kr = kmem_alloc(kernel_map, &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_DIAG);
	assert (kr == KERN_SUCCESS);

	memory_info = (mach_memory_info_t *) memory_info_addr;
	vm_page_diagnose(memory_info, num_info, 0);

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	top_wired = total = zonestotal = 0;
	for (idx = 0; idx < max_zones; idx++)
	{
		zone = &(zone_array[idx]);
		assert(zone != ZONE_NULL);
		lock_zone(zone);
		zonestotal += ptoa_64(zone->page_count);
		unlock_zone(zone);
	}
	for (idx = 0; idx < num_info; idx++)
	{
		info = &memory_info[idx];
		if (!info->size) continue;
		if (VM_KERN_COUNT_WIRED == info->site) top_wired = info->size;
		if (VM_KERN_SITE_HIDE & info->flags) continue;
		if (!(VM_KERN_SITE_WIRED & info->flags)) continue;
		total += info->size;
	}
	total += zonestotal;

	printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n", total, top_wired, zonestotal, top_wired - total);

	kmem_free(kernel_map, memory_info_addr, memory_info_vmsize);

	return (kr);
}

#endif /* DEBUG || DEVELOPMENT */
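
/*
 * MIG interface to force a garbage collection pass over the zones.
 * Effective only on DEBUG/DEVELOPMENT kernels; on RELEASE kernels the
 * GC call compiles out and the routine simply reports success.
 */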
kern_return_t
mach_zone_force_gc(
	host_t host)
{
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

#if DEBUG || DEVELOPMENT
	consider_zone_gc(FALSE);
#endif /* DEBUG || DEVELOPMENT */
	return (KERN_SUCCESS);
}
extern unsigned int stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif
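
/*
 * Return the zone currently occupying the most memory (largest cur_size).
 * Per-zone sizes are read without the zone locks, so the result is a
 * best-effort snapshot.
 */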
zone_t
zone_find_largest(void)
{
	unsigned int	i;
	unsigned int	max_zones;
	zone_t		the_zone;
	zone_t		zone_largest;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	zone_largest = &(zone_array[0]);
	for (i = 0; i < max_zones; i++) {
		the_zone = &(zone_array[i]);
		if (the_zone->cur_size > zone_largest->cur_size) {
			zone_largest = the_zone;
		}
	}
	return zone_largest;
}
#if ZONE_DEBUG

/* should we care about locks here ? */

#define zone_in_use(z)	( z->count || z->free_elements \
			  || !queue_empty(&z->pages.all_free) \
			  || !queue_empty(&z->pages.intermediate) \
			  || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign)))

#endif	/* ZONE_DEBUG */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEBUG || DEVELOPMENT
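
/*
 * Copy the addresses of all allocated (non-free) elements on one page
 * queue into the elems array, encoding each with INSTANCE_PUT. Elements
 * found on the page freelist are dropped from the array. Returns the next
 * free slot in elems. Caller must hold the zone lock.
 */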
static uintptr_t *
zone_copy_all_allocations_inqueue(zone_t z, queue_head_t * queue, uintptr_t * elems)
{
	struct zone_page_metadata *page_meta;
	vm_offset_t free, elements;
	vm_offset_t idx, numElements, freeCount, bytesAvail, metaSize;

	queue_iterate(queue, page_meta, struct zone_page_metadata *, pages)
	{
		elements = get_zone_page(page_meta);
		bytesAvail = ptoa(page_meta->page_count);
		freeCount = 0;
		if (z->allows_foreign && !from_zone_map(elements, z->elem_size))
		{
			metaSize    = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1);
			bytesAvail -= metaSize;
			elements   += metaSize;
		}
		numElements = bytesAvail / z->elem_size;
		// construct array of all possible elements
		for (idx = 0; idx < numElements; idx++)
		{
			elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size);
		}
		// remove from the array all free elements
		free = (vm_offset_t)page_metadata_get_freelist(page_meta);
		while (free)
		{
			// find idx of free element
			for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) {}
			assert(idx < numElements);
			// remove it
			bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0]));
			numElements--;
			freeCount++;
			// next free element
			vm_offset_t *primary = (vm_offset_t *) free;
			free = *primary ^ zp_nopoison_cookie;
		}
		elems += numElements;
	}

	return (elems);
}
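
/*
 * Scan one zone (selected by name) for leaked elements and report them
 * through the caller-supplied leak_site_proc. Backtraces come from the
 * zone's btlog when available; otherwise the scan falls back to whatever
 * stale backtrace words remain inside each element.
 */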
kern_return_t
zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * refCon)
{
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];
	zone_t		zone;
	uintptr_t *	array;
	uintptr_t *	next;
	uintptr_t	element, bt;
	uint32_t	idx, count, found;
	uint32_t	btidx, btcount, nobtcount, btfound;
	uint32_t	elemSize;
	uint64_t	maxElems;
	unsigned int	max_zones;
	kern_return_t	kr;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	for (idx = 0; idx < max_zones; idx++)
	{
		if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) break;
	}
	if (idx >= max_zones) return (KERN_INVALID_NAME);
	zone = &zone_array[idx];

	elemSize = (uint32_t) zone->elem_size;
	maxElems = ptoa(zone->page_count) / elemSize;

	if ((zone->alloc_size % elemSize)
	  && !leak_scan_debug_flag) return (KERN_INVALID_CAPABILITY);

	kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
				maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != kr) return (kr);

	lock_zone(zone);

	next = array;
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next);
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next);
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next);
	count = (uint32_t)(next - array);

	unlock_zone(zone);

	zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found);
	assert(found <= count);

	for (idx = 0; idx < count; idx++)
	{
		element = array[idx];
		if (kInstanceFlagReferenced & element) continue;
		element = INSTANCE_PUT(element) & ~kInstanceFlags;
	}

	if (zone->zlog_btlog && !corruption_debug_flag)
	{
		// btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
		btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon);
	}

	for (nobtcount = idx = 0; idx < count; idx++)
	{
		element = array[idx];
		if (!element) continue;
		if (kInstanceFlagReferenced & element) continue;
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		// see if we can find any backtrace left in the element
		btcount = (typeof(btcount)) (zone->elem_size / sizeof(uintptr_t));
		if (btcount >= MAX_ZTRACE_DEPTH) btcount = MAX_ZTRACE_DEPTH - 1;
		for (btfound = btidx = 0; btidx < btcount; btidx++)
		{
			bt = ((uintptr_t *)element)[btcount - 1 - btidx];
			if (!VM_KERNEL_IS_SLID(bt)) break;
			zbt[btfound++] = bt;
		}
		if (btfound) (*proc)(refCon, 1, elemSize, &zbt[0], btfound);
		else         nobtcount++;
	}
	if (nobtcount)
	{
		// fake backtrace when we found nothing
		zbt[0] = (uintptr_t) &zalloc;
		(*proc)(refCon, nobtcount, elemSize, &zbt[0], 1);
	}

	kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));

	return (KERN_SUCCESS);
}
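
/*
 * Debugger (KDP) helper: returns TRUE if addr belongs to the zone with
 * the given name. It only resolves the element's zone and compares
 * names, making it suitable for debugger context.
 */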
boolean_t
kdp_is_in_zone(void *addr, const char *zone_name)
{
	zone_t z;
	return (zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name));
}
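
/*
 * Simple zinit()/zalloc()/zfree()/zdestroy() self test (DEBUG/DEVELOPMENT
 * only). Repeatedly creates and destroys a zone named "test_zone_sysctl"
 * and verifies the same zone structure is handed back each time.
 */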
boolean_t
run_zone_test(void)
{
	int i = 0, max_iter = 5;
	void * test_ptr;
	zone_t test_zone;

	simple_lock(&zone_test_lock);
	if (!zone_test_running) {
		zone_test_running = TRUE;
	} else {
		simple_unlock(&zone_test_lock);
		printf("run_zone_test: Test already running.\n");
		return FALSE;
	}
	simple_unlock(&zone_test_lock);

	printf("run_zone_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");

	/* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
	do {
		test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
		if (test_zone == NULL) {
			printf("run_zone_test: zinit() failed\n");
			return FALSE;
		}

#if KASAN_ZALLOC
		if (test_zone_ptr == NULL && zone_free_count(test_zone) != 0) {
#else
		if (zone_free_count(test_zone) != 0) {
#endif
			printf("run_zone_test: free count is not zero\n");
			return FALSE;
		}

		if (test_zone_ptr == NULL) {
			/* Stash the zone pointer returned on the first zinit */
			printf("run_zone_test: zone created for the first time\n");
			test_zone_ptr = test_zone;
		} else if (test_zone != test_zone_ptr) {
			printf("run_zone_test: old zone pointer and new zone pointer don't match\n");
			return FALSE;
		}

		test_ptr = zalloc(test_zone);
		if (test_ptr == NULL) {
			printf("run_zone_test: zalloc() failed\n");
			return FALSE;
		}
		zfree(test_zone, test_ptr);

		zdestroy(test_zone);
		i++;

		printf("run_zone_test: Iteration %d successful\n", i);
	} while (i < max_iter);

	printf("run_zone_test: Test passed\n");

	simple_lock(&zone_test_lock);
	zone_test_running = FALSE;
	simple_unlock(&zone_test_lock);

	return TRUE;
}

#endif /* DEBUG || DEVELOPMENT */