Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
39037602 | 2 | * Copyright (c) 2000-2016 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: kern/zalloc.c | |
60 | * Author: Avadis Tevanian, Jr. | |
61 | * | |
62 | * Zone-based memory allocator. A zone is a collection of fixed size | |
63 | * data blocks for which quick allocation/deallocation is possible. | |
64 | */ | |
65 | #include <zone_debug.h> | |
91447636 A |
66 | |
67 | #include <mach/mach_types.h> | |
68 | #include <mach/vm_param.h> | |
69 | #include <mach/kern_return.h> | |
70 | #include <mach/mach_host_server.h> | |
6d2010ae | 71 | #include <mach/task_server.h> |
91447636 A |
72 | #include <mach/machine/vm_types.h> |
73 | #include <mach_debug/zone_info.h> | |
316670eb | 74 | #include <mach/vm_map.h> |
a39ff7e2 | 75 | #include <mach/sdt.h> |
91447636 | 76 | |
5ba3f43e | 77 | #include <kern/bits.h> |
91447636 | 78 | #include <kern/kern_types.h> |
1c79356b | 79 | #include <kern/assert.h> |
39037602 | 80 | #include <kern/backtrace.h> |
91447636 | 81 | #include <kern/host.h> |
1c79356b A |
82 | #include <kern/macro_help.h> |
83 | #include <kern/sched.h> | |
b0d623f7 | 84 | #include <kern/locks.h> |
1c79356b A |
85 | #include <kern/sched_prim.h> |
86 | #include <kern/misc_protos.h> | |
0b4e3aa0 | 87 | #include <kern/thread_call.h> |
1c79356b | 88 | #include <kern/zalloc.h> |
91447636 A |
89 | #include <kern/kalloc.h> |
90 | ||
5c9f4661 A |
91 | #include <prng/random.h> |
92 | ||
91447636 A |
93 | #include <vm/pmap.h> |
94 | #include <vm/vm_map.h> | |
1c79356b | 95 | #include <vm/vm_kern.h> |
91447636 A |
96 | #include <vm/vm_page.h> |
97 | ||
316670eb A |
98 | #include <pexpert/pexpert.h> |
99 | ||
1c79356b | 100 | #include <machine/machparam.h> |
39236c6e | 101 | #include <machine/machine_routines.h> /* ml_cpu_get_info */ |
1c79356b | 102 | |
2d21ac55 | 103 | #include <libkern/OSDebug.h> |
7ddcb079 | 104 | #include <libkern/OSAtomic.h> |
2d21ac55 A |
105 | #include <sys/kdebug.h> |
106 | ||
5ba3f43e A |
107 | #include <san/kasan.h> |
108 | ||
39236c6e | 109 | /* |
39037602 | 110 | * ZONE_ALIAS_ADDR (deprecated) |
39236c6e A |
111 | */ |
112 | ||
39236c6e A |
113 | #define from_zone_map(addr, size) \ |
114 | ((vm_offset_t)(addr) >= zone_map_min_address && \ | |
115 | ((vm_offset_t)(addr) + size - 1) < zone_map_max_address ) | |
39236c6e A |
116 | |
117 | /* | |
c910b4d9 A |
118 | * Zone Corruption Debugging |
119 | * | |
39236c6e A |
120 | * We use three techniques to detect modification of a zone element |
121 | * after it's been freed. | |
316670eb | 122 | * |
39236c6e A |
123 | * (1) Check the freelist next pointer for sanity. |
124 | * (2) Store a backup of the next pointer at the end of the element, | |
125 | * and compare it to the primary next pointer when the element is allocated | |
126 | * to detect corruption of the freelist due to use-after-free bugs. | |
127 | * The backup pointer is also XORed with a per-boot random cookie. | |
128 | * (3) Poison the freed element by overwriting it with 0xdeadbeef, | |
129 | * and check for that value when the element is being reused to make sure | |
130 | * no part of the element has been modified while it was on the freelist. | |
131 | * This will also help catch read-after-frees, as code will now dereference | |
132 | * 0xdeadbeef instead of a valid but freed pointer. | |
316670eb | 133 | * |
39236c6e A |
134 | * (1) and (2) occur for every allocation and free to a zone. |
135 | * This is done to make it slightly more difficult for an attacker to | |
136 | * manipulate the freelist to behave in a specific way. | |
c910b4d9 | 137 | * |
39236c6e A |
138 | * Poisoning (3) occurs periodically for every N frees (counted per-zone) |
139 | * and on every free for zones smaller than a cacheline. If -zp | |
140 | * is passed as a boot arg, poisoning occurs for every free. | |
c910b4d9 | 141 | * |
39236c6e A |
142 | * Performance slowdown is inversely proportional to the frequency of poisoning, |
143 | * with a 4-5% hit around N=1, down to ~0.3% at N=16 and just "noise" at N=32 | |
144 | * and higher. You can expect to find a 100% reproducible bug in an average of | |
145 | * N tries, with a standard deviation of about N, but you will want to set | |
146 | * "-zp" to always poison every free if you are attempting to reproduce | |
147 | * a known bug. | |
316670eb | 148 | * |
39236c6e A |
149 | * For a more heavyweight, but finer-grained method of detecting misuse |
150 | * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c. | |
151 | * | |
152 | * Zone Corruption Logging | |
153 | * | |
154 | * You can also track where corruptions come from by using the boot-arguments | |
155 | * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later | |
156 | * in this document for more implementation and usage information. | |
157 | * | |
158 | * Zone Leak Detection | |
159 | * | |
160 | * To debug leaks of zone memory, use the zone leak detection tool 'zleaks' | |
161 | * found later in this file via the showtopztrace and showz* macros in kgmacros, | |
162 | * or use zlog without the -zc argument. | |
316670eb | 163 | * |
316670eb A |
164 | */ |
165 | ||
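A minimal sketch of the three checks described above, added for illustration only; the names toy_cookie, TOY_ELEM_WORDS, toy_free and toy_alloc are hypothetical, and the real implementations are free_to_zone() and try_alloc_from_zone() later in this file.

#include <stdint.h>
#include <stddef.h>

#define TOY_ELEM_WORDS 8                        /* hypothetical element size, in pointer-sized words */
#define TOY_POISON     ((uintptr_t)0xdeadbeef)  /* poison pattern checked on reuse */

static uintptr_t toy_cookie;                    /* stands in for the per-boot random cookie */

/* Free path: obfuscate the next pointer (1), mirror it at the end of the
 * element (2), and poison everything in between (3). */
static void
toy_free(uintptr_t elem[TOY_ELEM_WORDS], uintptr_t old_head)
{
	elem[0] = old_head ^ toy_cookie;                    /* primary next pointer */
	elem[TOY_ELEM_WORDS - 1] = old_head ^ toy_cookie;   /* backup next pointer  */
	for (size_t i = 1; i < TOY_ELEM_WORDS - 1; i++)
		elem[i] = TOY_POISON;
}

/* Alloc path: a write to the freed element shows up either as a
 * primary/backup mismatch or as a non-poison word in the body.
 * Returns the de-obfuscated next pointer, or 0 where the kernel would panic. */
static uintptr_t
toy_alloc(uintptr_t elem[TOY_ELEM_WORDS])
{
	if (elem[0] != elem[TOY_ELEM_WORDS - 1])
		return 0;                                   /* freelist pointer corrupted   */
	for (size_t i = 1; i < TOY_ELEM_WORDS - 1; i++)
		if (elem[i] != TOY_POISON)
			return 0;                           /* use-after-free write detected */
	return elem[0] ^ toy_cookie;
}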
fe8ab488 A |
166 | /* Returns TRUE if we rolled over the counter at factor */ |
167 | static inline boolean_t | |
168 | sample_counter(volatile uint32_t * count_p, uint32_t factor) | |
169 | { | |
170 | uint32_t old_count, new_count; | |
171 | boolean_t rolled_over; | |
172 | ||
173 | do { | |
174 | new_count = old_count = *count_p; | |
175 | ||
176 | if (++new_count >= factor) { | |
177 | rolled_over = TRUE; | |
178 | new_count = 0; | |
179 | } else { | |
180 | rolled_over = FALSE; | |
181 | } | |
182 | ||
183 | } while (!OSCompareAndSwap(old_count, new_count, count_p)); | |
184 | ||
185 | return rolled_over; | |
186 | } | |
316670eb | 187 | |
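As a usage note (an editorial sketch, not lifted from this file): a free path can combine sample_counter() with zp_factor to poison roughly one free in N, which is how the sampling described in the comment above is driven. toy_zp_count and toy_should_poison_this_free are hypothetical names.

/* Hypothetical per-zone rollover counter; the real zone carries its own. */
static volatile uint32_t toy_zp_count;

static boolean_t
toy_should_poison_this_free(uint32_t factor)
{
	if (factor == 0)
		return FALSE;          /* poisoning disabled (except tiny zones)   */
	if (factor == 1)
		return TRUE;           /* -zp boot-arg: poison every free          */
	/* Otherwise poison on rollover, i.e. roughly one free in 'factor'. */
	return sample_counter(&toy_zp_count, factor);
}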
39236c6e A |
188 | #if defined(__LP64__) |
189 | #define ZP_POISON 0xdeadbeefdeadbeef | |
190 | #else | |
191 | #define ZP_POISON 0xdeadbeef | |
192 | #endif | |
316670eb | 193 | |
39236c6e | 194 | #define ZP_DEFAULT_SAMPLING_FACTOR 16 |
fe8ab488 | 195 | #define ZP_DEFAULT_SCALE_FACTOR 4 |
316670eb | 196 | |
39236c6e A |
197 | /* |
198 | * A zp_factor of 0 indicates zone poisoning is disabled; |
199 | * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline). | |
200 | * Passing the -no-zp boot-arg disables even this behavior. | |
201 | * In all cases, we record and check the integrity of a backup pointer. | |
316670eb | 202 | */ |
39236c6e A |
203 | |
204 | /* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */ | |
205 | uint32_t zp_factor = 0; | |
206 | ||
fe8ab488 A |
207 | /* set by zp-scale=N boot arg, scales zp_factor by zone size */ |
208 | uint32_t zp_scale = 0; | |
209 | ||
39236c6e A |
210 | /* set in zp_init, zero indicates -no-zp boot-arg */ |
211 | vm_size_t zp_tiny_zone_limit = 0; | |
212 | ||
213 | /* initialized to a per-boot random value in zp_init */ | |
214 | uintptr_t zp_poisoned_cookie = 0; | |
215 | uintptr_t zp_nopoison_cookie = 0; | |
216 | ||
5ba3f43e A |
217 | #if VM_MAX_TAG_ZONES |
218 | boolean_t zone_tagging_on; | |
219 | #endif /* VM_MAX_TAG_ZONES */ | |
316670eb | 220 | |
5c9f4661 A |
221 | static struct bool_gen zone_bool_gen; |
222 | ||
316670eb | 223 | /* |
39236c6e A |
224 | * initialize zone poisoning |
225 | * called from zone_bootstrap before any allocations are made from zalloc | |
316670eb A |
226 | */ |
227 | static inline void | |
39236c6e A |
228 | zp_init(void) |
229 | { | |
230 | char temp_buf[16]; | |
231 | ||
232 | /* | |
233 | * Initialize backup pointer random cookie for poisoned elements | |
234 | * Try not to call early_random() back to back, it may return | |
235 | * the same value if mach_absolute_time doesn't have sufficient time | |
236 | * to tick over between calls. <rdar://problem/11597395> | |
237 | * (This is only a problem on embedded devices) | |
238 | */ | |
239 | zp_poisoned_cookie = (uintptr_t) early_random(); | |
240 | ||
241 | /* | |
242 | * Always poison zones smaller than a cacheline, | |
243 | * because it's pretty close to free | |
244 | */ | |
245 | ml_cpu_info_t cpu_info; | |
246 | ml_cpu_get_info(&cpu_info); | |
247 | zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size; | |
248 | ||
249 | zp_factor = ZP_DEFAULT_SAMPLING_FACTOR; | |
fe8ab488 | 250 | zp_scale = ZP_DEFAULT_SCALE_FACTOR; |
39236c6e A |
251 | |
252 | //TODO: Bigger permutation? | |
253 | /* | |
254 | * Permute the default factor +/- 1 to make it less predictable | |
255 | * This adds or subtracts ~4 poisoned objects per 1000 frees. | |
256 | */ | |
257 | if (zp_factor != 0) { | |
258 | uint32_t rand_bits = early_random() & 0x3; | |
259 | ||
260 | if (rand_bits == 0x1) | |
261 | zp_factor += 1; | |
262 | else if (rand_bits == 0x2) | |
263 | zp_factor -= 1; | |
264 | /* if 0x0 or 0x3, leave it alone */ | |
265 | } | |
266 | ||
267 | /* -zp: enable poisoning for every alloc and free */ | |
268 | if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) { | |
269 | zp_factor = 1; | |
270 | } | |
271 | ||
272 | /* -no-zp: disable poisoning completely even for tiny zones */ | |
273 | if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) { | |
274 | zp_factor = 0; | |
275 | zp_tiny_zone_limit = 0; | |
276 | printf("Zone poisoning disabled\n"); | |
277 | } | |
278 | ||
279 | /* zp-factor=XXXX: override how often to poison freed zone elements */ | |
280 | if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) { | |
281 | printf("Zone poisoning factor override: %u\n", zp_factor); | |
282 | } | |
283 | ||
fe8ab488 A |
284 | /* zp-scale=XXXX: override how much zone size scales zp-factor by */ |
285 | if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) { | |
286 | printf("Zone poisoning scale factor override: %u\n", zp_scale); | |
287 | } | |
288 | ||
39236c6e A |
289 | /* Initialize backup pointer random cookie for unpoisoned elements */ |
290 | zp_nopoison_cookie = (uintptr_t) early_random(); | |
291 | ||
292 | #if MACH_ASSERT | |
293 | if (zp_poisoned_cookie == zp_nopoison_cookie) | |
294 | panic("early_random() is broken: %p and %p are not random\n", | |
295 | (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie); | |
296 | #endif | |
297 | ||
298 | /* | |
299 | * Use the last bit in the backup pointer to hint poisoning state | |
300 | * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so | |
301 | * the low bits are zero. | |
302 | */ | |
303 | zp_poisoned_cookie |= (uintptr_t)0x1ULL; | |
304 | zp_nopoison_cookie &= ~((uintptr_t)0x1ULL); | |
305 | ||
306 | #if defined(__LP64__) | |
307 | /* | |
308 | * Make backup pointers more obvious in GDB for 64 bit | |
309 | * by making 0xFFFFFF... ^ cookie = 0xFACADE... |
310 | * (0xFACADE = 0xFFFFFF ^ 0x053521) | |
311 | * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011) | |
312 | * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked | |
313 | * by the sanity check, so it's OK for that part of the cookie to be predictable. | |
314 | * | |
315 | * TODO: Use #defines, xors, and shifts | |
316 | */ | |
317 | ||
318 | zp_poisoned_cookie &= 0x000000FFFFFFFFFF; | |
319 | zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */ | |
320 | ||
321 | zp_nopoison_cookie &= 0x000000FFFFFFFFFF; | |
322 | zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */ | |
323 | #endif | |
324 | } | |
325 | ||
39236c6e A |
326 | /* |
327 | * These macros are used to keep track of the number | |
328 | * of pages being used by the zone currently. The | |
5ba3f43e | 329 | * z->page_count is not protected by the zone lock. |
39236c6e A |
330 | */ |
331 | #define ZONE_PAGE_COUNT_INCR(z, count) \ | |
332 | { \ | |
333 | OSAddAtomic64(count, &(z->page_count)); \ | |
334 | } | |
335 | ||
336 | #define ZONE_PAGE_COUNT_DECR(z, count) \ | |
337 | { \ | |
338 | OSAddAtomic64(-count, &(z->page_count)); \ | |
339 | } | |
340 | ||
39037602 A |
341 | vm_map_t zone_map = VM_MAP_NULL; |
342 | ||
39236c6e A |
343 | /* for is_sane_zone_element and garbage collection */ |
344 | ||
345 | vm_offset_t zone_map_min_address = 0; /* initialized in zone_init */ | |
346 | vm_offset_t zone_map_max_address = 0; | |
347 | ||
4bd07ac2 A |
348 | /* Globals for random boolean generator for elements in free list */ |
349 | #define MAX_ENTROPY_PER_ZCRAM 4 | |
4bd07ac2 | 350 | |
39037602 A |
351 | /* VM region for all metadata structures */ |
352 | vm_offset_t zone_metadata_region_min = 0; | |
353 | vm_offset_t zone_metadata_region_max = 0; | |
354 | decl_lck_mtx_data(static ,zone_metadata_region_lck) | |
355 | lck_attr_t zone_metadata_lock_attr; | |
356 | lck_mtx_ext_t zone_metadata_region_lck_ext; | |
357 | ||
39236c6e A |
358 | /* Helpful for walking through a zone's free element list. */ |
359 | struct zone_free_element { | |
360 | struct zone_free_element *next; | |
361 | /* ... */ | |
362 | /* void *backup_ptr; */ | |
363 | }; | |
364 | ||
39037602 | 365 | /* |
5ba3f43e | 366 | * Protects zone_array, num_zones, num_zones_in_use, and zone_empty_bitmap |
39037602 A |
367 | */ |
368 | decl_simple_lock_data(, all_zones_lock) | |
5ba3f43e | 369 | unsigned int num_zones_in_use; |
39037602 A |
370 | unsigned int num_zones; |
371 | ||
5c9f4661 | 372 | #define MAX_ZONES 320 |
39037602 | 373 | struct zone zone_array[MAX_ZONES]; |
39037602 | 374 | |
5ba3f43e A |
375 | /* Used to keep track of empty slots in the zone_array */ |
376 | bitmap_t zone_empty_bitmap[BITMAP_LEN(MAX_ZONES)]; | |
377 | ||
378 | #if DEBUG || DEVELOPMENT | |
379 | /* | |
380 | * Used for sysctl kern.run_zone_test which is not thread-safe. Ensure only one thread goes through at a time. | |
381 | * Otherwise we can end up with multiple test zones (if a second zinit() comes through before zdestroy()), which could lead us to |
382 | * run out of zones. | |
383 | */ | |
384 | decl_simple_lock_data(, zone_test_lock) | |
385 | static boolean_t zone_test_running = FALSE; | |
386 | static zone_t test_zone_ptr = NULL; | |
387 | #endif /* DEBUG || DEVELOPMENT */ | |
39037602 A |
388 | |
389 | #define PAGE_METADATA_GET_ZINDEX(page_meta) \ | |
390 | (page_meta->zindex) | |
391 | ||
392 | #define PAGE_METADATA_GET_ZONE(page_meta) \ | |
393 | (&(zone_array[page_meta->zindex])) | |
394 | ||
395 | #define PAGE_METADATA_SET_ZINDEX(page_meta, index) \ | |
396 | page_meta->zindex = (index); | |
397 | ||
39236c6e | 398 | struct zone_page_metadata { |
39037602 A |
399 | queue_chain_t pages; /* linkage pointer for metadata lists */ |
400 | ||
401 | /* Union for maintaining start of element free list and real metadata (for multipage allocations) */ | |
402 | union { | |
403 | /* | |
404 | * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because | |
405 | * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset | |
406 | * from start of the allocation chunk to free element list head. | |
407 | */ | |
408 | uint32_t freelist_offset; | |
409 | /* | |
410 | * This field is used to look up the real metadata for multipage allocations, where we mark the |
411 | * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC. | |
412 | * Offset from this fake metadata to real metadata of allocation chunk (-ve offset). | |
413 | */ | |
414 | uint32_t real_metadata_offset; | |
415 | }; | |
416 | ||
417 | /* | |
418 | * For the first page in the allocation chunk, this represents the total number of free elements in | |
419 | * the chunk. | |
39037602 A |
420 | */ |
421 | uint16_t free_count; | |
5ba3f43e A |
422 | unsigned zindex : ZINDEX_BITS; /* Zone index within the zone_array */ |
423 | unsigned page_count : PAGECOUNT_BITS; /* Count of pages within the allocation chunk */ | |
39236c6e A |
424 | }; |
425 | ||
39037602 A |
426 | /* Macro to get page index (within zone_map) of page containing element */ |
427 | #define PAGE_INDEX_FOR_ELEMENT(element) \ | |
428 | (((vm_offset_t)trunc_page(element) - zone_map_min_address) / PAGE_SIZE) | |
429 | ||
430 | /* Macro to get metadata structure given a page index in zone_map */ | |
431 | #define PAGE_METADATA_FOR_PAGE_INDEX(index) \ | |
432 | (zone_metadata_region_min + ((index) * sizeof(struct zone_page_metadata))) | |
433 | ||
434 | /* Macro to get index (within zone_map) for given metadata */ | |
435 | #define PAGE_INDEX_FOR_METADATA(page_meta) \ | |
436 | (((vm_offset_t)page_meta - zone_metadata_region_min) / sizeof(struct zone_page_metadata)) | |
437 | ||
438 | /* Macro to get page for given page index in zone_map */ | |
439 | #define PAGE_FOR_PAGE_INDEX(index) \ | |
440 | (zone_map_min_address + (PAGE_SIZE * (index))) | |
441 | ||
442 | /* Macro to get the actual metadata for a given address */ | |
443 | #define PAGE_METADATA_FOR_ELEMENT(element) \ | |
444 | (struct zone_page_metadata *)(PAGE_METADATA_FOR_PAGE_INDEX(PAGE_INDEX_FOR_ELEMENT(element))) | |
445 | ||
446 | /* Magic value to indicate empty element free list */ | |
447 | #define PAGE_METADATA_EMPTY_FREELIST ((uint32_t)(~0)) | |
448 | ||
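An editorial sketch of how the macros above compose for an address that lies inside zone_map (the case get_zone_page_metadata() handles below); toy_meta_for_element is a hypothetical helper, not part of the original file.

static inline struct zone_page_metadata *
toy_meta_for_element(vm_offset_t element)
{
	/* Element -> page index within zone_map -> slot in the metadata region. */
	vm_offset_t page_index = PAGE_INDEX_FOR_ELEMENT(element);
	struct zone_page_metadata *meta =
	    (struct zone_page_metadata *)PAGE_METADATA_FOR_PAGE_INDEX(page_index);

	/* The mapping is invertible; page_metadata_get_freelist() below relies on
	 * this to turn the stored 32-bit freelist_offset back into a pointer. */
	assert(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(meta)) == trunc_page(element));
	return meta;
}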
a39ff7e2 | 449 | boolean_t get_zone_info(zone_t z, mach_zone_name_t *zn, mach_zone_info_t *zi); |
5ba3f43e A |
450 | boolean_t is_zone_map_nearing_exhaustion(void); |
451 | extern void vm_pageout_garbage_collect(int collect); | |
452 | ||
39037602 A |
453 | static inline void * |
454 | page_metadata_get_freelist(struct zone_page_metadata *page_meta) | |
455 | { | |
456 | assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); | |
457 | if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST) | |
458 | return NULL; | |
459 | else { | |
460 | if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) | |
461 | return (void *)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)) + page_meta->freelist_offset); | |
462 | else | |
463 | return (void *)((vm_offset_t)page_meta + page_meta->freelist_offset); | |
464 | } | |
465 | } | |
466 | ||
467 | static inline void | |
468 | page_metadata_set_freelist(struct zone_page_metadata *page_meta, void *addr) | |
469 | { | |
470 | assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); | |
471 | if (addr == NULL) | |
472 | page_meta->freelist_offset = PAGE_METADATA_EMPTY_FREELIST; | |
473 | else { | |
474 | if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) | |
475 | page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); | |
476 | else | |
477 | page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - (vm_offset_t)page_meta); | |
478 | } | |
479 | } | |
480 | ||
481 | static inline struct zone_page_metadata * | |
482 | page_metadata_get_realmeta(struct zone_page_metadata *page_meta) | |
483 | { | |
484 | assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); | |
485 | return (struct zone_page_metadata *)((vm_offset_t)page_meta - page_meta->real_metadata_offset); | |
486 | } | |
487 | ||
488 | static inline void | |
489 | page_metadata_set_realmeta(struct zone_page_metadata *page_meta, struct zone_page_metadata *real_meta) | |
490 | { | |
491 | assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); | |
492 | assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC); | |
493 | assert((vm_offset_t)page_meta > (vm_offset_t)real_meta); | |
494 | vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta; | |
495 | assert(offset <= UINT32_MAX); | |
496 | page_meta->real_metadata_offset = (uint32_t)offset; | |
497 | } | |
498 | ||
39236c6e A |
499 | /* The backup pointer is stored in the last pointer-sized location in an element. */ |
500 | static inline vm_offset_t * | |
501 | get_backup_ptr(vm_size_t elem_size, | |
502 | vm_offset_t *element) | |
503 | { | |
504 | return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t)); | |
505 | } | |
506 | ||
39037602 A |
507 | /* |
508 | * Routine to populate a page that backs metadata in the zone_metadata_region. |
509 | * Must be called without the zone lock held as it might potentially block. | |
510 | */ | |
511 | static inline void | |
512 | zone_populate_metadata_page(struct zone_page_metadata *page_meta) | |
513 | { | |
514 | vm_offset_t page_metadata_begin = trunc_page(page_meta); | |
515 | vm_offset_t page_metadata_end = trunc_page((vm_offset_t)page_meta + sizeof(struct zone_page_metadata)); | |
516 | ||
517 | for(;page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) { | |
518 | if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) | |
519 | continue; | |
520 | /* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */ | |
521 | lck_mtx_lock(&zone_metadata_region_lck); | |
522 | if (0 == pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) { | |
5ba3f43e | 523 | kern_return_t __unused ret = kernel_memory_populate(zone_map, |
39037602 A |
524 | page_metadata_begin, |
525 | PAGE_SIZE, | |
526 | KMA_KOBJECT, | |
527 | VM_KERN_MEMORY_OSFMK); | |
5ba3f43e A |
528 | |
529 | /* should not fail with the given arguments */ | |
530 | assert(ret == KERN_SUCCESS); | |
39037602 A |
531 | } |
532 | lck_mtx_unlock(&zone_metadata_region_lck); | |
533 | } | |
534 | return; | |
535 | } | |
536 | ||
537 | static inline uint16_t | |
538 | get_metadata_alloc_count(struct zone_page_metadata *page_meta) | |
539 | { | |
540 | assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); | |
541 | struct zone *z = PAGE_METADATA_GET_ZONE(page_meta); | |
542 | return ((page_meta->page_count * PAGE_SIZE) / z->elem_size); | |
543 | } | |
544 | ||
545 | /* | |
546 | * Routine to look up the metadata for any given address. |
547 | * If init is marked as TRUE, this should be called without holding the zone lock | |
548 | * since the initialization might block. | |
549 | */ | |
39236c6e | 550 | static inline struct zone_page_metadata * |
39037602 A |
551 | get_zone_page_metadata(struct zone_free_element *element, boolean_t init) |
552 | { | |
553 | struct zone_page_metadata *page_meta = 0; | |
554 | ||
555 | if (from_zone_map(element, sizeof(struct zone_free_element))) { | |
556 | page_meta = (struct zone_page_metadata *)(PAGE_METADATA_FOR_ELEMENT(element)); | |
557 | if (init) | |
558 | zone_populate_metadata_page(page_meta); | |
559 | } else { | |
560 | page_meta = (struct zone_page_metadata *)(trunc_page((vm_offset_t)element)); | |
561 | } | |
562 | if (init) | |
5ba3f43e | 563 | __nosan_bzero((char *)page_meta, sizeof(struct zone_page_metadata)); |
39037602 A |
564 | return ((PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta)); |
565 | } | |
566 | ||
567 | /* Routine to get the page for a given metadata */ | |
568 | static inline vm_offset_t | |
569 | get_zone_page(struct zone_page_metadata *page_meta) | |
39236c6e | 570 | { |
39037602 A |
571 | if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) |
572 | return (vm_offset_t)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); | |
573 | else | |
574 | return (vm_offset_t)(trunc_page(page_meta)); | |
575 | } | |
576 | ||
5ba3f43e A |
577 | /* |
578 | * ZTAGS | |
579 | */ | |
580 | ||
581 | #if VM_MAX_TAG_ZONES | |
582 | ||
583 | // for zones with tagging enabled: | |
584 | ||
585 | // calculate a pointer to the tag base entry, | |
586 | // holding either a uint32_t (the first tag offset for a page in the zone map), |
587 | // or two uint16_t tags if the page can only hold one or two elements | |
588 | ||
589 | #define ZTAGBASE(zone, element) \ | |
590 | (&((uint32_t *)zone_tagbase_min)[atop((element) - zone_map_min_address)]) | |
591 | ||
592 | // pointer to the tag for an element | |
593 | #define ZTAG(zone, element) \ | |
594 | ({ \ | |
595 | vm_tag_t * result; \ | |
596 | if ((zone)->tags_inline) { \ | |
597 | result = (vm_tag_t *) ZTAGBASE((zone), (element)); \ | |
598 | if ((page_mask & element) >= (zone)->elem_size) result++; \ | |
599 | } else { \ | |
600 | result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size]; \ | |
601 | } \ | |
602 | result; \ | |
603 | }) | |
604 | ||
605 | ||
606 | static vm_offset_t zone_tagbase_min; | |
607 | static vm_offset_t zone_tagbase_max; | |
608 | static vm_offset_t zone_tagbase_map_size; | |
609 | static vm_map_t zone_tagbase_map; | |
610 | ||
611 | static vm_offset_t zone_tags_min; | |
612 | static vm_offset_t zone_tags_max; | |
613 | static vm_offset_t zone_tags_map_size; | |
614 | static vm_map_t zone_tags_map; | |
615 | ||
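A short usage note (editorial): the stored tag keeps bit 0 clear while the element is allocated, so writers shift the tag left by one and readers shift it back, as try_alloc_from_zone() and zone_element_info() do later in this file. toy_read_element_tag is a hypothetical helper.

static inline vm_tag_t
toy_read_element_tag(zone_t zone, vm_offset_t element)
{
	/* The low bit flags free/in-use, so shift it off to recover the tag. */
	return (ZTAG(zone, element)[0] >> 1);
}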
616 | // simple heap allocator for allocating the tags for new memory | |
617 | ||
618 | decl_lck_mtx_data(,ztLock) /* heap lock */ | |
619 | enum | |
620 | { | |
621 | ztFreeIndexCount = 8, | |
622 | ztFreeIndexMax = (ztFreeIndexCount - 1), | |
623 | ztTagsPerBlock = 4 | |
624 | }; | |
625 | ||
626 | struct ztBlock | |
627 | { | |
628 | #if __LITTLE_ENDIAN__ | |
629 | uint64_t free:1, | |
630 | next:21, | |
631 | prev:21, | |
632 | size:21; | |
633 | #else | |
634 | // ztBlock needs free bit least significant | |
635 | #error !__LITTLE_ENDIAN__ | |
636 | #endif | |
637 | }; | |
638 | typedef struct ztBlock ztBlock; | |
639 | ||
640 | static ztBlock * ztBlocks; | |
641 | static uint32_t ztBlocksCount; | |
642 | static uint32_t ztBlocksFree; | |
643 | ||
644 | static uint32_t | |
645 | ztLog2up(uint32_t size) | |
646 | { | |
647 | if (1 == size) size = 0; | |
648 | else size = 32 - __builtin_clz(size - 1); | |
649 | return (size); | |
650 | } | |
651 | ||
652 | static uint32_t | |
653 | ztLog2down(uint32_t size) | |
654 | { | |
655 | size = 31 - __builtin_clz(size); | |
656 | return (size); | |
657 | } | |
658 | ||
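A few worked values (editorial) to pin down the two helpers: ztLog2up() special-cases 1 and otherwise rounds up to the next power-of-two exponent, while ztLog2down() truncates; ztAlloc()/ztFree() below clamp the result to ztFreeIndexMax to pick a free-list bucket.

static void __unused
toy_zt_log2_examples(void)
{
	assert(ztLog2up(1) == 0);      /* special case: a single-entry request */
	assert(ztLog2up(4) == 2);      /* exact power of two                   */
	assert(ztLog2up(5) == 3);      /* rounds up                            */
	assert(ztLog2down(4) == 2);
	assert(ztLog2down(5) == 2);    /* rounds down                          */
}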
659 | static void | |
660 | ztFault(vm_map_t map, const void * address, size_t size, uint32_t flags) | |
661 | { | |
662 | vm_map_offset_t addr = (vm_map_offset_t) address; | |
663 | vm_map_offset_t page, end; | |
664 | ||
665 | page = trunc_page(addr); | |
666 | end = round_page(addr + size); | |
667 | ||
668 | for (; page < end; page += page_size) | |
669 | { | |
670 | if (!pmap_find_phys(kernel_pmap, page)) | |
671 | { | |
672 | kern_return_t __unused | |
673 | ret = kernel_memory_populate(map, page, PAGE_SIZE, | |
674 | KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG); | |
675 | assert(ret == KERN_SUCCESS); | |
676 | } | |
677 | } | |
678 | } | |
679 | ||
680 | static boolean_t | |
681 | ztPresent(const void * address, size_t size) | |
682 | { | |
683 | vm_map_offset_t addr = (vm_map_offset_t) address; | |
684 | vm_map_offset_t page, end; | |
685 | boolean_t result; | |
686 | ||
687 | page = trunc_page(addr); | |
688 | end = round_page(addr + size); | |
689 | for (result = TRUE; (page < end); page += page_size) | |
690 | { | |
691 | result = pmap_find_phys(kernel_pmap, page); | |
692 | if (!result) break; | |
693 | } | |
694 | return (result); | |
695 | } | |
696 | ||
697 | ||
698 | void __unused | |
699 | ztDump(boolean_t sanity); | |
700 | void __unused | |
701 | ztDump(boolean_t sanity) | |
702 | { | |
703 | uint32_t q, cq, p; | |
704 | ||
705 | for (q = 0; q <= ztFreeIndexMax; q++) | |
706 | { | |
707 | p = q; | |
708 | do | |
709 | { | |
710 | if (sanity) | |
711 | { | |
712 | cq = ztLog2down(ztBlocks[p].size); | |
713 | if (cq > ztFreeIndexMax) cq = ztFreeIndexMax; | |
714 | if (!ztBlocks[p].free | |
715 | || ((p != q) && (q != cq)) | |
716 | || (ztBlocks[ztBlocks[p].next].prev != p) | |
717 | || (ztBlocks[ztBlocks[p].prev].next != p)) | |
718 | { | |
719 | kprintf("zterror at %d", p); | |
720 | ztDump(FALSE); | |
721 | kprintf("zterror at %d", p); | |
722 | assert(FALSE); | |
723 | } | |
724 | continue; | |
725 | } | |
726 | kprintf("zt[%03d]%c %d, %d, %d\n", | |
727 | p, ztBlocks[p].free ? 'F' : 'A', | |
728 | ztBlocks[p].next, ztBlocks[p].prev, | |
729 | ztBlocks[p].size); | |
730 | p = ztBlocks[p].next; | |
731 | if (p == q) break; | |
732 | } | |
733 | while (p != q); | |
734 | if (!sanity) printf("\n"); | |
735 | } | |
736 | if (!sanity) printf("-----------------------\n"); | |
737 | } | |
738 | ||
739 | ||
740 | ||
741 | #define ZTBDEQ(idx) \ | |
742 | ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next; \ | |
743 | ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev; | |
744 | ||
745 | static void | |
746 | ztFree(zone_t zone __unused, uint32_t index, uint32_t count) | |
747 | { | |
748 | uint32_t q, w, p, size, merge; | |
749 | ||
750 | assert(count); | |
751 | ztBlocksFree += count; | |
752 | ||
753 | // merge with the following block (at index + count) |
754 | merge = (index + count); | |
755 | if ((merge < ztBlocksCount) | |
756 | && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) | |
757 | && ztBlocks[merge].free) | |
758 | { | |
759 | ZTBDEQ(merge); | |
760 | count += ztBlocks[merge].size; | |
761 | } | |
762 | ||
763 | // merge with the preceding block (ending at index - 1) |
764 | merge = (index - 1); | |
765 | if ((merge > ztFreeIndexMax) | |
766 | && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) | |
767 | && ztBlocks[merge].free) | |
768 | { | |
769 | size = ztBlocks[merge].size; | |
770 | count += size; | |
771 | index -= size; | |
772 | ZTBDEQ(index); | |
773 | } | |
774 | ||
775 | q = ztLog2down(count); | |
776 | if (q > ztFreeIndexMax) q = ztFreeIndexMax; | |
777 | w = q; | |
778 | // queue in order of size | |
779 | while (TRUE) | |
780 | { | |
781 | p = ztBlocks[w].next; | |
782 | if (p == q) break; | |
783 | if (ztBlocks[p].size >= count) break; | |
784 | w = p; | |
785 | } | |
786 | ztBlocks[p].prev = index; | |
787 | ztBlocks[w].next = index; | |
788 | ||
789 | // fault in first | |
790 | ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); | |
791 | ||
792 | // mark first & last with free flag and size | |
793 | ztBlocks[index].free = TRUE; | |
794 | ztBlocks[index].size = count; | |
795 | ztBlocks[index].prev = w; | |
796 | ztBlocks[index].next = p; | |
797 | if (count > 1) | |
798 | { | |
799 | index += (count - 1); | |
800 | // fault in last | |
801 | ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); | |
802 | ztBlocks[index].free = TRUE; | |
803 | ztBlocks[index].size = count; | |
804 | } | |
805 | } | |
806 | ||
807 | static uint32_t | |
808 | ztAlloc(zone_t zone, uint32_t count) | |
809 | { | |
810 | uint32_t q, w, p, leftover; | |
811 | ||
812 | assert(count); | |
813 | ||
814 | q = ztLog2up(count); | |
815 | if (q > ztFreeIndexMax) q = ztFreeIndexMax; | |
816 | do | |
817 | { | |
818 | w = q; | |
819 | while (TRUE) | |
820 | { | |
821 | p = ztBlocks[w].next; | |
822 | if (p == q) break; | |
823 | if (ztBlocks[p].size >= count) | |
824 | { | |
825 | // dequeue, mark both ends allocated | |
826 | ztBlocks[w].next = ztBlocks[p].next; | |
827 | ztBlocks[ztBlocks[p].next].prev = w; | |
828 | ztBlocks[p].free = FALSE; | |
829 | ztBlocksFree -= ztBlocks[p].size; | |
830 | if (ztBlocks[p].size > 1) ztBlocks[p + ztBlocks[p].size - 1].free = FALSE; | |
831 | ||
832 | // fault all the allocation | |
833 | ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0); | |
834 | // mark last as allocated | |
835 | if (count > 1) ztBlocks[p + count - 1].free = FALSE; | |
836 | // free remainder | |
837 | leftover = ztBlocks[p].size - count; | |
838 | if (leftover) ztFree(zone, p + ztBlocks[p].size - leftover, leftover); | |
839 | ||
840 | return (p); | |
841 | } | |
842 | w = p; | |
843 | } | |
844 | q++; | |
845 | } | |
846 | while (q <= ztFreeIndexMax); | |
847 | ||
848 | return (-1U); | |
849 | } | |
850 | ||
851 | static void | |
852 | ztInit(vm_size_t max_zonemap_size, lck_grp_t * group) | |
853 | { | |
854 | kern_return_t ret; | |
855 | vm_map_kernel_flags_t vmk_flags; | |
856 | uint32_t idx; | |
857 | ||
858 | lck_mtx_init(&ztLock, group, LCK_ATTR_NULL); | |
859 | ||
860 | // allocate submaps VM_KERN_MEMORY_DIAG | |
861 | ||
862 | zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t); | |
863 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
864 | vmk_flags.vmkf_permanent = TRUE; | |
865 | ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size, | |
866 | FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, | |
867 | &zone_tagbase_map); | |
868 | ||
869 | if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed"); | |
870 | zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size); | |
871 | ||
872 | zone_tags_map_size = 2048*1024 * sizeof(vm_tag_t); | |
873 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
874 | vmk_flags.vmkf_permanent = TRUE; | |
875 | ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size, | |
876 | FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, | |
877 | &zone_tags_map); | |
878 | ||
879 | if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed"); | |
880 | zone_tags_max = zone_tags_min + round_page(zone_tags_map_size); | |
881 | ||
882 | ztBlocks = (ztBlock *) zone_tags_min; | |
883 | ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock)); | |
884 | ||
885 | // initialize the qheads | |
886 | lck_mtx_lock(&ztLock); | |
887 | ||
888 | ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0); | |
889 | for (idx = 0; idx < ztFreeIndexCount; idx++) | |
890 | { | |
891 | ztBlocks[idx].free = TRUE; | |
892 | ztBlocks[idx].next = idx; | |
893 | ztBlocks[idx].prev = idx; | |
894 | ztBlocks[idx].size = 0; | |
895 | } | |
896 | // free remaining space | |
897 | ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount); | |
898 | ||
899 | lck_mtx_unlock(&ztLock); | |
900 | } | |
901 | ||
902 | static void | |
903 | ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size) | |
904 | { | |
905 | uint32_t * tagbase; | |
906 | uint32_t count, block, blocks, idx; | |
907 | size_t pages; | |
908 | ||
909 | pages = atop(size); | |
910 | tagbase = ZTAGBASE(zone, mem); | |
911 | ||
912 | lck_mtx_lock(&ztLock); | |
913 | ||
914 | // fault tagbase | |
915 | ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0); | |
916 | ||
917 | if (!zone->tags_inline) | |
918 | { | |
919 | // allocate tags | |
920 | count = (uint32_t)(size / zone->elem_size); | |
921 | blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); | |
922 | block = ztAlloc(zone, blocks); | |
923 | if (-1U == block) ztDump(false); | |
924 | assert(-1U != block); | |
925 | } | |
926 | ||
927 | lck_mtx_unlock(&ztLock); | |
928 | ||
929 | if (!zone->tags_inline) | |
930 | { | |
931 | // set tag base for each page | |
932 | block *= ztTagsPerBlock; | |
933 | for (idx = 0; idx < pages; idx++) | |
934 | { | |
935 | tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size); | |
936 | } | |
937 | } | |
938 | } | |
939 | ||
940 | static void | |
941 | ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size) | |
942 | { | |
943 | uint32_t * tagbase; | |
944 | uint32_t count, block, blocks, idx; | |
945 | size_t pages; | |
946 | ||
947 | // set tag base for each page | |
948 | pages = atop(size); | |
949 | tagbase = ZTAGBASE(zone, mem); | |
950 | block = tagbase[0]; | |
951 | for (idx = 0; idx < pages; idx++) | |
952 | { | |
953 | tagbase[idx] = 0xFFFFFFFF; | |
954 | } | |
955 | ||
956 | lck_mtx_lock(&ztLock); | |
957 | if (!zone->tags_inline) | |
958 | { | |
959 | count = (uint32_t)(size / zone->elem_size); | |
960 | blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); | |
961 | assert(block != 0xFFFFFFFF); | |
962 | block /= ztTagsPerBlock; | |
963 | ztFree(NULL /* zone is unlocked */, block, blocks); | |
964 | } | |
965 | ||
966 | lck_mtx_unlock(&ztLock); | |
967 | } | |
968 | ||
969 | uint32_t | |
970 | zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size) | |
971 | { | |
972 | zone_t z; | |
973 | uint32_t idx; | |
974 | ||
975 | simple_lock(&all_zones_lock); | |
976 | ||
977 | for (idx = 0; idx < num_zones; idx++) | |
978 | { | |
979 | z = &(zone_array[idx]); | |
980 | if (!z->tags) continue; | |
981 | if (tag_zone_index != z->tag_zone_index) continue; | |
982 | *elem_size = z->elem_size; | |
983 | break; | |
984 | } | |
985 | ||
986 | simple_unlock(&all_zones_lock); | |
987 | ||
988 | if (idx == num_zones) idx = -1U; | |
989 | ||
990 | return (idx); | |
991 | } | |
992 | ||
993 | #endif /* VM_MAX_TAG_ZONES */ | |
994 | ||
39037602 A |
995 | /* Routine to get the size of a zone-allocated address. If the address doesn't belong to the
996 | * zone_map, returns 0. | |
997 | */ | |
998 | vm_size_t | |
999 | zone_element_size(void *addr, zone_t *z) | |
1000 | { | |
1001 | struct zone *src_zone; | |
1002 | if (from_zone_map(addr, sizeof(void *))) { | |
1003 | struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); | |
1004 | src_zone = PAGE_METADATA_GET_ZONE(page_meta); | |
1005 | if (z) { | |
1006 | *z = src_zone; | |
1007 | } | |
1008 | return (src_zone->elem_size); | |
1009 | } else { | |
1010 | #if CONFIG_GZALLOC | |
1011 | vm_size_t gzsize; | |
1012 | if (gzalloc_element_size(addr, z, &gzsize)) { | |
1013 | return gzsize; | |
1014 | } | |
1015 | #endif /* CONFIG_GZALLOC */ | |
1016 | ||
1017 | return 0; | |
1018 | } | |
39236c6e A |
1019 | } |
1020 | ||
5ba3f43e A |
1021 | #if DEBUG || DEVELOPMENT |
1022 | ||
1023 | vm_size_t | |
1024 | zone_element_info(void *addr, vm_tag_t * ptag) | |
1025 | { | |
1026 | vm_size_t size = 0; | |
1027 | vm_tag_t tag = VM_KERN_MEMORY_NONE; | |
1028 | struct zone * src_zone; | |
1029 | ||
1030 | if (from_zone_map(addr, sizeof(void *))) { | |
1031 | struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); | |
1032 | src_zone = PAGE_METADATA_GET_ZONE(page_meta); | |
1033 | #if VM_MAX_TAG_ZONES | |
1034 | if (__improbable(src_zone->tags)) { | |
1035 | tag = (ZTAG(src_zone, (vm_offset_t) addr)[0] >> 1); | |
1036 | } | |
1037 | #endif /* VM_MAX_TAG_ZONES */ | |
1038 | size = src_zone->elem_size; | |
1039 | } else { | |
1040 | #if CONFIG_GZALLOC | |
1041 | gzalloc_element_size(addr, NULL, &size); | |
1042 | #endif /* CONFIG_GZALLOC */ | |
1043 | } | |
1044 | *ptag = tag; | |
1045 | return size; | |
1046 | } | |
1047 | ||
1048 | #endif /* DEBUG || DEVELOPMENT */ | |
1049 | ||
39236c6e A |
1050 | /* |
1051 | * Zone checking helper function. | |
1052 | * A pointer that satisfies these conditions is OK to be a freelist next pointer | |
1053 | * A pointer that doesn't satisfy these conditions indicates corruption | |
1054 | */ | |
1055 | static inline boolean_t | |
1056 | is_sane_zone_ptr(zone_t zone, | |
1057 | vm_offset_t addr, | |
1058 | size_t obj_size) | |
1059 | { | |
1060 | /* Must be aligned to pointer boundary */ | |
1061 | if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0)) | |
1062 | return FALSE; | |
1063 | ||
1064 | /* Must be a kernel address */ | |
1065 | if (__improbable(!pmap_kernel_va(addr))) | |
1066 | return FALSE; | |
1067 | ||
1068 | /* Must be from zone map if the zone only uses memory from the zone_map */ | |
1069 | /* | |
1070 | * TODO: Remove the zone->collectable check when every | |
1071 | * zone using foreign memory is properly tagged with allows_foreign | |
1072 | */ | |
1073 | if (zone->collectable && !zone->allows_foreign) { | |
39236c6e A |
1074 | /* check if addr is from zone map */ |
1075 | if (addr >= zone_map_min_address && | |
1076 | (addr + obj_size - 1) < zone_map_max_address ) | |
1077 | return TRUE; | |
1078 | ||
1079 | return FALSE; | |
1080 | } | |
1081 | ||
1082 | return TRUE; | |
1083 | } | |
1084 | ||
1085 | static inline boolean_t | |
1086 | is_sane_zone_page_metadata(zone_t zone, | |
1087 | vm_offset_t page_meta) | |
1088 | { | |
1089 | /* NULL page metadata structures are invalid */ | |
1090 | if (page_meta == 0) | |
1091 | return FALSE; | |
1092 | return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata)); | |
1093 | } | |
1094 | ||
1095 | static inline boolean_t | |
1096 | is_sane_zone_element(zone_t zone, | |
1097 | vm_offset_t addr) | |
1098 | { | |
1099 | /* NULL is OK because it indicates the tail of the list */ | |
1100 | if (addr == 0) | |
1101 | return TRUE; | |
1102 | return is_sane_zone_ptr(zone, addr, zone->elem_size); | |
1103 | } | |
316670eb | 1104 | |
39236c6e A |
1105 | /* Someone wrote to freed memory. */ |
1106 | static inline void /* noreturn */ | |
1107 | zone_element_was_modified_panic(zone_t zone, | |
fe8ab488 | 1108 | vm_offset_t element, |
39236c6e A |
1109 | vm_offset_t found, |
1110 | vm_offset_t expected, | |
1111 | vm_offset_t offset) | |
1112 | { | |
fe8ab488 A |
1113 | panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p", |
1114 | zone->zone_name, | |
39236c6e A |
1115 | (void *) expected, |
1116 | (void *) found, | |
1117 | (void *) (expected ^ found), | |
1118 | (uint32_t) offset, | |
1119 | (uint32_t) zone->elem_size, | |
fe8ab488 A |
1120 | (void *) element, |
1121 | (void *) zp_nopoison_cookie, | |
1122 | (void *) zp_poisoned_cookie); | |
39236c6e A |
1123 | } |
1124 | ||
1125 | /* | |
1126 | * The primary and backup pointers don't match. | |
1127 | * Determine which one was likely the corrupted pointer, find out what it | |
1128 | * probably should have been, and panic. | |
1129 | * I would like to mark this as noreturn, but panic() isn't marked noreturn. | |
1130 | */ | |
1131 | static void /* noreturn */ | |
1132 | backup_ptr_mismatch_panic(zone_t zone, | |
fe8ab488 | 1133 | vm_offset_t element, |
39236c6e A |
1134 | vm_offset_t primary, |
1135 | vm_offset_t backup) | |
1136 | { | |
1137 | vm_offset_t likely_backup; | |
39037602 | 1138 | vm_offset_t likely_primary; |
39236c6e | 1139 | |
39037602 | 1140 | likely_primary = primary ^ zp_nopoison_cookie; |
39236c6e | 1141 | boolean_t sane_backup; |
39037602 | 1142 | boolean_t sane_primary = is_sane_zone_element(zone, likely_primary); |
39236c6e A |
1143 | boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE; |
1144 | ||
fe8ab488 A |
1145 | #if defined(__LP64__) |
1146 | /* We can inspect the tag in the upper bits for additional confirmation */ | |
1147 | if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000) | |
1148 | element_was_poisoned = TRUE; | |
1149 | else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000) | |
1150 | element_was_poisoned = FALSE; | |
1151 | #endif | |
1152 | ||
39236c6e A |
1153 | if (element_was_poisoned) { |
1154 | likely_backup = backup ^ zp_poisoned_cookie; | |
1155 | sane_backup = is_sane_zone_element(zone, likely_backup); | |
316670eb | 1156 | } else { |
39236c6e A |
1157 | likely_backup = backup ^ zp_nopoison_cookie; |
1158 | sane_backup = is_sane_zone_element(zone, likely_backup); | |
316670eb | 1159 | } |
39236c6e A |
1160 | |
1161 | /* The primary is definitely the corrupted one */ | |
1162 | if (!sane_primary && sane_backup) | |
39037602 | 1163 | zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); |
39236c6e A |
1164 | |
1165 | /* The backup is definitely the corrupted one */ | |
1166 | if (sane_primary && !sane_backup) | |
fe8ab488 | 1167 | zone_element_was_modified_panic(zone, element, backup, |
5ba3f43e | 1168 | (likely_primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)), |
39236c6e A |
1169 | zone->elem_size - sizeof(vm_offset_t)); |
1170 | ||
1171 | /* | |
1172 | * Not sure which is the corrupted one. | |
1173 | * It's less likely that the backup pointer was overwritten with | |
1174 | * ( (sane address) ^ (valid cookie) ), so we'll guess that the | |
1175 | * primary pointer has been overwritten with a sane but incorrect address. | |
1176 | */ | |
1177 | if (sane_primary && sane_backup) | |
5ba3f43e | 1178 | zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); |
39236c6e A |
1179 | |
1180 | /* Neither are sane, so just guess. */ | |
5ba3f43e | 1181 | zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); |
316670eb A |
1182 | } |
1183 | ||
39236c6e A |
1184 | /* |
1185 | * Adds the element to the head of the zone's free list | |
1186 | * Keeps a backup next-pointer at the end of the element | |
39236c6e A |
1187 | */ |
1188 | static inline void | |
1189 | free_to_zone(zone_t zone, | |
fe8ab488 A |
1190 | vm_offset_t element, |
1191 | boolean_t poison) | |
39236c6e A |
1192 | { |
1193 | vm_offset_t old_head; | |
1194 | struct zone_page_metadata *page_meta; | |
1195 | ||
1196 | vm_offset_t *primary = (vm_offset_t *) element; | |
1197 | vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary); | |
1198 | ||
39037602 A |
1199 | page_meta = get_zone_page_metadata((struct zone_free_element *)element, FALSE); |
1200 | assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); | |
1201 | old_head = (vm_offset_t)page_metadata_get_freelist(page_meta); | |
39236c6e A |
1202 | |
1203 | #if MACH_ASSERT | |
1204 | if (__improbable(!is_sane_zone_element(zone, old_head))) | |
1205 | panic("zfree: invalid head pointer %p for freelist of zone %s\n", | |
1206 | (void *) old_head, zone->zone_name); | |
1207 | #endif | |
1208 | ||
1209 | if (__improbable(!is_sane_zone_element(zone, element))) | |
1210 | panic("zfree: freeing invalid pointer %p to zone %s\n", | |
1211 | (void *) element, zone->zone_name); | |
1212 | ||
39236c6e A |
1213 | /* |
1214 | * Always write a redundant next pointer | |
1215 | * So that it is more difficult to forge, xor it with a random cookie | |
1216 | * A poisoned element is indicated by using zp_poisoned_cookie | |
1217 | * instead of zp_nopoison_cookie | |
1218 | */ | |
1219 | ||
1220 | *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie); | |
1221 | ||
39037602 A |
1222 | /* |
1223 | * Insert this element at the head of the free list. We also xor the | |
1224 | * primary pointer with the zp_nopoison_cookie to make sure a free | |
1225 | * element does not provide the location of the next free element directly. | |
1226 | */ | |
1227 | *primary = old_head ^ zp_nopoison_cookie; | |
1228 | page_metadata_set_freelist(page_meta, (struct zone_free_element *)element); | |
1229 | page_meta->free_count++; | |
1230 | if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) { | |
1231 | if (page_meta->free_count == 1) { | |
1232 | /* first foreign element freed on page, move from all_used */ | |
1233 | re_queue_tail(&zone->pages.any_free_foreign, &(page_meta->pages)); | |
1234 | } else { | |
1235 | /* no other list transitions */ | |
39236c6e | 1236 | } |
39037602 A |
1237 | } else if (page_meta->free_count == get_metadata_alloc_count(page_meta)) { |
1238 | /* whether the page was on the intermediate or all_used queue, move it to free */ |
1239 | re_queue_tail(&zone->pages.all_free, &(page_meta->pages)); | |
1240 | zone->count_all_free_pages += page_meta->page_count; | |
1241 | } else if (page_meta->free_count == 1) { | |
1242 | /* first free element on page, move from all_used */ | |
1243 | re_queue_tail(&zone->pages.intermediate, &(page_meta->pages)); | |
39236c6e A |
1244 | } |
1245 | zone->count--; | |
1246 | zone->countfree++; | |
5ba3f43e A |
1247 | |
1248 | #if KASAN_ZALLOC | |
1249 | kasan_poison_range(element, zone->elem_size, ASAN_HEAP_FREED); | |
1250 | #endif | |
39236c6e A |
1251 | } |
1252 | ||
1253 | ||
1254 | /* | |
1255 | * Removes an element from the zone's free list, returning 0 if the free list is empty. | |
1256 | * Verifies that the next-pointer and backup next-pointer are intact, | |
1257 | * and verifies that a poisoned element hasn't been modified. | |
1258 | */ | |
1259 | static inline vm_offset_t | |
fe8ab488 | 1260 | try_alloc_from_zone(zone_t zone, |
5ba3f43e | 1261 | vm_tag_t tag __unused, |
fe8ab488 | 1262 | boolean_t* check_poison) |
39236c6e A |
1263 | { |
1264 | vm_offset_t element; | |
1265 | struct zone_page_metadata *page_meta; | |
1266 | ||
fe8ab488 A |
1267 | *check_poison = FALSE; |
1268 | ||
39236c6e | 1269 | /* if zone is empty, bail */ |
39037602 A |
1270 | if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign)) |
1271 | page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); | |
1272 | else if (!queue_empty(&zone->pages.intermediate)) | |
1273 | page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); | |
1274 | else if (!queue_empty(&zone->pages.all_free)) { | |
1275 | page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); | |
1276 | assert(zone->count_all_free_pages >= page_meta->page_count); | |
1277 | zone->count_all_free_pages -= page_meta->page_count; | |
39236c6e | 1278 | } else { |
39037602 | 1279 | return 0; |
39236c6e | 1280 | } |
39037602 A |
1281 | /* Check if page_meta passes is_sane_zone_element */ |
1282 | if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta))) | |
1283 | panic("zalloc: invalid metadata structure %p for freelist of zone %s\n", | |
1284 | (void *) page_meta, zone->zone_name); | |
1285 | assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); | |
1286 | element = (vm_offset_t)page_metadata_get_freelist(page_meta); | |
39236c6e | 1287 | |
39037602 | 1288 | if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size))) |
39236c6e A |
1289 | panic("zfree: invalid head pointer %p for freelist of zone %s\n", |
1290 | (void *) element, zone->zone_name); | |
39236c6e A |
1291 | |
1292 | vm_offset_t *primary = (vm_offset_t *) element; | |
1293 | vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary); | |
1294 | ||
39037602 A |
1295 | /* |
1296 | * Since the primary next pointer is xor'ed with zp_nopoison_cookie | |
1297 | * for obfuscation, retrieve the original value back | |
1298 | */ | |
1299 | vm_offset_t next_element = *primary ^ zp_nopoison_cookie; | |
1300 | vm_offset_t next_element_primary = *primary; | |
39236c6e A |
1301 | vm_offset_t next_element_backup = *backup; |
1302 | ||
1303 | /* | |
1304 | * backup_ptr_mismatch_panic will determine what next_element | |
1305 | * should have been, and print it appropriately | |
1306 | */ | |
1307 | if (__improbable(!is_sane_zone_element(zone, next_element))) | |
39037602 | 1308 | backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); |
39236c6e A |
1309 | |
1310 | /* Check the backup pointer for the regular cookie */ | |
1311 | if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) { | |
1312 | ||
1313 | /* Check for the poisoned cookie instead */ | |
1314 | if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) | |
1315 | /* Neither cookie is valid, corruption has occurred */ | |
39037602 | 1316 | backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); |
39236c6e A |
1317 | |
1318 | /* | |
fe8ab488 | 1319 | * Element was marked as poisoned, so check its integrity before using it. |
39236c6e | 1320 | */ |
fe8ab488 | 1321 | *check_poison = TRUE; |
39236c6e A |
1322 | } |
1323 | ||
39037602 A |
1324 | /* Make sure the page_meta is at the correct offset from the start of page */ |
1325 | if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE))) | |
1326 | panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n", | |
1327 | page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE)); | |
1328 | ||
1329 | /* Make sure next_element belongs to the same page as page_meta */ | |
1330 | if (next_element) { | |
1331 | if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE))) | |
1332 | panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n", | |
1333 | (void *)next_element, (void *)element, zone->zone_name); | |
39236c6e A |
1334 | } |
1335 | ||
39236c6e | 1336 | /* Remove this element from the free list */ |
39037602 A |
1337 | page_metadata_set_freelist(page_meta, (struct zone_free_element *)next_element); |
1338 | page_meta->free_count--; | |
39236c6e | 1339 | |
39037602 A |
1340 | if (page_meta->free_count == 0) { |
1341 | /* move to all used */ | |
1342 | re_queue_tail(&zone->pages.all_used, &(page_meta->pages)); | |
1343 | } else { | |
1344 | if (!zone->allows_foreign || from_zone_map(element, zone->elem_size)) { | |
1345 | if (get_metadata_alloc_count(page_meta) == page_meta->free_count + 1) { | |
1346 | /* remove from free, move to intermediate */ | |
1347 | re_queue_tail(&zone->pages.intermediate, &(page_meta->pages)); | |
316670eb | 1348 | } |
316670eb | 1349 | } |
316670eb | 1350 | } |
39236c6e A |
1351 | zone->countfree--; |
1352 | zone->count++; | |
1353 | zone->sum_count++; | |
1354 | ||
5ba3f43e A |
1355 | #if VM_MAX_TAG_ZONES |
1356 | if (__improbable(zone->tags)) { | |
1357 | // set the tag with b0 clear so the block remains inuse | |
1358 | ZTAG(zone, element)[0] = (tag << 1); | |
1359 | } | |
1360 | #endif /* VM_MAX_TAG_ZONES */ | |
1361 | ||
1362 | ||
1363 | #if KASAN_ZALLOC | |
1364 | kasan_poison_range(element, zone->elem_size, ASAN_VALID); | |
1365 | #endif | |
1366 | ||
39236c6e | 1367 | return element; |
316670eb | 1368 | } |
1c79356b | 1369 | |
39236c6e A |
1370 | /* |
1371 | * End of zone poisoning | |
1372 | */ | |
1373 | ||
6d2010ae A |
1374 | /* |
1375 | * Zone info options | |
1376 | */ | |
39037602 | 1377 | #define ZINFO_SLOTS MAX_ZONES /* for now */ |
1c79356b | 1378 | |
39236c6e A |
1379 | zone_t zone_find_largest(void); |
1380 | ||
1381 | /* | |
1382 | * Async allocation of zones | |
1383 | * This mechanism allows for bootstrapping an empty zone which is setup with | |
1384 | * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call | |
1385 | * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free. | |
1386 | * This will prime the zone for the next use. | |
1387 | * | |
1388 | * Currently the thread_callout function (zalloc_async) will loop through all zones | |
1389 | * looking for any zone with async_pending set and do the work for it. | |
1390 | * | |
1391 | * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call, | |
1392 | * then zalloc_noblock to an empty zone may succeed. | |
1393 | */ | |
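/*
 * In outline, the priming step performed for a zone with async_pending set amounts to
 * the following (a simplified sketch, not the exact code of zalloc_async below):
 *
 *	void *elem = zalloc(z);		// may block while the zone is grown
 *	if (elem != NULL)
 *		zfree(z, elem);		// return it immediately; the zone now has free elements
 */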
0b4e3aa0 A |
1394 | void zalloc_async( |
1395 | thread_call_param_t p0, | |
1396 | thread_call_param_t p1); | |
1397 | ||
39236c6e | 1398 | static thread_call_data_t call_async_alloc; |
0b4e3aa0 | 1399 | |
3e170ce0 A |
1400 | /* |
1401 | * Align elements that use the zone page list to 32 byte boundaries. | |
1402 | */ | |
1403 | #define ZONE_ELEMENT_ALIGNMENT 32 | |
1c79356b | 1404 | |
9bccf70c A |
1405 | #define zone_wakeup(zone) thread_wakeup((event_t)(zone)) |
1406 | #define zone_sleep(zone) \ | |
5ba3f43e | 1407 | (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN_ALWAYS, (event_t)(zone), THREAD_UNINT); |
2d21ac55 | 1408 | |
39236c6e A |
1409 | /* |
1410 | * The zone_locks_grp allows for collecting lock statistics. | |
1411 | * All locks are associated with this group in zinit. |
1412 | * Look at tools/lockstat for debugging lock contention. | |
1413 | */ | |
1414 | ||
1415 | lck_grp_t zone_locks_grp; | |
1416 | lck_grp_attr_t zone_locks_grp_attr; | |
9bccf70c | 1417 | |
1c79356b A |
1418 | #define lock_zone_init(zone) \ |
1419 | MACRO_BEGIN \ | |
2d21ac55 A |
1420 | lck_attr_setdefault(&(zone)->lock_attr); \ |
1421 | lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \ | |
39236c6e | 1422 | &zone_locks_grp, &(zone)->lock_attr); \ |
1c79356b A |
1423 | MACRO_END |
1424 | ||
b0d623f7 | 1425 | #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock) |
1c79356b | 1426 | |
1c79356b A |
1427 | /* |
1428 | * Exclude more than one concurrent garbage collection | |
1429 | */ | |
39236c6e | 1430 | decl_lck_mtx_data(, zone_gc_lock) |
b0d623f7 | 1431 | |
39236c6e A |
1432 | lck_attr_t zone_gc_lck_attr; |
1433 | lck_grp_t zone_gc_lck_grp; | |
1434 | lck_grp_attr_t zone_gc_lck_grp_attr; | |
1435 | lck_mtx_ext_t zone_gc_lck_ext; | |
1c79356b | 1436 | |
0b4e3aa0 | 1437 | boolean_t zone_gc_allowed = TRUE; |
c910b4d9 | 1438 | boolean_t panic_include_zprint = FALSE; |
0b4e3aa0 | 1439 | |
5ba3f43e | 1440 | mach_memory_info_t *panic_kext_memory_info = NULL; |
3e170ce0 A |
1441 | vm_size_t panic_kext_memory_size = 0; |
1442 | ||
39236c6e A |
1443 | #define ZALLOC_DEBUG_ZONEGC 0x00000001 |
1444 | #define ZALLOC_DEBUG_ZCRAM 0x00000002 | |
1445 | uint32_t zalloc_debug = 0; | |
1446 | ||
c910b4d9 A |
1447 | /* |
1448 | * Zone leak debugging code | |
1449 | * | |
1450 | * When enabled, this code keeps a log to track allocations to a particular zone that have not | |
1451 | * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated | |
1452 | * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is | |
1453 | * off by default. | |
1454 | * | |
1455 | * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone> | |
1456 | * is the name of the zone you wish to log. | |
1457 | * | |
1458 | * This code tracks a limited number of zones at a time (see MAX_NUM_ZONES_ALLOWED_LOGGING), so you need to identify which one is leaking first. |
1459 | * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone | |
1460 | * garbage collector. Note that the zone name printed in the panic message is not necessarily the one | |
1461 | * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This | |
1462 | * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The | |
1463 | * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs. | |
1464 | * See the help in the kgmacros for usage info. | |
1465 | * | |
1466 | * | |
1467 | * Zone corruption logging | |
1468 | * | |
1469 | * Logging can also be used to help identify the source of a zone corruption. First, identify the zone | |
1470 | * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction | |
1471 | * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the | |
1472 | * corruption is detected, examining the log will show you the stack traces of the callers who last allocated | |
1473 | * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been | |
1474 | * corrupted to examine its history. This should lead to the source of the corruption. | |
1475 | */ | |
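/*
 * As a concrete example (zone name chosen for illustration), to log leaks in the
 * "vm objects" zone with a 2000-record log, boot with:
 *
 *	zlog=vm.objects zrecs=2000
 *
 * and to switch to corruption-style logging (both allocs and frees) for the same zone:
 *
 *	-zc zlog=vm.objects
 *
 * The '.' stands in for the space in the zone name; see track_this_zone() below.
 */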
1476 | ||
39037602 | 1477 | static boolean_t log_records_init = FALSE; |
c910b4d9 A |
1478 | static int log_records; /* size of the log, expressed in number of records */ |
1479 | ||
5ba3f43e | 1480 | #define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */ |
39037602 A |
1481 | |
1482 | static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING; | |
1483 | static int num_zones_logged = 0; | |
1484 | ||
c910b4d9 A |
1485 | static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */ |
1486 | ||
39236c6e A |
1487 | /* Log allocations and frees to help debug a zone element corruption */ |
1488 | boolean_t corruption_debug_flag = FALSE; /* enabled by "-zc" boot-arg */ | |
39037602 A |
1489 | /* Making pointer scanning leaks detection possible for all zones */ |
1490 | ||
1491 | #if DEBUG || DEVELOPMENT | |
1492 | boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-arg */ | |
1493 | #endif /* DEBUG || DEVELOPMENT */ | |
1494 | ||
39236c6e | 1495 | |
c910b4d9 A |
1496 | /* |
1497 | * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to | |
39037602 A |
1498 | * the number of records you want in the log. For example, "zrecs=10" sets it to 10 records. Since this |
1499 | * is the number of stacks suspected of leaking, we don't need many records. | |
c910b4d9 | 1500 | */ |
316670eb | 1501 | |
6d2010ae | 1502 | #if defined(__LP64__) |
39037602 | 1503 | #define ZRECORDS_MAX 2560 /* Max records allowed in the log */ |
6d2010ae | 1504 | #else |
39037602 | 1505 | #define ZRECORDS_MAX 1536 /* Max records allowed in the log */ |
6d2010ae | 1506 | #endif |
39037602 | 1507 | #define ZRECORDS_DEFAULT 1024 /* default records in log if zrecs is not specified in boot-args */ |
0b4e3aa0 | 1508 | |
c910b4d9 | 1509 | /* |
39236c6e A |
1510 | * Each record in the log contains a pointer to the zone element it refers to, |
1511 | * and a small array to hold the pc's from the stack trace. A | |
c910b4d9 A |
1512 | * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging, |
1513 | * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees. | |
1514 | * If the log fills, old records are replaced as if it were a circular buffer. | |
1515 | */ | |
1516 | ||
c910b4d9 A |
1517 | |
1518 | /* | |
39236c6e | 1519 | * Opcodes for the btlog operation field: |
c910b4d9 A |
1520 | */ |
1521 | ||
1522 | #define ZOP_ALLOC 1 | |
1523 | #define ZOP_FREE 0 | |
1524 | ||
c910b4d9 A |
1525 | /* |
1526 | * Decide if we want to log this zone by doing a string compare between a zone name and the name | |
1527 | * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not | |
1528 | * possible to include spaces in strings passed in via the boot-args, a period in the logname will | |
1529 | * match a space in the zone name. | |
1530 | */ | |
1531 | ||
5ba3f43e A |
1532 | int |
1533 | track_this_zone(const char *zonename, const char *logname) | |
c910b4d9 A |
1534 | { |
1535 | int len; | |
1536 | const char *zc = zonename; | |
1537 | const char *lc = logname; | |
1538 | ||
1539 | /* | |
1540 | * Compare the strings. We bound the compare by MAX_ZONE_NAME. | |
1541 | */ | |
1542 | ||
1543 | for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) { | |
1544 | ||
1545 | /* | |
1546 | * If the current characters don't match, check for a space |
1547 | * in the zone name and a corresponding period in the log name. | |
1548 | * If that's not there, then the strings don't match. | |
1549 | */ | |
1550 | ||
1551 | if (*zc != *lc && !(*zc == ' ' && *lc == '.')) | |
1552 | break; | |
1553 | ||
1554 | /* | |
1555 | * The strings are equal so far. If we're at the end, then it's a match. | |
1556 | */ | |
1557 | ||
1558 | if (*zc == '\0') | |
1559 | return TRUE; | |
1560 | } | |
1561 | ||
1562 | return FALSE; | |
1563 | } | |
1564 | ||
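/*
 * For example, under the matching rules above (a '.' in the boot-arg matches a ' ' in
 * the zone name):
 *
 *	track_this_zone("vm objects", "vm.objects")	returns TRUE
 *	track_this_zone("vm objects", "vm.pages")	returns FALSE
 */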
1565 | ||
1566 | /* | |
1567 | * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and | |
1568 | * the buffer for the records has been allocated. | |
1569 | */ | |
1570 | ||
39037602 | 1571 | #define DO_LOGGING(z) (z->zone_logging == TRUE && z->zlog_btlog) |
c910b4d9 | 1572 | |
39236c6e | 1573 | extern boolean_t kmem_alloc_ready; |
c910b4d9 | 1574 | |
6d2010ae A |
1575 | #if CONFIG_ZLEAKS |
1576 | #pragma mark - | |
1577 | #pragma mark Zone Leak Detection | |
1578 | ||
1579 | /* | |
1580 | * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding | |
316670eb | 1581 | * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a |
6d2010ae A |
1582 | * backtrace. Every free, we examine the table and determine if the allocation was being tracked, |
1583 | * and stop tracking it if it was being tracked. | |
1584 | * | |
1585 | * We track the allocations in the zallocations hash table, which stores the address that was returned from | |
1586 | * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which | |
1587 | * stores the backtrace associated with that allocation. This provides uniquing for the relatively large | |
1588 | * backtraces - we don't store them more than once. | |
1589 | * | |
1590 | * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up | |
1591 | * a large amount of virtual space. | |
1592 | */ | |
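/*
 * The allocation path cooperates with zleak roughly as follows (a simplified sketch of
 * the sampling logic in zalloc, not a verbatim excerpt):
 *
 *	if (zone->zleak_on && ++zone->zleak_capture >= zleak_sample_factor) {
 *		zone->zleak_capture = 0;
 *		uintptr_t zbt[MAX_ZTRACE_DEPTH];
 *		uint32_t depth = backtrace(zbt, MAX_ZTRACE_DEPTH);
 *		if (!zleak_log(zbt, addr, depth, zone->elem_size))
 *			zone->zleak_capture = zleak_sample_factor;	// lock was busy, try again soon
 *	}
 */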
1593 | #define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */ | |
1594 | #define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */ | |
1595 | #define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */ | |
1596 | #define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */ | |
1597 | uint32_t zleak_state = 0; /* State of collection, as above */ | |
1598 | ||
1599 | boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */ | |
1600 | vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */ | |
1601 | vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */ | |
316670eb | 1602 | unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */ |
6d2010ae A |
1603 | |
1604 | /* | |
1605 | * Counters for allocation statistics. | |
1606 | */ | |
1607 | ||
1608 | /* Times two active records want to occupy the same spot */ | |
1609 | unsigned int z_alloc_collisions = 0; | |
1610 | unsigned int z_trace_collisions = 0; | |
1611 | ||
1612 | /* Times a new record lands on a spot previously occupied by a freed allocation */ | |
1613 | unsigned int z_alloc_overwrites = 0; | |
1614 | unsigned int z_trace_overwrites = 0; | |
1615 | ||
1616 | /* Times a new alloc or trace is put into the hash table */ | |
1617 | unsigned int z_alloc_recorded = 0; | |
1618 | unsigned int z_trace_recorded = 0; | |
1619 | ||
1620 | /* Times zleak_log returned false due to not being able to acquire the lock */ | |
1621 | unsigned int z_total_conflicts = 0; | |
1622 | ||
1623 | ||
1624 | #pragma mark struct zallocation | |
1625 | /* | |
1626 | * Structure for keeping track of an allocation | |
1627 | * An allocation bucket is in use if its element is not NULL | |
1628 | */ | |
1629 | struct zallocation { | |
1630 | uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */ | |
1631 | vm_size_t za_size; /* how much memory did this allocation take up? */ | |
1632 | uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */ | |
1633 | /* TODO: #if this out */ | |
1634 | uint32_t za_hit_count; /* for determining effectiveness of hash function */ | |
1635 | }; | |
1636 | ||
1637 | /* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */ | |
316670eb A |
1638 | uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM; |
1639 | uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM; | |
6d2010ae A |
1640 | |
1641 | vm_size_t zleak_max_zonemap_size; | |
1642 | ||
1643 | /* Hashmaps of allocations and their corresponding traces */ | |
1644 | static struct zallocation* zallocations; | |
1645 | static struct ztrace* ztraces; | |
1646 | ||
1647 | /* not static so that panic can see this, see kern/debug.c */ | |
1648 | struct ztrace* top_ztrace; | |
1649 | ||
1650 | /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */ | |
316670eb | 1651 | static lck_spin_t zleak_lock; |
6d2010ae A |
1652 | static lck_attr_t zleak_lock_attr; |
1653 | static lck_grp_t zleak_lock_grp; | |
1654 | static lck_grp_attr_t zleak_lock_grp_attr; | |
1655 | ||
1656 | /* | |
1657 | * Initializes the zone leak monitor. Called from zone_init() | |
1658 | */ | |
1659 | static void | |
1660 | zleak_init(vm_size_t max_zonemap_size) | |
1661 | { | |
1662 | char scratch_buf[16]; | |
1663 | boolean_t zleak_enable_flag = FALSE; | |
1664 | ||
1665 | zleak_max_zonemap_size = max_zonemap_size; | |
1666 | zleak_global_tracking_threshold = max_zonemap_size / 2; | |
1667 | zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8; | |
1668 | ||
5ba3f43e A |
1669 | #if CONFIG_EMBEDDED |
1670 | if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) { | |
1671 | zleak_enable_flag = TRUE; | |
1672 | printf("zone leak detection enabled\n"); | |
1673 | } else { | |
1674 | zleak_enable_flag = FALSE; | |
1675 | printf("zone leak detection disabled\n"); | |
1676 | } | |
1677 | #else /* CONFIG_EMBEDDED */ | |
6d2010ae A |
1678 | /* -zleakoff (flag to disable zone leak monitor) */ |
1679 | if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) { | |
1680 | zleak_enable_flag = FALSE; | |
1681 | printf("zone leak detection disabled\n"); | |
1682 | } else { | |
1683 | zleak_enable_flag = TRUE; | |
1684 | printf("zone leak detection enabled\n"); | |
1685 | } | |
5ba3f43e | 1686 | #endif /* CONFIG_EMBEDDED */ |
6d2010ae A |
1687 | |
1688 | /* zfactor=XXXX (override how often to sample the zone allocator) */ | |
316670eb | 1689 | if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) { |
39236c6e | 1690 | printf("Zone leak factor override: %u\n", zleak_sample_factor); |
6d2010ae | 1691 | } |
316670eb | 1692 | |
6d2010ae A |
1693 | /* zleak-allocs=XXXX (override number of buckets in zallocations) */ |
1694 | if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) { | |
39236c6e | 1695 | printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets); |
6d2010ae A |
1696 | /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */ |
1697 | if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) { | |
39236c6e | 1698 | printf("Override isn't a power of two, bad things might happen!\n"); |
6d2010ae A |
1699 | } |
1700 | } | |
1701 | ||
1702 | /* zleak-traces=XXXX (override number of buckets in ztraces) */ | |
1703 | if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) { | |
39236c6e | 1704 | printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets); |
6d2010ae A |
1705 | /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */ |
1706 | if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) { | |
39236c6e | 1707 | printf("Override isn't a power of two, bad things might happen!\n"); |
6d2010ae A |
1708 | } |
1709 | } | |
1710 | ||
1711 | /* allocate the zleak_lock */ | |
1712 | lck_grp_attr_setdefault(&zleak_lock_grp_attr); | |
1713 | lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr); | |
1714 | lck_attr_setdefault(&zleak_lock_attr); | |
316670eb | 1715 | lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr); |
6d2010ae A |
1716 | |
1717 | if (zleak_enable_flag) { | |
1718 | zleak_state = ZLEAK_STATE_ENABLED; | |
1719 | } | |
1720 | } | |
1721 | ||
1722 | #if CONFIG_ZLEAKS | |
1723 | ||
1724 | /* | |
1725 | * Support for kern.zleak.active sysctl - a simplified | |
316670eb | 1726 | * version of the zleak_state variable. |
6d2010ae A |
1727 | */ |
1728 | int | |
1729 | get_zleak_state(void) | |
1730 | { | |
1731 | if (zleak_state & ZLEAK_STATE_FAILED) | |
1732 | return (-1); | |
1733 | if (zleak_state & ZLEAK_STATE_ACTIVE) | |
1734 | return (1); | |
1735 | return (0); | |
1736 | } | |
1737 | ||
1738 | #endif | |
1739 | ||
1740 | ||
1741 | kern_return_t | |
1742 | zleak_activate(void) | |
1743 | { | |
1744 | kern_return_t retval; | |
1745 | vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation); | |
1746 | vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace); | |
1747 | void *allocations_ptr = NULL; | |
1748 | void *traces_ptr = NULL; | |
1749 | ||
1750 | /* Only one thread attempts to activate at a time */ | |
1751 | if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) { | |
1752 | return KERN_SUCCESS; | |
1753 | } | |
1754 | ||
1755 | /* Indicate that we're doing the setup */ | |
316670eb | 1756 | lck_spin_lock(&zleak_lock); |
6d2010ae | 1757 | if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) { |
316670eb | 1758 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1759 | return KERN_SUCCESS; |
1760 | } | |
1761 | ||
1762 | zleak_state |= ZLEAK_STATE_ACTIVATING; | |
316670eb | 1763 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1764 | |
1765 | /* Allocate and zero tables */ | |
3e170ce0 | 1766 | retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_OSFMK); |
6d2010ae A |
1767 | if (retval != KERN_SUCCESS) { |
1768 | goto fail; | |
1769 | } | |
1770 | ||
3e170ce0 | 1771 | retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_OSFMK); |
6d2010ae A |
1772 | if (retval != KERN_SUCCESS) { |
1773 | goto fail; | |
1774 | } | |
1775 | ||
1776 | bzero(allocations_ptr, z_alloc_size); | |
1777 | bzero(traces_ptr, z_trace_size); | |
1778 | ||
1779 | /* Everything's set. Install tables, mark active. */ | |
1780 | zallocations = allocations_ptr; | |
1781 | ztraces = traces_ptr; | |
1782 | ||
1783 | /* | |
1784 | * Initialize the top_ztrace to the first entry in ztraces, | |
1785 | * so we don't have to check for null in zleak_log | |
1786 | */ | |
1787 | top_ztrace = &ztraces[0]; | |
1788 | ||
1789 | /* | |
1790 | * Note that we do need a barrier between installing | |
1791 | * the tables and setting the active flag, because the zfree() | |
1792 | * path accesses the table without a lock if we're active. | |
1793 | */ | |
316670eb | 1794 | lck_spin_lock(&zleak_lock); |
6d2010ae A |
1795 | zleak_state |= ZLEAK_STATE_ACTIVE; |
1796 | zleak_state &= ~ZLEAK_STATE_ACTIVATING; | |
316670eb | 1797 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1798 | |
1799 | return 0; | |
1800 | ||
1801 | fail: | |
1802 | /* | |
1803 | * If we fail to allocate memory, don't further tax | |
1804 | * the system by trying again. | |
1805 | */ | |
316670eb | 1806 | lck_spin_lock(&zleak_lock); |
6d2010ae A |
1807 | zleak_state |= ZLEAK_STATE_FAILED; |
1808 | zleak_state &= ~ZLEAK_STATE_ACTIVATING; | |
316670eb | 1809 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1810 | |
1811 | if (allocations_ptr != NULL) { | |
1812 | kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size); | |
1813 | } | |
1814 | ||
1815 | if (traces_ptr != NULL) { | |
1816 | kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size); | |
1817 | } | |
1818 | ||
1819 | return retval; | |
1820 | } | |
1821 | ||
1822 | /* | |
1823 | * TODO: What about allocations that never get deallocated, | |
1824 | * especially ones with unique backtraces? Should we wait to record | |
1825 | * until after boot has completed? | |
1826 | * (How many persistent zallocs are there?) | |
1827 | */ | |
1828 | ||
1829 | /* | |
1830 | * This function records the allocation in the allocations table, | |
1831 | * and stores the associated backtrace in the traces table | |
1832 | * (or just increments the refcount if the trace is already recorded) | |
1833 | * If the allocation slot is in use, the old allocation is replaced with the new allocation, and | |
1834 | * the associated trace's refcount is decremented. | |
1835 | * If the trace slot is in use, it returns. | |
1836 | * The refcount is incremented by the amount of memory the allocation consumes. | |
1837 | * The return value indicates whether to try again next time. | |
1838 | */ | |
1839 | static boolean_t | |
1840 | zleak_log(uintptr_t* bt, | |
1841 | uintptr_t addr, | |
1842 | uint32_t depth, | |
1843 | vm_size_t allocation_size) | |
1844 | { | |
1845 | /* Quit if there's someone else modifying the hash tables */ | |
316670eb | 1846 | if (!lck_spin_try_lock(&zleak_lock)) { |
6d2010ae A |
1847 | z_total_conflicts++; |
1848 | return FALSE; | |
1849 | } | |
1850 | ||
1851 | struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)]; | |
1852 | ||
1853 | uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets); | |
1854 | struct ztrace* trace = &ztraces[trace_index]; | |
1855 | ||
1856 | allocation->za_hit_count++; | |
1857 | trace->zt_hit_count++; | |
1858 | ||
1859 | /* | |
1860 | * If the allocation bucket we want to be in is occupied, and if the occupier | |
1861 | * has the same trace as us, just bail. | |
1862 | */ | |
1863 | if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) { | |
1864 | z_alloc_collisions++; | |
1865 | ||
316670eb | 1866 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1867 | return TRUE; |
1868 | } | |
1869 | ||
1870 | /* STEP 1: Store the backtrace in the traces array. */ | |
1871 | /* A size of zero indicates that the trace bucket is free. */ | |
1872 | ||
1873 | if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) { | |
1874 | /* | |
1875 | * Different unique trace with same hash! | |
1876 | * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated | |
1877 | * and get out of the way for later chances | |
1878 | */ | |
1879 | trace->zt_collisions++; | |
1880 | z_trace_collisions++; | |
1881 | ||
316670eb | 1882 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1883 | return TRUE; |
1884 | } else if (trace->zt_size > 0) { | |
1885 | /* Same trace, already added, so increment refcount */ | |
1886 | trace->zt_size += allocation_size; | |
1887 | } else { | |
1888 | /* Found an unused trace bucket, record the trace here! */ | |
1889 | if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */ | |
1890 | z_trace_overwrites++; | |
1891 | ||
1892 | z_trace_recorded++; | |
1893 | trace->zt_size = allocation_size; | |
1894 | memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) ); | |
1895 | ||
1896 | trace->zt_depth = depth; | |
1897 | trace->zt_collisions = 0; | |
1898 | } | |
1899 | ||
1900 | /* STEP 2: Store the allocation record in the allocations array. */ | |
1901 | ||
1902 | if (allocation->za_element != (uintptr_t) 0) { | |
1903 | /* | |
1904 | * Straight up replace any allocation record that was there. We don't want to do the work | |
1905 | * to preserve the allocation entries that were there, because we only record a subset of the | |
1906 | * allocations anyways. | |
1907 | */ | |
1908 | ||
1909 | z_alloc_collisions++; | |
1910 | ||
1911 | struct ztrace* associated_trace = &ztraces[allocation->za_trace_index]; | |
1912 | /* Knock off old allocation's size, not the new allocation */ | |
1913 | associated_trace->zt_size -= allocation->za_size; | |
1914 | } else if (allocation->za_trace_index != 0) { | |
1915 | /* Slot previously used but not currently in use */ | |
1916 | z_alloc_overwrites++; | |
1917 | } | |
1918 | ||
1919 | allocation->za_element = addr; | |
1920 | allocation->za_trace_index = trace_index; | |
1921 | allocation->za_size = allocation_size; | |
1922 | ||
1923 | z_alloc_recorded++; | |
1924 | ||
1925 | if (top_ztrace->zt_size < trace->zt_size) | |
1926 | top_ztrace = trace; | |
1927 | ||
316670eb | 1928 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1929 | return TRUE; |
1930 | } | |
1931 | ||
1932 | /* | |
1933 | * Free the allocation record and release the stacktrace. | |
1934 | * This should be as fast as possible because it will be called for every free. | |
1935 | */ | |
1936 | static void | |
1937 | zleak_free(uintptr_t addr, | |
1938 | vm_size_t allocation_size) | |
1939 | { | |
1940 | if (addr == (uintptr_t) 0) | |
1941 | return; | |
1942 | ||
1943 | struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)]; | |
1944 | ||
1945 | /* Double-checked locking: check to find out if we're interested, lock, check to make | |
1946 | * sure it hasn't changed, then modify it, and release the lock. | |
1947 | */ | |
c910b4d9 | 1948 | |
6d2010ae A |
1949 | if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) { |
1950 | /* if the allocation was the one, grab the lock, check again, then delete it */ | |
316670eb | 1951 | lck_spin_lock(&zleak_lock); |
6d2010ae A |
1952 | |
1953 | if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) { | |
1954 | struct ztrace *trace; | |
1955 | ||
1956 | /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */ | |
1957 | if (allocation->za_size != allocation_size) { | |
1958 | panic("Freeing as size %lu memory that was allocated with size %lu\n", | |
1959 | (uintptr_t)allocation_size, (uintptr_t)allocation->za_size); | |
1960 | } | |
1961 | ||
1962 | trace = &ztraces[allocation->za_trace_index]; | |
1963 | ||
1964 | /* size of 0 indicates trace bucket is unused */ | |
1965 | if (trace->zt_size > 0) { | |
1966 | trace->zt_size -= allocation_size; | |
1967 | } | |
1968 | ||
1969 | /* A NULL element means the allocation bucket is unused */ | |
1970 | allocation->za_element = 0; | |
1971 | } | |
316670eb | 1972 | lck_spin_unlock(&zleak_lock); |
6d2010ae A |
1973 | } |
1974 | } | |
1975 | ||
1976 | #endif /* CONFIG_ZLEAKS */ | |
1977 | ||
1978 | /* These functions are outside of CONFIG_ZLEAKS because they are also used in |
1979 | * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix. | |
1980 | */ | |
1981 | ||
6d2010ae A |
1982 | /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */ |
1983 | uintptr_t | |
1984 | hash_mix(uintptr_t x) | |
1985 | { | |
1986 | #ifndef __LP64__ | |
1987 | x += ~(x << 15); | |
1988 | x ^= (x >> 10); | |
1989 | x += (x << 3 ); | |
1990 | x ^= (x >> 6 ); | |
1991 | x += ~(x << 11); | |
1992 | x ^= (x >> 16); | |
1993 | #else | |
1994 | x += ~(x << 32); | |
1995 | x ^= (x >> 22); | |
1996 | x += ~(x << 13); | |
1997 | x ^= (x >> 8 ); | |
1998 | x += (x << 3 ); | |
1999 | x ^= (x >> 15); | |
2000 | x += ~(x << 27); | |
2001 | x ^= (x >> 31); | |
2002 | #endif | |
2003 | return x; | |
2004 | } | |
2005 | ||
2006 | uint32_t | |
2007 | hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size) | |
2008 | { | |
2009 | ||
2010 | uintptr_t hash = 0; | |
2011 | uintptr_t mask = max_size - 1; | |
2012 | ||
316670eb A |
2013 | while (depth) { |
2014 | hash += bt[--depth]; | |
6d2010ae A |
2015 | } |
2016 | ||
2017 | hash = hash_mix(hash) & mask; | |
2018 | ||
2019 | assert(hash < max_size); | |
2020 | ||
2021 | return (uint32_t) hash; | |
2022 | } | |
2023 | ||
2024 | /* | |
2025 | * TODO: Determine how well distributed this is | |
2026 | * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF, which is a clean bitmask | |
2027 | */ | |
2028 | uint32_t | |
2029 | hashaddr(uintptr_t pt, uint32_t max_size) | |
2030 | { | |
2031 | uintptr_t hash = 0; | |
2032 | uintptr_t mask = max_size - 1; | |
2033 | ||
2034 | hash = hash_mix(pt) & mask; | |
2035 | ||
2036 | assert(hash < max_size); | |
2037 | ||
2038 | return (uint32_t) hash; | |
2039 | } | |
2040 | ||
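/*
 * Typical use: each hash yields a bucket index, so the tables can be indexed without a
 * modulo as long as their sizes are powers of two, e.g.:
 *
 *	struct zallocation *alloc = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
 *	struct ztrace *trace = &ztraces[hashbacktrace(bt, depth, zleak_trace_buckets)];
 */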
2041 | /* End of all leak-detection code */ | |
2042 | #pragma mark - | |
2043 | ||
39037602 A |
2044 | #define ZONE_MAX_ALLOC_SIZE (32 * 1024) |
2045 | #define ZONE_ALLOC_FRAG_PERCENT(alloc_size, ele_size) (((alloc_size % ele_size) * 100) / alloc_size) | |
2046 | ||
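/*
 * ZONE_ALLOC_FRAG_PERCENT is the percentage of an allocation chunk left unused once it
 * has been carved into elements. For example, with 4K pages and a 3000-byte element:
 *
 *	 4K chunk:	 4096 % 3000 = 1096 wasted	->  1096 * 100 /  4096 = 26%
 *	12K chunk:	12288 % 3000 =  288 wasted	->   288 * 100 / 12288 =  2%
 *
 * so the sizing loop in zinit() below would settle on a 12K allocation size for such a zone.
 */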
5ba3f43e A |
2047 | /* Used to manage copying in of new zone names */ |
2048 | static vm_offset_t zone_names_start; | |
2049 | static vm_offset_t zone_names_next; | |
2050 | ||
2051 | static vm_size_t | |
2052 | compute_element_size(vm_size_t requested_size) | |
2053 | { | |
2054 | vm_size_t element_size = requested_size; | |
2055 | ||
2056 | /* Zone elements must fit both a next pointer and a backup pointer */ | |
2057 | vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2; | |
2058 | if (element_size < minimum_element_size) | |
2059 | element_size = minimum_element_size; | |
2060 | ||
2061 | /* | |
2062 | * Round element size to a multiple of sizeof(pointer) | |
2063 | * This also enforces that allocations will be aligned on pointer boundaries | |
2064 | */ | |
2065 | element_size = ((element_size-1) + sizeof(vm_offset_t)) - | |
2066 | ((element_size-1) % sizeof(vm_offset_t)); | |
2067 | ||
2068 | return element_size; | |
2069 | } | |
2070 | ||
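/*
 * For example, on LP64 (8-byte pointers):
 *
 *	compute_element_size(1)  returns 16	(raised to the minimum: next + backup pointer)
 *	compute_element_size(20) returns 24	(rounded up to a multiple of sizeof(vm_offset_t))
 */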
1c79356b A |
2071 | /* |
2072 | * zinit initializes a new zone. The zone data structures themselves | |
2073 | * are stored in a zone, which is initially a static structure that | |
2074 | * is initialized by zone_init. | |
2075 | */ | |
5ba3f43e | 2076 | |
1c79356b A |
2077 | zone_t |
2078 | zinit( | |
2079 | vm_size_t size, /* the size of an element */ | |
2080 | vm_size_t max, /* maximum memory to use */ | |
2081 | vm_size_t alloc, /* allocation size */ | |
91447636 | 2082 | const char *name) /* a name for the zone */ |
1c79356b | 2083 | { |
5ba3f43e A |
2084 | zone_t z; |
2085 | ||
2086 | size = compute_element_size(size); | |
7ddcb079 | 2087 | |
39037602 | 2088 | simple_lock(&all_zones_lock); |
5ba3f43e | 2089 | |
00867663 | 2090 | assert(num_zones < MAX_ZONES); |
5ba3f43e A |
2091 | assert(num_zones_in_use <= num_zones); |
2092 | ||
2093 | /* If possible, find a previously zdestroy'ed zone in the zone_array that we can reuse instead of initializing a new zone. */ | |
2094 | for (int index = bitmap_first(zone_empty_bitmap, MAX_ZONES); | |
2095 | index >= 0 && index < (int)num_zones; | |
2096 | index = bitmap_next(zone_empty_bitmap, index)) { | |
2097 | z = &(zone_array[index]); | |
2098 | ||
2099 | /* | |
2100 | * If the zone name and the element size are the same, we can just reuse the old zone struct. | |
2101 | * Otherwise hand out a new zone from the zone_array. | |
2102 | */ | |
2103 | if (!strcmp(z->zone_name, name)) { | |
2104 | vm_size_t old_size = z->elem_size; | |
2105 | #if KASAN_ZALLOC | |
2106 | old_size -= z->kasan_redzone * 2; | |
2107 | #endif | |
2108 | if (old_size == size) { | |
2109 | /* Clear the empty bit for this zone, increment num_zones_in_use, and mark the zone as valid again. */ | |
2110 | bitmap_clear(zone_empty_bitmap, index); | |
2111 | num_zones_in_use++; | |
2112 | z->zone_valid = TRUE; | |
2113 | ||
2114 | /* All other state is already set up since the zone was previously in use. Return early. */ | |
2115 | simple_unlock(&all_zones_lock); | |
2116 | return (z); | |
2117 | } | |
2118 | } | |
2119 | } | |
2120 | ||
2121 | /* If we're here, it means we didn't find a zone above that we could simply reuse. Set up a new zone. */ | |
2122 | ||
2123 | /* Clear the empty bit for the new zone */ | |
2124 | bitmap_clear(zone_empty_bitmap, num_zones); | |
2125 | ||
00867663 A |
2126 | z = &(zone_array[num_zones]); |
2127 | z->index = num_zones; | |
1c79356b | 2128 | |
5ba3f43e A |
2129 | num_zones++; |
2130 | num_zones_in_use++; | |
39236c6e | 2131 | |
1c79356b | 2132 | /* |
5ba3f43e A |
2133 | * Initialize the zone lock here before dropping the all_zones_lock. Otherwise we could race with |
2134 | * zalloc_async() and try to grab the zone lock before it has been initialized, causing a panic. | |
1c79356b | 2135 | */ |
5ba3f43e A |
2136 | lock_zone_init(z); |
2137 | ||
2138 | simple_unlock(&all_zones_lock); | |
39236c6e | 2139 | |
5ba3f43e A |
2140 | #if KASAN_ZALLOC |
2141 | /* Expand the zone allocation size to include the redzones. For page-multiple | |
2142 | * zones add a full guard page because they likely require alignment. kalloc | |
2143 | * and fakestack handle their own KASan state, so ignore those zones. */ | |
2144 | /* XXX: remove this when zinit_with_options() is a thing */ | |
2145 | const char *kalloc_name = "kalloc."; | |
2146 | const char *fakestack_name = "fakestack."; | |
2147 | if (strncmp(name, kalloc_name, strlen(kalloc_name)) == 0) { | |
2148 | z->kasan_redzone = 0; | |
2149 | } else if (strncmp(name, fakestack_name, strlen(fakestack_name)) == 0) { | |
2150 | z->kasan_redzone = 0; | |
2151 | } else { | |
2152 | if ((size % PAGE_SIZE) != 0) { | |
2153 | z->kasan_redzone = KASAN_GUARD_SIZE; | |
2154 | } else { | |
2155 | z->kasan_redzone = PAGE_SIZE; | |
2156 | } | |
2157 | max = (max / size) * (size + z->kasan_redzone * 2); | |
2158 | size += z->kasan_redzone * 2; | |
2159 | } | |
2160 | #endif | |
39236c6e | 2161 | |
5ba3f43e | 2162 | max = round_page(max); |
39236c6e | 2163 | |
39037602 | 2164 | vm_size_t best_alloc = PAGE_SIZE; |
5ba3f43e A |
2165 | |
2166 | if ((size % PAGE_SIZE) == 0) { | |
2167 | /* zero fragmentation by definition */ | |
2168 | best_alloc = size; | |
2169 | } else { | |
2170 | vm_size_t alloc_size; | |
2171 | for (alloc_size = (2 * PAGE_SIZE); alloc_size <= ZONE_MAX_ALLOC_SIZE; alloc_size += PAGE_SIZE) { | |
2172 | if (ZONE_ALLOC_FRAG_PERCENT(alloc_size, size) < ZONE_ALLOC_FRAG_PERCENT(best_alloc, size)) { | |
2173 | best_alloc = alloc_size; | |
2174 | } | |
1c79356b | 2175 | } |
1c79356b | 2176 | } |
5ba3f43e | 2177 | |
39037602 | 2178 | alloc = best_alloc; |
1c79356b A |
2179 | if (max && (max < alloc)) |
2180 | max = alloc; | |
2181 | ||
39236c6e A |
2182 | z->free_elements = NULL; |
2183 | queue_init(&z->pages.any_free_foreign); | |
2184 | queue_init(&z->pages.all_free); | |
2185 | queue_init(&z->pages.intermediate); | |
2186 | queue_init(&z->pages.all_used); | |
1c79356b | 2187 | z->cur_size = 0; |
39236c6e | 2188 | z->page_count = 0; |
1c79356b A |
2189 | z->max_size = max; |
2190 | z->elem_size = size; | |
2191 | z->alloc_size = alloc; | |
1c79356b | 2192 | z->count = 0; |
39236c6e | 2193 | z->countfree = 0; |
39037602 | 2194 | z->count_all_free_pages = 0; |
6d2010ae | 2195 | z->sum_count = 0LL; |
3e170ce0 A |
2196 | z->doing_alloc_without_vm_priv = FALSE; |
2197 | z->doing_alloc_with_vm_priv = FALSE; | |
1c79356b A |
2198 | z->exhaustible = FALSE; |
2199 | z->collectable = TRUE; | |
2200 | z->allows_foreign = FALSE; | |
2201 | z->expandable = TRUE; | |
2202 | z->waiting = FALSE; | |
0b4e3aa0 | 2203 | z->async_pending = FALSE; |
6d2010ae | 2204 | z->caller_acct = TRUE; |
0b4c1975 | 2205 | z->noencrypt = FALSE; |
7ddcb079 A |
2206 | z->no_callout = FALSE; |
2207 | z->async_prio_refill = FALSE; | |
316670eb A |
2208 | z->gzalloc_exempt = FALSE; |
2209 | z->alignment_required = FALSE; | |
39037602 | 2210 | z->zone_replenishing = FALSE; |
7ddcb079 A |
2211 | z->prio_refill_watermark = 0; |
2212 | z->zone_replenish_thread = NULL; | |
39236c6e | 2213 | z->zp_count = 0; |
5ba3f43e A |
2214 | z->kasan_quarantine = TRUE; |
2215 | z->zone_valid = TRUE; | |
39037602 | 2216 | |
6d2010ae | 2217 | #if CONFIG_ZLEAKS |
6d2010ae A |
2218 | z->zleak_capture = 0; |
2219 | z->zleak_on = FALSE; | |
2220 | #endif /* CONFIG_ZLEAKS */ | |
2221 | ||
5ba3f43e A |
2222 | /* |
2223 | * If the VM is ready to handle kmem_alloc requests, copy the zone name passed in. | |
2224 | * | |
2225 | * Else simply maintain a pointer to the name string. The only zones we'll actually have | |
2226 | * to do this for would be the VM-related zones that are created very early on before any | |
2227 | * kexts can be loaded (unloaded). So we should be fine with just a pointer in this case. | |
2228 | */ | |
2229 | if (kmem_alloc_ready) { | |
2230 | size_t len = MIN(strlen(name)+1, MACH_ZONE_NAME_MAX_LEN); | |
2231 | ||
2232 | if (zone_names_start == 0 || ((zone_names_next - zone_names_start) + len) > PAGE_SIZE) { | |
2233 | printf("zalloc: allocating memory for zone names buffer\n"); | |
2234 | kern_return_t retval = kmem_alloc_kobject(kernel_map, &zone_names_start, | |
2235 | PAGE_SIZE, VM_KERN_MEMORY_OSFMK); | |
2236 | if (retval != KERN_SUCCESS) { | |
2237 | panic("zalloc: zone_names memory allocation failed"); | |
2238 | } | |
2239 | bzero((char *)zone_names_start, PAGE_SIZE); | |
2240 | zone_names_next = zone_names_start; | |
2241 | } | |
2242 | ||
2243 | strlcpy((char *)zone_names_next, name, len); | |
2244 | z->zone_name = (char *)zone_names_next; | |
2245 | zone_names_next += len; | |
2246 | } else { | |
2247 | z->zone_name = name; | |
2248 | } | |
1c79356b | 2249 | |
c910b4d9 | 2250 | /* |
39037602 A |
2251 | * Check for and set up zone leak detection if requested via boot-args. We recognize two | |
2252 | * boot-args: | |
2253 | * | |
2254 | * zlog=<zone_to_log> | |
2255 | * zrecs=<num_records_in_log> | |
2256 | * | |
2257 | * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to | |
2258 | * control the size of the log. If zrecs is not specified, a default value is used. | |
c910b4d9 | 2259 | */ |
c910b4d9 | 2260 | |
39037602 | 2261 | if (num_zones_logged < max_num_zones_to_log) { |
7ddcb079 | 2262 | |
39037602 A |
2263 | int i = 1; /* zlog0 isn't allowed. */ |
2264 | boolean_t zone_logging_enabled = FALSE; | |
2265 | char zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */ | |
7ddcb079 | 2266 | |
39037602 | 2267 | while (i <= max_num_zones_to_log) { |
7ddcb079 | 2268 | |
39037602 A |
2269 | snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i); |
2270 | ||
2271 | if (PE_parse_boot_argn(zlog_name, zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) { | |
5ba3f43e A |
2272 | if (track_this_zone(z->zone_name, zone_name_to_log)) { |
2273 | if (z->zone_valid) { | |
2274 | z->zone_logging = TRUE; | |
2275 | zone_logging_enabled = TRUE; | |
2276 | num_zones_logged++; | |
2277 | break; | |
2278 | } | |
39037602 A |
2279 | } |
2280 | } | |
2281 | i++; | |
2282 | } | |
2283 | ||
2284 | if (zone_logging_enabled == FALSE) { | |
2285 | /* | |
2286 | * Backwards compat. with the old boot-arg used to specify single zone logging i.e. zlog | |
2287 | * Needs to happen after the newer zlogn checks because the prefix will match all the zlogn | |
2288 | * boot-args. | |
2289 | */ | |
2290 | if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) { | |
5ba3f43e A |
2291 | if (track_this_zone(z->zone_name, zone_name_to_log)) { |
2292 | if (z->zone_valid) { | |
39037602 A |
2293 | z->zone_logging = TRUE; |
2294 | zone_logging_enabled = TRUE; | |
2295 | num_zones_logged++; | |
5ba3f43e | 2296 | } |
39037602 A |
2297 | } |
2298 | } | |
2299 | } | |
2300 | ||
2301 | if (log_records_init == FALSE && zone_logging_enabled == TRUE) { | |
2302 | if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) { | |
2303 | /* | |
2304 | * Don't allow more than ZRECORDS_MAX records even if the user asked for more. | |
2305 | * This prevents accidentally hogging too much kernel memory and making the system | |
2306 | * unusable. | |
2307 | */ | |
2308 | ||
2309 | log_records = MIN(ZRECORDS_MAX, log_records); | |
2310 | log_records_init = TRUE; | |
2311 | } else { | |
2312 | log_records = ZRECORDS_DEFAULT; | |
2313 | log_records_init = TRUE; | |
2314 | } | |
2315 | } | |
2316 | ||
2317 | /* | |
2318 | * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are | |
2319 | * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to | |
2320 | * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one | |
2321 | * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again | |
2322 | * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized | |
2323 | * right now. | |
2324 | */ | |
2325 | if (kmem_alloc_ready) { | |
2326 | ||
2327 | zone_t curr_zone = NULL; | |
2328 | unsigned int max_zones = 0, zone_idx = 0; | |
2329 | ||
2330 | simple_lock(&all_zones_lock); | |
2331 | max_zones = num_zones; | |
2332 | simple_unlock(&all_zones_lock); | |
2333 | ||
2334 | for (zone_idx = 0; zone_idx < max_zones; zone_idx++) { | |
2335 | ||
2336 | curr_zone = &(zone_array[zone_idx]); | |
2337 | ||
5ba3f43e A |
2338 | if (!curr_zone->zone_valid) { |
2339 | continue; | |
2340 | } | |
2341 | ||
39037602 A |
2342 | /* |
2343 | * We work with the zone unlocked here because we could end up needing the zone lock to | |
2344 | * enable logging for this zone e.g. need a VM object to allocate memory to enable logging for the | |
2345 | * VM objects zone. | |
2346 | * | |
2347 | * We don't expect these zones to be needed at this early a time in boot and so take this chance. | |
2348 | */ | |
2349 | if (curr_zone->zone_logging && curr_zone->zlog_btlog == NULL) { | |
2350 | ||
2351 | curr_zone->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */); | |
2352 | ||
2353 | if (curr_zone->zlog_btlog) { | |
2354 | ||
2355 | printf("zone: logging started for zone %s\n", curr_zone->zone_name); | |
2356 | } else { | |
2357 | printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n"); | |
2358 | curr_zone->zone_logging = FALSE; | |
2359 | } | |
2360 | } | |
2361 | ||
2362 | } | |
2363 | } | |
2364 | } | |
2365 | ||
2366 | #if CONFIG_GZALLOC | |
2367 | gzalloc_zone_init(z); | |
2368 | #endif | |
5ba3f43e | 2369 | |
39037602 A |
2370 | return(z); |
2371 | } | |
2372 | unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count; | |
2373 | ||
2374 | static void zone_replenish_thread(zone_t); | |
2375 | ||
2376 | /* High priority VM privileged thread used to asynchronously refill a designated | |
2377 | * zone, such as the reserved VM map entry zone. | |
2378 | */ | |
2379 | __attribute__((noreturn)) | |
2380 | static void | |
2381 | zone_replenish_thread(zone_t z) | |
2382 | { | |
2383 | vm_size_t free_size; | |
2384 | current_thread()->options |= TH_OPT_VMPRIV; | |
2385 | ||
2386 | for (;;) { | |
7ddcb079 | 2387 | lock_zone(z); |
5ba3f43e | 2388 | assert(z->zone_valid); |
39037602 | 2389 | z->zone_replenishing = TRUE; |
7ddcb079 A |
2390 | assert(z->prio_refill_watermark != 0); |
2391 | while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) { | |
3e170ce0 A |
2392 | assert(z->doing_alloc_without_vm_priv == FALSE); |
2393 | assert(z->doing_alloc_with_vm_priv == FALSE); | |
7ddcb079 A |
2394 | assert(z->async_prio_refill == TRUE); |
2395 | ||
2396 | unlock_zone(z); | |
2397 | int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT; | |
2398 | vm_offset_t space, alloc_size; | |
2399 | kern_return_t kr; | |
2400 | ||
2401 | if (vm_pool_low()) | |
2402 | alloc_size = round_page(z->elem_size); | |
2403 | else | |
2404 | alloc_size = z->alloc_size; | |
2405 | ||
2406 | if (z->noencrypt) | |
2407 | zflags |= KMA_NOENCRYPT; | |
2408 | ||
5ba3f43e A |
2409 | /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ |
2410 | if (is_zone_map_nearing_exhaustion()) { | |
2411 | thread_wakeup((event_t) &vm_pageout_garbage_collect); | |
2412 | } | |
2413 | ||
3e170ce0 | 2414 | kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); |
7ddcb079 A |
2415 | |
2416 | if (kr == KERN_SUCCESS) { | |
7ddcb079 A |
2417 | zcram(z, space, alloc_size); |
2418 | } else if (kr == KERN_RESOURCE_SHORTAGE) { | |
2419 | VM_PAGE_WAIT(); | |
2420 | } else if (kr == KERN_NO_SPACE) { | |
3e170ce0 | 2421 | kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); |
7ddcb079 | 2422 | if (kr == KERN_SUCCESS) { |
7ddcb079 A |
2423 | zcram(z, space, alloc_size); |
2424 | } else { | |
2425 | assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC); | |
2426 | thread_block(THREAD_CONTINUE_NULL); | |
2427 | } | |
2428 | } | |
2429 | ||
2430 | lock_zone(z); | |
5ba3f43e | 2431 | assert(z->zone_valid); |
7ddcb079 A |
2432 | zone_replenish_loops++; |
2433 | } | |
2434 | ||
39037602 | 2435 | z->zone_replenishing = FALSE; |
39236c6e A |
2436 | /* Signal any potential throttled consumers, terminating |
2437 | * their timer-bounded waits. | |
2438 | */ | |
2439 | thread_wakeup(z); | |
2440 | ||
7ddcb079 | 2441 | assert_wait(&z->zone_replenish_thread, THREAD_UNINT); |
39037602 | 2442 | unlock_zone(z); |
7ddcb079 A |
2443 | thread_block(THREAD_CONTINUE_NULL); |
2444 | zone_replenish_wakeups++; | |
2445 | } | |
2446 | } | |
2447 | ||
2448 | void | |
2449 | zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) { | |
2450 | z->prio_refill_watermark = low_water_mark; | |
2451 | ||
2452 | z->async_prio_refill = TRUE; | |
2453 | OSMemoryBarrier(); | |
2454 | kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread); | |
2455 | ||
2456 | if (tres != KERN_SUCCESS) { | |
2457 | panic("zone_prio_refill_configure, thread create: 0x%x", tres); | |
2458 | } | |
2459 | ||
2460 | thread_deallocate(z->zone_replenish_thread); | |
2461 | } | |
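/*
 * A minimal usage sketch (zone name and element type are illustrative only): create the
 * zone, then ask for an asynchronously maintained reserve:
 *
 *	zone_t z = zinit(sizeof(struct my_reserved_item),
 *	    4096 * sizeof(struct my_reserved_item), PAGE_SIZE, "my reserved items");
 *	zone_prio_refill_configure(z, 64);	// refill whenever fewer than 64 elements are free
 */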
1c79356b | 2462 | |
5ba3f43e A |
2463 | void |
2464 | zdestroy(zone_t z) | |
39037602 | 2465 | { |
5ba3f43e | 2466 | unsigned int zindex; |
39037602 | 2467 | |
5ba3f43e | 2468 | assert(z != NULL); |
39037602 | 2469 | |
5ba3f43e A |
2470 | lock_zone(z); |
2471 | assert(z->zone_valid); | |
2472 | ||
2473 | /* Assert that the zone does not have any allocations in flight */ | |
2474 | assert(z->doing_alloc_without_vm_priv == FALSE); | |
2475 | assert(z->doing_alloc_with_vm_priv == FALSE); | |
2476 | assert(z->async_pending == FALSE); | |
2477 | assert(z->waiting == FALSE); | |
2478 | assert(z->async_prio_refill == FALSE); | |
2479 | ||
2480 | #if !KASAN_ZALLOC | |
2481 | /* | |
2482 | * Unset the valid bit. We'll hit an assert failure on further operations on this zone, until zinit() is called again. | |
2483 | * Leave the zone valid for KASan as we will see zfree's on quarantined free elements even after the zone is destroyed. | |
2484 | */ | |
2485 | z->zone_valid = FALSE; | |
2486 | #endif | |
2487 | unlock_zone(z); | |
2488 | ||
2489 | /* Dump all the free elements */ | |
2490 | drop_free_elements(z); | |
2491 | ||
2492 | #if CONFIG_GZALLOC | |
2493 | /* If the zone is gzalloc managed dump all the elements in the free cache */ | |
2494 | gzalloc_empty_free_cache(z); | |
2495 | #endif | |
2496 | ||
2497 | lock_zone(z); | |
2498 | ||
2499 | #if !KASAN_ZALLOC | |
2500 | /* Assert that all counts are zero */ | |
2501 | assert(z->count == 0); | |
2502 | assert(z->countfree == 0); | |
2503 | assert(z->cur_size == 0); | |
2504 | assert(z->page_count == 0); | |
2505 | assert(z->count_all_free_pages == 0); | |
2506 | ||
2507 | /* Assert that all queues except the foreign queue are empty. The zone allocator doesn't know how to free up foreign memory. */ | |
2508 | assert(queue_empty(&z->pages.all_used)); | |
2509 | assert(queue_empty(&z->pages.intermediate)); | |
2510 | assert(queue_empty(&z->pages.all_free)); | |
2511 | #endif | |
2512 | ||
2513 | zindex = z->index; | |
2514 | ||
2515 | unlock_zone(z); | |
2516 | ||
2517 | simple_lock(&all_zones_lock); | |
2518 | ||
2519 | assert(!bitmap_test(zone_empty_bitmap, zindex)); | |
2520 | /* Mark the zone as empty in the bitmap */ | |
2521 | bitmap_set(zone_empty_bitmap, zindex); | |
2522 | num_zones_in_use--; | |
2523 | assert(num_zones_in_use > 0); | |
2524 | ||
2525 | simple_unlock(&all_zones_lock); | |
2526 | } | |
2527 | ||
2528 | /* Initialize the metadata for an allocation chunk */ | |
2529 | static inline void | |
2530 | zcram_metadata_init(vm_offset_t newmem, vm_size_t size, struct zone_page_metadata *chunk_metadata) | |
2531 | { | |
2532 | struct zone_page_metadata *page_metadata; | |
2533 | ||
2534 | /* The first page is the real metadata for this allocation chunk. We mark the others as fake metadata */ | |
2535 | size -= PAGE_SIZE; | |
2536 | newmem += PAGE_SIZE; | |
2537 | ||
2538 | for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) { | |
2539 | page_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE); | |
39037602 A |
2540 | assert(page_metadata != chunk_metadata); |
2541 | PAGE_METADATA_SET_ZINDEX(page_metadata, MULTIPAGE_METADATA_MAGIC); | |
2542 | page_metadata_set_realmeta(page_metadata, chunk_metadata); | |
2543 | page_metadata->free_count = 0; | |
2544 | } | |
2545 | return; | |
2546 | } | |
2547 | ||
5c9f4661 | 2548 | static void |
4bd07ac2 A |
2549 | random_free_to_zone( |
2550 | zone_t zone, | |
2551 | vm_offset_t newmem, | |
2552 | vm_offset_t first_element_offset, | |
2553 | int element_count, | |
5c9f4661 | 2554 | unsigned int *entropy_buffer) |
4bd07ac2 A |
2555 | { |
2556 | vm_offset_t last_element_offset; | |
2557 | vm_offset_t element_addr; | |
2558 | vm_size_t elem_size; | |
5c9f4661 | 2559 | int index; |
4bd07ac2 | 2560 | |
5ba3f43e | 2561 | assert(element_count <= ZONE_CHUNK_MAXELEMENTS); |
4bd07ac2 A |
2562 | elem_size = zone->elem_size; |
2563 | last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size); | |
2564 | for (index = 0; index < element_count; index++) { | |
2565 | assert(first_element_offset <= last_element_offset); | |
39037602 A |
2566 | if ( |
2567 | #if DEBUG || DEVELOPMENT | |
5ba3f43e | 2568 | leak_scan_debug_flag || __improbable(zone->tags) || |
39037602 | 2569 | #endif /* DEBUG || DEVELOPMENT */ |
5c9f4661 | 2570 | random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) { |
4bd07ac2 A |
2571 | element_addr = newmem + first_element_offset; |
2572 | first_element_offset += elem_size; | |
2573 | } else { | |
2574 | element_addr = newmem + last_element_offset; | |
2575 | last_element_offset -= elem_size; | |
2576 | } | |
2577 | if (element_addr != (vm_offset_t)zone) { | |
2578 | zone->count++; /* compensate for free_to_zone */ | |
2579 | free_to_zone(zone, element_addr, FALSE); | |
2580 | } | |
4bd07ac2 A |
2581 | zone->cur_size += elem_size; |
2582 | } | |
2583 | } | |
2584 | ||
1c79356b | 2585 | /* |
3e170ce0 | 2586 | * Cram the given memory into the specified zone. Update the zone page count accordingly. |
1c79356b A |
2587 | */ |
2588 | void | |
2589 | zcram( | |
7ddcb079 A |
2590 | zone_t zone, |
2591 | vm_offset_t newmem, | |
1c79356b A |
2592 | vm_size_t size) |
2593 | { | |
7ddcb079 A |
2594 | vm_size_t elem_size; |
2595 | boolean_t from_zm = FALSE; | |
4bd07ac2 | 2596 | int element_count; |
5c9f4661 | 2597 | unsigned int entropy_buffer[MAX_ENTROPY_PER_ZCRAM] = { 0 }; |
1c79356b A |
2598 | |
2599 | /* Basic sanity checks */ | |
2600 | assert(zone != ZONE_NULL && newmem != (vm_offset_t)0); | |
2601 | assert(!zone->collectable || zone->allows_foreign | |
55e303ae | 2602 | || (from_zone_map(newmem, size))); |
1c79356b A |
2603 | |
2604 | elem_size = zone->elem_size; | |
2605 | ||
5ba3f43e | 2606 | KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, zone->index, size); |
4bd07ac2 | 2607 | |
7ddcb079 A |
2608 | if (from_zone_map(newmem, size)) |
2609 | from_zm = TRUE; | |
2610 | ||
39037602 | 2611 | if (!from_zm) { |
5c9f4661 A |
2612 | /* We cannot support elements larger than page size for foreign memory because we |
2613 | * put metadata on the page itself for each page of foreign memory. We need to do | |
2614 | * this in order to be able to reach the metadata when any element is freed | |
39037602 A |
2615 | */ |
2616 | assert((zone->allows_foreign == TRUE) && (zone->elem_size <= (PAGE_SIZE - sizeof(struct zone_page_metadata)))); | |
5c9f4661 | 2617 | } |
39037602 | 2618 | |
39236c6e A |
2619 | if (zalloc_debug & ZALLOC_DEBUG_ZCRAM) |
2620 | kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name, | |
2621 | (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size); | |
2622 | ||
3e170ce0 A |
2623 | ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE)); |
2624 | ||
5c9f4661 A |
2625 | /* |
2626 | * Initialize the metadata for all pages. We dont need the zone lock | |
39037602 A |
2627 | * here because we are not manipulating any zone related state yet. |
2628 | */ | |
39236c6e | 2629 | |
39037602 A |
2630 | struct zone_page_metadata *chunk_metadata; |
2631 | size_t zone_page_metadata_size = sizeof(struct zone_page_metadata); | |
39236c6e | 2632 | |
39037602 A |
2633 | assert((newmem & PAGE_MASK) == 0); |
2634 | assert((size & PAGE_MASK) == 0); | |
39236c6e | 2635 | |
39037602 A |
2636 | chunk_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE); |
2637 | chunk_metadata->pages.next = NULL; | |
2638 | chunk_metadata->pages.prev = NULL; | |
2639 | page_metadata_set_freelist(chunk_metadata, 0); | |
2640 | PAGE_METADATA_SET_ZINDEX(chunk_metadata, zone->index); | |
2641 | chunk_metadata->free_count = 0; | |
5ba3f43e A |
2642 | assert((size / PAGE_SIZE) <= ZONE_CHUNK_MAXPAGES); |
2643 | chunk_metadata->page_count = (unsigned)(size / PAGE_SIZE); | |
39236c6e | 2644 | |
39037602 | 2645 | zcram_metadata_init(newmem, size, chunk_metadata); |
39236c6e | 2646 | |
5ba3f43e A |
2647 | #if VM_MAX_TAG_ZONES |
2648 | if (__improbable(zone->tags)) { | |
2649 | assert(from_zm); | |
2650 | ztMemoryAdd(zone, newmem, size); | |
2651 | } | |
2652 | #endif /* VM_MAX_TAG_ZONES */ | |
2653 | ||
39037602 | 2654 | lock_zone(zone); |
5ba3f43e | 2655 | assert(zone->zone_valid); |
39037602 A |
2656 | enqueue_tail(&zone->pages.all_used, &(chunk_metadata->pages)); |
2657 | ||
2658 | if (!from_zm) { | |
2659 | /* We cannot support elements larger than page size for foreign memory because we | |
2660 | * put metadata on the page itself for each page of foreign memory. We need to do | |
2661 | * this in order to be able to reach the metadata when any element is freed | |
2662 | */ | |
2663 | ||
2664 | for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) { | |
2665 | vm_offset_t first_element_offset = 0; | |
3e170ce0 A |
2666 | if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0){ |
2667 | first_element_offset = zone_page_metadata_size; | |
2668 | } else { | |
2669 | first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT)); | |
2670 | } | |
4bd07ac2 | 2671 | element_count = (int)((PAGE_SIZE - first_element_offset) / elem_size); |
39037602 | 2672 | random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer); |
39236c6e | 2673 | } |
39037602 A |
2674 | } else { |
2675 | element_count = (int)(size / elem_size); | |
2676 | random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer); | |
1c79356b A |
2677 | } |
2678 | unlock_zone(zone); | |
4bd07ac2 | 2679 | |
5ba3f43e | 2680 | KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zone->index); |
4bd07ac2 | 2681 | |
1c79356b A |
2682 | } |
2683 | ||
1c79356b A |
2684 | /* |
2685 | * Fill a zone with enough memory to contain at least nelem elements. | |
1c79356b A |
2686 | * Return the number of elements actually put into the zone, which may |
2687 | * be more than the caller asked for since the memory allocation is | |
5ba3f43e | 2688 | * rounded up to the next zone allocation size. |
1c79356b A |
2689 | */ |
2690 | int | |
2691 | zfill( | |
2692 | zone_t zone, | |
2693 | int nelem) | |
2694 | { | |
5ba3f43e | 2695 | kern_return_t kr; |
1c79356b | 2696 | vm_offset_t memory; |
1c79356b | 2697 | |
5ba3f43e A |
2698 | vm_size_t alloc_size = zone->alloc_size; |
2699 | vm_size_t elem_per_alloc = alloc_size / zone->elem_size; | |
2700 | vm_size_t nalloc = (nelem + elem_per_alloc - 1) / elem_per_alloc; | |
2701 | ||
2702 | /* Don't mix-and-match zfill with foreign memory */ | |
2703 | assert(!zone->allows_foreign); | |
2704 | ||
2705 | /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ | |
2706 | if (is_zone_map_nearing_exhaustion()) { | |
2707 | thread_wakeup((event_t) &vm_pageout_garbage_collect); | |
2708 | } | |
2709 | ||
2710 | kr = kernel_memory_allocate(zone_map, &memory, nalloc * alloc_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_ZONE); | |
2711 | if (kr != KERN_SUCCESS) { | |
2712 | printf("%s: kernel_memory_allocate() of %lu bytes failed\n", | |
2713 | __func__, (unsigned long)(nalloc * alloc_size)); | |
1c79356b | 2714 | return 0; |
5ba3f43e | 2715 | } |
1c79356b | 2716 | |
5ba3f43e A |
2717 | for (vm_size_t i = 0; i < nalloc; i++) { |
2718 | zcram(zone, memory + i * alloc_size, alloc_size); | |
2719 | } | |
1c79356b | 2720 | |
5ba3f43e | 2721 | return (int)(nalloc * elem_per_alloc); |
1c79356b A |
2722 | } |
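A minimal usage sketch of zfill(), assuming a hypothetical subsystem; the zone name, element type, and counts below are illustrative and do not appear in this file:

	/* Hypothetical caller: pre-populate a private zone during subsystem init. */
	struct my_elem { uint64_t a, b; };              /* placeholder element type */
	static zone_t my_elem_zone;

	static void
	my_subsystem_init(void)
	{
		my_elem_zone = zinit(sizeof(struct my_elem),          /* element size */
		                     4096 * sizeof(struct my_elem),   /* max zone size */
		                     PAGE_SIZE,                       /* allocation chunk */
		                     "example.my_elem");              /* zone name */
		/* zfill() rounds the request up to whole allocation chunks and
		 * returns how many elements were actually added to the zone. */
		int filled = zfill(my_elem_zone, 256);
		assert(filled >= 256);
	}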
2723 | ||
2724 | /* | |
2725 | * Initialize the "zone of zones" which uses fixed memory allocated | |
2726 | * earlier in memory initialization. zone_bootstrap is called | |
2727 | * before zone_init. | |
2728 | */ | |
2729 | void | |
2730 | zone_bootstrap(void) | |
2731 | { | |
2d21ac55 A |
2732 | char temp_buf[16]; |
2733 | ||
39236c6e A |
2734 | if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug))) |
2735 | zalloc_debug = 0; | |
316670eb | 2736 | |
39236c6e A |
2737 | /* Set up zone element poisoning */ |
2738 | zp_init(); | |
c910b4d9 | 2739 | |
5c9f4661 | 2740 | random_bool_init(&zone_bool_gen); |
4bd07ac2 | 2741 | |
39236c6e A |
2742 | /* should zlog log to debug zone corruption instead of leaks? */ |
2743 | if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) { | |
2744 | corruption_debug_flag = TRUE; | |
2745 | } | |
6d2010ae | 2746 | |
39037602 | 2747 | #if DEBUG || DEVELOPMENT |
5ba3f43e A |
2748 | #if VM_MAX_TAG_ZONES |
2749 | /* enable tags for zones that ask for it */ |
2750 | if (PE_parse_boot_argn("-zt", temp_buf, sizeof(temp_buf))) { | |
2751 | zone_tagging_on = TRUE; | |
2752 | } | |
2753 | #endif /* VM_MAX_TAG_ZONES */ | |
39037602 A |
2754 | /* disable element location randomization in a page */ |
2755 | if (PE_parse_boot_argn("-zl", temp_buf, sizeof(temp_buf))) { | |
2756 | leak_scan_debug_flag = TRUE; | |
2d21ac55 | 2757 | } |
39037602 | 2758 | #endif |
1c79356b | 2759 | |
91447636 | 2760 | simple_lock_init(&all_zones_lock, 0); |
1c79356b | 2761 | |
5ba3f43e | 2762 | num_zones_in_use = 0; |
1c79356b | 2763 | num_zones = 0; |
5ba3f43e A |
2764 | /* Mark all zones as empty */ |
2765 | bitmap_full(zone_empty_bitmap, BITMAP_LEN(MAX_ZONES)); | |
2766 | zone_names_next = zone_names_start = 0; | |
2767 | ||
2768 | #if DEBUG || DEVELOPMENT | |
2769 | simple_lock_init(&zone_test_lock, 0); | |
2770 | #endif /* DEBUG || DEVELOPMENT */ | |
2771 | ||
39236c6e | 2772 | thread_call_setup(&call_async_alloc, zalloc_async, NULL); |
1c79356b | 2773 | |
39236c6e A |
2774 | /* initializing global lock group for zones */ |
2775 | lck_grp_attr_setdefault(&zone_locks_grp_attr); | |
2776 | lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr); | |
2777 | ||
39037602 A |
2778 | lck_attr_setdefault(&zone_metadata_lock_attr); |
2779 | lck_mtx_init_ext(&zone_metadata_region_lck, &zone_metadata_region_lck_ext, &zone_locks_grp, &zone_metadata_lock_attr); | |
1c79356b A |
2780 | } |
2781 | ||
5ba3f43e A |
2782 | /* |
2783 | * We're being very conservative here and picking a value of 95%. We might need to lower this if | |
2784 | * we find that we're not catching the problem and are still hitting zone map exhaustion panics. | |
2785 | */ | |
2786 | #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95 | |
2787 | ||
2788 | /* | |
2789 | * Trigger zone-map-exhaustion jetsams if the zone map is X% full, where X=zone_map_jetsam_limit. | |
2790 | * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default. | |
2791 | */ | |
2792 | unsigned int zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT; | |
2793 | ||
2794 | /* | |
2795 | * Returns pid of the task with the largest number of VM map entries. | |
2796 | */ | |
2797 | extern pid_t find_largest_process_vm_map_entries(void); | |
2798 | ||
2799 | /* | |
2800 | * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills. | |
2801 | * For any other pid we try to kill that process synchronously. | |
2802 | */ | |
2803 | boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid); | |
2804 | ||
2805 | void get_zone_map_size(uint64_t *current_size, uint64_t *capacity) | |
2806 | { | |
2807 | *current_size = zone_map->size; | |
2808 | *capacity = vm_map_max(zone_map) - vm_map_min(zone_map); | |
2809 | } | |
2810 | ||
2811 | void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size) | |
2812 | { | |
2813 | zone_t largest_zone = zone_find_largest(); | |
2814 | strlcpy(zone_name, largest_zone->zone_name, zone_name_len); | |
2815 | *zone_size = largest_zone->cur_size; | |
2816 | } | |
2817 | ||
2818 | boolean_t is_zone_map_nearing_exhaustion(void) | |
2819 | { | |
2820 | uint64_t size = zone_map->size; | |
2821 | uint64_t capacity = vm_map_max(zone_map) - vm_map_min(zone_map); | |
2822 | if (size > ((capacity * zone_map_jetsam_limit) / 100)) { | |
2823 | return TRUE; | |
2824 | } | |
2825 | return FALSE; | |
2826 | } | |
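A worked example of the check above, assuming (purely for illustration) a 1 GiB zone map and the default 95% limit:

	/* Illustrative only: the same arithmetic as is_zone_map_nearing_exhaustion(). */
	uint64_t capacity  = 1ULL << 30;                                /* pretend the zone map spans 1 GiB */
	uint64_t threshold = (capacity * zone_map_jetsam_limit) / 100;  /* ~972 MiB at the default 95% */
	/* The function returns TRUE once zone_map->size exceeds this threshold,
	 * which is what wakes vm_pageout_garbage_collect and can lead to
	 * zone-map-exhaustion jetsams. */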
2827 | ||
2828 | extern zone_t vm_map_entry_zone; | |
2829 | extern zone_t vm_object_zone; | |
2830 | ||
2831 | #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98 | |
2832 | ||
2833 | /* | |
2834 | * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread | |
2835 | * to walk through the jetsam priority bands and kill processes. | |
2836 | */ | |
2837 | static void kill_process_in_largest_zone(void) | |
2838 | { | |
2839 | pid_t pid = -1; | |
2840 | zone_t largest_zone = zone_find_largest(); | |
2841 | ||
2842 | printf("zone_map_exhaustion: Zone map size %lld, capacity %lld [jetsam limit %d%%]\n", (uint64_t)zone_map->size, | |
2843 | (uint64_t)(vm_map_max(zone_map) - vm_map_min(zone_map)), zone_map_jetsam_limit); | |
2844 | printf("zone_map_exhaustion: Largest zone %s, size %lu\n", largest_zone->zone_name, (uintptr_t)largest_zone->cur_size); | |
2845 | ||
2846 | /* | |
2847 | * We want to make sure we don't call this function from userspace, or we could end up trying to synchronously kill the process |
2848 | * whose context we're in, causing the system to hang. | |
2849 | */ | |
2850 | assert(current_task() == kernel_task); | |
2851 | ||
2852 | /* | |
2853 | * If vm_object_zone is the largest, check to see if the number of elements in vm_map_entry_zone is comparable. If so, consider | |
2854 | * vm_map_entry_zone as the largest. This lets us target a specific process to jetsam to quickly recover from the zone map bloat. | |
2855 | */ | |
2856 | if (largest_zone == vm_object_zone) { | |
2857 | int vm_object_zone_count = vm_object_zone->count; | |
2858 | int vm_map_entry_zone_count = vm_map_entry_zone->count; | |
2859 | /* Is the VM map entries zone count >= 98% of the VM objects zone count? */ | |
2860 | if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) { | |
2861 | largest_zone = vm_map_entry_zone; | |
2862 | printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n", (uintptr_t)largest_zone->cur_size); | |
2863 | } | |
2864 | } | |
2865 | ||
2866 | /* TODO: Extend this to check for the largest process in other zones as well. */ | |
2867 | if (largest_zone == vm_map_entry_zone) { | |
2868 | pid = find_largest_process_vm_map_entries(); | |
2869 | } else { | |
2870 | printf("zone_map_exhaustion: Nothing to do for the largest zone [%s]. Waking up memorystatus thread.\n", largest_zone->zone_name); | |
2871 | } | |
2872 | if (!memorystatus_kill_on_zone_map_exhaustion(pid)) { | |
2873 | printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid); | |
2874 | } | |
2875 | } | |
2876 | ||
39236c6e A |
2877 | /* Global initialization of Zone Allocator. |
2878 | * Runs after zone_bootstrap. | |
2879 | */ | |
1c79356b A |
2880 | void |
2881 | zone_init( | |
2882 | vm_size_t max_zonemap_size) | |
2883 | { | |
2884 | kern_return_t retval; | |
2885 | vm_offset_t zone_min; | |
2886 | vm_offset_t zone_max; | |
39037602 A |
2887 | vm_offset_t zone_metadata_space; |
2888 | unsigned int zone_pages; | |
5ba3f43e A |
2889 | vm_map_kernel_flags_t vmk_flags; |
2890 | ||
2891 | #if VM_MAX_TAG_ZONES | |
2892 | if (zone_tagging_on) ztInit(max_zonemap_size, &zone_locks_grp); | |
2893 | #endif | |
1c79356b | 2894 | |
5ba3f43e A |
2895 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; |
2896 | vmk_flags.vmkf_permanent = TRUE; | |
1c79356b | 2897 | retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size, |
5ba3f43e | 2898 | FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, |
b0d623f7 | 2899 | &zone_map); |
91447636 | 2900 | |
1c79356b A |
2901 | if (retval != KERN_SUCCESS) |
2902 | panic("zone_init: kmem_suballoc failed"); | |
91447636 | 2903 | zone_max = zone_min + round_page(max_zonemap_size); |
316670eb A |
2904 | #if CONFIG_GZALLOC |
2905 | gzalloc_init(max_zonemap_size); | |
2906 | #endif | |
1c79356b A |
2907 | /* |
2908 | * Set up garbage collection information: |
2909 | */ | |
1c79356b A |
2910 | zone_map_min_address = zone_min; |
2911 | zone_map_max_address = zone_max; | |
7ddcb079 | 2912 | |
39037602 A |
2913 | zone_pages = (unsigned int)atop_kernel(zone_max - zone_min); |
2914 | zone_metadata_space = round_page(zone_pages * sizeof(struct zone_page_metadata)); | |
2915 | retval = kernel_memory_allocate(zone_map, &zone_metadata_region_min, zone_metadata_space, | |
2916 | 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK); | |
2917 | if (retval != KERN_SUCCESS) | |
2918 | panic("zone_init: zone_metadata_region initialization failed!"); | |
2919 | zone_metadata_region_max = zone_metadata_region_min + zone_metadata_space; | |
2920 | ||
fe8ab488 A |
2921 | #if defined(__LP64__) |
2922 | /* | |
2923 | * ensure that any vm_page_t that gets created from | |
2924 | * the vm_page zone can be packed properly (see vm_page.h | |
2925 | * for the packing requirements) |
2926 | */ | |
39037602 A |
2927 | if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max) |
2928 | panic("VM_PAGE_PACK_PTR failed on zone_metadata_region_max - %p", (void *)zone_metadata_region_max); | |
fe8ab488 | 2929 | |
39037602 | 2930 | if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address) |
fe8ab488 A |
2931 | panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address); |
2932 | #endif | |
2933 | ||
39236c6e A |
2934 | lck_grp_attr_setdefault(&zone_gc_lck_grp_attr); |
2935 | lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr); | |
2936 | lck_attr_setdefault(&zone_gc_lck_attr); | |
2937 | lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr); | |
b0d623f7 | 2938 | |
6d2010ae A |
2939 | #if CONFIG_ZLEAKS |
2940 | /* | |
2941 | * Initialize the zone leak monitor | |
2942 | */ | |
2943 | zleak_init(max_zonemap_size); | |
2944 | #endif /* CONFIG_ZLEAKS */ | |
5ba3f43e A |
2945 | |
2946 | #if VM_MAX_TAG_ZONES | |
2947 | if (zone_tagging_on) vm_allocation_zones_init(); | |
2948 | #endif | |
2949 | ||
2950 | int jetsam_limit_temp = 0; | |
2951 | if (PE_parse_boot_argn("zone_map_jetsam_limit", &jetsam_limit_temp, sizeof (jetsam_limit_temp)) && | |
2952 | jetsam_limit_temp > 0 && jetsam_limit_temp <= 100) | |
2953 | zone_map_jetsam_limit = jetsam_limit_temp; | |
1c79356b A |
2954 | } |
2955 | ||
b0d623f7 | 2956 | extern volatile SInt32 kfree_nop_count; |
1c79356b | 2957 | |
6d2010ae A |
2958 | #pragma mark - |
2959 | #pragma mark zalloc_canblock | |
2960 | ||
5ba3f43e A |
2961 | extern boolean_t early_boot_complete; |
2962 | ||
1c79356b A |
2963 | /* |
2964 | * zalloc returns an element from the specified zone. | |
2965 | */ | |
fe8ab488 A |
2966 | static void * |
2967 | zalloc_internal( | |
39236c6e | 2968 | zone_t zone, |
fe8ab488 | 2969 | boolean_t canblock, |
5ba3f43e A |
2970 | boolean_t nopagewait, |
2971 | vm_size_t | |
2972 | #if !VM_MAX_TAG_ZONES | |
2973 | __unused | |
2974 | #endif | |
2975 | reqsize, | |
2976 | vm_tag_t tag) | |
1c79356b | 2977 | { |
316670eb A |
2978 | vm_offset_t addr = 0; |
2979 | kern_return_t retval; | |
6d2010ae | 2980 | uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */ |
c910b4d9 | 2981 | int numsaved = 0; |
39236c6e | 2982 | boolean_t zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE; |
39236c6e | 2983 | thread_t thr = current_thread(); |
fe8ab488 | 2984 | boolean_t check_poison = FALSE; |
3e170ce0 | 2985 | boolean_t set_doing_alloc_with_vm_priv = FALSE; |
6d2010ae A |
2986 | |
2987 | #if CONFIG_ZLEAKS | |
2988 | uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */ | |
2989 | #endif /* CONFIG_ZLEAKS */ | |
1c79356b | 2990 | |
5ba3f43e A |
2991 | #if KASAN |
2992 | /* | |
2993 | * KASan uses zalloc() for fakestack, which can be called anywhere. However, | |
2994 | * we make sure these calls can never block. | |
2995 | */ | |
2996 | boolean_t irq_safe = FALSE; | |
2997 | const char *fakestack_name = "fakestack."; | |
2998 | if (strncmp(zone->zone_name, fakestack_name, strlen(fakestack_name)) == 0) { | |
2999 | irq_safe = TRUE; | |
3000 | } | |
3001 | #elif MACH_ASSERT | |
3002 | /* In every other case, zalloc() from interrupt context is unsafe. */ | |
3003 | const boolean_t irq_safe = FALSE; | |
3004 | #endif | |
3005 | ||
1c79356b | 3006 | assert(zone != ZONE_NULL); |
5ba3f43e | 3007 | assert(irq_safe || ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !early_boot_complete); |
316670eb A |
3008 | |
3009 | #if CONFIG_GZALLOC | |
3010 | addr = gzalloc_alloc(zone, canblock); | |
316670eb | 3011 | #endif |
c910b4d9 A |
3012 | /* |
3013 | * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. | |
3014 | */ | |
39236c6e | 3015 | if (__improbable(DO_LOGGING(zone))) |
6d2010ae | 3016 | numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH); |
39236c6e | 3017 | |
6d2010ae | 3018 | #if CONFIG_ZLEAKS |
fe8ab488 | 3019 | /* |
316670eb | 3020 | * Zone leak detection: capture a backtrace every zleak_sample_factor |
fe8ab488 | 3021 | * allocations in this zone. |
6d2010ae | 3022 | */ |
fe8ab488 | 3023 | if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) { |
6d2010ae | 3024 | /* Avoid backtracing twice if zone logging is on */ |
fe8ab488 | 3025 | if (numsaved == 0) |
39037602 | 3026 | zleak_tracedepth = backtrace(zbt, MAX_ZTRACE_DEPTH); |
6d2010ae A |
3027 | else |
3028 | zleak_tracedepth = numsaved; | |
3029 | } | |
3030 | #endif /* CONFIG_ZLEAKS */ | |
1c79356b | 3031 | |
5ba3f43e A |
3032 | #if VM_MAX_TAG_ZONES |
3033 | if (__improbable(zone->tags)) vm_tag_will_update_zone(tag, zone->tag_zone_index); | |
3034 | #endif /* VM_MAX_TAG_ZONES */ | |
3035 | ||
fe8ab488 | 3036 | lock_zone(zone); |
5ba3f43e | 3037 | assert(zone->zone_valid); |
fe8ab488 | 3038 | |
39236c6e | 3039 | if (zone->async_prio_refill && zone->zone_replenish_thread) { |
5ba3f43e A |
3040 | vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size)); |
3041 | vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size; | |
3042 | zone_replenish_wakeup = (zfreec < zrefillwm); | |
3043 | zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); | |
39236c6e | 3044 | |
5ba3f43e | 3045 | do { |
39236c6e A |
3046 | if (zone_replenish_wakeup) { |
3047 | zone_replenish_wakeups_initiated++; | |
39236c6e A |
3048 | /* Signal the potentially waiting |
3049 | * refill thread. | |
3050 | */ | |
3051 | thread_wakeup(&zone->zone_replenish_thread); | |
5ba3f43e A |
3052 | |
3053 | /* We don't want to wait around for zone_replenish_thread to bump up the free count | |
3054 | * if we're in zone_gc(). This keeps us from deadlocking with zone_replenish_thread. | |
3055 | */ | |
3056 | if (thr->options & TH_OPT_ZONE_GC) | |
3057 | break; | |
3058 | ||
39037602 | 3059 | unlock_zone(zone); |
39236c6e A |
3060 | /* Scheduling latencies etc. may prevent |
3061 | * the refill thread from keeping up | |
3062 | * with demand. Throttle consumers | |
3063 | * when we fall below half the | |
3064 | * watermark, unless VM privileged. |
3065 | */ | |
3066 | if (zone_alloc_throttle) { | |
3067 | zone_replenish_throttle_count++; | |
3068 | assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC); | |
3069 | thread_block(THREAD_CONTINUE_NULL); | |
3070 | } | |
3071 | lock_zone(zone); | |
5ba3f43e | 3072 | assert(zone->zone_valid); |
39236c6e | 3073 | } |
5ba3f43e A |
3074 | |
3075 | zfreec = (zone->cur_size - (zone->count * zone->elem_size)); | |
3076 | zrefillwm = zone->prio_refill_watermark * zone->elem_size; | |
3077 | zone_replenish_wakeup = (zfreec < zrefillwm); | |
3078 | zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); | |
3079 | ||
39236c6e A |
3080 | } while (zone_alloc_throttle == TRUE); |
3081 | } | |
3082 | ||
316670eb | 3083 | if (__probable(addr == 0)) |
5ba3f43e | 3084 | addr = try_alloc_from_zone(zone, tag, &check_poison); |
0b4e3aa0 | 3085 | |
5ba3f43e A |
3086 | /* If we're here because of zone_gc(), we didn't wait for zone_replenish_thread to finish. |
3087 | * So we need to ensure that we did successfully grab an element. And we only need to assert | |
3088 | * this for zones that have a replenish thread configured (in this case, the Reserved VM map | |
3089 | * entries zone). | |
3090 | */ | |
3091 | if (thr->options & TH_OPT_ZONE_GC && zone->async_prio_refill) | |
3092 | assert(addr != 0); | |
a3d08fcd | 3093 | |
0b4e3aa0 | 3094 | while ((addr == 0) && canblock) { |
1c79356b | 3095 | /* |
3e170ce0 A |
3096 | * zone is empty, try to expand it |
3097 | * | |
3098 | * Note that we now allow up to 2 threads (1 vm_privileged and 1 non-vm_privileged) |
3099 | * to expand the zone concurrently... this is necessary to avoid stalling | |
3100 | * vm_privileged threads running critical code necessary to continue compressing/swapping | |
3101 | * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads | |
3102 | * waiting to acquire free pages when the vm_page_free_count is below the | |
3103 | * vm_page_free_reserved limit. | |
1c79356b | 3104 | */ |
3e170ce0 A |
3105 | if ((zone->doing_alloc_without_vm_priv || zone->doing_alloc_with_vm_priv) && |
3106 | (((thr->options & TH_OPT_VMPRIV) == 0) || zone->doing_alloc_with_vm_priv)) { | |
1c79356b | 3107 | /* |
3e170ce0 A |
3108 | * This is a non-vm_privileged thread and a non-vm_privileged or |
3109 | * a vm_privileged thread is already expanding the zone... | |
3110 | * OR | |
3111 | * this is a vm_privileged thread and a vm_privileged thread is | |
3112 | * already expanding the zone... | |
3113 | * | |
3114 | * In either case wait for a thread to finish, then try again. | |
1c79356b | 3115 | */ |
1c79356b | 3116 | zone->waiting = TRUE; |
9bccf70c | 3117 | zone_sleep(zone); |
7ddcb079 A |
3118 | } else { |
3119 | vm_offset_t space; | |
3120 | vm_size_t alloc_size; | |
3121 | int retry = 0; | |
3122 | ||
1c79356b A |
3123 | if ((zone->cur_size + zone->elem_size) > |
3124 | zone->max_size) { | |
3125 | if (zone->exhaustible) | |
3126 | break; | |
3127 | if (zone->expandable) { | |
3128 | /* | |
3129 | * We're willing to overflow certain | |
3130 | * zones, but not without complaining. | |
3131 | * | |
3132 | * This is best used in conjunction | |
3133 | * with the collectable flag. What we | |
3134 | * want is an assurance we can get the | |
3135 | * memory back, assuming there's no | |
3136 | * leak. | |
3137 | */ | |
3138 | zone->max_size += (zone->max_size >> 1); | |
3139 | } else { | |
3140 | unlock_zone(zone); | |
3141 | ||
316670eb A |
3142 | panic_include_zprint = TRUE; |
3143 | #if CONFIG_ZLEAKS | |
3144 | if (zleak_state & ZLEAK_STATE_ACTIVE) | |
3145 | panic_include_ztrace = TRUE; | |
3146 | #endif /* CONFIG_ZLEAKS */ | |
1c79356b A |
3147 | panic("zalloc: zone \"%s\" empty.", zone->zone_name); |
3148 | } | |
3149 | } | |
39037602 A |
3150 | /* |
3151 | * It is possible that a BG thread is refilling/expanding the zone | |
3152 | * and gets pre-empted during that operation. That blocks all other | |
3153 | * threads from making progress, leading to a watchdog timeout. To |
3154 | * avoid that, boost the thread priority using the rwlock boost. |
3155 | */ | |
3156 | set_thread_rwlock_boost(); | |
3157 | ||
3e170ce0 A |
3158 | if ((thr->options & TH_OPT_VMPRIV)) { |
3159 | zone->doing_alloc_with_vm_priv = TRUE; | |
3160 | set_doing_alloc_with_vm_priv = TRUE; | |
3161 | } else { | |
3162 | zone->doing_alloc_without_vm_priv = TRUE; | |
3163 | } | |
1c79356b A |
3164 | unlock_zone(zone); |
3165 | ||
7ddcb079 A |
3166 | for (;;) { |
3167 | int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT; | |
fe8ab488 | 3168 | |
7ddcb079 A |
3169 | if (vm_pool_low() || retry >= 1) |
3170 | alloc_size = | |
3171 | round_page(zone->elem_size); | |
3172 | else | |
3173 | alloc_size = zone->alloc_size; | |
3174 | ||
3175 | if (zone->noencrypt) | |
3176 | zflags |= KMA_NOENCRYPT; | |
3177 | ||
5ba3f43e A |
3178 | /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ |
3179 | if (is_zone_map_nearing_exhaustion()) { | |
3180 | thread_wakeup((event_t) &vm_pageout_garbage_collect); | |
3181 | } | |
3182 | ||
3e170ce0 | 3183 | retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE); |
7ddcb079 | 3184 | if (retval == KERN_SUCCESS) { |
6d2010ae | 3185 | #if CONFIG_ZLEAKS |
7ddcb079 A |
3186 | if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) { |
3187 | if (zone_map->size >= zleak_global_tracking_threshold) { | |
3188 | kern_return_t kr; | |
3189 | ||
3190 | kr = zleak_activate(); | |
3191 | if (kr != KERN_SUCCESS) { | |
3192 | printf("Failed to activate live zone leak debugging (%d).\n", kr); | |
6d2010ae A |
3193 | } |
3194 | } | |
55e303ae | 3195 | } |
1c79356b | 3196 | |
7ddcb079 A |
3197 | if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) { |
3198 | if (zone->cur_size > zleak_per_zone_tracking_threshold) { | |
3199 | zone->zleak_on = TRUE; | |
3200 | } | |
1c79356b | 3201 | } |
7ddcb079 | 3202 | #endif /* CONFIG_ZLEAKS */ |
7ddcb079 A |
3203 | zcram(zone, space, alloc_size); |
3204 | ||
3205 | break; | |
3206 | } else if (retval != KERN_RESOURCE_SHORTAGE) { | |
3207 | retry++; | |
3208 | ||
7ddcb079 | 3209 | if (retry == 3) { |
6d2010ae A |
3210 | panic_include_zprint = TRUE; |
3211 | #if CONFIG_ZLEAKS | |
7ddcb079 | 3212 | if ((zleak_state & ZLEAK_STATE_ACTIVE)) { |
6d2010ae A |
3213 | panic_include_ztrace = TRUE; |
3214 | } | |
7ddcb079 | 3215 | #endif /* CONFIG_ZLEAKS */ |
39236c6e A |
3216 | if (retval == KERN_NO_SPACE) { |
3217 | zone_t zone_largest = zone_find_largest(); | |
3218 | panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)", | |
3219 | zone->zone_name, zone_largest->zone_name, | |
3220 | (unsigned long)zone_largest->cur_size, zone_largest->count); | |
3221 | ||
3222 | } | |
7ddcb079 | 3223 | panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count); |
6d2010ae | 3224 | } |
7ddcb079 A |
3225 | } else { |
3226 | break; | |
1c79356b A |
3227 | } |
3228 | } | |
7ddcb079 | 3229 | lock_zone(zone); |
5ba3f43e | 3230 | assert(zone->zone_valid); |
3e170ce0 A |
3231 | |
3232 | if (set_doing_alloc_with_vm_priv == TRUE) | |
3233 | zone->doing_alloc_with_vm_priv = FALSE; | |
3234 | else | |
3235 | zone->doing_alloc_without_vm_priv = FALSE; | |
3236 | ||
7ddcb079 | 3237 | if (zone->waiting) { |
3e170ce0 | 3238 | zone->waiting = FALSE; |
7ddcb079 A |
3239 | zone_wakeup(zone); |
3240 | } | |
39037602 A |
3241 | clear_thread_rwlock_boost(); |
3242 | ||
5ba3f43e | 3243 | addr = try_alloc_from_zone(zone, tag, &check_poison); |
7ddcb079 | 3244 | if (addr == 0 && |
fe8ab488 A |
3245 | retval == KERN_RESOURCE_SHORTAGE) { |
3246 | if (nopagewait == TRUE) | |
3247 | break; /* out of the main while loop */ | |
7ddcb079 | 3248 | unlock_zone(zone); |
fe8ab488 | 3249 | |
7ddcb079 A |
3250 | VM_PAGE_WAIT(); |
3251 | lock_zone(zone); | |
5ba3f43e | 3252 | assert(zone->zone_valid); |
7ddcb079 | 3253 | } |
1c79356b A |
3254 | } |
3255 | if (addr == 0) | |
5ba3f43e | 3256 | addr = try_alloc_from_zone(zone, tag, &check_poison); |
1c79356b A |
3257 | } |
3258 | ||
6d2010ae A |
3259 | #if CONFIG_ZLEAKS |
3260 | /* Zone leak detection: | |
3261 | * If we're sampling this allocation, add it to the zleaks hash table. | |
3262 | */ | |
3263 | if (addr && zleak_tracedepth > 0) { | |
3264 | /* Sampling can fail if another sample is happening at the same time in a different zone. */ | |
3265 | if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) { | |
3266 | /* If it failed, roll back the counter so we sample the next allocation instead. */ | |
316670eb | 3267 | zone->zleak_capture = zleak_sample_factor; |
6d2010ae A |
3268 | } |
3269 | } | |
3270 | #endif /* CONFIG_ZLEAKS */ | |
3271 | ||
3272 | ||
fe8ab488 | 3273 | if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) { |
39236c6e A |
3274 | zone->async_pending = TRUE; |
3275 | unlock_zone(zone); | |
3276 | thread_call_enter(&call_async_alloc); | |
3277 | lock_zone(zone); | |
5ba3f43e A |
3278 | assert(zone->zone_valid); |
3279 | addr = try_alloc_from_zone(zone, tag, &check_poison); | |
39236c6e A |
3280 | } |
3281 | ||
5ba3f43e A |
3282 | #if VM_MAX_TAG_ZONES |
3283 | if (__improbable(zone->tags) && addr) { | |
3284 | if (reqsize) reqsize = zone->elem_size - reqsize; | |
3285 | vm_tag_update_zone_size(tag, zone->tag_zone_index, zone->elem_size, reqsize); | |
3286 | } | |
3287 | #endif /* VM_MAX_TAG_ZONES */ | |
1c79356b A |
3288 | |
3289 | unlock_zone(zone); | |
0b4e3aa0 | 3290 | |
5ba3f43e A |
3291 | vm_offset_t inner_size = zone->elem_size; |
3292 | ||
39037602 A |
3293 | if (__improbable(DO_LOGGING(zone) && addr)) { |
3294 | btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_ALLOC, (void **)zbt, numsaved); | |
3295 | } | |
3296 | ||
fe8ab488 A |
3297 | if (__improbable(check_poison && addr)) { |
3298 | vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1; | |
3299 | vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr); | |
3300 | ||
3301 | for ( ; element_cursor < backup ; element_cursor++) | |
3302 | if (__improbable(*element_cursor != ZP_POISON)) | |
3303 | zone_element_was_modified_panic(zone, | |
3304 | addr, | |
3305 | *element_cursor, | |
3306 | ZP_POISON, | |
3307 | ((vm_offset_t)element_cursor) - addr); | |
3308 | } | |
3309 | ||
3310 | if (addr) { | |
3311 | /* | |
3312 | * Clear out the old next pointer and backup to avoid leaking the cookie | |
3313 | * and so that only values on the freelist have a valid cookie | |
3314 | */ | |
3315 | ||
3316 | vm_offset_t *primary = (vm_offset_t *) addr; | |
3317 | vm_offset_t *backup = get_backup_ptr(inner_size, primary); | |
3318 | ||
3319 | *primary = ZP_POISON; | |
3320 | *backup = ZP_POISON; | |
39037602 A |
3321 | |
3322 | #if DEBUG || DEVELOPMENT | |
3323 | if (__improbable(leak_scan_debug_flag && !(zone->elem_size & (sizeof(uintptr_t) - 1)))) { | |
3324 | int count, idx; | |
3325 | /* Fill element, from tail, with backtrace in reverse order */ | |
3326 | if (numsaved == 0) numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH); | |
3327 | count = (int) (zone->elem_size / sizeof(uintptr_t)); | |
3328 | if (count >= numsaved) count = numsaved - 1; | |
3329 | for (idx = 0; idx < count; idx++) ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1]; | |
3330 | } | |
3331 | #endif /* DEBUG || DEVELOPMENT */ | |
fe8ab488 A |
3332 | } |
3333 | ||
2d21ac55 | 3334 | TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr); |
5ba3f43e A |
3335 | |
3336 | #if KASAN_ZALLOC | |
3337 | /* Fixup the return address to skip the redzone */ | |
3338 | if (zone->kasan_redzone) { | |
3339 | addr = kasan_alloc(addr, zone->elem_size, | |
3340 | zone->elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone); | |
3341 | } | |
3342 | #endif | |
3343 | ||
a39ff7e2 A |
3344 | DTRACE_VM2(zalloc, zone_t, zone, void*, addr); |
3345 | ||
91447636 | 3346 | return((void *)addr); |
1c79356b A |
3347 | } |
3348 | ||
91447636 | 3349 | void * |
fe8ab488 A |
3350 | zalloc(zone_t zone) |
3351 | { | |
5ba3f43e | 3352 | return (zalloc_internal(zone, TRUE, FALSE, 0, VM_KERN_MEMORY_NONE)); |
fe8ab488 A |
3353 | } |
3354 | ||
3355 | void * | |
3356 | zalloc_noblock(zone_t zone) | |
3357 | { | |
5ba3f43e | 3358 | return (zalloc_internal(zone, FALSE, FALSE, 0, VM_KERN_MEMORY_NONE)); |
fe8ab488 A |
3359 | } |
3360 | ||
3361 | void * | |
3362 | zalloc_nopagewait(zone_t zone) | |
1c79356b | 3363 | { |
5ba3f43e A |
3364 | return (zalloc_internal(zone, TRUE, TRUE, 0, VM_KERN_MEMORY_NONE)); |
3365 | } | |
3366 | ||
3367 | void * | |
3368 | zalloc_canblock_tag(zone_t zone, boolean_t canblock, vm_size_t reqsize, vm_tag_t tag) | |
3369 | { | |
3370 | return (zalloc_internal(zone, canblock, FALSE, reqsize, tag)); | |
1c79356b A |
3371 | } |
3372 | ||
91447636 | 3373 | void * |
fe8ab488 | 3374 | zalloc_canblock(zone_t zone, boolean_t canblock) |
1c79356b | 3375 | { |
5ba3f43e | 3376 | return (zalloc_internal(zone, canblock, FALSE, 0, VM_KERN_MEMORY_NONE)); |
1c79356b A |
3377 | } |
3378 | ||
fe8ab488 | 3379 | |
0b4e3aa0 A |
3380 | void |
3381 | zalloc_async( | |
39236c6e | 3382 | __unused thread_call_param_t p0, |
91447636 | 3383 | __unused thread_call_param_t p1) |
0b4e3aa0 | 3384 | { |
39037602 | 3385 | zone_t current_z = NULL; |
39236c6e A |
3386 | unsigned int max_zones, i; |
3387 | void *elt = NULL; | |
3388 | boolean_t pending = FALSE; | |
3389 | ||
3390 | simple_lock(&all_zones_lock); | |
39236c6e A |
3391 | max_zones = num_zones; |
3392 | simple_unlock(&all_zones_lock); | |
39236c6e | 3393 | for (i = 0; i < max_zones; i++) { |
39037602 | 3394 | current_z = &(zone_array[i]); |
5ba3f43e A |
3395 | |
3396 | if (current_z->no_callout == TRUE) { | |
3397 | /* async_pending will never be set */ | |
3398 | continue; | |
3399 | } | |
3400 | ||
39236c6e | 3401 | lock_zone(current_z); |
5ba3f43e | 3402 | if (current_z->zone_valid && current_z->async_pending == TRUE) { |
39236c6e A |
3403 | current_z->async_pending = FALSE; |
3404 | pending = TRUE; | |
3405 | } | |
3406 | unlock_zone(current_z); | |
0b4e3aa0 | 3407 | |
39236c6e | 3408 | if (pending == TRUE) { |
5ba3f43e | 3409 | elt = zalloc_canblock_tag(current_z, TRUE, 0, VM_KERN_MEMORY_OSFMK); |
39236c6e A |
3410 | zfree(current_z, elt); |
3411 | pending = FALSE; | |
3412 | } | |
39236c6e | 3413 | } |
0b4e3aa0 A |
3414 | } |
3415 | ||
1c79356b A |
3416 | /* |
3417 | * zget returns an element from the specified zone, or NULL immediately |
3418 | * (without blocking) if no element is currently available. |
1c79356b | 3419 | */ |
91447636 | 3420 | void * |
1c79356b | 3421 | zget( |
39037602 | 3422 | zone_t zone) |
1c79356b | 3423 | { |
5ba3f43e | 3424 | return zalloc_internal(zone, FALSE, TRUE, 0, VM_KERN_MEMORY_NONE); |
39037602 | 3425 | } |
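A hedged sketch of how these entry points pair up for a caller; the zone handle is a placeholder and the usage is illustrative, not a pattern taken from this file:

	/* Hypothetical caller: blocking allocation plus a non-blocking probe. */
	static void
	example_alloc_and_free(zone_t z)
	{
		void *e;

		e = zalloc(z);          /* may block, and may expand the zone */
		if (e != NULL) {
			/* ... use the element ... */
			zfree(z, e);
		}

		e = zget(z);            /* never blocks; NULL if nothing is immediately available */
		if (e != NULL)
			zfree(z, e);
	}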
1c79356b | 3426 | |
39037602 A |
3427 | /* Keep this FALSE by default. Large memory machines run orders of magnitude |
3428 | slower in debug mode when true. Use a debugger to enable it if needed. */ |
3429 | /* static */ boolean_t zone_check = FALSE; | |
1c79356b | 3430 | |
39037602 A |
3431 | static void zone_check_freelist(zone_t zone, vm_offset_t elem) |
3432 | { | |
3433 | struct zone_free_element *this; | |
3434 | struct zone_page_metadata *thispage; | |
fe8ab488 | 3435 | |
39037602 A |
3436 | if (zone->allows_foreign) { |
3437 | for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); | |
3438 | !queue_end(&zone->pages.any_free_foreign, &(thispage->pages)); | |
3439 | thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { | |
3440 | for (this = page_metadata_get_freelist(thispage); | |
3441 | this != NULL; | |
3442 | this = this->next) { | |
3443 | if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) | |
3444 | panic("zone_check_freelist"); | |
3445 | } | |
3446 | } | |
1c79356b | 3447 | } |
39037602 A |
3448 | for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); |
3449 | !queue_end(&zone->pages.all_free, &(thispage->pages)); | |
3450 | thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { | |
3451 | for (this = page_metadata_get_freelist(thispage); | |
3452 | this != NULL; | |
3453 | this = this->next) { | |
3454 | if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) | |
3455 | panic("zone_check_freelist"); | |
6d2010ae A |
3456 | } |
3457 | } | |
39037602 A |
3458 | for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); |
3459 | !queue_end(&zone->pages.intermediate, &(thispage->pages)); | |
3460 | thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { | |
3461 | for (this = page_metadata_get_freelist(thispage); | |
3462 | this != NULL; | |
3463 | this = this->next) { | |
39236c6e A |
3464 | if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) |
3465 | panic("zone_check_freelist"); | |
3466 | } | |
3467 | } | |
3468 | } | |
3469 | ||
1c79356b A |
3470 | void |
3471 | zfree( | |
39037602 | 3472 | zone_t zone, |
91447636 | 3473 | void *addr) |
1c79356b | 3474 | { |
91447636 | 3475 | vm_offset_t elem = (vm_offset_t) addr; |
39236c6e | 3476 | uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */ |
c910b4d9 | 3477 | int numsaved = 0; |
316670eb | 3478 | boolean_t gzfreed = FALSE; |
fe8ab488 | 3479 | boolean_t poison = FALSE; |
5ba3f43e A |
3480 | #if VM_MAX_TAG_ZONES |
3481 | vm_tag_t tag; | |
3482 | #endif /* VM_MAX_TAG_ZONES */ | |
c910b4d9 A |
3483 | |
3484 | assert(zone != ZONE_NULL); | |
a39ff7e2 | 3485 | DTRACE_VM2(zfree, zone_t, zone, void*, addr); |
c910b4d9 | 3486 | |
5ba3f43e A |
3487 | #if KASAN_ZALLOC |
3488 | /* | |
3489 | * Resize back to the real allocation size and hand off to the KASan | |
3490 | * quarantine. `addr` may then point to a different allocation. | |
3491 | */ | |
3492 | vm_size_t usersz = zone->elem_size - 2 * zone->kasan_redzone; | |
3493 | vm_size_t sz = usersz; | |
3494 | if (addr && zone->kasan_redzone) { | |
3495 | kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC); | |
3496 | addr = (void *)kasan_dealloc((vm_address_t)addr, &sz); | |
3497 | assert(sz == zone->elem_size); | |
3498 | } | |
3499 | if (addr && zone->kasan_quarantine) { | |
3500 | kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, &zone, usersz, true); | |
3501 | if (!addr) { | |
3502 | return; | |
3503 | } | |
3504 | } | |
3505 | elem = (vm_offset_t)addr; | |
3506 | #endif | |
3507 | ||
c910b4d9 A |
3508 | /* |
3509 | * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. | |
3510 | */ | |
3511 | ||
39236c6e A |
3512 | if (__improbable(DO_LOGGING(zone) && corruption_debug_flag)) |
3513 | numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH); | |
1c79356b A |
3514 | |
3515 | #if MACH_ASSERT | |
3516 | /* Basic sanity checks */ | |
3517 | if (zone == ZONE_NULL || elem == (vm_offset_t)0) | |
3518 | panic("zfree: NULL"); | |
55e303ae A |
3519 | #endif |
3520 | ||
316670eb A |
3521 | #if CONFIG_GZALLOC |
3522 | gzfreed = gzalloc_free(zone, addr); | |
3523 | #endif | |
3524 | ||
39037602 A |
3525 | if (!gzfreed) { |
3526 | struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); | |
3527 | if (zone != PAGE_METADATA_GET_ZONE(page_meta)) { | |
3528 | panic("Element %p from zone %s caught being freed to wrong zone %s\n", addr, PAGE_METADATA_GET_ZONE(page_meta)->zone_name, zone->zone_name); | |
3529 | } | |
3530 | } | |
3531 | ||
b0d623f7 | 3532 | TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr); |
2d21ac55 | 3533 | |
316670eb A |
3534 | if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign && |
3535 | !from_zone_map(elem, zone->elem_size))) { | |
1c79356b | 3536 | panic("zfree: non-allocated memory in collectable zone!"); |
55e303ae | 3537 | } |
1c79356b | 3538 | |
fe8ab488 A |
3539 | if ((zp_factor != 0 || zp_tiny_zone_limit != 0) && !gzfreed) { |
3540 | /* | |
3541 | * Poison the memory before it ends up on the freelist to catch | |
3542 | * use-after-free and use of uninitialized memory | |
3543 | * | |
3544 | * Always poison tiny zones' elements (limit is 0 if -no-zp is set) | |
3545 | * Also poison larger elements periodically | |
3546 | */ | |
3547 | ||
3548 | vm_offset_t inner_size = zone->elem_size; | |
3549 | ||
fe8ab488 A |
3550 | uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale); |
3551 | ||
3552 | if (inner_size <= zp_tiny_zone_limit) | |
3553 | poison = TRUE; | |
3554 | else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE) | |
3555 | poison = TRUE; | |
3556 | ||
3557 | if (__improbable(poison)) { | |
3558 | ||
3559 | /* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */ | |
3560 | /* Poison everything but primary and backup */ | |
3561 | vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1; | |
3562 | vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem); | |
3563 | ||
3564 | for ( ; element_cursor < backup; element_cursor++) | |
3565 | *element_cursor = ZP_POISON; | |
3566 | } | |
3567 | } | |
3568 | ||
c910b4d9 A |
3569 | /* |
3570 | * See if we're doing logging on this zone. There are two styles of logging used depending on | |
3571 | * whether we're trying to catch a leak or corruption. See comments above in zalloc for details. | |
3572 | */ | |
3573 | ||
39236c6e | 3574 | if (__improbable(DO_LOGGING(zone))) { |
316670eb | 3575 | if (corruption_debug_flag) { |
c910b4d9 A |
3576 | /* |
3577 | * We're logging to catch a corruption. Add a record of this zfree operation | |
3578 | * to the log. |
3579 | */ | |
39037602 | 3580 | btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE, (void **)zbt, numsaved); |
c910b4d9 | 3581 | } else { |
c910b4d9 A |
3582 | /* |
3583 | * We're logging to catch a leak. Remove any record we might have for this | |
3584 | * element since it's being freed. Note that we may not find it if the buffer | |
3585 | * overflowed and that's OK. Since the log is of a limited size, old records | |
3586 | * get overwritten if there are more zallocs than zfrees. | |
3587 | */ | |
39037602 | 3588 | btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr); |
c910b4d9 A |
3589 | } |
3590 | } | |
3591 | ||
39037602 | 3592 | lock_zone(zone); |
5ba3f43e | 3593 | assert(zone->zone_valid); |
1c79356b | 3594 | |
1c79356b | 3595 | if (zone_check) { |
39236c6e | 3596 | zone_check_freelist(zone, elem); |
1c79356b | 3597 | } |
316670eb | 3598 | |
5ba3f43e A |
3599 | if (__probable(!gzfreed)) { |
3600 | #if VM_MAX_TAG_ZONES | |
3601 | if (__improbable(zone->tags)) { | |
3602 | tag = (ZTAG(zone, elem)[0] >> 1); | |
3603 | // set the tag with b0 clear so the block remains inuse | |
3604 | ZTAG(zone, elem)[0] = 0xFFFE; | |
3605 | } | |
3606 | #endif /* VM_MAX_TAG_ZONES */ | |
fe8ab488 | 3607 | free_to_zone(zone, elem, poison); |
5ba3f43e | 3608 | } |
316670eb | 3609 | |
b0d623f7 A |
3610 | #if MACH_ASSERT |
3611 | if (zone->count < 0) | |
39236c6e A |
3612 | panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone", |
3613 | zone->zone_name, addr); | |
b0d623f7 | 3614 | #endif |
6d2010ae | 3615 | |
0b4e3aa0 | 3616 | |
6d2010ae | 3617 | #if CONFIG_ZLEAKS |
6d2010ae A |
3618 | /* |
3619 | * Zone leak detection: un-track the allocation | |
3620 | */ | |
3621 | if (zone->zleak_on) { | |
3622 | zleak_free(elem, zone->elem_size); | |
3623 | } | |
3624 | #endif /* CONFIG_ZLEAKS */ | |
3625 | ||
5ba3f43e A |
3626 | #if VM_MAX_TAG_ZONES |
3627 | if (__improbable(zone->tags) && __probable(!gzfreed)) { | |
3628 | vm_tag_update_zone_size(tag, zone->tag_zone_index, -((int64_t)zone->elem_size), 0); | |
3629 | } | |
3630 | #endif /* VM_MAX_TAG_ZONES */ | |
3631 | ||
1c79356b A |
3632 | unlock_zone(zone); |
3633 | } | |
3634 | ||
1c79356b A |
3635 | /* Change a zone's flags. |
3636 | * This routine must be called immediately after zinit. | |
3637 | */ | |
3638 | void | |
3639 | zone_change( | |
3640 | zone_t zone, | |
3641 | unsigned int item, | |
3642 | boolean_t value) | |
3643 | { | |
3644 | assert( zone != ZONE_NULL ); | |
3645 | assert( value == TRUE || value == FALSE ); | |
3646 | ||
3647 | switch(item){ | |
0b4c1975 A |
3648 | case Z_NOENCRYPT: |
3649 | zone->noencrypt = value; | |
3650 | break; | |
1c79356b A |
3651 | case Z_EXHAUST: |
3652 | zone->exhaustible = value; | |
3653 | break; | |
3654 | case Z_COLLECT: | |
3655 | zone->collectable = value; | |
3656 | break; | |
3657 | case Z_EXPAND: | |
3658 | zone->expandable = value; | |
3659 | break; | |
3660 | case Z_FOREIGN: | |
3661 | zone->allows_foreign = value; | |
3662 | break; | |
6d2010ae A |
3663 | case Z_CALLERACCT: |
3664 | zone->caller_acct = value; | |
3665 | break; | |
7ddcb079 A |
3666 | case Z_NOCALLOUT: |
3667 | zone->no_callout = value; | |
3668 | break; | |
5ba3f43e A |
3669 | case Z_TAGS_ENABLED: |
3670 | #if VM_MAX_TAG_ZONES | |
3671 | { | |
3672 | static int tag_zone_index; | |
3673 | zone->tags = TRUE; | |
3674 | zone->tags_inline = (((page_size + zone->elem_size - 1) / zone->elem_size) <= (sizeof(uint32_t) / sizeof(uint16_t))); | |
3675 | zone->tag_zone_index = OSAddAtomic(1, &tag_zone_index); | |
3676 | } | |
3677 | #endif /* VM_MAX_TAG_ZONES */ | |
3678 | break; | |
316670eb A |
3679 | case Z_GZALLOC_EXEMPT: |
3680 | zone->gzalloc_exempt = value; | |
3681 | #if CONFIG_GZALLOC | |
3682 | gzalloc_reconfigure(zone); | |
3683 | #endif | |
3684 | break; | |
3685 | case Z_ALIGNMENT_REQUIRED: | |
3686 | zone->alignment_required = value; | |
5ba3f43e A |
3687 | #if KASAN_ZALLOC |
3688 | if (zone->kasan_redzone == KASAN_GUARD_SIZE) { | |
3689 | /* Don't disturb alignment with the redzone for zones with | |
3690 | * specific alignment requirements. */ | |
3691 | zone->elem_size -= zone->kasan_redzone * 2; | |
3692 | zone->kasan_redzone = 0; | |
3693 | } | |
3694 | #endif | |
316670eb A |
3695 | #if CONFIG_GZALLOC |
3696 | gzalloc_reconfigure(zone); | |
3697 | #endif | |
3698 | break; | |
5ba3f43e A |
3699 | case Z_KASAN_QUARANTINE: |
3700 | zone->kasan_quarantine = value; | |
3701 | break; | |
1c79356b A |
3702 | default: |
3703 | panic("Zone_change: Wrong Item Type!"); | |
3704 | /* break; */ | |
1c79356b | 3705 | } |
1c79356b A |
3706 | } |
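A hypothetical zinit()/zone_change() sequence illustrating the required call order; the zone name, sizes, and flag choices are made up for the example:

	/* Hypothetical example: flags are set immediately after zinit(), before
	 * the zone is ever allocated from. */
	static zone_t example_zone;

	static void
	example_zone_setup(void)
	{
		example_zone = zinit(64, 64 * 4096, PAGE_SIZE, "example.zone");
		zone_change(example_zone, Z_NOENCRYPT, TRUE);   /* contents need not be encrypted */
		zone_change(example_zone, Z_COLLECT, TRUE);     /* let zone_gc() reclaim fully free pages */
		zone_change(example_zone, Z_EXPAND, TRUE);      /* allow growth past max_size, with a complaint */
		zone_change(example_zone, Z_CALLERACCT, FALSE); /* don't account allocations to the calling task */
	}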
3707 | ||
3708 | /* | |
3709 | * Return the expected number of free elements in the zone. | |
3710 | * This calculation will be incorrect if items are zfree'd that | |
3711 | * were never zalloc'd/zget'd. The correct way to stuff memory | |
3712 | * into a zone is by zcram. | |
3713 | */ | |
3714 | ||
3715 | integer_t | |
3716 | zone_free_count(zone_t zone) | |
3717 | { | |
3718 | integer_t free_count; | |
3719 | ||
3720 | lock_zone(zone); | |
39236c6e | 3721 | free_count = zone->countfree; |
1c79356b A |
3722 | unlock_zone(zone); |
3723 | ||
3724 | assert(free_count >= 0); | |
3725 | ||
3726 | return(free_count); | |
3727 | } | |
3728 | ||
5ba3f43e A |
3729 | /* Drops the elements in the free queue of a zone. Called by zone_gc() on each zone, and when a zone is zdestroy'ed. */ |
3730 | void | |
3731 | drop_free_elements(zone_t z) | |
3732 | { | |
3733 | vm_size_t elt_size, size_freed; | |
3734 | int total_freed_pages = 0; | |
3735 | uint64_t old_all_free_count; | |
3736 | struct zone_page_metadata *page_meta; | |
3737 | queue_head_t page_meta_head; | |
3738 | ||
3739 | lock_zone(z); | |
3740 | if (queue_empty(&z->pages.all_free)) { | |
3741 | unlock_zone(z); | |
3742 | return; | |
3743 | } | |
3744 | ||
3745 | /* | |
3746 | * Snatch all of the free elements away from the zone. | |
3747 | */ | |
3748 | elt_size = z->elem_size; | |
3749 | old_all_free_count = z->count_all_free_pages; | |
3750 | queue_new_head(&z->pages.all_free, &page_meta_head, struct zone_page_metadata *, pages); | |
3751 | queue_init(&z->pages.all_free); | |
3752 | z->count_all_free_pages = 0; | |
3753 | unlock_zone(z); | |
3754 | ||
3755 | /* Iterate through all elements to find out size and count of elements we snatched */ | |
3756 | size_freed = 0; | |
3757 | queue_iterate(&page_meta_head, page_meta, struct zone_page_metadata *, pages) { | |
3758 | assert(from_zone_map((vm_address_t)page_meta, sizeof(*page_meta))); /* foreign elements should be in any_free_foreign */ | |
3759 | size_freed += elt_size * page_meta->free_count; | |
3760 | } | |
3761 | ||
3762 | /* Update the zone size and free element count */ | |
3763 | lock_zone(z); | |
3764 | z->cur_size -= size_freed; | |
3765 | z->countfree -= size_freed/elt_size; | |
3766 | unlock_zone(z); | |
3767 | ||
3768 | while ((page_meta = (struct zone_page_metadata *)dequeue_head(&page_meta_head)) != NULL) { | |
3769 | vm_address_t free_page_address; | |
3770 | /* Free the pages for metadata and account for them */ | |
3771 | free_page_address = get_zone_page(page_meta); | |
3772 | ZONE_PAGE_COUNT_DECR(z, page_meta->page_count); | |
3773 | total_freed_pages += page_meta->page_count; | |
3774 | old_all_free_count -= page_meta->page_count; | |
3775 | #if KASAN_ZALLOC | |
3776 | kasan_poison_range(free_page_address, page_meta->page_count * PAGE_SIZE, ASAN_VALID); | |
3777 | #endif | |
3778 | #if VM_MAX_TAG_ZONES | |
3779 | if (z->tags) ztMemoryRemove(z, free_page_address, (page_meta->page_count * PAGE_SIZE)); | |
3780 | #endif /* VM_MAX_TAG_ZONES */ | |
3781 | kmem_free(zone_map, free_page_address, (page_meta->page_count * PAGE_SIZE)); | |
3782 | if (current_thread()->options & TH_OPT_ZONE_GC) { | |
3783 | thread_yield_to_preemption(); | |
3784 | } | |
3785 | } | |
3786 | ||
3787 | /* We freed all the pages from the all_free list for this zone */ | |
3788 | assert(old_all_free_count == 0); | |
3789 | ||
3790 | if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) | |
3791 | kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages); | |
3792 | } | |
3793 | ||
1c79356b A |
3794 | /* Zone garbage collection |
3795 | * | |
3796 | * zone_gc will walk through all the free elements in all the | |
3797 | * zones that are marked collectable looking for reclaimable | |
3798 | * pages. zone_gc is called by consider_zone_gc when the system | |
3799 | * begins to run out of memory. | |
5ba3f43e A |
3800 | * |
3801 | * We should ensure that zone_gc never blocks. | |
1c79356b A |
3802 | */ |
3803 | void | |
5ba3f43e | 3804 | zone_gc(boolean_t consider_jetsams) |
1c79356b A |
3805 | { |
3806 | unsigned int max_zones; | |
55e303ae | 3807 | zone_t z; |
1c79356b | 3808 | unsigned int i; |
5ba3f43e A |
3809 | |
3810 | if (consider_jetsams) { | |
3811 | kill_process_in_largest_zone(); | |
3812 | /* | |
3813 | * If we do end up jetsamming something, we need to do a zone_gc so that | |
3814 | * we can reclaim free zone elements and update the zone map size. | |
3815 | * Fall through. | |
3816 | */ | |
3817 | } | |
1c79356b | 3818 | |
b0d623f7 | 3819 | lck_mtx_lock(&zone_gc_lock); |
1c79356b | 3820 | |
5ba3f43e A |
3821 | current_thread()->options |= TH_OPT_ZONE_GC; |
3822 | ||
1c79356b A |
3823 | simple_lock(&all_zones_lock); |
3824 | max_zones = num_zones; | |
1c79356b A |
3825 | simple_unlock(&all_zones_lock); |
3826 | ||
39236c6e | 3827 | if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) |
39037602 | 3828 | kprintf("zone_gc() starting...\n"); |
1c79356b | 3829 | |
39037602 A |
3830 | for (i = 0; i < max_zones; i++) { |
3831 | z = &(zone_array[i]); | |
1c79356b A |
3832 | assert(z != ZONE_NULL); |
3833 | ||
5ba3f43e | 3834 | if (!z->collectable) { |
316670eb | 3835 | continue; |
39037602 A |
3836 | } |
3837 | ||
39037602 | 3838 | if (queue_empty(&z->pages.all_free)) { |
1c79356b A |
3839 | continue; |
3840 | } | |
39037602 | 3841 | |
5ba3f43e | 3842 | drop_free_elements(z); |
39037602 | 3843 | } |
316670eb | 3844 | |
5ba3f43e A |
3845 | current_thread()->options &= ~TH_OPT_ZONE_GC; |
3846 | ||
39037602 A |
3847 | lck_mtx_unlock(&zone_gc_lock); |
3848 | } | |
316670eb | 3849 | |
39037602 A |
3850 | extern vm_offset_t kmapoff_kaddr; |
3851 | extern unsigned int kmapoff_pgcnt; | |
316670eb | 3852 | |
39037602 A |
3853 | /* |
3854 | * consider_zone_gc: | |
3855 | * | |
3856 | * Called by the pageout daemon when the system needs more free pages. | |
3857 | */ | |
1c79356b A |
3858 | |
3859 | void | |
5ba3f43e | 3860 | consider_zone_gc(boolean_t consider_jetsams) |
1c79356b | 3861 | { |
316670eb A |
3862 | if (kmapoff_kaddr != 0) { |
3863 | /* | |
3864 | * One-time reclaim of kernel_map resources we allocated in | |
3865 | * early boot. | |
3866 | */ | |
3867 | (void) vm_deallocate(kernel_map, | |
3868 | kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64); | |
3869 | kmapoff_kaddr = 0; | |
3870 | } | |
1c79356b | 3871 | |
39037602 | 3872 | if (zone_gc_allowed) |
5ba3f43e | 3873 | zone_gc(consider_jetsams); |
6d2010ae | 3874 | } |
2d21ac55 | 3875 | |
a39ff7e2 A |
3876 | |
3877 | boolean_t | |
3878 | get_zone_info( | |
3879 | zone_t z, | |
3880 | mach_zone_name_t *zn, | |
3881 | mach_zone_info_t *zi) | |
3882 | { | |
3883 | struct zone zcopy; | |
3884 | ||
3885 | assert(z != ZONE_NULL); | |
3886 | lock_zone(z); | |
3887 | if (!z->zone_valid) { | |
3888 | unlock_zone(z); | |
3889 | return FALSE; | |
3890 | } | |
3891 | zcopy = *z; | |
3892 | unlock_zone(z); | |
3893 | ||
3894 | if (zn != NULL) { | |
3895 | /* assuming here the name data is static */ | |
3896 | (void) __nosan_strlcpy(zn->mzn_name, zcopy.zone_name, | |
3897 | strlen(zcopy.zone_name)+1); | |
3898 | } | |
3899 | ||
3900 | if (zi != NULL) { | |
3901 | zi->mzi_count = (uint64_t)zcopy.count; | |
3902 | zi->mzi_cur_size = ptoa_64(zcopy.page_count); | |
3903 | zi->mzi_max_size = (uint64_t)zcopy.max_size; | |
3904 | zi->mzi_elem_size = (uint64_t)zcopy.elem_size; | |
3905 | zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size; | |
3906 | zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size; | |
3907 | zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible; | |
3908 | zi->mzi_collectable = 0; | |
3909 | if (zcopy.collectable) { | |
3910 | SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable, ((uint64_t)zcopy.count_all_free_pages * PAGE_SIZE)); | |
3911 | SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE); | |
3912 | } | |
3913 | } | |
3914 | ||
3915 | return TRUE; | |
3916 | } | |
3917 | ||
6d2010ae A |
3918 | kern_return_t |
3919 | task_zone_info( | |
39037602 A |
3920 | __unused task_t task, |
3921 | __unused mach_zone_name_array_t *namesp, | |
316670eb | 3922 | __unused mach_msg_type_number_t *namesCntp, |
39037602 | 3923 | __unused task_zone_info_array_t *infop, |
316670eb A |
3924 | __unused mach_msg_type_number_t *infoCntp) |
3925 | { | |
3926 | return KERN_FAILURE; | |
3927 | } | |
3928 | ||
6d2010ae A |
3929 | kern_return_t |
3930 | mach_zone_info( | |
316670eb | 3931 | host_priv_t host, |
6d2010ae A |
3932 | mach_zone_name_array_t *namesp, |
3933 | mach_msg_type_number_t *namesCntp, | |
3934 | mach_zone_info_array_t *infop, | |
3935 | mach_msg_type_number_t *infoCntp) | |
3e170ce0 A |
3936 | { |
3937 | return (mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL)); | |
3938 | } | |
3939 | ||
39037602 | 3940 | |
3e170ce0 A |
3941 | kern_return_t |
3942 | mach_memory_info( | |
3943 | host_priv_t host, | |
3944 | mach_zone_name_array_t *namesp, | |
3945 | mach_msg_type_number_t *namesCntp, | |
3946 | mach_zone_info_array_t *infop, | |
3947 | mach_msg_type_number_t *infoCntp, | |
3948 | mach_memory_info_array_t *memoryInfop, | |
3949 | mach_msg_type_number_t *memoryInfoCntp) | |
6d2010ae A |
3950 | { |
3951 | mach_zone_name_t *names; | |
3952 | vm_offset_t names_addr; | |
3953 | vm_size_t names_size; | |
3e170ce0 | 3954 | |
6d2010ae A |
3955 | mach_zone_info_t *info; |
3956 | vm_offset_t info_addr; | |
3957 | vm_size_t info_size; | |
3e170ce0 A |
3958 | |
3959 | mach_memory_info_t *memory_info; | |
3960 | vm_offset_t memory_info_addr; | |
3961 | vm_size_t memory_info_size; | |
2dced7af | 3962 | vm_size_t memory_info_vmsize; |
5ba3f43e | 3963 | unsigned int num_info; |
3e170ce0 | 3964 | |
5ba3f43e | 3965 | unsigned int max_zones, used_zones, i; |
6d2010ae A |
3966 | mach_zone_name_t *zn; |
3967 | mach_zone_info_t *zi; | |
3968 | kern_return_t kr; | |
3969 | ||
3970 | vm_size_t used; | |
3971 | vm_map_copy_t copy; | |
39037602 | 3972 | uint64_t zones_collectable_bytes = 0; |
6d2010ae A |
3973 | |
3974 | if (host == HOST_NULL) | |
3975 | return KERN_INVALID_HOST; | |
316670eb A |
3976 | #if CONFIG_DEBUGGER_FOR_ZONE_INFO |
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	/*
	 * We assume that zones aren't freed once allocated.
	 * We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}
	info = (mach_zone_info_t *) info_addr;

	zn = &names[0];
	zi = &info[0];

	used_zones = max_zones;
	for (i = 0; i < max_zones; i++) {
		if (!get_zone_info(&(zone_array[i]), zn, zi)) {
			used_zones--;
			continue;
		}
		zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
		zn++;
		zi++;
	}

	used = used_zones * sizeof *names;
	if (used != names_size) {
		vm_offset_t names_addr_end = names_addr + used;
		vm_size_t free_size = names_size - (round_page(names_addr_end) - names_addr);

		if (free_size >= PAGE_SIZE) {
			kmem_free(ipc_kernel_map,
				  round_page(names_addr_end), free_size);
		}
		bzero((char *) names_addr_end, round_page(names_addr_end) - names_addr_end);
	}

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)used, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = used_zones;

	used = used_zones * sizeof *info;
	if (used != info_size) {
		vm_offset_t info_addr_end = info_addr + used;
		vm_size_t free_size = info_size - (round_page(info_addr_end) - info_addr);

		if (free_size >= PAGE_SIZE) {
			kmem_free(ipc_kernel_map,
				  round_page(info_addr_end), free_size);
		}
		bzero((char *) info_addr_end, round_page(info_addr_end) - info_addr_end);
	}

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)used, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (mach_zone_info_t *) copy;
	*infoCntp = used_zones;

	num_info = 0;
	memory_info_addr = 0;

	if (memoryInfop && memoryInfoCntp)
	{
		num_info = vm_page_diagnose_estimate();
		memory_info_size = num_info * sizeof(*memory_info);
		memory_info_vmsize = round_page(memory_info_size);
		kr = kmem_alloc_pageable(ipc_kernel_map,
					 &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
					VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
		assert(kr == KERN_SUCCESS);

		memory_info = (mach_memory_info_t *) memory_info_addr;
		vm_page_diagnose(memory_info, num_info, zones_collectable_bytes);

		kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
				   (vm_map_size_t)memory_info_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*memoryInfop = (mach_memory_info_t *) copy;
		*memoryInfoCntp = num_info;
	}

	return KERN_SUCCESS;
}

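/*
 * mach_zone_info_for_zone:
 *
 * Return the mach_zone_info_t for the zone whose name matches the
 * requested mach_zone_name_t.  Requires a privileged host port and,
 * when CONFIG_DEBUGGER_FOR_ZONE_INFO is set, a debugger-enabled boot.
 */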
kern_return_t
mach_zone_info_for_zone(
	host_priv_t		host,
	mach_zone_name_t	name,
	mach_zone_info_t	*infop)
{
	unsigned int max_zones, i;
	zone_t zone_ptr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	if (infop == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	zone_ptr = ZONE_NULL;
	for (i = 0; i < max_zones; i++) {
		zone_t z = &(zone_array[i]);
		assert(z != ZONE_NULL);

		/* Find the requested zone by name */
		if (!strncmp(name.mzn_name, z->zone_name, strlen(z->zone_name))) {
			zone_ptr = z;
			break;
		}
	}

	/* No zones found with the requested zone name */
	if (zone_ptr == ZONE_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (get_zone_info(zone_ptr, NULL, infop)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

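/*
 * mach_zone_info_for_largest_zone:
 *
 * Return the name and info for the zone with the largest current size.
 */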
kern_return_t
mach_zone_info_for_largest_zone(
	host_priv_t		host,
	mach_zone_name_t	*namep,
	mach_zone_info_t	*infop)
{
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

	if (namep == NULL || infop == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (get_zone_info(zone_find_largest(), namep, infop)) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

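/*
 * get_zones_collectable_bytes:
 *
 * Sum the collectable (freeable) bytes reported by get_zone_info()
 * across all zones.
 */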
uint64_t
get_zones_collectable_bytes(void)
{
	unsigned int i, max_zones;
	uint64_t zones_collectable_bytes = 0;
	mach_zone_info_t zi;

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones);
	simple_unlock(&all_zones_lock);

	for (i = 0; i < max_zones; i++) {
		if (get_zone_info(&(zone_array[i]), NULL, &zi)) {
			zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
		}
	}

	return zones_collectable_bytes;
}

#if DEBUG || DEVELOPMENT

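/*
 * mach_memory_info_check:
 *
 * DEBUG/DEVELOPMENT-only sanity check: compare the wired-memory total
 * reported by vm_page_diagnose() against the sum of all zones' page
 * counts and print any shortfall.
 */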
kern_return_t
mach_memory_info_check(void)
{
	mach_memory_info_t	*memory_info;
	mach_memory_info_t	*info;
	zone_t			zone;
	unsigned int		idx, num_info, max_zones;
	vm_offset_t		memory_info_addr;
	kern_return_t		kr;
	size_t			memory_info_size, memory_info_vmsize;
	uint64_t		top_wired, zonestotal, total;

	num_info = vm_page_diagnose_estimate();
	memory_info_size = num_info * sizeof(*memory_info);
	memory_info_vmsize = round_page(memory_info_size);
	kr = kmem_alloc(kernel_map, &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_DIAG);
	assert(kr == KERN_SUCCESS);

	memory_info = (mach_memory_info_t *) memory_info_addr;
	vm_page_diagnose(memory_info, num_info, 0);

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	top_wired = total = zonestotal = 0;
	for (idx = 0; idx < max_zones; idx++)
	{
		zone = &(zone_array[idx]);
		assert(zone != ZONE_NULL);
		lock_zone(zone);
		zonestotal += ptoa_64(zone->page_count);
		unlock_zone(zone);
	}
	for (idx = 0; idx < num_info; idx++)
	{
		info = &memory_info[idx];
		if (!info->size) continue;
		if (VM_KERN_COUNT_WIRED == info->site) top_wired = info->size;
		if (VM_KERN_SITE_HIDE & info->flags) continue;
		if (!(VM_KERN_SITE_WIRED & info->flags)) continue;
		total += info->size;
	}
	total += zonestotal;

	printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n", total, top_wired, zonestotal, top_wired - total);

	kmem_free(kernel_map, memory_info_addr, memory_info_vmsize);

	return (kr);
}

#endif /* DEBUG || DEVELOPMENT */

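/*
 * mach_zone_force_gc:
 *
 * Trigger a zone garbage collection.  Only acts on DEBUG/DEVELOPMENT
 * kernels; on release kernels it is a no-op that still returns success.
 */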
kern_return_t
mach_zone_force_gc(
	host_t host)
{
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

#if DEBUG || DEVELOPMENT
	consider_zone_gc(FALSE);
#endif /* DEBUG || DEVELOPMENT */
	return (KERN_SUCCESS);
}

extern unsigned int stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif

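/*
 * zone_find_largest:
 *
 * Return the zone with the largest cur_size.  The zone list is scanned
 * without taking each zone's lock, so the answer is a best-effort
 * snapshot.
 */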
zone_t
zone_find_largest(void)
{
	unsigned int	i;
	unsigned int	max_zones;
	zone_t		the_zone;
	zone_t		zone_largest;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	zone_largest = &(zone_array[0]);
	for (i = 0; i < max_zones; i++) {
		the_zone = &(zone_array[i]);
		if (the_zone->cur_size > zone_largest->cur_size) {
			zone_largest = the_zone;
		}
	}
	return zone_largest;
}

#if ZONE_DEBUG

/* should we care about locks here ? */

#define zone_in_use(z)	( z->count || z->free_elements \
			  || !queue_empty(&z->pages.all_free) \
			  || !queue_empty(&z->pages.intermediate) \
			  || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign)))


#endif	/* ZONE_DEBUG */


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEBUG || DEVELOPMENT

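/*
 * zone_copy_all_allocations_inqueue:
 *
 * Walk one of a zone's page-metadata queues and append the address of
 * every allocated (non-free) element to 'elems'.  Returns the cursor
 * just past the last entry written.
 */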
static uintptr_t *
zone_copy_all_allocations_inqueue(zone_t z, queue_head_t * queue, uintptr_t * elems)
{
	struct zone_page_metadata	*page_meta;
	vm_offset_t			free, elements;
	vm_offset_t			idx, numElements, freeCount, bytesAvail, metaSize;

	queue_iterate(queue, page_meta, struct zone_page_metadata *, pages)
	{
		elements = get_zone_page(page_meta);
		bytesAvail = ptoa(page_meta->page_count);
		freeCount = 0;
		if (z->allows_foreign && !from_zone_map(elements, z->elem_size))
		{
			metaSize = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1);
			bytesAvail -= metaSize;
			elements += metaSize;
		}
		numElements = bytesAvail / z->elem_size;
		// construct array of all possible elements
		for (idx = 0; idx < numElements; idx++)
		{
			elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size);
		}
		// remove from the array all free elements
		free = (vm_offset_t)page_metadata_get_freelist(page_meta);
		while (free)
		{
			// find idx of free element
			for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) {}
			assert(idx < numElements);
			// remove it
			bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0]));
			numElements--;
			freeCount++;
			// next free element
			vm_offset_t *primary = (vm_offset_t *) free;
			free = *primary ^ zp_nopoison_cookie;
		}
		elems += numElements;
	}

	return (elems);
}

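/*
 * zone_leaks:
 *
 * For the zone whose name matches 'zoneName', snapshot every live
 * element, scan for references to them, and report the apparently
 * leaked elements (grouped by allocation backtrace where one is
 * available) through the 'proc' callback.
 */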
kern_return_t
zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * refCon)
{
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];
	zone_t		zone;
	uintptr_t *	array;
	uintptr_t *	next;
	uintptr_t	element, bt;
	uint32_t	idx, count, found;
	uint32_t	btidx, btcount, nobtcount, btfound;
	uint32_t	elemSize;
	uint64_t	maxElems;
	unsigned int	max_zones;
	kern_return_t	kr;

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	simple_unlock(&all_zones_lock);

	for (idx = 0; idx < max_zones; idx++)
	{
		if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) break;
	}
	if (idx >= max_zones) return (KERN_INVALID_NAME);
	zone = &zone_array[idx];

	elemSize = (uint32_t) zone->elem_size;
	maxElems = ptoa(zone->page_count) / elemSize;

	if ((zone->alloc_size % elemSize)
	  && !leak_scan_debug_flag) return (KERN_INVALID_CAPABILITY);

	kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
				maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
	if (KERN_SUCCESS != kr) return (kr);

	lock_zone(zone);

	next = array;
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next);
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next);
	next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next);
	count = (uint32_t)(next - array);

	unlock_zone(zone);

	zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found);
	assert(found <= count);

	for (idx = 0; idx < count; idx++)
	{
		element = array[idx];
		if (kInstanceFlagReferenced & element) continue;
		element = INSTANCE_PUT(element) & ~kInstanceFlags;
	}

	if (zone->zlog_btlog && !corruption_debug_flag)
	{
		// btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
		btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon);
	}

	for (nobtcount = idx = 0; idx < count; idx++)
	{
		element = array[idx];
		if (!element) continue;
		if (kInstanceFlagReferenced & element) continue;
		element = INSTANCE_PUT(element) & ~kInstanceFlags;

		// see if we can find any backtrace left in the element
		btcount = (typeof(btcount)) (zone->elem_size / sizeof(uintptr_t));
		if (btcount >= MAX_ZTRACE_DEPTH) btcount = MAX_ZTRACE_DEPTH - 1;
		for (btfound = btidx = 0; btidx < btcount; btidx++)
		{
			bt = ((uintptr_t *)element)[btcount - 1 - btidx];
			if (!VM_KERNEL_IS_SLID(bt)) break;
			zbt[btfound++] = bt;
		}
		if (btfound) (*proc)(refCon, 1, elemSize, &zbt[0], btfound);
		else nobtcount++;
	}
	if (nobtcount)
	{
		// fake backtrace when we found nothing
		zbt[0] = (uintptr_t) &zalloc;
		(*proc)(refCon, nobtcount, elemSize, &zbt[0], 1);
	}

	kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));

	return (KERN_SUCCESS);
}

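/*
 * kdp_is_in_zone:
 *
 * Debugger helper: returns TRUE if 'addr' is an element of the zone
 * named 'zone_name'.  Illustrative call (hypothetical caller, not part
 * of this file):
 *
 *	if (kdp_is_in_zone(obj, "tasks")) {
 *		// obj came from the "tasks" zone
 *	}
 */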
boolean_t
kdp_is_in_zone(void *addr, const char *zone_name)
{
	zone_t z;
	return (zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name));
}

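/*
 * run_zone_test:
 *
 * Zone self test: repeatedly zinit(), zalloc(), zfree() and zdestroy()
 * the zone "test_zone_sysctl", verifying that re-creating a destroyed
 * zone hands back the same zone pointer each time.
 */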
boolean_t
run_zone_test(void)
{
	int i = 0, max_iter = 5;
	void * test_ptr;
	zone_t test_zone;

	simple_lock(&zone_test_lock);
	if (!zone_test_running) {
		zone_test_running = TRUE;
	} else {
		simple_unlock(&zone_test_lock);
		printf("run_zone_test: Test already running.\n");
		return FALSE;
	}
	simple_unlock(&zone_test_lock);

	printf("run_zone_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");

	/* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
	do {
		test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
		if (test_zone == NULL) {
			printf("run_zone_test: zinit() failed\n");
			return FALSE;
		}

#if KASAN_ZALLOC
		if (test_zone_ptr == NULL && zone_free_count(test_zone) != 0) {
#else
		if (zone_free_count(test_zone) != 0) {
#endif
			printf("run_zone_test: free count is not zero\n");
			return FALSE;
		}

		if (test_zone_ptr == NULL) {
			/* Stash the zone pointer returned on the first zinit */
4483 | printf("run_zone_test: zone created for the first time\n"); | |
4484 | test_zone_ptr = test_zone; | |
4485 | } else if (test_zone != test_zone_ptr) { | |
4486 | printf("run_zone_test: old zone pointer and new zone pointer don't match\n"); | |
4487 | return FALSE; | |
4488 | } | |
4489 | ||
4490 | test_ptr = zalloc(test_zone); | |
4491 | if (test_ptr == NULL) { | |
4492 | printf("run_zone_test: zalloc() failed\n"); | |
4493 | return FALSE; | |
4494 | } | |
4495 | zfree(test_zone, test_ptr); | |
4496 | ||
4497 | zdestroy(test_zone); | |
4498 | i++; | |
4499 | ||
4500 | printf("run_zone_test: Iteration %d successful\n", i); | |
4501 | } while (i < max_iter); | |
4502 | ||
4503 | printf("run_zone_test: Test passed\n"); | |
4504 | ||
4505 | simple_lock(&zone_test_lock); | |
4506 | zone_test_running = FALSE; | |
4507 | simple_unlock(&zone_test_lock); | |
4508 | ||
4509 | return TRUE; | |
813fb2f6 A |
4510 | } |
4511 | ||
39037602 | 4512 | #endif /* DEBUG || DEVELOPMENT */ |