1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65 #include <zone_debug.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/vm_param.h>
69 #include <mach/kern_return.h>
70 #include <mach/mach_host_server.h>
71 #include <mach/task_server.h>
72 #include <mach/machine/vm_types.h>
73 #include <mach_debug/zone_info.h>
74 #include <mach/vm_map.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/backtrace.h>
79 #include <kern/host.h>
80 #include <kern/macro_help.h>
81 #include <kern/sched.h>
82 #include <kern/locks.h>
83 #include <kern/sched_prim.h>
84 #include <kern/misc_protos.h>
85 #include <kern/thread_call.h>
86 #include <kern/zalloc.h>
87 #include <kern/kalloc.h>
88
89 #include <vm/pmap.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_page.h>
93
94 #include <pexpert/pexpert.h>
95
96 #include <machine/machparam.h>
97 #include <machine/machine_routines.h> /* ml_cpu_get_info */
98
99 #include <libkern/OSDebug.h>
100 #include <libkern/OSAtomic.h>
101 #include <sys/kdebug.h>
102
103 /*
104 * ZONE_ALIAS_ADDR (deprecated)
105 */
106
107 #define from_zone_map(addr, size) \
108 ((vm_offset_t)(addr) >= zone_map_min_address && \
109 ((vm_offset_t)(addr) + size - 1) < zone_map_max_address )
110
111 /*
112 * Zone Corruption Debugging
113 *
114 * We use three techniques to detect modification of a zone element
115 * after it's been freed.
116 *
117 * (1) Check the freelist next pointer for sanity.
118 * (2) Store a backup of the next pointer at the end of the element,
119 * and compare it to the primary next pointer when the element is allocated
120 * to detect corruption of the freelist due to use-after-free bugs.
121 * The backup pointer is also XORed with a per-boot random cookie.
122 * (3) Poison the freed element by overwriting it with 0xdeadbeef,
123 * and check for that value when the element is being reused to make sure
124 * no part of the element has been modified while it was on the freelist.
125 * This will also help catch read-after-frees, as code will now dereference
126 * 0xdeadbeef instead of a valid but freed pointer.
127 *
128 * (1) and (2) occur for every allocation and free to a zone.
129 * This is done to make it slightly more difficult for an attacker to
130 * manipulate the freelist to behave in a specific way.
131 *
132 * Poisoning (3) occurs periodically for every N frees (counted per-zone)
133 * and on every free for zones smaller than a cacheline. If -zp
134 * is passed as a boot arg, poisoning occurs for every free.
135 *
136  * Performance slowdown is inversely proportional to the sampling factor N,
137 * with a 4-5% hit around N=1, down to ~0.3% at N=16 and just "noise" at N=32
138 * and higher. You can expect to find a 100% reproducible bug in an average of
139 * N tries, with a standard deviation of about N, but you will want to set
140 * "-zp" to always poison every free if you are attempting to reproduce
141 * a known bug.
142 *
143 * For a more heavyweight, but finer-grained method of detecting misuse
144 * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c.
145 *
146 * Zone Corruption Logging
147 *
148 * You can also track where corruptions come from by using the boot-arguments
149 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later
150 * in this document for more implementation and usage information.
151 *
152 * Zone Leak Detection
153 *
154 * To debug leaks of zone memory, use the zone leak detection tool 'zleaks'
155 * found later in this file via the showtopztrace and showz* macros in kgmacros,
156 * or use zlog without the -zc argument.
157 *
158 */
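/*
 * Illustrative layout (documentation only, not code) of a freed element of
 * size E whose next free element lives at address N, under (2) and (3):
 *
 *   offset 0                  N ^ zp_nopoison_cookie                (primary next pointer)
 *   middle of the element     ZP_POISON fill, only when poisoned
 *   offset E - pointer size   N ^ zp_poisoned_cookie when poisoned  (backup next pointer)
 *                             N ^ zp_nopoison_cookie otherwise
 *
 * At allocation time both words are decoded and compared; see free_to_zone(),
 * try_alloc_from_zone() and backup_ptr_mismatch_panic() below.
 */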
159
160 /* Returns TRUE if we rolled over the counter at factor */
161 static inline boolean_t
162 sample_counter(volatile uint32_t * count_p, uint32_t factor)
163 {
164 uint32_t old_count, new_count;
165 boolean_t rolled_over;
166
167 do {
168 new_count = old_count = *count_p;
169
170 if (++new_count >= factor) {
171 rolled_over = TRUE;
172 new_count = 0;
173 } else {
174 rolled_over = FALSE;
175 }
176
177 } while (!OSCompareAndSwap(old_count, new_count, count_p));
178
179 return rolled_over;
180 }
181
182 #if defined(__LP64__)
183 #define ZP_POISON 0xdeadbeefdeadbeef
184 #else
185 #define ZP_POISON 0xdeadbeef
186 #endif
187
188 #define ZP_DEFAULT_SAMPLING_FACTOR 16
189 #define ZP_DEFAULT_SCALE_FACTOR 4
190
191 /*
192  * A zp_factor of 0 indicates zone poisoning is disabled;
193  * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline).
194 * Passing the -no-zp boot-arg disables even this behavior.
195 * In all cases, we record and check the integrity of a backup pointer.
196 */
197
198 /* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */
199 uint32_t zp_factor = 0;
200
201 /* set by zp-scale=N boot arg, scales zp_factor by zone size */
202 uint32_t zp_scale = 0;
203
204 /* set in zp_init, zero indicates -no-zp boot-arg */
205 vm_size_t zp_tiny_zone_limit = 0;
206
207 /* initialized to a per-boot random value in zp_init */
208 uintptr_t zp_poisoned_cookie = 0;
209 uintptr_t zp_nopoison_cookie = 0;
210
211
212 /*
213 * initialize zone poisoning
214 * called from zone_bootstrap before any allocations are made from zalloc
215 */
216 static inline void
217 zp_init(void)
218 {
219 char temp_buf[16];
220
221 /*
222 * Initialize backup pointer random cookie for poisoned elements
223  * Try not to call early_random() back to back; it may return
224 * the same value if mach_absolute_time doesn't have sufficient time
225 * to tick over between calls. <rdar://problem/11597395>
226 * (This is only a problem on embedded devices)
227 */
228 zp_poisoned_cookie = (uintptr_t) early_random();
229
230 /*
231 * Always poison zones smaller than a cacheline,
232 * because it's pretty close to free
233 */
234 ml_cpu_info_t cpu_info;
235 ml_cpu_get_info(&cpu_info);
236 zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size;
237
238 zp_factor = ZP_DEFAULT_SAMPLING_FACTOR;
239 zp_scale = ZP_DEFAULT_SCALE_FACTOR;
240
241 //TODO: Bigger permutation?
242 /*
243 * Permute the default factor +/- 1 to make it less predictable
244 * This adds or subtracts ~4 poisoned objects per 1000 frees.
245 */
246 if (zp_factor != 0) {
247 uint32_t rand_bits = early_random() & 0x3;
248
249 if (rand_bits == 0x1)
250 zp_factor += 1;
251 else if (rand_bits == 0x2)
252 zp_factor -= 1;
253 /* if 0x0 or 0x3, leave it alone */
254 }
255
256 /* -zp: enable poisoning for every alloc and free */
257 if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
258 zp_factor = 1;
259 }
260
261 /* -no-zp: disable poisoning completely even for tiny zones */
262 if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
263 zp_factor = 0;
264 zp_tiny_zone_limit = 0;
265 printf("Zone poisoning disabled\n");
266 }
267
268 /* zp-factor=XXXX: override how often to poison freed zone elements */
269 if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) {
270 printf("Zone poisoning factor override: %u\n", zp_factor);
271 }
272
273 /* zp-scale=XXXX: override how much zone size scales zp-factor by */
274 if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) {
275 printf("Zone poisoning scale factor override: %u\n", zp_scale);
276 }
277
278 /* Initialize backup pointer random cookie for unpoisoned elements */
279 zp_nopoison_cookie = (uintptr_t) early_random();
280
281 #if MACH_ASSERT
282 if (zp_poisoned_cookie == zp_nopoison_cookie)
283 panic("early_random() is broken: %p and %p are not random\n",
284 (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie);
285 #endif
286
287 /*
288 * Use the last bit in the backup pointer to hint poisoning state
289 * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
290 * the low bits are zero.
291 */
292 zp_poisoned_cookie |= (uintptr_t)0x1ULL;
293 zp_nopoison_cookie &= ~((uintptr_t)0x1ULL);
294
295 #if defined(__LP64__)
296 /*
297 * Make backup pointers more obvious in GDB for 64 bit
298  * by making 0xFFFFFF... ^ cookie = 0xFACADE...
299 * (0xFACADE = 0xFFFFFF ^ 0x053521)
300 * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011)
301 * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
302 * by the sanity check, so it's OK for that part of the cookie to be predictable.
303 *
304 * TODO: Use #defines, xors, and shifts
305 */
306
307 zp_poisoned_cookie &= 0x000000FFFFFFFFFF;
308 zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */
309
310 zp_nopoison_cookie &= 0x000000FFFFFFFFFF;
311 zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */
312 #endif
313 }
314
315 /*
316 * These macros are used to keep track of the number
317 * of pages being used by the zone currently. The
318 * z->page_count is protected by the zone lock.
319 */
320 #define ZONE_PAGE_COUNT_INCR(z, count) \
321 { \
322 OSAddAtomic64(count, &(z->page_count)); \
323 }
324
325 #define ZONE_PAGE_COUNT_DECR(z, count) \
326 { \
327 OSAddAtomic64(-count, &(z->page_count)); \
328 }
329
330 vm_map_t zone_map = VM_MAP_NULL;
331
332 /* for is_sane_zone_element and garbage collection */
333
334 vm_offset_t zone_map_min_address = 0; /* initialized in zone_init */
335 vm_offset_t zone_map_max_address = 0;
336
337 /* Globals for random boolean generator for elements in free list */
338 #define MAX_ENTROPY_PER_ZCRAM 4
339 #define RANDOM_BOOL_GEN_SEED_COUNT 4
340 static unsigned int bool_gen_seed[RANDOM_BOOL_GEN_SEED_COUNT];
341 static unsigned int bool_gen_global = 0;
342 decl_simple_lock_data(, bool_gen_lock)
343
344 /* VM region for all metadata structures */
345 vm_offset_t zone_metadata_region_min = 0;
346 vm_offset_t zone_metadata_region_max = 0;
347 decl_lck_mtx_data(static ,zone_metadata_region_lck)
348 lck_attr_t zone_metadata_lock_attr;
349 lck_mtx_ext_t zone_metadata_region_lck_ext;
350
351 /* Helpful for walking through a zone's free element list. */
352 struct zone_free_element {
353 struct zone_free_element *next;
354 /* ... */
355 /* void *backup_ptr; */
356 };
357
358 /*
359 * Protects num_zones, zone_array and zone_array_index
360 */
361 decl_simple_lock_data(, all_zones_lock)
362 unsigned int num_zones;
363
364 #define MAX_ZONES 256
365 struct zone zone_array[MAX_ZONES];
366 static int zone_array_index = 0;
367
368 #define MULTIPAGE_METADATA_MAGIC (0xff)
369
370 #define PAGE_METADATA_GET_ZINDEX(page_meta) \
371 (page_meta->zindex)
372
373 #define PAGE_METADATA_GET_ZONE(page_meta) \
374 (&(zone_array[page_meta->zindex]))
375
376 #define PAGE_METADATA_SET_ZINDEX(page_meta, index) \
377 page_meta->zindex = (index);
378
379 struct zone_page_metadata {
380 queue_chain_t pages; /* linkage pointer for metadata lists */
381
382 /* Union for maintaining start of element free list and real metadata (for multipage allocations) */
383 union {
384 /*
385 * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because
386 * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset
387 * from start of the allocation chunk to free element list head.
388 */
389 uint32_t freelist_offset;
390 /*
391 * This field is used to lookup the real metadata for multipage allocations, where we mark the
392 * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC.
393 * Offset from this fake metadata to real metadata of allocation chunk (-ve offset).
394 */
395 uint32_t real_metadata_offset;
396 };
397
398 /*
399 * For the first page in the allocation chunk, this represents the total number of free elements in
400 * the chunk.
401 * For all other pages, it represents the number of free elements on that page (used
402 * for garbage collection of zones with large multipage allocation size)
403 */
404 uint16_t free_count;
405 uint8_t zindex; /* Zone index within the zone_array */
406 uint8_t page_count; /* Count of pages within the allocation chunk */
407 };
408
409 /* Macro to get page index (within zone_map) of page containing element */
410 #define PAGE_INDEX_FOR_ELEMENT(element) \
411 (((vm_offset_t)trunc_page(element) - zone_map_min_address) / PAGE_SIZE)
412
413 /* Macro to get metadata structure given a page index in zone_map */
414 #define PAGE_METADATA_FOR_PAGE_INDEX(index) \
415 (zone_metadata_region_min + ((index) * sizeof(struct zone_page_metadata)))
416
417 /* Macro to get index (within zone_map) for given metadata */
418 #define PAGE_INDEX_FOR_METADATA(page_meta) \
419 (((vm_offset_t)page_meta - zone_metadata_region_min) / sizeof(struct zone_page_metadata))
420
421 /* Macro to get page for given page index in zone_map */
422 #define PAGE_FOR_PAGE_INDEX(index) \
423 (zone_map_min_address + (PAGE_SIZE * (index)))
424
425 /* Macro to get the actual metadata for a given address */
426 #define PAGE_METADATA_FOR_ELEMENT(element) \
427 (struct zone_page_metadata *)(PAGE_METADATA_FOR_PAGE_INDEX(PAGE_INDEX_FOR_ELEMENT(element)))
428
429 /* Magic value to indicate empty element free list */
430 #define PAGE_METADATA_EMPTY_FREELIST ((uint32_t)(~0))
431
432 static inline void *
433 page_metadata_get_freelist(struct zone_page_metadata *page_meta)
434 {
435 assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
436 if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST)
437 return NULL;
438 else {
439 if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
440 return (void *)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)) + page_meta->freelist_offset);
441 else
442 return (void *)((vm_offset_t)page_meta + page_meta->freelist_offset);
443 }
444 }
445
446 static inline void
447 page_metadata_set_freelist(struct zone_page_metadata *page_meta, void *addr)
448 {
449 assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
450 if (addr == NULL)
451 page_meta->freelist_offset = PAGE_METADATA_EMPTY_FREELIST;
452 else {
453 if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
454 page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)));
455 else
456 page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - (vm_offset_t)page_meta);
457 }
458 }
459
460 static inline struct zone_page_metadata *
461 page_metadata_get_realmeta(struct zone_page_metadata *page_meta)
462 {
463 assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC);
464 return (struct zone_page_metadata *)((vm_offset_t)page_meta - page_meta->real_metadata_offset);
465 }
466
467 static inline void
468 page_metadata_set_realmeta(struct zone_page_metadata *page_meta, struct zone_page_metadata *real_meta)
469 {
470 assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC);
471 assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC);
472 assert((vm_offset_t)page_meta > (vm_offset_t)real_meta);
473 vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta;
474 assert(offset <= UINT32_MAX);
475 page_meta->real_metadata_offset = (uint32_t)offset;
476 }
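/*
 * Worked example (illustrative): for a 3-page allocation chunk, only the first
 * page's entry in the metadata region is "real"; it carries the zindex,
 * page_count and chunk-wide free_count.  The entries for pages 2 and 3 have
 * zindex == MULTIPAGE_METADATA_MAGIC and real_metadata_offset values of
 * 1 * sizeof(struct zone_page_metadata) and 2 * sizeof(struct zone_page_metadata)
 * respectively, which is how page_metadata_get_realmeta() walks back to the
 * real entry for page 1.
 */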
477
478 /* The backup pointer is stored in the last pointer-sized location in an element. */
479 static inline vm_offset_t *
480 get_backup_ptr(vm_size_t elem_size,
481 vm_offset_t *element)
482 {
483 return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t));
484 }
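/*
 * Minimal sketch (not used by the allocator) of how a freed element's
 * primary/backup pair is decoded with get_backup_ptr() and the cookies
 * defined above.  The real consistency checks, including verification of the
 * ZP_POISON fill, are done by try_alloc_from_zone() and its callers below.
 */
static inline vm_offset_t
zalloc_example_decode_free_element(zone_t zone, vm_offset_t element, boolean_t *was_poisoned)
{
	vm_offset_t *primary = (vm_offset_t *) element;
	vm_offset_t *backup  = get_backup_ptr(zone->elem_size, primary);

	/* The primary next pointer is always obfuscated with the nopoison cookie */
	vm_offset_t next = *primary ^ zp_nopoison_cookie;

	/* A backup that only decodes with the poisoned cookie means the body was poisoned */
	*was_poisoned = (next == (*backup ^ zp_poisoned_cookie));

	return next;
}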
485
486 /*
487 * Routine to populate a page backing metadata in the zone_metadata_region.
488 * Must be called without the zone lock held as it might potentially block.
489 */
490 static inline void
491 zone_populate_metadata_page(struct zone_page_metadata *page_meta)
492 {
493 vm_offset_t page_metadata_begin = trunc_page(page_meta);
494 vm_offset_t page_metadata_end = trunc_page((vm_offset_t)page_meta + sizeof(struct zone_page_metadata));
495
496 for(;page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) {
497 if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin))
498 continue;
499 /* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */
500 lck_mtx_lock(&zone_metadata_region_lck);
501 if (0 == pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) {
502 kernel_memory_populate(zone_map,
503 page_metadata_begin,
504 PAGE_SIZE,
505 KMA_KOBJECT,
506 VM_KERN_MEMORY_OSFMK);
507 }
508 lck_mtx_unlock(&zone_metadata_region_lck);
509 }
510 return;
511 }
512
513 static inline uint16_t
514 get_metadata_alloc_count(struct zone_page_metadata *page_meta)
515 {
516 assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC);
517 struct zone *z = PAGE_METADATA_GET_ZONE(page_meta);
518 return ((page_meta->page_count * PAGE_SIZE) / z->elem_size);
519 }
520
521 /*
522 * Routine to lookup metadata for any given address.
523 * If init is marked as TRUE, this should be called without holding the zone lock
524 * since the initialization might block.
525 */
526 static inline struct zone_page_metadata *
527 get_zone_page_metadata(struct zone_free_element *element, boolean_t init)
528 {
529 struct zone_page_metadata *page_meta = 0;
530
531 if (from_zone_map(element, sizeof(struct zone_free_element))) {
532 page_meta = (struct zone_page_metadata *)(PAGE_METADATA_FOR_ELEMENT(element));
533 if (init)
534 zone_populate_metadata_page(page_meta);
535 } else {
536 page_meta = (struct zone_page_metadata *)(trunc_page((vm_offset_t)element));
537 }
538 if (init)
539 bzero((char *)page_meta, sizeof(struct zone_page_metadata));
540 return ((PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta));
541 }
542
543 /* Routine to get the page for a given metadata */
544 static inline vm_offset_t
545 get_zone_page(struct zone_page_metadata *page_meta)
546 {
547 if (from_zone_map(page_meta, sizeof(struct zone_page_metadata)))
548 return (vm_offset_t)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)));
549 else
550 return (vm_offset_t)(trunc_page(page_meta));
551 }
552
553 /* Routine to get the size of a zone allocated address. If the address doesn't belong to the
554 * zone_map, returns 0.
555 */
556 vm_size_t
557 zone_element_size(void *addr, zone_t *z)
558 {
559 struct zone *src_zone;
560 if (from_zone_map(addr, sizeof(void *))) {
561 struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
562 src_zone = PAGE_METADATA_GET_ZONE(page_meta);
563 if (z) {
564 *z = src_zone;
565 }
566 return (src_zone->elem_size);
567 } else {
568 #if CONFIG_GZALLOC
569 vm_size_t gzsize;
570 if (gzalloc_element_size(addr, z, &gzsize)) {
571 return gzsize;
572 }
573 #endif /* CONFIG_GZALLOC */
574
575 return 0;
576 }
577 }
578
579 /*
580 * Zone checking helper function.
581 * A pointer that satisfies these conditions is OK to be a freelist next pointer
582 * A pointer that doesn't satisfy these conditions indicates corruption
583 */
584 static inline boolean_t
585 is_sane_zone_ptr(zone_t zone,
586 vm_offset_t addr,
587 size_t obj_size)
588 {
589 /* Must be aligned to pointer boundary */
590 if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0))
591 return FALSE;
592
593 /* Must be a kernel address */
594 if (__improbable(!pmap_kernel_va(addr)))
595 return FALSE;
596
597 /* Must be from zone map if the zone only uses memory from the zone_map */
598 /*
599 * TODO: Remove the zone->collectable check when every
600 * zone using foreign memory is properly tagged with allows_foreign
601 */
602 if (zone->collectable && !zone->allows_foreign) {
603 /* check if addr is from zone map */
604 if (addr >= zone_map_min_address &&
605 (addr + obj_size - 1) < zone_map_max_address )
606 return TRUE;
607
608 return FALSE;
609 }
610
611 return TRUE;
612 }
613
614 static inline boolean_t
615 is_sane_zone_page_metadata(zone_t zone,
616 vm_offset_t page_meta)
617 {
618 /* NULL page metadata structures are invalid */
619 if (page_meta == 0)
620 return FALSE;
621 return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata));
622 }
623
624 static inline boolean_t
625 is_sane_zone_element(zone_t zone,
626 vm_offset_t addr)
627 {
628 /* NULL is OK because it indicates the tail of the list */
629 if (addr == 0)
630 return TRUE;
631 return is_sane_zone_ptr(zone, addr, zone->elem_size);
632 }
633
634 /* Someone wrote to freed memory. */
635 static inline void /* noreturn */
636 zone_element_was_modified_panic(zone_t zone,
637 vm_offset_t element,
638 vm_offset_t found,
639 vm_offset_t expected,
640 vm_offset_t offset)
641 {
642 panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p",
643 zone->zone_name,
644 (void *) expected,
645 (void *) found,
646 (void *) (expected ^ found),
647 (uint32_t) offset,
648 (uint32_t) zone->elem_size,
649 (void *) element,
650 (void *) zp_nopoison_cookie,
651 (void *) zp_poisoned_cookie);
652 }
653
654 /*
655 * The primary and backup pointers don't match.
656 * Determine which one was likely the corrupted pointer, find out what it
657 * probably should have been, and panic.
658 * I would like to mark this as noreturn, but panic() isn't marked noreturn.
659 */
660 static void /* noreturn */
661 backup_ptr_mismatch_panic(zone_t zone,
662 vm_offset_t element,
663 vm_offset_t primary,
664 vm_offset_t backup)
665 {
666 vm_offset_t likely_backup;
667 vm_offset_t likely_primary;
668
669 likely_primary = primary ^ zp_nopoison_cookie;
670 boolean_t sane_backup;
671 boolean_t sane_primary = is_sane_zone_element(zone, likely_primary);
672 boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE;
673
674 #if defined(__LP64__)
675 /* We can inspect the tag in the upper bits for additional confirmation */
676 if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000)
677 element_was_poisoned = TRUE;
678 else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
679 element_was_poisoned = FALSE;
680 #endif
681
682 if (element_was_poisoned) {
683 likely_backup = backup ^ zp_poisoned_cookie;
684 sane_backup = is_sane_zone_element(zone, likely_backup);
685 } else {
686 likely_backup = backup ^ zp_nopoison_cookie;
687 sane_backup = is_sane_zone_element(zone, likely_backup);
688 }
689
690 /* The primary is definitely the corrupted one */
691 if (!sane_primary && sane_backup)
692 zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);
693
694 /* The backup is definitely the corrupted one */
695 if (sane_primary && !sane_backup)
696 zone_element_was_modified_panic(zone, element, backup,
697 (primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)),
698 zone->elem_size - sizeof(vm_offset_t));
699
700 /*
701 * Not sure which is the corrupted one.
702 * It's less likely that the backup pointer was overwritten with
703 * ( (sane address) ^ (valid cookie) ), so we'll guess that the
704 * primary pointer has been overwritten with a sane but incorrect address.
705 */
706 if (sane_primary && sane_backup)
707 zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
708
709 /* Neither are sane, so just guess. */
710 zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
711 }
712
713 /*
714 * Adds the element to the head of the zone's free list
715 * Keeps a backup next-pointer at the end of the element
716 */
717 static inline void
718 free_to_zone(zone_t zone,
719 vm_offset_t element,
720 boolean_t poison)
721 {
722 vm_offset_t old_head;
723 struct zone_page_metadata *page_meta;
724
725 vm_offset_t *primary = (vm_offset_t *) element;
726 vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
727
728 page_meta = get_zone_page_metadata((struct zone_free_element *)element, FALSE);
729 assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
730 old_head = (vm_offset_t)page_metadata_get_freelist(page_meta);
731
732 #if MACH_ASSERT
733 if (__improbable(!is_sane_zone_element(zone, old_head)))
734 panic("zfree: invalid head pointer %p for freelist of zone %s\n",
735 (void *) old_head, zone->zone_name);
736 #endif
737
738 if (__improbable(!is_sane_zone_element(zone, element)))
739 panic("zfree: freeing invalid pointer %p to zone %s\n",
740 (void *) element, zone->zone_name);
741
742 /*
743 * Always write a redundant next pointer
744 * So that it is more difficult to forge, xor it with a random cookie
745 * A poisoned element is indicated by using zp_poisoned_cookie
746 * instead of zp_nopoison_cookie
747 */
748
749 *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);
750
751 /*
752 * Insert this element at the head of the free list. We also xor the
753 * primary pointer with the zp_nopoison_cookie to make sure a free
754 * element does not provide the location of the next free element directly.
755 */
756 *primary = old_head ^ zp_nopoison_cookie;
757 page_metadata_set_freelist(page_meta, (struct zone_free_element *)element);
758 page_meta->free_count++;
759 if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
760 if (page_meta->free_count == 1) {
761 /* first foreign element freed on page, move from all_used */
762 re_queue_tail(&zone->pages.any_free_foreign, &(page_meta->pages));
763 } else {
764 /* no other list transitions */
765 }
766 } else if (page_meta->free_count == get_metadata_alloc_count(page_meta)) {
767                 /* whether the page was on the intermediate or all_used queue, move it to free */
768 re_queue_tail(&zone->pages.all_free, &(page_meta->pages));
769 zone->count_all_free_pages += page_meta->page_count;
770 } else if (page_meta->free_count == 1) {
771 /* first free element on page, move from all_used */
772 re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
773 }
774 zone->count--;
775 zone->countfree++;
776 }
777
778
779 /*
780 * Removes an element from the zone's free list, returning 0 if the free list is empty.
781 * Verifies that the next-pointer and backup next-pointer are intact,
782 * and verifies that a poisoned element hasn't been modified.
783 */
784 static inline vm_offset_t
785 try_alloc_from_zone(zone_t zone,
786 boolean_t* check_poison)
787 {
788 vm_offset_t element;
789 struct zone_page_metadata *page_meta;
790
791 *check_poison = FALSE;
792
793 /* if zone is empty, bail */
794 if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
795 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
796 else if (!queue_empty(&zone->pages.intermediate))
797 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
798 else if (!queue_empty(&zone->pages.all_free)) {
799 page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
800 assert(zone->count_all_free_pages >= page_meta->page_count);
801 zone->count_all_free_pages -= page_meta->page_count;
802 } else {
803 return 0;
804 }
805 /* Check if page_meta passes is_sane_zone_element */
806 if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
807 panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
808 (void *) page_meta, zone->zone_name);
809 assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
810 element = (vm_offset_t)page_metadata_get_freelist(page_meta);
811
812 if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size)))
813 panic("zfree: invalid head pointer %p for freelist of zone %s\n",
814 (void *) element, zone->zone_name);
815
816 vm_offset_t *primary = (vm_offset_t *) element;
817 vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
818
819 /*
820 * Since the primary next pointer is xor'ed with zp_nopoison_cookie
821 * for obfuscation, retrieve the original value back
822 */
823 vm_offset_t next_element = *primary ^ zp_nopoison_cookie;
824 vm_offset_t next_element_primary = *primary;
825 vm_offset_t next_element_backup = *backup;
826
827 /*
828 * backup_ptr_mismatch_panic will determine what next_element
829 * should have been, and print it appropriately
830 */
831 if (__improbable(!is_sane_zone_element(zone, next_element)))
832 backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);
833
834 /* Check the backup pointer for the regular cookie */
835 if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {
836
837 /* Check for the poisoned cookie instead */
838 if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie)))
839 /* Neither cookie is valid, corruption has occurred */
840 backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);
841
842 /*
843 * Element was marked as poisoned, so check its integrity before using it.
844 */
845 *check_poison = TRUE;
846 }
847
848 /* Make sure the page_meta is at the correct offset from the start of page */
849 if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE)))
850 panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n",
851 page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE));
852
853 /* Make sure next_element belongs to the same page as page_meta */
854 if (next_element) {
855 if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE)))
856 panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
857 (void *)next_element, (void *)element, zone->zone_name);
858 }
859
860 /* Remove this element from the free list */
861 page_metadata_set_freelist(page_meta, (struct zone_free_element *)next_element);
862 page_meta->free_count--;
863
864 if (page_meta->free_count == 0) {
865 /* move to all used */
866 re_queue_tail(&zone->pages.all_used, &(page_meta->pages));
867 } else {
868 if (!zone->allows_foreign || from_zone_map(element, zone->elem_size)) {
869 if (get_metadata_alloc_count(page_meta) == page_meta->free_count + 1) {
870 /* remove from free, move to intermediate */
871 re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
872 }
873 }
874 }
875 zone->countfree--;
876 zone->count++;
877 zone->sum_count++;
878
879 return element;
880 }
881
882 /*
883 * End of zone poisoning
884 */
885
886 /*
887 * Zone info options
888 */
889 #define ZINFO_SLOTS MAX_ZONES /* for now */
890
891 void zone_display_zprint(void);
892
893 zone_t zone_find_largest(void);
894
895 /*
896 * Async allocation of zones
897  * This mechanism allows for bootstrapping an empty zone which is set up with
898 * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call
899 * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free.
900 * This will prime the zone for the next use.
901 *
902 * Currently the thread_callout function (zalloc_async) will loop through all zones
903 * looking for any zone with async_pending set and do the work for it.
904 *
905 * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call,
906 * then zalloc_noblock to an empty zone may succeed.
907 */
908 void zalloc_async(
909 thread_call_param_t p0,
910 thread_call_param_t p1);
911
912 static thread_call_data_t call_async_alloc;
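/*
 * Minimal sketch (illustrative only, not the kernel's implementation) of the
 * priming step described above: for a zone with async_pending set,
 * zalloc_async() effectively performs one blocking allocation followed by an
 * immediate free, so the zone has backing memory the next time
 * zalloc_noblock() is called.
 */
static inline void
zone_prime_example(zone_t z)
{
	void *elt = zalloc(z);		/* may block while the zone is expanded */
	if (elt != NULL)
		zfree(z, elt);		/* element returns to the now-populated freelist */
}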
913
914 /*
915 * Align elements that use the zone page list to 32 byte boundaries.
916 */
917 #define ZONE_ELEMENT_ALIGNMENT 32
918
919 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
920 #define zone_sleep(zone) \
921 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
922
923 /*
924 * The zone_locks_grp allows for collecting lock statistics.
925 * All locks are associated to this group in zinit.
926 * Look at tools/lockstat for debugging lock contention.
927 */
928
929 lck_grp_t zone_locks_grp;
930 lck_grp_attr_t zone_locks_grp_attr;
931
932 #define lock_zone_init(zone) \
933 MACRO_BEGIN \
934 lck_attr_setdefault(&(zone)->lock_attr); \
935 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
936 &zone_locks_grp, &(zone)->lock_attr); \
937 MACRO_END
938
939 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
940
941 /*
942 * Exclude more than one concurrent garbage collection
943 */
944 decl_lck_mtx_data(, zone_gc_lock)
945
946 lck_attr_t zone_gc_lck_attr;
947 lck_grp_t zone_gc_lck_grp;
948 lck_grp_attr_t zone_gc_lck_grp_attr;
949 lck_mtx_ext_t zone_gc_lck_ext;
950
951 boolean_t zone_gc_allowed = TRUE;
952 boolean_t panic_include_zprint = FALSE;
953
954 vm_offset_t panic_kext_memory_info = 0;
955 vm_size_t panic_kext_memory_size = 0;
956
957 #define ZALLOC_DEBUG_ZONEGC 0x00000001
958 #define ZALLOC_DEBUG_ZCRAM 0x00000002
959 uint32_t zalloc_debug = 0;
960
961 /*
962 * Zone leak debugging code
963 *
964 * When enabled, this code keeps a log to track allocations to a particular zone that have not
965 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
966 * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
967 * off by default.
968 *
969 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
970 * is the name of the zone you wish to log.
971 *
972 * This code only tracks one zone, so you need to identify which one is leaking first.
973 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
974 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
975 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
976 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
977 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
978 * See the help in the kgmacros for usage info.
979 *
980 *
981 * Zone corruption logging
982 *
983 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
984 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
985 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
986 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
987 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
988 * corrupted to examine its history. This should lead to the source of the corruption.
989 */
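/*
 * Example boot-args (illustrative values): to log allocations in the
 * "kalloc.64" zone with a 1000-record log, boot with
 *
 *     zlog=kalloc.64 zrecs=1000
 *
 * and add "-zc" to the same line to switch that log to alloc/free tracking
 * for corruption debugging.  A '.' in the zlog value matches a space in the
 * zone name, since spaces cannot be passed through boot-args (see
 * log_this_zone() below).
 */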
990
991 static boolean_t log_records_init = FALSE;
992 static int log_records; /* size of the log, expressed in number of records */
993
994 #define MAX_NUM_ZONES_ALLOWED_LOGGING 5 /* Maximum 5 zones can be logged at once */
995
996 static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING;
997 static int num_zones_logged = 0;
998
999 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
1000
1001 static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
1002
1003 /* Log allocations and frees to help debug a zone element corruption */
1004 boolean_t corruption_debug_flag = FALSE; /* enabled by "-zc" boot-arg */
1005 /* Making pointer scanning leaks detection possible for all zones */
1006
1007 #if DEBUG || DEVELOPMENT
1008 boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-arg */
1009 #endif /* DEBUG || DEVELOPMENT */
1010
1011
1012 /*
1013 * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
1014 * the number of records you want in the log. For example, "zrecs=10" sets it to 10 records. Since this
1015 * is the number of stacks suspected of leaking, we don't need many records.
1016 */
1017
1018 #if defined(__LP64__)
1019 #define ZRECORDS_MAX 2560 /* Max records allowed in the log */
1020 #else
1021 #define ZRECORDS_MAX 1536 /* Max records allowed in the log */
1022 #endif
1023 #define ZRECORDS_DEFAULT 1024   /* default records in log if zrecs is not specified in boot-args */
1024
1025 /*
1026 * Each record in the log contains a pointer to the zone element it refers to,
1027 * and a small array to hold the pc's from the stack trace. A
1028 * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging,
1029 * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees.
1030 * If the log fills, old records are replaced as if it were a circular buffer.
1031 */
1032
1033
1034 /*
1035 * Opcodes for the btlog operation field:
1036 */
1037
1038 #define ZOP_ALLOC 1
1039 #define ZOP_FREE 0
1040
1041 /*
1042 * Decide if we want to log this zone by doing a string compare between a zone name and the name
1043 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
1044 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
1045 * match a space in the zone name.
1046 */
1047
1048 static int
1049 log_this_zone(const char *zonename, const char *logname)
1050 {
1051 int len;
1052 const char *zc = zonename;
1053 const char *lc = logname;
1054
1055 /*
1056 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
1057 */
1058
1059 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
1060
1061 /*
1062          * If the current characters don't match, check for a space in
1063          * the zone name and a corresponding period in the log name.
1064 * If that's not there, then the strings don't match.
1065 */
1066
1067 if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
1068 break;
1069
1070 /*
1071 * The strings are equal so far. If we're at the end, then it's a match.
1072 */
1073
1074 if (*zc == '\0')
1075 return TRUE;
1076 }
1077
1078 return FALSE;
1079 }
1080
1081
1082 /*
1083 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
1084 * the buffer for the records has been allocated.
1085 */
1086
1087 #define DO_LOGGING(z) (z->zone_logging == TRUE && z->zlog_btlog)
1088
1089 extern boolean_t kmem_alloc_ready;
1090
1091 #if CONFIG_ZLEAKS
1092 #pragma mark -
1093 #pragma mark Zone Leak Detection
1094
1095 /*
1096 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
1097 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
1098  * backtrace. Every free, we examine the table and, if the allocation was being tracked,
1099  * stop tracking it.
1100 *
1101 * We track the allocations in the zallocations hash table, which stores the address that was returned from
1102 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
1103 * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
1104 * backtraces - we don't store them more than once.
1105 *
1106 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
1107 * a large amount of virtual space.
1108 */
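/*
 * Illustrative flow: a sampled zalloc() of, say, a 256-byte element hashes its
 * backtrace into ztraces[] (adding 256 to that trace's zt_size) and records
 * the returned address in zallocations[] along with the index of that trace;
 * the matching zfree() looks the address up in zallocations[], subtracts 256
 * from the trace's zt_size and clears the slot.  Leaked allocations therefore
 * keep accumulating zt_size in the trace that allocated them, and top_ztrace
 * points at the worst offender.
 */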
1109 #define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */
1110 #define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */
1111 #define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */
1112 #define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */
1113 uint32_t zleak_state = 0; /* State of collection, as above */
1114
1115 boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */
1116 vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
1117 vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
1118 unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */
1119
1120 /*
1121 * Counters for allocation statistics.
1122 */
1123
1124 /* Times two active records want to occupy the same spot */
1125 unsigned int z_alloc_collisions = 0;
1126 unsigned int z_trace_collisions = 0;
1127
1128 /* Times a new record lands on a spot previously occupied by a freed allocation */
1129 unsigned int z_alloc_overwrites = 0;
1130 unsigned int z_trace_overwrites = 0;
1131
1132 /* Times a new alloc or trace is put into the hash table */
1133 unsigned int z_alloc_recorded = 0;
1134 unsigned int z_trace_recorded = 0;
1135
1136 /* Times zleak_log returned false due to not being able to acquire the lock */
1137 unsigned int z_total_conflicts = 0;
1138
1139
1140 #pragma mark struct zallocation
1141 /*
1142 * Structure for keeping track of an allocation
1143 * An allocation bucket is in use if its element is not NULL
1144 */
1145 struct zallocation {
1146 uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
1147 vm_size_t za_size; /* how much memory did this allocation take up? */
1148 uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */
1149 /* TODO: #if this out */
1150 uint32_t za_hit_count; /* for determining effectiveness of hash function */
1151 };
1152
1153 /* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
1154 uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
1155 uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;
1156
1157 vm_size_t zleak_max_zonemap_size;
1158
1159 /* Hashmaps of allocations and their corresponding traces */
1160 static struct zallocation* zallocations;
1161 static struct ztrace* ztraces;
1162
1163 /* not static so that panic can see this, see kern/debug.c */
1164 struct ztrace* top_ztrace;
1165
1166 /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
1167 static lck_spin_t zleak_lock;
1168 static lck_attr_t zleak_lock_attr;
1169 static lck_grp_t zleak_lock_grp;
1170 static lck_grp_attr_t zleak_lock_grp_attr;
1171
1172 /*
1173 * Initializes the zone leak monitor. Called from zone_init()
1174 */
1175 static void
1176 zleak_init(vm_size_t max_zonemap_size)
1177 {
1178 char scratch_buf[16];
1179 boolean_t zleak_enable_flag = FALSE;
1180
1181 zleak_max_zonemap_size = max_zonemap_size;
1182 zleak_global_tracking_threshold = max_zonemap_size / 2;
1183 zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;
1184
1185 /* -zleakoff (flag to disable zone leak monitor) */
1186 if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
1187 zleak_enable_flag = FALSE;
1188 printf("zone leak detection disabled\n");
1189 } else {
1190 zleak_enable_flag = TRUE;
1191 printf("zone leak detection enabled\n");
1192 }
1193
1194 /* zfactor=XXXX (override how often to sample the zone allocator) */
1195 if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
1196 printf("Zone leak factor override: %u\n", zleak_sample_factor);
1197 }
1198
1199 /* zleak-allocs=XXXX (override number of buckets in zallocations) */
1200 if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
1201 printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets);
1202 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
1203 if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
1204 printf("Override isn't a power of two, bad things might happen!\n");
1205 }
1206 }
1207
1208 /* zleak-traces=XXXX (override number of buckets in ztraces) */
1209 if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
1210 printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets);
1211 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
1212 if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
1213 printf("Override isn't a power of two, bad things might happen!\n");
1214 }
1215 }
1216
1217 /* allocate the zleak_lock */
1218 lck_grp_attr_setdefault(&zleak_lock_grp_attr);
1219 lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
1220 lck_attr_setdefault(&zleak_lock_attr);
1221 lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);
1222
1223 if (zleak_enable_flag) {
1224 zleak_state = ZLEAK_STATE_ENABLED;
1225 }
1226 }
1227
1228 #if CONFIG_ZLEAKS
1229
1230 /*
1231 * Support for kern.zleak.active sysctl - a simplified
1232 * version of the zleak_state variable.
1233 */
1234 int
1235 get_zleak_state(void)
1236 {
1237 if (zleak_state & ZLEAK_STATE_FAILED)
1238 return (-1);
1239 if (zleak_state & ZLEAK_STATE_ACTIVE)
1240 return (1);
1241 return (0);
1242 }
1243
1244 #endif
1245
1246
1247 kern_return_t
1248 zleak_activate(void)
1249 {
1250 kern_return_t retval;
1251 vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
1252 vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
1253 void *allocations_ptr = NULL;
1254 void *traces_ptr = NULL;
1255
1256 /* Only one thread attempts to activate at a time */
1257 if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
1258 return KERN_SUCCESS;
1259 }
1260
1261 /* Indicate that we're doing the setup */
1262 lck_spin_lock(&zleak_lock);
1263 if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
1264 lck_spin_unlock(&zleak_lock);
1265 return KERN_SUCCESS;
1266 }
1267
1268 zleak_state |= ZLEAK_STATE_ACTIVATING;
1269 lck_spin_unlock(&zleak_lock);
1270
1271 /* Allocate and zero tables */
1272 retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_OSFMK);
1273 if (retval != KERN_SUCCESS) {
1274 goto fail;
1275 }
1276
1277 retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_OSFMK);
1278 if (retval != KERN_SUCCESS) {
1279 goto fail;
1280 }
1281
1282 bzero(allocations_ptr, z_alloc_size);
1283 bzero(traces_ptr, z_trace_size);
1284
1285 /* Everything's set. Install tables, mark active. */
1286 zallocations = allocations_ptr;
1287 ztraces = traces_ptr;
1288
1289 /*
1290 * Initialize the top_ztrace to the first entry in ztraces,
1291 * so we don't have to check for null in zleak_log
1292 */
1293 top_ztrace = &ztraces[0];
1294
1295 /*
1296 * Note that we do need a barrier between installing
1297 * the tables and setting the active flag, because the zfree()
1298 * path accesses the table without a lock if we're active.
1299 */
1300 lck_spin_lock(&zleak_lock);
1301 zleak_state |= ZLEAK_STATE_ACTIVE;
1302 zleak_state &= ~ZLEAK_STATE_ACTIVATING;
1303 lck_spin_unlock(&zleak_lock);
1304
1305 return 0;
1306
1307 fail:
1308 /*
1309 * If we fail to allocate memory, don't further tax
1310 * the system by trying again.
1311 */
1312 lck_spin_lock(&zleak_lock);
1313 zleak_state |= ZLEAK_STATE_FAILED;
1314 zleak_state &= ~ZLEAK_STATE_ACTIVATING;
1315 lck_spin_unlock(&zleak_lock);
1316
1317 if (allocations_ptr != NULL) {
1318 kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
1319 }
1320
1321 if (traces_ptr != NULL) {
1322 kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
1323 }
1324
1325 return retval;
1326 }
1327
1328 /*
1329 * TODO: What about allocations that never get deallocated,
1330 * especially ones with unique backtraces? Should we wait to record
1331 * until after boot has completed?
1332 * (How many persistent zallocs are there?)
1333 */
1334
1335 /*
1336 * This function records the allocation in the allocations table,
1337 * and stores the associated backtrace in the traces table
1338 * (or just increments the refcount if the trace is already recorded)
1339 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
1340 * the associated trace's refcount is decremented.
1341  * If the trace slot is occupied by a different trace, it returns without recording.
1342 * The refcount is incremented by the amount of memory the allocation consumes.
1343 * The return value indicates whether to try again next time.
1344 */
1345 static boolean_t
1346 zleak_log(uintptr_t* bt,
1347 uintptr_t addr,
1348 uint32_t depth,
1349 vm_size_t allocation_size)
1350 {
1351 /* Quit if there's someone else modifying the hash tables */
1352 if (!lck_spin_try_lock(&zleak_lock)) {
1353 z_total_conflicts++;
1354 return FALSE;
1355 }
1356
1357 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
1358
1359 uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
1360 struct ztrace* trace = &ztraces[trace_index];
1361
1362 allocation->za_hit_count++;
1363 trace->zt_hit_count++;
1364
1365 /*
1366 * If the allocation bucket we want to be in is occupied, and if the occupier
1367 * has the same trace as us, just bail.
1368 */
1369 if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
1370 z_alloc_collisions++;
1371
1372 lck_spin_unlock(&zleak_lock);
1373 return TRUE;
1374 }
1375
1376 /* STEP 1: Store the backtrace in the traces array. */
1377 /* A size of zero indicates that the trace bucket is free. */
1378
1379 if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) {
1380 /*
1381 * Different unique trace with same hash!
1382 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
1383 * and get out of the way for later chances
1384 */
1385 trace->zt_collisions++;
1386 z_trace_collisions++;
1387
1388 lck_spin_unlock(&zleak_lock);
1389 return TRUE;
1390 } else if (trace->zt_size > 0) {
1391 /* Same trace, already added, so increment refcount */
1392 trace->zt_size += allocation_size;
1393 } else {
1394 /* Found an unused trace bucket, record the trace here! */
1395 if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
1396 z_trace_overwrites++;
1397
1398 z_trace_recorded++;
1399 trace->zt_size = allocation_size;
1400 memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) );
1401
1402 trace->zt_depth = depth;
1403 trace->zt_collisions = 0;
1404 }
1405
1406 /* STEP 2: Store the allocation record in the allocations array. */
1407
1408 if (allocation->za_element != (uintptr_t) 0) {
1409 /*
1410 * Straight up replace any allocation record that was there. We don't want to do the work
1411 * to preserve the allocation entries that were there, because we only record a subset of the
1412 * allocations anyways.
1413 */
1414
1415 z_alloc_collisions++;
1416
1417 struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
1418 /* Knock off old allocation's size, not the new allocation */
1419 associated_trace->zt_size -= allocation->za_size;
1420 } else if (allocation->za_trace_index != 0) {
1421 /* Slot previously used but not currently in use */
1422 z_alloc_overwrites++;
1423 }
1424
1425 allocation->za_element = addr;
1426 allocation->za_trace_index = trace_index;
1427 allocation->za_size = allocation_size;
1428
1429 z_alloc_recorded++;
1430
1431 if (top_ztrace->zt_size < trace->zt_size)
1432 top_ztrace = trace;
1433
1434 lck_spin_unlock(&zleak_lock);
1435 return TRUE;
1436 }
1437
1438 /*
1439 * Free the allocation record and release the stacktrace.
1440 * This should be as fast as possible because it will be called for every free.
1441 */
1442 static void
1443 zleak_free(uintptr_t addr,
1444 vm_size_t allocation_size)
1445 {
1446 if (addr == (uintptr_t) 0)
1447 return;
1448
1449 struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
1450
1451 /* Double-checked locking: check to find out if we're interested, lock, check to make
1452 * sure it hasn't changed, then modify it, and release the lock.
1453 */
1454
1455 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
1456 /* if the allocation was the one, grab the lock, check again, then delete it */
1457 lck_spin_lock(&zleak_lock);
1458
1459 if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
1460 struct ztrace *trace;
1461
1462 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
1463 if (allocation->za_size != allocation_size) {
1464 panic("Freeing as size %lu memory that was allocated with size %lu\n",
1465 (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
1466 }
1467
1468 trace = &ztraces[allocation->za_trace_index];
1469
1470 /* size of 0 indicates trace bucket is unused */
1471 if (trace->zt_size > 0) {
1472 trace->zt_size -= allocation_size;
1473 }
1474
1475 /* A NULL element means the allocation bucket is unused */
1476 allocation->za_element = 0;
1477 }
1478 lck_spin_unlock(&zleak_lock);
1479 }
1480 }
1481
1482 #endif /* CONFIG_ZLEAKS */
1483
1484 /* These functions are outside of CONFIG_ZLEAKS because they are also used in
1485 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
1486 */
1487
1488 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
1489 uintptr_t
1490 hash_mix(uintptr_t x)
1491 {
1492 #ifndef __LP64__
1493 x += ~(x << 15);
1494 x ^= (x >> 10);
1495 x += (x << 3 );
1496 x ^= (x >> 6 );
1497 x += ~(x << 11);
1498 x ^= (x >> 16);
1499 #else
1500 x += ~(x << 32);
1501 x ^= (x >> 22);
1502 x += ~(x << 13);
1503 x ^= (x >> 8 );
1504 x += (x << 3 );
1505 x ^= (x >> 15);
1506 x += ~(x << 27);
1507 x ^= (x >> 31);
1508 #endif
1509 return x;
1510 }
1511
1512 uint32_t
1513 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
1514 {
1515
1516 uintptr_t hash = 0;
1517 uintptr_t mask = max_size - 1;
1518
1519 while (depth) {
1520 hash += bt[--depth];
1521 }
1522
1523 hash = hash_mix(hash) & mask;
1524
1525 assert(hash < max_size);
1526
1527 return (uint32_t) hash;
1528 }
1529
1530 /*
1531 * TODO: Determine how well distributed this is
1532  * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF, which is a great bitmask
1533 */
1534 uint32_t
1535 hashaddr(uintptr_t pt, uint32_t max_size)
1536 {
1537 uintptr_t hash = 0;
1538 uintptr_t mask = max_size - 1;
1539
1540 hash = hash_mix(pt) & mask;
1541
1542 assert(hash < max_size);
1543
1544 return (uint32_t) hash;
1545 }
1546
1547 /* End of all leak-detection code */
1548 #pragma mark -
1549
1550 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
1551 #define ZONE_ALLOC_FRAG_PERCENT(alloc_size, ele_size) (((alloc_size % ele_size) * 100) / alloc_size)
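/*
 * Worked example of the fragmentation heuristic used by zinit() below
 * (hypothetical 480-byte element, assuming 4K pages):
 *
 *	alloc_size = 4096:  4096 % 480 = 256  ->  (256 * 100) / 4096 = 6
 *	alloc_size = 8192:  8192 % 480 =  32  ->  ( 32 * 100) / 8192 = 0
 *
 * so the selection loop below would pick an 8K allocation chunk for that
 * zone, since it wastes a smaller fraction of each chunk.
 */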
1552
1553 /*
1554 * zinit initializes a new zone. The zone data structures themselves
1555 * are stored in a zone, which is initially a static structure that
1556 * is initialized by zone_init.
1557 */
1558 zone_t
1559 zinit(
1560 vm_size_t size, /* the size of an element */
1561 vm_size_t max, /* maximum memory to use */
1562 vm_size_t alloc, /* allocation size */
1563 const char *name) /* a name for the zone */
1564 {
1565 zone_t z;
1566
1567 simple_lock(&all_zones_lock);
1568 z = &(zone_array[zone_array_index]);
1569 zone_array_index++;
1570 assert(zone_array_index != MAX_ZONES);
1571 simple_unlock(&all_zones_lock);
1572
1573 /* Zone elements must fit both a next pointer and a backup pointer */
1574 vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2;
1575 if (size < minimum_element_size)
1576 size = minimum_element_size;
1577
1578 /*
1579 * Round element size to a multiple of sizeof(pointer)
1580 * This also enforces that allocations will be aligned on pointer boundaries
1581 */
1582 size = ((size-1) + sizeof(vm_offset_t)) -
1583 ((size-1) % sizeof(vm_offset_t));
1584
1585 if (alloc == 0)
1586 alloc = PAGE_SIZE;
1587
1588 alloc = round_page(alloc);
1589 max = round_page(max);
1590
1591 vm_size_t best_alloc = PAGE_SIZE;
1592 vm_size_t alloc_size;
1593 for (alloc_size = (2 * PAGE_SIZE); alloc_size <= ZONE_MAX_ALLOC_SIZE; alloc_size += PAGE_SIZE) {
1594 if (ZONE_ALLOC_FRAG_PERCENT(alloc_size, size) < ZONE_ALLOC_FRAG_PERCENT(best_alloc, size)) {
1595 best_alloc = alloc_size;
1596 }
1597 }
1598 alloc = best_alloc;
1599 if (max && (max < alloc))
1600 max = alloc;
1601
1602 z->free_elements = NULL;
1603 queue_init(&z->pages.any_free_foreign);
1604 queue_init(&z->pages.all_free);
1605 queue_init(&z->pages.intermediate);
1606 queue_init(&z->pages.all_used);
1607 z->cur_size = 0;
1608 z->page_count = 0;
1609 z->max_size = max;
1610 z->elem_size = size;
1611 z->alloc_size = alloc;
1612 z->zone_name = name;
1613 z->count = 0;
1614 z->countfree = 0;
1615 z->count_all_free_pages = 0;
1616 z->sum_count = 0LL;
1617 z->doing_alloc_without_vm_priv = FALSE;
1618 z->doing_alloc_with_vm_priv = FALSE;
1619 z->exhaustible = FALSE;
1620 z->collectable = TRUE;
1621 z->allows_foreign = FALSE;
1622 z->expandable = TRUE;
1623 z->waiting = FALSE;
1624 z->async_pending = FALSE;
1625 z->caller_acct = TRUE;
1626 z->noencrypt = FALSE;
1627 z->no_callout = FALSE;
1628 z->async_prio_refill = FALSE;
1629 z->gzalloc_exempt = FALSE;
1630 z->alignment_required = FALSE;
1631 z->zone_replenishing = FALSE;
1632 z->prio_refill_watermark = 0;
1633 z->zone_replenish_thread = NULL;
1634 z->zp_count = 0;
1635
1636 #if CONFIG_ZLEAKS
1637 z->zleak_capture = 0;
1638 z->zleak_on = FALSE;
1639 #endif /* CONFIG_ZLEAKS */
1640
1641 lock_zone_init(z);
1642
1643 /*
1644 * Add the zone to the all-zones list.
1645 */
1646 simple_lock(&all_zones_lock);
1647 z->index = num_zones;
1648 num_zones++;
1649 simple_unlock(&all_zones_lock);
1650
1651 /*
1652 * Check for and set up zone leak detection if requested via boot-args. We recognize two
1653 * boot-args:
1654 *
1655 * zlog=<zone_to_log>
1656 * zrecs=<num_records_in_log>
1657 *
1658 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
1659 * control the size of the log. If zrecs is not specified, a default value is used.
1660 */
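/*
 * For example (hypothetical zone name), booting with
 *
 *	zlog1=example.elem zrecs=8000
 *
 * enables logging for the "example.elem" zone with a log of at most
 * 8000 records (capped at ZRECORDS_MAX below).
 */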
1661
1662 if (num_zones_logged < max_num_zones_to_log) {
1663
1664 int i = 1; /* zlog0 isn't allowed. */
1665 boolean_t zone_logging_enabled = FALSE;
1666 char zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
1667
1668 while (i <= max_num_zones_to_log) {
1669
1670 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
1671
1672 if (PE_parse_boot_argn(zlog_name, zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
1673 if (log_this_zone(z->zone_name, zone_name_to_log)) {
1674 z->zone_logging = TRUE;
1675 zone_logging_enabled = TRUE;
1676 num_zones_logged++;
1677 break;
1678 }
1679 }
1680 i++;
1681 }
1682
1683 if (zone_logging_enabled == FALSE) {
1684 /*
1685 * Backwards compat. with the old boot-arg used to specify single zone logging i.e. zlog
1686 * Needs to happen after the newer zlogn checks because the prefix will match all the zlogn
1687 * boot-args.
1688 */
1689 if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
1690 if (log_this_zone(z->zone_name, zone_name_to_log)) {
1691 z->zone_logging = TRUE;
1692 zone_logging_enabled = TRUE;
1693 num_zones_logged++;
1694 }
1695 }
1696 }
1697
1698 if (log_records_init == FALSE && zone_logging_enabled == TRUE) {
1699 if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
1700 /*
1701 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
1702 * This prevents accidentally hogging too much kernel memory and making the system
1703 * unusable.
1704 */
1705
1706 log_records = MIN(ZRECORDS_MAX, log_records);
1707 log_records_init = TRUE;
1708 } else {
1709 log_records = ZRECORDS_DEFAULT;
1710 log_records_init = TRUE;
1711 }
1712 }
1713
1714 /*
1715 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1716 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to
1717 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1718 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1719 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1720 * right now.
1721 */
1722 if (kmem_alloc_ready) {
1723
1724 zone_t curr_zone = NULL;
1725 unsigned int max_zones = 0, zone_idx = 0;
1726
1727 simple_lock(&all_zones_lock);
1728 max_zones = num_zones;
1729 simple_unlock(&all_zones_lock);
1730
1731 for (zone_idx = 0; zone_idx < max_zones; zone_idx++) {
1732
1733 curr_zone = &(zone_array[zone_idx]);
1734
1735 /*
1736 * We work with the zone unlocked here because we could end up needing the zone lock to
1737 * enable logging for this zone e.g. need a VM object to allocate memory to enable logging for the
1738 * VM objects zone.
1739 *
1740 * We don't expect these zones to be needed this early in boot, so we take this chance.
1741 */
1742 if (curr_zone->zone_logging && curr_zone->zlog_btlog == NULL) {
1743
1744 curr_zone->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */);
1745
1746 if (curr_zone->zlog_btlog) {
1747
1748 printf("zone: logging started for zone %s\n", curr_zone->zone_name);
1749 } else {
1750 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1751 curr_zone->zone_logging = FALSE;
1752 }
1753 }
1754
1755 }
1756 }
1757 }
1758
1759 #if CONFIG_GZALLOC
1760 gzalloc_zone_init(z);
1761 #endif
1762 return(z);
1763 }
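/*
 * Illustrative usage sketch (hypothetical element type, sizes and flags,
 * not taken from this file): a subsystem typically creates its zone once
 * at init time and then allocates and frees elements from it.
 *
 *	struct example_elem {
 *		uint64_t	id;
 *		uint64_t	payload[3];
 *	};
 *
 *	zone_t example_zone = zinit(sizeof(struct example_elem),
 *				    1024 * sizeof(struct example_elem),	// max memory
 *				    PAGE_SIZE,				// alloc chunk hint, see best_alloc above
 *				    "example.elem");
 *	zone_change(example_zone, Z_NOENCRYPT, TRUE);	// flags go right after zinit
 *
 *	struct example_elem *e = (struct example_elem *) zalloc(example_zone);
 *	...
 *	zfree(example_zone, e);
 */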
1764 unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count;
1765
1766 static void zone_replenish_thread(zone_t);
1767
1768 /* High priority VM privileged thread used to asynchronously refill a designated
1769 * zone, such as the reserved VM map entry zone.
1770 */
1771 __attribute__((noreturn))
1772 static void
1773 zone_replenish_thread(zone_t z)
1774 {
1775 vm_size_t free_size;
1776 current_thread()->options |= TH_OPT_VMPRIV;
1777
1778 for (;;) {
1779 lock_zone(z);
1780 z->zone_replenishing = TRUE;
1781 assert(z->prio_refill_watermark != 0);
1782 while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
1783 assert(z->doing_alloc_without_vm_priv == FALSE);
1784 assert(z->doing_alloc_with_vm_priv == FALSE);
1785 assert(z->async_prio_refill == TRUE);
1786
1787 unlock_zone(z);
1788 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
1789 vm_offset_t space, alloc_size;
1790 kern_return_t kr;
1791
1792 if (vm_pool_low())
1793 alloc_size = round_page(z->elem_size);
1794 else
1795 alloc_size = z->alloc_size;
1796
1797 if (z->noencrypt)
1798 zflags |= KMA_NOENCRYPT;
1799
1800 kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
1801
1802 if (kr == KERN_SUCCESS) {
1803 zcram(z, space, alloc_size);
1804 } else if (kr == KERN_RESOURCE_SHORTAGE) {
1805 VM_PAGE_WAIT();
1806 } else if (kr == KERN_NO_SPACE) {
1807 kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
1808 if (kr == KERN_SUCCESS) {
1809 zcram(z, space, alloc_size);
1810 } else {
1811 assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
1812 thread_block(THREAD_CONTINUE_NULL);
1813 }
1814 }
1815
1816 lock_zone(z);
1817 zone_replenish_loops++;
1818 }
1819
1820 z->zone_replenishing = FALSE;
1821 /* Signal any potentially throttled consumers, terminating
1822 * their timer-bounded waits.
1823 */
1824 thread_wakeup(z);
1825
1826 assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
1827 unlock_zone(z);
1828 thread_block(THREAD_CONTINUE_NULL);
1829 zone_replenish_wakeups++;
1830 }
1831 }
1832
1833 void
1834 zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
1835 z->prio_refill_watermark = low_water_mark;
1836
1837 z->async_prio_refill = TRUE;
1838 OSMemoryBarrier();
1839 kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
1840
1841 if (tres != KERN_SUCCESS) {
1842 panic("zone_prio_refill_configure, thread create: 0x%x", tres);
1843 }
1844
1845 thread_deallocate(z->zone_replenish_thread);
1846 }
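/*
 * Illustrative sketch (hypothetical zone, sizes and watermark): a zone whose
 * consumers cannot afford to block on expansion, such as the reserved VM map
 * entry zone mentioned above, is configured once at init time so the
 * replenish thread keeps a minimum number of elements free:
 *
 *	zone_t rz = zinit(256, 256 * 4096, PAGE_SIZE, "example.reserved");
 *	zone_change(rz, Z_NOCALLOUT, TRUE);
 *	zone_prio_refill_configure(rz, 64);	// refill when fewer than 64 elements are free
 */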
1847
1848 /* Initialize the metadata for an allocation chunk */
1849 static inline void
1850 zcram_metadata_init(vm_offset_t newmem, vm_size_t size, struct zone_page_metadata *chunk_metadata)
1851 {
1852 struct zone_page_metadata *page_metadata;
1853
1854 /* The first page is the real metadata for this allocation chunk. We mark the others as fake metadata */
1855 size -= PAGE_SIZE;
1856 newmem += PAGE_SIZE;
1857
1858 for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
1859 page_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE);
1860 assert(page_metadata != chunk_metadata);
1861 PAGE_METADATA_SET_ZINDEX(page_metadata, MULTIPAGE_METADATA_MAGIC);
1862 page_metadata_set_realmeta(page_metadata, chunk_metadata);
1863 page_metadata->free_count = 0;
1864 }
1865 return;
1866 }
1867
1868
1869 /*
1870 * Boolean Random Number Generator for generating booleans to randomize
1871 * the order of elements in newly zcram()'ed memory. The algorithm is a
1872 * modified version of the KISS RNG proposed in the paper:
1873 * http://stat.fsu.edu/techreports/M802.pdf
1874 * The modifications are documented in the technical
1875 * report from UCL:
1876 * http://www0.cs.ucl.ac.uk/staff/d.jones/GoodPracticeRNG.pdf
1877 */
1878
1879 static void random_bool_gen_entropy(
1880 int *buffer,
1881 int count)
1882 {
1883
1884 int i, t;
1885 simple_lock(&bool_gen_lock);
1886 for (i = 0; i < count; i++) {
1887 bool_gen_seed[1] ^= (bool_gen_seed[1] << 5);
1888 bool_gen_seed[1] ^= (bool_gen_seed[1] >> 7);
1889 bool_gen_seed[1] ^= (bool_gen_seed[1] << 22);
1890 t = bool_gen_seed[2] + bool_gen_seed[3] + bool_gen_global;
1891 bool_gen_seed[2] = bool_gen_seed[3];
1892 bool_gen_global = t < 0;
1893 bool_gen_seed[3] = t & 2147483647;
1894 bool_gen_seed[0] += 1411392427;
1895 buffer[i] = (bool_gen_seed[0] + bool_gen_seed[1] + bool_gen_seed[3]);
1896 }
1897 simple_unlock(&bool_gen_lock);
1898 }
1899
1900 static boolean_t random_bool_gen(
1901 int *buffer,
1902 int index,
1903 int bufsize)
1904 {
1905 int valindex, bitpos;
1906 valindex = (index / (8 * sizeof(int))) % bufsize;
1907 bitpos = index % (8 * sizeof(int));
1908 return (boolean_t)(buffer[valindex] & (1 << bitpos));
1909 }
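/*
 * Worked example of the bit selection above (assuming 32-bit int): for
 * element index 37 and a bufsize of MAX_ENTROPY_PER_ZCRAM words,
 *
 *	valindex = (37 / 32) % bufsize = 1 % bufsize
 *	bitpos   =  37 % 32            = 5
 *
 * i.e. consecutive indices consume consecutive bits of the entropy buffer,
 * wrapping around when the fixed-size buffer is exhausted, so a single
 * entropy fill per zcram() is sufficient for the shuffle in
 * random_free_to_zone() below.
 */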
1910
1911 static void
1912 random_free_to_zone(
1913 zone_t zone,
1914 vm_offset_t newmem,
1915 vm_offset_t first_element_offset,
1916 int element_count,
1917 int *entropy_buffer)
1918 {
1919 vm_offset_t last_element_offset;
1920 vm_offset_t element_addr;
1921 vm_size_t elem_size;
1922 int index;
1923
1924 elem_size = zone->elem_size;
1925 last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size);
1926 for (index = 0; index < element_count; index++) {
1927 assert(first_element_offset <= last_element_offset);
1928 if (
1929 #if DEBUG || DEVELOPMENT
1930 leak_scan_debug_flag ||
1931 #endif /* DEBUG || DEVELOPMENT */
1932 random_bool_gen(entropy_buffer, index, MAX_ENTROPY_PER_ZCRAM)) {
1933 element_addr = newmem + first_element_offset;
1934 first_element_offset += elem_size;
1935 } else {
1936 element_addr = newmem + last_element_offset;
1937 last_element_offset -= elem_size;
1938 }
1939 if (element_addr != (vm_offset_t)zone) {
1940 zone->count++; /* compensate for free_to_zone */
1941 free_to_zone(zone, element_addr, FALSE);
1942 }
1943 zone->cur_size += elem_size;
1944 }
1945 }
1946
1947 /*
1948 * Cram the given memory into the specified zone. Update the zone page count accordingly.
1949 */
1950 void
1951 zcram(
1952 zone_t zone,
1953 vm_offset_t newmem,
1954 vm_size_t size)
1955 {
1956 vm_size_t elem_size;
1957 boolean_t from_zm = FALSE;
1958 int element_count;
1959 int entropy_buffer[MAX_ENTROPY_PER_ZCRAM];
1960
1961 /* Basic sanity checks */
1962 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
1963 assert(!zone->collectable || zone->allows_foreign
1964 || (from_zone_map(newmem, size)));
1965
1966 elem_size = zone->elem_size;
1967
1968 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(zone), size, 0, 0, 0);
1969
1970 if (from_zone_map(newmem, size))
1971 from_zm = TRUE;
1972
1973 if (!from_zm) {
1974 /* We cannot support elements larger than page size for foreign memory because we
1975 * put metadata on the page itself for each page of foreign memory. We need to do
1976 * this in order to be able to reach the metadata when any element is freed
1977 */
1978 assert((zone->allows_foreign == TRUE) && (zone->elem_size <= (PAGE_SIZE - sizeof(struct zone_page_metadata))));
1979 }
1980
1981 if (zalloc_debug & ZALLOC_DEBUG_ZCRAM)
1982 kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name,
1983 (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size);
1984
1985 ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE));
1986
1987 random_bool_gen_entropy(entropy_buffer, MAX_ENTROPY_PER_ZCRAM);
1988
1989 /*
1990 * Initialize the metadata for all pages. We don't need the zone lock
1991 * here because we are not manipulating any zone related state yet.
1992 */
1993
1994 struct zone_page_metadata *chunk_metadata;
1995 size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);
1996
1997 assert((newmem & PAGE_MASK) == 0);
1998 assert((size & PAGE_MASK) == 0);
1999
2000 chunk_metadata = get_zone_page_metadata((struct zone_free_element *)newmem, TRUE);
2001 chunk_metadata->pages.next = NULL;
2002 chunk_metadata->pages.prev = NULL;
2003 page_metadata_set_freelist(chunk_metadata, 0);
2004 PAGE_METADATA_SET_ZINDEX(chunk_metadata, zone->index);
2005 chunk_metadata->free_count = 0;
2006 chunk_metadata->page_count = (size / PAGE_SIZE);
2007
2008 zcram_metadata_init(newmem, size, chunk_metadata);
2009
2010 lock_zone(zone);
2011 enqueue_tail(&zone->pages.all_used, &(chunk_metadata->pages));
2012
2013 if (!from_zm) {
2014 /* We cannot support elements larger than page size for foreign memory because we
2015 * put metadata on the page itself for each page of foreign memory. We need to do
2016 * this in order to be able to reach the metadata when any element is freed
2017 */
2018
2019 for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
2020 vm_offset_t first_element_offset = 0;
2021 if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0){
2022 first_element_offset = zone_page_metadata_size;
2023 } else {
2024 first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
2025 }
2026 element_count = (int)((PAGE_SIZE - first_element_offset) / elem_size);
2027 random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer);
2028 }
2029 } else {
2030 element_count = (int)(size / elem_size);
2031 random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer);
2032 }
2033 unlock_zone(zone);
2034
2035 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(zone), 0, 0, 0, 0);
2036
2037 }
2038
2039 /*
2040 * Fill a zone with enough memory to contain at least nelem elements.
2041 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
2042 * Return the number of elements actually put into the zone, which may
2043 * be more than the caller asked for since the memory allocation is
2044 * rounded up to a full page.
2045 */
2046 int
2047 zfill(
2048 zone_t zone,
2049 int nelem)
2050 {
2051 kern_return_t kr;
2052 vm_size_t size;
2053 vm_offset_t memory;
2054 int nalloc;
2055
2056 assert(nelem > 0);
2057 if (nelem <= 0)
2058 return 0;
2059 size = nelem * zone->elem_size;
2060 size = round_page(size);
2061 kr = kmem_alloc_kobject(kernel_map, &memory, size, VM_KERN_MEMORY_ZONE);
2062 if (kr != KERN_SUCCESS)
2063 return 0;
2064
2065 zone_change(zone, Z_FOREIGN, TRUE);
2066 zcram(zone, memory, size);
2067 nalloc = (int)(size / zone->elem_size);
2068 assert(nalloc >= nelem);
2069
2070 return nalloc;
2071 }
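/*
 * Illustrative sketch (hypothetical zone and count): zfill() wires memory
 * from the kernel_map, marks the zone Z_FOREIGN, and crams the memory in,
 * so it is a way to pre-populate a zone outside the zone_map:
 *
 *	int got = zfill(example_zone, 128);
 *	if (got == 0) {
 *		// kmem_alloc_kobject failed; no elements were added
 *	}
 *	// otherwise got >= 128, because the request is rounded up to whole pages
 */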
2072
2073 /*
2074 * Initialize the "zone of zones" which uses fixed memory allocated
2075 * earlier in memory initialization. zone_bootstrap is called
2076 * before zone_init.
2077 */
2078 void
2079 zone_bootstrap(void)
2080 {
2081 char temp_buf[16];
2082 unsigned int i;
2083
2084 if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug)))
2085 zalloc_debug = 0;
2086
2087 /* Set up zone element poisoning */
2088 zp_init();
2089
2090 /* Seed the random boolean generator for elements in zone free list */
2091 for (i = 0; i < RANDOM_BOOL_GEN_SEED_COUNT; i++) {
2092 bool_gen_seed[i] = (unsigned int)early_random();
2093 }
2094 simple_lock_init(&bool_gen_lock, 0);
2095
2096 /* should zlog log to debug zone corruption instead of leaks? */
2097 if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
2098 corruption_debug_flag = TRUE;
2099 }
2100
2101 #if DEBUG || DEVELOPMENT
2102 /* disable element location randomization in a page */
2103 if (PE_parse_boot_argn("-zl", temp_buf, sizeof(temp_buf))) {
2104 leak_scan_debug_flag = TRUE;
2105 }
2106 #endif
2107
2108 simple_lock_init(&all_zones_lock, 0);
2109
2110 num_zones = 0;
2111 thread_call_setup(&call_async_alloc, zalloc_async, NULL);
2112
2113 /* initializing global lock group for zones */
2114 lck_grp_attr_setdefault(&zone_locks_grp_attr);
2115 lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr);
2116
2117 lck_attr_setdefault(&zone_metadata_lock_attr);
2118 lck_mtx_init_ext(&zone_metadata_region_lck, &zone_metadata_region_lck_ext, &zone_locks_grp, &zone_metadata_lock_attr);
2119 }
2120
2121 /* Global initialization of Zone Allocator.
2122 * Runs after zone_bootstrap.
2123 */
2124 void
2125 zone_init(
2126 vm_size_t max_zonemap_size)
2127 {
2128 kern_return_t retval;
2129 vm_offset_t zone_min;
2130 vm_offset_t zone_max;
2131 vm_offset_t zone_metadata_space;
2132 unsigned int zone_pages;
2133
2134 retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
2135 FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(VM_KERN_MEMORY_ZONE),
2136 &zone_map);
2137
2138 if (retval != KERN_SUCCESS)
2139 panic("zone_init: kmem_suballoc failed");
2140 zone_max = zone_min + round_page(max_zonemap_size);
2141 #if CONFIG_GZALLOC
2142 gzalloc_init(max_zonemap_size);
2143 #endif
2144 /*
2145 * Setup garbage collection information:
2146 */
2147 zone_map_min_address = zone_min;
2148 zone_map_max_address = zone_max;
2149
2150 zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
2151 zone_metadata_space = round_page(zone_pages * sizeof(struct zone_page_metadata));
2152 retval = kernel_memory_allocate(zone_map, &zone_metadata_region_min, zone_metadata_space,
2153 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK);
2154 if (retval != KERN_SUCCESS)
2155 panic("zone_init: zone_metadata_region initialization failed!");
2156 zone_metadata_region_max = zone_metadata_region_min + zone_metadata_space;
2157
2158 #if defined(__LP64__)
2159 /*
2160 * ensure that any vm_page_t that gets created from
2161 * the vm_page zone can be packed properly (see vm_page.h
2162 * for the packing requirements)
2163 */
2164 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max)
2165 panic("VM_PAGE_PACK_PTR failed on zone_metadata_region_max - %p", (void *)zone_metadata_region_max);
2166
2167 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address)
2168 panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);
2169 #endif
2170
2171 lck_grp_attr_setdefault(&zone_gc_lck_grp_attr);
2172 lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr);
2173 lck_attr_setdefault(&zone_gc_lck_attr);
2174 lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr);
2175
2176 #if CONFIG_ZLEAKS
2177 /*
2178 * Initialize the zone leak monitor
2179 */
2180 zleak_init(max_zonemap_size);
2181 #endif /* CONFIG_ZLEAKS */
2182 }
2183
2184 extern volatile SInt32 kfree_nop_count;
2185
2186 #pragma mark -
2187 #pragma mark zalloc_canblock
2188
2189 /*
2190 * zalloc returns an element from the specified zone.
2191 */
2192 static void *
2193 zalloc_internal(
2194 zone_t zone,
2195 boolean_t canblock,
2196 boolean_t nopagewait)
2197 {
2198 vm_offset_t addr = 0;
2199 kern_return_t retval;
2200 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
2201 int numsaved = 0;
2202 boolean_t zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE;
2203 #if CONFIG_GZALLOC
2204 boolean_t did_gzalloc = FALSE;
2205 #endif
2206 thread_t thr = current_thread();
2207 boolean_t check_poison = FALSE;
2208 boolean_t set_doing_alloc_with_vm_priv = FALSE;
2209
2210 #if CONFIG_ZLEAKS
2211 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
2212 #endif /* CONFIG_ZLEAKS */
2213
2214 assert(zone != ZONE_NULL);
2215
2216 #if CONFIG_GZALLOC
2217 addr = gzalloc_alloc(zone, canblock);
2218 did_gzalloc = (addr != 0);
2219 #endif
2220
2221 /*
2222 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2223 */
2224 if (__improbable(DO_LOGGING(zone)))
2225 numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);
2226
2227 #if CONFIG_ZLEAKS
2228 /*
2229 * Zone leak detection: capture a backtrace every zleak_sample_factor
2230 * allocations in this zone.
2231 */
2232 if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) {
2233 /* Avoid backtracing twice if zone logging is on */
2234 if (numsaved == 0)
2235 zleak_tracedepth = backtrace(zbt, MAX_ZTRACE_DEPTH);
2236 else
2237 zleak_tracedepth = numsaved;
2238 }
2239 #endif /* CONFIG_ZLEAKS */
2240
2241 lock_zone(zone);
2242
2243 if (zone->async_prio_refill && zone->zone_replenish_thread) {
2244 do {
2245 vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size));
2246 vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size;
2247 zone_replenish_wakeup = (zfreec < zrefillwm);
2248 zone_alloc_throttle = (zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0);
2249
2250 if (zone_replenish_wakeup) {
2251 zone_replenish_wakeups_initiated++;
2252 /* Signal the potentially waiting
2253 * refill thread.
2254 */
2255 thread_wakeup(&zone->zone_replenish_thread);
2256 unlock_zone(zone);
2257 /* Scheduling latencies etc. may prevent
2258 * the refill thread from keeping up
2259 * with demand. Throttle consumers
2260 * when we fall below half the
2261 * watermark, unless VM privileged
2262 */
2263 if (zone_alloc_throttle) {
2264 zone_replenish_throttle_count++;
2265 assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC);
2266 thread_block(THREAD_CONTINUE_NULL);
2267 }
2268 lock_zone(zone);
2269 }
2270 } while (zone_alloc_throttle == TRUE);
2271 }
2272
2273 if (__probable(addr == 0))
2274 addr = try_alloc_from_zone(zone, &check_poison);
2275
2276
2277 while ((addr == 0) && canblock) {
2278 /*
2279 * zone is empty, try to expand it
2280 *
2281 * Note that we now allow up to 2 threads (1 vm_privileged and 1 non-vm_privileged)
2282 * to expand the zone concurrently... this is necessary to avoid stalling
2283 * vm_privileged threads running critical code necessary to continue compressing/swapping
2284 * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads
2285 * waiting to acquire free pages when the vm_page_free_count is below the
2286 * vm_page_free_reserved limit.
2287 */
2288 if ((zone->doing_alloc_without_vm_priv || zone->doing_alloc_with_vm_priv) &&
2289 (((thr->options & TH_OPT_VMPRIV) == 0) || zone->doing_alloc_with_vm_priv)) {
2290 /*
2291 * This is a non-vm_privileged thread and a non-vm_privileged or
2292 * a vm_privileged thread is already expanding the zone...
2293 * OR
2294 * this is a vm_privileged thread and a vm_privileged thread is
2295 * already expanding the zone...
2296 *
2297 * In either case wait for a thread to finish, then try again.
2298 */
2299 zone->waiting = TRUE;
2300 zone_sleep(zone);
2301 } else {
2302 vm_offset_t space;
2303 vm_size_t alloc_size;
2304 int retry = 0;
2305
2306 if ((zone->cur_size + zone->elem_size) >
2307 zone->max_size) {
2308 if (zone->exhaustible)
2309 break;
2310 if (zone->expandable) {
2311 /*
2312 * We're willing to overflow certain
2313 * zones, but not without complaining.
2314 *
2315 * This is best used in conjunction
2316 * with the collectable flag. What we
2317 * want is an assurance we can get the
2318 * memory back, assuming there's no
2319 * leak.
2320 */
2321 zone->max_size += (zone->max_size >> 1);
2322 } else {
2323 unlock_zone(zone);
2324
2325 panic_include_zprint = TRUE;
2326 #if CONFIG_ZLEAKS
2327 if (zleak_state & ZLEAK_STATE_ACTIVE)
2328 panic_include_ztrace = TRUE;
2329 #endif /* CONFIG_ZLEAKS */
2330 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
2331 }
2332 }
2333 /*
2334 * It is possible that a BG thread is refilling/expanding the zone
2335 * and gets pre-empted during that operation. That blocks all other
2336 * threads from making progress leading to a watchdog timeout. To
2337 * avoid that, boost the thread priority using the rwlock boost
2338 */
2339 set_thread_rwlock_boost();
2340
2341 if ((thr->options & TH_OPT_VMPRIV)) {
2342 zone->doing_alloc_with_vm_priv = TRUE;
2343 set_doing_alloc_with_vm_priv = TRUE;
2344 } else {
2345 zone->doing_alloc_without_vm_priv = TRUE;
2346 }
2347 unlock_zone(zone);
2348
2349 for (;;) {
2350 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
2351
2352 if (vm_pool_low() || retry >= 1)
2353 alloc_size =
2354 round_page(zone->elem_size);
2355 else
2356 alloc_size = zone->alloc_size;
2357
2358 if (zone->noencrypt)
2359 zflags |= KMA_NOENCRYPT;
2360
2361 retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
2362 if (retval == KERN_SUCCESS) {
2363 #if CONFIG_ZLEAKS
2364 if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
2365 if (zone_map->size >= zleak_global_tracking_threshold) {
2366 kern_return_t kr;
2367
2368 kr = zleak_activate();
2369 if (kr != KERN_SUCCESS) {
2370 printf("Failed to activate live zone leak debugging (%d).\n", kr);
2371 }
2372 }
2373 }
2374
2375 if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
2376 if (zone->cur_size > zleak_per_zone_tracking_threshold) {
2377 zone->zleak_on = TRUE;
2378 }
2379 }
2380 #endif /* CONFIG_ZLEAKS */
2381 zcram(zone, space, alloc_size);
2382
2383 break;
2384 } else if (retval != KERN_RESOURCE_SHORTAGE) {
2385 retry++;
2386
2387 if (retry == 2) {
2388 zone_gc();
2389 printf("zalloc did gc\n");
2390 zone_display_zprint();
2391 }
2392 if (retry == 3) {
2393 panic_include_zprint = TRUE;
2394 #if CONFIG_ZLEAKS
2395 if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
2396 panic_include_ztrace = TRUE;
2397 }
2398 #endif /* CONFIG_ZLEAKS */
2399 if (retval == KERN_NO_SPACE) {
2400 zone_t zone_largest = zone_find_largest();
2401 panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)",
2402 zone->zone_name, zone_largest->zone_name,
2403 (unsigned long)zone_largest->cur_size, zone_largest->count);
2404
2405 }
2406 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
2407 }
2408 } else {
2409 break;
2410 }
2411 }
2412 lock_zone(zone);
2413
2414 if (set_doing_alloc_with_vm_priv == TRUE)
2415 zone->doing_alloc_with_vm_priv = FALSE;
2416 else
2417 zone->doing_alloc_without_vm_priv = FALSE;
2418
2419 if (zone->waiting) {
2420 zone->waiting = FALSE;
2421 zone_wakeup(zone);
2422 }
2423 clear_thread_rwlock_boost();
2424
2425 addr = try_alloc_from_zone(zone, &check_poison);
2426 if (addr == 0 &&
2427 retval == KERN_RESOURCE_SHORTAGE) {
2428 if (nopagewait == TRUE)
2429 break; /* out of the main while loop */
2430 unlock_zone(zone);
2431
2432 VM_PAGE_WAIT();
2433 lock_zone(zone);
2434 }
2435 }
2436 if (addr == 0)
2437 addr = try_alloc_from_zone(zone, &check_poison);
2438 }
2439
2440 #if CONFIG_ZLEAKS
2441 /* Zone leak detection:
2442 * If we're sampling this allocation, add it to the zleaks hash table.
2443 */
2444 if (addr && zleak_tracedepth > 0) {
2445 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2446 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2447 /* If it failed, roll back the counter so we sample the next allocation instead. */
2448 zone->zleak_capture = zleak_sample_factor;
2449 }
2450 }
2451 #endif /* CONFIG_ZLEAKS */
2452
2453
2454 if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
2455 zone->async_pending = TRUE;
2456 unlock_zone(zone);
2457 thread_call_enter(&call_async_alloc);
2458 lock_zone(zone);
2459 addr = try_alloc_from_zone(zone, &check_poison);
2460 }
2461
2462 vm_offset_t inner_size = zone->elem_size;
2463
2464 unlock_zone(zone);
2465
2466 if (__improbable(DO_LOGGING(zone) && addr)) {
2467 btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_ALLOC, (void **)zbt, numsaved);
2468 }
2469
2470 if (__improbable(check_poison && addr)) {
2471 vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1;
2472 vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr);
2473
2474 for ( ; element_cursor < backup ; element_cursor++)
2475 if (__improbable(*element_cursor != ZP_POISON))
2476 zone_element_was_modified_panic(zone,
2477 addr,
2478 *element_cursor,
2479 ZP_POISON,
2480 ((vm_offset_t)element_cursor) - addr);
2481 }
2482
2483 if (addr) {
2484 /*
2485 * Clear out the old next pointer and backup to avoid leaking the cookie
2486 * and so that only values on the freelist have a valid cookie
2487 */
2488
2489 vm_offset_t *primary = (vm_offset_t *) addr;
2490 vm_offset_t *backup = get_backup_ptr(inner_size, primary);
2491
2492 *primary = ZP_POISON;
2493 *backup = ZP_POISON;
2494
2495 #if DEBUG || DEVELOPMENT
2496 if (__improbable(leak_scan_debug_flag && !(zone->elem_size & (sizeof(uintptr_t) - 1)))) {
2497 int count, idx;
2498 /* Fill element, from tail, with backtrace in reverse order */
2499 if (numsaved == 0) numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH);
2500 count = (int) (zone->elem_size / sizeof(uintptr_t));
2501 if (count >= numsaved) count = numsaved - 1;
2502 for (idx = 0; idx < count; idx++) ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1];
2503 }
2504 #endif /* DEBUG || DEVELOPMENT */
2505 }
2506
2507 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
2508 return((void *)addr);
2509 }
2510
2511
2512 void *
2513 zalloc(zone_t zone)
2514 {
2515 return (zalloc_internal(zone, TRUE, FALSE));
2516 }
2517
2518 void *
2519 zalloc_noblock(zone_t zone)
2520 {
2521 return (zalloc_internal(zone, FALSE, FALSE));
2522 }
2523
2524 void *
2525 zalloc_nopagewait(zone_t zone)
2526 {
2527 return (zalloc_internal(zone, TRUE, TRUE));
2528 }
2529
2530 void *
2531 zalloc_canblock(zone_t zone, boolean_t canblock)
2532 {
2533 return (zalloc_internal(zone, canblock, FALSE));
2534 }
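/*
 * Summary of the (canblock, nopagewait) combinations used by the wrappers
 * above and by zget() below:
 *
 *	zalloc()             (TRUE,  FALSE)  may expand the zone and wait for pages
 *	zalloc_noblock()     (FALSE, FALSE)  never blocks to expand the zone
 *	zalloc_nopagewait()  (TRUE,  TRUE)   may expand, but won't VM_PAGE_WAIT()
 *	zalloc_canblock(b)   (b,     FALSE)  caller decides whether blocking is allowed
 *	zget()               (FALSE, TRUE)   neither expands synchronously nor waits
 */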
2535
2536
2537 void
2538 zalloc_async(
2539 __unused thread_call_param_t p0,
2540 __unused thread_call_param_t p1)
2541 {
2542 zone_t current_z = NULL;
2543 unsigned int max_zones, i;
2544 void *elt = NULL;
2545 boolean_t pending = FALSE;
2546
2547 simple_lock(&all_zones_lock);
2548 max_zones = num_zones;
2549 simple_unlock(&all_zones_lock);
2550 for (i = 0; i < max_zones; i++) {
2551 current_z = &(zone_array[i]);
2552 lock_zone(current_z);
2553 if (current_z->async_pending == TRUE) {
2554 current_z->async_pending = FALSE;
2555 pending = TRUE;
2556 }
2557 unlock_zone(current_z);
2558
2559 if (pending == TRUE) {
2560 elt = zalloc_canblock(current_z, TRUE);
2561 zfree(current_z, elt);
2562 pending = FALSE;
2563 }
2564 }
2565 }
2566
2567 /*
2568 * zget returns an element from the specified zone
2569 * and immediately returns nothing if there is nothing there.
2570 */
2571 void *
2572 zget(
2573 zone_t zone)
2574 {
2575 return zalloc_internal(zone, FALSE, TRUE);
2576 }
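/*
 * Illustrative sketch (hypothetical zone and type): zget() never blocks to
 * expand the zone or to wait for free pages, so callers must handle a NULL
 * return:
 *
 *	struct example_elem *e = (struct example_elem *) zget(example_zone);
 *	if (e == NULL) {
 *		// nothing immediately available; defer the work, or use
 *		// zalloc() from a context that is allowed to block
 *	}
 */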
2577
2578 /* Keep this FALSE by default. Large memory machines run orders of magnitude
2579 slower in debug mode when true. Use debugger to enable if needed */
2580 /* static */ boolean_t zone_check = FALSE;
2581
2582 static void zone_check_freelist(zone_t zone, vm_offset_t elem)
2583 {
2584 struct zone_free_element *this;
2585 struct zone_page_metadata *thispage;
2586
2587 if (zone->allows_foreign) {
2588 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
2589 !queue_end(&zone->pages.any_free_foreign, &(thispage->pages));
2590 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
2591 for (this = page_metadata_get_freelist(thispage);
2592 this != NULL;
2593 this = this->next) {
2594 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2595 panic("zone_check_freelist");
2596 }
2597 }
2598 }
2599 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
2600 !queue_end(&zone->pages.all_free, &(thispage->pages));
2601 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
2602 for (this = page_metadata_get_freelist(thispage);
2603 this != NULL;
2604 this = this->next) {
2605 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2606 panic("zone_check_freelist");
2607 }
2608 }
2609 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
2610 !queue_end(&zone->pages.intermediate, &(thispage->pages));
2611 thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) {
2612 for (this = page_metadata_get_freelist(thispage);
2613 this != NULL;
2614 this = this->next) {
2615 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2616 panic("zone_check_freelist");
2617 }
2618 }
2619 }
2620
2621 void
2622 zfree(
2623 zone_t zone,
2624 void *addr)
2625 {
2626 vm_offset_t elem = (vm_offset_t) addr;
2627 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
2628 int numsaved = 0;
2629 boolean_t gzfreed = FALSE;
2630 boolean_t poison = FALSE;
2631
2632 assert(zone != ZONE_NULL);
2633
2634 /*
2635 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2636 */
2637
2638 if (__improbable(DO_LOGGING(zone) && corruption_debug_flag))
2639 numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH);
2640
2641 #if MACH_ASSERT
2642 /* Basic sanity checks */
2643 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
2644 panic("zfree: NULL");
2645 #endif
2646
2647 #if CONFIG_GZALLOC
2648 gzfreed = gzalloc_free(zone, addr);
2649 #endif
2650
2651 if (!gzfreed) {
2652 struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
2653 if (zone != PAGE_METADATA_GET_ZONE(page_meta)) {
2654 panic("Element %p from zone %s caught being freed to wrong zone %s\n", addr, PAGE_METADATA_GET_ZONE(page_meta)->zone_name, zone->zone_name);
2655 }
2656 }
2657
2658 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
2659
2660 if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
2661 !from_zone_map(elem, zone->elem_size))) {
2662 panic("zfree: non-allocated memory in collectable zone!");
2663 }
2664
2665 if ((zp_factor != 0 || zp_tiny_zone_limit != 0) && !gzfreed) {
2666 /*
2667 * Poison the memory before it ends up on the freelist to catch
2668 * use-after-free and use of uninitialized memory
2669 *
2670 * Always poison tiny zones' elements (limit is 0 if -no-zp is set)
2671 * Also poison larger elements periodically
2672 */
2673
2674 vm_offset_t inner_size = zone->elem_size;
2675
2676 uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale);
2677
2678 if (inner_size <= zp_tiny_zone_limit)
2679 poison = TRUE;
2680 else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE)
2681 poison = TRUE;
2682
2683 if (__improbable(poison)) {
2684
2685 /* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */
2686 /* Poison everything but primary and backup */
2687 vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1;
2688 vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem);
2689
2690 for ( ; element_cursor < backup; element_cursor++)
2691 *element_cursor = ZP_POISON;
2692 }
2693 }
2694
2695 /*
2696 * See if we're doing logging on this zone. There are two styles of logging used depending on
2697 * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
2698 */
2699
2700 if (__improbable(DO_LOGGING(zone))) {
2701 if (corruption_debug_flag) {
2702 /*
2703 * We're logging to catch a corruption. Add a record of this zfree operation
2704 * to log.
2705 */
2706 btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE, (void **)zbt, numsaved);
2707 } else {
2708 /*
2709 * We're logging to catch a leak. Remove any record we might have for this
2710 * element since it's being freed. Note that we may not find it if the buffer
2711 * overflowed and that's OK. Since the log is of a limited size, old records
2712 * get overwritten if there are more zallocs than zfrees.
2713 */
2714 btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr);
2715 }
2716 }
2717
2718 lock_zone(zone);
2719
2720 if (zone_check) {
2721 zone_check_freelist(zone, elem);
2722 }
2723
2724 if (__probable(!gzfreed))
2725 free_to_zone(zone, elem, poison);
2726
2727 #if MACH_ASSERT
2728 if (zone->count < 0)
2729 panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone",
2730 zone->zone_name, addr);
2731 #endif
2732
2733
2734 #if CONFIG_ZLEAKS
2735 /*
2736 * Zone leak detection: un-track the allocation
2737 */
2738 if (zone->zleak_on) {
2739 zleak_free(elem, zone->elem_size);
2740 }
2741 #endif /* CONFIG_ZLEAKS */
2742
2743 unlock_zone(zone);
2744 }
2745
2746
2747 /* Change a zone's flags.
2748 * This routine must be called immediately after zinit.
2749 */
2750 void
2751 zone_change(
2752 zone_t zone,
2753 unsigned int item,
2754 boolean_t value)
2755 {
2756 assert( zone != ZONE_NULL );
2757 assert( value == TRUE || value == FALSE );
2758
2759 switch(item){
2760 case Z_NOENCRYPT:
2761 zone->noencrypt = value;
2762 break;
2763 case Z_EXHAUST:
2764 zone->exhaustible = value;
2765 break;
2766 case Z_COLLECT:
2767 zone->collectable = value;
2768 break;
2769 case Z_EXPAND:
2770 zone->expandable = value;
2771 break;
2772 case Z_FOREIGN:
2773 zone->allows_foreign = value;
2774 break;
2775 case Z_CALLERACCT:
2776 zone->caller_acct = value;
2777 break;
2778 case Z_NOCALLOUT:
2779 zone->no_callout = value;
2780 break;
2781 case Z_GZALLOC_EXEMPT:
2782 zone->gzalloc_exempt = value;
2783 #if CONFIG_GZALLOC
2784 gzalloc_reconfigure(zone);
2785 #endif
2786 break;
2787 case Z_ALIGNMENT_REQUIRED:
2788 zone->alignment_required = value;
2789 #if CONFIG_GZALLOC
2790 gzalloc_reconfigure(zone);
2791 #endif
2792 break;
2793 default:
2794 panic("Zone_change: Wrong Item Type!");
2795 /* break; */
2796 }
2797 }
2798
2799 /*
2800 * Return the expected number of free elements in the zone.
2801 * This calculation will be incorrect if items are zfree'd that
2802 * were never zalloc'd/zget'd. The correct way to stuff memory
2803 * into a zone is by zcram.
2804 */
2805
2806 integer_t
2807 zone_free_count(zone_t zone)
2808 {
2809 integer_t free_count;
2810
2811 lock_zone(zone);
2812 free_count = zone->countfree;
2813 unlock_zone(zone);
2814
2815 assert(free_count >= 0);
2816
2817 return(free_count);
2818 }
2819
2820 /* Zone garbage collection
2821 *
2822 * zone_gc will walk through all the free elements in all the
2823 * zones that are marked collectable looking for reclaimable
2824 * pages. zone_gc is called by consider_zone_gc when the system
2825 * begins to run out of memory.
2826 */
2827 extern zone_t vm_map_entry_reserved_zone;
2828 uint64_t zone_gc_bailed = 0;
2829
2830 void
2831 zone_gc(void)
2832 {
2833 unsigned int max_zones;
2834 zone_t z;
2835 unsigned int i;
2836 zone_t zres = vm_map_entry_reserved_zone;
2837
2838 lck_mtx_lock(&zone_gc_lock);
2839
2840 simple_lock(&all_zones_lock);
2841 max_zones = num_zones;
2842 simple_unlock(&all_zones_lock);
2843
2844 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
2845 kprintf("zone_gc() starting...\n");
2846
2847 for (i = 0; i < max_zones; i++) {
2848 z = &(zone_array[i]);
2849 vm_size_t elt_size, size_freed;
2850 int total_freed_pages = 0;
2851 struct zone_page_metadata *page_meta;
2852 queue_head_t page_meta_head;
2853
2854 assert(z != ZONE_NULL);
2855
2856 if (!z->collectable)
2857 continue;
2858
2859 if (queue_empty(&z->pages.all_free)) {
2860 continue;
2861 }
2862
2863 /*
2864 * Since kmem_free() might use VM entries from the reserved VM entries zone, we should bail from zone_gc() if we
2865 * are below the critical threshold for that zone. Otherwise, there could be a deadlock between the zone_gc
2866 * thread and the zone_replenish thread for the VM entries zone on the zone_map lock.
2867 */
2868 if (zres->zone_replenishing) {
2869 zone_gc_bailed++;
2870 break;
2871 }
2872
2873 lock_zone(z);
2874 elt_size = z->elem_size;
2875
2876 if (queue_empty(&z->pages.all_free)) {
2877 unlock_zone(z);
2878 continue;
2879 }
2880
2881 /*
2882 * Snatch all of the free elements away from the zone.
2883 */
2884 uint64_t old_all_free_count = z->count_all_free_pages;
2885 queue_new_head(&z->pages.all_free, &page_meta_head, struct zone_page_metadata *, pages);
2886 queue_init(&z->pages.all_free);
2887 z->count_all_free_pages = 0;
2888 unlock_zone(z);
2889
2890 /* Iterate through all elements to find out size and count of elements we snatched */
2891 size_freed = 0;
2892 queue_iterate(&page_meta_head, page_meta, struct zone_page_metadata *, pages) {
2893 assert(from_zone_map((vm_address_t)page_meta, sizeof(*page_meta))); /* foreign elements should be in any_free_foreign */
2894 size_freed += elt_size * page_meta->free_count;
2895 }
2896
2897 /* Update the zone size and free element count */
2898 lock_zone(z);
2899 z->cur_size -= size_freed;
2900 z->countfree -= size_freed/elt_size;
2901 unlock_zone(z);
2902
2903 while ((page_meta = (struct zone_page_metadata *)dequeue_head(&page_meta_head)) != NULL) {
2904 vm_address_t free_page_address;
2905 if (zres->zone_replenishing)
2906 break;
2907 /* Free the pages for metadata and account for them */
2908 free_page_address = get_zone_page(page_meta);
2909 ZONE_PAGE_COUNT_DECR(z, page_meta->page_count);
2910 total_freed_pages += page_meta->page_count;
2911 old_all_free_count -= page_meta->page_count;
2912 size_freed -= (elt_size * page_meta->free_count);
2913 kmem_free(zone_map, free_page_address, (page_meta->page_count * PAGE_SIZE));
2914 thread_yield_to_preemption();
2915 }
2916 if (page_meta != NULL) {
2917 /*
2918 * We bailed because the VM entry reserved zone is replenishing. Put the remaining
2919 * metadata objects back on the all_free list and bail.
2920 */
2921 queue_entry_t qe;
2922 enqueue_head(&page_meta_head, &(page_meta->pages));
2923 zone_gc_bailed++;
2924
2925 lock_zone(z);
2926 qe_foreach_safe(qe, &page_meta_head) {
2927 re_queue_tail(&z->pages.all_free, qe);
2928 }
2929 z->count_all_free_pages += (int)old_all_free_count;
2930 z->cur_size += size_freed;
2931 z->countfree += size_freed/elt_size;
2932 unlock_zone(z);
2933 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
2934 kprintf("zone_gc() bailed due to VM entry zone replenishing (zone_gc_bailed: %lld)\n", zone_gc_bailed);
2935 break;
2936 }
2937
2938 /* We freed all the pages from the all_free list for this zone */
2939 assert(old_all_free_count == 0);
2940
2941 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
2942 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages);
2943 }
2944
2945 lck_mtx_unlock(&zone_gc_lock);
2946 }
2947
2948 extern vm_offset_t kmapoff_kaddr;
2949 extern unsigned int kmapoff_pgcnt;
2950
2951 /*
2952 * consider_zone_gc:
2953 *
2954 * Called by the pageout daemon when the system needs more free pages.
2955 */
2956
2957 void
2958 consider_zone_gc(void)
2959 {
2960 if (kmapoff_kaddr != 0) {
2961 /*
2962 * One-time reclaim of kernel_map resources we allocated in
2963 * early boot.
2964 */
2965 (void) vm_deallocate(kernel_map,
2966 kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
2967 kmapoff_kaddr = 0;
2968 }
2969
2970 if (zone_gc_allowed)
2971 zone_gc();
2972 }
2973
2974 kern_return_t
2975 task_zone_info(
2976 __unused task_t task,
2977 __unused mach_zone_name_array_t *namesp,
2978 __unused mach_msg_type_number_t *namesCntp,
2979 __unused task_zone_info_array_t *infop,
2980 __unused mach_msg_type_number_t *infoCntp)
2981 {
2982 return KERN_FAILURE;
2983 }
2984
2985 kern_return_t
2986 mach_zone_info(
2987 host_priv_t host,
2988 mach_zone_name_array_t *namesp,
2989 mach_msg_type_number_t *namesCntp,
2990 mach_zone_info_array_t *infop,
2991 mach_msg_type_number_t *infoCntp)
2992 {
2993 return (mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL));
2994 }
2995
2996
2997 kern_return_t
2998 host_zone_info(
2999 host_priv_t host,
3000 zone_name_array_t *namesp,
3001 mach_msg_type_number_t *namesCntp,
3002 zone_info_array_t *infop,
3003 mach_msg_type_number_t *infoCntp)
3004 {
3005 return (mach_memory_info(host, (mach_zone_name_array_t *)namesp, namesCntp, (mach_zone_info_array_t *)infop, infoCntp, NULL, NULL));
3006 }
3007
3008 kern_return_t
3009 mach_memory_info(
3010 host_priv_t host,
3011 mach_zone_name_array_t *namesp,
3012 mach_msg_type_number_t *namesCntp,
3013 mach_zone_info_array_t *infop,
3014 mach_msg_type_number_t *infoCntp,
3015 mach_memory_info_array_t *memoryInfop,
3016 mach_msg_type_number_t *memoryInfoCntp)
3017 {
3018 mach_zone_name_t *names;
3019 vm_offset_t names_addr;
3020 vm_size_t names_size;
3021
3022 mach_zone_info_t *info;
3023 vm_offset_t info_addr;
3024 vm_size_t info_size;
3025
3026 mach_memory_info_t *memory_info;
3027 vm_offset_t memory_info_addr;
3028 vm_size_t memory_info_size;
3029 vm_size_t memory_info_vmsize;
3030 unsigned int num_sites;
3031
3032 unsigned int max_zones, i;
3033 zone_t z;
3034 mach_zone_name_t *zn;
3035 mach_zone_info_t *zi;
3036 kern_return_t kr;
3037
3038 vm_size_t used;
3039 vm_map_copy_t copy;
3040 uint64_t zones_collectable_bytes = 0;
3041
3042 if (host == HOST_NULL)
3043 return KERN_INVALID_HOST;
3044 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
3045 if (!PE_i_can_has_debugger(NULL))
3046 return KERN_INVALID_HOST;
3047 #endif
3048
3049 /*
3050 * We assume that zones aren't freed once allocated.
3051 * We won't pick up any zones that are allocated later.
3052 */
3053
3054 simple_lock(&all_zones_lock);
3055 max_zones = (unsigned int)(num_zones);
3056 simple_unlock(&all_zones_lock);
3057
3058 names_size = round_page(max_zones * sizeof *names);
3059 kr = kmem_alloc_pageable(ipc_kernel_map,
3060 &names_addr, names_size, VM_KERN_MEMORY_IPC);
3061 if (kr != KERN_SUCCESS)
3062 return kr;
3063 names = (mach_zone_name_t *) names_addr;
3064
3065 info_size = round_page(max_zones * sizeof *info);
3066 kr = kmem_alloc_pageable(ipc_kernel_map,
3067 &info_addr, info_size, VM_KERN_MEMORY_IPC);
3068 if (kr != KERN_SUCCESS) {
3069 kmem_free(ipc_kernel_map,
3070 names_addr, names_size);
3071 return kr;
3072 }
3073 info = (mach_zone_info_t *) info_addr;
3074
3075 zn = &names[0];
3076 zi = &info[0];
3077
3078 for (i = 0; i < max_zones; i++) {
3079 struct zone zcopy;
3080 z = &(zone_array[i]);
3081 assert(z != ZONE_NULL);
3082
3083 lock_zone(z);
3084 zcopy = *z;
3085 unlock_zone(z);
3086
3087 /* assuming here the name data is static */
3088 (void) strncpy(zn->mzn_name, zcopy.zone_name,
3089 sizeof zn->mzn_name);
3090 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
3091
3092 zi->mzi_count = (uint64_t)zcopy.count;
3093 zi->mzi_cur_size = ptoa_64(zcopy.page_count);
3094 zi->mzi_max_size = (uint64_t)zcopy.max_size;
3095 zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
3096 zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
3097 zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
3098 zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
3099 zi->mzi_collectable = (uint64_t)zcopy.collectable;
3100 zones_collectable_bytes += ((uint64_t)zcopy.count_all_free_pages * PAGE_SIZE);
3101 zn++;
3102 zi++;
3103 }
3104
3105 used = max_zones * sizeof *names;
3106 if (used != names_size)
3107 bzero((char *) (names_addr + used), names_size - used);
3108
3109 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
3110 (vm_map_size_t)used, TRUE, &copy);
3111 assert(kr == KERN_SUCCESS);
3112
3113 *namesp = (mach_zone_name_t *) copy;
3114 *namesCntp = max_zones;
3115
3116 used = max_zones * sizeof *info;
3117
3118 if (used != info_size)
3119 bzero((char *) (info_addr + used), info_size - used);
3120
3121 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
3122 (vm_map_size_t)used, TRUE, &copy);
3123 assert(kr == KERN_SUCCESS);
3124
3125 *infop = (mach_zone_info_t *) copy;
3126 *infoCntp = max_zones;
3127
3128 num_sites = 0;
3129 memory_info_addr = 0;
3130
3131 if (memoryInfop && memoryInfoCntp)
3132 {
3133 num_sites = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
3134 memory_info_size = num_sites * sizeof(*info);
3135 memory_info_vmsize = round_page(memory_info_size);
3136 kr = kmem_alloc_pageable(ipc_kernel_map,
3137 &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
3138 if (kr != KERN_SUCCESS) {
3139 kmem_free(ipc_kernel_map,
3140 names_addr, names_size);
3141 kmem_free(ipc_kernel_map,
3142 info_addr, info_size);
3143 return kr;
3144 }
3145
3146 kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
3147 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
3148 assert(kr == KERN_SUCCESS);
3149
3150 memory_info = (mach_memory_info_t *) memory_info_addr;
3151 vm_page_diagnose(memory_info, num_sites, zones_collectable_bytes);
3152
3153 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
3154 assert(kr == KERN_SUCCESS);
3155
3156 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
3157 (vm_map_size_t)memory_info_size, TRUE, &copy);
3158 assert(kr == KERN_SUCCESS);
3159
3160 *memoryInfop = (mach_memory_info_t *) copy;
3161 *memoryInfoCntp = num_sites;
3162 }
3163
3164 return KERN_SUCCESS;
3165 }
3166
3167 kern_return_t
3168 mach_zone_force_gc(
3169 host_t host)
3170 {
3171
3172 if (host == HOST_NULL)
3173 return KERN_INVALID_HOST;
3174
3175 consider_zone_gc();
3176
3177 return (KERN_SUCCESS);
3178 }
3179
3180 extern unsigned int stack_total;
3181 extern unsigned long long stack_allocs;
3182
3183 #if defined(__i386__) || defined (__x86_64__)
3184 extern unsigned int inuse_ptepages_count;
3185 extern long long alloc_ptepages_count;
3186 #endif
3187
3188 void zone_display_zprint()
3189 {
3190 unsigned int i;
3191 zone_t the_zone;
3192
3193 for (i = 0; i < num_zones; i++) {
3194 the_zone = &(zone_array[i]);
3195 if(the_zone->cur_size > (1024*1024)) {
3196 printf("%.20s:\t%lu\n",the_zone->zone_name,(uintptr_t)the_zone->cur_size);
3197 }
3198 }
3199 printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size * stack_total));
3200
3201 #if defined(__i386__) || defined (__x86_64__)
3202 printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
3203 #endif
3204
3205 printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
3206 }
3207
3208 zone_t
3209 zone_find_largest(void)
3210 {
3211 unsigned int i;
3212 unsigned int max_zones;
3213 zone_t the_zone;
3214 zone_t zone_largest;
3215
3216 simple_lock(&all_zones_lock);
3217 max_zones = num_zones;
3218 simple_unlock(&all_zones_lock);
3219
3220 zone_largest = &(zone_array[0]);
3221 for (i = 0; i < max_zones; i++) {
3222 the_zone = &(zone_array[i]);
3223 if (the_zone->cur_size > zone_largest->cur_size) {
3224 zone_largest = the_zone;
3225 }
3226 }
3227 return zone_largest;
3228 }
3229
3230 #if ZONE_DEBUG
3231
3232 /* should we care about locks here ? */
3233
3234 #define zone_in_use(z) ( z->count || z->free_elements \
3235 || !queue_empty(&z->pages.all_free) \
3236 || !queue_empty(&z->pages.intermediate) \
3237 || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign)))
3238
3239
3240 #endif /* ZONE_DEBUG */
3241
3242
3243 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3244
3245 #if DEBUG || DEVELOPMENT
3246
3247 static uintptr_t *
3248 zone_copy_all_allocations_inqueue(zone_t z, queue_head_t * queue, uintptr_t * elems)
3249 {
3250 struct zone_page_metadata *page_meta;
3251 vm_offset_t free, elements;
3252 vm_offset_t idx, numElements, freeCount, bytesAvail, metaSize;
3253
3254 queue_iterate(queue, page_meta, struct zone_page_metadata *, pages)
3255 {
3256 elements = get_zone_page(page_meta);
3257 bytesAvail = ptoa(page_meta->page_count);
3258 freeCount = 0;
3259 if (z->allows_foreign && !from_zone_map(elements, z->elem_size))
3260 {
3261 metaSize = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1);
3262 bytesAvail -= metaSize;
3263 elements += metaSize;
3264 }
3265 numElements = bytesAvail / z->elem_size;
3266 // construct array of all possible elements
3267 for (idx = 0; idx < numElements; idx++)
3268 {
3269 elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size);
3270 }
3271 // remove from the array all free elements
3272 free = (vm_offset_t)page_metadata_get_freelist(page_meta);
3273 while (free)
3274 {
3275 // find idx of free element
3276 for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) {}
3277 assert(idx < numElements);
3278 // remove it
3279 bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0]));
3280 numElements--;
3281 freeCount++;
3282 // next free element
3283 vm_offset_t *primary = (vm_offset_t *) free;
3284 free = *primary ^ zp_nopoison_cookie;
3285 }
3286 elems += numElements;
3287 }
3288
3289 return (elems);
3290 }
3291
3292 kern_return_t
3293 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * refCon)
3294 {
3295 uintptr_t zbt[MAX_ZTRACE_DEPTH];
3296 zone_t zone;
3297 uintptr_t * array;
3298 uintptr_t * next;
3299 uintptr_t element, bt;
3300 uint32_t idx, count, found;
3301 uint32_t btidx, btcount, nobtcount, btfound;
3302 uint32_t elemSize;
3303 uint64_t maxElems;
3304 kern_return_t kr;
3305
3306 for (idx = 0; idx < num_zones; idx++)
3307 {
3308 if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) break;
3309 }
3310 if (idx >= num_zones) return (KERN_INVALID_NAME);
3311 zone = &zone_array[idx];
3312
3313 elemSize = (uint32_t) zone->elem_size;
3314 maxElems = ptoa(zone->page_count) / elemSize;
3315
3316 if ((zone->alloc_size % elemSize)
3317 && !leak_scan_debug_flag) return (KERN_INVALID_CAPABILITY);
3318
3319 kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array,
3320 maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG);
3321 if (KERN_SUCCESS != kr) return (kr);
3322
3323 lock_zone(zone);
3324
3325 next = array;
3326 next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next);
3327 next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next);
3328 next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next);
3329 count = (uint32_t)(next - array);
3330
3331 unlock_zone(zone);
3332
3333 zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found);
3334 assert(found <= count);
3335
3336 for (idx = 0; idx < count; idx++)
3337 {
3338 element = array[idx];
3339 if (kInstanceFlagReferenced & element) continue;
3340 element = INSTANCE_PUT(element) & ~kInstanceFlags;
3341 }
3342
3343 if (zone->zlog_btlog && !corruption_debug_flag)
3344 {
3345 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
3346 btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon);
3347 }
3348
3349 for (nobtcount = idx = 0; idx < count; idx++)
3350 {
3351 element = array[idx];
3352 if (!element) continue;
3353 if (kInstanceFlagReferenced & element) continue;
3354 element = INSTANCE_PUT(element) & ~kInstanceFlags;
3355
3356 // see if we can find any backtrace left in the element
3357 btcount = (typeof(btcount)) (zone->elem_size / sizeof(uintptr_t));
3358 if (btcount >= MAX_ZTRACE_DEPTH) btcount = MAX_ZTRACE_DEPTH - 1;
3359 for (btfound = btidx = 0; btidx < btcount; btidx++)
3360 {
3361 bt = ((uintptr_t *)element)[btcount - 1 - btidx];
3362 if (!VM_KERNEL_IS_SLID(bt)) break;
3363 zbt[btfound++] = bt;
3364 }
3365 if (btfound) (*proc)(refCon, 1, elemSize, &zbt[0], btfound);
3366 else nobtcount++;
3367 }
3368 if (nobtcount)
3369 {
3370 // fake backtrace when we found nothing
3371 zbt[0] = (uintptr_t) &zalloc;
3372 (*proc)(refCon, nobtcount, elemSize, &zbt[0], 1);
3373 }
3374
3375 kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t));
3376
3377 return (KERN_SUCCESS);
3378 }
3379
3380 void
3381 kern_wired_diagnose(void)
3382 {
3383 unsigned int count = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
3384 mach_memory_info_t info[count];
3385 unsigned int idx;
3386 uint64_t total_zone, total_wired, top_wired, osfmk_wired;
3387
3388 if (KERN_SUCCESS != vm_page_diagnose(info, count, 0)) return;
3389
3390 total_zone = total_wired = top_wired = osfmk_wired = 0;
3391 for (idx = 0; idx < num_zones; idx++)
3392 {
3393 total_zone += ptoa_64(zone_array[idx].page_count);
3394 }
3395 total_wired = total_zone;
3396
3397 for (idx = 0; idx < count; idx++)
3398 {
3399 if (VM_KERN_COUNT_WIRED == info[idx].site) top_wired = info[idx].size;
3400 if (VM_KERN_MEMORY_OSFMK == info[idx].site) osfmk_wired = info[idx].size;
3401 if (VM_KERN_SITE_HIDE & info[idx].flags) continue;
3402 if (!(VM_KERN_SITE_WIRED & info[idx].flags)) continue;
3403 total_wired += info[idx].size;
3404 }
3405
3406 printf("top 0x%qx, total 0x%qx, zone 0x%qx, osfmk 0x%qx\n",
3407 top_wired, total_wired, total_zone, osfmk_wired);
3408 }
3409
3410 boolean_t
3411 kdp_is_in_zone(void *addr, const char *zone_name)
3412 {
3413 zone_t z;
3414 return (zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name));
3415 }
3416
3417 #endif /* DEBUG || DEVELOPMENT */