/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/zalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Zone-based memory allocator.  A zone is a collection of fixed size
 *	data blocks for which quick allocation/deallocation is possible.
 */
#include <zone_debug.h>
#include <zone_alias_addr.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/btlog.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>
#include <machine/machine_routines.h> /* ml_cpu_get_info */

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

/*
 * ZONE_ALIAS_ADDR
 *
 * With this option enabled, zones with alloc_size <= PAGE_SIZE allocate
 * a virtual page from the zone_map, but before zcram-ing the allocated memory
 * into the zone, the page is translated to use the alias address of the page
 * in the static kernel region. zone_gc reverses that translation when
 * scanning the freelist to collect free pages so that it can look up the page
 * in the zone_page_table, and free it with kmem_free.
 *
 * The static kernel region is a flat 1:1 mapping of physical memory passed
 * to xnu by the booter. It is mapped to the range:
 * [gVirtBase, gVirtBase + gPhysSize]
 *
 * Accessing memory via the static kernel region is faster due to the
 * entire region being mapped via large pages, cutting down
 * on TLB misses.
 *
 * zinit favors using PAGE_SIZE backing allocations for a zone unless it would
 * waste more than 10% space to use a single page, in order to take advantage
 * of the speed benefit for as many zones as possible.
 *
 * Zones with > PAGE_SIZE allocations can't take advantage of this
 * because kernel_memory_allocate doesn't give out physically contiguous pages.
 *
 * zone_virtual_addr()
 *  - translates an address from the static kernel region to the zone_map
 *  - returns the same address if it's not from the static kernel region
 * It relies on the fact that a physical page mapped to the
 * zone_map is not mapped anywhere else (except the static kernel region).
 *
 * zone_alias_addr()
 *  - translates a virtual memory address from the zone_map to the
 *    corresponding address in the static kernel region
 *
 */

#if	!ZONE_ALIAS_ADDR
#define from_zone_map(addr, size) \
	((vm_offset_t)(addr) >= zone_map_min_address && \
	 ((vm_offset_t)(addr) + size - 1) < zone_map_max_address)
#else
#define from_zone_map(addr, size) \
	((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) >= zone_map_min_address && \
	 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) + size - 1) < zone_map_max_address)
#endif

/*
 * Zone Corruption Debugging
 *
 * We use three techniques to detect modification of a zone element
 * after it's been freed.
 *
 * (1) Check the freelist next pointer for sanity.
 * (2) Store a backup of the next pointer at the end of the element,
 *     and compare it to the primary next pointer when the element is allocated
 *     to detect corruption of the freelist due to use-after-free bugs.
 *     The backup pointer is also XORed with a per-boot random cookie.
 * (3) Poison the freed element by overwriting it with 0xdeadbeef,
 *     and check for that value when the element is being reused to make sure
 *     no part of the element has been modified while it was on the freelist.
 *     This will also help catch read-after-frees, as code will now dereference
 *     0xdeadbeef instead of a valid but freed pointer.
 *
 * (1) and (2) occur for every allocation and free to a zone.
 * This is done to make it slightly more difficult for an attacker to
 * manipulate the freelist to behave in a specific way.
 *
 * Poisoning (3) occurs periodically, once every N frees (counted per-zone),
 * and on every free for zones smaller than a cacheline. If -zp
 * is passed as a boot arg, poisoning occurs for every free.
 *
 * Performance slowdown is proportional to the frequency of poisoning
 * (i.e. inversely proportional to N), with a 4-5% hit around N=1,
 * down to ~0.3% at N=16 and just "noise" at N=32 and higher.
 * You can expect to find a 100% reproducible bug in an average of
 * N tries, with a standard deviation of about N, but you will want to set
 * "-zp" to always poison every free if you are attempting to reproduce
 * a known bug.
 *
 * For a more heavyweight, but finer-grained method of detecting misuse
 * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c.
 *
 * Zone Corruption Logging
 *
 * You can also track where corruptions come from by using the boot-arguments
 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later
 * in this document for more implementation and usage information.
 *
 * Zone Leak Detection
 *
 * To debug leaks of zone memory, use the zone leak detection tool 'zleaks'
 * found later in this file via the showtopztrace and showz* macros in kgmacros,
 * or use zlog without the -zc argument.
 */
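
/*
 * A minimal sketch (disabled, and not part of the allocator) of how
 * checks (1) and (2) above compose. "freelist_element_ok" is a
 * hypothetical name; the real logic lives in free_to_zone() and
 * try_alloc_from_zone() below, using the cookie globals defined
 * later in this file.
 */
#if 0
static boolean_t
freelist_element_ok(zone_t zone, vm_offset_t primary, vm_offset_t backup)
{
	/* (1) the next pointer must look like a sane zone element */
	if (!is_sane_zone_element(zone, primary))
		return FALSE;
	/* (2) the backup must decode to the same value under one cookie */
	return (primary == (backup ^ zp_nopoison_cookie)) ||
	       (primary == (backup ^ zp_poisoned_cookie));
}
#endif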

/* Returns TRUE if we rolled over the counter at factor */
static inline boolean_t
sample_counter(volatile uint32_t *count_p, uint32_t factor)
{
	uint32_t old_count, new_count;
	boolean_t rolled_over;

	do {
		new_count = old_count = *count_p;

		if (++new_count >= factor) {
			rolled_over = TRUE;
			new_count = 0;
		} else {
			rolled_over = FALSE;
		}

	} while (!OSCompareAndSwap(old_count, new_count, count_p));

	return rolled_over;
}
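
/*
 * Illustrative (disabled) use of sample_counter, modeled on the zfree()
 * path later in the file: poison roughly one in every zp_factor frees.
 * The per-zone counter field name here is an assumption for the sketch.
 */
#if 0
	if (sample_counter(&zone->zp_count, zp_factor))
		poison = TRUE;	/* counter rolled over: poison this element */
#endif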

#if defined(__LP64__)
#define ZP_POISON	0xdeadbeefdeadbeef
#else
#define ZP_POISON	0xdeadbeef
#endif

#define ZP_DEFAULT_SAMPLING_FACTOR	16
#define ZP_DEFAULT_SCALE_FACTOR		4

/*
 * A zp_factor of 0 indicates zone poisoning is disabled;
 * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline).
 * Passing the -no-zp boot-arg disables even this behavior.
 * In all cases, we record and check the integrity of a backup pointer.
 */

/* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */
uint32_t	zp_factor = 0;

/* set by zp-scale=N boot arg, scales zp_factor by zone size */
uint32_t	zp_scale = 0;

/* set in zp_init, zero indicates -no-zp boot-arg */
vm_size_t	zp_tiny_zone_limit = 0;

/* initialized to a per-boot random value in zp_init */
uintptr_t	zp_poisoned_cookie = 0;
uintptr_t	zp_nopoison_cookie = 0;


/*
 * initialize zone poisoning
 * called from zone_bootstrap before any allocations are made from zalloc
 */
static inline void
zp_init(void)
{
	char temp_buf[16];

	/*
	 * Initialize backup pointer random cookie for poisoned elements.
	 * Try not to call early_random() back to back; it may return
	 * the same value if mach_absolute_time doesn't have sufficient time
	 * to tick over between calls.  <rdar://problem/11597395>
	 * (This is only a problem on embedded devices)
	 */
	zp_poisoned_cookie = (uintptr_t) early_random();

	/*
	 * Always poison zones smaller than a cacheline,
	 * because it's pretty close to free
	 */
	ml_cpu_info_t cpu_info;
	ml_cpu_get_info(&cpu_info);
	zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size;

	zp_factor = ZP_DEFAULT_SAMPLING_FACTOR;
	zp_scale  = ZP_DEFAULT_SCALE_FACTOR;

	//TODO: Bigger permutation?
	/*
	 * Permute the default factor +/- 1 to make it less predictable
	 * This adds or subtracts ~4 poisoned objects per 1000 frees.
	 */
	if (zp_factor != 0) {
		uint32_t rand_bits = early_random() & 0x3;

		if (rand_bits == 0x1)
			zp_factor += 1;
		else if (rand_bits == 0x2)
			zp_factor -= 1;
		/* if 0x0 or 0x3, leave it alone */
	}

	/* -zp: enable poisoning for every alloc and free */
	if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
		zp_factor = 1;
	}

	/* -no-zp: disable poisoning completely even for tiny zones */
	if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
		zp_factor          = 0;
		zp_tiny_zone_limit = 0;
		printf("Zone poisoning disabled\n");
	}

	/* zp-factor=XXXX: override how often to poison freed zone elements */
	if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) {
		printf("Zone poisoning factor override: %u\n", zp_factor);
	}

	/* zp-scale=XXXX: override how much zone size scales zp-factor by */
	if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) {
		printf("Zone poisoning scale factor override: %u\n", zp_scale);
	}

	/* Initialize backup pointer random cookie for unpoisoned elements */
	zp_nopoison_cookie = (uintptr_t) early_random();

#if MACH_ASSERT
	if (zp_poisoned_cookie == zp_nopoison_cookie)
		panic("early_random() is broken: %p and %p are not random\n",
		      (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie);
#endif

	/*
	 * Use the last bit in the backup pointer to hint poisoning state
	 * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
	 * the low bits are zero.
	 */
	zp_poisoned_cookie |=   (uintptr_t)0x1ULL;
	zp_nopoison_cookie &= ~((uintptr_t)0x1ULL);

#if defined(__LP64__)
	/*
	 * Make backup pointers more obvious in GDB for 64 bit
	 * by making 0xFFFFFF... ^ cookie = 0xFACADE...
	 * (0xFACADE = 0xFFFFFF ^ 0x053521)
	 * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011)
	 * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
	 * by the sanity check, so it's OK for that part of the cookie to be predictable.
	 *
	 * TODO: Use #defines, xors, and shifts
	 */

	zp_poisoned_cookie &= 0x000000FFFFFFFFFF;
	zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */

	zp_nopoison_cookie &= 0x000000FFFFFFFFFF;
	zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */
#endif
}

/* zone_map page count for page table structure */
uint64_t zone_map_table_page_count = 0;

/*
 * These macros are used to keep track of the number
 * of pages being used by the zone currently.  The
 * z->page_count is protected by the zone lock.
 */
#define ZONE_PAGE_COUNT_INCR(z, count)		\
{						\
	OSAddAtomic64(count, &(z->page_count));	\
}

#define ZONE_PAGE_COUNT_DECR(z, count)			\
{							\
	OSAddAtomic64(-count, &(z->page_count));	\
}

/* for is_sane_zone_element and garbage collection */

vm_offset_t	zone_map_min_address = 0;  /* initialized in zone_init */
vm_offset_t	zone_map_max_address = 0;

/* Globals for random boolean generator for elements in free list */
#define MAX_ENTROPY_PER_ZCRAM		4
#define RANDOM_BOOL_GEN_SEED_COUNT	4
static unsigned int bool_gen_seed[RANDOM_BOOL_GEN_SEED_COUNT];
static unsigned int bool_gen_global = 0;
decl_simple_lock_data(, bool_gen_lock)

/* Helpful for walking through a zone's free element list. */
struct zone_free_element {
	struct zone_free_element *next;
	/* ... */
	/* void *backup_ptr; */
};

struct zone_page_metadata {
	queue_chain_t		pages;
	struct zone_free_element *elements;
	zone_t			zone;
	uint16_t		alloc_count;
	uint16_t		free_count;
};

/* The backup pointer is stored in the last pointer-sized location in an element. */
static inline vm_offset_t *
get_backup_ptr(vm_size_t elem_size,
	       vm_offset_t *element)
{
	return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t));
}
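
/*
 * For reference, the layout of a free element (zinit below enforces
 * elem_size >= 2 pointers):
 *
 *   +-----------------+--------- ... ---------+----------------------+
 *   | next (primary)  |  rest of the element  | next ^ cookie backup |
 *   +-----------------+--------- ... ---------+----------------------+
 *   ^ element                                 ^ get_backup_ptr(...)
 */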

static inline struct zone_page_metadata *
get_zone_page_metadata(struct zone_free_element *element)
{
	return (struct zone_page_metadata *)(trunc_page((vm_offset_t)element));
}

/*
 * Zone checking helper function.
 * A pointer that satisfies these conditions is OK to be a freelist next pointer.
 * A pointer that doesn't satisfy these conditions indicates corruption.
 */
static inline boolean_t
is_sane_zone_ptr(zone_t		zone,
		 vm_offset_t	addr,
		 size_t		obj_size)
{
	/* Must be aligned to pointer boundary */
	if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0))
		return FALSE;

	/* Must be a kernel address */
	if (__improbable(!pmap_kernel_va(addr)))
		return FALSE;

	/* Must be from zone map if the zone only uses memory from the zone_map */
	/*
	 * TODO: Remove the zone->collectable check when every
	 * zone using foreign memory is properly tagged with allows_foreign
	 */
	if (zone->collectable && !zone->allows_foreign) {
#if ZONE_ALIAS_ADDR
		/*
		 * If this address is in the static kernel region, it might be
		 * the alias address of a valid zone element.
		 * If we tried to find the zone_virtual_addr() of an invalid
		 * address in the static kernel region, it will panic, so don't
		 * check addresses in this region.
		 *
		 * TODO: Use a safe variant of zone_virtual_addr to
		 *       make this check more accurate
		 *
		 * The static kernel region is mapped at:
		 * [gVirtBase, gVirtBase + gPhysSize]
		 */
		if ((addr - gVirtBase) < gPhysSize)
			return TRUE;
#endif
		/* check if addr is from zone map */
		if (addr >= zone_map_min_address &&
		    (addr + obj_size - 1) < zone_map_max_address)
			return TRUE;

		return FALSE;
	}

	return TRUE;
}

static inline boolean_t
is_sane_zone_page_metadata(zone_t	zone,
			   vm_offset_t	page_meta)
{
	/* NULL page metadata structures are invalid */
	if (page_meta == 0)
		return FALSE;
	return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata));
}

static inline boolean_t
is_sane_zone_element(zone_t	zone,
		     vm_offset_t addr)
{
	/* NULL is OK because it indicates the tail of the list */
	if (addr == 0)
		return TRUE;
	return is_sane_zone_ptr(zone, addr, zone->elem_size);
}

/* Someone wrote to freed memory. */
static inline void /* noreturn */
zone_element_was_modified_panic(zone_t		zone,
				vm_offset_t	element,
				vm_offset_t	found,
				vm_offset_t	expected,
				vm_offset_t	offset)
{
	panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p",
	      zone->zone_name,
	      (void *)   expected,
	      (void *)   found,
	      (void *)   (expected ^ found),
	      (uint32_t) offset,
	      (uint32_t) zone->elem_size,
	      (void *)   element,
	      (void *)   zp_nopoison_cookie,
	      (void *)   zp_poisoned_cookie);
}

/*
 * The primary and backup pointers don't match.
 * Determine which one was likely the corrupted pointer, find out what it
 * probably should have been, and panic.
 * I would like to mark this as noreturn, but panic() isn't marked noreturn.
 */
static void /* noreturn */
backup_ptr_mismatch_panic(zone_t	zone,
			  vm_offset_t	element,
			  vm_offset_t	primary,
			  vm_offset_t	backup)
{
	vm_offset_t likely_backup;

	boolean_t sane_backup;
	boolean_t sane_primary = is_sane_zone_element(zone, primary);
	boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE;

#if defined(__LP64__)
	/* We can inspect the tag in the upper bits for additional confirmation */
	if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000)
		element_was_poisoned = TRUE;
	else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
		element_was_poisoned = FALSE;
#endif

	if (element_was_poisoned) {
		likely_backup = backup ^ zp_poisoned_cookie;
		sane_backup = is_sane_zone_element(zone, likely_backup);
	} else {
		likely_backup = backup ^ zp_nopoison_cookie;
		sane_backup = is_sane_zone_element(zone, likely_backup);
	}

	/* The primary is definitely the corrupted one */
	if (!sane_primary && sane_backup)
		zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);

	/* The backup is definitely the corrupted one */
	if (sane_primary && !sane_backup)
		zone_element_was_modified_panic(zone, element, backup,
						(primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)),
						zone->elem_size - sizeof(vm_offset_t));

	/*
	 * Not sure which is the corrupted one.
	 * It's less likely that the backup pointer was overwritten with
	 * ( (sane address) ^ (valid cookie) ), so we'll guess that the
	 * primary pointer has been overwritten with a sane but incorrect address.
	 */
	if (sane_primary && sane_backup)
		zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);

	/* Neither are sane, so just guess. */
	zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
}

/*
 * Sets the next element of tail to elem.
 * elem can be NULL.
 * Preserves the poisoning state of the element.
 */
static inline void
append_zone_element(zone_t			zone,
		    struct zone_free_element	*tail,
		    struct zone_free_element	*elem)
{
	vm_offset_t *backup = get_backup_ptr(zone->elem_size, (vm_offset_t *) tail);

	vm_offset_t old_backup = *backup;

	vm_offset_t old_next = (vm_offset_t) tail->next;
	vm_offset_t new_next = (vm_offset_t) elem;

	if      (old_next == (old_backup ^ zp_nopoison_cookie))
		*backup = new_next ^ zp_nopoison_cookie;
	else if (old_next == (old_backup ^ zp_poisoned_cookie))
		*backup = new_next ^ zp_poisoned_cookie;
	else
		backup_ptr_mismatch_panic(zone,
					  (vm_offset_t) tail,
					  old_next,
					  old_backup);

	tail->next = elem;
}


/*
 * Insert a linked list of elements (delineated by head and tail) at the head of
 * the zone free list. Every element in the list being added has already gone
 * through append_zone_element, so their backup pointers are already
 * set properly.
 * Precondition: There should be no elements after tail
 */
static inline void
add_list_to_zone(zone_t			zone,
		 struct zone_free_element *head,
		 struct zone_free_element *tail)
{
	assert(tail->next == NULL);
	assert(!zone->use_page_list);

	append_zone_element(zone, tail, zone->free_elements);

	zone->free_elements = head;
}


/*
 * Adds the element to the head of the zone's free list
 * Keeps a backup next-pointer at the end of the element
 */
static inline void
free_to_zone(zone_t	zone,
	     vm_offset_t element,
	     boolean_t	poison)
{
	vm_offset_t old_head;
	struct zone_page_metadata *page_meta;

	vm_offset_t *primary = (vm_offset_t *) element;
	vm_offset_t *backup  = get_backup_ptr(zone->elem_size, primary);

	if (zone->use_page_list) {
		page_meta = get_zone_page_metadata((struct zone_free_element *)element);
		assert(page_meta->zone == zone);
		old_head = (vm_offset_t)page_meta->elements;
	} else {
		old_head = (vm_offset_t)zone->free_elements;
	}

#if MACH_ASSERT
	if (__improbable(!is_sane_zone_element(zone, old_head)))
		panic("zfree: invalid head pointer %p for freelist of zone %s\n",
		      (void *) old_head, zone->zone_name);
#endif

	if (__improbable(!is_sane_zone_element(zone, element)))
		panic("zfree: freeing invalid pointer %p to zone %s\n",
		      (void *) element, zone->zone_name);

	/*
	 * Always write a redundant next pointer
	 * So that it is more difficult to forge, xor it with a random cookie
	 * A poisoned element is indicated by using zp_poisoned_cookie
	 * instead of zp_nopoison_cookie
	 */

	*backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);

	/* Insert this element at the head of the free list */
	*primary = old_head;
	if (zone->use_page_list) {
		page_meta->elements = (struct zone_free_element *)element;
		page_meta->free_count++;
		if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
			if (page_meta->free_count == 1) {
				/* first foreign element freed on page, move from all_used */
				remqueue((queue_entry_t)page_meta);
				enqueue_tail(&zone->pages.any_free_foreign, (queue_entry_t)page_meta);
			} else {
				/* no other list transitions */
			}
		} else if (page_meta->free_count == page_meta->alloc_count) {
			/* whether the page was on the intermediate or all_used queue, move it to all_free */
			remqueue((queue_entry_t)page_meta);
			enqueue_tail(&zone->pages.all_free, (queue_entry_t)page_meta);
		} else if (page_meta->free_count == 1) {
			/* first free element on page, move from all_used */
			remqueue((queue_entry_t)page_meta);
			enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
		}
	} else {
		zone->free_elements = (struct zone_free_element *)element;
	}
	zone->count--;
	zone->countfree++;
}
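
/*
 * For reference, the page queue transitions that free_to_zone (above) and
 * try_alloc_from_zone (below) drive for native use_page_list pages (foreign
 * pages only move between all_used and any_free_foreign):
 *
 *   all_used --(first elem freed)---> intermediate --(last elem freed)---> all_free
 *   all_used <--(last elem alloc'd)-- intermediate <--(first elem alloc'd)- all_free
 *
 * (A page whose last free and last allocation coincide, e.g. with
 * alloc_count == 1, moves straight between all_used and all_free.)
 */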


/*
 * Removes an element from the zone's free list, returning 0 if the free list is empty.
 * Verifies that the next-pointer and backup next-pointer are intact,
 * and verifies that a poisoned element hasn't been modified.
 */
static inline vm_offset_t
try_alloc_from_zone(zone_t zone,
		    boolean_t* check_poison)
{
	vm_offset_t element;
	struct zone_page_metadata *page_meta;

	*check_poison = FALSE;

	/* if zone is empty, bail */
	if (zone->use_page_list) {
		if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
			page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
		else if (!queue_empty(&zone->pages.intermediate))
			page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
		else if (!queue_empty(&zone->pages.all_free))
			page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
		else {
			return 0;
		}

		/* Check if page_meta passes is_sane_zone_element */
		if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
			panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
			      (void *) page_meta, zone->zone_name);
		assert(page_meta->zone == zone);
		element = (vm_offset_t)page_meta->elements;
	} else {
		if (zone->free_elements == NULL)
			return 0;

		element = (vm_offset_t)zone->free_elements;
	}

#if MACH_ASSERT
	if (__improbable(!is_sane_zone_element(zone, element)))
		panic("zalloc: invalid head pointer %p for freelist of zone %s\n",
		      (void *) element, zone->zone_name);
#endif

	vm_offset_t *primary = (vm_offset_t *) element;
	vm_offset_t *backup  = get_backup_ptr(zone->elem_size, primary);

	vm_offset_t next_element        = *primary;
	vm_offset_t next_element_backup = *backup;

	/*
	 * backup_ptr_mismatch_panic will determine what next_element
	 * should have been, and print it appropriately
	 */
	if (__improbable(!is_sane_zone_element(zone, next_element)))
		backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);

	/* Check the backup pointer for the regular cookie */
	if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {

		/* Check for the poisoned cookie instead */
		if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie)))
			/* Neither cookie is valid, corruption has occurred */
			backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);

		/*
		 * Element was marked as poisoned, so check its integrity before using it.
		 */
		*check_poison = TRUE;
	}

	if (zone->use_page_list) {

		/* Make sure the page_meta is at the correct offset from the start of page */
		if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element)))
			panic("zalloc: metadata located at incorrect location on page of zone %s\n",
			      zone->zone_name);

		/* Make sure next_element belongs to the same page as page_meta */
		if (next_element) {
			if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element)))
				panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
				      (void *)next_element, (void *)element, zone->zone_name);
		}
	}

	/* Remove this element from the free list */
	if (zone->use_page_list) {

		page_meta->elements = (struct zone_free_element *)next_element;
		page_meta->free_count--;

		if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
			if (page_meta->free_count == 0) {
				/* move to all used */
				remqueue((queue_entry_t)page_meta);
				enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
			} else {
				/* no other list transitions */
			}
		} else if (page_meta->free_count == 0) {
			/* remove from intermediate or free, move to all_used */
			remqueue((queue_entry_t)page_meta);
			enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
		} else if (page_meta->alloc_count == page_meta->free_count + 1) {
			/* remove from free, move to intermediate */
			remqueue((queue_entry_t)page_meta);
			enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
		}
	} else {
		zone->free_elements = (struct zone_free_element *)next_element;
	}
	zone->countfree--;
	zone->count++;
	zone->sum_count++;

	return element;
}


/*
 * End of zone poisoning
 */

/*
 * Fake zones for things that want to report via zprint but are not actually zones.
 */
struct fake_zone_info {
	const char* name;
	void (*init)(int);
	void (*query)(int *,
		      vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
		      uint64_t *, int *, int *, int *);
};

static const struct fake_zone_info fake_zones[] = {
};
static const unsigned int num_fake_zones =
	sizeof (fake_zones) / sizeof (fake_zones[0]);

/*
 * Zone info options
 */
boolean_t zinfo_per_task = FALSE;	/* enabled by -zinfop in boot-args */
#define ZINFO_SLOTS 200			/* for now */
#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)

/*
 * Support for garbage collection of unused zone pages
 *
 * The kernel virtually allocates the "zone map" submap of the kernel
 * map. When an individual zone needs more storage, memory is allocated
 * out of the zone map, and the two-level "zone_page_table" is
 * expanded on demand so that it has entries for those pages.
 * zone_page_init()/zone_page_alloc() initialize "alloc_count"
 * to the number of zone elements that occupy the zone page (which may
 * be a minimum of 1, including if a zone element spans multiple
 * pages).
 *
 * Asynchronously, the zone_gc() logic attempts to walk zone free
 * lists to see if all the elements on a zone page are free. If
 * "collect_count" (which it increments during the scan) matches
 * "alloc_count", the zone page is a candidate for collection and the
 * physical page is returned to the VM system. During this process, the
 * first word of the zone page is re-used to maintain a linked list of
 * to-be-collected zone pages.
 */
typedef uint32_t zone_page_index_t;
#define ZONE_PAGE_INDEX_INVALID		((zone_page_index_t)0xFFFFFFFFU)

struct zone_page_table_entry {
	volatile uint16_t	alloc_count;
	volatile uint16_t	collect_count;
};

#define	ZONE_PAGE_USED		0
#define ZONE_PAGE_UNUSED	0xffff

/* Forwards */
void		zone_page_init(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_alloc(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_free_element(
				zone_page_index_t	*free_page_head,
				zone_page_index_t	*free_page_tail,
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_collect(
				vm_offset_t	addr,
				vm_size_t	size);

boolean_t	zone_page_collectable(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_keep(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_display_zprint(void);

zone_t		zone_find_largest(void);

/*
 * Async allocation of zones
 * This mechanism allows for bootstrapping an empty zone which is set up with
 * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call
 * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free.
 * This will prime the zone for the next use.
 *
 * Currently the thread_callout function (zalloc_async) will loop through all zones
 * looking for any zone with async_pending set and do the work for it.
 *
 * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call,
 * then zalloc_noblock to an empty zone may succeed.
 */
void		zalloc_async(
				thread_call_param_t	p0,
				thread_call_param_t	p1);

static thread_call_data_t call_async_alloc;

vm_map_t	zone_map = VM_MAP_NULL;

zone_t		zone_zone = ZONE_NULL;	/* the zone containing other zones */

zone_t		zinfo_zone = ZONE_NULL;	/* zone of per-task zone info */

/*
 *	The VM system gives us an initial chunk of memory.
 *	It has to be big enough to allocate the zone_zone
 *	all the way through the pmap zone.
 */

vm_offset_t	zdata;
vm_size_t	zdata_size;
/*
 * Align elements that use the zone page list to 32 byte boundaries.
 */
#define ZONE_ELEMENT_ALIGNMENT 32

#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone)				\
	(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);

/*
 *	The zone_locks_grp allows for collecting lock statistics.
 *	All locks are associated to this group in zinit.
 *	Look at tools/lockstat for debugging lock contention.
 */

lck_grp_t	zone_locks_grp;
lck_grp_attr_t	zone_locks_grp_attr;

#define lock_zone_init(zone)					\
MACRO_BEGIN							\
	lck_attr_setdefault(&(zone)->lock_attr);		\
	lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext,	\
	    &zone_locks_grp, &(zone)->lock_attr);		\
MACRO_END

#define lock_try_zone(zone)	lck_mtx_try_lock_spin(&zone->lock)

/*
 *	Garbage collection map information
 */
#define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
vm_size_t	zone_page_table_used_size;
unsigned int	zone_pages;
unsigned int	zone_page_table_second_level_size;	/* power of 2 */
unsigned int	zone_page_table_second_level_shift_amount;

#define zone_page_table_first_level_slot(x)  ((x) >> zone_page_table_second_level_shift_amount)
#define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))

void	zone_page_table_expand(zone_page_index_t pindex);
struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
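
/*
 * A sketch of how the two slot macros above compose; this mirrors the
 * real zone_page_table_lookup(), which is defined later in the file
 * (beyond this excerpt):
 */
#if 0
	struct zone_page_table_entry *second_level =
	    zone_page_table[zone_page_table_first_level_slot(pindex)];
	struct zone_page_table_entry *entry = (second_level == NULL) ?
	    NULL : &second_level[zone_page_table_second_level_slot(pindex)];
#endif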

/*
 *	Exclude more than one concurrent garbage collection
 */
decl_lck_mtx_data(, zone_gc_lock)

lck_attr_t      zone_gc_lck_attr;
lck_grp_t       zone_gc_lck_grp;
lck_grp_attr_t  zone_gc_lck_grp_attr;
lck_mtx_ext_t   zone_gc_lck_ext;

/*
 *	Protects first_zone, last_zone, num_zones,
 *	and the next_zone field of zones.
 */
decl_simple_lock_data(, all_zones_lock)
zone_t		first_zone;
zone_t		*last_zone;
unsigned int	num_zones;

boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
boolean_t panic_include_zprint = FALSE;
boolean_t zone_gc_allowed_by_time_throttle = TRUE;

vm_offset_t panic_kext_memory_info = 0;
vm_size_t panic_kext_memory_size = 0;

#define ZALLOC_DEBUG_ZONEGC	0x00000001
#define ZALLOC_DEBUG_ZCRAM	0x00000002
uint32_t zalloc_debug = 0;

/*
 * Zone leak debugging code
 *
 * When enabled, this code keeps a log to track allocations to a particular zone that have not
 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
 * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
 * off by default.
 *
 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
 * is the name of the zone you wish to log.
 *
 * This code only tracks one zone, so you need to identify which one is leaking first.
 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
 * See the help in the kgmacros for usage info.
 *
 *
 * Zone corruption logging
 *
 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
 * corrupted to examine its history. This should lead to the source of the corruption.
 */

static int log_records;	/* size of the log, expressed in number of records */

#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */

static char zone_name_to_log[MAX_ZONE_NAME] = "";	/* the zone name we're logging, if any */

/* Log allocations and frees to help debug a zone element corruption */
boolean_t	corruption_debug_flag = FALSE;	/* enabled by "-zc" boot-arg */

/*
 * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
 * the number of records you want in the log. For example, "zrecs=1000" sets it to 1000 records. Note
 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
 * but one doesn't generally care about performance when tracking down a leak. The log is capped at
 * ZRECORDS_MAX records since going much larger than this tends to make the system unresponsive and
 * unbootable on small memory configurations. The default value is 4000 records.
 */

#if defined(__LP64__)
#define ZRECORDS_MAX		128000		/* Max records allowed in the log */
#else
#define ZRECORDS_MAX		8000		/* Max records allowed in the log */
#endif
#define ZRECORDS_DEFAULT	4000		/* default records in log if zrecs is not specified in boot-args */

/*
 * Each record in the log contains a pointer to the zone element it refers to,
 * and a small array to hold the pc's from the stack trace.  A
 * record is added to the log each time a zalloc() is done in the zone_of_interest.  For leak debugging,
 * the record is cleared when a zfree() is done.  For corruption debugging, the log tracks both allocs and frees.
 * If the log fills, old records are replaced as if it were a circular buffer.
 */


/*
 * Opcodes for the btlog operation field:
 */

#define ZOP_ALLOC	1
#define ZOP_FREE	0

/*
 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
 */
static btlog_t *zlog_btlog;		/* the log itself, dynamically allocated when logging is enabled */
static zone_t  zone_of_interest = NULL;	/* the zone being watched; corresponds to zone_name_to_log */

/*
 * Decide if we want to log this zone by doing a string compare between a zone name and the name
 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
 * match a space in the zone name.
 */

static int
log_this_zone(const char *zonename, const char *logname)
{
	int len;
	const char *zc = zonename;
	const char *lc = logname;

	/*
	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
	 */

	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {

		/*
		 * If the current characters don't match, check for a space in
		 * the zone name and a corresponding period in the log name.
		 * If that's not there, then the strings don't match.
		 */

		if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
			break;

		/*
		 * The strings are equal so far.  If we're at the end, then it's a match.
		 */

		if (*zc == '\0')
			return TRUE;
	}

	return FALSE;
}
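
/*
 * For example (assuming a zone named "vm objects" exists), the period in
 * the boot-arg stands in for the space:
 *
 *	log_this_zone("vm objects", "vm.objects")  ->  TRUE
 *	log_this_zone("vm objects", "vm object")   ->  FALSE (lengths differ)
 */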


/*
 * Test if we want to log this zalloc/zfree event.  We log if this is the zone we're interested in and
 * the buffer for the records has been allocated.
 */

#define DO_LOGGING(z)	(zlog_btlog && (z) == zone_of_interest)

extern boolean_t kmem_alloc_ready;

#if CONFIG_ZLEAKS
#pragma mark -
#pragma mark Zone Leak Detection

/*
 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
 * backtrace. Every free, we examine the table and determine if the allocation was being tracked,
 * and stop tracking it if it was being tracked.
 *
 * We track the allocations in the zallocations hash table, which stores the address that was returned from
 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
 * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
 * backtraces - we don't store them more than once.
 *
 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
 * a large amount of virtual space.
 */
#define ZLEAK_STATE_ENABLED	0x01	/* Zone leak monitoring should be turned on if zone_map fills up. */
#define ZLEAK_STATE_ACTIVE	0x02	/* We are actively collecting traces. */
#define ZLEAK_STATE_ACTIVATING	0x04	/* Some thread is doing setup; others should move along. */
#define ZLEAK_STATE_FAILED	0x08	/* Attempt to allocate tables failed.  We will not try again. */
uint32_t	zleak_state = 0;	/* State of collection, as above */

boolean_t	panic_include_ztrace = FALSE;		/* Enable zleak logging on panic */
vm_size_t	zleak_global_tracking_threshold;	/* Size of zone map at which to start collecting data */
vm_size_t	zleak_per_zone_tracking_threshold;	/* Size a zone will have before we will collect data on it */
unsigned int	zleak_sample_factor = 1000;		/* Allocations per sample attempt */

/*
 * Counters for allocation statistics.
 */

/* Times two active records want to occupy the same spot */
unsigned int z_alloc_collisions = 0;
unsigned int z_trace_collisions = 0;

/* Times a new record lands on a spot previously occupied by a freed allocation */
unsigned int z_alloc_overwrites = 0;
unsigned int z_trace_overwrites = 0;

/* Times a new alloc or trace is put into the hash table */
unsigned int z_alloc_recorded = 0;
unsigned int z_trace_recorded = 0;

/* Times zleak_log returned false due to not being able to acquire the lock */
unsigned int z_total_conflicts = 0;


#pragma mark struct zallocation
/*
 * Structure for keeping track of an allocation
 * An allocation bucket is in use if its element is not NULL
 */
struct zallocation {
	uintptr_t	za_element;	/* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
	vm_size_t	za_size;	/* how much memory did this allocation take up? */
	uint32_t	za_trace_index;	/* index into ztraces for backtrace associated with allocation */
	/* TODO: #if this out */
	uint32_t	za_hit_count;	/* for determining effectiveness of hash function */
};

/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;

vm_size_t zleak_max_zonemap_size;

/* Hashmaps of allocations and their corresponding traces */
static struct zallocation*	zallocations;
static struct ztrace*		ztraces;

/* not static so that panic can see this, see kern/debug.c */
struct ztrace*			top_ztrace;

/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
static lck_spin_t		zleak_lock;
static lck_attr_t		zleak_lock_attr;
static lck_grp_t		zleak_lock_grp;
static lck_grp_attr_t		zleak_lock_grp_attr;

/*
 * Initializes the zone leak monitor.  Called from zone_init()
 */
static void
zleak_init(vm_size_t max_zonemap_size)
{
	char		scratch_buf[16];
	boolean_t	zleak_enable_flag = FALSE;

	zleak_max_zonemap_size = max_zonemap_size;
	zleak_global_tracking_threshold = max_zonemap_size / 2;
	zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;

	/* -zleakoff (flag to disable zone leak monitor) */
	if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	} else {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	}

	/* zfactor=XXXX (override how often to sample the zone allocator) */
	if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
		printf("Zone leak factor override: %u\n", zleak_sample_factor);
	}

	/* zleak-allocs=XXXX (override number of buckets in zallocations) */
	if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
		printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets);
		/* uses 'is power of 2' trick: ((0x01000 & 0x00FFF) == 0) */
		if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!\n");
		}
	}

	/* zleak-traces=XXXX (override number of buckets in ztraces) */
	if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
		printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets);
		/* uses 'is power of 2' trick: ((0x01000 & 0x00FFF) == 0) */
		if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!\n");
		}
	}

	/* allocate the zleak_lock */
	lck_grp_attr_setdefault(&zleak_lock_grp_attr);
	lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
	lck_attr_setdefault(&zleak_lock_attr);
	lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);

	if (zleak_enable_flag) {
		zleak_state = ZLEAK_STATE_ENABLED;
	}
}

#if CONFIG_ZLEAKS

/*
 * Support for kern.zleak.active sysctl - a simplified
 * version of the zleak_state variable.
 */
int
get_zleak_state(void)
{
	if (zleak_state & ZLEAK_STATE_FAILED)
		return (-1);
	if (zleak_state & ZLEAK_STATE_ACTIVE)
		return (1);
	return (0);
}

#endif


kern_return_t
zleak_activate(void)
{
	kern_return_t retval;
	vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
	vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
	void *allocations_ptr = NULL;
	void *traces_ptr = NULL;

	/* Only one thread attempts to activate at a time */
	if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
		return KERN_SUCCESS;
	}

	/* Indicate that we're doing the setup */
	lck_spin_lock(&zleak_lock);
	if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
		lck_spin_unlock(&zleak_lock);
		return KERN_SUCCESS;
	}

	zleak_state |= ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	/* Allocate and zero tables */
	retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_OSFMK);
	if (retval != KERN_SUCCESS) {
		goto fail;
	}

	retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_OSFMK);
	if (retval != KERN_SUCCESS) {
		goto fail;
	}

	bzero(allocations_ptr, z_alloc_size);
	bzero(traces_ptr, z_trace_size);

	/* Everything's set.  Install tables, mark active. */
	zallocations = allocations_ptr;
	ztraces = traces_ptr;

	/*
	 * Initialize the top_ztrace to the first entry in ztraces,
	 * so we don't have to check for null in zleak_log
	 */
	top_ztrace = &ztraces[0];

	/*
	 * Note that we do need a barrier between installing
	 * the tables and setting the active flag, because the zfree()
	 * path accesses the table without a lock if we're active.
	 */
	lck_spin_lock(&zleak_lock);
	zleak_state |= ZLEAK_STATE_ACTIVE;
	zleak_state &= ~ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	return KERN_SUCCESS;

fail:
	/*
	 * If we fail to allocate memory, don't further tax
	 * the system by trying again.
	 */
	lck_spin_lock(&zleak_lock);
	zleak_state |= ZLEAK_STATE_FAILED;
	zleak_state &= ~ZLEAK_STATE_ACTIVATING;
	lck_spin_unlock(&zleak_lock);

	if (allocations_ptr != NULL) {
		kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
	}

	if (traces_ptr != NULL) {
		kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
	}

	return retval;
}

/*
 * TODO: What about allocations that never get deallocated,
 * especially ones with unique backtraces? Should we wait to record
 * until after boot has completed?
 * (How many persistent zallocs are there?)
 */

/*
 * This function records the allocation in the allocations table,
 * and stores the associated backtrace in the traces table
 * (or just increments the refcount if the trace is already recorded)
 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
 * the associated trace's refcount is decremented.
 * If the trace slot is in use, it returns.
 * The refcount is incremented by the amount of memory the allocation consumes.
 * The return value indicates whether to try again next time.
 */
static boolean_t
zleak_log(uintptr_t* bt,
	  uintptr_t addr,
	  uint32_t depth,
	  vm_size_t allocation_size)
{
	/* Quit if there's someone else modifying the hash tables */
	if (!lck_spin_try_lock(&zleak_lock)) {
		z_total_conflicts++;
		return FALSE;
	}

	struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];

	uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
	struct ztrace* trace = &ztraces[trace_index];

	allocation->za_hit_count++;
	trace->zt_hit_count++;

	/*
	 * If the allocation bucket we want to be in is occupied, and if the occupier
	 * has the same trace as us, just bail.
	 */
	if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
		z_alloc_collisions++;

		lck_spin_unlock(&zleak_lock);
		return TRUE;
	}

	/* STEP 1: Store the backtrace in the traces array. */
	/* A size of zero indicates that the trace bucket is free. */

	if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0) {
		/*
		 * Different unique trace with same hash!
		 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
		 * and get out of the way for later chances
		 */
		trace->zt_collisions++;
		z_trace_collisions++;

		lck_spin_unlock(&zleak_lock);
		return TRUE;
	} else if (trace->zt_size > 0) {
		/* Same trace, already added, so increment refcount */
		trace->zt_size += allocation_size;
	} else {
		/* Found an unused trace bucket, record the trace here! */
		if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
			z_trace_overwrites++;

		z_trace_recorded++;
		trace->zt_size = allocation_size;
		memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)));

		trace->zt_depth      = depth;
		trace->zt_collisions = 0;
	}

	/* STEP 2: Store the allocation record in the allocations array. */

	if (allocation->za_element != (uintptr_t) 0) {
		/*
		 * Straight up replace any allocation record that was there.  We don't want to do the work
		 * to preserve the allocation entries that were there, because we only record a subset of the
		 * allocations anyways.
		 */

		z_alloc_collisions++;

		struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
		/* Knock off old allocation's size, not the new allocation */
		associated_trace->zt_size -= allocation->za_size;
	} else if (allocation->za_trace_index != 0) {
		/* Slot previously used but not currently in use */
		z_alloc_overwrites++;
	}

	allocation->za_element     = addr;
	allocation->za_trace_index = trace_index;
	allocation->za_size        = allocation_size;

	z_alloc_recorded++;

	if (top_ztrace->zt_size < trace->zt_size)
		top_ztrace = trace;

	lck_spin_unlock(&zleak_lock);
	return TRUE;
}

/*
 * Free the allocation record and release the stacktrace.
 * This should be as fast as possible because it will be called for every free.
 */
static void
zleak_free(uintptr_t addr,
	   vm_size_t allocation_size)
{
	if (addr == (uintptr_t) 0)
		return;

	struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];

	/* Double-checked locking: check to find out if we're interested, lock, check to make
	 * sure it hasn't changed, then modify it, and release the lock.
	 */

	if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
		/* if the allocation was the one, grab the lock, check again, then delete it */
		lck_spin_lock(&zleak_lock);

		if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
			struct ztrace *trace;

			/* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
			if (allocation->za_size != allocation_size) {
				panic("Freeing as size %lu memory that was allocated with size %lu\n",
				      (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
			}

			trace = &ztraces[allocation->za_trace_index];

			/* size of 0 indicates trace bucket is unused */
			if (trace->zt_size > 0) {
				trace->zt_size -= allocation_size;
			}

			/* A NULL element means the allocation bucket is unused */
			allocation->za_element = 0;
		}
		lck_spin_unlock(&zleak_lock);
	}
}
1521
1522 #endif /* CONFIG_ZLEAKS */
1523
1524 /* These functions outside of CONFIG_ZLEAKS because they are also used in
1525 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
1526 */
1527
1528 /*
1529 * This function captures a backtrace from the current stack and
1530 * returns the number of frames captured, limited by max_frames.
1531 * It's fast because it does no checking to make sure there isn't bad data.
1532 * Since it's only called from threads that we're going to keep executing,
1533 * if there's bad data we were going to die eventually.
1534 * If this function is inlined, it doesn't record the frame of the function it's inside.
1535 * (because there's no stack frame!)
1536 */
1537
1538 uint32_t
1539 fastbacktrace(uintptr_t* bt, uint32_t max_frames)
1540 {
1541 uintptr_t* frameptr = NULL, *frameptr_next = NULL;
1542 uintptr_t retaddr = 0;
1543 uint32_t frame_index = 0, frames = 0;
1544 uintptr_t kstackb, kstackt;
1545 thread_t cthread = current_thread();
1546
1547 if (__improbable(cthread == NULL))
1548 return 0;
1549
1550 kstackb = cthread->kernel_stack;
1551 kstackt = kstackb + kernel_stack_size;
1552 /* Load stack frame pointer (EBP on x86) into frameptr */
1553 frameptr = __builtin_frame_address(0);
1554 if (((uintptr_t)frameptr > kstackt) || ((uintptr_t)frameptr < kstackb))
1555 frameptr = NULL;
1556
1557 while (frameptr != NULL && frame_index < max_frames ) {
1558 /* Next frame pointer is pointed to by the previous one */
1559 frameptr_next = (uintptr_t*) *frameptr;
1560
1561 /* Bail if we see a zero in the stack frame; that means we've reached the top of the stack */
1562 /* That also means the return address is worthless, so don't record it */
1563 if (frameptr_next == NULL)
1564 break;
1565 /* Verify thread stack bounds */
1566 if (((uintptr_t)frameptr_next > kstackt) || ((uintptr_t)frameptr_next < kstackb))
1567 break;
1568 /* Pull return address from one spot above the frame pointer */
1569 retaddr = *(frameptr + 1);
1570
1571 /* Store it in the backtrace array */
1572 bt[frame_index++] = retaddr;
1573
1574 frameptr = frameptr_next;
1575 }
1576
1577 /* Save the number of frames captured for return value */
1578 frames = frame_index;
1579
1580 /* Fill in the rest of the backtrace with zeros */
1581 while (frame_index < max_frames)
1582 bt[frame_index++] = 0;
1583
1584 return frames;
1585 }
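
/*
 * A minimal usage sketch (illustrative only, not compiled): this mirrors
 * how the zleak path in zalloc_internal()/zleak_log() pairs fastbacktrace()
 * with hashbacktrace() below, assuming zleak_trace_buckets is a power of 2:
 *
 *	uintptr_t bt[MAX_ZTRACE_DEPTH];
 *	uint32_t depth = fastbacktrace(bt, MAX_ZTRACE_DEPTH);
 *	uint32_t bucket = hashbacktrace(bt, depth, zleak_trace_buckets);
 */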
1586
1587 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
1588 uintptr_t
1589 hash_mix(uintptr_t x)
1590 {
1591 #ifndef __LP64__
1592 x += ~(x << 15);
1593 x ^= (x >> 10);
1594 x += (x << 3 );
1595 x ^= (x >> 6 );
1596 x += ~(x << 11);
1597 x ^= (x >> 16);
1598 #else
1599 x += ~(x << 32);
1600 x ^= (x >> 22);
1601 x += ~(x << 13);
1602 x ^= (x >> 8 );
1603 x += (x << 3 );
1604 x ^= (x >> 15);
1605 x += ~(x << 27);
1606 x ^= (x >> 31);
1607 #endif
1608 return x;
1609 }
1610
1611 uint32_t
1612 hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
1613 {
1614
1615 uintptr_t hash = 0;
1616 uintptr_t mask = max_size - 1;
1617
1618 while (depth) {
1619 hash += bt[--depth];
1620 }
1621
1622 hash = hash_mix(hash) & mask;
1623
1624 assert(hash < max_size);
1625
1626 return (uint32_t) hash;
1627 }
1628
1629 /*
1630 * TODO: Determine how well distributed this is
1631  * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF, which makes a perfect bitmask
1632 */
1633 uint32_t
1634 hashaddr(uintptr_t pt, uint32_t max_size)
1635 {
1636 uintptr_t hash = 0;
1637 uintptr_t mask = max_size - 1;
1638
1639 hash = hash_mix(pt) & mask;
1640
1641 assert(hash < max_size);
1642
1643 return (uint32_t) hash;
1644 }
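
/*
 * For example (hypothetical size): with max_size == 0x10000 the mask is
 * 0xFFFF, so hashaddr() reduces the mixed address to one of 65536 buckets.
 * This is how zleak_free() above locates its zallocations slot, via
 * hashaddr(addr, zleak_alloc_buckets).
 */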
1645
1646 /* End of all leak-detection code */
1647 #pragma mark -
1648
1649 /*
1650 * zinit initializes a new zone. The zone data structures themselves
1651 * are stored in a zone, which is initially a static structure that
1652 * is initialized by zone_init.
1653 */
1654 zone_t
1655 zinit(
1656 vm_size_t size, /* the size of an element */
1657 vm_size_t max, /* maximum memory to use */
1658 vm_size_t alloc, /* allocation size */
1659 const char *name) /* a name for the zone */
1660 {
1661 zone_t z;
1662 boolean_t use_page_list = FALSE;
1663
1664 if (zone_zone == ZONE_NULL) {
1665
1666 z = (struct zone *)zdata;
1667 /* special handling in zcram() because the first element is being used */
1668 } else
1669 z = (zone_t) zalloc(zone_zone);
1670
1671 if (z == ZONE_NULL)
1672 return(ZONE_NULL);
1673
1674 /* Zone elements must fit both a next pointer and a backup pointer */
1675 vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2;
1676 if (size < minimum_element_size)
1677 size = minimum_element_size;
1678
1679 /*
1680 * Round element size to a multiple of sizeof(pointer)
1681 * This also enforces that allocations will be aligned on pointer boundaries
1682 */
1683 size = ((size-1) + sizeof(vm_offset_t)) -
1684 ((size-1) % sizeof(vm_offset_t));
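
	/* Worked example (LP64, sizeof(vm_offset_t) == 8): a request of 20
	 * bytes becomes ((20-1) + 8) - ((20-1) % 8) == 27 - 3 == 24, while
	 * a request of 24 stays 24: the formula rounds up to the next
	 * multiple of the pointer size.
	 */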
1685
1686 if (alloc == 0)
1687 alloc = PAGE_SIZE;
1688
1689 alloc = round_page(alloc);
1690 max = round_page(max);
1691
1692 /*
1693  * We look for an allocation size with less than 1% waste,
1694  * up to 5 pages in size.
1695  * Otherwise, we look for the allocation size with the least
1696  * fragmentation in the range of 1 - 5 pages.
1697  * This size will be used unless
1698  * the caller's suggestion is larger AND has less fragmentation.
1699 */
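	/*
	 * Worked example (assuming 4K pages): for a 72-byte element, one page
	 * wastes 4096 % 72 == 64 bytes (not < 4096/100 == 40), but two pages
	 * waste 8192 % 72 == 56 bytes (< 8192/100 == 81), so the loop below
	 * settles on a 2-page allocation holding 113 elements.
	 */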
1700 #if ZONE_ALIAS_ADDR
1701 /* Favor PAGE_SIZE allocations unless we waste >10% space */
1702 if ((size < PAGE_SIZE) && (PAGE_SIZE % size <= PAGE_SIZE / 10))
1703 alloc = PAGE_SIZE;
1704 else
1705 #endif
1706 #if defined(__LP64__)
1707 if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
1708 #endif
1709 {
1710 vm_size_t best, waste; unsigned int i;
1711 best = PAGE_SIZE;
1712 waste = best % size;
1713
1714 for (i = 1; i <= 5; i++) {
1715 vm_size_t tsize, twaste;
1716
1717 tsize = i * PAGE_SIZE;
1718
1719 if ((tsize % size) < (tsize / 100)) {
1720 alloc = tsize;
1721 goto use_this_allocation;
1722 }
1723 twaste = tsize % size;
1724 if (twaste < waste)
1725 best = tsize, waste = twaste;
1726 }
1727 if (alloc <= best || (alloc % size >= waste))
1728 alloc = best;
1729 }
1730 use_this_allocation:
1731 if (max && (max < alloc))
1732 max = alloc;
1733
1734 /*
1735 * Opt into page list tracking if we can reliably map an allocation
1736 * to its page_metadata, and if the wastage in the tail of
1737 * the allocation is not too large
1738 */
1739
1740 /* zone_zone can't use page metadata since the page metadata will overwrite zone metadata */
1741 if (alloc == PAGE_SIZE && zone_zone != ZONE_NULL) {
1742 vm_offset_t first_element_offset;
1743 size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);
1744
1745 if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) {
1746 first_element_offset = zone_page_metadata_size;
1747 } else {
1748 first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
1749 }
1750
1751 if (((PAGE_SIZE - first_element_offset) % size) <= PAGE_SIZE / 100) {
1752 use_page_list = TRUE;
1753 }
1754 }
1755
1756 z->free_elements = NULL;
1757 queue_init(&z->pages.any_free_foreign);
1758 queue_init(&z->pages.all_free);
1759 queue_init(&z->pages.intermediate);
1760 queue_init(&z->pages.all_used);
1761 z->cur_size = 0;
1762 z->page_count = 0;
1763 z->max_size = max;
1764 z->elem_size = size;
1765 z->alloc_size = alloc;
1766 z->zone_name = name;
1767 z->count = 0;
1768 z->countfree = 0;
1769 z->sum_count = 0LL;
1770 z->doing_alloc_without_vm_priv = FALSE;
1771 z->doing_alloc_with_vm_priv = FALSE;
1772 z->doing_gc = FALSE;
1773 z->exhaustible = FALSE;
1774 z->collectable = TRUE;
1775 z->allows_foreign = FALSE;
1776 z->expandable = TRUE;
1777 z->waiting = FALSE;
1778 z->async_pending = FALSE;
1779 z->caller_acct = TRUE;
1780 z->noencrypt = FALSE;
1781 z->no_callout = FALSE;
1782 z->async_prio_refill = FALSE;
1783 z->gzalloc_exempt = FALSE;
1784 z->alignment_required = FALSE;
1785 z->use_page_list = use_page_list;
1786 z->prio_refill_watermark = 0;
1787 z->zone_replenish_thread = NULL;
1788 z->zp_count = 0;
1789 #if CONFIG_ZLEAKS
1790 z->zleak_capture = 0;
1791 z->zleak_on = FALSE;
1792 #endif /* CONFIG_ZLEAKS */
1793
1794 #if ZONE_DEBUG
1795 z->active_zones.next = z->active_zones.prev = NULL;
1796 zone_debug_enable(z);
1797 #endif /* ZONE_DEBUG */
1798 lock_zone_init(z);
1799
1800 /*
1801 * Add the zone to the all-zones list.
1802 * If we are tracking zone info per task, and we have
1803 * already used all the available stat slots, then keep
1804 * using the overflow zone slot.
1805 */
1806 z->next_zone = ZONE_NULL;
1807 simple_lock(&all_zones_lock);
1808 *last_zone = z;
1809 last_zone = &z->next_zone;
1810 z->index = num_zones;
1811 if (zinfo_per_task) {
1812 if (num_zones > ZONES_MAX)
1813 z->index = ZONES_MAX;
1814 }
1815 num_zones++;
1816 simple_unlock(&all_zones_lock);
1817
1818 /*
1819 * Check if we should be logging this zone. If so, remember the zone pointer.
1820 */
1821 if (log_this_zone(z->zone_name, zone_name_to_log)) {
1822 zone_of_interest = z;
1823 }
1824
1825 /*
1826 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1827 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to
1828 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1829 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1830 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1831 * right now.
1832 */
1833 if (zone_of_interest != NULL && zlog_btlog == NULL && kmem_alloc_ready) {
1834 zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, NULL, NULL, NULL);
1835 if (zlog_btlog) {
1836 printf("zone: logging started for zone %s\n", zone_of_interest->zone_name);
1837 } else {
1838 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1839 zone_of_interest = NULL;
1840 }
1841 }
1842 #if CONFIG_GZALLOC
1843 gzalloc_zone_init(z);
1844 #endif
1845 return(z);
1846 }
1847 unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count;
1848
1849 static void zone_replenish_thread(zone_t);
1850
1851 /* High-priority, VM-privileged thread used to asynchronously refill a designated
1852 * zone, such as the reserved VM map entry zone.
1853 */
1854 static void zone_replenish_thread(zone_t z) {
1855 vm_size_t free_size;
1856 current_thread()->options |= TH_OPT_VMPRIV;
1857
1858 for (;;) {
1859 lock_zone(z);
1860 assert(z->prio_refill_watermark != 0);
1861 while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
1862 assert(z->doing_alloc_without_vm_priv == FALSE);
1863 assert(z->doing_alloc_with_vm_priv == FALSE);
1864 assert(z->async_prio_refill == TRUE);
1865
1866 unlock_zone(z);
1867 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
1868 vm_offset_t space, alloc_size;
1869 kern_return_t kr;
1870
1871 if (vm_pool_low())
1872 alloc_size = round_page(z->elem_size);
1873 else
1874 alloc_size = z->alloc_size;
1875
1876 if (z->noencrypt)
1877 zflags |= KMA_NOENCRYPT;
1878
1879 kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
1880
1881 if (kr == KERN_SUCCESS) {
1882 #if ZONE_ALIAS_ADDR
1883 if (alloc_size == PAGE_SIZE)
1884 space = zone_alias_addr(space);
1885 #endif
1886 zcram(z, space, alloc_size);
1887 } else if (kr == KERN_RESOURCE_SHORTAGE) {
1888 VM_PAGE_WAIT();
1889 } else if (kr == KERN_NO_SPACE) {
1890 kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
1891 if (kr == KERN_SUCCESS) {
1892 #if ZONE_ALIAS_ADDR
1893 if (alloc_size == PAGE_SIZE)
1894 space = zone_alias_addr(space);
1895 #endif
1896 zcram(z, space, alloc_size);
1897 } else {
1898 assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
1899 thread_block(THREAD_CONTINUE_NULL);
1900 }
1901 }
1902
1903 lock_zone(z);
1904 zone_replenish_loops++;
1905 }
1906
1907 unlock_zone(z);
1908 /* Signal any potential throttled consumers, terminating
1909 * their timer-bounded waits.
1910 */
1911 thread_wakeup(z);
1912
1913 assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
1914 thread_block(THREAD_CONTINUE_NULL);
1915 zone_replenish_wakeups++;
1916 }
1917 }
1918
1919 void
1920 zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
1921 z->prio_refill_watermark = low_water_mark;
1922
1923 z->async_prio_refill = TRUE;
1924 OSMemoryBarrier();
1925 kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
1926
1927 if (tres != KERN_SUCCESS) {
1928 panic("zone_prio_refill_configure, thread create: 0x%x", tres);
1929 }
1930
1931 thread_deallocate(z->zone_replenish_thread);
1932 }
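
/*
 * Usage sketch (illustrative only, not compiled; my_reserved_zone is
 * hypothetical): a subsystem owning a reserved zone might keep a bank of
 * 256 elements on hand:
 *
 *	zone_prio_refill_configure(my_reserved_zone, 256);
 *
 * The replenish thread then refills the zone whenever its free count
 * drops below 256 elements.
 */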
1933
1934 /*
1935 * Boolean Random Number Generator for generating booleans to randomize
1936 * the order of elements in newly zcram()'ed memory. The algorithm is a
1937 * modified version of the KISS RNG proposed in the paper:
1938 * http://stat.fsu.edu/techreports/M802.pdf
1939  * The modifications have been documented in the technical
1940  * paper from UCL:
1941 * http://www0.cs.ucl.ac.uk/staff/d.jones/GoodPracticeRNG.pdf
1942 */
1943
1944 static void random_bool_gen_entropy(
1945 int *buffer,
1946 int count)
1947 {
1948
1949 int i, t;
1950 simple_lock(&bool_gen_lock);
1951 for (i = 0; i < count; i++) {
1952 bool_gen_seed[1] ^= (bool_gen_seed[1] << 5);
1953 bool_gen_seed[1] ^= (bool_gen_seed[1] >> 7);
1954 bool_gen_seed[1] ^= (bool_gen_seed[1] << 22);
1955 t = bool_gen_seed[2] + bool_gen_seed[3] + bool_gen_global;
1956 bool_gen_seed[2] = bool_gen_seed[3];
1957 bool_gen_global = t < 0;
1958 bool_gen_seed[3] = t & 2147483647;
1959 bool_gen_seed[0] += 1411392427;
1960 buffer[i] = (bool_gen_seed[0] + bool_gen_seed[1] + bool_gen_seed[3]);
1961 }
1962 simple_unlock(&bool_gen_lock);
1963 }
1964
1965 static boolean_t random_bool_gen(
1966 int *buffer,
1967 int index,
1968 int bufsize)
1969 {
1970 int valindex, bitpos;
1971 valindex = (index / (8 * sizeof(int))) % bufsize;
1972 bitpos = index % (8 * sizeof(int));
1973 return (boolean_t)(buffer[valindex] & (1 << bitpos));
1974 }
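
/*
 * For example, with 32-bit ints and bufsize > 1, index 37 selects
 * bit 37 % 32 == 5 of buffer[(37 / 32) % bufsize] == buffer[1], so
 * consecutive indices walk the entropy buffer one bit at a time.
 */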
1975
1976 static void
1977 random_free_to_zone(
1978 zone_t zone,
1979 vm_offset_t newmem,
1980 vm_offset_t first_element_offset,
1981 int element_count,
1982 boolean_t from_zm,
1983 int *entropy_buffer)
1984 {
1985 vm_offset_t last_element_offset;
1986 vm_offset_t element_addr;
1987 vm_size_t elem_size;
1988 int index;
1989
1990 elem_size = zone->elem_size;
1991 last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size);
1992 for (index = 0; index < element_count; index++) {
1993 assert(first_element_offset <= last_element_offset);
1994 if (random_bool_gen(entropy_buffer, index, MAX_ENTROPY_PER_ZCRAM)) {
1995 element_addr = newmem + first_element_offset;
1996 first_element_offset += elem_size;
1997 } else {
1998 element_addr = newmem + last_element_offset;
1999 last_element_offset -= elem_size;
2000 }
2001 if (element_addr != (vm_offset_t)zone) {
2002 zone->count++; /* compensate for free_to_zone */
2003 free_to_zone(zone, element_addr, FALSE);
2004 }
2005 if (!zone->use_page_list && from_zm) {
2006 zone_page_alloc(element_addr, elem_size);
2007 }
2008 zone->cur_size += elem_size;
2009 }
2010 }
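
/*
 * For example, with 4 elements and entropy bits 1, 0, 1, ... the elements
 * are freed in the order 0, 3, 1, 2: a set bit consumes from the front
 * cursor, a clear bit from the back, randomizing the freelist order.
 */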
2011
2012 /*
2013 * Cram the given memory into the specified zone. Update the zone page count accordingly.
2014 */
2015 void
2016 zcram(
2017 zone_t zone,
2018 vm_offset_t newmem,
2019 vm_size_t size)
2020 {
2021 vm_size_t elem_size;
2022 boolean_t from_zm = FALSE;
2023 vm_offset_t first_element_offset;
2024 int element_count;
2025 int entropy_buffer[MAX_ENTROPY_PER_ZCRAM];
2026
2027 /* Basic sanity checks */
2028 assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
2029 assert(!zone->collectable || zone->allows_foreign
2030 || (from_zone_map(newmem, size)));
2031
2032 elem_size = zone->elem_size;
2033
2034 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(zone), size, 0, 0, 0);
2035
2036 if (from_zone_map(newmem, size))
2037 from_zm = TRUE;
2038
2039 if (zalloc_debug & ZALLOC_DEBUG_ZCRAM)
2040 kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name,
2041 (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size);
2042
2043 if (from_zm && !zone->use_page_list)
2044 zone_page_init(newmem, size);
2045
2046 ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE));
2047
2048 random_bool_gen_entropy(entropy_buffer, MAX_ENTROPY_PER_ZCRAM);
2049
2050 lock_zone(zone);
2051
2052 if (zone->use_page_list) {
2053 struct zone_page_metadata *page_metadata;
2054 size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);
2055
2056 assert((newmem & PAGE_MASK) == 0);
2057 assert((size & PAGE_MASK) == 0);
2058 for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
2059
2060 page_metadata = (struct zone_page_metadata *)(newmem);
2061
2062 page_metadata->pages.next = NULL;
2063 page_metadata->pages.prev = NULL;
2064 page_metadata->elements = NULL;
2065 page_metadata->zone = zone;
2066 page_metadata->alloc_count = 0;
2067 page_metadata->free_count = 0;
2068
2069 enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_metadata);
2070
2071 if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0){
2072 first_element_offset = zone_page_metadata_size;
2073 } else {
2074 first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
2075 }
2076 element_count = (int)((PAGE_SIZE - first_element_offset) / elem_size);
2077 page_metadata->alloc_count += element_count;
2078 random_free_to_zone(zone, newmem, first_element_offset, element_count, from_zm, entropy_buffer);
2079 }
2080 } else {
2081 first_element_offset = 0;
2082 element_count = (int)((size - first_element_offset) / elem_size);
2083 random_free_to_zone(zone, newmem, first_element_offset, element_count, from_zm, entropy_buffer);
2084 }
2085 unlock_zone(zone);
2086
2087 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(zone), 0, 0, 0, 0);
2088
2089 }
2090
2091
2092 /*
2093 * Steal memory for the zone package. Called from
2094 * vm_page_bootstrap().
2095 */
2096 void
2097 zone_steal_memory(void)
2098 {
2099 #if CONFIG_GZALLOC
2100 gzalloc_configure();
2101 #endif
2102 /* Request enough early memory to get to the pmap zone */
2103 zdata_size = 12 * sizeof(struct zone);
2104 zdata_size = round_page(zdata_size);
2105 zdata = (vm_offset_t)pmap_steal_memory(zdata_size);
2106 }
2107
2108
2109 /*
2110 * Fill a zone with enough memory to contain at least nelem elements.
2111 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
2112 * Return the number of elements actually put into the zone, which may
2113 * be more than the caller asked for since the memory allocation is
2114 * rounded up to a full page.
2115 */
2116 int
2117 zfill(
2118 zone_t zone,
2119 int nelem)
2120 {
2121 kern_return_t kr;
2122 vm_size_t size;
2123 vm_offset_t memory;
2124 int nalloc;
2125
2126 assert(nelem > 0);
2127 if (nelem <= 0)
2128 return 0;
2129 size = nelem * zone->elem_size;
2130 size = round_page(size);
2131 kr = kmem_alloc_kobject(kernel_map, &memory, size, VM_KERN_MEMORY_ZONE);
2132 if (kr != KERN_SUCCESS)
2133 return 0;
2134
2135 zone_change(zone, Z_FOREIGN, TRUE);
2136 zcram(zone, memory, size);
2137 nalloc = (int)(size / zone->elem_size);
2138 assert(nalloc >= nelem);
2139
2140 return nalloc;
2141 }
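
/*
 * Usage sketch (illustrative only, not compiled; my_zone is hypothetical):
 * pre-populate a zone with room for at least 100 elements. The return
 * value may exceed the request because the allocation is rounded up to
 * whole pages:
 *
 *	int got = zfill(my_zone, 100);
 *	assert(got >= 100);
 */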
2142
2143 /*
2144 * Initialize the "zone of zones" which uses fixed memory allocated
2145 * earlier in memory initialization. zone_bootstrap is called
2146 * before zone_init.
2147 */
2148 void
2149 zone_bootstrap(void)
2150 {
2151 char temp_buf[16];
2152 unsigned int i;
2153
2154 if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof(temp_buf))) {
2155 zinfo_per_task = TRUE;
2156 }
2157
2158 if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug)))
2159 zalloc_debug = 0;
2160
2161 /* Set up zone element poisoning */
2162 zp_init();
2163
2164 /* Seed the random boolean generator for elements in zone free list */
2165 for (i = 0; i < RANDOM_BOOL_GEN_SEED_COUNT; i++) {
2166 bool_gen_seed[i] = (unsigned int)early_random();
2167 }
2168 simple_lock_init(&bool_gen_lock, 0);
2169
2170 /* should zlog log to debug zone corruption instead of leaks? */
2171 if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
2172 corruption_debug_flag = TRUE;
2173 }
2174
2175 /*
2176  * Check for and set up zone leak detection if requested via boot-args. We recognize two
2177 * boot-args:
2178 *
2179 * zlog=<zone_to_log>
2180 * zrecs=<num_records_in_log>
2181 *
2182 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
2183 * control the size of the log. If zrecs is not specified, a default value is used.
2184 */
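
	/*
	 * For example (hypothetical values), booting with:
	 *	zlog=kalloc.256 zrecs=8000
	 * would log up to 8000 alloc/free records for the kalloc.256 zone.
	 */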
2185
2186 if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
2187 if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
2188
2189 /*
2190 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
2191 * This prevents accidentally hogging too much kernel memory and making the system
2192 * unusable.
2193 */
2194
2195 log_records = MIN(ZRECORDS_MAX, log_records);
2196
2197 } else {
2198 log_records = ZRECORDS_DEFAULT;
2199 }
2200 }
2201
2202 simple_lock_init(&all_zones_lock, 0);
2203
2204 first_zone = ZONE_NULL;
2205 last_zone = &first_zone;
2206 num_zones = 0;
2207 thread_call_setup(&call_async_alloc, zalloc_async, NULL);
2208
2209 /* assertion: nobody else called zinit before us */
2210 assert(zone_zone == ZONE_NULL);
2211
2212 /* initializing global lock group for zones */
2213 lck_grp_attr_setdefault(&zone_locks_grp_attr);
2214 lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr);
2215
2216 zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
2217 sizeof(struct zone), "zones");
2218 zone_change(zone_zone, Z_COLLECT, FALSE);
2219 zone_change(zone_zone, Z_CALLERACCT, FALSE);
2220 zone_change(zone_zone, Z_NOENCRYPT, TRUE);
2221
2222 zcram(zone_zone, zdata, zdata_size);
2223 VM_PAGE_MOVE_STOLEN(atop_64(zdata_size));
2224
2225 /* initialize fake zones and zone info if tracking by task */
2226 if (zinfo_per_task) {
2227 vm_size_t zisize = sizeof(zinfo_usage_store_t) * ZINFO_SLOTS;
2228
2229 for (i = 0; i < num_fake_zones; i++)
2230 fake_zones[i].init(ZINFO_SLOTS - num_fake_zones + i);
2231 zinfo_zone = zinit(zisize, zisize * CONFIG_TASK_MAX,
2232 zisize, "per task zinfo");
2233 zone_change(zinfo_zone, Z_CALLERACCT, FALSE);
2234 }
2235 }
2236
2237 void
2238 zinfo_task_init(task_t task)
2239 {
2240 if (zinfo_per_task) {
2241 task->tkm_zinfo = zalloc(zinfo_zone);
2242 memset(task->tkm_zinfo, 0, sizeof(zinfo_usage_store_t) * ZINFO_SLOTS);
2243 } else {
2244 task->tkm_zinfo = NULL;
2245 }
2246 }
2247
2248 void
2249 zinfo_task_free(task_t task)
2250 {
2251 assert(task != kernel_task);
2252 if (task->tkm_zinfo != NULL) {
2253 zfree(zinfo_zone, task->tkm_zinfo);
2254 task->tkm_zinfo = NULL;
2255 }
2256 }
2257
2258 /* Global initialization of Zone Allocator.
2259 * Runs after zone_bootstrap.
2260 */
2261 void
2262 zone_init(
2263 vm_size_t max_zonemap_size)
2264 {
2265 kern_return_t retval;
2266 vm_offset_t zone_min;
2267 vm_offset_t zone_max;
2268
2269 retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
2270 FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(VM_KERN_MEMORY_ZONE),
2271 &zone_map);
2272
2273 if (retval != KERN_SUCCESS)
2274 panic("zone_init: kmem_suballoc failed");
2275 zone_max = zone_min + round_page(max_zonemap_size);
2276 #if CONFIG_GZALLOC
2277 gzalloc_init(max_zonemap_size);
2278 #endif
2279 /*
2280 * Setup garbage collection information:
2281 */
2282 zone_map_min_address = zone_min;
2283 zone_map_max_address = zone_max;
2284
2285 #if defined(__LP64__)
2286 /*
2287  * Ensure that any vm_page_t that gets created from
2288  * the vm_page zone can be packed properly (see vm_page.h
2289  * for the packing requirements).
2290 */
2291 if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_min_address)) != (vm_page_t)zone_map_min_address)
2292 panic("VM_PAGE_PACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address);
2293
2294 if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address)) != (vm_page_t)zone_map_max_address)
2295 panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);
2296 #endif
2297
2298 zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
2299 zone_page_table_used_size = sizeof(zone_page_table);
2300
2301 zone_page_table_second_level_size = 1;
2302 zone_page_table_second_level_shift_amount = 0;
2303
2304 /*
2305 * Find the power of 2 for the second level that allows
2306 * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
2307 * slots.
2308 */
2309 while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
2310 zone_page_table_second_level_size <<= 1;
2311 zone_page_table_second_level_shift_amount++;
2312 }
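
	/*
	 * Worked example (hypothetical sizes): if zone_pages were 2^20 and
	 * ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE were 2^13, the loop above would
	 * settle on a second level of 2^7 entries (shift amount 7), since
	 * (2^20 - 1) >> 7 just fits in 2^13 first-level slots.
	 */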
2313
2314 lck_grp_attr_setdefault(&zone_gc_lck_grp_attr);
2315 lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr);
2316 lck_attr_setdefault(&zone_gc_lck_attr);
2317 lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr);
2318
2319 #if CONFIG_ZLEAKS
2320 /*
2321 * Initialize the zone leak monitor
2322 */
2323 zleak_init(max_zonemap_size);
2324 #endif /* CONFIG_ZLEAKS */
2325 }
2326
2327 void
2328 zone_page_table_expand(zone_page_index_t pindex)
2329 {
2330 unsigned int first_index;
2331 struct zone_page_table_entry * volatile * first_level_ptr;
2332
2333 assert(pindex < zone_pages);
2334
2335 first_index = zone_page_table_first_level_slot(pindex);
2336 first_level_ptr = &zone_page_table[first_index];
2337
2338 if (*first_level_ptr == NULL) {
2339 /*
2340 * We were able to verify the old first-level slot
2341 * had NULL, so attempt to populate it.
2342 */
2343
2344 vm_offset_t second_level_array = 0;
2345 vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
2346 zone_page_index_t i;
2347 struct zone_page_table_entry *entry_array;
2348
2349 if (kmem_alloc_kobject(zone_map, &second_level_array,
2350 second_level_size, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) {
2351 panic("zone_page_table_expand");
2352 }
2353 zone_map_table_page_count += (second_level_size / PAGE_SIZE);
2354
2355 /*
2356 * zone_gc() may scan the "zone_page_table" directly,
2357 * so make sure any slots have a valid unused state.
2358 */
2359 entry_array = (struct zone_page_table_entry *)second_level_array;
2360 for (i=0; i < zone_page_table_second_level_size; i++) {
2361 entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
2362 entry_array[i].collect_count = 0;
2363 }
2364
2365 if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
2366 /* Old slot was NULL, replaced with expanded level */
2367 OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
2368 } else {
2369 /* Old slot was not NULL, someone else expanded first */
2370 kmem_free(zone_map, second_level_array, second_level_size);
2371 zone_map_table_page_count -= (second_level_size / PAGE_SIZE);
2372 }
2373 } else {
2374 /* Old slot was not NULL, already been expanded */
2375 }
2376 }
2377
2378 struct zone_page_table_entry *
2379 zone_page_table_lookup(zone_page_index_t pindex)
2380 {
2381 unsigned int first_index = zone_page_table_first_level_slot(pindex);
2382 struct zone_page_table_entry *second_level = zone_page_table[first_index];
2383
2384 if (second_level) {
2385 return &second_level[zone_page_table_second_level_slot(pindex)];
2386 }
2387
2388 return NULL;
2389 }
2390
2391 extern volatile SInt32 kfree_nop_count;
2392
2393 #pragma mark -
2394 #pragma mark zalloc_canblock
2395
2396 /*
2397 * zalloc returns an element from the specified zone.
2398 */
2399 static void *
2400 zalloc_internal(
2401 zone_t zone,
2402 boolean_t canblock,
2403 boolean_t nopagewait)
2404 {
2405 vm_offset_t addr = 0;
2406 kern_return_t retval;
2407 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
2408 int numsaved = 0;
2409 boolean_t zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE;
2410 #if CONFIG_GZALLOC || ZONE_DEBUG
2411 boolean_t did_gzalloc = FALSE;
2412 #endif
2413 thread_t thr = current_thread();
2414 boolean_t check_poison = FALSE;
2415 boolean_t set_doing_alloc_with_vm_priv = FALSE;
2416
2417 #if CONFIG_ZLEAKS
2418 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
2419 #endif /* CONFIG_ZLEAKS */
2420
2421 assert(zone != ZONE_NULL);
2422
2423 #if CONFIG_GZALLOC
2424 addr = gzalloc_alloc(zone, canblock);
2425 did_gzalloc = (addr != 0);
2426 #endif
2427
2428 /*
2429 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2430 */
2431 if (__improbable(DO_LOGGING(zone)))
2432 numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);
2433
2434 #if CONFIG_ZLEAKS
2435 /*
2436 * Zone leak detection: capture a backtrace every zleak_sample_factor
2437 * allocations in this zone.
2438 */
2439 if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) {
2440 /* Avoid backtracing twice if zone logging is on */
2441 if (numsaved == 0)
2442 zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
2443 else
2444 zleak_tracedepth = numsaved;
2445 }
2446 #endif /* CONFIG_ZLEAKS */
2447
2448 lock_zone(zone);
2449
2450 if (zone->async_prio_refill && zone->zone_replenish_thread) {
2451 do {
2452 vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size));
2453 vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size;
2454 zone_replenish_wakeup = (zfreec < zrefillwm);
2455 zone_alloc_throttle = (zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0);
2456
2457 if (zone_replenish_wakeup) {
2458 zone_replenish_wakeups_initiated++;
2459 unlock_zone(zone);
2460 /* Signal the potentially waiting
2461 * refill thread.
2462 */
2463 thread_wakeup(&zone->zone_replenish_thread);
2464
2465 /* Scheduling latencies etc. may prevent
2466 * the refill thread from keeping up
2467 * with demand. Throttle consumers
2468 * when we fall below half the
2469 * watermark, unless VM privileged
2470 */
2471 if (zone_alloc_throttle) {
2472 zone_replenish_throttle_count++;
2473 assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC);
2474 thread_block(THREAD_CONTINUE_NULL);
2475 }
2476 lock_zone(zone);
2477 }
2478 } while (zone_alloc_throttle == TRUE);
2479 }
2480
2481 if (__probable(addr == 0))
2482 addr = try_alloc_from_zone(zone, &check_poison);
2483
2484
2485 while ((addr == 0) && canblock) {
2486 /*
2487 * zone is empty, try to expand it
2488 *
2489  * Note that we now allow up to 2 threads (1 vm_privileged and 1 non-vm_privileged)
2490 * to expand the zone concurrently... this is necessary to avoid stalling
2491 * vm_privileged threads running critical code necessary to continue compressing/swapping
2492 * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads
2493 * waiting to acquire free pages when the vm_page_free_count is below the
2494 * vm_page_free_reserved limit.
2495 */
2496 if ((zone->doing_alloc_without_vm_priv || zone->doing_alloc_with_vm_priv) &&
2497 (((thr->options & TH_OPT_VMPRIV) == 0) || zone->doing_alloc_with_vm_priv)) {
2498 /*
2499 * This is a non-vm_privileged thread and a non-vm_privileged or
2500 * a vm_privileged thread is already expanding the zone...
2501 * OR
2502 * this is a vm_privileged thread and a vm_privileged thread is
2503 * already expanding the zone...
2504 *
2505 * In either case wait for a thread to finish, then try again.
2506 */
2507 zone->waiting = TRUE;
2508 zone_sleep(zone);
2509 } else if (zone->doing_gc) {
2510 /*
2511 * zone_gc() is running. Since we need an element
2512 * from the free list that is currently being
2513 * collected, set the waiting bit and
2514 * wait for the GC process to finish
2515 * before trying again
2516 */
2517 zone->waiting = TRUE;
2518 zone_sleep(zone);
2519 } else {
2520 vm_offset_t space;
2521 vm_size_t alloc_size;
2522 int retry = 0;
2523
2524 if ((zone->cur_size + zone->elem_size) >
2525 zone->max_size) {
2526 if (zone->exhaustible)
2527 break;
2528 if (zone->expandable) {
2529 /*
2530 * We're willing to overflow certain
2531 * zones, but not without complaining.
2532 *
2533 * This is best used in conjunction
2534 * with the collectable flag. What we
2535 * want is an assurance we can get the
2536 * memory back, assuming there's no
2537 * leak.
2538 */
2539 zone->max_size += (zone->max_size >> 1);
2540 } else {
2541 unlock_zone(zone);
2542
2543 panic_include_zprint = TRUE;
2544 #if CONFIG_ZLEAKS
2545 if (zleak_state & ZLEAK_STATE_ACTIVE)
2546 panic_include_ztrace = TRUE;
2547 #endif /* CONFIG_ZLEAKS */
2548 panic("zalloc: zone \"%s\" empty.", zone->zone_name);
2549 }
2550 }
2551 if ((thr->options & TH_OPT_VMPRIV)) {
2552 zone->doing_alloc_with_vm_priv = TRUE;
2553 set_doing_alloc_with_vm_priv = TRUE;
2554 } else {
2555 zone->doing_alloc_without_vm_priv = TRUE;
2556 }
2557 unlock_zone(zone);
2558
2559 for (;;) {
2560 int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
2561
2562 if (vm_pool_low() || retry >= 1)
2563 alloc_size =
2564 round_page(zone->elem_size);
2565 else
2566 alloc_size = zone->alloc_size;
2567
2568 if (zone->noencrypt)
2569 zflags |= KMA_NOENCRYPT;
2570
2571 retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
2572 if (retval == KERN_SUCCESS) {
2573 #if ZONE_ALIAS_ADDR
2574 if (alloc_size == PAGE_SIZE)
2575 space = zone_alias_addr(space);
2576 #endif
2577
2578 #if CONFIG_ZLEAKS
2579 if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
2580 if (zone_map->size >= zleak_global_tracking_threshold) {
2581 kern_return_t kr;
2582
2583 kr = zleak_activate();
2584 if (kr != KERN_SUCCESS) {
2585 printf("Failed to activate live zone leak debugging (%d).\n", kr);
2586 }
2587 }
2588 }
2589
2590 if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
2591 if (zone->cur_size > zleak_per_zone_tracking_threshold) {
2592 zone->zleak_on = TRUE;
2593 }
2594 }
2595 #endif /* CONFIG_ZLEAKS */
2596 zcram(zone, space, alloc_size);
2597
2598 break;
2599 } else if (retval != KERN_RESOURCE_SHORTAGE) {
2600 retry++;
2601
2602 if (retry == 2) {
2603 zone_gc(TRUE);
2604 printf("zalloc did gc\n");
2605 zone_display_zprint();
2606 }
2607 if (retry == 3) {
2608 panic_include_zprint = TRUE;
2609 #if CONFIG_ZLEAKS
2610 if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
2611 panic_include_ztrace = TRUE;
2612 }
2613 #endif /* CONFIG_ZLEAKS */
2614 if (retval == KERN_NO_SPACE) {
2615 zone_t zone_largest = zone_find_largest();
2616 panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)",
2617 zone->zone_name, zone_largest->zone_name,
2618 (unsigned long)zone_largest->cur_size, zone_largest->count);
2619
2620 }
2621 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
2622 }
2623 } else {
2624 break;
2625 }
2626 }
2627 lock_zone(zone);
2628
2629 if (set_doing_alloc_with_vm_priv == TRUE)
2630 zone->doing_alloc_with_vm_priv = FALSE;
2631 else
2632 zone->doing_alloc_without_vm_priv = FALSE;
2633
2634 if (zone->waiting) {
2635 zone->waiting = FALSE;
2636 zone_wakeup(zone);
2637 }
2638 addr = try_alloc_from_zone(zone, &check_poison);
2639 if (addr == 0 &&
2640 retval == KERN_RESOURCE_SHORTAGE) {
2641 if (nopagewait == TRUE)
2642 break; /* out of the main while loop */
2643 unlock_zone(zone);
2644
2645 VM_PAGE_WAIT();
2646 lock_zone(zone);
2647 }
2648 }
2649 if (addr == 0)
2650 addr = try_alloc_from_zone(zone, &check_poison);
2651 }
2652
2653 #if CONFIG_ZLEAKS
2654 /* Zone leak detection:
2655 * If we're sampling this allocation, add it to the zleaks hash table.
2656 */
2657 if (addr && zleak_tracedepth > 0) {
2658 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2659 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2660 /* If it failed, roll back the counter so we sample the next allocation instead. */
2661 zone->zleak_capture = zleak_sample_factor;
2662 }
2663 }
2664 #endif /* CONFIG_ZLEAKS */
2665
2666
2667 if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
2668 zone->async_pending = TRUE;
2669 unlock_zone(zone);
2670 thread_call_enter(&call_async_alloc);
2671 lock_zone(zone);
2672 addr = try_alloc_from_zone(zone, &check_poison);
2673 }
2674
2675 /*
2676 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
2677  * suspected, so this code rarely executes. We need to run this code while still holding the zone lock
2678 * since it protects the various log related data structures.
2679 */
2680
2681 if (__improbable(DO_LOGGING(zone) && addr)) {
2682 btlog_add_entry(zlog_btlog, (void *)addr, ZOP_ALLOC, (void **)zbt, numsaved);
2683 }
2684
2685 vm_offset_t inner_size = zone->elem_size;
2686
2687 #if ZONE_DEBUG
2688 if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
2689 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2690 addr += ZONE_DEBUG_OFFSET;
2691 inner_size -= ZONE_DEBUG_OFFSET;
2692 }
2693 #endif
2694
2695 unlock_zone(zone);
2696
2697 if (__improbable(check_poison && addr)) {
2698 vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1;
2699 vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr);
2700
2701 for ( ; element_cursor < backup ; element_cursor++)
2702 if (__improbable(*element_cursor != ZP_POISON))
2703 zone_element_was_modified_panic(zone,
2704 addr,
2705 *element_cursor,
2706 ZP_POISON,
2707 ((vm_offset_t)element_cursor) - addr);
2708 }
2709
2710 if (addr) {
2711 /*
2712 * Clear out the old next pointer and backup to avoid leaking the cookie
2713 * and so that only values on the freelist have a valid cookie
2714 */
2715
2716 vm_offset_t *primary = (vm_offset_t *) addr;
2717 vm_offset_t *backup = get_backup_ptr(inner_size, primary);
2718
2719 *primary = ZP_POISON;
2720 *backup = ZP_POISON;
2721 }
2722
2723 TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
2724
2725 if (addr) {
2726 task_t task;
2727 zinfo_usage_t zinfo;
2728 vm_size_t sz = zone->elem_size;
2729
2730 if (zone->caller_acct)
2731 ledger_credit(thr->t_ledger, task_ledgers.tkm_private, sz);
2732 else
2733 ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, sz);
2734
2735 if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
2736 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
2737 }
2738 return((void *)addr);
2739 }
2740
2741
2742 void *
2743 zalloc(zone_t zone)
2744 {
2745 return (zalloc_internal(zone, TRUE, FALSE));
2746 }
2747
2748 void *
2749 zalloc_noblock(zone_t zone)
2750 {
2751 return (zalloc_internal(zone, FALSE, FALSE));
2752 }
2753
2754 void *
2755 zalloc_nopagewait(zone_t zone)
2756 {
2757 return (zalloc_internal(zone, TRUE, TRUE));
2758 }
2759
2760 void *
2761 zalloc_canblock(zone_t zone, boolean_t canblock)
2762 {
2763 return (zalloc_internal(zone, canblock, FALSE));
2764 }
2765
2766
2767 void
2768 zalloc_async(
2769 __unused thread_call_param_t p0,
2770 __unused thread_call_param_t p1)
2771 {
2772 zone_t current_z = NULL, head_z;
2773 unsigned int max_zones, i;
2774 void *elt = NULL;
2775 boolean_t pending = FALSE;
2776
2777 simple_lock(&all_zones_lock);
2778 head_z = first_zone;
2779 max_zones = num_zones;
2780 simple_unlock(&all_zones_lock);
2781 current_z = head_z;
2782 for (i = 0; i < max_zones; i++) {
2783 lock_zone(current_z);
2784 if (current_z->async_pending == TRUE) {
2785 current_z->async_pending = FALSE;
2786 pending = TRUE;
2787 }
2788 unlock_zone(current_z);
2789
2790 if (pending == TRUE) {
2791 elt = zalloc_canblock(current_z, TRUE);
2792 zfree(current_z, elt);
2793 pending = FALSE;
2794 }
2795 /*
2796  * This is based on the assumption that zones never get
2797  * freed once allocated and linked.
2798  * Hence a read outside of the lock is OK.
2799 */
2800 current_z = current_z->next_zone;
2801 }
2802 }
2803
2804 /*
2805 * zget returns an element from the specified zone
2806 * and immediately returns nothing if there is nothing there.
2807 *
2808 * This form should be used when you can not block (like when
2809 * processing an interrupt).
2810 *
2811 * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
2812 * friend vm_page_more_fictitious can block, so it doesn't seem like
2813 * this is used for interrupts any more....
2814 */
2815 void *
2816 zget(
2817 register zone_t zone)
2818 {
2819 vm_offset_t addr;
2820 boolean_t check_poison = FALSE;
2821
2822 #if CONFIG_ZLEAKS
2823 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
2824 uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
2825 #endif /* CONFIG_ZLEAKS */
2826
2827 assert( zone != ZONE_NULL );
2828
2829 #if CONFIG_ZLEAKS
2830 /*
2831 * Zone leak detection: capture a backtrace
2832 */
2833 if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) {
2834 zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
2835 }
2836 #endif /* CONFIG_ZLEAKS */
2837
2838 if (!lock_try_zone(zone))
2839 return NULL;
2840
2841 addr = try_alloc_from_zone(zone, &check_poison);
2842
2843 vm_offset_t inner_size = zone->elem_size;
2844
2845 #if ZONE_DEBUG
2846 if (addr && zone_debug_enabled(zone)) {
2847 enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
2848 addr += ZONE_DEBUG_OFFSET;
2849 inner_size -= ZONE_DEBUG_OFFSET;
2850 }
2851 #endif /* ZONE_DEBUG */
2852
2853 #if CONFIG_ZLEAKS
2854 /*
2855 * Zone leak detection: record the allocation
2856 */
2857 if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
2858 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2859 if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
2860 /* If it failed, roll back the counter so we sample the next allocation instead. */
2861 zone->zleak_capture = zleak_sample_factor;
2862 }
2863 }
2864 #endif /* CONFIG_ZLEAKS */
2865
2866 unlock_zone(zone);
2867
2868 if (__improbable(check_poison && addr)) {
2869 vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1;
2870 vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr);
2871
2872 for ( ; element_cursor < backup ; element_cursor++)
2873 if (__improbable(*element_cursor != ZP_POISON))
2874 zone_element_was_modified_panic(zone,
2875 addr,
2876 *element_cursor,
2877 ZP_POISON,
2878 ((vm_offset_t)element_cursor) - addr);
2879 }
2880
2881 if (addr) {
2882 /*
2883 * Clear out the old next pointer and backup to avoid leaking the cookie
2884 * and so that only values on the freelist have a valid cookie
2885 */
2886 vm_offset_t *primary = (vm_offset_t *) addr;
2887 vm_offset_t *backup = get_backup_ptr(inner_size, primary);
2888
2889 *primary = ZP_POISON;
2890 *backup = ZP_POISON;
2891 }
2892
2893 return((void *) addr);
2894 }
2895
2896 /* Keep this FALSE by default. Large memory machines run orders of magnitude
2897    slower in debug mode when TRUE. Use the debugger to enable if needed */
2898 /* static */ boolean_t zone_check = FALSE;
2899
2900 static void zone_check_freelist(zone_t zone, vm_offset_t elem)
2901 {
2902 struct zone_free_element *this;
2903 struct zone_page_metadata *thispage;
2904
2905 if (zone->use_page_list) {
2906 if (zone->allows_foreign) {
2907 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
2908 !queue_end(&zone->pages.any_free_foreign, (queue_entry_t)thispage);
2909 thispage = (struct zone_page_metadata *)queue_next((queue_chain_t *)thispage)) {
2910 for (this = thispage->elements;
2911 this != NULL;
2912 this = this->next) {
2913 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2914 panic("zone_check_freelist");
2915 }
2916 }
2917 }
2918 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
2919 !queue_end(&zone->pages.all_free, (queue_entry_t)thispage);
2920 thispage = (struct zone_page_metadata *)queue_next((queue_chain_t *)thispage)) {
2921 for (this = thispage->elements;
2922 this != NULL;
2923 this = this->next) {
2924 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2925 panic("zone_check_freelist");
2926 }
2927 }
2928 for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
2929 !queue_end(&zone->pages.intermediate, (queue_entry_t)thispage);
2930 thispage = (struct zone_page_metadata *)queue_next((queue_chain_t *)thispage)) {
2931 for (this = thispage->elements;
2932 this != NULL;
2933 this = this->next) {
2934 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2935 panic("zone_check_freelist");
2936 }
2937 }
2938 } else {
2939 for (this = zone->free_elements;
2940 this != NULL;
2941 this = this->next) {
2942 if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem)
2943 panic("zone_check_freelist");
2944 }
2945 }
2946 }
2947
2948 static zone_t zone_last_bogus_zone = ZONE_NULL;
2949 static vm_offset_t zone_last_bogus_elem = 0;
2950
2951 void
2952 zfree(
2953 register zone_t zone,
2954 void *addr)
2955 {
2956 vm_offset_t elem = (vm_offset_t) addr;
2957 uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
2958 int numsaved = 0;
2959 boolean_t gzfreed = FALSE;
2960 boolean_t poison = FALSE;
2961
2962 assert(zone != ZONE_NULL);
2963
2964 #if 1
2965 if (zone->use_page_list) {
2966 struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr);
2967 if (zone != page_meta->zone) {
2968 /*
2969 * Something bad has happened. Someone tried to zfree a pointer but the metadata says it is from
2970 * a different zone (or maybe it's from a zone that doesn't use page free lists at all). We can repair
2971 * some cases of this, if:
2972 * 1) The specified zone had use_page_list, and the true zone also has use_page_list set. In that case
2973 * we can swap the zone_t
2974 * 2) The specified zone had use_page_list, but the true zone does not. In this case page_meta is garbage,
2975 * and dereferencing page_meta->zone might panic.
2976 * To distinguish the two, we enumerate the zone list to match it up.
2977 * We do not handle the case where an incorrect zone is passed that does not have use_page_list set,
2978 * even if the true zone did have this set.
2979 */
2980 zone_t fixed_zone = NULL;
2981 int fixed_i, max_zones;
2982
2983 simple_lock(&all_zones_lock);
2984 max_zones = num_zones;
2985 fixed_zone = first_zone;
2986 simple_unlock(&all_zones_lock);
2987
2988 for (fixed_i=0; fixed_i < max_zones; fixed_i++, fixed_zone = fixed_zone->next_zone) {
2989 if (fixed_zone == page_meta->zone && fixed_zone->use_page_list) {
2990 /* we can fix this */
2991 printf("Fixing incorrect zfree from zone %s to zone %s\n", zone->zone_name, fixed_zone->zone_name);
2992 zone = fixed_zone;
2993 break;
2994 }
2995 }
2996 }
2997 }
2998 #endif
2999
3000 /*
3001 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
3002 */
3003
3004 if (__improbable(DO_LOGGING(zone) && corruption_debug_flag))
3005 numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH);
3006
3007 #if MACH_ASSERT
3008 /* Basic sanity checks */
3009 if (zone == ZONE_NULL || elem == (vm_offset_t)0)
3010 panic("zfree: NULL");
3011 /* zone_gc assumes zones are never freed */
3012 if (zone == zone_zone)
3013 panic("zfree: freeing to zone_zone breaks zone_gc!");
3014 #endif
3015
3016 #if CONFIG_GZALLOC
3017 gzfreed = gzalloc_free(zone, addr);
3018 #endif
3019
3020 TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
3021
3022 if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
3023 !from_zone_map(elem, zone->elem_size))) {
3024 #if MACH_ASSERT
3025 panic("zfree: non-allocated memory in collectable zone!");
3026 #endif
3027 zone_last_bogus_zone = zone;
3028 zone_last_bogus_elem = elem;
3029 return;
3030 }
3031
3032 if ((zp_factor != 0 || zp_tiny_zone_limit != 0) && !gzfreed) {
3033 /*
3034 * Poison the memory before it ends up on the freelist to catch
3035 * use-after-free and use of uninitialized memory
3036 *
3037 * Always poison tiny zones' elements (limit is 0 if -no-zp is set)
3038 * Also poison larger elements periodically
3039 */
3040
3041 vm_offset_t inner_size = zone->elem_size;
3042
3043 #if ZONE_DEBUG
3044 if (!gzfreed && zone_debug_enabled(zone)) {
3045 inner_size -= ZONE_DEBUG_OFFSET;
3046 }
3047 #endif
3048 uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale);
3049
3050 if (inner_size <= zp_tiny_zone_limit)
3051 poison = TRUE;
3052 else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE)
3053 poison = TRUE;
3054
3055 if (__improbable(poison)) {
3056
3057 /* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */
3058 /* Poison everything but primary and backup */
3059 vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1;
3060 vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem);
3061
3062 for ( ; element_cursor < backup; element_cursor++)
3063 *element_cursor = ZP_POISON;
3064 }
3065 }
3066
3067 lock_zone(zone);
3068
3069 /*
3070 * See if we're doing logging on this zone. There are two styles of logging used depending on
3071 * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
3072 */
3073
3074 if (__improbable(DO_LOGGING(zone))) {
3075 if (corruption_debug_flag) {
3076 /*
3077 * We're logging to catch a corruption. Add a record of this zfree operation
3078  * to the log.
3079 */
3080 btlog_add_entry(zlog_btlog, (void *)addr, ZOP_FREE, (void **)zbt, numsaved);
3081 } else {
3082 /*
3083 * We're logging to catch a leak. Remove any record we might have for this
3084 * element since it's being freed. Note that we may not find it if the buffer
3085 * overflowed and that's OK. Since the log is of a limited size, old records
3086 * get overwritten if there are more zallocs than zfrees.
3087 */
3088 btlog_remove_entries_for_element(zlog_btlog, (void *)addr);
3089 }
3090 }
3091
3092 #if ZONE_DEBUG
3093 if (!gzfreed && zone_debug_enabled(zone)) {
3094 queue_t tmp_elem;
3095
3096 elem -= ZONE_DEBUG_OFFSET;
3097 if (zone_check) {
3098 /* check the zone's consistency */
3099
3100 for (tmp_elem = queue_first(&zone->active_zones);
3101 !queue_end(tmp_elem, &zone->active_zones);
3102 tmp_elem = queue_next(tmp_elem))
3103 if (elem == (vm_offset_t)tmp_elem)
3104 break;
3105 if (elem != (vm_offset_t)tmp_elem)
3106 panic("zfree()ing element from wrong zone");
3107 }
3108 remqueue((queue_t) elem);
3109 }
3110 #endif /* ZONE_DEBUG */
3111 if (zone_check) {
3112 zone_check_freelist(zone, elem);
3113 }
3114
3115 if (__probable(!gzfreed))
3116 free_to_zone(zone, elem, poison);
3117
3118 #if MACH_ASSERT
3119 if (zone->count < 0)
3120 panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone",
3121 zone->zone_name, addr);
3122 #endif
3123
3124
3125 #if CONFIG_ZLEAKS
3126 /*
3127 * Zone leak detection: un-track the allocation
3128 */
3129 if (zone->zleak_on) {
3130 zleak_free(elem, zone->elem_size);
3131 }
3132 #endif /* CONFIG_ZLEAKS */
3133
3134 /*
3135  * If elements are at least a page in size, and memory is low,
3136  * request that zone garbage collection run the next
3137  * time the pageout thread runs.
3138 */
3139 if (zone->elem_size >= PAGE_SIZE &&
3140 vm_pool_low()){
3141 zone_gc_forced = TRUE;
3142 }
3143 unlock_zone(zone);
3144
3145 {
3146 thread_t thr = current_thread();
3147 task_t task;
3148 zinfo_usage_t zinfo;
3149 vm_size_t sz = zone->elem_size;
3150
3151 if (zone->caller_acct)
3152 ledger_debit(thr->t_ledger, task_ledgers.tkm_private, sz);
3153 else
3154 ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, sz);
3155
3156 if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
3157 OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
3158 }
3159 }
3160
3161
3162 /* Change a zone's flags.
3163 * This routine must be called immediately after zinit.
3164 */
3165 void
3166 zone_change(
3167 zone_t zone,
3168 unsigned int item,
3169 boolean_t value)
3170 {
3171 assert( zone != ZONE_NULL );
3172 assert( value == TRUE || value == FALSE );
3173
3174 switch(item){
3175 case Z_NOENCRYPT:
3176 zone->noencrypt = value;
3177 break;
3178 case Z_EXHAUST:
3179 zone->exhaustible = value;
3180 break;
3181 case Z_COLLECT:
3182 zone->collectable = value;
3183 break;
3184 case Z_EXPAND:
3185 zone->expandable = value;
3186 break;
3187 case Z_FOREIGN:
3188 zone->allows_foreign = value;
3189 break;
3190 case Z_CALLERACCT:
3191 zone->caller_acct = value;
3192 break;
3193 case Z_NOCALLOUT:
3194 zone->no_callout = value;
3195 break;
3196 case Z_GZALLOC_EXEMPT:
3197 zone->gzalloc_exempt = value;
3198 #if CONFIG_GZALLOC
3199 gzalloc_reconfigure(zone);
3200 #endif
3201 break;
3202 case Z_ALIGNMENT_REQUIRED:
3203 zone->alignment_required = value;
3204 /*
3205 * Disable the page list optimization here to provide
3206 * more of an alignment guarantee. This prevents
3207 * the alignment from being modified by the metadata stored
3208 * at the beginning of the page.
3209 */
3210 zone->use_page_list = FALSE;
3211 #if ZONE_DEBUG
3212 zone_debug_disable(zone);
3213 #endif
3214 #if CONFIG_GZALLOC
3215 gzalloc_reconfigure(zone);
3216 #endif
3217 break;
3218 default:
3219 panic("Zone_change: Wrong Item Type!");
3220 /* break; */
3221 }
3222 }
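
/*
 * Usage sketch (illustrative only, not compiled; all names below are
 * hypothetical): the typical client pattern is to create a zone and then
 * adjust its flags immediately after zinit:
 *
 *	zone_t my_zone = zinit(sizeof(struct my_elem),
 *			       1024 * sizeof(struct my_elem),
 *			       PAGE_SIZE, "my elements");
 *	zone_change(my_zone, Z_CALLERACCT, FALSE);
 *	zone_change(my_zone, Z_NOENCRYPT, TRUE);
 */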
3223
3224 /*
3225 * Return the expected number of free elements in the zone.
3226 * This calculation will be incorrect if items are zfree'd that
3227 * were never zalloc'd/zget'd. The correct way to stuff memory
3228 * into a zone is by zcram.
3229 */
3230
3231 integer_t
3232 zone_free_count(zone_t zone)
3233 {
3234 integer_t free_count;
3235
3236 lock_zone(zone);
3237 free_count = zone->countfree;
3238 unlock_zone(zone);
3239
3240 assert(free_count >= 0);
3241
3242 return(free_count);
3243 }
3244
3245 /*
3246 * Zone garbage collection subroutines
3247 */
3248
3249 boolean_t
3250 zone_page_collectable(
3251 vm_offset_t addr,
3252 vm_size_t size)
3253 {
3254 struct zone_page_table_entry *zp;
3255 zone_page_index_t i, j;
3256
3257 #if ZONE_ALIAS_ADDR
3258 addr = zone_virtual_addr(addr);
3259 #endif
3260 #if MACH_ASSERT
3261 if (!from_zone_map(addr, size))
3262 panic("zone_page_collectable");
3263 #endif
3264
3265 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3266 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3267
3268 for (; i <= j; i++) {
3269 zp = zone_page_table_lookup(i);
3270 if (zp->collect_count == zp->alloc_count)
3271 return (TRUE);
3272 }
3273
3274 return (FALSE);
3275 }
3276
3277 void
3278 zone_page_keep(
3279 vm_offset_t addr,
3280 vm_size_t size)
3281 {
3282 struct zone_page_table_entry *zp;
3283 zone_page_index_t i, j;
3284
3285 #if ZONE_ALIAS_ADDR
3286 addr = zone_virtual_addr(addr);
3287 #endif
3288 #if MACH_ASSERT
3289 if (!from_zone_map(addr, size))
3290 panic("zone_page_keep");
3291 #endif
3292
3293 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3294 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3295
3296 for (; i <= j; i++) {
3297 zp = zone_page_table_lookup(i);
3298 zp->collect_count = 0;
3299 }
3300 }
3301
3302 void
3303 zone_page_collect(
3304 vm_offset_t addr,
3305 vm_size_t size)
3306 {
3307 struct zone_page_table_entry *zp;
3308 zone_page_index_t i, j;
3309
3310 #if ZONE_ALIAS_ADDR
3311 addr = zone_virtual_addr(addr);
3312 #endif
3313 #if MACH_ASSERT
3314 if (!from_zone_map(addr, size))
3315 panic("zone_page_collect");
3316 #endif
3317
3318 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3319 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3320
3321 for (; i <= j; i++) {
3322 zp = zone_page_table_lookup(i);
3323 ++zp->collect_count;
3324 }
3325 }
3326
3327 void
3328 zone_page_init(
3329 vm_offset_t addr,
3330 vm_size_t size)
3331 {
3332 struct zone_page_table_entry *zp;
3333 zone_page_index_t i, j;
3334
3335 #if ZONE_ALIAS_ADDR
3336 addr = zone_virtual_addr(addr);
3337 #endif
3338 #if MACH_ASSERT
3339 if (!from_zone_map(addr, size))
3340 panic("zone_page_init");
3341 #endif
3342
3343 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3344 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3345
3346 for (; i <= j; i++) {
3347 /* make sure entry exists before marking unused */
3348 zone_page_table_expand(i);
3349
3350 zp = zone_page_table_lookup(i);
3351 assert(zp);
3352 zp->alloc_count = ZONE_PAGE_UNUSED;
3353 zp->collect_count = 0;
3354 }
3355 }
3356
3357 void
3358 zone_page_alloc(
3359 vm_offset_t addr,
3360 vm_size_t size)
3361 {
3362 struct zone_page_table_entry *zp;
3363 zone_page_index_t i, j;
3364
3365 #if ZONE_ALIAS_ADDR
3366 addr = zone_virtual_addr(addr);
3367 #endif
3368 #if MACH_ASSERT
3369 if (!from_zone_map(addr, size))
3370 panic("zone_page_alloc");
3371 #endif
3372
3373 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3374 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3375
3376 for (; i <= j; i++) {
3377 zp = zone_page_table_lookup(i);
3378 assert(zp);
3379
3380 /*
3381 * Set alloc_count to ZONE_PAGE_USED if
3382 * it was previously set to ZONE_PAGE_UNUSED.
3383 */
3384 if (zp->alloc_count == ZONE_PAGE_UNUSED)
3385 zp->alloc_count = ZONE_PAGE_USED;
3386
3387 ++zp->alloc_count;
3388 }
3389 }
3390
3391 void
3392 zone_page_free_element(
3393 zone_page_index_t *free_page_head,
3394 zone_page_index_t *free_page_tail,
3395 vm_offset_t addr,
3396 vm_size_t size)
3397 {
3398 struct zone_page_table_entry *zp;
3399 zone_page_index_t i, j;
3400
3401 #if ZONE_ALIAS_ADDR
3402 addr = zone_virtual_addr(addr);
3403 #endif
3404 #if MACH_ASSERT
3405 if (!from_zone_map(addr, size))
3406 panic("zone_page_free_element");
3407 #endif
3408
3409 /* Clear out the old next and backup pointers */
3410 vm_offset_t *primary = (vm_offset_t *) addr;
3411 vm_offset_t *backup = get_backup_ptr(size, primary);
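	/*
	 * Poisoning both words means a stale freelist pointer into this
	 * element reads back ZP_POISON rather than a plausible address,
	 * which makes a later use-after-free of the dying element
	 * detectable.
	 */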
3412
3413 *primary = ZP_POISON;
3414 *backup = ZP_POISON;
3415
3416 i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
3417 j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
3418
3419 for (; i <= j; i++) {
3420 zp = zone_page_table_lookup(i);
3421
3422 if (zp->collect_count > 0)
3423 --zp->collect_count;
3424 if (--zp->alloc_count == 0) {
3425 vm_address_t free_page_address;
3426 vm_address_t prev_free_page_address;
3427
3428 zp->alloc_count = ZONE_PAGE_UNUSED;
3429 zp->collect_count = 0;
3430
3431
3432 /*
3433 * This element was the last one on this page, re-use the page's
3434 * storage for a page freelist
3435 */
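			/*
			 * The list is threaded through the free pages
			 * themselves: the first word of each free page
			 * holds the zone_page_index_t of the next free
			 * page, terminated by ZONE_PAGE_INDEX_INVALID.
			 * A sketch, if pages 7 and then 12 are freed:
			 *
			 *	head -> page 7  [first word: 12]
			 *		page 12 [first word: INVALID] <- tail
			 */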
3436 free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
3437 *(zone_page_index_t *)free_page_address = ZONE_PAGE_INDEX_INVALID;
3438
3439 if (*free_page_head == ZONE_PAGE_INDEX_INVALID) {
3440 *free_page_head = i;
3441 *free_page_tail = i;
3442 } else {
3443 prev_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)(*free_page_tail));
3444 *(zone_page_index_t *)prev_free_page_address = i;
3445 *free_page_tail = i;
3446 }
3447 }
3448 }
3449 }
3450
3451
3452 #define ZONEGC_SMALL_ELEMENT_SIZE 4096
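/*
 * During a normal (non-exhaustive) GC, collectable zones with elements
 * smaller than this that don't maintain page lists are skipped, on the
 * theory that small-element zones rarely accumulate a whole page's worth
 * of contiguous free elements, so scanning them is unlikely to recover
 * any pages.
 */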
3453
3454 struct {
3455 	uint64_t	zgc_invoked;		/* number of times zone_gc() has run */
3456 	uint64_t	zgc_bailed;		/* runs that freed no pages at all */
3457 	uint32_t	pgs_freed;		/* pages returned to the VM system */
3458
3459 	uint32_t	elems_collected,	/* free elements counted as candidates */
3460 			elems_freed,		/* elements freed along with their pages */
3461 			elems_kept;		/* elements returned to their zones */
3462 } zgc_stats;
3463
3464 /* Zone garbage collection
3465 *
3466 * zone_gc will walk through all the free elements in all the
3467 * zones that are marked collectable looking for reclaimable
3468 * pages. zone_gc is called by consider_zone_gc when the system
3469 * begins to run out of memory.
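 *
 * In outline: each collectable zone's free list is detached under the
 * zone lock; pass 1 walks it, counting would-be-free elements per page
 * in the zone page table and handing foreign elements back to the zone;
 * pass 2 frees the elements that sit on fully collectable pages and
 * keeps the rest; finally, runs of physically contiguous reclaimed
 * pages are returned with as few kmem_free() calls as possible.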
3470 */
3471 void
3472 zone_gc(boolean_t all_zones)
3473 {
3474 unsigned int max_zones;
3475 zone_t z;
3476 unsigned int i;
3477 uint32_t old_pgs_freed;
3478 zone_page_index_t zone_free_page_head;
3479 zone_page_index_t zone_free_page_tail;
3480 thread_t mythread = current_thread();
3481
3482 lck_mtx_lock(&zone_gc_lock);
3483
3484 zgc_stats.zgc_invoked++;
3485 old_pgs_freed = zgc_stats.pgs_freed;
3486
3487 simple_lock(&all_zones_lock);
3488 max_zones = num_zones;
3489 z = first_zone;
3490 simple_unlock(&all_zones_lock);
3491
3492 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
3493 kprintf("zone_gc(all_zones=%s) starting...\n", all_zones ? "TRUE" : "FALSE");
3494
3495 /*
3496 * it's ok to allow eager kernel preemption while
3497 * holding a zone lock since it's taken
3498 * as a spin lock (which prevents preemption)
3499 */
3500 thread_set_eager_preempt(mythread);
3501
3502 #if MACH_ASSERT
3503 for (i = 0; i < zone_pages; i++) {
3504 struct zone_page_table_entry *zp;
3505
3506 zp = zone_page_table_lookup(i);
3507 assert(!zp || (zp->collect_count == 0));
3508 }
3509 #endif /* MACH_ASSERT */
3510
3511 for (i = 0; i < max_zones; i++, z = z->next_zone) {
3512 unsigned int n, m;
3513 vm_size_t elt_size, size_freed;
3514 struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
3515 int kmem_frees = 0, total_freed_pages = 0;
3516 struct zone_page_metadata *page_meta;
3517 queue_head_t page_meta_head;
3518
3519 assert(z != ZONE_NULL);
3520
3521 if (!z->collectable)
3522 continue;
3523
3524 if (all_zones == FALSE && z->elem_size < ZONEGC_SMALL_ELEMENT_SIZE && !z->use_page_list)
3525 continue;
3526
3527 lock_zone(z);
3528
3529 elt_size = z->elem_size;
3530
3531 /*
3532 * Do a quick feasibility check before we scan the zone:
3533 * skip unless there is likelihood of getting pages back
3534 * (i.e. we need a whole allocation block's worth of free
3535 * elements before we can garbage collect), and
3536 * the zone has more than 10 percent of its elements free
3537 * or the element size is a multiple of PAGE_SIZE
3538 */
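		/*
		 * For illustration, assuming 4 KB pages: a zone with
		 * elt_size 256, alloc_size 4096, cur_size 1 MB and 3072
		 * elements in use has 256 KB sitting on its free list,
		 * which exceeds both 2 * alloc_size and a tenth of
		 * cur_size, so it is worth scanning.
		 */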
3539 if ((elt_size & PAGE_MASK) &&
3540 !z->use_page_list &&
3541 (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
3542 ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10)))) {
3543 unlock_zone(z);
3544 continue;
3545 }
3546
3547 z->doing_gc = TRUE;
3548
3549 /*
3550 * Snatch all of the free elements away from the zone.
3551 */
3552
3553 if (z->use_page_list) {
3554 queue_new_head(&z->pages.all_free, &page_meta_head, struct zone_page_metadata *, pages);
3555 queue_init(&z->pages.all_free);
3556 } else {
3557 scan = (void *)z->free_elements;
3558 z->free_elements = 0;
3559 }
3560
3561 unlock_zone(z);
3562
3563 if (z->use_page_list) {
3564 /*
3565 * For zones that maintain page lists (which in turn
3566 * track free elements on those pages), zone_gc()
3567 * is incredibly easy, and we bypass all the logic
3568 * for scanning elements and mapping them to
3569 * collectable pages
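			 *
			 * Every page on the all_free queue already holds
			 * only free elements, so each one can be handed
			 * back to the VM wholesale.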
3570 */
3571
3572 size_freed = 0;
3573
3574 queue_iterate(&page_meta_head, page_meta, struct zone_page_metadata *, pages) {
3575 assert(from_zone_map((vm_address_t)page_meta, sizeof(*page_meta))); /* foreign elements should be in any_free_foreign */
3576
3577 zgc_stats.elems_freed += page_meta->free_count;
3578 size_freed += elt_size * page_meta->free_count;
3579 zgc_stats.elems_collected += page_meta->free_count;
3580 }
3581
3582 lock_zone(z);
3583
3584 if (size_freed > 0) {
3585 z->cur_size -= size_freed;
3586 z->countfree -= size_freed/elt_size;
3587 }
3588
3589 z->doing_gc = FALSE;
3590 if (z->waiting) {
3591 z->waiting = FALSE;
3592 zone_wakeup(z);
3593 }
3594
3595 unlock_zone(z);
3596
3597 if (queue_empty(&page_meta_head))
3598 continue;
3599
3600 thread_clear_eager_preempt(mythread);
3601
3602 while ((page_meta = (struct zone_page_metadata *)dequeue_head(&page_meta_head)) != NULL) {
3603 vm_address_t free_page_address;
3604
3605 free_page_address = trunc_page((vm_address_t)page_meta);
3606 #if ZONE_ALIAS_ADDR
3607 free_page_address = zone_virtual_addr(free_page_address);
3608 #endif
3609 kmem_free(zone_map, free_page_address, PAGE_SIZE);
3610 ZONE_PAGE_COUNT_DECR(z, 1);
3611 total_freed_pages++;
3612 zgc_stats.pgs_freed += 1;
3613
3614 if (++kmem_frees == 32) {
3615 thread_yield_internal(1);
3616 kmem_frees = 0;
3617 }
3618 }
3619
3620 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
3621 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages);
3622
3623 thread_set_eager_preempt(mythread);
3624 continue; /* go to next zone */
3625 }
3626
3627 /*
3628 * Pass 1:
3629 *
3630 * Determine which elements we can attempt to collect
3631 * and count them up in the page table. Foreign elements
3632 * are returned to the zone.
3633 */
3634
3635 prev = (void *)&scan;
3636 elt = scan;
3637 n = 0; tail = keep = NULL;
3638
3639 zone_free_page_head = ZONE_PAGE_INDEX_INVALID;
3640 zone_free_page_tail = ZONE_PAGE_INDEX_INVALID;
3641
3642
3643 while (elt != NULL) {
3644 if (from_zone_map(elt, elt_size)) {
3645 zone_page_collect((vm_offset_t)elt, elt_size);
3646
3647 prev = elt;
3648 elt = elt->next;
3649
3650 ++zgc_stats.elems_collected;
3651 }
3652 else {
3653 if (keep == NULL)
3654 keep = tail = elt;
3655 else {
3656 append_zone_element(z, tail, elt);
3657 tail = elt;
3658 }
3659
3660 append_zone_element(z, prev, elt->next);
3661 elt = elt->next;
3662 append_zone_element(z, tail, NULL);
3663 }
3664
3665 /*
3666 * Dribble back the elements we are keeping.
3667 * If there are none, give some elements that we haven't looked at yet
3668 * back to the freelist so that others waiting on the zone don't get stuck
3669 * for too long. This might prevent us from recovering some memory,
3670 * but allows us to avoid having to allocate new memory to serve requests
3671 * while zone_gc has all the free memory tied up.
3672 * <rdar://problem/3893406>
3673 */
3674
3675 if (++n >= 50) {
3676 if (z->waiting == TRUE) {
3677 /* z->waiting checked without lock held, rechecked below after locking */
3678 lock_zone(z);
3679
3680 if (keep != NULL) {
3681 add_list_to_zone(z, keep, tail);
3682 tail = keep = NULL;
3683 } else {
3684 					m = 0;
3685 base_elt = elt;
3686 base_prev = prev;
3687 while ((elt != NULL) && (++m < 50)) {
3688 prev = elt;
3689 elt = elt->next;
3690 }
3691 					if (m != 0) {
3692 /* Extract the elements from the list and
3693 * give them back */
3694 append_zone_element(z, prev, NULL);
3695 add_list_to_zone(z, base_elt, prev);
3696 append_zone_element(z, base_prev, elt);
3697 prev = base_prev;
3698 }
3699 }
3700
3701 if (z->waiting) {
3702 z->waiting = FALSE;
3703 zone_wakeup(z);
3704 }
3705
3706 unlock_zone(z);
3707 }
3708 				n = 0;
3709 }
3710 }
3711
3712 /*
3713 * Return any remaining elements.
3714 */
3715
3716 if (keep != NULL) {
3717 lock_zone(z);
3718
3719 add_list_to_zone(z, keep, tail);
3720
3721 if (z->waiting) {
3722 z->waiting = FALSE;
3723 zone_wakeup(z);
3724 }
3725
3726 unlock_zone(z);
3727 }
3728
3729 /*
3730 * Pass 2:
3731 *
3732 * Determine which pages we can reclaim and
3733 * free those elements.
3734 */
3735
3736 size_freed = 0;
3737 elt = scan;
3738 n = 0; tail = keep = NULL;
3739
3740 while (elt != NULL) {
3741 if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
3742 struct zone_free_element *next_elt = elt->next;
3743
3744 size_freed += elt_size;
3745
3746 /*
3747 * If this is the last allocation on the page(s),
3748 * we may use their storage to maintain the linked
3749 * list of free-able pages. So store elt->next because
3750 * "elt" may be scribbled over.
3751 */
3752 zone_page_free_element(&zone_free_page_head, &zone_free_page_tail, (vm_offset_t)elt, elt_size);
3753
3754 elt = next_elt;
3755
3756 ++zgc_stats.elems_freed;
3757 }
3758 else {
3759 zone_page_keep((vm_offset_t)elt, elt_size);
3760
3761 if (keep == NULL)
3762 keep = tail = elt;
3763 else {
3764 append_zone_element(z, tail, elt);
3765 tail = elt;
3766 }
3767
3768 elt = elt->next;
3769 append_zone_element(z, tail, NULL);
3770
3771 ++zgc_stats.elems_kept;
3772 }
3773
3774 /*
3775 * Dribble back the elements we are keeping,
3776 * and update the zone size info.
3777 */
3778
3779 if (++n >= 50) {
3780 lock_zone(z);
3781
3782 z->cur_size -= size_freed;
3783 z->countfree -= size_freed/elt_size;
3784 size_freed = 0;
3785
3786 if (keep != NULL) {
3787 add_list_to_zone(z, keep, tail);
3788 }
3789
3790 if (z->waiting) {
3791 z->waiting = FALSE;
3792 zone_wakeup(z);
3793 }
3794
3795 unlock_zone(z);
3796
3797 n = 0; tail = keep = NULL;
3798 }
3799 }
3800
3801 /*
3802 * Return any remaining elements, and update
3803 * the zone size info.
3804 */
3805
3806 lock_zone(z);
3807
3808 if (size_freed > 0 || keep != NULL) {
3809
3810 z->cur_size -= size_freed;
3811 z->countfree -= size_freed/elt_size;
3812
3813 if (keep != NULL) {
3814 add_list_to_zone(z, keep, tail);
3815 }
3816
3817 }
3818
3819 z->doing_gc = FALSE;
3820 if (z->waiting) {
3821 z->waiting = FALSE;
3822 zone_wakeup(z);
3823 }
3824 unlock_zone(z);
3825
3826 if (zone_free_page_head == ZONE_PAGE_INDEX_INVALID)
3827 continue;
3828
3829 /*
3830 * we don't want to allow eager kernel preemption while holding the
3831 * various locks taken in the kmem_free path of execution
3832 */
3833 thread_clear_eager_preempt(mythread);
3834
3835
3836 /*
3837 * This loop counts the number of pages that should be freed by the
3838 * next loop that tries to coalesce the kmem_frees()
3839 */
3840 uint32_t pages_to_free_count = 0;
3841 vm_address_t fpa;
3842 zone_page_index_t index;
3843 for (index = zone_free_page_head; index != ZONE_PAGE_INDEX_INVALID;) {
3844 pages_to_free_count++;
3845 fpa = zone_map_min_address + PAGE_SIZE * ((vm_size_t)index);
3846 index = *(zone_page_index_t *)fpa;
3847 }
3848
3849 /*
3850 * Reclaim the pages we are freeing.
3851 */
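		/*
		 * Each iteration below pops the head of the page freelist
		 * and then greedily absorbs physically adjacent free pages:
		 * a next page sitting one page below the current base
		 * shifts the base down, and one sitting just above the run
		 * extends its length, so a whole contiguous range goes
		 * back in a single kmem_free().
		 */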
3852 while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
3853 zone_page_index_t zind = zone_free_page_head;
3854 vm_address_t free_page_address;
3855 int page_count;
3856
3857 /*
3858 * Use the first word of the page about to be freed to find the next free page
3859 */
3860 free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
3861 zone_free_page_head = *(zone_page_index_t *)free_page_address;
3862
3863 page_count = 1;
3864 total_freed_pages++;
3865
3866 while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
3867 zone_page_index_t next_zind = zone_free_page_head;
3868 vm_address_t next_free_page_address;
3869
3870 next_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)next_zind);
3871
3872 if (next_free_page_address == (free_page_address - PAGE_SIZE)) {
3873 free_page_address = next_free_page_address;
3874 } else if (next_free_page_address != (free_page_address + (PAGE_SIZE * page_count)))
3875 break;
3876
3877 zone_free_page_head = *(zone_page_index_t *)next_free_page_address;
3878 page_count++;
3879 total_freed_pages++;
3880 }
3881 kmem_free(zone_map, free_page_address, page_count * PAGE_SIZE);
3882 ZONE_PAGE_COUNT_DECR(z, page_count);
3883 zgc_stats.pgs_freed += page_count;
3884 pages_to_free_count -= page_count;
3885
3886 if (++kmem_frees == 32) {
3887 thread_yield_internal(1);
3888 kmem_frees = 0;
3889 }
3890 }
3891
3892 		/* Check that we actually freed the exact number of pages we were supposed to */
3893 assert(pages_to_free_count == 0);
3894
3895 if (zalloc_debug & ZALLOC_DEBUG_ZONEGC)
3896 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages);
3897
3898 thread_set_eager_preempt(mythread);
3899 }
3900
3901 if (old_pgs_freed == zgc_stats.pgs_freed)
3902 zgc_stats.zgc_bailed++;
3903
3904 thread_clear_eager_preempt(mythread);
3905
3906 lck_mtx_unlock(&zone_gc_lock);
3907
3908 }
3909
3910 extern vm_offset_t kmapoff_kaddr;
3911 extern unsigned int kmapoff_pgcnt;
3912
3913 /*
3914 * consider_zone_gc:
3915 *
3916 * Called by the pageout daemon when the system needs more free pages.
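 *
 * A collection actually runs only if zone_gc_allowed is set and the
 * caller forces it, a forced GC is pending, or the periodic time
 * throttle has expired; only in the throttled case are all zones
 * scanned, otherwise small-element zones are skipped.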
3917 */
3918
3919 void
3920 consider_zone_gc(boolean_t force)
3921 {
3922 boolean_t all_zones = FALSE;
3923
3924 if (kmapoff_kaddr != 0) {
3925 /*
3926 * One-time reclaim of kernel_map resources we allocated in
3927 * early boot.
3928 */
3929 (void) vm_deallocate(kernel_map,
3930 kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
3931 kmapoff_kaddr = 0;
3932 }
3933
3934 if (zone_gc_allowed &&
3935 (zone_gc_allowed_by_time_throttle ||
3936 zone_gc_forced ||
3937 force)) {
3938 if (zone_gc_allowed_by_time_throttle == TRUE) {
3939 zone_gc_allowed_by_time_throttle = FALSE;
3940 all_zones = TRUE;
3941 }
3942 zone_gc_forced = FALSE;
3943
3944 zone_gc(all_zones);
3945 }
3946 }
3947
3948 /*
3949 * By default, don't attempt zone GC more frequently
3950 * than once per minute.
3951 */
3952 void
3953 compute_zone_gc_throttle(void *arg __unused)
3954 {
3955 zone_gc_allowed_by_time_throttle = TRUE;
3956 }
3957
3958
3959 #if CONFIG_TASK_ZONE_INFO
3960
3961 kern_return_t
3962 task_zone_info(
3963 task_t task,
3964 mach_zone_name_array_t *namesp,
3965 mach_msg_type_number_t *namesCntp,
3966 task_zone_info_array_t *infop,
3967 mach_msg_type_number_t *infoCntp)
3968 {
3969 mach_zone_name_t *names;
3970 vm_offset_t names_addr;
3971 vm_size_t names_size;
3972 task_zone_info_t *info;
3973 vm_offset_t info_addr;
3974 vm_size_t info_size;
3975 unsigned int max_zones, i;
3976 zone_t z;
3977 mach_zone_name_t *zn;
3978 task_zone_info_t *zi;
3979 kern_return_t kr;
3980
3981 vm_size_t used;
3982 vm_map_copy_t copy;
3983
3984
3985 if (task == TASK_NULL)
3986 return KERN_INVALID_TASK;
3987
3988 /*
3989 * We assume that zones aren't freed once allocated.
3990 * We won't pick up any zones that are allocated later.
3991 */
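	/*
	 * The reply buffers are built in pageable ipc_kernel_map memory,
	 * their unused tails are zeroed, and ownership is then transferred
	 * to the caller with vm_map_copyin(), letting MIG return the
	 * arrays out-of-line without another copy.
	 */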
3992
3993 simple_lock(&all_zones_lock);
3994 max_zones = (unsigned int)(num_zones + num_fake_zones);
3995 z = first_zone;
3996 simple_unlock(&all_zones_lock);
3997
3998 names_size = round_page(max_zones * sizeof *names);
3999 kr = kmem_alloc_pageable(ipc_kernel_map,
4000 &names_addr, names_size, VM_KERN_MEMORY_IPC);
4001 if (kr != KERN_SUCCESS)
4002 return kr;
4003 names = (mach_zone_name_t *) names_addr;
4004
4005 info_size = round_page(max_zones * sizeof *info);
4006 kr = kmem_alloc_pageable(ipc_kernel_map,
4007 &info_addr, info_size, VM_KERN_MEMORY_IPC);
4008 if (kr != KERN_SUCCESS) {
4009 kmem_free(ipc_kernel_map,
4010 names_addr, names_size);
4011 return kr;
4012 }
4013
4014 info = (task_zone_info_t *) info_addr;
4015
4016 zn = &names[0];
4017 zi = &info[0];
4018
4019 for (i = 0; i < max_zones - num_fake_zones; i++) {
4020 struct zone zcopy;
4021
4022 assert(z != ZONE_NULL);
4023
4024 lock_zone(z);
4025 zcopy = *z;
4026 unlock_zone(z);
4027
4028 simple_lock(&all_zones_lock);
4029 z = z->next_zone;
4030 simple_unlock(&all_zones_lock);
4031
4032 /* assuming here the name data is static */
4033 (void) strncpy(zn->mzn_name, zcopy.zone_name,
4034 sizeof zn->mzn_name);
4035 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
4036
4037 zi->tzi_count = (uint64_t)zcopy.count;
4038 zi->tzi_cur_size = ptoa_64(zcopy.page_count);
4039 zi->tzi_max_size = (uint64_t)zcopy.max_size;
4040 zi->tzi_elem_size = (uint64_t)zcopy.elem_size;
4041 zi->tzi_alloc_size = (uint64_t)zcopy.alloc_size;
4042 zi->tzi_sum_size = zcopy.sum_count * zcopy.elem_size;
4043 zi->tzi_exhaustible = (uint64_t)zcopy.exhaustible;
4044 zi->tzi_collectable = (uint64_t)zcopy.collectable;
4045 zi->tzi_caller_acct = (uint64_t)zcopy.caller_acct;
4046 if (task->tkm_zinfo != NULL) {
4047 zi->tzi_task_alloc = task->tkm_zinfo[zcopy.index].alloc;
4048 zi->tzi_task_free = task->tkm_zinfo[zcopy.index].free;
4049 } else {
4050 zi->tzi_task_alloc = 0;
4051 zi->tzi_task_free = 0;
4052 }
4053 zn++;
4054 zi++;
4055 }
4056
4057 /*
4058 * loop through the fake zones and fill them using the specialized
4059 * functions
4060 */
4061 for (i = 0; i < num_fake_zones; i++) {
4062 int count, collectable, exhaustible, caller_acct, index;
4063 vm_size_t cur_size, max_size, elem_size, alloc_size;
4064 uint64_t sum_size;
4065
4066 strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
4067 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
4068 fake_zones[i].query(&count, &cur_size,
4069 &max_size, &elem_size,
4070 &alloc_size, &sum_size,
4071 &collectable, &exhaustible, &caller_acct);
4072 zi->tzi_count = (uint64_t)count;
4073 zi->tzi_cur_size = (uint64_t)cur_size;
4074 zi->tzi_max_size = (uint64_t)max_size;
4075 zi->tzi_elem_size = (uint64_t)elem_size;
4076 zi->tzi_alloc_size = (uint64_t)alloc_size;
4077 zi->tzi_sum_size = sum_size;
4078 zi->tzi_collectable = (uint64_t)collectable;
4079 zi->tzi_exhaustible = (uint64_t)exhaustible;
4080 zi->tzi_caller_acct = (uint64_t)caller_acct;
4081 if (task->tkm_zinfo != NULL) {
4082 index = ZINFO_SLOTS - num_fake_zones + i;
4083 zi->tzi_task_alloc = task->tkm_zinfo[index].alloc;
4084 zi->tzi_task_free = task->tkm_zinfo[index].free;
4085 } else {
4086 zi->tzi_task_alloc = 0;
4087 zi->tzi_task_free = 0;
4088 }
4089 zn++;
4090 zi++;
4091 }
4092
4093 used = max_zones * sizeof *names;
4094 if (used != names_size)
4095 bzero((char *) (names_addr + used), names_size - used);
4096
4097 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
4098 (vm_map_size_t)used, TRUE, &copy);
4099 assert(kr == KERN_SUCCESS);
4100
4101 *namesp = (mach_zone_name_t *) copy;
4102 *namesCntp = max_zones;
4103
4104 used = max_zones * sizeof *info;
4105
4106 if (used != info_size)
4107 bzero((char *) (info_addr + used), info_size - used);
4108
4109 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
4110 (vm_map_size_t)used, TRUE, &copy);
4111 assert(kr == KERN_SUCCESS);
4112
4113 *infop = (task_zone_info_t *) copy;
4114 *infoCntp = max_zones;
4115
4116 return KERN_SUCCESS;
4117 }
4118
4119 #else /* CONFIG_TASK_ZONE_INFO */
4120
4121 kern_return_t
4122 task_zone_info(
4123 __unused task_t task,
4124 __unused mach_zone_name_array_t *namesp,
4125 __unused mach_msg_type_number_t *namesCntp,
4126 __unused task_zone_info_array_t *infop,
4127 __unused mach_msg_type_number_t *infoCntp)
4128 {
4129 return KERN_FAILURE;
4130 }
4131
4132 #endif /* CONFIG_TASK_ZONE_INFO */
4133
4134 kern_return_t
4135 mach_zone_info(
4136 host_priv_t host,
4137 mach_zone_name_array_t *namesp,
4138 mach_msg_type_number_t *namesCntp,
4139 mach_zone_info_array_t *infop,
4140 mach_msg_type_number_t *infoCntp)
4141 {
4142 return (mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL));
4143 }
4144
4145 kern_return_t
4146 mach_memory_info(
4147 host_priv_t host,
4148 mach_zone_name_array_t *namesp,
4149 mach_msg_type_number_t *namesCntp,
4150 mach_zone_info_array_t *infop,
4151 mach_msg_type_number_t *infoCntp,
4152 mach_memory_info_array_t *memoryInfop,
4153 mach_msg_type_number_t *memoryInfoCntp)
4154 {
4155 mach_zone_name_t *names;
4156 vm_offset_t names_addr;
4157 vm_size_t names_size;
4158
4159 mach_zone_info_t *info;
4160 vm_offset_t info_addr;
4161 vm_size_t info_size;
4162
4163 mach_memory_info_t *memory_info;
4164 vm_offset_t memory_info_addr;
4165 vm_size_t memory_info_size;
4166 vm_size_t memory_info_vmsize;
4167 unsigned int num_sites;
4168
4169 unsigned int max_zones, i;
4170 zone_t z;
4171 mach_zone_name_t *zn;
4172 mach_zone_info_t *zi;
4173 kern_return_t kr;
4174
4175 vm_size_t used;
4176 vm_map_copy_t copy;
4177
4178
4179 if (host == HOST_NULL)
4180 return KERN_INVALID_HOST;
4181 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
4182 if (!PE_i_can_has_debugger(NULL))
4183 return KERN_INVALID_HOST;
4184 #endif
4185
4186 /*
4187 * We assume that zones aren't freed once allocated.
4188 * We won't pick up any zones that are allocated later.
4189 */
4190
4191 simple_lock(&all_zones_lock);
4192 max_zones = (unsigned int)(num_zones + num_fake_zones);
4193 z = first_zone;
4194 simple_unlock(&all_zones_lock);
4195
4196 names_size = round_page(max_zones * sizeof *names);
4197 kr = kmem_alloc_pageable(ipc_kernel_map,
4198 &names_addr, names_size, VM_KERN_MEMORY_IPC);
4199 if (kr != KERN_SUCCESS)
4200 return kr;
4201 names = (mach_zone_name_t *) names_addr;
4202
4203 info_size = round_page(max_zones * sizeof *info);
4204 kr = kmem_alloc_pageable(ipc_kernel_map,
4205 &info_addr, info_size, VM_KERN_MEMORY_IPC);
4206 if (kr != KERN_SUCCESS) {
4207 kmem_free(ipc_kernel_map,
4208 names_addr, names_size);
4209 return kr;
4210 }
4211 info = (mach_zone_info_t *) info_addr;
4212
4213 num_sites = 0;
4214 memory_info_addr = 0;
4215 if (memoryInfop && memoryInfoCntp)
4216 {
4217 num_sites = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
4218 		memory_info_size = num_sites * sizeof(*memory_info);
4219 memory_info_vmsize = round_page(memory_info_size);
4220 kr = kmem_alloc_pageable(ipc_kernel_map,
4221 &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
4222 if (kr != KERN_SUCCESS) {
4223 kmem_free(ipc_kernel_map,
4224 names_addr, names_size);
4225 kmem_free(ipc_kernel_map,
4226 info_addr, info_size);
4227 return kr;
4228 }
4229
4230 kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
4231 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
4232 assert(kr == KERN_SUCCESS);
4233
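		/*
		 * The buffer is wired across the vm_page_diagnose() call,
		 * presumably so that filling it cannot fault while the VM
		 * system is being inspected, and is unwired again before
		 * being copied out.
		 */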
4234 memory_info = (mach_memory_info_t *) memory_info_addr;
4235 vm_page_diagnose(memory_info, num_sites);
4236
4237 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
4238 assert(kr == KERN_SUCCESS);
4239 }
4240
4241 zn = &names[0];
4242 zi = &info[0];
4243
4244 for (i = 0; i < max_zones - num_fake_zones; i++) {
4245 struct zone zcopy;
4246
4247 assert(z != ZONE_NULL);
4248
4249 lock_zone(z);
4250 zcopy = *z;
4251 unlock_zone(z);
4252
4253 simple_lock(&all_zones_lock);
4254 z = z->next_zone;
4255 simple_unlock(&all_zones_lock);
4256
4257 /* assuming here the name data is static */
4258 (void) strncpy(zn->mzn_name, zcopy.zone_name,
4259 sizeof zn->mzn_name);
4260 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
4261
4262 zi->mzi_count = (uint64_t)zcopy.count;
4263 zi->mzi_cur_size = ptoa_64(zcopy.page_count);
4264 zi->mzi_max_size = (uint64_t)zcopy.max_size;
4265 zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
4266 zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
4267 zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
4268 zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
4269 zi->mzi_collectable = (uint64_t)zcopy.collectable;
4270 zn++;
4271 zi++;
4272 }
4273
4274 /*
4275 * loop through the fake zones and fill them using the specialized
4276 * functions
4277 */
4278 for (i = 0; i < num_fake_zones; i++) {
4279 int count, collectable, exhaustible, caller_acct;
4280 vm_size_t cur_size, max_size, elem_size, alloc_size;
4281 uint64_t sum_size;
4282
4283 strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
4284 zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
4285 fake_zones[i].query(&count, &cur_size,
4286 &max_size, &elem_size,
4287 &alloc_size, &sum_size,
4288 &collectable, &exhaustible, &caller_acct);
4289 zi->mzi_count = (uint64_t)count;
4290 zi->mzi_cur_size = (uint64_t)cur_size;
4291 zi->mzi_max_size = (uint64_t)max_size;
4292 zi->mzi_elem_size = (uint64_t)elem_size;
4293 zi->mzi_alloc_size = (uint64_t)alloc_size;
4294 zi->mzi_sum_size = sum_size;
4295 zi->mzi_collectable = (uint64_t)collectable;
4296 zi->mzi_exhaustible = (uint64_t)exhaustible;
4297
4298 zn++;
4299 zi++;
4300 }
4301
4302 used = max_zones * sizeof *names;
4303 if (used != names_size)
4304 bzero((char *) (names_addr + used), names_size - used);
4305
4306 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
4307 (vm_map_size_t)used, TRUE, &copy);
4308 assert(kr == KERN_SUCCESS);
4309
4310 *namesp = (mach_zone_name_t *) copy;
4311 *namesCntp = max_zones;
4312
4313 used = max_zones * sizeof *info;
4314
4315 if (used != info_size)
4316 bzero((char *) (info_addr + used), info_size - used);
4317
4318 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
4319 (vm_map_size_t)used, TRUE, &copy);
4320 assert(kr == KERN_SUCCESS);
4321
4322 *infop = (mach_zone_info_t *) copy;
4323 *infoCntp = max_zones;
4324
4325 if (memoryInfop && memoryInfoCntp)
4326 {
4327 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
4328 (vm_map_size_t)memory_info_size, TRUE, &copy);
4329 assert(kr == KERN_SUCCESS);
4330
4331 *memoryInfop = (mach_memory_info_t *) copy;
4332 *memoryInfoCntp = num_sites;
4333 }
4334
4335 return KERN_SUCCESS;
4336 }
4337
4338 /*
4339 * host_zone_info - LEGACY user interface for Mach zone information
4340 * Should use mach_zone_info() instead!
4341 */
4342 kern_return_t
4343 host_zone_info(
4344 host_priv_t host,
4345 zone_name_array_t *namesp,
4346 mach_msg_type_number_t *namesCntp,
4347 zone_info_array_t *infop,
4348 mach_msg_type_number_t *infoCntp)
4349 {
4350 zone_name_t *names;
4351 vm_offset_t names_addr;
4352 vm_size_t names_size;
4353 zone_info_t *info;
4354 vm_offset_t info_addr;
4355 vm_size_t info_size;
4356 unsigned int max_zones, i;
4357 zone_t z;
4358 zone_name_t *zn;
4359 zone_info_t *zi;
4360 kern_return_t kr;
4361
4362 vm_size_t used;
4363 vm_map_copy_t copy;
4364
4365
4366 if (host == HOST_NULL)
4367 return KERN_INVALID_HOST;
4368 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
4369 if (!PE_i_can_has_debugger(NULL))
4370 return KERN_INVALID_HOST;
4371 #endif
4372
4373 #if defined(__LP64__)
4374 if (!thread_is_64bit(current_thread()))
4375 return KERN_NOT_SUPPORTED;
4376 #else
4377 if (thread_is_64bit(current_thread()))
4378 return KERN_NOT_SUPPORTED;
4379 #endif
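	/*
	 * The legacy zone_info_t uses natural-width integer fields, so its
	 * layout depends on the caller's word size; only callers whose
	 * bitness matches the kernel's are supported, which is one reason
	 * mach_zone_info() with its fixed-width 64-bit fields is preferred.
	 */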
4380
4381 /*
4382 * We assume that zones aren't freed once allocated.
4383 * We won't pick up any zones that are allocated later.
4384 */
4385
4386 simple_lock(&all_zones_lock);
4387 max_zones = (unsigned int)(num_zones + num_fake_zones);
4388 z = first_zone;
4389 simple_unlock(&all_zones_lock);
4390
4391 names_size = round_page(max_zones * sizeof *names);
4392 kr = kmem_alloc_pageable(ipc_kernel_map,
4393 &names_addr, names_size, VM_KERN_MEMORY_IPC);
4394 if (kr != KERN_SUCCESS)
4395 return kr;
4396 names = (zone_name_t *) names_addr;
4397
4398 info_size = round_page(max_zones * sizeof *info);
4399 kr = kmem_alloc_pageable(ipc_kernel_map,
4400 &info_addr, info_size, VM_KERN_MEMORY_IPC);
4401 if (kr != KERN_SUCCESS) {
4402 kmem_free(ipc_kernel_map,
4403 names_addr, names_size);
4404 return kr;
4405 }
4406
4407 info = (zone_info_t *) info_addr;
4408
4409 zn = &names[0];
4410 zi = &info[0];
4411
4412 for (i = 0; i < max_zones - num_fake_zones; i++) {
4413 struct zone zcopy;
4414
4415 assert(z != ZONE_NULL);
4416
4417 lock_zone(z);
4418 zcopy = *z;
4419 unlock_zone(z);
4420
4421 simple_lock(&all_zones_lock);
4422 z = z->next_zone;
4423 simple_unlock(&all_zones_lock);
4424
4425 /* assuming here the name data is static */
4426 (void) strncpy(zn->zn_name, zcopy.zone_name,
4427 sizeof zn->zn_name);
4428 zn->zn_name[sizeof zn->zn_name - 1] = '\0';
4429
4430 zi->zi_count = zcopy.count;
4431 zi->zi_cur_size = ptoa(zcopy.page_count);
4432 zi->zi_max_size = zcopy.max_size;
4433 zi->zi_elem_size = zcopy.elem_size;
4434 zi->zi_alloc_size = zcopy.alloc_size;
4435 zi->zi_exhaustible = zcopy.exhaustible;
4436 zi->zi_collectable = zcopy.collectable;
4437
4438 zn++;
4439 zi++;
4440 }
4441
4442 /*
4443 * loop through the fake zones and fill them using the specialized
4444 * functions
4445 */
4446 for (i = 0; i < num_fake_zones; i++) {
4447 int caller_acct;
4448 uint64_t sum_space;
4449 strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
4450 zn->zn_name[sizeof zn->zn_name - 1] = '\0';
4451 fake_zones[i].query(&zi->zi_count, &zi->zi_cur_size,
4452 &zi->zi_max_size, &zi->zi_elem_size,
4453 &zi->zi_alloc_size, &sum_space,
4454 &zi->zi_collectable, &zi->zi_exhaustible, &caller_acct);
4455 zn++;
4456 zi++;
4457 }
4458
4459 used = max_zones * sizeof *names;
4460 if (used != names_size)
4461 bzero((char *) (names_addr + used), names_size - used);
4462
4463 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
4464 (vm_map_size_t)used, TRUE, &copy);
4465 assert(kr == KERN_SUCCESS);
4466
4467 *namesp = (zone_name_t *) copy;
4468 *namesCntp = max_zones;
4469
4470 used = max_zones * sizeof *info;
4471 if (used != info_size)
4472 bzero((char *) (info_addr + used), info_size - used);
4473
4474 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
4475 (vm_map_size_t)used, TRUE, &copy);
4476 assert(kr == KERN_SUCCESS);
4477
4478 *infop = (zone_info_t *) copy;
4479 *infoCntp = max_zones;
4480
4481 return KERN_SUCCESS;
4482 }
4483
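/*
 * A minimal user-space sketch of forcing a collection, assuming the
 * caller can obtain the host port and links against the standard Mach
 * interfaces (privilege requirements not shown):
 *
 *	#include <stdio.h>
 *	#include <mach/mach.h>
 *
 *	int main(void)
 *	{
 *		kern_return_t kr = mach_zone_force_gc(mach_host_self());
 *		if (kr != KERN_SUCCESS)
 *			printf("mach_zone_force_gc: %s\n", mach_error_string(kr));
 *		return kr == KERN_SUCCESS ? 0 : 1;
 *	}
 */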
4484 kern_return_t
4485 mach_zone_force_gc(
4486 host_t host)
4487 {
4488
4489 if (host == HOST_NULL)
4490 return KERN_INVALID_HOST;
4491
4492 consider_zone_gc(TRUE);
4493
4494 return (KERN_SUCCESS);
4495 }
4496
4497 extern unsigned int stack_total;
4498 extern unsigned long long stack_allocs;
4499
4500 #if defined(__i386__) || defined (__x86_64__)
4501 extern unsigned int inuse_ptepages_count;
4502 extern long long alloc_ptepages_count;
4503 #endif
4504
4505 void zone_display_zprint(void)
4506 {
4507 unsigned int i;
4508 zone_t the_zone;
4509
4510 	if (first_zone != NULL) {
4511 		the_zone = first_zone;
4512 		for (i = 0; i < num_zones; i++) {
4513 			if (the_zone->cur_size > (1024 * 1024)) {
4514 				printf("%.20s:\t%lu\n", the_zone->zone_name, (uintptr_t)the_zone->cur_size);
4515 }
4516
4517 			if (the_zone->next_zone == NULL) {
4518 break;
4519 }
4520
4521 the_zone = the_zone->next_zone;
4522 }
4523 }
4524
4525 printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size * stack_total));
4526
4527 #if defined(__i386__) || defined (__x86_64__)
4528 printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
4529 #endif
4530
4531 printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
4532 }
4533
4534 zone_t
4535 zone_find_largest(void)
4536 {
4537 unsigned int i;
4538 unsigned int max_zones;
4539 zone_t the_zone;
4540 zone_t zone_largest;
4541
4542 simple_lock(&all_zones_lock);
4543 the_zone = first_zone;
4544 max_zones = num_zones;
4545 simple_unlock(&all_zones_lock);
4546
4547 zone_largest = the_zone;
4548 for (i = 0; i < max_zones; i++) {
4549 if (the_zone->cur_size > zone_largest->cur_size) {
4550 zone_largest = the_zone;
4551 }
4552
4553 if (the_zone->next_zone == NULL) {
4554 break;
4555 }
4556
4557 the_zone = the_zone->next_zone;
4558 }
4559 return zone_largest;
4560 }
4561
4562 #if ZONE_DEBUG
4563
4564 /* should we care about locks here? */
4565
4566 #define zone_in_use(z) ( z->count || z->free_elements \
4567 || !queue_empty(&z->pages.all_free) \
4568 || !queue_empty(&z->pages.intermediate) \
4569 || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign)))
4570
4571 void
4572 zone_debug_enable(
4573 zone_t z)
4574 {
4575 if (zone_debug_enabled(z) || zone_in_use(z) ||
4576 z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
4577 return;
4578 queue_init(&z->active_zones);
4579 z->elem_size += ZONE_DEBUG_OFFSET;
4580 }
4581
4582 void
4583 zone_debug_disable(
4584 zone_t z)
4585 {
4586 if (!zone_debug_enabled(z) || zone_in_use(z))
4587 return;
4588 z->elem_size -= ZONE_DEBUG_OFFSET;
4589 z->active_zones.next = z->active_zones.prev = NULL;
4590 }
4591
4592
4593 #endif /* ZONE_DEBUG */