2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
60 * Author: Avadis Tevanian, Jr.
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
65 #include <zone_debug.h>
66 #include <zone_alias_addr.h>
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <mach_debug/zone_info.h>
75 #include <mach/vm_map.h>
77 #include <kern/kern_types.h>
78 #include <kern/assert.h>
79 #include <kern/host.h>
80 #include <kern/macro_help.h>
81 #include <kern/sched.h>
82 #include <kern/locks.h>
83 #include <kern/sched_prim.h>
84 #include <kern/misc_protos.h>
85 #include <kern/thread_call.h>
86 #include <kern/zalloc.h>
87 #include <kern/kalloc.h>
88 #include <kern/btlog.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_kern.h>
93 #include <vm/vm_page.h>
95 #include <pexpert/pexpert.h>
97 #include <machine/machparam.h>
98 #include <machine/machine_routines.h> /* ml_cpu_get_info */
100 #include <libkern/OSDebug.h>
101 #include <libkern/OSAtomic.h>
102 #include <sys/kdebug.h>
107 * With this option enabled, zones with alloc_size <= PAGE_SIZE allocate
108 * a virtual page from the zone_map, but before zcram-ing the allocated memory
109 * into the zone, the page is translated to use the alias address of the page
110 * in the static kernel region. zone_gc reverses that translation when
111 * scanning the freelist to collect free pages so that it can look up the page
112 * in the zone_page_table, and free it to kmem_free.
114 * The static kernel region is a flat 1:1 mapping of physical memory passed
115 * to xnu by the booter. It is mapped to the range:
116 * [gVirtBase, gVirtBase + gPhysSize]
118 * Accessing memory via the static kernel region is faster due to the
119 * entire region being mapped via large pages, cutting down
122 * zinit favors using PAGE_SIZE backing allocations for a zone unless it would
123 * waste more than 10% space to use a single page, in order to take advantage
124 * of the speed benefit for as many zones as possible.
126 * Zones with > PAGE_SIZE allocations can't take advantage of this
127 * because kernel_memory_allocate doesn't give out physically contiguous pages.
129 * zone_virtual_addr()
130 * - translates an address from the static kernel region to the zone_map
131 * - returns the same address if it's not from the static kernel region
132 * It relies on the fact that a physical page mapped to the
133 * zone_map is not mapped anywhere else (except the static kernel region).
136 * - translates a virtual memory address from the zone_map to the
137 * corresponding address in the static kernel region
142 #define from_zone_map(addr, size) \
143 ((vm_offset_t)(addr) >= zone_map_min_address && \
144 ((vm_offset_t)(addr) + size - 1) < zone_map_max_address )
146 #define from_zone_map(addr, size) \
147 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) >= zone_map_min_address && \
148 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) + size -1) < zone_map_max_address )
152 * Zone Corruption Debugging
154 * We use three techniques to detect modification of a zone element
155 * after it's been freed.
157 * (1) Check the freelist next pointer for sanity.
158 * (2) Store a backup of the next pointer at the end of the element,
159 * and compare it to the primary next pointer when the element is allocated
160 * to detect corruption of the freelist due to use-after-free bugs.
161 * The backup pointer is also XORed with a per-boot random cookie.
162 * (3) Poison the freed element by overwriting it with 0xdeadbeef,
163 * and check for that value when the element is being reused to make sure
164 * no part of the element has been modified while it was on the freelist.
165 * This will also help catch read-after-frees, as code will now dereference
166 * 0xdeadbeef instead of a valid but freed pointer.
168 * (1) and (2) occur for every allocation and free to a zone.
169 * This is done to make it slightly more difficult for an attacker to
170 * manipulate the freelist to behave in a specific way.
172 * Poisoning (3) occurs periodically for every N frees (counted per-zone)
173 * and on every free for zones smaller than a cacheline. If -zp
174 * is passed as a boot arg, poisoning occurs for every free.
176 * Performance slowdown is inversely proportional to the frequency of poisoning,
177 * with a 4-5% hit around N=1, down to ~0.3% at N=16 and just "noise" at N=32
178 * and higher. You can expect to find a 100% reproducible bug in an average of
179 * N tries, with a standard deviation of about N, but you will want to set
180 * "-zp" to always poison every free if you are attempting to reproduce
183 * For a more heavyweight, but finer-grained method of detecting misuse
184 * of zone memory, look up the "Guard mode" zone allocator in gzalloc.c.
186 * Zone Corruption Logging
188 * You can also track where corruptions come from by using the boot-arguments
189 * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later
190 * in this document for more implementation and usage information.
192 * Zone Leak Detection
194 * To debug leaks of zone memory, use the zone leak detection tool 'zleaks'
195 * found later in this file via the showtopztrace and showz* macros in kgmacros,
196 * or use zlog without the -zc argument.
200 /* Returns TRUE if we rolled over the counter at factor */
201 static inline boolean_t
202 sample_counter(volatile uint32_t * count_p
, uint32_t factor
)
204 uint32_t old_count
, new_count
;
205 boolean_t rolled_over
;
208 new_count
= old_count
= *count_p
;
210 if (++new_count
>= factor
) {
217 } while (!OSCompareAndSwap(old_count
, new_count
, count_p
));
222 #if defined(__LP64__)
223 #define ZP_POISON 0xdeadbeefdeadbeef
225 #define ZP_POISON 0xdeadbeef
228 #define ZP_DEFAULT_SAMPLING_FACTOR 16
229 #define ZP_DEFAULT_SCALE_FACTOR 4
232 * A zp_factor of 0 indicates zone poisoning is disabled,
233 * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline).
234 * Passing the -no-zp boot-arg disables even this behavior.
235 * In all cases, we record and check the integrity of a backup pointer.
238 /* set by zp-factor=N boot arg, zero indicates non-tiny poisoning disabled */
239 uint32_t zp_factor
= 0;
241 /* set by zp-scale=N boot arg, scales zp_factor by zone size */
242 uint32_t zp_scale
= 0;
244 /* set in zp_init, zero indicates -no-zp boot-arg */
245 vm_size_t zp_tiny_zone_limit
= 0;
247 /* initialized to a per-boot random value in zp_init */
248 uintptr_t zp_poisoned_cookie
= 0;
249 uintptr_t zp_nopoison_cookie
= 0;
253 * initialize zone poisoning
254 * called from zone_bootstrap before any allocations are made from zalloc
262 * Initialize backup pointer random cookie for poisoned elements
263 * Try not to call early_random() back to back, it may return
264 * the same value if mach_absolute_time doesn't have sufficient time
265 * to tick over between calls. <rdar://problem/11597395>
266 * (This is only a problem on embedded devices)
268 zp_poisoned_cookie
= (uintptr_t) early_random();
271 * Always poison zones smaller than a cacheline,
272 * because it's pretty close to free
274 ml_cpu_info_t cpu_info
;
275 ml_cpu_get_info(&cpu_info
);
276 zp_tiny_zone_limit
= (vm_size_t
) cpu_info
.cache_line_size
;
278 zp_factor
= ZP_DEFAULT_SAMPLING_FACTOR
;
279 zp_scale
= ZP_DEFAULT_SCALE_FACTOR
;
281 //TODO: Bigger permutation?
283 * Permute the default factor +/- 1 to make it less predictable
284 * This adds or subtracts ~4 poisoned objects per 1000 frees.
286 if (zp_factor
!= 0) {
287 uint32_t rand_bits
= early_random() & 0x3;
289 if (rand_bits
== 0x1)
291 else if (rand_bits
== 0x2)
293 /* if 0x0 or 0x3, leave it alone */
296 /* -zp: enable poisoning for every alloc and free */
297 if (PE_parse_boot_argn("-zp", temp_buf
, sizeof(temp_buf
))) {
301 /* -no-zp: disable poisoning completely even for tiny zones */
302 if (PE_parse_boot_argn("-no-zp", temp_buf
, sizeof(temp_buf
))) {
304 zp_tiny_zone_limit
= 0;
305 printf("Zone poisoning disabled\n");
308 /* zp-factor=XXXX: override how often to poison freed zone elements */
309 if (PE_parse_boot_argn("zp-factor", &zp_factor
, sizeof(zp_factor
))) {
310 printf("Zone poisoning factor override: %u\n", zp_factor
);
313 /* zp-scale=XXXX: override how much zone size scales zp-factor by */
314 if (PE_parse_boot_argn("zp-scale", &zp_scale
, sizeof(zp_scale
))) {
315 printf("Zone poisoning scale factor override: %u\n", zp_scale
);
318 /* Initialize backup pointer random cookie for unpoisoned elements */
319 zp_nopoison_cookie
= (uintptr_t) early_random();
322 if (zp_poisoned_cookie
== zp_nopoison_cookie
)
323 panic("early_random() is broken: %p and %p are not random\n",
324 (void *) zp_poisoned_cookie
, (void *) zp_nopoison_cookie
);
328 * Use the last bit in the backup pointer to hint poisoning state
329 * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
330 * the low bits are zero.
332 zp_poisoned_cookie
|= (uintptr_t)0x1ULL
;
333 zp_nopoison_cookie
&= ~((uintptr_t)0x1ULL
);
335 #if defined(__LP64__)
337 * Make backup pointers more obvious in GDB for 64 bit
338 * by making OxFFFFFF... ^ cookie = 0xFACADE...
339 * (0xFACADE = 0xFFFFFF ^ 0x053521)
340 * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011)
341 * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
342 * by the sanity check, so it's OK for that part of the cookie to be predictable.
344 * TODO: Use #defines, xors, and shifts
347 zp_poisoned_cookie
&= 0x000000FFFFFFFFFF;
348 zp_poisoned_cookie
|= 0x0535210000000000; /* 0xFACADE */
350 zp_nopoison_cookie
&= 0x000000FFFFFFFFFF;
351 zp_nopoison_cookie
|= 0x3f00110000000000; /* 0xC0FFEE */
355 /* zone_map page count for page table structure */
356 uint64_t zone_map_table_page_count
= 0;
359 * These macros are used to keep track of the number
360 * of pages being used by the zone currently. The
361 * z->page_count is protected by the zone lock.
363 #define ZONE_PAGE_COUNT_INCR(z, count) \
365 OSAddAtomic64(count, &(z->page_count)); \
368 #define ZONE_PAGE_COUNT_DECR(z, count) \
370 OSAddAtomic64(-count, &(z->page_count)); \
373 /* for is_sane_zone_element and garbage collection */
375 vm_offset_t zone_map_min_address
= 0; /* initialized in zone_init */
376 vm_offset_t zone_map_max_address
= 0;
378 /* Helpful for walking through a zone's free element list. */
379 struct zone_free_element
{
380 struct zone_free_element
*next
;
382 /* void *backup_ptr; */
385 struct zone_page_metadata
{
387 struct zone_free_element
*elements
;
389 uint16_t alloc_count
;
393 /* The backup pointer is stored in the last pointer-sized location in an element. */
394 static inline vm_offset_t
*
395 get_backup_ptr(vm_size_t elem_size
,
396 vm_offset_t
*element
)
398 return (vm_offset_t
*) ((vm_offset_t
)element
+ elem_size
- sizeof(vm_offset_t
));
401 static inline struct zone_page_metadata
*
402 get_zone_page_metadata(struct zone_free_element
*element
)
404 return (struct zone_page_metadata
*)(trunc_page((vm_offset_t
)element
));
408 * Zone checking helper function.
409 * A pointer that satisfies these conditions is OK to be a freelist next pointer
410 * A pointer that doesn't satisfy these conditions indicates corruption
412 static inline boolean_t
413 is_sane_zone_ptr(zone_t zone
,
417 /* Must be aligned to pointer boundary */
418 if (__improbable((addr
& (sizeof(vm_offset_t
) - 1)) != 0))
421 /* Must be a kernel address */
422 if (__improbable(!pmap_kernel_va(addr
)))
425 /* Must be from zone map if the zone only uses memory from the zone_map */
427 * TODO: Remove the zone->collectable check when every
428 * zone using foreign memory is properly tagged with allows_foreign
430 if (zone
->collectable
&& !zone
->allows_foreign
) {
433 * If this address is in the static kernel region, it might be
434 * the alias address of a valid zone element.
435 * If we tried to find the zone_virtual_addr() of an invalid
436 * address in the static kernel region, it will panic, so don't
437 * check addresses in this region.
439 * TODO: Use a safe variant of zone_virtual_addr to
440 * make this check more accurate
442 * The static kernel region is mapped at:
443 * [gVirtBase, gVirtBase + gPhysSize]
445 if ((addr
- gVirtBase
) < gPhysSize
)
448 /* check if addr is from zone map */
449 if (addr
>= zone_map_min_address
&&
450 (addr
+ obj_size
- 1) < zone_map_max_address
)
459 static inline boolean_t
460 is_sane_zone_page_metadata(zone_t zone
,
461 vm_offset_t page_meta
)
463 /* NULL page metadata structures are invalid */
466 return is_sane_zone_ptr(zone
, page_meta
, sizeof(struct zone_page_metadata
));
469 static inline boolean_t
470 is_sane_zone_element(zone_t zone
,
473 /* NULL is OK because it indicates the tail of the list */
476 return is_sane_zone_ptr(zone
, addr
, zone
->elem_size
);
479 /* Someone wrote to freed memory. */
480 static inline void /* noreturn */
481 zone_element_was_modified_panic(zone_t zone
,
484 vm_offset_t expected
,
487 panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p",
491 (void *) (expected
^ found
),
493 (uint32_t) zone
->elem_size
,
495 (void *) zp_nopoison_cookie
,
496 (void *) zp_poisoned_cookie
);
500 * The primary and backup pointers don't match.
501 * Determine which one was likely the corrupted pointer, find out what it
502 * probably should have been, and panic.
503 * I would like to mark this as noreturn, but panic() isn't marked noreturn.
505 static void /* noreturn */
506 backup_ptr_mismatch_panic(zone_t zone
,
511 vm_offset_t likely_backup
;
513 boolean_t sane_backup
;
514 boolean_t sane_primary
= is_sane_zone_element(zone
, primary
);
515 boolean_t element_was_poisoned
= (backup
& 0x1) ? TRUE
: FALSE
;
517 #if defined(__LP64__)
518 /* We can inspect the tag in the upper bits for additional confirmation */
519 if ((backup
& 0xFFFFFF0000000000) == 0xFACADE0000000000)
520 element_was_poisoned
= TRUE
;
521 else if ((backup
& 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
522 element_was_poisoned
= FALSE
;
525 if (element_was_poisoned
) {
526 likely_backup
= backup
^ zp_poisoned_cookie
;
527 sane_backup
= is_sane_zone_element(zone
, likely_backup
);
529 likely_backup
= backup
^ zp_nopoison_cookie
;
530 sane_backup
= is_sane_zone_element(zone
, likely_backup
);
533 /* The primary is definitely the corrupted one */
534 if (!sane_primary
&& sane_backup
)
535 zone_element_was_modified_panic(zone
, element
, primary
, likely_backup
, 0);
537 /* The backup is definitely the corrupted one */
538 if (sane_primary
&& !sane_backup
)
539 zone_element_was_modified_panic(zone
, element
, backup
,
540 (primary
^ (element_was_poisoned
? zp_poisoned_cookie
: zp_nopoison_cookie
)),
541 zone
->elem_size
- sizeof(vm_offset_t
));
544 * Not sure which is the corrupted one.
545 * It's less likely that the backup pointer was overwritten with
546 * ( (sane address) ^ (valid cookie) ), so we'll guess that the
547 * primary pointer has been overwritten with a sane but incorrect address.
549 if (sane_primary
&& sane_backup
)
550 zone_element_was_modified_panic(zone
, element
, primary
, likely_backup
, 0);
552 /* Neither are sane, so just guess. */
553 zone_element_was_modified_panic(zone
, element
, primary
, likely_backup
, 0);
557 * Sets the next element of tail to elem.
559 * Preserves the poisoning state of the element.
562 append_zone_element(zone_t zone
,
563 struct zone_free_element
*tail
,
564 struct zone_free_element
*elem
)
566 vm_offset_t
*backup
= get_backup_ptr(zone
->elem_size
, (vm_offset_t
*) tail
);
568 vm_offset_t old_backup
= *backup
;
570 vm_offset_t old_next
= (vm_offset_t
) tail
->next
;
571 vm_offset_t new_next
= (vm_offset_t
) elem
;
573 if (old_next
== (old_backup
^ zp_nopoison_cookie
))
574 *backup
= new_next
^ zp_nopoison_cookie
;
575 else if (old_next
== (old_backup
^ zp_poisoned_cookie
))
576 *backup
= new_next
^ zp_poisoned_cookie
;
578 backup_ptr_mismatch_panic(zone
,
588 * Insert a linked list of elements (delineated by head and tail) at the head of
589 * the zone free list. Every element in the list being added has already gone
590 * through append_zone_element, so their backup pointers are already
592 * Precondition: There should be no elements after tail
595 add_list_to_zone(zone_t zone
,
596 struct zone_free_element
*head
,
597 struct zone_free_element
*tail
)
599 assert(tail
->next
== NULL
);
600 assert(!zone
->use_page_list
);
602 append_zone_element(zone
, tail
, zone
->free_elements
);
604 zone
->free_elements
= head
;
609 * Adds the element to the head of the zone's free list
610 * Keeps a backup next-pointer at the end of the element
613 free_to_zone(zone_t zone
,
617 vm_offset_t old_head
;
618 struct zone_page_metadata
*page_meta
;
620 vm_offset_t
*primary
= (vm_offset_t
*) element
;
621 vm_offset_t
*backup
= get_backup_ptr(zone
->elem_size
, primary
);
623 if (zone
->use_page_list
) {
624 page_meta
= get_zone_page_metadata((struct zone_free_element
*)element
);
625 assert(page_meta
->zone
== zone
);
626 old_head
= (vm_offset_t
)page_meta
->elements
;
628 old_head
= (vm_offset_t
)zone
->free_elements
;
632 if (__improbable(!is_sane_zone_element(zone
, old_head
)))
633 panic("zfree: invalid head pointer %p for freelist of zone %s\n",
634 (void *) old_head
, zone
->zone_name
);
637 if (__improbable(!is_sane_zone_element(zone
, element
)))
638 panic("zfree: freeing invalid pointer %p to zone %s\n",
639 (void *) element
, zone
->zone_name
);
642 * Always write a redundant next pointer
643 * So that it is more difficult to forge, xor it with a random cookie
644 * A poisoned element is indicated by using zp_poisoned_cookie
645 * instead of zp_nopoison_cookie
648 *backup
= old_head
^ (poison
? zp_poisoned_cookie
: zp_nopoison_cookie
);
650 /* Insert this element at the head of the free list */
652 if (zone
->use_page_list
) {
653 page_meta
->elements
= (struct zone_free_element
*)element
;
654 page_meta
->free_count
++;
655 if (zone
->allows_foreign
&& !from_zone_map(element
, zone
->elem_size
)) {
656 if (page_meta
->free_count
== 1) {
657 /* first foreign element freed on page, move from all_used */
658 remqueue((queue_entry_t
)page_meta
);
659 enqueue_tail(&zone
->pages
.any_free_foreign
, (queue_entry_t
)page_meta
);
661 /* no other list transitions */
663 } else if (page_meta
->free_count
== page_meta
->alloc_count
) {
664 /* whether the page was on the intermediate or all_used, queue, move it to free */
665 remqueue((queue_entry_t
)page_meta
);
666 enqueue_tail(&zone
->pages
.all_free
, (queue_entry_t
)page_meta
);
667 } else if (page_meta
->free_count
== 1) {
668 /* first free element on page, move from all_used */
669 remqueue((queue_entry_t
)page_meta
);
670 enqueue_tail(&zone
->pages
.intermediate
, (queue_entry_t
)page_meta
);
673 zone
->free_elements
= (struct zone_free_element
*)element
;
681 * Removes an element from the zone's free list, returning 0 if the free list is empty.
682 * Verifies that the next-pointer and backup next-pointer are intact,
683 * and verifies that a poisoned element hasn't been modified.
685 static inline vm_offset_t
686 try_alloc_from_zone(zone_t zone
,
687 boolean_t
* check_poison
)
690 struct zone_page_metadata
*page_meta
;
692 *check_poison
= FALSE
;
694 /* if zone is empty, bail */
695 if (zone
->use_page_list
) {
696 if (zone
->allows_foreign
&& !queue_empty(&zone
->pages
.any_free_foreign
))
697 page_meta
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.any_free_foreign
);
698 else if (!queue_empty(&zone
->pages
.intermediate
))
699 page_meta
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.intermediate
);
700 else if (!queue_empty(&zone
->pages
.all_free
))
701 page_meta
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.all_free
);
706 /* Check if page_meta passes is_sane_zone_element */
707 if (__improbable(!is_sane_zone_page_metadata(zone
, (vm_offset_t
)page_meta
)))
708 panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
709 (void *) page_meta
, zone
->zone_name
);
710 assert(page_meta
->zone
== zone
);
711 element
= (vm_offset_t
)page_meta
->elements
;
713 if (zone
->free_elements
== NULL
)
716 element
= (vm_offset_t
)zone
->free_elements
;
720 if (__improbable(!is_sane_zone_element(zone
, element
)))
721 panic("zfree: invalid head pointer %p for freelist of zone %s\n",
722 (void *) element
, zone
->zone_name
);
725 vm_offset_t
*primary
= (vm_offset_t
*) element
;
726 vm_offset_t
*backup
= get_backup_ptr(zone
->elem_size
, primary
);
728 vm_offset_t next_element
= *primary
;
729 vm_offset_t next_element_backup
= *backup
;
732 * backup_ptr_mismatch_panic will determine what next_element
733 * should have been, and print it appropriately
735 if (__improbable(!is_sane_zone_element(zone
, next_element
)))
736 backup_ptr_mismatch_panic(zone
, element
, next_element
, next_element_backup
);
738 /* Check the backup pointer for the regular cookie */
739 if (__improbable(next_element
!= (next_element_backup
^ zp_nopoison_cookie
))) {
741 /* Check for the poisoned cookie instead */
742 if (__improbable(next_element
!= (next_element_backup
^ zp_poisoned_cookie
)))
743 /* Neither cookie is valid, corruption has occurred */
744 backup_ptr_mismatch_panic(zone
, element
, next_element
, next_element_backup
);
747 * Element was marked as poisoned, so check its integrity before using it.
749 *check_poison
= TRUE
;
752 if (zone
->use_page_list
) {
754 /* Make sure the page_meta is at the correct offset from the start of page */
755 if (__improbable(page_meta
!= get_zone_page_metadata((struct zone_free_element
*)element
)))
756 panic("zalloc: metadata located at incorrect location on page of zone %s\n",
759 /* Make sure next_element belongs to the same page as page_meta */
761 if (__improbable(page_meta
!= get_zone_page_metadata((struct zone_free_element
*)next_element
)))
762 panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
763 (void *)next_element
, (void *)element
, zone
->zone_name
);
767 /* Remove this element from the free list */
768 if (zone
->use_page_list
) {
770 page_meta
->elements
= (struct zone_free_element
*)next_element
;
771 page_meta
->free_count
--;
773 if (zone
->allows_foreign
&& !from_zone_map(element
, zone
->elem_size
)) {
774 if (page_meta
->free_count
== 0) {
775 /* move to all used */
776 remqueue((queue_entry_t
)page_meta
);
777 enqueue_tail(&zone
->pages
.all_used
, (queue_entry_t
)page_meta
);
779 /* no other list transitions */
781 } else if (page_meta
->free_count
== 0) {
782 /* remove from intermediate or free, move to all_used */
783 remqueue((queue_entry_t
)page_meta
);
784 enqueue_tail(&zone
->pages
.all_used
, (queue_entry_t
)page_meta
);
785 } else if (page_meta
->alloc_count
== page_meta
->free_count
+ 1) {
786 /* remove from free, move to intermediate */
787 remqueue((queue_entry_t
)page_meta
);
788 enqueue_tail(&zone
->pages
.intermediate
, (queue_entry_t
)page_meta
);
791 zone
->free_elements
= (struct zone_free_element
*)next_element
;
802 * End of zone poisoning
806 * Fake zones for things that want to report via zprint but are not actually zones.
808 struct fake_zone_info
{
812 vm_size_t
*, vm_size_t
*, vm_size_t
*, vm_size_t
*,
813 uint64_t *, int *, int *, int *);
816 static const struct fake_zone_info fake_zones
[] = {
818 static const unsigned int num_fake_zones
=
819 sizeof (fake_zones
) / sizeof (fake_zones
[0]);
824 boolean_t zinfo_per_task
= FALSE
; /* enabled by -zinfop in boot-args */
825 #define ZINFO_SLOTS 200 /* for now */
826 #define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
829 * Support for garbage collection of unused zone pages
831 * The kernel virtually allocates the "zone map" submap of the kernel
832 * map. When an individual zone needs more storage, memory is allocated
833 * out of the zone map, and the two-level "zone_page_table" is
834 * on-demand expanded so that it has entries for those pages.
835 * zone_page_init()/zone_page_alloc() initialize "alloc_count"
836 * to the number of zone elements that occupy the zone page (which may
837 * be a minimum of 1, including if a zone element spans multiple
840 * Asynchronously, the zone_gc() logic attempts to walk zone free
841 * lists to see if all the elements on a zone page are free. If
842 * "collect_count" (which it increments during the scan) matches
843 * "alloc_count", the zone page is a candidate for collection and the
844 * physical page is returned to the VM system. During this process, the
845 * first word of the zone page is re-used to maintain a linked list of
846 * to-be-collected zone pages.
848 typedef uint32_t zone_page_index_t
;
849 #define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)
851 struct zone_page_table_entry
{
852 volatile uint16_t alloc_count
;
853 volatile uint16_t collect_count
;
856 #define ZONE_PAGE_USED 0
857 #define ZONE_PAGE_UNUSED 0xffff
864 void zone_page_alloc(
868 void zone_page_free_element(
869 zone_page_index_t
*free_page_head
,
870 zone_page_index_t
*free_page_tail
,
874 void zone_page_collect(
878 boolean_t
zone_page_collectable(
886 void zone_display_zprint(void);
888 zone_t
zone_find_largest(void);
891 * Async allocation of zones
892 * This mechanism allows for bootstrapping an empty zone which is setup with
893 * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call
894 * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free.
895 * This will prime the zone for the next use.
897 * Currently the thread_callout function (zalloc_async) will loop through all zones
898 * looking for any zone with async_pending set and do the work for it.
900 * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call,
901 * then zalloc_noblock to an empty zone may succeed.
904 thread_call_param_t p0
,
905 thread_call_param_t p1
);
907 static thread_call_data_t call_async_alloc
;
909 vm_map_t zone_map
= VM_MAP_NULL
;
911 zone_t zone_zone
= ZONE_NULL
; /* the zone containing other zones */
913 zone_t zinfo_zone
= ZONE_NULL
; /* zone of per-task zone info */
916 * The VM system gives us an initial chunk of memory.
917 * It has to be big enough to allocate the zone_zone
918 * all the way through the pmap zone.
922 vm_size_t zdata_size
;
924 * Align elements that use the zone page list to 32 byte boundaries.
926 #define ZONE_ELEMENT_ALIGNMENT 32
928 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
929 #define zone_sleep(zone) \
930 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
933 * The zone_locks_grp allows for collecting lock statistics.
934 * All locks are associated to this group in zinit.
935 * Look at tools/lockstat for debugging lock contention.
938 lck_grp_t zone_locks_grp
;
939 lck_grp_attr_t zone_locks_grp_attr
;
941 #define lock_zone_init(zone) \
943 lck_attr_setdefault(&(zone)->lock_attr); \
944 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
945 &zone_locks_grp, &(zone)->lock_attr); \
948 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
951 * Garbage collection map information
953 #define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
954 struct zone_page_table_entry
* volatile zone_page_table
[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
];
955 vm_size_t zone_page_table_used_size
;
956 unsigned int zone_pages
;
957 unsigned int zone_page_table_second_level_size
; /* power of 2 */
958 unsigned int zone_page_table_second_level_shift_amount
;
960 #define zone_page_table_first_level_slot(x) ((x) >> zone_page_table_second_level_shift_amount)
961 #define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))
963 void zone_page_table_expand(zone_page_index_t pindex
);
964 struct zone_page_table_entry
*zone_page_table_lookup(zone_page_index_t pindex
);
967 * Exclude more than one concurrent garbage collection
969 decl_lck_mtx_data(, zone_gc_lock
)
971 lck_attr_t zone_gc_lck_attr
;
972 lck_grp_t zone_gc_lck_grp
;
973 lck_grp_attr_t zone_gc_lck_grp_attr
;
974 lck_mtx_ext_t zone_gc_lck_ext
;
977 * Protects first_zone, last_zone, num_zones,
978 * and the next_zone field of zones.
980 decl_simple_lock_data(, all_zones_lock
)
983 unsigned int num_zones
;
985 boolean_t zone_gc_allowed
= TRUE
;
986 boolean_t zone_gc_forced
= FALSE
;
987 boolean_t panic_include_zprint
= FALSE
;
988 boolean_t zone_gc_allowed_by_time_throttle
= TRUE
;
990 vm_offset_t panic_kext_memory_info
= 0;
991 vm_size_t panic_kext_memory_size
= 0;
993 #define ZALLOC_DEBUG_ZONEGC 0x00000001
994 #define ZALLOC_DEBUG_ZCRAM 0x00000002
995 uint32_t zalloc_debug
= 0;
998 * Zone leak debugging code
1000 * When enabled, this code keeps a log to track allocations to a particular zone that have not
1001 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
1002 * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
1005 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
1006 * is the name of the zone you wish to log.
1008 * This code only tracks one zone, so you need to identify which one is leaking first.
1009 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
1010 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
1011 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
1012 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
1013 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
1014 * See the help in the kgmacros for usage info.
1017 * Zone corruption logging
1019 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
1020 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
1021 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
1022 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
1023 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
1024 * corrupted to examine its history. This should lead to the source of the corruption.
1027 static int log_records
; /* size of the log, expressed in number of records */
1029 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
1031 static char zone_name_to_log
[MAX_ZONE_NAME
] = ""; /* the zone name we're logging, if any */
1033 /* Log allocations and frees to help debug a zone element corruption */
1034 boolean_t corruption_debug_flag
= FALSE
; /* enabled by "-zc" boot-arg */
1037 * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
1038 * the number of records you want in the log. For example, "zrecs=1000" sets it to 1000 records. Note
1039 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
1040 * but one doesn't generally care about performance when tracking down a leak. The log is capped at 8000
1041 * records since going much larger than this tends to make the system unresponsive and unbootable on small
1042 * memory configurations. The default value is 4000 records.
1045 #if defined(__LP64__)
1046 #define ZRECORDS_MAX 128000 /* Max records allowed in the log */
1048 #define ZRECORDS_MAX 8000 /* Max records allowed in the log */
1050 #define ZRECORDS_DEFAULT 4000 /* default records in log if zrecs is not specificed in boot-args */
1053 * Each record in the log contains a pointer to the zone element it refers to,
1054 * and a small array to hold the pc's from the stack trace. A
1055 * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging,
1056 * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees.
1057 * If the log fills, old records are replaced as if it were a circular buffer.
1062 * Opcodes for the btlog operation field:
1069 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
1071 static btlog_t
*zlog_btlog
; /* the log itself, dynamically allocated when logging is enabled */
1072 static zone_t zone_of_interest
= NULL
; /* the zone being watched; corresponds to zone_name_to_log */
1075 * Decide if we want to log this zone by doing a string compare between a zone name and the name
1076 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
1077 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
1078 * match a space in the zone name.
1082 log_this_zone(const char *zonename
, const char *logname
)
1085 const char *zc
= zonename
;
1086 const char *lc
= logname
;
1089 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
1092 for (len
= 1; len
<= MAX_ZONE_NAME
; zc
++, lc
++, len
++) {
1095 * If the current characters don't match, check for a space in
1096 * in the zone name and a corresponding period in the log name.
1097 * If that's not there, then the strings don't match.
1100 if (*zc
!= *lc
&& !(*zc
== ' ' && *lc
== '.'))
1104 * The strings are equal so far. If we're at the end, then it's a match.
1116 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
1117 * the buffer for the records has been allocated.
1120 #define DO_LOGGING(z) (zlog_btlog && (z) == zone_of_interest)
1122 extern boolean_t kmem_alloc_ready
;
1126 #pragma mark Zone Leak Detection
1129 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
1130 * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
1131 * backtrace. Every free, we examine the table and determine if the allocation was being tracked,
1132 * and stop tracking it if it was being tracked.
1134 * We track the allocations in the zallocations hash table, which stores the address that was returned from
1135 * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
1136 * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
1137 * backtraces - we don't store them more than once.
1139 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
1140 * a large amount of virtual space.
1142 #define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */
1143 #define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */
1144 #define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */
1145 #define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */
1146 uint32_t zleak_state
= 0; /* State of collection, as above */
1148 boolean_t panic_include_ztrace
= FALSE
; /* Enable zleak logging on panic */
1149 vm_size_t zleak_global_tracking_threshold
; /* Size of zone map at which to start collecting data */
1150 vm_size_t zleak_per_zone_tracking_threshold
; /* Size a zone will have before we will collect data on it */
1151 unsigned int zleak_sample_factor
= 1000; /* Allocations per sample attempt */
1154 * Counters for allocation statistics.
1157 /* Times two active records want to occupy the same spot */
1158 unsigned int z_alloc_collisions
= 0;
1159 unsigned int z_trace_collisions
= 0;
1161 /* Times a new record lands on a spot previously occupied by a freed allocation */
1162 unsigned int z_alloc_overwrites
= 0;
1163 unsigned int z_trace_overwrites
= 0;
1165 /* Times a new alloc or trace is put into the hash table */
1166 unsigned int z_alloc_recorded
= 0;
1167 unsigned int z_trace_recorded
= 0;
1169 /* Times zleak_log returned false due to not being able to acquire the lock */
1170 unsigned int z_total_conflicts
= 0;
1173 #pragma mark struct zallocation
1175 * Structure for keeping track of an allocation
1176 * An allocation bucket is in use if its element is not NULL
1178 struct zallocation
{
1179 uintptr_t za_element
; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
1180 vm_size_t za_size
; /* how much memory did this allocation take up? */
1181 uint32_t za_trace_index
; /* index into ztraces for backtrace associated with allocation */
1182 /* TODO: #if this out */
1183 uint32_t za_hit_count
; /* for determining effectiveness of hash function */
1186 /* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
1187 uint32_t zleak_alloc_buckets
= CONFIG_ZLEAK_ALLOCATION_MAP_NUM
;
1188 uint32_t zleak_trace_buckets
= CONFIG_ZLEAK_TRACE_MAP_NUM
;
1190 vm_size_t zleak_max_zonemap_size
;
1192 /* Hashmaps of allocations and their corresponding traces */
1193 static struct zallocation
* zallocations
;
1194 static struct ztrace
* ztraces
;
1196 /* not static so that panic can see this, see kern/debug.c */
1197 struct ztrace
* top_ztrace
;
1199 /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
1200 static lck_spin_t zleak_lock
;
1201 static lck_attr_t zleak_lock_attr
;
1202 static lck_grp_t zleak_lock_grp
;
1203 static lck_grp_attr_t zleak_lock_grp_attr
;
1206 * Initializes the zone leak monitor. Called from zone_init()
1209 zleak_init(vm_size_t max_zonemap_size
)
1211 char scratch_buf
[16];
1212 boolean_t zleak_enable_flag
= FALSE
;
1214 zleak_max_zonemap_size
= max_zonemap_size
;
1215 zleak_global_tracking_threshold
= max_zonemap_size
/ 2;
1216 zleak_per_zone_tracking_threshold
= zleak_global_tracking_threshold
/ 8;
1218 /* -zleakoff (flag to disable zone leak monitor) */
1219 if (PE_parse_boot_argn("-zleakoff", scratch_buf
, sizeof(scratch_buf
))) {
1220 zleak_enable_flag
= FALSE
;
1221 printf("zone leak detection disabled\n");
1223 zleak_enable_flag
= TRUE
;
1224 printf("zone leak detection enabled\n");
1227 /* zfactor=XXXX (override how often to sample the zone allocator) */
1228 if (PE_parse_boot_argn("zfactor", &zleak_sample_factor
, sizeof(zleak_sample_factor
))) {
1229 printf("Zone leak factor override: %u\n", zleak_sample_factor
);
1232 /* zleak-allocs=XXXX (override number of buckets in zallocations) */
1233 if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets
, sizeof(zleak_alloc_buckets
))) {
1234 printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets
);
1235 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
1236 if (zleak_alloc_buckets
== 0 || (zleak_alloc_buckets
& (zleak_alloc_buckets
-1))) {
1237 printf("Override isn't a power of two, bad things might happen!\n");
1241 /* zleak-traces=XXXX (override number of buckets in ztraces) */
1242 if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets
, sizeof(zleak_trace_buckets
))) {
1243 printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets
);
1244 /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
1245 if (zleak_trace_buckets
== 0 || (zleak_trace_buckets
& (zleak_trace_buckets
-1))) {
1246 printf("Override isn't a power of two, bad things might happen!\n");
1250 /* allocate the zleak_lock */
1251 lck_grp_attr_setdefault(&zleak_lock_grp_attr
);
1252 lck_grp_init(&zleak_lock_grp
, "zleak_lock", &zleak_lock_grp_attr
);
1253 lck_attr_setdefault(&zleak_lock_attr
);
1254 lck_spin_init(&zleak_lock
, &zleak_lock_grp
, &zleak_lock_attr
);
1256 if (zleak_enable_flag
) {
1257 zleak_state
= ZLEAK_STATE_ENABLED
;
1264 * Support for kern.zleak.active sysctl - a simplified
1265 * version of the zleak_state variable.
1268 get_zleak_state(void)
1270 if (zleak_state
& ZLEAK_STATE_FAILED
)
1272 if (zleak_state
& ZLEAK_STATE_ACTIVE
)
1281 zleak_activate(void)
1283 kern_return_t retval
;
1284 vm_size_t z_alloc_size
= zleak_alloc_buckets
* sizeof(struct zallocation
);
1285 vm_size_t z_trace_size
= zleak_trace_buckets
* sizeof(struct ztrace
);
1286 void *allocations_ptr
= NULL
;
1287 void *traces_ptr
= NULL
;
1289 /* Only one thread attempts to activate at a time */
1290 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
1291 return KERN_SUCCESS
;
1294 /* Indicate that we're doing the setup */
1295 lck_spin_lock(&zleak_lock
);
1296 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
1297 lck_spin_unlock(&zleak_lock
);
1298 return KERN_SUCCESS
;
1301 zleak_state
|= ZLEAK_STATE_ACTIVATING
;
1302 lck_spin_unlock(&zleak_lock
);
1304 /* Allocate and zero tables */
1305 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&allocations_ptr
, z_alloc_size
, VM_KERN_MEMORY_OSFMK
);
1306 if (retval
!= KERN_SUCCESS
) {
1310 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&traces_ptr
, z_trace_size
, VM_KERN_MEMORY_OSFMK
);
1311 if (retval
!= KERN_SUCCESS
) {
1315 bzero(allocations_ptr
, z_alloc_size
);
1316 bzero(traces_ptr
, z_trace_size
);
1318 /* Everything's set. Install tables, mark active. */
1319 zallocations
= allocations_ptr
;
1320 ztraces
= traces_ptr
;
1323 * Initialize the top_ztrace to the first entry in ztraces,
1324 * so we don't have to check for null in zleak_log
1326 top_ztrace
= &ztraces
[0];
1329 * Note that we do need a barrier between installing
1330 * the tables and setting the active flag, because the zfree()
1331 * path accesses the table without a lock if we're active.
1333 lck_spin_lock(&zleak_lock
);
1334 zleak_state
|= ZLEAK_STATE_ACTIVE
;
1335 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
1336 lck_spin_unlock(&zleak_lock
);
1342 * If we fail to allocate memory, don't further tax
1343 * the system by trying again.
1345 lck_spin_lock(&zleak_lock
);
1346 zleak_state
|= ZLEAK_STATE_FAILED
;
1347 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
1348 lck_spin_unlock(&zleak_lock
);
1350 if (allocations_ptr
!= NULL
) {
1351 kmem_free(kernel_map
, (vm_offset_t
)allocations_ptr
, z_alloc_size
);
1354 if (traces_ptr
!= NULL
) {
1355 kmem_free(kernel_map
, (vm_offset_t
)traces_ptr
, z_trace_size
);
1362 * TODO: What about allocations that never get deallocated,
1363 * especially ones with unique backtraces? Should we wait to record
1364 * until after boot has completed?
1365 * (How many persistent zallocs are there?)
1369 * This function records the allocation in the allocations table,
1370 * and stores the associated backtrace in the traces table
1371 * (or just increments the refcount if the trace is already recorded)
1372 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
1373 * the associated trace's refcount is decremented.
1374 * If the trace slot is in use, it returns.
1375 * The refcount is incremented by the amount of memory the allocation consumes.
1376 * The return value indicates whether to try again next time.
1379 zleak_log(uintptr_t* bt
,
1382 vm_size_t allocation_size
)
1384 /* Quit if there's someone else modifying the hash tables */
1385 if (!lck_spin_try_lock(&zleak_lock
)) {
1386 z_total_conflicts
++;
1390 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
1392 uint32_t trace_index
= hashbacktrace(bt
, depth
, zleak_trace_buckets
);
1393 struct ztrace
* trace
= &ztraces
[trace_index
];
1395 allocation
->za_hit_count
++;
1396 trace
->zt_hit_count
++;
1399 * If the allocation bucket we want to be in is occupied, and if the occupier
1400 * has the same trace as us, just bail.
1402 if (allocation
->za_element
!= (uintptr_t) 0 && trace_index
== allocation
->za_trace_index
) {
1403 z_alloc_collisions
++;
1405 lck_spin_unlock(&zleak_lock
);
1409 /* STEP 1: Store the backtrace in the traces array. */
1410 /* A size of zero indicates that the trace bucket is free. */
1412 if (trace
->zt_size
> 0 && bcmp(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t))) != 0 ) {
1414 * Different unique trace with same hash!
1415 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
1416 * and get out of the way for later chances
1418 trace
->zt_collisions
++;
1419 z_trace_collisions
++;
1421 lck_spin_unlock(&zleak_lock
);
1423 } else if (trace
->zt_size
> 0) {
1424 /* Same trace, already added, so increment refcount */
1425 trace
->zt_size
+= allocation_size
;
1427 /* Found an unused trace bucket, record the trace here! */
1428 if (trace
->zt_depth
!= 0) /* if this slot was previously used but not currently in use */
1429 z_trace_overwrites
++;
1432 trace
->zt_size
= allocation_size
;
1433 memcpy(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t)) );
1435 trace
->zt_depth
= depth
;
1436 trace
->zt_collisions
= 0;
1439 /* STEP 2: Store the allocation record in the allocations array. */
1441 if (allocation
->za_element
!= (uintptr_t) 0) {
1443 * Straight up replace any allocation record that was there. We don't want to do the work
1444 * to preserve the allocation entries that were there, because we only record a subset of the
1445 * allocations anyways.
1448 z_alloc_collisions
++;
1450 struct ztrace
* associated_trace
= &ztraces
[allocation
->za_trace_index
];
1451 /* Knock off old allocation's size, not the new allocation */
1452 associated_trace
->zt_size
-= allocation
->za_size
;
1453 } else if (allocation
->za_trace_index
!= 0) {
1454 /* Slot previously used but not currently in use */
1455 z_alloc_overwrites
++;
1458 allocation
->za_element
= addr
;
1459 allocation
->za_trace_index
= trace_index
;
1460 allocation
->za_size
= allocation_size
;
1464 if (top_ztrace
->zt_size
< trace
->zt_size
)
1467 lck_spin_unlock(&zleak_lock
);
1472 * Free the allocation record and release the stacktrace.
1473 * This should be as fast as possible because it will be called for every free.
1476 zleak_free(uintptr_t addr
,
1477 vm_size_t allocation_size
)
1479 if (addr
== (uintptr_t) 0)
1482 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
1484 /* Double-checked locking: check to find out if we're interested, lock, check to make
1485 * sure it hasn't changed, then modify it, and release the lock.
1488 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
1489 /* if the allocation was the one, grab the lock, check again, then delete it */
1490 lck_spin_lock(&zleak_lock
);
1492 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
1493 struct ztrace
*trace
;
1495 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
1496 if (allocation
->za_size
!= allocation_size
) {
1497 panic("Freeing as size %lu memory that was allocated with size %lu\n",
1498 (uintptr_t)allocation_size
, (uintptr_t)allocation
->za_size
);
1501 trace
= &ztraces
[allocation
->za_trace_index
];
1503 /* size of 0 indicates trace bucket is unused */
1504 if (trace
->zt_size
> 0) {
1505 trace
->zt_size
-= allocation_size
;
1508 /* A NULL element means the allocation bucket is unused */
1509 allocation
->za_element
= 0;
1511 lck_spin_unlock(&zleak_lock
);
1515 #endif /* CONFIG_ZLEAKS */
1517 /* These functions outside of CONFIG_ZLEAKS because they are also used in
1518 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
1522 * This function captures a backtrace from the current stack and
1523 * returns the number of frames captured, limited by max_frames.
1524 * It's fast because it does no checking to make sure there isn't bad data.
1525 * Since it's only called from threads that we're going to keep executing,
1526 * if there's bad data we were going to die eventually.
1527 * If this function is inlined, it doesn't record the frame of the function it's inside.
1528 * (because there's no stack frame!)
1532 fastbacktrace(uintptr_t* bt
, uint32_t max_frames
)
1534 uintptr_t* frameptr
= NULL
, *frameptr_next
= NULL
;
1535 uintptr_t retaddr
= 0;
1536 uint32_t frame_index
= 0, frames
= 0;
1537 uintptr_t kstackb
, kstackt
;
1538 thread_t cthread
= current_thread();
1540 if (__improbable(cthread
== NULL
))
1543 kstackb
= cthread
->kernel_stack
;
1544 kstackt
= kstackb
+ kernel_stack_size
;
1545 /* Load stack frame pointer (EBP on x86) into frameptr */
1546 frameptr
= __builtin_frame_address(0);
1547 if (((uintptr_t)frameptr
> kstackt
) || ((uintptr_t)frameptr
< kstackb
))
1550 while (frameptr
!= NULL
&& frame_index
< max_frames
) {
1551 /* Next frame pointer is pointed to by the previous one */
1552 frameptr_next
= (uintptr_t*) *frameptr
;
1554 /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
1555 /* That also means the return address is worthless, so don't record it */
1556 if (frameptr_next
== NULL
)
1558 /* Verify thread stack bounds */
1559 if (((uintptr_t)frameptr_next
> kstackt
) || ((uintptr_t)frameptr_next
< kstackb
))
1561 /* Pull return address from one spot above the frame pointer */
1562 retaddr
= *(frameptr
+ 1);
1564 /* Store it in the backtrace array */
1565 bt
[frame_index
++] = retaddr
;
1567 frameptr
= frameptr_next
;
1570 /* Save the number of frames captured for return value */
1571 frames
= frame_index
;
1573 /* Fill in the rest of the backtrace with zeros */
1574 while (frame_index
< max_frames
)
1575 bt
[frame_index
++] = 0;
1580 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
1582 hash_mix(uintptr_t x
)
1605 hashbacktrace(uintptr_t* bt
, uint32_t depth
, uint32_t max_size
)
1609 uintptr_t mask
= max_size
- 1;
1612 hash
+= bt
[--depth
];
1615 hash
= hash_mix(hash
) & mask
;
1617 assert(hash
< max_size
);
1619 return (uint32_t) hash
;
1623 * TODO: Determine how well distributed this is
1624 * max_size must be a power of 2. i.e 0x10000 because 0x10000-1 is 0x0FFFF which is a great bitmask
1627 hashaddr(uintptr_t pt
, uint32_t max_size
)
1630 uintptr_t mask
= max_size
- 1;
1632 hash
= hash_mix(pt
) & mask
;
1634 assert(hash
< max_size
);
1636 return (uint32_t) hash
;
1639 /* End of all leak-detection code */
1643 * zinit initializes a new zone. The zone data structures themselves
1644 * are stored in a zone, which is initially a static structure that
1645 * is initialized by zone_init.
1649 vm_size_t size
, /* the size of an element */
1650 vm_size_t max
, /* maximum memory to use */
1651 vm_size_t alloc
, /* allocation size */
1652 const char *name
) /* a name for the zone */
1655 boolean_t use_page_list
= FALSE
;
1657 if (zone_zone
== ZONE_NULL
) {
1659 z
= (struct zone
*)zdata
;
1660 /* special handling in zcram() because the first element is being used */
1662 z
= (zone_t
) zalloc(zone_zone
);
1667 /* Zone elements must fit both a next pointer and a backup pointer */
1668 vm_size_t minimum_element_size
= sizeof(vm_offset_t
) * 2;
1669 if (size
< minimum_element_size
)
1670 size
= minimum_element_size
;
1673 * Round element size to a multiple of sizeof(pointer)
1674 * This also enforces that allocations will be aligned on pointer boundaries
1676 size
= ((size
-1) + sizeof(vm_offset_t
)) -
1677 ((size
-1) % sizeof(vm_offset_t
));
1682 alloc
= round_page(alloc
);
1683 max
= round_page(max
);
1686 * we look for an allocation size with less than 1% waste
1687 * up to 5 pages in size...
1688 * otherwise, we look for an allocation size with least fragmentation
1689 * in the range of 1 - 5 pages
1690 * This size will be used unless
1691 * the user suggestion is larger AND has less fragmentation
1694 /* Favor PAGE_SIZE allocations unless we waste >10% space */
1695 if ((size
< PAGE_SIZE
) && (PAGE_SIZE
% size
<= PAGE_SIZE
/ 10))
1699 #if defined(__LP64__)
1700 if (((alloc
% size
) != 0) || (alloc
> PAGE_SIZE
* 8))
1703 vm_size_t best
, waste
; unsigned int i
;
1705 waste
= best
% size
;
1707 for (i
= 1; i
<= 5; i
++) {
1708 vm_size_t tsize
, twaste
;
1710 tsize
= i
* PAGE_SIZE
;
1712 if ((tsize
% size
) < (tsize
/ 100)) {
1714 goto use_this_allocation
;
1716 twaste
= tsize
% size
;
1718 best
= tsize
, waste
= twaste
;
1720 if (alloc
<= best
|| (alloc
% size
>= waste
))
1723 use_this_allocation
:
1724 if (max
&& (max
< alloc
))
1728 * Opt into page list tracking if we can reliably map an allocation
1729 * to its page_metadata, and if the wastage in the tail of
1730 * the allocation is not too large
1733 /* zone_zone can't use page metadata since the page metadata will overwrite zone metadata */
1734 if (alloc
== PAGE_SIZE
&& zone_zone
!= ZONE_NULL
) {
1735 vm_offset_t first_element_offset
;
1736 size_t zone_page_metadata_size
= sizeof(struct zone_page_metadata
);
1738 if (zone_page_metadata_size
% ZONE_ELEMENT_ALIGNMENT
== 0) {
1739 first_element_offset
= zone_page_metadata_size
;
1741 first_element_offset
= zone_page_metadata_size
+ (ZONE_ELEMENT_ALIGNMENT
- (zone_page_metadata_size
% ZONE_ELEMENT_ALIGNMENT
));
1744 if (((PAGE_SIZE
- first_element_offset
) % size
) <= PAGE_SIZE
/ 100) {
1745 use_page_list
= TRUE
;
1749 z
->free_elements
= NULL
;
1750 queue_init(&z
->pages
.any_free_foreign
);
1751 queue_init(&z
->pages
.all_free
);
1752 queue_init(&z
->pages
.intermediate
);
1753 queue_init(&z
->pages
.all_used
);
1757 z
->elem_size
= size
;
1758 z
->alloc_size
= alloc
;
1759 z
->zone_name
= name
;
1763 z
->doing_alloc_without_vm_priv
= FALSE
;
1764 z
->doing_alloc_with_vm_priv
= FALSE
;
1765 z
->doing_gc
= FALSE
;
1766 z
->exhaustible
= FALSE
;
1767 z
->collectable
= TRUE
;
1768 z
->allows_foreign
= FALSE
;
1769 z
->expandable
= TRUE
;
1771 z
->async_pending
= FALSE
;
1772 z
->caller_acct
= TRUE
;
1773 z
->noencrypt
= FALSE
;
1774 z
->no_callout
= FALSE
;
1775 z
->async_prio_refill
= FALSE
;
1776 z
->gzalloc_exempt
= FALSE
;
1777 z
->alignment_required
= FALSE
;
1778 z
->use_page_list
= use_page_list
;
1779 z
->prio_refill_watermark
= 0;
1780 z
->zone_replenish_thread
= NULL
;
1783 z
->zleak_capture
= 0;
1784 z
->zleak_on
= FALSE
;
1785 #endif /* CONFIG_ZLEAKS */
1788 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
1789 zone_debug_enable(z
);
1790 #endif /* ZONE_DEBUG */
1794 * Add the zone to the all-zones list.
1795 * If we are tracking zone info per task, and we have
1796 * already used all the available stat slots, then keep
1797 * using the overflow zone slot.
1799 z
->next_zone
= ZONE_NULL
;
1800 simple_lock(&all_zones_lock
);
1802 last_zone
= &z
->next_zone
;
1803 z
->index
= num_zones
;
1804 if (zinfo_per_task
) {
1805 if (num_zones
> ZONES_MAX
)
1806 z
->index
= ZONES_MAX
;
1809 simple_unlock(&all_zones_lock
);
1812 * Check if we should be logging this zone. If so, remember the zone pointer.
1814 if (log_this_zone(z
->zone_name
, zone_name_to_log
)) {
1815 zone_of_interest
= z
;
1819 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1820 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to
1821 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1822 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1823 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1826 if (zone_of_interest
!= NULL
&& zlog_btlog
== NULL
&& kmem_alloc_ready
) {
1827 zlog_btlog
= btlog_create(log_records
, MAX_ZTRACE_DEPTH
, NULL
, NULL
, NULL
);
1829 printf("zone: logging started for zone %s\n", zone_of_interest
->zone_name
);
1831 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1832 zone_of_interest
= NULL
;
1836 gzalloc_zone_init(z
);
1840 unsigned zone_replenish_loops
, zone_replenish_wakeups
, zone_replenish_wakeups_initiated
, zone_replenish_throttle_count
;
1842 static void zone_replenish_thread(zone_t
);
1844 /* High priority VM privileged thread used to asynchronously refill a designated
1845 * zone, such as the reserved VM map entry zone.
1847 static void zone_replenish_thread(zone_t z
) {
1848 vm_size_t free_size
;
1849 current_thread()->options
|= TH_OPT_VMPRIV
;
1853 assert(z
->prio_refill_watermark
!= 0);
1854 while ((free_size
= (z
->cur_size
- (z
->count
* z
->elem_size
))) < (z
->prio_refill_watermark
* z
->elem_size
)) {
1855 assert(z
->doing_alloc_without_vm_priv
== FALSE
);
1856 assert(z
->doing_alloc_with_vm_priv
== FALSE
);
1857 assert(z
->async_prio_refill
== TRUE
);
1860 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
1861 vm_offset_t space
, alloc_size
;
1865 alloc_size
= round_page(z
->elem_size
);
1867 alloc_size
= z
->alloc_size
;
1870 zflags
|= KMA_NOENCRYPT
;
1872 kr
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
, VM_KERN_MEMORY_ZONE
);
1874 if (kr
== KERN_SUCCESS
) {
1876 if (alloc_size
== PAGE_SIZE
)
1877 space
= zone_alias_addr(space
);
1879 zcram(z
, space
, alloc_size
);
1880 } else if (kr
== KERN_RESOURCE_SHORTAGE
) {
1882 } else if (kr
== KERN_NO_SPACE
) {
1883 kr
= kernel_memory_allocate(kernel_map
, &space
, alloc_size
, 0, zflags
, VM_KERN_MEMORY_ZONE
);
1884 if (kr
== KERN_SUCCESS
) {
1886 if (alloc_size
== PAGE_SIZE
)
1887 space
= zone_alias_addr(space
);
1889 zcram(z
, space
, alloc_size
);
1891 assert_wait_timeout(&z
->zone_replenish_thread
, THREAD_UNINT
, 1, 100 * NSEC_PER_USEC
);
1892 thread_block(THREAD_CONTINUE_NULL
);
1897 zone_replenish_loops
++;
1901 /* Signal any potential throttled consumers, terminating
1902 * their timer-bounded waits.
1906 assert_wait(&z
->zone_replenish_thread
, THREAD_UNINT
);
1907 thread_block(THREAD_CONTINUE_NULL
);
1908 zone_replenish_wakeups
++;
1913 zone_prio_refill_configure(zone_t z
, vm_size_t low_water_mark
) {
1914 z
->prio_refill_watermark
= low_water_mark
;
1916 z
->async_prio_refill
= TRUE
;
1918 kern_return_t tres
= kernel_thread_start_priority((thread_continue_t
)zone_replenish_thread
, z
, MAXPRI_KERNEL
, &z
->zone_replenish_thread
);
1920 if (tres
!= KERN_SUCCESS
) {
1921 panic("zone_prio_refill_configure, thread create: 0x%x", tres
);
1924 thread_deallocate(z
->zone_replenish_thread
);
1928 * Cram the given memory into the specified zone. Update the zone page count accordingly.
1936 vm_size_t elem_size
;
1937 boolean_t from_zm
= FALSE
;
1939 /* Basic sanity checks */
1940 assert(zone
!= ZONE_NULL
&& newmem
!= (vm_offset_t
)0);
1941 assert(!zone
->collectable
|| zone
->allows_foreign
1942 || (from_zone_map(newmem
, size
)));
1944 elem_size
= zone
->elem_size
;
1946 if (from_zone_map(newmem
, size
))
1949 if (zalloc_debug
& ZALLOC_DEBUG_ZCRAM
)
1950 kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone
, zone
->zone_name
,
1951 (unsigned long)newmem
, from_zm
? "" : "[F]", (unsigned long)size
);
1953 if (from_zm
&& !zone
->use_page_list
)
1954 zone_page_init(newmem
, size
);
1956 ZONE_PAGE_COUNT_INCR(zone
, (size
/ PAGE_SIZE
));
1960 if (zone
->use_page_list
) {
1961 struct zone_page_metadata
*page_metadata
;
1962 size_t zone_page_metadata_size
= sizeof(struct zone_page_metadata
);
1964 assert((newmem
& PAGE_MASK
) == 0);
1965 assert((size
& PAGE_MASK
) == 0);
1966 for (; size
> 0; newmem
+= PAGE_SIZE
, size
-= PAGE_SIZE
) {
1968 vm_size_t pos_in_page
;
1969 page_metadata
= (struct zone_page_metadata
*)(newmem
);
1971 page_metadata
->pages
.next
= NULL
;
1972 page_metadata
->pages
.prev
= NULL
;
1973 page_metadata
->elements
= NULL
;
1974 page_metadata
->zone
= zone
;
1975 page_metadata
->alloc_count
= 0;
1976 page_metadata
->free_count
= 0;
1978 enqueue_tail(&zone
->pages
.all_used
, (queue_entry_t
)page_metadata
);
1980 vm_offset_t first_element_offset
;
1981 if (zone_page_metadata_size
% ZONE_ELEMENT_ALIGNMENT
== 0){
1982 first_element_offset
= zone_page_metadata_size
;
1984 first_element_offset
= zone_page_metadata_size
+ (ZONE_ELEMENT_ALIGNMENT
- (zone_page_metadata_size
% ZONE_ELEMENT_ALIGNMENT
));
1987 for (pos_in_page
= first_element_offset
; (newmem
+ pos_in_page
+ elem_size
) < (vm_offset_t
)(newmem
+ PAGE_SIZE
); pos_in_page
+= elem_size
) {
1988 page_metadata
->alloc_count
++;
1989 zone
->count
++; /* compensate for free_to_zone */
1990 free_to_zone(zone
, newmem
+ pos_in_page
, FALSE
);
1991 zone
->cur_size
+= elem_size
;
1995 while (size
>= elem_size
) {
1996 zone
->count
++; /* compensate for free_to_zone */
1997 if (newmem
== (vm_offset_t
)zone
) {
1998 /* Don't free zone_zone zone */
2000 free_to_zone(zone
, newmem
, FALSE
);
2003 zone_page_alloc(newmem
, elem_size
);
2005 newmem
+= elem_size
;
2006 zone
->cur_size
+= elem_size
;
2014 * Steal memory for the zone package. Called from
2015 * vm_page_bootstrap().
2018 zone_steal_memory(void)
2021 gzalloc_configure();
2023 /* Request enough early memory to get to the pmap zone */
2024 zdata_size
= 12 * sizeof(struct zone
);
2025 zdata_size
= round_page(zdata_size
);
2026 zdata
= (vm_offset_t
)pmap_steal_memory(zdata_size
);
2031 * Fill a zone with enough memory to contain at least nelem elements.
2032 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
2033 * Return the number of elements actually put into the zone, which may
2034 * be more than the caller asked for since the memory allocation is
2035 * rounded up to a full page.
2050 size
= nelem
* zone
->elem_size
;
2051 size
= round_page(size
);
2052 kr
= kmem_alloc_kobject(kernel_map
, &memory
, size
, VM_KERN_MEMORY_ZONE
);
2053 if (kr
!= KERN_SUCCESS
)
2056 zone_change(zone
, Z_FOREIGN
, TRUE
);
2057 zcram(zone
, memory
, size
);
2058 nalloc
= (int)(size
/ zone
->elem_size
);
2059 assert(nalloc
>= nelem
);
2065 * Initialize the "zone of zones" which uses fixed memory allocated
2066 * earlier in memory initialization. zone_bootstrap is called
2070 zone_bootstrap(void)
2074 if (PE_parse_boot_argn("-zinfop", temp_buf
, sizeof(temp_buf
))) {
2075 zinfo_per_task
= TRUE
;
2078 if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug
, sizeof(zalloc_debug
)))
2081 /* Set up zone element poisoning */
2084 /* should zlog log to debug zone corruption instead of leaks? */
2085 if (PE_parse_boot_argn("-zc", temp_buf
, sizeof(temp_buf
))) {
2086 corruption_debug_flag
= TRUE
;
2090 * Check for and set up zone leak detection if requested via boot-args. We recognized two
2093 * zlog=<zone_to_log>
2094 * zrecs=<num_records_in_log>
2096 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
2097 * control the size of the log. If zrecs is not specified, a default value is used.
2100 if (PE_parse_boot_argn("zlog", zone_name_to_log
, sizeof(zone_name_to_log
)) == TRUE
) {
2101 if (PE_parse_boot_argn("zrecs", &log_records
, sizeof(log_records
)) == TRUE
) {
2104 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
2105 * This prevents accidentally hogging too much kernel memory and making the system
2109 log_records
= MIN(ZRECORDS_MAX
, log_records
);
2112 log_records
= ZRECORDS_DEFAULT
;
2116 simple_lock_init(&all_zones_lock
, 0);
2118 first_zone
= ZONE_NULL
;
2119 last_zone
= &first_zone
;
2121 thread_call_setup(&call_async_alloc
, zalloc_async
, NULL
);
2123 /* assertion: nobody else called zinit before us */
2124 assert(zone_zone
== ZONE_NULL
);
2126 /* initializing global lock group for zones */
2127 lck_grp_attr_setdefault(&zone_locks_grp_attr
);
2128 lck_grp_init(&zone_locks_grp
, "zone_locks", &zone_locks_grp_attr
);
2130 zone_zone
= zinit(sizeof(struct zone
), 128 * sizeof(struct zone
),
2131 sizeof(struct zone
), "zones");
2132 zone_change(zone_zone
, Z_COLLECT
, FALSE
);
2133 zone_change(zone_zone
, Z_CALLERACCT
, FALSE
);
2134 zone_change(zone_zone
, Z_NOENCRYPT
, TRUE
);
2136 zcram(zone_zone
, zdata
, zdata_size
);
2137 VM_PAGE_MOVE_STOLEN(atop_64(zdata_size
));
2139 /* initialize fake zones and zone info if tracking by task */
2140 if (zinfo_per_task
) {
2141 vm_size_t zisize
= sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
;
2144 for (i
= 0; i
< num_fake_zones
; i
++)
2145 fake_zones
[i
].init(ZINFO_SLOTS
- num_fake_zones
+ i
);
2146 zinfo_zone
= zinit(zisize
, zisize
* CONFIG_TASK_MAX
,
2147 zisize
, "per task zinfo");
2148 zone_change(zinfo_zone
, Z_CALLERACCT
, FALSE
);
2153 zinfo_task_init(task_t task
)
2155 if (zinfo_per_task
) {
2156 task
->tkm_zinfo
= zalloc(zinfo_zone
);
2157 memset(task
->tkm_zinfo
, 0, sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
);
2159 task
->tkm_zinfo
= NULL
;
2164 zinfo_task_free(task_t task
)
2166 assert(task
!= kernel_task
);
2167 if (task
->tkm_zinfo
!= NULL
) {
2168 zfree(zinfo_zone
, task
->tkm_zinfo
);
2169 task
->tkm_zinfo
= NULL
;
2173 /* Global initialization of Zone Allocator.
2174 * Runs after zone_bootstrap.
2178 vm_size_t max_zonemap_size
)
2180 kern_return_t retval
;
2181 vm_offset_t zone_min
;
2182 vm_offset_t zone_max
;
2184 retval
= kmem_suballoc(kernel_map
, &zone_min
, max_zonemap_size
,
2185 FALSE
, VM_FLAGS_ANYWHERE
| VM_FLAGS_PERMANENT
| VM_MAKE_TAG(VM_KERN_MEMORY_ZONE
),
2188 if (retval
!= KERN_SUCCESS
)
2189 panic("zone_init: kmem_suballoc failed");
2190 zone_max
= zone_min
+ round_page(max_zonemap_size
);
2192 gzalloc_init(max_zonemap_size
);
2195 * Setup garbage collection information:
2197 zone_map_min_address
= zone_min
;
2198 zone_map_max_address
= zone_max
;
2200 #if defined(__LP64__)
2202 * ensure that any vm_page_t that gets created from
2203 * the vm_page zone can be packed properly (see vm_page.h
2204 * for the packing requirements
2206 if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_min_address
)) != (vm_page_t
)zone_map_min_address
)
2207 panic("VM_PAGE_PACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address
);
2209 if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address
)) != (vm_page_t
)zone_map_max_address
)
2210 panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address
);
2213 zone_pages
= (unsigned int)atop_kernel(zone_max
- zone_min
);
2214 zone_page_table_used_size
= sizeof(zone_page_table
);
2216 zone_page_table_second_level_size
= 1;
2217 zone_page_table_second_level_shift_amount
= 0;
2220 * Find the power of 2 for the second level that allows
2221 * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
2224 while ((zone_page_table_first_level_slot(zone_pages
-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
) {
2225 zone_page_table_second_level_size
<<= 1;
2226 zone_page_table_second_level_shift_amount
++;
2229 lck_grp_attr_setdefault(&zone_gc_lck_grp_attr
);
2230 lck_grp_init(&zone_gc_lck_grp
, "zone_gc", &zone_gc_lck_grp_attr
);
2231 lck_attr_setdefault(&zone_gc_lck_attr
);
2232 lck_mtx_init_ext(&zone_gc_lock
, &zone_gc_lck_ext
, &zone_gc_lck_grp
, &zone_gc_lck_attr
);
2236 * Initialize the zone leak monitor
2238 zleak_init(max_zonemap_size
);
2239 #endif /* CONFIG_ZLEAKS */
2243 zone_page_table_expand(zone_page_index_t pindex
)
2245 unsigned int first_index
;
2246 struct zone_page_table_entry
* volatile * first_level_ptr
;
2248 assert(pindex
< zone_pages
);
2250 first_index
= zone_page_table_first_level_slot(pindex
);
2251 first_level_ptr
= &zone_page_table
[first_index
];
2253 if (*first_level_ptr
== NULL
) {
2255 * We were able to verify the old first-level slot
2256 * had NULL, so attempt to populate it.
2259 vm_offset_t second_level_array
= 0;
2260 vm_size_t second_level_size
= round_page(zone_page_table_second_level_size
* sizeof(struct zone_page_table_entry
));
2261 zone_page_index_t i
;
2262 struct zone_page_table_entry
*entry_array
;
2264 if (kmem_alloc_kobject(zone_map
, &second_level_array
,
2265 second_level_size
, VM_KERN_MEMORY_OSFMK
) != KERN_SUCCESS
) {
2266 panic("zone_page_table_expand");
2268 zone_map_table_page_count
+= (second_level_size
/ PAGE_SIZE
);
2271 * zone_gc() may scan the "zone_page_table" directly,
2272 * so make sure any slots have a valid unused state.
2274 entry_array
= (struct zone_page_table_entry
*)second_level_array
;
2275 for (i
=0; i
< zone_page_table_second_level_size
; i
++) {
2276 entry_array
[i
].alloc_count
= ZONE_PAGE_UNUSED
;
2277 entry_array
[i
].collect_count
= 0;
2280 if (OSCompareAndSwapPtr(NULL
, entry_array
, first_level_ptr
)) {
2281 /* Old slot was NULL, replaced with expanded level */
2282 OSAddAtomicLong(second_level_size
, &zone_page_table_used_size
);
2284 /* Old slot was not NULL, someone else expanded first */
2285 kmem_free(zone_map
, second_level_array
, second_level_size
);
2286 zone_map_table_page_count
-= (second_level_size
/ PAGE_SIZE
);
2289 /* Old slot was not NULL, already been expanded */
2293 struct zone_page_table_entry
*
2294 zone_page_table_lookup(zone_page_index_t pindex
)
2296 unsigned int first_index
= zone_page_table_first_level_slot(pindex
);
2297 struct zone_page_table_entry
*second_level
= zone_page_table
[first_index
];
2300 return &second_level
[zone_page_table_second_level_slot(pindex
)];
2306 extern volatile SInt32 kfree_nop_count
;
2309 #pragma mark zalloc_canblock
2312 * zalloc returns an element from the specified zone.
2318 boolean_t nopagewait
)
2320 vm_offset_t addr
= 0;
2321 kern_return_t retval
;
2322 uintptr_t zbt
[MAX_ZTRACE_DEPTH
]; /* used in zone leak logging and zone leak detection */
2324 boolean_t zone_replenish_wakeup
= FALSE
, zone_alloc_throttle
= FALSE
;
2325 #if CONFIG_GZALLOC || ZONE_DEBUG
2326 boolean_t did_gzalloc
= FALSE
;
2328 thread_t thr
= current_thread();
2329 boolean_t check_poison
= FALSE
;
2330 boolean_t set_doing_alloc_with_vm_priv
= FALSE
;
2333 uint32_t zleak_tracedepth
= 0; /* log this allocation if nonzero */
2334 #endif /* CONFIG_ZLEAKS */
2336 assert(zone
!= ZONE_NULL
);
2339 addr
= gzalloc_alloc(zone
, canblock
);
2340 did_gzalloc
= (addr
!= 0);
2344 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2346 if (__improbable(DO_LOGGING(zone
)))
2347 numsaved
= OSBacktrace((void*) zbt
, MAX_ZTRACE_DEPTH
);
2351 * Zone leak detection: capture a backtrace every zleak_sample_factor
2352 * allocations in this zone.
2354 if (__improbable(zone
->zleak_on
&& sample_counter(&zone
->zleak_capture
, zleak_sample_factor
) == TRUE
)) {
2355 /* Avoid backtracing twice if zone logging is on */
2357 zleak_tracedepth
= fastbacktrace(zbt
, MAX_ZTRACE_DEPTH
);
2359 zleak_tracedepth
= numsaved
;
2361 #endif /* CONFIG_ZLEAKS */
2365 if (zone
->async_prio_refill
&& zone
->zone_replenish_thread
) {
2367 vm_size_t zfreec
= (zone
->cur_size
- (zone
->count
* zone
->elem_size
));
2368 vm_size_t zrefillwm
= zone
->prio_refill_watermark
* zone
->elem_size
;
2369 zone_replenish_wakeup
= (zfreec
< zrefillwm
);
2370 zone_alloc_throttle
= (zfreec
< (zrefillwm
/ 2)) && ((thr
->options
& TH_OPT_VMPRIV
) == 0);
2372 if (zone_replenish_wakeup
) {
2373 zone_replenish_wakeups_initiated
++;
2375 /* Signal the potentially waiting
2378 thread_wakeup(&zone
->zone_replenish_thread
);
2380 /* Scheduling latencies etc. may prevent
2381 * the refill thread from keeping up
2382 * with demand. Throttle consumers
2383 * when we fall below half the
2384 * watermark, unless VM privileged
2386 if (zone_alloc_throttle
) {
2387 zone_replenish_throttle_count
++;
2388 assert_wait_timeout(zone
, THREAD_UNINT
, 1, NSEC_PER_MSEC
);
2389 thread_block(THREAD_CONTINUE_NULL
);
2393 } while (zone_alloc_throttle
== TRUE
);
2396 if (__probable(addr
== 0))
2397 addr
= try_alloc_from_zone(zone
, &check_poison
);
2400 while ((addr
== 0) && canblock
) {
2402 * zone is empty, try to expand it
2404 * Note that we now allow up to 2 threads (1 vm_privliged and 1 non-vm_privliged)
2405 * to expand the zone concurrently... this is necessary to avoid stalling
2406 * vm_privileged threads running critical code necessary to continue compressing/swapping
2407 * pages (i.e. making new free pages) from stalling behind non-vm_privileged threads
2408 * waiting to acquire free pages when the vm_page_free_count is below the
2409 * vm_page_free_reserved limit.
2411 if ((zone
->doing_alloc_without_vm_priv
|| zone
->doing_alloc_with_vm_priv
) &&
2412 (((thr
->options
& TH_OPT_VMPRIV
) == 0) || zone
->doing_alloc_with_vm_priv
)) {
2414 * This is a non-vm_privileged thread and a non-vm_privileged or
2415 * a vm_privileged thread is already expanding the zone...
2417 * this is a vm_privileged thread and a vm_privileged thread is
2418 * already expanding the zone...
2420 * In either case wait for a thread to finish, then try again.
2422 zone
->waiting
= TRUE
;
2424 } else if (zone
->doing_gc
) {
2426 * zone_gc() is running. Since we need an element
2427 * from the free list that is currently being
2428 * collected, set the waiting bit and
2429 * wait for the GC process to finish
2430 * before trying again
2432 zone
->waiting
= TRUE
;
2436 vm_size_t alloc_size
;
2439 if ((zone
->cur_size
+ zone
->elem_size
) >
2441 if (zone
->exhaustible
)
2443 if (zone
->expandable
) {
2445 * We're willing to overflow certain
2446 * zones, but not without complaining.
2448 * This is best used in conjunction
2449 * with the collectable flag. What we
2450 * want is an assurance we can get the
2451 * memory back, assuming there's no
2454 zone
->max_size
+= (zone
->max_size
>> 1);
2458 panic_include_zprint
= TRUE
;
2460 if (zleak_state
& ZLEAK_STATE_ACTIVE
)
2461 panic_include_ztrace
= TRUE
;
2462 #endif /* CONFIG_ZLEAKS */
2463 panic("zalloc: zone \"%s\" empty.", zone
->zone_name
);
2466 if ((thr
->options
& TH_OPT_VMPRIV
)) {
2467 zone
->doing_alloc_with_vm_priv
= TRUE
;
2468 set_doing_alloc_with_vm_priv
= TRUE
;
2470 zone
->doing_alloc_without_vm_priv
= TRUE
;
2475 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
2477 if (vm_pool_low() || retry
>= 1)
2479 round_page(zone
->elem_size
);
2481 alloc_size
= zone
->alloc_size
;
2483 if (zone
->noencrypt
)
2484 zflags
|= KMA_NOENCRYPT
;
2486 retval
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
, VM_KERN_MEMORY_ZONE
);
2487 if (retval
== KERN_SUCCESS
) {
2489 if (alloc_size
== PAGE_SIZE
)
2490 space
= zone_alias_addr(space
);
2494 if ((zleak_state
& (ZLEAK_STATE_ENABLED
| ZLEAK_STATE_ACTIVE
)) == ZLEAK_STATE_ENABLED
) {
2495 if (zone_map
->size
>= zleak_global_tracking_threshold
) {
2498 kr
= zleak_activate();
2499 if (kr
!= KERN_SUCCESS
) {
2500 printf("Failed to activate live zone leak debugging (%d).\n", kr
);
2505 if ((zleak_state
& ZLEAK_STATE_ACTIVE
) && !(zone
->zleak_on
)) {
2506 if (zone
->cur_size
> zleak_per_zone_tracking_threshold
) {
2507 zone
->zleak_on
= TRUE
;
2510 #endif /* CONFIG_ZLEAKS */
2511 zcram(zone
, space
, alloc_size
);
2514 } else if (retval
!= KERN_RESOURCE_SHORTAGE
) {
2519 printf("zalloc did gc\n");
2520 zone_display_zprint();
2523 panic_include_zprint
= TRUE
;
2525 if ((zleak_state
& ZLEAK_STATE_ACTIVE
)) {
2526 panic_include_ztrace
= TRUE
;
2528 #endif /* CONFIG_ZLEAKS */
2529 if (retval
== KERN_NO_SPACE
) {
2530 zone_t zone_largest
= zone_find_largest();
2531 panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)",
2532 zone
->zone_name
, zone_largest
->zone_name
,
2533 (unsigned long)zone_largest
->cur_size
, zone_largest
->count
);
2536 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone
->zone_name
, zone
->count
, retval
, (int)kfree_nop_count
);
2544 if (set_doing_alloc_with_vm_priv
== TRUE
)
2545 zone
->doing_alloc_with_vm_priv
= FALSE
;
2547 zone
->doing_alloc_without_vm_priv
= FALSE
;
2549 if (zone
->waiting
) {
2550 zone
->waiting
= FALSE
;
2553 addr
= try_alloc_from_zone(zone
, &check_poison
);
2555 retval
== KERN_RESOURCE_SHORTAGE
) {
2556 if (nopagewait
== TRUE
)
2557 break; /* out of the main while loop */
2565 addr
= try_alloc_from_zone(zone
, &check_poison
);
2569 /* Zone leak detection:
2570 * If we're sampling this allocation, add it to the zleaks hash table.
2572 if (addr
&& zleak_tracedepth
> 0) {
2573 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2574 if (!zleak_log(zbt
, addr
, zleak_tracedepth
, zone
->elem_size
)) {
2575 /* If it failed, roll back the counter so we sample the next allocation instead. */
2576 zone
->zleak_capture
= zleak_sample_factor
;
2579 #endif /* CONFIG_ZLEAKS */
2582 if ((addr
== 0) && (!canblock
|| nopagewait
) && (zone
->async_pending
== FALSE
) && (zone
->no_callout
== FALSE
) && (zone
->exhaustible
== FALSE
) && (!vm_pool_low())) {
2583 zone
->async_pending
= TRUE
;
2585 thread_call_enter(&call_async_alloc
);
2587 addr
= try_alloc_from_zone(zone
, &check_poison
);
2591 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
2592 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
2593 * since it protects the various log related data structures.
2596 if (__improbable(DO_LOGGING(zone
) && addr
)) {
2597 btlog_add_entry(zlog_btlog
, (void *)addr
, ZOP_ALLOC
, (void **)zbt
, numsaved
);
2600 vm_offset_t inner_size
= zone
->elem_size
;
2603 if (!did_gzalloc
&& addr
&& zone_debug_enabled(zone
)) {
2604 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)addr
);
2605 addr
+= ZONE_DEBUG_OFFSET
;
2606 inner_size
-= ZONE_DEBUG_OFFSET
;
2612 if (__improbable(check_poison
&& addr
)) {
2613 vm_offset_t
*element_cursor
= ((vm_offset_t
*) addr
) + 1;
2614 vm_offset_t
*backup
= get_backup_ptr(inner_size
, (vm_offset_t
*) addr
);
2616 for ( ; element_cursor
< backup
; element_cursor
++)
2617 if (__improbable(*element_cursor
!= ZP_POISON
))
2618 zone_element_was_modified_panic(zone
,
2622 ((vm_offset_t
)element_cursor
) - addr
);
2627 * Clear out the old next pointer and backup to avoid leaking the cookie
2628 * and so that only values on the freelist have a valid cookie
2631 vm_offset_t
*primary
= (vm_offset_t
*) addr
;
2632 vm_offset_t
*backup
= get_backup_ptr(inner_size
, primary
);
2634 *primary
= ZP_POISON
;
2635 *backup
= ZP_POISON
;
2638 TRACE_MACHLEAKS(ZALLOC_CODE
, ZALLOC_CODE_2
, zone
->elem_size
, addr
);
2642 zinfo_usage_t zinfo
;
2643 vm_size_t sz
= zone
->elem_size
;
2645 if (zone
->caller_acct
)
2646 ledger_credit(thr
->t_ledger
, task_ledgers
.tkm_private
, sz
);
2648 ledger_credit(thr
->t_ledger
, task_ledgers
.tkm_shared
, sz
);
2650 if ((task
= thr
->task
) != NULL
&& (zinfo
= task
->tkm_zinfo
) != NULL
)
2651 OSAddAtomic64(sz
, (int64_t *)&zinfo
[zone
->index
].alloc
);
2653 return((void *)addr
);
2660 return (zalloc_internal(zone
, TRUE
, FALSE
));
2664 zalloc_noblock(zone_t zone
)
2666 return (zalloc_internal(zone
, FALSE
, FALSE
));
2670 zalloc_nopagewait(zone_t zone
)
2672 return (zalloc_internal(zone
, TRUE
, TRUE
));
2676 zalloc_canblock(zone_t zone
, boolean_t canblock
)
2678 return (zalloc_internal(zone
, canblock
, FALSE
));
2684 __unused thread_call_param_t p0
,
2685 __unused thread_call_param_t p1
)
2687 zone_t current_z
= NULL
, head_z
;
2688 unsigned int max_zones
, i
;
2690 boolean_t pending
= FALSE
;
2692 simple_lock(&all_zones_lock
);
2693 head_z
= first_zone
;
2694 max_zones
= num_zones
;
2695 simple_unlock(&all_zones_lock
);
2697 for (i
= 0; i
< max_zones
; i
++) {
2698 lock_zone(current_z
);
2699 if (current_z
->async_pending
== TRUE
) {
2700 current_z
->async_pending
= FALSE
;
2703 unlock_zone(current_z
);
2705 if (pending
== TRUE
) {
2706 elt
= zalloc_canblock(current_z
, TRUE
);
2707 zfree(current_z
, elt
);
2711 * This is based on assumption that zones never get
2712 * freed once allocated and linked.
2713 * Hence a read outside of lock is OK.
2715 current_z
= current_z
->next_zone
;
2720 * zget returns an element from the specified zone
2721 * and immediately returns nothing if there is nothing there.
2723 * This form should be used when you can not block (like when
2724 * processing an interrupt).
2726 * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
2727 * friend vm_page_more_fictitious can block, so it doesn't seem like
2728 * this is used for interrupts any more....
2732 register zone_t zone
)
2735 boolean_t check_poison
= FALSE
;
2738 uintptr_t zbt
[MAX_ZTRACE_DEPTH
]; /* used for zone leak detection */
2739 uint32_t zleak_tracedepth
= 0; /* log this allocation if nonzero */
2740 #endif /* CONFIG_ZLEAKS */
2742 assert( zone
!= ZONE_NULL
);
2746 * Zone leak detection: capture a backtrace
2748 if (__improbable(zone
->zleak_on
&& sample_counter(&zone
->zleak_capture
, zleak_sample_factor
) == TRUE
)) {
2749 zleak_tracedepth
= fastbacktrace(zbt
, MAX_ZTRACE_DEPTH
);
2751 #endif /* CONFIG_ZLEAKS */
2753 if (!lock_try_zone(zone
))
2756 addr
= try_alloc_from_zone(zone
, &check_poison
);
2758 vm_offset_t inner_size
= zone
->elem_size
;
2761 if (addr
&& zone_debug_enabled(zone
)) {
2762 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)addr
);
2763 addr
+= ZONE_DEBUG_OFFSET
;
2764 inner_size
-= ZONE_DEBUG_OFFSET
;
2766 #endif /* ZONE_DEBUG */
2770 * Zone leak detection: record the allocation
2772 if (zone
->zleak_on
&& zleak_tracedepth
> 0 && addr
) {
2773 /* Sampling can fail if another sample is happening at the same time in a different zone. */
2774 if (!zleak_log(zbt
, addr
, zleak_tracedepth
, zone
->elem_size
)) {
2775 /* If it failed, roll back the counter so we sample the next allocation instead. */
2776 zone
->zleak_capture
= zleak_sample_factor
;
2779 #endif /* CONFIG_ZLEAKS */
2783 if (__improbable(check_poison
&& addr
)) {
2784 vm_offset_t
*element_cursor
= ((vm_offset_t
*) addr
) + 1;
2785 vm_offset_t
*backup
= get_backup_ptr(inner_size
, (vm_offset_t
*) addr
);
2787 for ( ; element_cursor
< backup
; element_cursor
++)
2788 if (__improbable(*element_cursor
!= ZP_POISON
))
2789 zone_element_was_modified_panic(zone
,
2793 ((vm_offset_t
)element_cursor
) - addr
);
2798 * Clear out the old next pointer and backup to avoid leaking the cookie
2799 * and so that only values on the freelist have a valid cookie
2801 vm_offset_t
*primary
= (vm_offset_t
*) addr
;
2802 vm_offset_t
*backup
= get_backup_ptr(inner_size
, primary
);
2804 *primary
= ZP_POISON
;
2805 *backup
= ZP_POISON
;
2808 return((void *) addr
);
2811 /* Keep this FALSE by default. Large memory machine run orders of magnitude
2812 slower in debug mode when true. Use debugger to enable if needed */
2813 /* static */ boolean_t zone_check
= FALSE
;
2815 static void zone_check_freelist(zone_t zone
, vm_offset_t elem
)
2817 struct zone_free_element
*this;
2818 struct zone_page_metadata
*thispage
;
2820 if (zone
->use_page_list
) {
2821 if (zone
->allows_foreign
) {
2822 for (thispage
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.any_free_foreign
);
2823 !queue_end(&zone
->pages
.any_free_foreign
, (queue_entry_t
)thispage
);
2824 thispage
= (struct zone_page_metadata
*)queue_next((queue_chain_t
*)thispage
)) {
2825 for (this = thispage
->elements
;
2827 this = this->next
) {
2828 if (!is_sane_zone_element(zone
, (vm_address_t
)this) || (vm_address_t
)this == elem
)
2829 panic("zone_check_freelist");
2833 for (thispage
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.all_free
);
2834 !queue_end(&zone
->pages
.all_free
, (queue_entry_t
)thispage
);
2835 thispage
= (struct zone_page_metadata
*)queue_next((queue_chain_t
*)thispage
)) {
2836 for (this = thispage
->elements
;
2838 this = this->next
) {
2839 if (!is_sane_zone_element(zone
, (vm_address_t
)this) || (vm_address_t
)this == elem
)
2840 panic("zone_check_freelist");
2843 for (thispage
= (struct zone_page_metadata
*)queue_first(&zone
->pages
.intermediate
);
2844 !queue_end(&zone
->pages
.intermediate
, (queue_entry_t
)thispage
);
2845 thispage
= (struct zone_page_metadata
*)queue_next((queue_chain_t
*)thispage
)) {
2846 for (this = thispage
->elements
;
2848 this = this->next
) {
2849 if (!is_sane_zone_element(zone
, (vm_address_t
)this) || (vm_address_t
)this == elem
)
2850 panic("zone_check_freelist");
2854 for (this = zone
->free_elements
;
2856 this = this->next
) {
2857 if (!is_sane_zone_element(zone
, (vm_address_t
)this) || (vm_address_t
)this == elem
)
2858 panic("zone_check_freelist");
2863 static zone_t zone_last_bogus_zone
= ZONE_NULL
;
2864 static vm_offset_t zone_last_bogus_elem
= 0;
2868 register zone_t zone
,
2871 vm_offset_t elem
= (vm_offset_t
) addr
;
2872 uintptr_t zbt
[MAX_ZTRACE_DEPTH
]; /* only used if zone logging is enabled via boot-args */
2874 boolean_t gzfreed
= FALSE
;
2875 boolean_t poison
= FALSE
;
2877 assert(zone
!= ZONE_NULL
);
2880 if (zone
->use_page_list
) {
2881 struct zone_page_metadata
*page_meta
= get_zone_page_metadata((struct zone_free_element
*)addr
);
2882 if (zone
!= page_meta
->zone
) {
2884 * Something bad has happened. Someone tried to zfree a pointer but the metadata says it is from
2885 * a different zone (or maybe it's from a zone that doesn't use page free lists at all). We can repair
2886 * some cases of this, if:
2887 * 1) The specified zone had use_page_list, and the true zone also has use_page_list set. In that case
2888 * we can swap the zone_t
2889 * 2) The specified zone had use_page_list, but the true zone does not. In this case page_meta is garbage,
2890 * and dereferencing page_meta->zone might panic.
2891 * To distinguish the two, we enumerate the zone list to match it up.
2892 * We do not handle the case where an incorrect zone is passed that does not have use_page_list set,
2893 * even if the true zone did have this set.
2895 zone_t fixed_zone
= NULL
;
2896 int fixed_i
, max_zones
;
2898 simple_lock(&all_zones_lock
);
2899 max_zones
= num_zones
;
2900 fixed_zone
= first_zone
;
2901 simple_unlock(&all_zones_lock
);
2903 for (fixed_i
=0; fixed_i
< max_zones
; fixed_i
++, fixed_zone
= fixed_zone
->next_zone
) {
2904 if (fixed_zone
== page_meta
->zone
&& fixed_zone
->use_page_list
) {
2905 /* we can fix this */
2906 printf("Fixing incorrect zfree from zone %s to zone %s\n", zone
->zone_name
, fixed_zone
->zone_name
);
2916 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
2919 if (__improbable(DO_LOGGING(zone
) && corruption_debug_flag
))
2920 numsaved
= OSBacktrace((void *)zbt
, MAX_ZTRACE_DEPTH
);
2923 /* Basic sanity checks */
2924 if (zone
== ZONE_NULL
|| elem
== (vm_offset_t
)0)
2925 panic("zfree: NULL");
2926 /* zone_gc assumes zones are never freed */
2927 if (zone
== zone_zone
)
2928 panic("zfree: freeing to zone_zone breaks zone_gc!");
2932 gzfreed
= gzalloc_free(zone
, addr
);
2935 TRACE_MACHLEAKS(ZFREE_CODE
, ZFREE_CODE_2
, zone
->elem_size
, (uintptr_t)addr
);
2937 if (__improbable(!gzfreed
&& zone
->collectable
&& !zone
->allows_foreign
&&
2938 !from_zone_map(elem
, zone
->elem_size
))) {
2940 panic("zfree: non-allocated memory in collectable zone!");
2942 zone_last_bogus_zone
= zone
;
2943 zone_last_bogus_elem
= elem
;
2947 if ((zp_factor
!= 0 || zp_tiny_zone_limit
!= 0) && !gzfreed
) {
2949 * Poison the memory before it ends up on the freelist to catch
2950 * use-after-free and use of uninitialized memory
2952 * Always poison tiny zones' elements (limit is 0 if -no-zp is set)
2953 * Also poison larger elements periodically
2956 vm_offset_t inner_size
= zone
->elem_size
;
2959 if (!gzfreed
&& zone_debug_enabled(zone
)) {
2960 inner_size
-= ZONE_DEBUG_OFFSET
;
2963 uint32_t sample_factor
= zp_factor
+ (((uint32_t)inner_size
) >> zp_scale
);
2965 if (inner_size
<= zp_tiny_zone_limit
)
2967 else if (zp_factor
!= 0 && sample_counter(&zone
->zp_count
, sample_factor
) == TRUE
)
2970 if (__improbable(poison
)) {
2972 /* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */
2973 /* Poison everything but primary and backup */
2974 vm_offset_t
*element_cursor
= ((vm_offset_t
*) elem
) + 1;
2975 vm_offset_t
*backup
= get_backup_ptr(inner_size
, (vm_offset_t
*)elem
);
2977 for ( ; element_cursor
< backup
; element_cursor
++)
2978 *element_cursor
= ZP_POISON
;
2985 * See if we're doing logging on this zone. There are two styles of logging used depending on
2986 * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
2989 if (__improbable(DO_LOGGING(zone
))) {
2990 if (corruption_debug_flag
) {
2992 * We're logging to catch a corruption. Add a record of this zfree operation
2995 btlog_add_entry(zlog_btlog
, (void *)addr
, ZOP_FREE
, (void **)zbt
, numsaved
);
2998 * We're logging to catch a leak. Remove any record we might have for this
2999 * element since it's being freed. Note that we may not find it if the buffer
3000 * overflowed and that's OK. Since the log is of a limited size, old records
3001 * get overwritten if there are more zallocs than zfrees.
3003 btlog_remove_entries_for_element(zlog_btlog
, (void *)addr
);
3008 if (!gzfreed
&& zone_debug_enabled(zone
)) {
3011 elem
-= ZONE_DEBUG_OFFSET
;
3013 /* check the zone's consistency */
3015 for (tmp_elem
= queue_first(&zone
->active_zones
);
3016 !queue_end(tmp_elem
, &zone
->active_zones
);
3017 tmp_elem
= queue_next(tmp_elem
))
3018 if (elem
== (vm_offset_t
)tmp_elem
)
3020 if (elem
!= (vm_offset_t
)tmp_elem
)
3021 panic("zfree()ing element from wrong zone");
3023 remqueue((queue_t
) elem
);
3025 #endif /* ZONE_DEBUG */
3027 zone_check_freelist(zone
, elem
);
3030 if (__probable(!gzfreed
))
3031 free_to_zone(zone
, elem
, poison
);
3034 if (zone
->count
< 0)
3035 panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone",
3036 zone
->zone_name
, addr
);
3042 * Zone leak detection: un-track the allocation
3044 if (zone
->zleak_on
) {
3045 zleak_free(elem
, zone
->elem_size
);
3047 #endif /* CONFIG_ZLEAKS */
3050 * If elements have one or more pages, and memory is low,
3051 * request to run the garbage collection in the zone the next
3052 * time the pageout thread runs.
3054 if (zone
->elem_size
>= PAGE_SIZE
&&
3056 zone_gc_forced
= TRUE
;
3061 thread_t thr
= current_thread();
3063 zinfo_usage_t zinfo
;
3064 vm_size_t sz
= zone
->elem_size
;
3066 if (zone
->caller_acct
)
3067 ledger_debit(thr
->t_ledger
, task_ledgers
.tkm_private
, sz
);
3069 ledger_debit(thr
->t_ledger
, task_ledgers
.tkm_shared
, sz
);
3071 if ((task
= thr
->task
) != NULL
&& (zinfo
= task
->tkm_zinfo
) != NULL
)
3072 OSAddAtomic64(sz
, (int64_t *)&zinfo
[zone
->index
].free
);
3077 /* Change a zone's flags.
3078 * This routine must be called immediately after zinit.
3086 assert( zone
!= ZONE_NULL
);
3087 assert( value
== TRUE
|| value
== FALSE
);
3091 zone
->noencrypt
= value
;
3094 zone
->exhaustible
= value
;
3097 zone
->collectable
= value
;
3100 zone
->expandable
= value
;
3103 zone
->allows_foreign
= value
;
3106 zone
->caller_acct
= value
;
3109 zone
->no_callout
= value
;
3111 case Z_GZALLOC_EXEMPT
:
3112 zone
->gzalloc_exempt
= value
;
3114 gzalloc_reconfigure(zone
);
3117 case Z_ALIGNMENT_REQUIRED
:
3118 zone
->alignment_required
= value
;
3120 * Disable the page list optimization here to provide
3121 * more of an alignment guarantee. This prevents
3122 * the alignment from being modified by the metadata stored
3123 * at the beginning of the page.
3125 zone
->use_page_list
= FALSE
;
3127 zone_debug_disable(zone
);
3130 gzalloc_reconfigure(zone
);
3134 panic("Zone_change: Wrong Item Type!");
3140 * Return the expected number of free elements in the zone.
3141 * This calculation will be incorrect if items are zfree'd that
3142 * were never zalloc'd/zget'd. The correct way to stuff memory
3143 * into a zone is by zcram.
3147 zone_free_count(zone_t zone
)
3149 integer_t free_count
;
3152 free_count
= zone
->countfree
;
3155 assert(free_count
>= 0);
3161 * Zone garbage collection subroutines
3165 zone_page_collectable(
3169 struct zone_page_table_entry
*zp
;
3170 zone_page_index_t i
, j
;
3173 addr
= zone_virtual_addr(addr
);
3176 if (!from_zone_map(addr
, size
))
3177 panic("zone_page_collectable");
3180 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3181 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3183 for (; i
<= j
; i
++) {
3184 zp
= zone_page_table_lookup(i
);
3185 if (zp
->collect_count
== zp
->alloc_count
)
3197 struct zone_page_table_entry
*zp
;
3198 zone_page_index_t i
, j
;
3201 addr
= zone_virtual_addr(addr
);
3204 if (!from_zone_map(addr
, size
))
3205 panic("zone_page_keep");
3208 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3209 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3211 for (; i
<= j
; i
++) {
3212 zp
= zone_page_table_lookup(i
);
3213 zp
->collect_count
= 0;
3222 struct zone_page_table_entry
*zp
;
3223 zone_page_index_t i
, j
;
3226 addr
= zone_virtual_addr(addr
);
3229 if (!from_zone_map(addr
, size
))
3230 panic("zone_page_collect");
3233 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3234 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3236 for (; i
<= j
; i
++) {
3237 zp
= zone_page_table_lookup(i
);
3238 ++zp
->collect_count
;
3247 struct zone_page_table_entry
*zp
;
3248 zone_page_index_t i
, j
;
3251 addr
= zone_virtual_addr(addr
);
3254 if (!from_zone_map(addr
, size
))
3255 panic("zone_page_init");
3258 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3259 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3261 for (; i
<= j
; i
++) {
3262 /* make sure entry exists before marking unused */
3263 zone_page_table_expand(i
);
3265 zp
= zone_page_table_lookup(i
);
3267 zp
->alloc_count
= ZONE_PAGE_UNUSED
;
3268 zp
->collect_count
= 0;
3277 struct zone_page_table_entry
*zp
;
3278 zone_page_index_t i
, j
;
3281 addr
= zone_virtual_addr(addr
);
3284 if (!from_zone_map(addr
, size
))
3285 panic("zone_page_alloc");
3288 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3289 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3291 for (; i
<= j
; i
++) {
3292 zp
= zone_page_table_lookup(i
);
3296 * Set alloc_count to ZONE_PAGE_USED if
3297 * it was previously set to ZONE_PAGE_UNUSED.
3299 if (zp
->alloc_count
== ZONE_PAGE_UNUSED
)
3300 zp
->alloc_count
= ZONE_PAGE_USED
;
3307 zone_page_free_element(
3308 zone_page_index_t
*free_page_head
,
3309 zone_page_index_t
*free_page_tail
,
3313 struct zone_page_table_entry
*zp
;
3314 zone_page_index_t i
, j
;
3317 addr
= zone_virtual_addr(addr
);
3320 if (!from_zone_map(addr
, size
))
3321 panic("zone_page_free_element");
3324 /* Clear out the old next and backup pointers */
3325 vm_offset_t
*primary
= (vm_offset_t
*) addr
;
3326 vm_offset_t
*backup
= get_backup_ptr(size
, primary
);
3328 *primary
= ZP_POISON
;
3329 *backup
= ZP_POISON
;
3331 i
= (zone_page_index_t
)atop_kernel(addr
-zone_map_min_address
);
3332 j
= (zone_page_index_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
3334 for (; i
<= j
; i
++) {
3335 zp
= zone_page_table_lookup(i
);
3337 if (zp
->collect_count
> 0)
3338 --zp
->collect_count
;
3339 if (--zp
->alloc_count
== 0) {
3340 vm_address_t free_page_address
;
3341 vm_address_t prev_free_page_address
;
3343 zp
->alloc_count
= ZONE_PAGE_UNUSED
;
3344 zp
->collect_count
= 0;
3348 * This element was the last one on this page, re-use the page's
3349 * storage for a page freelist
3351 free_page_address
= zone_map_min_address
+ PAGE_SIZE
* ((vm_size_t
)i
);
3352 *(zone_page_index_t
*)free_page_address
= ZONE_PAGE_INDEX_INVALID
;
3354 if (*free_page_head
== ZONE_PAGE_INDEX_INVALID
) {
3355 *free_page_head
= i
;
3356 *free_page_tail
= i
;
3358 prev_free_page_address
= zone_map_min_address
+ PAGE_SIZE
* ((vm_size_t
)(*free_page_tail
));
3359 *(zone_page_index_t
*)prev_free_page_address
= i
;
3360 *free_page_tail
= i
;
3367 #define ZONEGC_SMALL_ELEMENT_SIZE 4096
3370 uint64_t zgc_invoked
;
3371 uint64_t zgc_bailed
;
3374 uint32_t elems_collected
,
3379 /* Zone garbage collection
3381 * zone_gc will walk through all the free elements in all the
3382 * zones that are marked collectable looking for reclaimable
3383 * pages. zone_gc is called by consider_zone_gc when the system
3384 * begins to run out of memory.
3387 zone_gc(boolean_t all_zones
)
3389 unsigned int max_zones
;
3392 uint32_t old_pgs_freed
;
3393 zone_page_index_t zone_free_page_head
;
3394 zone_page_index_t zone_free_page_tail
;
3395 thread_t mythread
= current_thread();
3397 lck_mtx_lock(&zone_gc_lock
);
3399 zgc_stats
.zgc_invoked
++;
3400 old_pgs_freed
= zgc_stats
.pgs_freed
;
3402 simple_lock(&all_zones_lock
);
3403 max_zones
= num_zones
;
3405 simple_unlock(&all_zones_lock
);
3407 if (zalloc_debug
& ZALLOC_DEBUG_ZONEGC
)
3408 kprintf("zone_gc(all_zones=%s) starting...\n", all_zones
? "TRUE" : "FALSE");
3411 * it's ok to allow eager kernel preemption while
3412 * while holding a zone lock since it's taken
3413 * as a spin lock (which prevents preemption)
3415 thread_set_eager_preempt(mythread
);
3418 for (i
= 0; i
< zone_pages
; i
++) {
3419 struct zone_page_table_entry
*zp
;
3421 zp
= zone_page_table_lookup(i
);
3422 assert(!zp
|| (zp
->collect_count
== 0));
3424 #endif /* MACH_ASSERT */
3426 for (i
= 0; i
< max_zones
; i
++, z
= z
->next_zone
) {
3428 vm_size_t elt_size
, size_freed
;
3429 struct zone_free_element
*elt
, *base_elt
, *base_prev
, *prev
, *scan
, *keep
, *tail
;
3430 int kmem_frees
= 0, total_freed_pages
= 0;
3431 struct zone_page_metadata
*page_meta
;
3432 queue_head_t page_meta_head
;
3434 assert(z
!= ZONE_NULL
);
3436 if (!z
->collectable
)
3439 if (all_zones
== FALSE
&& z
->elem_size
< ZONEGC_SMALL_ELEMENT_SIZE
&& !z
->use_page_list
)
3444 elt_size
= z
->elem_size
;
3447 * Do a quick feasibility check before we scan the zone:
3448 * skip unless there is likelihood of getting pages back
3449 * (i.e we need a whole allocation block's worth of free
3450 * elements before we can garbage collect) and
3451 * the zone has more than 10 percent of it's elements free
3452 * or the element size is a multiple of the PAGE_SIZE
3454 if ((elt_size
& PAGE_MASK
) &&
3455 !z
->use_page_list
&&
3456 (((z
->cur_size
- z
->count
* elt_size
) <= (2 * z
->alloc_size
)) ||
3457 ((z
->cur_size
- z
->count
* elt_size
) <= (z
->cur_size
/ 10)))) {
3465 * Snatch all of the free elements away from the zone.
3468 if (z
->use_page_list
) {
3469 queue_new_head(&z
->pages
.all_free
, &page_meta_head
, struct zone_page_metadata
*, pages
);
3470 queue_init(&z
->pages
.all_free
);
3472 scan
= (void *)z
->free_elements
;
3473 z
->free_elements
= 0;
3478 if (z
->use_page_list
) {
3480 * For zones that maintain page lists (which in turn
3481 * track free elements on those pages), zone_gc()
3482 * is incredibly easy, and we bypass all the logic
3483 * for scanning elements and mapping them to
3489 queue_iterate(&page_meta_head
, page_meta
, struct zone_page_metadata
*, pages
) {
3490 assert(from_zone_map((vm_address_t
)page_meta
, sizeof(*page_meta
))); /* foreign elements should be in any_free_foreign */
3492 zgc_stats
.elems_freed
+= page_meta
->free_count
;
3493 size_freed
+= elt_size
* page_meta
->free_count
;
3494 zgc_stats
.elems_collected
+= page_meta
->free_count
;
3499 if (size_freed
> 0) {
3500 z
->cur_size
-= size_freed
;
3501 z
->countfree
-= size_freed
/elt_size
;
3504 z
->doing_gc
= FALSE
;
3512 if (queue_empty(&page_meta_head
))
3515 thread_clear_eager_preempt(mythread
);
3517 while ((page_meta
= (struct zone_page_metadata
*)dequeue_head(&page_meta_head
)) != NULL
) {
3518 vm_address_t free_page_address
;
3520 free_page_address
= trunc_page((vm_address_t
)page_meta
);
3522 free_page_address
= zone_virtual_addr(free_page_address
);
3524 kmem_free(zone_map
, free_page_address
, PAGE_SIZE
);
3525 ZONE_PAGE_COUNT_DECR(z
, 1);
3526 total_freed_pages
++;
3527 zgc_stats
.pgs_freed
+= 1;
3529 if (++kmem_frees
== 32) {
3530 thread_yield_internal(1);
3535 if (zalloc_debug
& ZALLOC_DEBUG_ZONEGC
)
3536 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z
->zone_name
, (unsigned long)size_freed
/elt_size
, total_freed_pages
);
3538 thread_set_eager_preempt(mythread
);
3539 continue; /* go to next zone */
3545 * Determine which elements we can attempt to collect
3546 * and count them up in the page table. Foreign elements
3547 * are returned to the zone.
3550 prev
= (void *)&scan
;
3552 n
= 0; tail
= keep
= NULL
;
3554 zone_free_page_head
= ZONE_PAGE_INDEX_INVALID
;
3555 zone_free_page_tail
= ZONE_PAGE_INDEX_INVALID
;
3558 while (elt
!= NULL
) {
3559 if (from_zone_map(elt
, elt_size
)) {
3560 zone_page_collect((vm_offset_t
)elt
, elt_size
);
3565 ++zgc_stats
.elems_collected
;
3571 append_zone_element(z
, tail
, elt
);
3575 append_zone_element(z
, prev
, elt
->next
);
3577 append_zone_element(z
, tail
, NULL
);
3581 * Dribble back the elements we are keeping.
3582 * If there are none, give some elements that we haven't looked at yet
3583 * back to the freelist so that others waiting on the zone don't get stuck
3584 * for too long. This might prevent us from recovering some memory,
3585 * but allows us to avoid having to allocate new memory to serve requests
3586 * while zone_gc has all the free memory tied up.
3587 * <rdar://problem/3893406>
3591 if (z
->waiting
== TRUE
) {
3592 /* z->waiting checked without lock held, rechecked below after locking */
3596 add_list_to_zone(z
, keep
, tail
);
3602 while ((elt
!= NULL
) && (++m
< 50)) {
3607 /* Extract the elements from the list and
3609 append_zone_element(z
, prev
, NULL
);
3610 add_list_to_zone(z
, base_elt
, prev
);
3611 append_zone_element(z
, base_prev
, elt
);
3628 * Return any remaining elements.
3634 add_list_to_zone(z
, keep
, tail
);
3647 * Determine which pages we can reclaim and
3648 * free those elements.
3653 n
= 0; tail
= keep
= NULL
;
3655 while (elt
!= NULL
) {
3656 if (zone_page_collectable((vm_offset_t
)elt
, elt_size
)) {
3657 struct zone_free_element
*next_elt
= elt
->next
;
3659 size_freed
+= elt_size
;
3662 * If this is the last allocation on the page(s),
3663 * we may use their storage to maintain the linked
3664 * list of free-able pages. So store elt->next because
3665 * "elt" may be scribbled over.
3667 zone_page_free_element(&zone_free_page_head
, &zone_free_page_tail
, (vm_offset_t
)elt
, elt_size
);
3671 ++zgc_stats
.elems_freed
;
3674 zone_page_keep((vm_offset_t
)elt
, elt_size
);
3679 append_zone_element(z
, tail
, elt
);
3684 append_zone_element(z
, tail
, NULL
);
3686 ++zgc_stats
.elems_kept
;
3690 * Dribble back the elements we are keeping,
3691 * and update the zone size info.
3697 z
->cur_size
-= size_freed
;
3698 z
->countfree
-= size_freed
/elt_size
;
3702 add_list_to_zone(z
, keep
, tail
);
3712 n
= 0; tail
= keep
= NULL
;
3717 * Return any remaining elements, and update
3718 * the zone size info.
3723 if (size_freed
> 0 || keep
!= NULL
) {
3725 z
->cur_size
-= size_freed
;
3726 z
->countfree
-= size_freed
/elt_size
;
3729 add_list_to_zone(z
, keep
, tail
);
3734 z
->doing_gc
= FALSE
;
3741 if (zone_free_page_head
== ZONE_PAGE_INDEX_INVALID
)
3745 * we don't want to allow eager kernel preemption while holding the
3746 * various locks taken in the kmem_free path of execution
3748 thread_clear_eager_preempt(mythread
);
3752 * This loop counts the number of pages that should be freed by the
3753 * next loop that tries to coalesce the kmem_frees()
3755 uint32_t pages_to_free_count
= 0;
3757 zone_page_index_t index
;
3758 for (index
= zone_free_page_head
; index
!= ZONE_PAGE_INDEX_INVALID
;) {
3759 pages_to_free_count
++;
3760 fpa
= zone_map_min_address
+ PAGE_SIZE
* ((vm_size_t
)index
);
3761 index
= *(zone_page_index_t
*)fpa
;
3765 * Reclaim the pages we are freeing.
3767 while (zone_free_page_head
!= ZONE_PAGE_INDEX_INVALID
) {
3768 zone_page_index_t zind
= zone_free_page_head
;
3769 vm_address_t free_page_address
;
3773 * Use the first word of the page about to be freed to find the next free page
3775 free_page_address
= zone_map_min_address
+ PAGE_SIZE
* ((vm_size_t
)zind
);
3776 zone_free_page_head
= *(zone_page_index_t
*)free_page_address
;
3779 total_freed_pages
++;
3781 while (zone_free_page_head
!= ZONE_PAGE_INDEX_INVALID
) {
3782 zone_page_index_t next_zind
= zone_free_page_head
;
3783 vm_address_t next_free_page_address
;
3785 next_free_page_address
= zone_map_min_address
+ PAGE_SIZE
* ((vm_size_t
)next_zind
);
3787 if (next_free_page_address
== (free_page_address
- PAGE_SIZE
)) {
3788 free_page_address
= next_free_page_address
;
3789 } else if (next_free_page_address
!= (free_page_address
+ (PAGE_SIZE
* page_count
)))
3792 zone_free_page_head
= *(zone_page_index_t
*)next_free_page_address
;
3794 total_freed_pages
++;
3796 kmem_free(zone_map
, free_page_address
, page_count
* PAGE_SIZE
);
3797 ZONE_PAGE_COUNT_DECR(z
, page_count
);
3798 zgc_stats
.pgs_freed
+= page_count
;
3799 pages_to_free_count
-= page_count
;
3801 if (++kmem_frees
== 32) {
3802 thread_yield_internal(1);
3807 /* Check that we actually free the exact number of pages we were supposed to */
3808 assert(pages_to_free_count
== 0);
3810 if (zalloc_debug
& ZALLOC_DEBUG_ZONEGC
)
3811 kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z
->zone_name
, (unsigned long)size_freed
/elt_size
, total_freed_pages
);
3813 thread_set_eager_preempt(mythread
);
3816 if (old_pgs_freed
== zgc_stats
.pgs_freed
)
3817 zgc_stats
.zgc_bailed
++;
3819 thread_clear_eager_preempt(mythread
);
3821 lck_mtx_unlock(&zone_gc_lock
);
3825 extern vm_offset_t kmapoff_kaddr
;
3826 extern unsigned int kmapoff_pgcnt
;
3831 * Called by the pageout daemon when the system needs more free pages.
3835 consider_zone_gc(boolean_t force
)
3837 boolean_t all_zones
= FALSE
;
3839 if (kmapoff_kaddr
!= 0) {
3841 * One-time reclaim of kernel_map resources we allocated in
3844 (void) vm_deallocate(kernel_map
,
3845 kmapoff_kaddr
, kmapoff_pgcnt
* PAGE_SIZE_64
);
3849 if (zone_gc_allowed
&&
3850 (zone_gc_allowed_by_time_throttle
||
3853 if (zone_gc_allowed_by_time_throttle
== TRUE
) {
3854 zone_gc_allowed_by_time_throttle
= FALSE
;
3857 zone_gc_forced
= FALSE
;
3864 * By default, don't attempt zone GC more frequently
3865 * than once / 1 minutes.
3868 compute_zone_gc_throttle(void *arg __unused
)
3870 zone_gc_allowed_by_time_throttle
= TRUE
;
3874 #if CONFIG_TASK_ZONE_INFO
3879 mach_zone_name_array_t
*namesp
,
3880 mach_msg_type_number_t
*namesCntp
,
3881 task_zone_info_array_t
*infop
,
3882 mach_msg_type_number_t
*infoCntp
)
3884 mach_zone_name_t
*names
;
3885 vm_offset_t names_addr
;
3886 vm_size_t names_size
;
3887 task_zone_info_t
*info
;
3888 vm_offset_t info_addr
;
3889 vm_size_t info_size
;
3890 unsigned int max_zones
, i
;
3892 mach_zone_name_t
*zn
;
3893 task_zone_info_t
*zi
;
3900 if (task
== TASK_NULL
)
3901 return KERN_INVALID_TASK
;
3904 * We assume that zones aren't freed once allocated.
3905 * We won't pick up any zones that are allocated later.
3908 simple_lock(&all_zones_lock
);
3909 max_zones
= (unsigned int)(num_zones
+ num_fake_zones
);
3911 simple_unlock(&all_zones_lock
);
3913 names_size
= round_page(max_zones
* sizeof *names
);
3914 kr
= kmem_alloc_pageable(ipc_kernel_map
,
3915 &names_addr
, names_size
, VM_KERN_MEMORY_IPC
);
3916 if (kr
!= KERN_SUCCESS
)
3918 names
= (mach_zone_name_t
*) names_addr
;
3920 info_size
= round_page(max_zones
* sizeof *info
);
3921 kr
= kmem_alloc_pageable(ipc_kernel_map
,
3922 &info_addr
, info_size
, VM_KERN_MEMORY_IPC
);
3923 if (kr
!= KERN_SUCCESS
) {
3924 kmem_free(ipc_kernel_map
,
3925 names_addr
, names_size
);
3929 info
= (task_zone_info_t
*) info_addr
;
3934 for (i
= 0; i
< max_zones
- num_fake_zones
; i
++) {
3937 assert(z
!= ZONE_NULL
);
3943 simple_lock(&all_zones_lock
);
3945 simple_unlock(&all_zones_lock
);
3947 /* assuming here the name data is static */
3948 (void) strncpy(zn
->mzn_name
, zcopy
.zone_name
,
3949 sizeof zn
->mzn_name
);
3950 zn
->mzn_name
[sizeof zn
->mzn_name
- 1] = '\0';
3952 zi
->tzi_count
= (uint64_t)zcopy
.count
;
3953 zi
->tzi_cur_size
= ptoa_64(zcopy
.page_count
);
3954 zi
->tzi_max_size
= (uint64_t)zcopy
.max_size
;
3955 zi
->tzi_elem_size
= (uint64_t)zcopy
.elem_size
;
3956 zi
->tzi_alloc_size
= (uint64_t)zcopy
.alloc_size
;
3957 zi
->tzi_sum_size
= zcopy
.sum_count
* zcopy
.elem_size
;
3958 zi
->tzi_exhaustible
= (uint64_t)zcopy
.exhaustible
;
3959 zi
->tzi_collectable
= (uint64_t)zcopy
.collectable
;
3960 zi
->tzi_caller_acct
= (uint64_t)zcopy
.caller_acct
;
3961 if (task
->tkm_zinfo
!= NULL
) {
3962 zi
->tzi_task_alloc
= task
->tkm_zinfo
[zcopy
.index
].alloc
;
3963 zi
->tzi_task_free
= task
->tkm_zinfo
[zcopy
.index
].free
;
3965 zi
->tzi_task_alloc
= 0;
3966 zi
->tzi_task_free
= 0;
3973 * loop through the fake zones and fill them using the specialized
3976 for (i
= 0; i
< num_fake_zones
; i
++) {
3977 int count
, collectable
, exhaustible
, caller_acct
, index
;
3978 vm_size_t cur_size
, max_size
, elem_size
, alloc_size
;
3981 strncpy(zn
->mzn_name
, fake_zones
[i
].name
, sizeof zn
->mzn_name
);
3982 zn
->mzn_name
[sizeof zn
->mzn_name
- 1] = '\0';
3983 fake_zones
[i
].query(&count
, &cur_size
,
3984 &max_size
, &elem_size
,
3985 &alloc_size
, &sum_size
,
3986 &collectable
, &exhaustible
, &caller_acct
);
3987 zi
->tzi_count
= (uint64_t)count
;
3988 zi
->tzi_cur_size
= (uint64_t)cur_size
;
3989 zi
->tzi_max_size
= (uint64_t)max_size
;
3990 zi
->tzi_elem_size
= (uint64_t)elem_size
;
3991 zi
->tzi_alloc_size
= (uint64_t)alloc_size
;
3992 zi
->tzi_sum_size
= sum_size
;
3993 zi
->tzi_collectable
= (uint64_t)collectable
;
3994 zi
->tzi_exhaustible
= (uint64_t)exhaustible
;
3995 zi
->tzi_caller_acct
= (uint64_t)caller_acct
;
3996 if (task
->tkm_zinfo
!= NULL
) {
3997 index
= ZINFO_SLOTS
- num_fake_zones
+ i
;
3998 zi
->tzi_task_alloc
= task
->tkm_zinfo
[index
].alloc
;
3999 zi
->tzi_task_free
= task
->tkm_zinfo
[index
].free
;
4001 zi
->tzi_task_alloc
= 0;
4002 zi
->tzi_task_free
= 0;
4008 used
= max_zones
* sizeof *names
;
4009 if (used
!= names_size
)
4010 bzero((char *) (names_addr
+ used
), names_size
- used
);
4012 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)names_addr
,
4013 (vm_map_size_t
)used
, TRUE
, ©
);
4014 assert(kr
== KERN_SUCCESS
);
4016 *namesp
= (mach_zone_name_t
*) copy
;
4017 *namesCntp
= max_zones
;
4019 used
= max_zones
* sizeof *info
;
4021 if (used
!= info_size
)
4022 bzero((char *) (info_addr
+ used
), info_size
- used
);
4024 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)info_addr
,
4025 (vm_map_size_t
)used
, TRUE
, ©
);
4026 assert(kr
== KERN_SUCCESS
);
4028 *infop
= (task_zone_info_t
*) copy
;
4029 *infoCntp
= max_zones
;
4031 return KERN_SUCCESS
;
4034 #else /* CONFIG_TASK_ZONE_INFO */
4038 __unused task_t task
,
4039 __unused mach_zone_name_array_t
*namesp
,
4040 __unused mach_msg_type_number_t
*namesCntp
,
4041 __unused task_zone_info_array_t
*infop
,
4042 __unused mach_msg_type_number_t
*infoCntp
)
4044 return KERN_FAILURE
;
4047 #endif /* CONFIG_TASK_ZONE_INFO */
4052 mach_zone_name_array_t
*namesp
,
4053 mach_msg_type_number_t
*namesCntp
,
4054 mach_zone_info_array_t
*infop
,
4055 mach_msg_type_number_t
*infoCntp
)
4057 return (mach_memory_info(host
, namesp
, namesCntp
, infop
, infoCntp
, NULL
, NULL
));
4063 mach_zone_name_array_t
*namesp
,
4064 mach_msg_type_number_t
*namesCntp
,
4065 mach_zone_info_array_t
*infop
,
4066 mach_msg_type_number_t
*infoCntp
,
4067 mach_memory_info_array_t
*memoryInfop
,
4068 mach_msg_type_number_t
*memoryInfoCntp
)
4070 mach_zone_name_t
*names
;
4071 vm_offset_t names_addr
;
4072 vm_size_t names_size
;
4074 mach_zone_info_t
*info
;
4075 vm_offset_t info_addr
;
4076 vm_size_t info_size
;
4078 mach_memory_info_t
*memory_info
;
4079 vm_offset_t memory_info_addr
;
4080 vm_size_t memory_info_size
;
4081 vm_size_t memory_info_vmsize
;
4082 unsigned int num_sites
;
4084 unsigned int max_zones
, i
;
4086 mach_zone_name_t
*zn
;
4087 mach_zone_info_t
*zi
;
4094 if (host
== HOST_NULL
)
4095 return KERN_INVALID_HOST
;
4096 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
4097 if (!PE_i_can_has_debugger(NULL
))
4098 return KERN_INVALID_HOST
;
4102 * We assume that zones aren't freed once allocated.
4103 * We won't pick up any zones that are allocated later.
4106 simple_lock(&all_zones_lock
);
4107 max_zones
= (unsigned int)(num_zones
+ num_fake_zones
);
4109 simple_unlock(&all_zones_lock
);
4111 names_size
= round_page(max_zones
* sizeof *names
);
4112 kr
= kmem_alloc_pageable(ipc_kernel_map
,
4113 &names_addr
, names_size
, VM_KERN_MEMORY_IPC
);
4114 if (kr
!= KERN_SUCCESS
)
4116 names
= (mach_zone_name_t
*) names_addr
;
4118 info_size
= round_page(max_zones
* sizeof *info
);
4119 kr
= kmem_alloc_pageable(ipc_kernel_map
,
4120 &info_addr
, info_size
, VM_KERN_MEMORY_IPC
);
4121 if (kr
!= KERN_SUCCESS
) {
4122 kmem_free(ipc_kernel_map
,
4123 names_addr
, names_size
);
4126 info
= (mach_zone_info_t
*) info_addr
;
4129 memory_info_addr
= 0;
4130 if (memoryInfop
&& memoryInfoCntp
)
4132 num_sites
= VM_KERN_MEMORY_COUNT
+ VM_KERN_COUNTER_COUNT
;
4133 memory_info_size
= num_sites
* sizeof(*info
);
4134 memory_info_vmsize
= round_page(memory_info_size
);
4135 kr
= kmem_alloc_pageable(ipc_kernel_map
,
4136 &memory_info_addr
, memory_info_vmsize
, VM_KERN_MEMORY_IPC
);
4137 if (kr
!= KERN_SUCCESS
) {
4138 kmem_free(ipc_kernel_map
,
4139 names_addr
, names_size
);
4140 kmem_free(ipc_kernel_map
,
4141 info_addr
, info_size
);
4145 kr
= vm_map_wire(ipc_kernel_map
, memory_info_addr
, memory_info_addr
+ memory_info_vmsize
,
4146 VM_PROT_READ
|VM_PROT_WRITE
|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC
), FALSE
);
4147 assert(kr
== KERN_SUCCESS
);
4149 memory_info
= (mach_memory_info_t
*) memory_info_addr
;
4150 vm_page_diagnose(memory_info
, num_sites
);
4152 kr
= vm_map_unwire(ipc_kernel_map
, memory_info_addr
, memory_info_addr
+ memory_info_vmsize
, FALSE
);
4153 assert(kr
== KERN_SUCCESS
);
4159 for (i
= 0; i
< max_zones
- num_fake_zones
; i
++) {
4162 assert(z
!= ZONE_NULL
);
4168 simple_lock(&all_zones_lock
);
4170 simple_unlock(&all_zones_lock
);
4172 /* assuming here the name data is static */
4173 (void) strncpy(zn
->mzn_name
, zcopy
.zone_name
,
4174 sizeof zn
->mzn_name
);
4175 zn
->mzn_name
[sizeof zn
->mzn_name
- 1] = '\0';
4177 zi
->mzi_count
= (uint64_t)zcopy
.count
;
4178 zi
->mzi_cur_size
= ptoa_64(zcopy
.page_count
);
4179 zi
->mzi_max_size
= (uint64_t)zcopy
.max_size
;
4180 zi
->mzi_elem_size
= (uint64_t)zcopy
.elem_size
;
4181 zi
->mzi_alloc_size
= (uint64_t)zcopy
.alloc_size
;
4182 zi
->mzi_sum_size
= zcopy
.sum_count
* zcopy
.elem_size
;
4183 zi
->mzi_exhaustible
= (uint64_t)zcopy
.exhaustible
;
4184 zi
->mzi_collectable
= (uint64_t)zcopy
.collectable
;
4190 * loop through the fake zones and fill them using the specialized
4193 for (i
= 0; i
< num_fake_zones
; i
++) {
4194 int count
, collectable
, exhaustible
, caller_acct
;
4195 vm_size_t cur_size
, max_size
, elem_size
, alloc_size
;
4198 strncpy(zn
->mzn_name
, fake_zones
[i
].name
, sizeof zn
->mzn_name
);
4199 zn
->mzn_name
[sizeof zn
->mzn_name
- 1] = '\0';
4200 fake_zones
[i
].query(&count
, &cur_size
,
4201 &max_size
, &elem_size
,
4202 &alloc_size
, &sum_size
,
4203 &collectable
, &exhaustible
, &caller_acct
);
4204 zi
->mzi_count
= (uint64_t)count
;
4205 zi
->mzi_cur_size
= (uint64_t)cur_size
;
4206 zi
->mzi_max_size
= (uint64_t)max_size
;
4207 zi
->mzi_elem_size
= (uint64_t)elem_size
;
4208 zi
->mzi_alloc_size
= (uint64_t)alloc_size
;
4209 zi
->mzi_sum_size
= sum_size
;
4210 zi
->mzi_collectable
= (uint64_t)collectable
;
4211 zi
->mzi_exhaustible
= (uint64_t)exhaustible
;
4217 used
= max_zones
* sizeof *names
;
4218 if (used
!= names_size
)
4219 bzero((char *) (names_addr
+ used
), names_size
- used
);
4221 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)names_addr
,
4222 (vm_map_size_t
)used
, TRUE
, ©
);
4223 assert(kr
== KERN_SUCCESS
);
4225 *namesp
= (mach_zone_name_t
*) copy
;
4226 *namesCntp
= max_zones
;
4228 used
= max_zones
* sizeof *info
;
4230 if (used
!= info_size
)
4231 bzero((char *) (info_addr
+ used
), info_size
- used
);
4233 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)info_addr
,
4234 (vm_map_size_t
)used
, TRUE
, ©
);
4235 assert(kr
== KERN_SUCCESS
);
4237 *infop
= (mach_zone_info_t
*) copy
;
4238 *infoCntp
= max_zones
;
4240 if (memoryInfop
&& memoryInfoCntp
)
4242 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)memory_info_addr
,
4243 (vm_map_size_t
)memory_info_size
, TRUE
, ©
);
4244 assert(kr
== KERN_SUCCESS
);
4246 *memoryInfop
= (mach_memory_info_t
*) copy
;
4247 *memoryInfoCntp
= num_sites
;
4250 return KERN_SUCCESS
;
4254 * host_zone_info - LEGACY user interface for Mach zone information
4255 * Should use mach_zone_info() instead!
4260 zone_name_array_t
*namesp
,
4261 mach_msg_type_number_t
*namesCntp
,
4262 zone_info_array_t
*infop
,
4263 mach_msg_type_number_t
*infoCntp
)
4266 vm_offset_t names_addr
;
4267 vm_size_t names_size
;
4269 vm_offset_t info_addr
;
4270 vm_size_t info_size
;
4271 unsigned int max_zones
, i
;
4281 if (host
== HOST_NULL
)
4282 return KERN_INVALID_HOST
;
4283 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
4284 if (!PE_i_can_has_debugger(NULL
))
4285 return KERN_INVALID_HOST
;
4288 #if defined(__LP64__)
4289 if (!thread_is_64bit(current_thread()))
4290 return KERN_NOT_SUPPORTED
;
4292 if (thread_is_64bit(current_thread()))
4293 return KERN_NOT_SUPPORTED
;
4297 * We assume that zones aren't freed once allocated.
4298 * We won't pick up any zones that are allocated later.
4301 simple_lock(&all_zones_lock
);
4302 max_zones
= (unsigned int)(num_zones
+ num_fake_zones
);
4304 simple_unlock(&all_zones_lock
);
4306 names_size
= round_page(max_zones
* sizeof *names
);
4307 kr
= kmem_alloc_pageable(ipc_kernel_map
,
4308 &names_addr
, names_size
, VM_KERN_MEMORY_IPC
);
4309 if (kr
!= KERN_SUCCESS
)
4311 names
= (zone_name_t
*) names_addr
;
4313 info_size
= round_page(max_zones
* sizeof *info
);
4314 kr
= kmem_alloc_pageable(ipc_kernel_map
,
4315 &info_addr
, info_size
, VM_KERN_MEMORY_IPC
);
4316 if (kr
!= KERN_SUCCESS
) {
4317 kmem_free(ipc_kernel_map
,
4318 names_addr
, names_size
);
4322 info
= (zone_info_t
*) info_addr
;
4327 for (i
= 0; i
< max_zones
- num_fake_zones
; i
++) {
4330 assert(z
!= ZONE_NULL
);
4336 simple_lock(&all_zones_lock
);
4338 simple_unlock(&all_zones_lock
);
4340 /* assuming here the name data is static */
4341 (void) strncpy(zn
->zn_name
, zcopy
.zone_name
,
4342 sizeof zn
->zn_name
);
4343 zn
->zn_name
[sizeof zn
->zn_name
- 1] = '\0';
4345 zi
->zi_count
= zcopy
.count
;
4346 zi
->zi_cur_size
= ptoa(zcopy
.page_count
);
4347 zi
->zi_max_size
= zcopy
.max_size
;
4348 zi
->zi_elem_size
= zcopy
.elem_size
;
4349 zi
->zi_alloc_size
= zcopy
.alloc_size
;
4350 zi
->zi_exhaustible
= zcopy
.exhaustible
;
4351 zi
->zi_collectable
= zcopy
.collectable
;
4358 * loop through the fake zones and fill them using the specialized
4361 for (i
= 0; i
< num_fake_zones
; i
++) {
4364 strncpy(zn
->zn_name
, fake_zones
[i
].name
, sizeof zn
->zn_name
);
4365 zn
->zn_name
[sizeof zn
->zn_name
- 1] = '\0';
4366 fake_zones
[i
].query(&zi
->zi_count
, &zi
->zi_cur_size
,
4367 &zi
->zi_max_size
, &zi
->zi_elem_size
,
4368 &zi
->zi_alloc_size
, &sum_space
,
4369 &zi
->zi_collectable
, &zi
->zi_exhaustible
, &caller_acct
);
4374 used
= max_zones
* sizeof *names
;
4375 if (used
!= names_size
)
4376 bzero((char *) (names_addr
+ used
), names_size
- used
);
4378 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)names_addr
,
4379 (vm_map_size_t
)used
, TRUE
, ©
);
4380 assert(kr
== KERN_SUCCESS
);
4382 *namesp
= (zone_name_t
*) copy
;
4383 *namesCntp
= max_zones
;
4385 used
= max_zones
* sizeof *info
;
4386 if (used
!= info_size
)
4387 bzero((char *) (info_addr
+ used
), info_size
- used
);
4389 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)info_addr
,
4390 (vm_map_size_t
)used
, TRUE
, ©
);
4391 assert(kr
== KERN_SUCCESS
);
4393 *infop
= (zone_info_t
*) copy
;
4394 *infoCntp
= max_zones
;
4396 return KERN_SUCCESS
;
4404 if (host
== HOST_NULL
)
4405 return KERN_INVALID_HOST
;
4407 consider_zone_gc(TRUE
);
4409 return (KERN_SUCCESS
);
4412 extern unsigned int stack_total
;
4413 extern unsigned long long stack_allocs
;
4415 #if defined(__i386__) || defined (__x86_64__)
4416 extern unsigned int inuse_ptepages_count
;
4417 extern long long alloc_ptepages_count
;
4420 void zone_display_zprint()
4425 if(first_zone
!=NULL
) {
4426 the_zone
= first_zone
;
4427 for (i
= 0; i
< num_zones
; i
++) {
4428 if(the_zone
->cur_size
> (1024*1024)) {
4429 printf("%.20s:\t%lu\n",the_zone
->zone_name
,(uintptr_t)the_zone
->cur_size
);
4432 if(the_zone
->next_zone
== NULL
) {
4436 the_zone
= the_zone
->next_zone
;
4440 printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size
* stack_total
));
4442 #if defined(__i386__) || defined (__x86_64__)
4443 printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE
* inuse_ptepages_count
));
4446 printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total
);
4450 zone_find_largest(void)
4453 unsigned int max_zones
;
4455 zone_t zone_largest
;
4457 simple_lock(&all_zones_lock
);
4458 the_zone
= first_zone
;
4459 max_zones
= num_zones
;
4460 simple_unlock(&all_zones_lock
);
4462 zone_largest
= the_zone
;
4463 for (i
= 0; i
< max_zones
; i
++) {
4464 if (the_zone
->cur_size
> zone_largest
->cur_size
) {
4465 zone_largest
= the_zone
;
4468 if (the_zone
->next_zone
== NULL
) {
4472 the_zone
= the_zone
->next_zone
;
4474 return zone_largest
;
4479 /* should we care about locks here ? */
4481 #define zone_in_use(z) ( z->count || z->free_elements \
4482 || !queue_empty(&z->pages.all_free) \
4483 || !queue_empty(&z->pages.intermediate) \
4484 || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign)))
4490 if (zone_debug_enabled(z
) || zone_in_use(z
) ||
4491 z
->alloc_size
< (z
->elem_size
+ ZONE_DEBUG_OFFSET
))
4493 queue_init(&z
->active_zones
);
4494 z
->elem_size
+= ZONE_DEBUG_OFFSET
;
4501 if (!zone_debug_enabled(z
) || zone_in_use(z
))
4503 z
->elem_size
-= ZONE_DEBUG_OFFSET
;
4504 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
4508 #endif /* ZONE_DEBUG */