/*
 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Zone-based memory allocator.  A zone is a collection of fixed size
 *	data blocks for which quick allocation/deallocation is possible.
 */
#include <zone_debug.h>
#include <zone_alias_addr.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
/*
 * Zone Corruption Debugging
 *
 * We use three methods to detect use of a zone element after it's been freed.  These
 * checks are enabled for every N'th element (counted per-zone) by specifying
 * "zp-factor=N" as a boot-arg.  To turn this feature off, set "zp-factor=0" or "-no-zp".
 *
 * (1) Range-check the free-list "next" pointer for sanity.
 * (2) Store the pointer in two different words, one at the beginning of the freed element
 *     and one at the end, and compare them against each other when re-using the element,
 *     to detect modifications.
 * (3) Poison the freed memory by overwriting it with 0xdeadbeef, and check it when the
 *     memory is being reused to make sure it is still poisoned.
 *
 * As a result, each element (that is large enough to hold this data inside) must be marked
 * as either "ZP_POISONED" or "ZP_NOT_POISONED" in the first integer within the would-be
 * poisoned segment after the first free-list pointer.
 *
 * Performance slowdown is inversely proportional to the frequency with which you check
 * (as would be expected), with a 4-5% hit around N=1, down to ~0.3% at N=16 and just
 * "noise" at N=32 and higher.  You can expect to find a 100% reproducible bug in an
 * average of N tries, with a standard deviation of about N, but you will probably want
 * to set "zp-factor=1" or "-zp" if you are attempting to reproduce a known bug.
 *
 *
 * Zone corruption logging
 *
 * You can also track where corruptions come from by using the boot-arguments
 * "zlog=<zone name to log> -zc".  Search for "Zone corruption logging" later in this
 * file for more implementation and usage information.
 */
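/*
 * For orientation only, a sketch (not a definition used by the allocator) of the
 * freed-element layout that the three checks above rely on and that
 * free_to_zone()/alloc_from_zone() below maintain:
 *
 *	offset 0                                  free-list "next" pointer
 *	offset sizeof(vm_offset_t)                ZP_POISONED or ZP_NOT_POISONED marker
 *	...                                       ZP_POISON (0xdeadbeef) fill, when poisoned
 *	offset elem_size - sizeof(vm_offset_t)    backup copy of the "next" pointer
 */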
#define ZP_POISON		0xdeadbeef
#define ZP_POISONED		0xfeedface
#define ZP_NOT_POISONED		0xbaddecaf

#if	CONFIG_EMBEDDED
	#define ZP_DEFAULT_SAMPLING_FACTOR 0
#else	/* CONFIG_EMBEDDED */
	#define ZP_DEFAULT_SAMPLING_FACTOR 16
#endif	/* CONFIG_EMBEDDED */

uint32_t	free_check_sample_factor = 0;		/* set by zp-factor=N boot arg */
boolean_t	corruption_debug_flag    = FALSE;	/* enabled by "-zc" boot-arg */
/*
 * Zone checking helper macro.
 */
#define is_kernel_data_addr(a)	(!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))

/*
 * Frees the specified element, which is within the specified zone. If this
 * element should be poisoned and its free list checker should be set, both are
 * done here. These checks will only be enabled if the element size is at least
 * large enough to hold two vm_offset_t's and one uint32_t (to enable both types
 * of checks).
 */
static inline void
free_to_zone(zone_t zone, void *elem) {
	/* get the index of the first uint32_t beyond the 'next' pointer */
	unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);

	/* should we run checks on this piece of memory? */
	if (free_check_sample_factor != 0 &&
	    zone->free_check_count++ % free_check_sample_factor == 0 &&
	    zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
		zone->free_check_count = 1;
		((uint32_t *) elem)[i] = ZP_POISONED;
		for (i++; i < zone->elem_size / sizeof(uint32_t); i++) {
			((uint32_t *) elem)[i] = ZP_POISON;
		}
		((vm_offset_t *) elem)[((zone->elem_size)/sizeof(vm_offset_t))-1] = zone->free_elements;
	} else {
		((uint32_t *) elem)[i] = ZP_NOT_POISONED;
	}

	/* maintain free list and decrement number of active objects in zone */
	((vm_offset_t *) elem)[0] = zone->free_elements;
	zone->free_elements = (vm_offset_t) elem;
	zone->count--;
}
/*
 * Allocates an element from the specified zone, storing its address in the
 * return arg. This function will look for corruptions revealed through zone
 * poisoning and free list checks.
 */
static inline void
alloc_from_zone(zone_t zone, void **ret) {
	void *elem = (void *) zone->free_elements;

	/* get the index of the first uint32_t beyond the 'next' pointer */
	unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);

	if (elem != NULL) {
		/* first int in data section must be ZP_POISONED or ZP_NOT_POISONED */
		if (((uint32_t *) elem)[i] == ZP_POISONED &&
		    zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
			/* check the free list pointers */
			if (!is_kernel_data_addr(((vm_offset_t *) elem)[0]) ||
			    ((vm_offset_t *) elem)[0] !=
			    ((vm_offset_t *) elem)[(zone->elem_size/sizeof(vm_offset_t))-1]) {
				panic("a freed zone element has been modified in zone: %s",
				      zone->zone_name);
			}

			/* check for poisoning in free space */
			for (i++;
			     i < zone->elem_size / sizeof(uint32_t) -
			         sizeof(vm_offset_t) / sizeof(uint32_t);
			     i++) {
				if (((uint32_t *) elem)[i] != ZP_POISON) {
					panic("a freed zone element has been modified in zone: %s",
					      zone->zone_name);
				}
			}
		} else if (((uint32_t *) elem)[i] != ZP_NOT_POISONED) {
			panic("a freed zone element has been modified in zone: %s",
			      zone->zone_name);
		}

		/* maintain free list and increment number of active objects in zone */
		zone->count++;
		zone->free_elements = ((vm_offset_t *) elem)[0];
	}

	*ret = elem;
}
/*
 * Fake zones for things that want to report via zprint but are not actually zones.
 */
struct fake_zone_info {
	const char* name;
	void (*init)(int);
	void (*query)(int *,
		      vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
		      uint64_t *, int *, int *, int *);
};

static const struct fake_zone_info fake_zones[] = {
	{
		.name = "kernel_stacks",
		.init = stack_fake_zone_init,
		.query = stack_fake_zone_info,
	},
	{
		.name = "page_tables",
		.init = pt_fake_zone_init,
		.query = pt_fake_zone_info,
	},
	{
		.name = "kalloc.large",
		.init = kalloc_fake_zone_init,
		.query = kalloc_fake_zone_info,
	},
};
static const unsigned int num_fake_zones =
	sizeof (fake_zones) / sizeof (fake_zones[0]);

boolean_t zinfo_per_task = FALSE;		/* enabled by -zinfop in boot-args */
#define ZINFO_SLOTS 200				/* for now */
#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
/*
 * Support for garbage collection of unused zone pages
 *
 * The kernel virtually allocates the "zone map" submap of the kernel
 * map. When an individual zone needs more storage, memory is allocated
 * out of the zone map, and the two-level "zone_page_table" is
 * on-demand expanded so that it has entries for those pages.
 * zone_page_init()/zone_page_alloc() initialize "alloc_count"
 * to the number of zone elements that occupy the zone page (which may
 * be a minimum of 1, including if a zone element spans multiple
 * pages).
 *
 * Asynchronously, the zone_gc() logic attempts to walk zone free
 * lists to see if all the elements on a zone page are free. If
 * "collect_count" (which it increments during the scan) matches
 * "alloc_count", the zone page is a candidate for collection and the
 * physical page is returned to the VM system. During this process, the
 * first word of the zone page is re-used to maintain a linked list of
 * to-be-collected zone pages.
 */
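/*
 * Illustration only, a sketch of the per-page test that the description above
 * boils down to; "zp" is assumed to point at a page's zone_page_table_entry
 * after zone_gc() has walked the free lists:
 *
 *	if (zp->collect_count == zp->alloc_count) {
 *		... every element on the page is free, so the physical page
 *		... can be returned to the VM system
 *	}
 */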
typedef uint32_t zone_page_index_t;
#define	ZONE_PAGE_INDEX_INVALID		((zone_page_index_t)0xFFFFFFFFU)

struct zone_page_table_entry {
	volatile	uint16_t	alloc_count;
	volatile	uint16_t	collect_count;
};

#define	ZONE_PAGE_USED		0
#define ZONE_PAGE_UNUSED	0xffff

void		zone_page_alloc(vm_offset_t addr, vm_size_t size);

void		zone_page_free_element(zone_page_index_t *free_page_head,
				       zone_page_index_t *free_page_tail,
				       vm_offset_t addr, vm_size_t size);

void		zone_page_collect(vm_offset_t addr, vm_size_t size);

boolean_t	zone_page_collectable(vm_offset_t addr, vm_size_t size);

void		zalloc_async(thread_call_param_t p0, thread_call_param_t p1);

void		zone_display_zprint(void);
vm_map_t	zone_map = VM_MAP_NULL;

zone_t		zone_zone = ZONE_NULL;	/* the zone containing other zones */

zone_t		zinfo_zone = ZONE_NULL;	/* zone of per-task zone info */

/*
 *	The VM system gives us an initial chunk of memory.
 *	It has to be big enough to allocate the zone_zone
 *	all the way through the pmap zone.
 */

vm_offset_t	zdata;
vm_size_t	zdata_size;

#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone)				\
	(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
#define lock_zone_init(zone)				\
MACRO_BEGIN						\
	char _name[32];					\
	(void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
	lck_grp_attr_setdefault(&(zone)->lock_grp_attr);		\
	lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr);	\
	lck_attr_setdefault(&(zone)->lock_attr);			\
	lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext,		\
	    &(zone)->lock_grp, &(zone)->lock_attr);			\
MACRO_END

#define lock_try_zone(zone)	lck_mtx_try_lock_spin(&zone->lock)
/*
 *	Garbage collection map information
 */
#define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
vm_size_t			zone_page_table_used_size;
vm_offset_t			zone_map_min_address;
vm_offset_t			zone_map_max_address;
unsigned int			zone_pages;
unsigned int			zone_page_table_second_level_size;	/* power of 2 */
unsigned int			zone_page_table_second_level_shift_amount;

#define zone_page_table_first_level_slot(x)  ((x) >> zone_page_table_second_level_shift_amount)
#define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))
void	zone_page_table_expand(zone_page_index_t pindex);
struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);

/*
 *	Exclude more than one concurrent garbage collection
 */
decl_lck_mtx_data(,		zone_gc_lock)

lck_attr_t      zone_lck_attr;
lck_grp_t       zone_lck_grp;
lck_grp_attr_t  zone_lck_grp_attr;
lck_mtx_ext_t   zone_lck_ext;
#if	!ZONE_ALIAS_ADDR
#define from_zone_map(addr, size) \
	((vm_offset_t)(addr) >= zone_map_min_address && \
	 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
#else
#define from_zone_map(addr, size) \
	((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) >= zone_map_min_address && \
	 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) + size -1) < zone_map_max_address)
#endif
/*
 *	Protects first_zone, last_zone, num_zones,
 *	and the next_zone field of zones.
 */
decl_simple_lock_data(,	all_zones_lock)
zone_t			first_zone;
zone_t			*last_zone;
unsigned int		num_zones;

boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
boolean_t panic_include_zprint = FALSE;
boolean_t zone_gc_allowed_by_time_throttle = TRUE;
/*
 * Zone leak debugging code
 *
 * When enabled, this code keeps a log to track allocations to a particular zone that have not
 * yet been freed.  Examining this log will reveal the source of a zone leak.  The log is allocated
 * only when logging is enabled, so there is no effect on the system when it's turned off.  Logging is
 * done on a per-zone basis.
 *
 * Enable the logging via the boot-args.  Add the parameter "zlog=<zone>" to boot-args where <zone>
 * is the name of the zone you wish to log.
 *
 * This code only tracks one zone, so you need to identify which one is leaking first.
 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
 * garbage collector.  Note that the zone name printed in the panic message is not necessarily the one
 * containing the leak.  So do a zprint from gdb and locate the zone with the bloated size.  This
 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test.
 * The next time it panics with this message, examine the log using the kgmacros zstack, findoldest and
 * countpcs.  See the help in the kgmacros for usage info.
 *
 *
 * Zone corruption logging
 *
 * Logging can also be used to help identify the source of a zone corruption.  First, identify the zone
 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args.  When -zc is used in conjunction
 * with zlog, it changes the logging style to track both allocations and frees to the zone.  So when the
 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
 * and freed any particular element in the zone.  Use the findelem kgmacro with the address of the element that's
 * been corrupted to examine its history.  This should lead to the source of the corruption.
 */
static int log_records;	/* size of the log, expressed in number of records */

#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */

static char zone_name_to_log[MAX_ZONE_NAME] = "";	/* the zone name we're logging, if any */
/*
 * The number of records in the log is configurable via the zrecs parameter in boot-args.  Set this to
 * the number of records you want in the log.  For example, "zrecs=1000" sets it to 1000 records.  Note
 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
 * but one doesn't generally care about performance when tracking down a leak.  The log is capped at
 * ZRECORDS_MAX records since going much larger than this tends to make the system unresponsive and
 * unbootable on small memory configurations.  The default value is 4000 records.
 */

#if	defined(__LP64__)
#define ZRECORDS_MAX 		128000		/* Max records allowed in the log */
#else
#define ZRECORDS_MAX 		8000		/* Max records allowed in the log */
#endif
#define ZRECORDS_DEFAULT	4000		/* default records in log if zrecs is not specified in boot-args */
/*
 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
 * the records to be ordered chronologically, and a small array to hold the pc's from the stack trace.  A
 * record is added to the log each time a zalloc() is done in the zone_of_interest.  For leak debugging,
 * the record is cleared when a zfree() is done.  For corruption debugging, the log tracks both allocs and frees.
 * If the log fills, old records are replaced as if it were a circular buffer.
 */

struct zrecord {
	void		*z_element;		/* the element that was zalloc'ed or zfree'ed */
	uint32_t	z_opcode:1,		/* whether it was a zalloc or zfree */
			z_time:31;		/* time index when operation was done */
	void		*z_pc[MAX_ZTRACE_DEPTH];	/* stack trace of caller */
};
/*
 * Opcodes for the z_opcode field:
 */

#define ZOP_ALLOC	1
#define ZOP_FREE	0

/*
 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
 */

static struct zrecord *zrecords;		/* the log itself, dynamically allocated when logging is enabled */
static int zcurrent  = 0;			/* index of the next slot in the log to use */
static int zrecorded = 0;			/* number of allocations recorded in the log */
static unsigned int ztime = 0;			/* a timestamp of sorts */
static zone_t  zone_of_interest = NULL;		/* the zone being watched; corresponds to zone_name_to_log */
/*
 * Decide if we want to log this zone by doing a string compare between a zone name and the name
 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
 * match a space in the zone name.
 */

static int
log_this_zone(const char *zonename, const char *logname)
{
	int len;
	const char *zc = zonename;
	const char *lc = logname;

	/*
	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
	 */

	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {

		/*
		 * If the current characters don't match, check for a space in
		 * the zone name and a corresponding period in the log name.
		 * If that's not there, then the strings don't match.
		 */

		if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
			break;

		/*
		 * The strings are equal so far.  If we're at the end, then it's a match.
		 */

		if (*zc == '\0')
			return TRUE;
	}

	return FALSE;
}
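/*
 * For example: a zone whose registered name contains spaces, say "VM map entries",
 * cannot be spelled that way in the boot-args, so "zlog=VM.map.entries" is the way
 * to request it; log_this_zone() treats each '.' in the log name as matching the
 * corresponding ' ' in the zone name.
 */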
/*
 * Test if we want to log this zalloc/zfree event.  We log if this is the zone we're interested in and
 * the buffer for the records has been allocated.
 */

#define DO_LOGGING(z)		(zrecords && (z) == zone_of_interest)

extern boolean_t zlog_ready;
#if CONFIG_ZLEAKS

#pragma mark Zone Leak Detection

/*
 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
 * allocations made by the zone allocator.  Every zleak_sample_factor allocations in each zone, we capture a
 * backtrace.  Every free, we examine the table and determine if the allocation was being tracked,
 * and stop tracking it if it was being tracked.
 *
 * We track the allocations in the zallocations hash table, which stores the address that was returned from
 * the zone allocator.  Each stored entry in the zallocations table points to an entry in the ztraces table, which
 * stores the backtrace associated with that allocation.  This provides uniquing for the relatively large
 * backtraces - we don't store them more than once.
 *
 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
 * a large amount of virtual space.
 */
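/*
 * Illustration only, a sketch of how the two tables relate (hashaddr() and the
 * struct definitions appear further down; "addr" stands for an address the zone
 * allocator handed out):
 *
 *	struct zallocation *za = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
 *	struct ztrace      *zt = &ztraces[za->za_trace_index];
 *
 * za records the outstanding allocation; zt holds its (possibly shared) backtrace.
 */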
#define ZLEAK_STATE_ENABLED		0x01	/* Zone leak monitoring should be turned on if zone_map fills up. */
#define ZLEAK_STATE_ACTIVE 		0x02	/* We are actively collecting traces. */
#define ZLEAK_STATE_ACTIVATING 		0x04	/* Some thread is doing setup; others should move along. */
#define ZLEAK_STATE_FAILED		0x08	/* Attempt to allocate tables failed.  We will not try again. */
uint32_t	zleak_state = 0;		/* State of collection, as above */

boolean_t	panic_include_ztrace	= FALSE;  	/* Enable zleak logging on panic */
vm_size_t 	zleak_global_tracking_threshold;	/* Size of zone map at which to start collecting data */
vm_size_t 	zleak_per_zone_tracking_threshold;	/* Size a zone will have before we will collect data on it */
unsigned int 	zleak_sample_factor	= 1000;		/* Allocations per sample attempt */
/*
 *	Counters for allocation statistics.
 */

/* Times two active records want to occupy the same spot */
unsigned int z_alloc_collisions = 0;
unsigned int z_trace_collisions = 0;

/* Times a new record lands on a spot previously occupied by a freed allocation */
unsigned int z_alloc_overwrites = 0;
unsigned int z_trace_overwrites = 0;

/* Times a new alloc or trace is put into the hash table */
unsigned int z_alloc_recorded	= 0;
unsigned int z_trace_recorded	= 0;

/* Times zleak_log returned false due to not being able to acquire the lock */
unsigned int z_total_conflicts	= 0;
#pragma mark struct zallocation
/*
 * Structure for keeping track of an allocation
 * An allocation bucket is in use if its element is not NULL
 */
struct zallocation {
	uintptr_t		za_element;	/* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
	vm_size_t		za_size;	/* how much memory did this allocation take up? */
	uint32_t		za_trace_index;	/* index into ztraces for backtrace associated with allocation */
	/* TODO: #if this out */
	uint32_t		za_hit_count;	/* for determining effectiveness of hash function */
};
/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;

vm_size_t zleak_max_zonemap_size;
/* Hashmaps of allocations and their corresponding traces */
static struct zallocation*	zallocations;
static struct ztrace*		ztraces;

/* not static so that panic can see this, see kern/debug.c */
struct ztrace*			top_ztrace;

/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
static lck_spin_t		zleak_lock;
static lck_attr_t		zleak_lock_attr;
static lck_grp_t		zleak_lock_grp;
static lck_grp_attr_t		zleak_lock_grp_attr;
/*
 * Initializes the zone leak monitor.  Called from zone_init()
 */
static void
zleak_init(vm_size_t max_zonemap_size)
{
	char			scratch_buf[16];
	boolean_t		zleak_enable_flag = FALSE;

	zleak_max_zonemap_size = max_zonemap_size;
	zleak_global_tracking_threshold = max_zonemap_size / 2;
	zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;

#if CONFIG_EMBEDDED
	if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	} else {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	}
#else /* CONFIG_EMBEDDED */
	/* -zleakoff (flag to disable zone leak monitor) */
	if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	} else {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	}
#endif /* CONFIG_EMBEDDED */
	/* zfactor=XXXX (override how often to sample the zone allocator) */
	if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
		printf("Zone leak factor override:%u\n", zleak_sample_factor);
	}

	/* zleak-allocs=XXXX (override number of buckets in zallocations) */
	if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
		printf("Zone leak alloc buckets override:%u\n", zleak_alloc_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!");
		}
	}

	/* zleak-traces=XXXX (override number of buckets in ztraces) */
	if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
		printf("Zone leak trace buckets override:%u\n", zleak_trace_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!");
		}
	}

	/* allocate the zleak_lock */
	lck_grp_attr_setdefault(&zleak_lock_grp_attr);
	lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
	lck_attr_setdefault(&zleak_lock_attr);
	lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);

	if (zleak_enable_flag) {
		zleak_state = ZLEAK_STATE_ENABLED;
	}
}
701 * Support for kern.zleak.active sysctl - a simplified
702 * version of the zleak_state variable.
705 get_zleak_state(void)
707 if (zleak_state
& ZLEAK_STATE_FAILED
)
709 if (zleak_state
& ZLEAK_STATE_ACTIVE
)
720 kern_return_t retval
;
721 vm_size_t z_alloc_size
= zleak_alloc_buckets
* sizeof(struct zallocation
);
722 vm_size_t z_trace_size
= zleak_trace_buckets
* sizeof(struct ztrace
);
723 void *allocations_ptr
= NULL
;
724 void *traces_ptr
= NULL
;
726 /* Only one thread attempts to activate at a time */
727 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
731 /* Indicate that we're doing the setup */
732 lck_spin_lock(&zleak_lock
);
733 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
734 lck_spin_unlock(&zleak_lock
);
738 zleak_state
|= ZLEAK_STATE_ACTIVATING
;
739 lck_spin_unlock(&zleak_lock
);
741 /* Allocate and zero tables */
742 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&allocations_ptr
, z_alloc_size
);
743 if (retval
!= KERN_SUCCESS
) {
747 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&traces_ptr
, z_trace_size
);
748 if (retval
!= KERN_SUCCESS
) {
752 bzero(allocations_ptr
, z_alloc_size
);
753 bzero(traces_ptr
, z_trace_size
);
755 /* Everything's set. Install tables, mark active. */
756 zallocations
= allocations_ptr
;
757 ztraces
= traces_ptr
;
760 * Initialize the top_ztrace to the first entry in ztraces,
761 * so we don't have to check for null in zleak_log
763 top_ztrace
= &ztraces
[0];
766 * Note that we do need a barrier between installing
767 * the tables and setting the active flag, because the zfree()
768 * path accesses the table without a lock if we're active.
770 lck_spin_lock(&zleak_lock
);
771 zleak_state
|= ZLEAK_STATE_ACTIVE
;
772 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
773 lck_spin_unlock(&zleak_lock
);
779 * If we fail to allocate memory, don't further tax
780 * the system by trying again.
782 lck_spin_lock(&zleak_lock
);
783 zleak_state
|= ZLEAK_STATE_FAILED
;
784 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
785 lck_spin_unlock(&zleak_lock
);
787 if (allocations_ptr
!= NULL
) {
788 kmem_free(kernel_map
, (vm_offset_t
)allocations_ptr
, z_alloc_size
);
791 if (traces_ptr
!= NULL
) {
792 kmem_free(kernel_map
, (vm_offset_t
)traces_ptr
, z_trace_size
);
799 * TODO: What about allocations that never get deallocated,
800 * especially ones with unique backtraces? Should we wait to record
801 * until after boot has completed?
802 * (How many persistent zallocs are there?)
806 * This function records the allocation in the allocations table,
807 * and stores the associated backtrace in the traces table
808 * (or just increments the refcount if the trace is already recorded)
809 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
810 * the associated trace's refcount is decremented.
811 * If the trace slot is in use, it returns.
812 * The refcount is incremented by the amount of memory the allocation consumes.
813 * The return value indicates whether to try again next time.
816 zleak_log(uintptr_t* bt
,
819 vm_size_t allocation_size
)
821 /* Quit if there's someone else modifying the hash tables */
822 if (!lck_spin_try_lock(&zleak_lock
)) {
827 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
829 uint32_t trace_index
= hashbacktrace(bt
, depth
, zleak_trace_buckets
);
830 struct ztrace
* trace
= &ztraces
[trace_index
];
832 allocation
->za_hit_count
++;
833 trace
->zt_hit_count
++;
836 * If the allocation bucket we want to be in is occupied, and if the occupier
837 * has the same trace as us, just bail.
839 if (allocation
->za_element
!= (uintptr_t) 0 && trace_index
== allocation
->za_trace_index
) {
840 z_alloc_collisions
++;
842 lck_spin_unlock(&zleak_lock
);
846 /* STEP 1: Store the backtrace in the traces array. */
847 /* A size of zero indicates that the trace bucket is free. */
849 if (trace
->zt_size
> 0 && bcmp(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t))) != 0 ) {
851 * Different unique trace with same hash!
852 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
853 * and get out of the way for later chances
855 trace
->zt_collisions
++;
856 z_trace_collisions
++;
858 lck_spin_unlock(&zleak_lock
);
860 } else if (trace
->zt_size
> 0) {
861 /* Same trace, already added, so increment refcount */
862 trace
->zt_size
+= allocation_size
;
864 /* Found an unused trace bucket, record the trace here! */
865 if (trace
->zt_depth
!= 0) /* if this slot was previously used but not currently in use */
866 z_trace_overwrites
++;
869 trace
->zt_size
= allocation_size
;
870 memcpy(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t)) );
872 trace
->zt_depth
= depth
;
873 trace
->zt_collisions
= 0;
876 /* STEP 2: Store the allocation record in the allocations array. */
878 if (allocation
->za_element
!= (uintptr_t) 0) {
880 * Straight up replace any allocation record that was there. We don't want to do the work
881 * to preserve the allocation entries that were there, because we only record a subset of the
882 * allocations anyways.
885 z_alloc_collisions
++;
887 struct ztrace
* associated_trace
= &ztraces
[allocation
->za_trace_index
];
888 /* Knock off old allocation's size, not the new allocation */
889 associated_trace
->zt_size
-= allocation
->za_size
;
890 } else if (allocation
->za_trace_index
!= 0) {
891 /* Slot previously used but not currently in use */
892 z_alloc_overwrites
++;
895 allocation
->za_element
= addr
;
896 allocation
->za_trace_index
= trace_index
;
897 allocation
->za_size
= allocation_size
;
901 if (top_ztrace
->zt_size
< trace
->zt_size
)
904 lck_spin_unlock(&zleak_lock
);
909 * Free the allocation record and release the stacktrace.
910 * This should be as fast as possible because it will be called for every free.
913 zleak_free(uintptr_t addr
,
914 vm_size_t allocation_size
)
916 if (addr
== (uintptr_t) 0)
919 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
921 /* Double-checked locking: check to find out if we're interested, lock, check to make
922 * sure it hasn't changed, then modify it, and release the lock.
925 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
926 /* if the allocation was the one, grab the lock, check again, then delete it */
927 lck_spin_lock(&zleak_lock
);
929 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
930 struct ztrace
*trace
;
932 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
933 if (allocation
->za_size
!= allocation_size
) {
934 panic("Freeing as size %lu memory that was allocated with size %lu\n",
935 (uintptr_t)allocation_size
, (uintptr_t)allocation
->za_size
);
938 trace
= &ztraces
[allocation
->za_trace_index
];
940 /* size of 0 indicates trace bucket is unused */
941 if (trace
->zt_size
> 0) {
942 trace
->zt_size
-= allocation_size
;
945 /* A NULL element means the allocation bucket is unused */
946 allocation
->za_element
= 0;
948 lck_spin_unlock(&zleak_lock
);
952 #endif /* CONFIG_ZLEAKS */
954 /* These functions outside of CONFIG_ZLEAKS because they are also used in
955 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
959 * This function captures a backtrace from the current stack and
960 * returns the number of frames captured, limited by max_frames.
961 * It's fast because it does no checking to make sure there isn't bad data.
962 * Since it's only called from threads that we're going to keep executing,
963 * if there's bad data we were going to die eventually.
964 * If this function is inlined, it doesn't record the frame of the function it's inside.
965 * (because there's no stack frame!)
969 fastbacktrace(uintptr_t* bt
, uint32_t max_frames
)
971 uintptr_t* frameptr
= NULL
, *frameptr_next
= NULL
;
972 uintptr_t retaddr
= 0;
973 uint32_t frame_index
= 0, frames
= 0;
974 uintptr_t kstackb
, kstackt
;
975 thread_t cthread
= current_thread();
977 if (__improbable(cthread
== NULL
))
980 kstackb
= cthread
->kernel_stack
;
981 kstackt
= kstackb
+ kernel_stack_size
;
982 /* Load stack frame pointer (EBP on x86) into frameptr */
983 frameptr
= __builtin_frame_address(0);
985 while (frameptr
!= NULL
&& frame_index
< max_frames
) {
986 /* Next frame pointer is pointed to by the previous one */
987 frameptr_next
= (uintptr_t*) *frameptr
;
989 /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
990 /* That also means the return address is worthless, so don't record it */
991 if (frameptr_next
== NULL
)
993 /* Verify thread stack bounds */
994 if (((uintptr_t)frameptr_next
> kstackt
) || ((uintptr_t)frameptr_next
< kstackb
))
996 /* Pull return address from one spot above the frame pointer */
997 retaddr
= *(frameptr
+ 1);
999 /* Store it in the backtrace array */
1000 bt
[frame_index
++] = retaddr
;
1002 frameptr
= frameptr_next
;
1005 /* Save the number of frames captured for return value */
1006 frames
= frame_index
;
1008 /* Fill in the rest of the backtrace with zeros */
1009 while (frame_index
< max_frames
)
1010 bt
[frame_index
++] = 0;
1015 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
1017 hash_mix(uintptr_t x
)
1040 hashbacktrace(uintptr_t* bt
, uint32_t depth
, uint32_t max_size
)
1044 uintptr_t mask
= max_size
- 1;
1047 hash
+= bt
[--depth
];
1050 hash
= hash_mix(hash
) & mask
;
1052 assert(hash
< max_size
);
1054 return (uint32_t) hash
;
1058 * TODO: Determine how well distributed this is
1059 * max_size must be a power of 2. i.e 0x10000 because 0x10000-1 is 0x0FFFF which is a great bitmask
1062 hashaddr(uintptr_t pt
, uint32_t max_size
)
1065 uintptr_t mask
= max_size
- 1;
1067 hash
= hash_mix(pt
) & mask
;
1069 assert(hash
< max_size
);
1071 return (uint32_t) hash
;
1074 /* End of all leak-detection code */
1078 * zinit initializes a new zone. The zone data structures themselves
1079 * are stored in a zone, which is initially a static structure that
1080 * is initialized by zone_init.
1084 vm_size_t size
, /* the size of an element */
1085 vm_size_t max
, /* maximum memory to use */
1086 vm_size_t alloc
, /* allocation size */
1087 const char *name
) /* a name for the zone */
1091 if (zone_zone
== ZONE_NULL
) {
1093 z
= (struct zone
*)zdata
;
1094 zdata
+= sizeof(*z
);
1095 zdata_size
-= sizeof(*z
);
1097 z
= (zone_t
) zalloc(zone_zone
);
1103 * Round off all the parameters appropriately.
1105 if (size
< sizeof(z
->free_elements
))
1106 size
= sizeof(z
->free_elements
);
1107 size
= ((size
-1) + sizeof(z
->free_elements
)) -
1108 ((size
-1) % sizeof(z
->free_elements
));
1111 alloc
= round_page(alloc
);
1112 max
= round_page(max
);
	/*
	 *	we look for an allocation size with less than 1% waste
	 *	up to 5 pages in size...
	 *	otherwise, we look for an allocation size with least fragmentation
	 *	in the range of 1 - 5 pages
	 *	This size will be used unless
	 *	the user suggestion is larger AND has less fragmentation
	 */
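	/*
	 * Worked example (assuming 4 KB pages): for a 1200-byte element, one to
	 * four pages each waste well over 1%, but a 5-page allocation holds 17
	 * elements and wastes only 80 bytes, so the loop below would settle on
	 * 5 pages.
	 */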
1122 if ((size
< PAGE_SIZE
) && (PAGE_SIZE
% size
<= PAGE_SIZE
/ 10))
1126 #if defined(__LP64__)
1127 if (((alloc
% size
) != 0) || (alloc
> PAGE_SIZE
* 8))
1130 vm_size_t best
, waste
; unsigned int i
;
1132 waste
= best
% size
;
1134 for (i
= 1; i
<= 5; i
++) {
1135 vm_size_t tsize
, twaste
;
1137 tsize
= i
* PAGE_SIZE
;
1139 if ((tsize
% size
) < (tsize
/ 100)) {
1141 goto use_this_allocation
;
1143 twaste
= tsize
% size
;
1145 best
= tsize
, waste
= twaste
;
1147 if (alloc
<= best
|| (alloc
% size
>= waste
))
1150 use_this_allocation
:
1151 if (max
&& (max
< alloc
))
1154 z
->free_elements
= 0;
1157 z
->elem_size
= size
;
1158 z
->alloc_size
= alloc
;
1159 z
->zone_name
= name
;
1162 z
->doing_alloc
= FALSE
;
1163 z
->doing_gc
= FALSE
;
1164 z
->exhaustible
= FALSE
;
1165 z
->collectable
= TRUE
;
1166 z
->allows_foreign
= FALSE
;
1167 z
->expandable
= TRUE
;
1169 z
->async_pending
= FALSE
;
1170 z
->caller_acct
= TRUE
;
1171 z
->noencrypt
= FALSE
;
1172 z
->no_callout
= FALSE
;
1173 z
->async_prio_refill
= FALSE
;
1174 z
->gzalloc_exempt
= FALSE
;
1175 z
->alignment_required
= FALSE
;
1176 z
->prio_refill_watermark
= 0;
1177 z
->zone_replenish_thread
= NULL
;
1181 z
->zleak_capture
= 0;
1182 z
->zleak_on
= FALSE
;
1183 #endif /* CONFIG_ZLEAKS */
1186 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
1187 zone_debug_enable(z
);
1188 #endif /* ZONE_DEBUG */
1192 * Add the zone to the all-zones list.
1193 * If we are tracking zone info per task, and we have
1194 * already used all the available stat slots, then keep
1195 * using the overflow zone slot.
1197 z
->next_zone
= ZONE_NULL
;
1198 thread_call_setup(&z
->call_async_alloc
, zalloc_async
, z
);
1199 simple_lock(&all_zones_lock
);
1201 last_zone
= &z
->next_zone
;
1202 z
->index
= num_zones
;
1203 if (zinfo_per_task
) {
1204 if (num_zones
> ZONES_MAX
)
1205 z
->index
= ZONES_MAX
;
1208 simple_unlock(&all_zones_lock
);
1211 * Check if we should be logging this zone. If so, remember the zone pointer.
1213 if (log_this_zone(z
->zone_name
, zone_name_to_log
)) {
1214 zone_of_interest
= z
;
1218 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1219 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
1220 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1221 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1222 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1225 if (zone_of_interest
!= NULL
&& zrecords
== NULL
&& zlog_ready
) {
1226 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&zrecords
, log_records
* sizeof(struct zrecord
)) == KERN_SUCCESS
) {
1229 * We got the memory for the log. Zero it out since the code needs this to identify unused records.
1230 * At this point, everything is set up and we're ready to start logging this zone.
1233 bzero((void *)zrecords
, log_records
* sizeof(struct zrecord
));
1234 printf("zone: logging started for zone %s (%p)\n", zone_of_interest
->zone_name
, zone_of_interest
);
1237 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1238 zone_of_interest
= NULL
;
1242 gzalloc_zone_init(z
);
1246 unsigned zone_replenish_loops
, zone_replenish_wakeups
, zone_replenish_wakeups_initiated
;
1248 static void zone_replenish_thread(zone_t
);
1250 /* High priority VM privileged thread used to asynchronously refill a designated
1251 * zone, such as the reserved VM map entry zone.
1253 static void zone_replenish_thread(zone_t z
) {
1254 vm_size_t free_size
;
1255 current_thread()->options
|= TH_OPT_VMPRIV
;
1259 assert(z
->prio_refill_watermark
!= 0);
1260 while ((free_size
= (z
->cur_size
- (z
->count
* z
->elem_size
))) < (z
->prio_refill_watermark
* z
->elem_size
)) {
1261 assert(z
->doing_alloc
== FALSE
);
1262 assert(z
->async_prio_refill
== TRUE
);
1265 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
1266 vm_offset_t space
, alloc_size
;
1270 alloc_size
= round_page(z
->elem_size
);
1272 alloc_size
= z
->alloc_size
;
1275 zflags
|= KMA_NOENCRYPT
;
1277 kr
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
);
1279 if (kr
== KERN_SUCCESS
) {
1281 if (alloc_size
== PAGE_SIZE
)
1282 space
= zone_alias_addr(space
);
1284 zcram(z
, space
, alloc_size
);
1285 } else if (kr
== KERN_RESOURCE_SHORTAGE
) {
1287 } else if (kr
== KERN_NO_SPACE
) {
1288 kr
= kernel_memory_allocate(kernel_map
, &space
, alloc_size
, 0, zflags
);
1289 if (kr
== KERN_SUCCESS
) {
1291 if (alloc_size
== PAGE_SIZE
)
1292 space
= zone_alias_addr(space
);
1294 zcram(z
, space
, alloc_size
);
1296 assert_wait_timeout(&z
->zone_replenish_thread
, THREAD_UNINT
, 1, 100 * NSEC_PER_USEC
);
1297 thread_block(THREAD_CONTINUE_NULL
);
1302 zone_replenish_loops
++;
1306 assert_wait(&z
->zone_replenish_thread
, THREAD_UNINT
);
1307 thread_block(THREAD_CONTINUE_NULL
);
1308 zone_replenish_wakeups
++;
void
zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
	z->prio_refill_watermark = low_water_mark;

	z->async_prio_refill = TRUE;

	kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);

	if (tres != KERN_SUCCESS) {
		panic("zone_prio_refill_configure, thread create: 0x%x", tres);
	}

	thread_deallocate(z->zone_replenish_thread);
}
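/*
 * Illustrative caller (hypothetical zone and watermark, not taken from this file):
 * a zone that must never run dry while the VM system is holding locks opts in once
 * at setup time, and the replenish thread above keeps it topped up:
 *
 *	zone_prio_refill_configure(my_reserved_zone, 100);
 */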
1328 * Cram the given memory into the specified zone.
1336 vm_size_t elem_size
;
1337 boolean_t from_zm
= FALSE
;
1339 /* Basic sanity checks */
1340 assert(zone
!= ZONE_NULL
&& newmem
!= (vm_offset_t
)0);
1341 assert(!zone
->collectable
|| zone
->allows_foreign
1342 || (from_zone_map(newmem
, size
)));
1344 elem_size
= zone
->elem_size
;
1346 if (from_zone_map(newmem
, size
))
1350 zone_page_init(newmem
, size
);
1353 while (size
>= elem_size
) {
1354 free_to_zone(zone
, (void *) newmem
);
1356 zone_page_alloc(newmem
, elem_size
);
1357 zone
->count
++; /* compensate for free_to_zone */
1359 newmem
+= elem_size
;
1360 zone
->cur_size
+= elem_size
;
1367 * Steal memory for the zone package. Called from
1368 * vm_page_bootstrap().
1371 zone_steal_memory(void)
1374 gzalloc_configure();
1376 /* Request enough early memory to get to the pmap zone */
1377 zdata_size
= 12 * sizeof(struct zone
);
1378 zdata
= (vm_offset_t
)pmap_steal_memory(round_page(zdata_size
));
1383 * Fill a zone with enough memory to contain at least nelem elements.
1384 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
1385 * Return the number of elements actually put into the zone, which may
1386 * be more than the caller asked for since the memory allocation is
1387 * rounded up to a full page.
1402 size
= nelem
* zone
->elem_size
;
1403 size
= round_page(size
);
1404 kr
= kmem_alloc_kobject(kernel_map
, &memory
, size
);
1405 if (kr
!= KERN_SUCCESS
)
1408 zone_change(zone
, Z_FOREIGN
, TRUE
);
1409 zcram(zone
, memory
, size
);
1410 nalloc
= (int)(size
/ zone
->elem_size
);
1411 assert(nalloc
>= nelem
);
1417 * Initialize the "zone of zones" which uses fixed memory allocated
1418 * earlier in memory initialization. zone_bootstrap is called
1422 zone_bootstrap(void)
1426 if (PE_parse_boot_argn("-zinfop", temp_buf
, sizeof(temp_buf
))) {
1427 zinfo_per_task
= TRUE
;
1430 /* do we want corruption-style debugging with zlog? */
1431 if (PE_parse_boot_argn("-zc", temp_buf
, sizeof(temp_buf
))) {
1432 corruption_debug_flag
= TRUE
;
1435 /* Set up zone poisoning */
1437 free_check_sample_factor
= ZP_DEFAULT_SAMPLING_FACTOR
;
1439 /* support for old zone poisoning boot-args */
1440 if (PE_parse_boot_argn("-zp", temp_buf
, sizeof(temp_buf
))) {
1441 free_check_sample_factor
= 1;
1443 if (PE_parse_boot_argn("-no-zp", temp_buf
, sizeof(temp_buf
))) {
1444 free_check_sample_factor
= 0;
1447 /* zp-factor=XXXX (override how often to poison freed zone elements) */
1448 if (PE_parse_boot_argn("zp-factor", &free_check_sample_factor
, sizeof(free_check_sample_factor
))) {
1449 printf("Zone poisoning factor override:%u\n", free_check_sample_factor
);
1453 * Check for and set up zone leak detection if requested via boot-args. We recognized two
1456 * zlog=<zone_to_log>
1457 * zrecs=<num_records_in_log>
1459 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
1460 * control the size of the log. If zrecs is not specified, a default value is used.
1463 if (PE_parse_boot_argn("zlog", zone_name_to_log
, sizeof(zone_name_to_log
)) == TRUE
) {
1464 if (PE_parse_boot_argn("zrecs", &log_records
, sizeof(log_records
)) == TRUE
) {
1467 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
1468 * This prevents accidentally hogging too much kernel memory and making the system
1472 log_records
= MIN(ZRECORDS_MAX
, log_records
);
1475 log_records
= ZRECORDS_DEFAULT
;
1479 simple_lock_init(&all_zones_lock
, 0);
1481 first_zone
= ZONE_NULL
;
1482 last_zone
= &first_zone
;
1485 /* assertion: nobody else called zinit before us */
1486 assert(zone_zone
== ZONE_NULL
);
1487 zone_zone
= zinit(sizeof(struct zone
), 128 * sizeof(struct zone
),
1488 sizeof(struct zone
), "zones");
1489 zone_change(zone_zone
, Z_COLLECT
, FALSE
);
1490 zone_change(zone_zone
, Z_CALLERACCT
, FALSE
);
1491 zone_change(zone_zone
, Z_NOENCRYPT
, TRUE
);
1493 zcram(zone_zone
, zdata
, zdata_size
);
1495 /* initialize fake zones and zone info if tracking by task */
1496 if (zinfo_per_task
) {
1497 vm_size_t zisize
= sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
;
1500 for (i
= 0; i
< num_fake_zones
; i
++)
1501 fake_zones
[i
].init(ZINFO_SLOTS
- num_fake_zones
+ i
);
1502 zinfo_zone
= zinit(zisize
, zisize
* CONFIG_TASK_MAX
,
1503 zisize
, "per task zinfo");
1504 zone_change(zinfo_zone
, Z_CALLERACCT
, FALSE
);
1509 zinfo_task_init(task_t task
)
1511 if (zinfo_per_task
) {
1512 task
->tkm_zinfo
= zalloc(zinfo_zone
);
1513 memset(task
->tkm_zinfo
, 0, sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
);
1515 task
->tkm_zinfo
= NULL
;
1520 zinfo_task_free(task_t task
)
1522 assert(task
!= kernel_task
);
1523 if (task
->tkm_zinfo
!= NULL
) {
1524 zfree(zinfo_zone
, task
->tkm_zinfo
);
1525 task
->tkm_zinfo
= NULL
;
1531 vm_size_t max_zonemap_size
)
1533 kern_return_t retval
;
1534 vm_offset_t zone_min
;
1535 vm_offset_t zone_max
;
1537 retval
= kmem_suballoc(kernel_map
, &zone_min
, max_zonemap_size
,
1538 FALSE
, VM_FLAGS_ANYWHERE
| VM_FLAGS_PERMANENT
,
1541 if (retval
!= KERN_SUCCESS
)
1542 panic("zone_init: kmem_suballoc failed");
1543 zone_max
= zone_min
+ round_page(max_zonemap_size
);
1545 gzalloc_init(max_zonemap_size
);
1548 * Setup garbage collection information:
1550 zone_map_min_address
= zone_min
;
1551 zone_map_max_address
= zone_max
;
1553 zone_pages
= (unsigned int)atop_kernel(zone_max
- zone_min
);
1554 zone_page_table_used_size
= sizeof(zone_page_table
);
1556 zone_page_table_second_level_size
= 1;
1557 zone_page_table_second_level_shift_amount
= 0;
1560 * Find the power of 2 for the second level that allows
1561 * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
1564 while ((zone_page_table_first_level_slot(zone_pages
-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
) {
1565 zone_page_table_second_level_size
<<= 1;
1566 zone_page_table_second_level_shift_amount
++;
1569 lck_grp_attr_setdefault(&zone_lck_grp_attr
);
1570 lck_grp_init(&zone_lck_grp
, "zones", &zone_lck_grp_attr
);
1571 lck_attr_setdefault(&zone_lck_attr
);
1572 lck_mtx_init_ext(&zone_gc_lock
, &zone_lck_ext
, &zone_lck_grp
, &zone_lck_attr
);
1576 * Initialize the zone leak monitor
1578 zleak_init(max_zonemap_size
);
1579 #endif /* CONFIG_ZLEAKS */
1583 zone_page_table_expand(zone_page_index_t pindex
)
1585 unsigned int first_index
;
1586 struct zone_page_table_entry
* volatile * first_level_ptr
;
1588 assert(pindex
< zone_pages
);
1590 first_index
= zone_page_table_first_level_slot(pindex
);
1591 first_level_ptr
= &zone_page_table
[first_index
];
1593 if (*first_level_ptr
== NULL
) {
1595 * We were able to verify the old first-level slot
1596 * had NULL, so attempt to populate it.
1599 vm_offset_t second_level_array
= 0;
1600 vm_size_t second_level_size
= round_page(zone_page_table_second_level_size
* sizeof(struct zone_page_table_entry
));
1601 zone_page_index_t i
;
1602 struct zone_page_table_entry
*entry_array
;
1604 if (kmem_alloc_kobject(zone_map
, &second_level_array
,
1605 second_level_size
) != KERN_SUCCESS
) {
1606 panic("zone_page_table_expand");
1610 * zone_gc() may scan the "zone_page_table" directly,
1611 * so make sure any slots have a valid unused state.
1613 entry_array
= (struct zone_page_table_entry
*)second_level_array
;
1614 for (i
=0; i
< zone_page_table_second_level_size
; i
++) {
1615 entry_array
[i
].alloc_count
= ZONE_PAGE_UNUSED
;
1616 entry_array
[i
].collect_count
= 0;
1619 if (OSCompareAndSwapPtr(NULL
, entry_array
, first_level_ptr
)) {
1620 /* Old slot was NULL, replaced with expanded level */
1621 OSAddAtomicLong(second_level_size
, &zone_page_table_used_size
);
1623 /* Old slot was not NULL, someone else expanded first */
1624 kmem_free(zone_map
, second_level_array
, second_level_size
);
1627 /* Old slot was not NULL, already been expanded */
1631 struct zone_page_table_entry
*
1632 zone_page_table_lookup(zone_page_index_t pindex
)
1634 unsigned int first_index
= zone_page_table_first_level_slot(pindex
);
1635 struct zone_page_table_entry
*second_level
= zone_page_table
[first_index
];
1638 return &second_level
[zone_page_table_second_level_slot(pindex
)];
1644 extern volatile SInt32 kfree_nop_count
;
1647 #pragma mark zalloc_canblock
1650 * zalloc returns an element from the specified zone.
1654 register zone_t zone
,
1657 vm_offset_t addr
= 0;
1658 kern_return_t retval
;
1659 uintptr_t zbt
[MAX_ZTRACE_DEPTH
]; /* used in zone leak logging and zone leak detection */
1662 boolean_t zone_replenish_wakeup
= FALSE
;
1663 boolean_t did_gzalloc
;
1665 did_gzalloc
= FALSE
;
1667 uint32_t zleak_tracedepth
= 0; /* log this allocation if nonzero */
1668 #endif /* CONFIG_ZLEAKS */
1670 assert(zone
!= ZONE_NULL
);
1673 addr
= gzalloc_alloc(zone
, canblock
);
1674 did_gzalloc
= (addr
!= 0);
1680 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1683 if (DO_LOGGING(zone
))
1684 numsaved
= OSBacktrace((void*) zbt
, MAX_ZTRACE_DEPTH
);
1688 * Zone leak detection: capture a backtrace every zleak_sample_factor
1689 * allocations in this zone.
1691 if (zone
->zleak_on
&& (zone
->zleak_capture
++ % zleak_sample_factor
== 0)) {
1692 zone
->zleak_capture
= 1;
1694 /* Avoid backtracing twice if zone logging is on */
1696 zleak_tracedepth
= fastbacktrace(zbt
, MAX_ZTRACE_DEPTH
);
1698 zleak_tracedepth
= numsaved
;
1700 #endif /* CONFIG_ZLEAKS */
1702 if (__probable(addr
== 0))
1703 alloc_from_zone(zone
, (void **) &addr
);
1705 if (zone
->async_prio_refill
&&
1706 ((zone
->cur_size
- (zone
->count
* zone
->elem_size
)) <
1707 (zone
->prio_refill_watermark
* zone
->elem_size
))) {
1708 zone_replenish_wakeup
= TRUE
;
1709 zone_replenish_wakeups_initiated
++;
1712 while ((addr
== 0) && canblock
) {
1714 * If nothing was there, try to get more
1716 if (zone
->doing_alloc
) {
1718 * Someone is allocating memory for this zone.
1719 * Wait for it to show up, then try again.
1721 zone
->waiting
= TRUE
;
1723 } else if (zone
->doing_gc
) {
1724 /* zone_gc() is running. Since we need an element
1725 * from the free list that is currently being
1726 * collected, set the waiting bit and try to
1727 * interrupt the GC process, and try again
1728 * when we obtain the lock.
1730 zone
->waiting
= TRUE
;
1734 vm_size_t alloc_size
;
1737 if ((zone
->cur_size
+ zone
->elem_size
) >
1739 if (zone
->exhaustible
)
1741 if (zone
->expandable
) {
1743 * We're willing to overflow certain
1744 * zones, but not without complaining.
1746 * This is best used in conjunction
1747 * with the collectable flag. What we
1748 * want is an assurance we can get the
1749 * memory back, assuming there's no
1752 zone
->max_size
+= (zone
->max_size
>> 1);
1756 panic_include_zprint
= TRUE
;
1758 if (zleak_state
& ZLEAK_STATE_ACTIVE
)
1759 panic_include_ztrace
= TRUE
;
1760 #endif /* CONFIG_ZLEAKS */
1761 panic("zalloc: zone \"%s\" empty.", zone
->zone_name
);
1764 zone
->doing_alloc
= TRUE
;
1768 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
1770 if (vm_pool_low() || retry
>= 1)
1772 round_page(zone
->elem_size
);
1774 alloc_size
= zone
->alloc_size
;
1776 if (zone
->noencrypt
)
1777 zflags
|= KMA_NOENCRYPT
;
1779 retval
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
);
1780 if (retval
== KERN_SUCCESS
) {
1782 if (alloc_size
== PAGE_SIZE
)
1783 space
= zone_alias_addr(space
);
1787 if ((zleak_state
& (ZLEAK_STATE_ENABLED
| ZLEAK_STATE_ACTIVE
)) == ZLEAK_STATE_ENABLED
) {
1788 if (zone_map
->size
>= zleak_global_tracking_threshold
) {
1791 kr
= zleak_activate();
1792 if (kr
!= KERN_SUCCESS
) {
1793 printf("Failed to activate live zone leak debugging (%d).\n", kr
);
1798 if ((zleak_state
& ZLEAK_STATE_ACTIVE
) && !(zone
->zleak_on
)) {
1799 if (zone
->cur_size
> zleak_per_zone_tracking_threshold
) {
1800 zone
->zleak_on
= TRUE
;
1803 #endif /* CONFIG_ZLEAKS */
1805 zcram(zone
, space
, alloc_size
);
1808 } else if (retval
!= KERN_RESOURCE_SHORTAGE
) {
1813 printf("zalloc did gc\n");
1814 zone_display_zprint();
1817 panic_include_zprint
= TRUE
;
1819 if ((zleak_state
& ZLEAK_STATE_ACTIVE
)) {
1820 panic_include_ztrace
= TRUE
;
1822 #endif /* CONFIG_ZLEAKS */
1823 /* TODO: Change this to something more descriptive, perhaps
1824 * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
1826 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone
->zone_name
, zone
->count
, retval
, (int)kfree_nop_count
);
1833 zone
->doing_alloc
= FALSE
;
1834 if (zone
->waiting
) {
1835 zone
->waiting
= FALSE
;
1838 alloc_from_zone(zone
, (void **) &addr
);
1840 retval
== KERN_RESOURCE_SHORTAGE
) {
1848 alloc_from_zone(zone
, (void **) &addr
);
1852 /* Zone leak detection:
1853 * If we're sampling this allocation, add it to the zleaks hash table.
1855 if (addr
&& zleak_tracedepth
> 0) {
1856 /* Sampling can fail if another sample is happening at the same time in a different zone. */
1857 if (!zleak_log(zbt
, addr
, zleak_tracedepth
, zone
->elem_size
)) {
1858 /* If it failed, roll back the counter so we sample the next allocation instead. */
1859 zone
->zleak_capture
= zleak_sample_factor
;
1862 #endif /* CONFIG_ZLEAKS */
1866 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1867 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
1868 * since it protects the various log related data structures.
1871 if (DO_LOGGING(zone
) && addr
) {
1874 * Look for a place to record this new allocation. We implement two different logging strategies
1875 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1876 * for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
1877 * among all the records. So we look for an unused slot in the log and fill that in before overwriting
1878 * an old entry. When looking for a corruption however, it's better to have a chronological log of all
1879 * the allocations and frees done in the zone so that the history of operations for a specific zone
1880 * element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
1881 * oldest entry whenever a new one needs to be added.
1883 * The corruption_debug_flag flag tells us what style of logging to do. It's set if we're supposed to be
1884 * doing corruption style logging (indicated via -zc in the boot-args).
		if (!corruption_debug_flag && zrecords[zcurrent].z_element && zrecorded < log_records) {

			/*
			 * If we get here, we're doing leak style logging and there's still some unused entries in
			 * the log (since zrecorded is smaller than the size of the log).  Look for an unused slot
			 * starting at zcurrent and wrap-around if we reach the end of the buffer.  If the buffer
			 * is already full, we just fall through and overwrite the element indexed by zcurrent.
			 */

			for (i = zcurrent; i < log_records; i++) {
				if (zrecords[i].z_element == NULL) {
					zcurrent = i;
					goto empty_slot;
				}
			}

			for (i = 0; i < zcurrent; i++) {
				if (zrecords[i].z_element == NULL) {
					zcurrent = i;
					goto empty_slot;
				}
			}
		}

		/*
		 * Save a record of this allocation
		 */

	empty_slot:
		if (zrecords[zcurrent].z_element == NULL)
			zrecorded++;

		zrecords[zcurrent].z_element = (void *)addr;
		zrecords[zcurrent].z_time = ztime++;
		zrecords[zcurrent].z_opcode = ZOP_ALLOC;

		for (i = 0; i < numsaved; i++)
			zrecords[zcurrent].z_pc[i] = (void *) zbt[i];

		for (; i < MAX_ZTRACE_DEPTH; i++)
			zrecords[zcurrent].z_pc[i] = 0;
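		/*
		 * Advance the log cursor; wrapping back to slot 0 is what makes
		 * the record array behave as a circular buffer once it fills up.
		 */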
		zcurrent++;

		if (zcurrent >= log_records)
			zcurrent = 0;
	}
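	/*
	 * If the zone came up empty and the caller can't block, kick the zone's
	 * async allocation thread call so the refill happens in a context that
	 * is allowed to block, then retry the allocation once.
	 */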
	if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
		zone->async_pending = TRUE;
		unlock_zone(zone);
		thread_call_enter(&zone->call_async_alloc);
		lock_zone(zone);
		alloc_from_zone(zone, (void **) &addr);
	}
#if	ZONE_DEBUG
	if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
		enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
		addr += ZONE_DEBUG_OFFSET;
	}
#endif	/* ZONE_DEBUG */
#endif /* CONFIG_ZLEAKS */
	unlock_zone(zone);

	if (zone_replenish_wakeup)
		thread_wakeup(&zone->zone_replenish_thread);

	TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);

	if (addr) {
		thread_t thr = current_thread();
		task_t task;
		zinfo_usage_t zinfo;
		vm_size_t sz = zone->elem_size;

		if (zone->caller_acct)
			ledger_credit(thr->t_ledger, task_ledgers.tkm_private, sz);
		else
			ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, sz);

		if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
			OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
	}
	return((void *)addr);
}
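/*
 * zalloc() and zalloc_noblock() are thin wrappers that only select whether
 * zalloc_canblock() may wait for memory to become available.
 */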
void *
zalloc(
	register zone_t zone)
{
	return( zalloc_canblock(zone, TRUE) );
}

void *
zalloc_noblock(
	register zone_t zone)
{
	return( zalloc_canblock(zone, FALSE) );
}
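/*
 * Illustrative use of the zone allocator (a sketch, not code from this file;
 * "struct widget", "widget_zone" and the sizes are made-up names):
 *
 *	static zone_t widget_zone;
 *
 *	widget_zone = zinit(sizeof (struct widget),		// element size
 *			    1024 * sizeof (struct widget),	// max memory
 *			    PAGE_SIZE,				// alloc chunk
 *			    "widgets");
 *
 *	struct widget *w = (struct widget *) zalloc(widget_zone);
 *	...
 *	zfree(widget_zone, w);
 */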
void
zalloc_async(
	thread_call_param_t		p0,
	__unused thread_call_param_t	p1)
{
	void *elt;

	elt = zalloc_canblock((zone_t)p0, TRUE);
	zfree((zone_t)p0, elt);
	lock_zone(((zone_t)p0));
	((zone_t)p0)->async_pending = FALSE;
	unlock_zone(((zone_t)p0));
}

/*
 *	zget returns an element from the specified zone
 *	and immediately returns nothing if there is nothing there.
 *
 *	This form should be used when you can not block (like when
 *	processing an interrupt).
 *
 *	XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
 *	friend vm_page_more_fictitious can block, so it doesn't seem like
 *	this is used for interrupts any more....
 */
void *
zget(
	register zone_t	zone)
{
	vm_offset_t	addr;

#if CONFIG_ZLEAKS
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];		/* used for zone leak detection */
	uint32_t	zleak_tracedepth = 0;		/* log this allocation if nonzero */
#endif /* CONFIG_ZLEAKS */

	assert( zone != ZONE_NULL );

	if (!lock_try_zone(zone))
		return NULL;

#if CONFIG_ZLEAKS
	/*
	 * Zone leak detection: capture a backtrace
	 */
	if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
		zone->zleak_capture = 1;
		zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
	}
#endif /* CONFIG_ZLEAKS */

	alloc_from_zone(zone, (void **) &addr);

#if	ZONE_DEBUG
	if (addr && zone_debug_enabled(zone)) {
		enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
		addr += ZONE_DEBUG_OFFSET;
	}
#endif	/* ZONE_DEBUG */

#if CONFIG_ZLEAKS
	/*
	 * Zone leak detection: record the allocation
	 */
	if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
		/* Sampling can fail if another sample is happening at the same time in a different zone. */
		if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
			/* If it failed, roll back the counter so we sample the next allocation instead. */
			zone->zleak_capture = zleak_sample_factor;
		}
	}
#endif /* CONFIG_ZLEAKS */

	unlock_zone(zone);

	return((void *) addr);
}
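/*
 * Note that zget() returns NULL both when the zone has no free elements and
 * when the zone lock could not be taken without waiting; callers must treat
 * NULL as a soft failure rather than an error.
 */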
/* Keep this FALSE by default.  Large memory machines run orders of magnitude
   slower in debug mode when true.  Use debugger to enable if needed */
/* static */ boolean_t zone_check = FALSE;

static zone_t zone_last_bogus_zone = ZONE_NULL;
static vm_offset_t zone_last_bogus_elem = 0;
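/*
 * zfree() returns an element to its zone.  An obviously bogus free is
 * recorded in zone_last_bogus_zone/zone_last_bogus_elem (for inspection from
 * a debugger) on kernels that don't panic on it outright.
 */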
void
zfree(
	register zone_t	zone,
	void		*addr)
{
	vm_offset_t	elem = (vm_offset_t) addr;
	void		*zbt[MAX_ZTRACE_DEPTH];	/* only used if zone logging is enabled via boot-args */
	int		numsaved = 0;
	boolean_t	gzfreed = FALSE;

	assert(zone != ZONE_NULL);

	/*
	 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
	 */

	if (DO_LOGGING(zone))
		numsaved = OSBacktrace(&zbt[0], MAX_ZTRACE_DEPTH);

	/* Basic sanity checks */
	if (zone == ZONE_NULL || elem == (vm_offset_t)0)
		panic("zfree: NULL");
	/* zone_gc assumes zones are never freed */
	if (zone == zone_zone)
		panic("zfree: freeing to zone_zone breaks zone_gc!");

	gzfreed = gzalloc_free(zone, addr);

	TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);

	if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
	    !from_zone_map(elem, zone->elem_size))) {
#if MACH_ASSERT
		panic("zfree: non-allocated memory in collectable zone!");
#endif
		zone_last_bogus_zone = zone;
		zone_last_bogus_elem = elem;
		return;
	}

	lock_zone(zone);
	/*
	 * See if we're doing logging on this zone.  There are two styles of logging used depending on
	 * whether we're trying to catch a leak or corruption.  See comments above in zalloc for details.
	 */

	if (DO_LOGGING(zone)) {
		int	i;

		if (corruption_debug_flag) {

			/*
			 * We're logging to catch a corruption.  Add a record of this zfree operation
			 * to the log.
			 */

			if (zrecords[zcurrent].z_element == NULL)
				zrecorded++;

			zrecords[zcurrent].z_element = (void *)addr;
			zrecords[zcurrent].z_time = ztime++;
			zrecords[zcurrent].z_opcode = ZOP_FREE;

			for (i = 0; i < numsaved; i++)
				zrecords[zcurrent].z_pc[i] = zbt[i];

			for (; i < MAX_ZTRACE_DEPTH; i++)
				zrecords[zcurrent].z_pc[i] = 0;

			zcurrent++;

			if (zcurrent >= log_records)
				zcurrent = 0;

		} else {

			/*
			 * We're logging to catch a leak. Remove any record we might have for this
			 * element since it's being freed.  Note that we may not find it if the buffer
			 * overflowed and that's OK.  Since the log is of a limited size, old records
			 * get overwritten if there are more zallocs than zfrees.
			 */

			for (i = 0; i < log_records; i++) {
				if (zrecords[i].z_element == addr) {
					zrecords[i].z_element = NULL;
					break;
				}
			}
		}
	}
#if	ZONE_DEBUG
	if (!gzfreed && zone_debug_enabled(zone)) {
		queue_t tmp_elem;

		elem -= ZONE_DEBUG_OFFSET;
		if (zone_check) {
			/* check the zone's consistency */

			for (tmp_elem = queue_first(&zone->active_zones);
			     !queue_end(tmp_elem, &zone->active_zones);
			     tmp_elem = queue_next(tmp_elem))
				if (elem == (vm_offset_t)tmp_elem)
					break;
			if (elem != (vm_offset_t)tmp_elem)
				panic("zfree()ing element from wrong zone");
		}
		remqueue((queue_t) elem);
	}
#endif	/* ZONE_DEBUG */

	if (zone_check) {
		vm_offset_t this;

		/* check the zone's consistency */

		for (this = zone->free_elements;
		     this != 0;
		     this = * (vm_offset_t *) this)
			if (!pmap_kernel_va(this) || this == elem)
				panic("zfree");
	}

	if (__probable(!gzfreed))
		free_to_zone(zone, (void *) elem);

	if (zone->count < 0)
		panic("zfree: count < 0!");

#if CONFIG_ZLEAKS
	/*
	 * Zone leak detection: un-track the allocation
	 */
	if (zone->zleak_on) {
		zleak_free(elem, zone->elem_size);
	}
#endif /* CONFIG_ZLEAKS */
	/*
	 * If elements have one or more pages, and memory is low,
	 * request to run the garbage collection in the zone the next
	 * time the pageout thread runs.
	 */
	if (zone->elem_size >= PAGE_SIZE &&
	    vm_pool_low()) {
		zone_gc_forced = TRUE;
	}
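	/*
	 * zone_gc_forced is consumed by consider_zone_gc() the next time the
	 * pageout thread asks whether a collection is worthwhile.
	 */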
	unlock_zone(zone);

	{
		thread_t thr = current_thread();
		task_t task;
		zinfo_usage_t zinfo;
		vm_size_t sz = zone->elem_size;

		if (zone->caller_acct)
			ledger_debit(thr->t_ledger, task_ledgers.tkm_private, sz);
		else
			ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, sz);

		if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
			OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
	}
}
/*	Change a zone's flags.
 *	This routine must be called immediately after zinit.
 */
void
zone_change(
	zone_t		zone,
	unsigned int	item,
	boolean_t	value)
{
	assert( zone != ZONE_NULL );
	assert( value == TRUE || value == FALSE );
	switch(item){
	case Z_NOENCRYPT:
		zone->noencrypt = value;
		break;
	case Z_EXHAUST:
		zone->exhaustible = value;
		break;
	case Z_COLLECT:
		zone->collectable = value;
		break;
	case Z_EXPAND:
		zone->expandable = value;
		break;
	case Z_FOREIGN:
		zone->allows_foreign = value;
		break;
	case Z_CALLERACCT:
		zone->caller_acct = value;
		break;
	case Z_NOCALLOUT:
		zone->no_callout = value;
		break;
	case Z_GZALLOC_EXEMPT:
		zone->gzalloc_exempt = value;
		gzalloc_reconfigure(zone);
		break;
	case Z_ALIGNMENT_REQUIRED:
		zone->alignment_required = value;
		zone_debug_disable(zone);
		gzalloc_reconfigure(zone);
		break;
	default:
		panic("Zone_change: Wrong Item Type!");
	}
}
/*
 * Return the expected number of free elements in the zone.
 * This calculation will be incorrect if items are zfree'd that
 * were never zalloc'd/zget'd. The correct way to stuff memory
 * into a zone is by zcram.
 */

integer_t
zone_free_count(zone_t zone)
{
	integer_t free_count;

	lock_zone(zone);
	free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);
	unlock_zone(zone);

	assert(free_count >= 0);

	return(free_count);
}
/*
 *	Zone garbage collection subroutines
 */

boolean_t
zone_page_collectable(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_collectable");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		if (zp->collect_count == zp->alloc_count)
			return (TRUE);
	}

	return (FALSE);
}
void
zone_page_keep(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_keep");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		zp->collect_count = 0;
	}
}
void
zone_page_collect(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_collect");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		++zp->collect_count;
	}
}
void
zone_page_init(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_init");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		/* make sure entry exists before marking unused */
		zone_page_table_expand(i);

		zp = zone_page_table_lookup(i);
		zp->alloc_count = ZONE_PAGE_UNUSED;
		zp->collect_count = 0;
	}
}
void
zone_page_alloc(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_alloc");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);

		/*
		 * Set alloc_count to ZONE_PAGE_USED if
		 * it was previously set to ZONE_PAGE_UNUSED.
		 */
		if (zp->alloc_count == ZONE_PAGE_UNUSED)
			zp->alloc_count = ZONE_PAGE_USED;
	}
}
void
zone_page_free_element(
	zone_page_index_t	*free_page_head,
	zone_page_index_t	*free_page_tail,
	vm_offset_t		addr,
	vm_size_t		size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_free_element");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);

		if (zp->collect_count > 0)
			--zp->collect_count;
		if (--zp->alloc_count == 0) {
			vm_address_t	free_page_address;
			vm_address_t	prev_free_page_address;

			zp->alloc_count = ZONE_PAGE_UNUSED;
			zp->collect_count = 0;

			/*
			 * This element was the last one on this page, re-use the page's
			 * storage for a page freelist
			 */
			free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
			*(zone_page_index_t *)free_page_address = ZONE_PAGE_INDEX_INVALID;

			if (*free_page_head == ZONE_PAGE_INDEX_INVALID) {
				*free_page_head = i;
				*free_page_tail = i;
			} else {
				prev_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)(*free_page_tail));
				*(zone_page_index_t *)prev_free_page_address = i;
				*free_page_tail = i;
			}
		}
	}
}
/* This is used for walking through a zone's free element list.
 */
struct zone_free_element {
	struct zone_free_element * next;
};

/*
 * Add a linked list of pages starting at base back into the zone
 * free list. Tail points to the last element on the list.
 */
#define ADD_LIST_TO_ZONE(zone, base, tail)					\
MACRO_BEGIN									\
	(tail)->next = (void *)((zone)->free_elements);				\
	if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
		((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
			(zone)->free_elements;					\
	}									\
	(zone)->free_elements = (unsigned long)(base);				\
MACRO_END

/*
 * Add an element to the chain pointed to by prev.
 */
#define ADD_ELEMENT(zone, prev, elem)						\
MACRO_BEGIN									\
	(prev)->next = (elem);							\
	if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
		((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
			(vm_offset_t)(elem);					\
	}									\
MACRO_END
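/*
 * Note that both macros also mirror the new next pointer into the last
 * vm_offset_t slot of elements large enough to hold it; that backup copy is
 * used elsewhere in this file to detect a scribbled freelist pointer.
 */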
struct {
	uint32_t	elems_collected,
			elems_freed,
			elems_kept;
	uint32_t	pgs_freed;
} zgc_stats;
/*	Zone garbage collection
 *
 *	zone_gc will walk through all the free elements in all the
 *	zones that are marked collectable looking for reclaimable
 *	pages.  zone_gc is called by consider_zone_gc when the system
 *	begins to run out of memory.
 */
void
zone_gc(boolean_t all_zones)
{
	unsigned int	max_zones;
	zone_t		z;
	unsigned int	i;
	int		kmem_frees = 0;
	zone_page_index_t zone_free_page_head;
	zone_page_index_t zone_free_page_tail;
	thread_t	mythread = current_thread();

	lck_mtx_lock(&zone_gc_lock);

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	z = first_zone;
	simple_unlock(&all_zones_lock);

	/*
	 * it's ok to allow eager kernel preemption while
	 * holding a zone lock since it's taken
	 * as a spin lock (which prevents preemption)
	 */
	thread_set_eager_preempt(mythread);
#if MACH_ASSERT
	for (i = 0; i < zone_pages; i++) {
		struct zone_page_table_entry	*zp;

		zp = zone_page_table_lookup(i);
		assert(!zp || (zp->collect_count == 0));
	}
#endif /* MACH_ASSERT */
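	/*
	 * Walk every collectable zone.  For each one we pull its free list,
	 * count the free elements per page in the zone page table, and then
	 * hand back any page whose elements turned out to be entirely free.
	 */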
	for (i = 0; i < max_zones; i++, z = z->next_zone) {
		unsigned int	n, m;
		vm_size_t	elt_size, size_freed;
		struct zone_free_element	*elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;

		assert(z != ZONE_NULL);

		if (!z->collectable)
			continue;

		if (all_zones == FALSE && z->elem_size < PAGE_SIZE)
			continue;

		lock_zone(z);

		elt_size = z->elem_size;

		/*
		 * Do a quick feasibility check before we scan the zone:
		 * skip unless there is likelihood of getting pages back
		 * (i.e we need a whole allocation block's worth of free
		 * elements before we can garbage collect) and
		 * the zone has more than 10 percent of its elements free
		 * or the element size is a multiple of the PAGE_SIZE
		 */
		if ((elt_size & PAGE_MASK) &&
		    (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
		     ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10)))) {
			unlock_zone(z);
			continue;
		}

		z->doing_gc = TRUE;

		/*
		 * Snatch all of the free elements away from the zone.
		 */
		scan = (void *)z->free_elements;
		z->free_elements = 0;

		unlock_zone(z);

		/*
		 * Determine which elements we can attempt to collect
		 * and count them up in the page table.  Foreign elements
		 * are returned to the zone.
		 */
		prev = (void *)&scan;
		elt = scan;
		n = 0; tail = keep = NULL;

		zone_free_page_head = ZONE_PAGE_INDEX_INVALID;
		zone_free_page_tail = ZONE_PAGE_INDEX_INVALID;
		while (elt != NULL) {
			if (from_zone_map(elt, elt_size)) {
				zone_page_collect((vm_offset_t)elt, elt_size);

				prev = elt;
				elt = elt->next;

				++zgc_stats.elems_collected;
			}
			else {
				if (keep == NULL)
					keep = tail = elt;
				else {
					ADD_ELEMENT(z, tail, elt);
					tail = elt;
				}

				ADD_ELEMENT(z, prev, elt->next);
				elt = elt->next;
				ADD_ELEMENT(z, tail, NULL);
			}
			/*
			 * Dribble back the elements we are keeping.
			 */

			if (++n >= 50) {
				if (z->waiting == TRUE) {
					/* z->waiting checked without lock held, rechecked below after locking */
					lock_zone(z);

					if (keep != NULL) {
						ADD_LIST_TO_ZONE(z, keep, tail);
						tail = keep = NULL;
					} else {
						m = 0;
						base_elt = elt;
						base_prev = prev;
						while ((elt != NULL) && (++m < 50)) {
							prev = elt;
							elt = elt->next;
						}
						if (m != 0) {
							ADD_LIST_TO_ZONE(z, base_elt, prev);
							ADD_ELEMENT(z, base_prev, elt);
							prev = base_prev;
						}
					}

					if (z->waiting) {
						z->waiting = FALSE;
						zone_wakeup(z);
					}

					unlock_zone(z);
				}
				n = 0;
			}
		}

		/*
		 * Return any remaining elements.
		 */

		if (keep != NULL) {
			lock_zone(z);

			ADD_LIST_TO_ZONE(z, keep, tail);

			unlock_zone(z);
		}
		/*
		 * Determine which pages we can reclaim and
		 * free those elements.
		 */

		size_freed = 0;
		elt = scan;
		n = 0; tail = keep = NULL;

		while (elt != NULL) {
			if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
				struct zone_free_element *next_elt = elt->next;

				size_freed += elt_size;

				/*
				 * If this is the last allocation on the page(s),
				 * we may use their storage to maintain the linked
				 * list of free-able pages. So store elt->next because
				 * "elt" may be scribbled over.
				 */
				zone_page_free_element(&zone_free_page_head, &zone_free_page_tail, (vm_offset_t)elt, elt_size);

				elt = next_elt;

				++zgc_stats.elems_freed;
			}
			else {
				zone_page_keep((vm_offset_t)elt, elt_size);

				if (keep == NULL)
					keep = tail = elt;
				else {
					ADD_ELEMENT(z, tail, elt);
					tail = elt;
				}

				elt = elt->next;
				ADD_ELEMENT(z, tail, NULL);

				++zgc_stats.elems_kept;
			}
			/*
			 * Dribble back the elements we are keeping,
			 * and update the zone size info.
			 */

			if (++n >= 50) {
				if (z->waiting == TRUE) {
					lock_zone(z);

					z->cur_size -= size_freed;
					size_freed = 0;

					if (keep != NULL) {
						ADD_LIST_TO_ZONE(z, keep, tail);
					}

					if (z->waiting) {
						z->waiting = FALSE;
						zone_wakeup(z);
					}

					unlock_zone(z);

					n = 0; tail = keep = NULL;
				}
			}
		}
		/*
		 * Return any remaining elements, and update
		 * the zone size info.
		 */

		lock_zone(z);

		if (size_freed > 0 || keep != NULL) {

			z->cur_size -= size_freed;

			if (keep != NULL) {
				ADD_LIST_TO_ZONE(z, keep, tail);
			}
		}

		z->doing_gc = FALSE;
		if (z->waiting) {
			z->waiting = FALSE;
			zone_wakeup(z);
		}
		unlock_zone(z);
		if (zone_free_page_head == ZONE_PAGE_INDEX_INVALID)
			continue;

		/*
		 * we don't want to allow eager kernel preemption while holding the
		 * various locks taken in the kmem_free path of execution
		 */
		thread_clear_eager_preempt(mythread);

		/*
		 * Reclaim the pages we are freeing.
		 */
		while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
			zone_page_index_t	zind = zone_free_page_head;
			vm_address_t		free_page_address;
			int			page_count;

			/*
			 * Use the first word of the page about to be freed to find the next free page
			 */
			free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
			zone_free_page_head = *(zone_page_index_t *)free_page_address;

			page_count = 1;

			while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
				zone_page_index_t	next_zind = zone_free_page_head;
				vm_address_t		next_free_page_address;

				next_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)next_zind);

				if (next_free_page_address == (free_page_address - PAGE_SIZE)) {
					free_page_address = next_free_page_address;
				} else if (next_free_page_address != (free_page_address + (PAGE_SIZE * page_count)))
					break;

				zone_free_page_head = *(zone_page_index_t *)next_free_page_address;
				page_count++;
			}
			kmem_free(zone_map, free_page_address, page_count * PAGE_SIZE);

			zgc_stats.pgs_freed += page_count;

			if (++kmem_frees == 32) {
				thread_yield_internal(1);
				kmem_frees = 0;
			}
		}
		thread_set_eager_preempt(mythread);
	}
	thread_clear_eager_preempt(mythread);

	lck_mtx_unlock(&zone_gc_lock);
}
extern vm_offset_t kmapoff_kaddr;
extern unsigned int kmapoff_pgcnt;
/*
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_zone_gc(boolean_t force)
{
	boolean_t all_zones = FALSE;

	if (kmapoff_kaddr != 0) {
		/*
		 * One-time reclaim of kernel_map resources we allocated in
		 * early boot.
		 */
		(void) vm_deallocate(kernel_map,
		    kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
		kmapoff_kaddr = 0;
	}

	if (zone_gc_allowed &&
	    (zone_gc_allowed_by_time_throttle ||
	     zone_gc_forced ||
	     force)) {
		if (zone_gc_allowed_by_time_throttle == TRUE) {
			zone_gc_allowed_by_time_throttle = FALSE;
			all_zones = TRUE;
		}
		zone_gc_forced = FALSE;

		zone_gc(all_zones);
	}
}

/*
 *	By default, don't attempt zone GC more frequently
 *	than once per minute.
 */
void
compute_zone_gc_throttle(void *arg __unused)
{
	zone_gc_allowed_by_time_throttle = TRUE;
}
#if CONFIG_TASK_ZONE_INFO

kern_return_t
task_zone_info(
	task_t			task,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	task_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	mach_zone_name_t	*names;
	vm_offset_t		names_addr;
	vm_size_t		names_size;
	task_zone_info_t	*info;
	vm_offset_t		info_addr;
	vm_size_t		info_size;
	unsigned int		max_zones, i;
	zone_t			z;
	mach_zone_name_t	*zn;
	task_zone_info_t	*zi;
	kern_return_t		kr;

	vm_size_t		used;
	vm_map_copy_t		copy;

	if (task == TASK_NULL)
		return KERN_INVALID_TASK;
	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	z = first_zone;
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (task_zone_info_t *) info_addr;
	zn = &names[0];
	zi = &info[0];

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->mzn_name, zcopy.zone_name,
			       sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';

		zi->tzi_count = (uint64_t)zcopy.count;
		zi->tzi_cur_size = (uint64_t)zcopy.cur_size;
		zi->tzi_max_size = (uint64_t)zcopy.max_size;
		zi->tzi_elem_size = (uint64_t)zcopy.elem_size;
		zi->tzi_alloc_size = (uint64_t)zcopy.alloc_size;
		zi->tzi_sum_size = zcopy.sum_count * zcopy.elem_size;
		zi->tzi_exhaustible = (uint64_t)zcopy.exhaustible;
		zi->tzi_collectable = (uint64_t)zcopy.collectable;
		zi->tzi_caller_acct = (uint64_t)zcopy.caller_acct;
		if (task->tkm_zinfo != NULL) {
			zi->tzi_task_alloc = task->tkm_zinfo[zcopy.index].alloc;
			zi->tzi_task_free = task->tkm_zinfo[zcopy.index].free;
		} else {
			zi->tzi_task_alloc = 0;
			zi->tzi_task_free = 0;
		}
		zn++;
		zi++;
	}
	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int count, collectable, exhaustible, caller_acct, index;
		vm_size_t cur_size, max_size, elem_size, alloc_size;
		uint64_t sum_size;

		strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
		fake_zones[i].query(&count, &cur_size,
				    &max_size, &elem_size,
				    &alloc_size, &sum_size,
				    &collectable, &exhaustible, &caller_acct);
		zi->tzi_count = (uint64_t)count;
		zi->tzi_cur_size = (uint64_t)cur_size;
		zi->tzi_max_size = (uint64_t)max_size;
		zi->tzi_elem_size = (uint64_t)elem_size;
		zi->tzi_alloc_size = (uint64_t)alloc_size;
		zi->tzi_sum_size = sum_size;
		zi->tzi_collectable = (uint64_t)collectable;
		zi->tzi_exhaustible = (uint64_t)exhaustible;
		zi->tzi_caller_acct = (uint64_t)caller_acct;
		if (task->tkm_zinfo != NULL) {
			index = ZINFO_SLOTS - num_fake_zones + i;
			zi->tzi_task_alloc = task->tkm_zinfo[index].alloc;
			zi->tzi_task_free = task->tkm_zinfo[index].free;
		} else {
			zi->tzi_task_alloc = 0;
			zi->tzi_task_free = 0;
		}
		zn++;
		zi++;
	}
	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;

	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (task_zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}
#else	/* CONFIG_TASK_ZONE_INFO */

kern_return_t
task_zone_info(
	__unused task_t				task,
	__unused mach_zone_name_array_t		*namesp,
	__unused mach_msg_type_number_t		*namesCntp,
	__unused task_zone_info_array_t		*infop,
	__unused mach_msg_type_number_t		*infoCntp)
{
	return KERN_FAILURE;
}

#endif	/* CONFIG_TASK_ZONE_INFO */
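/*
 * mach_zone_info() and the legacy host_zone_info() below follow the same
 * pattern as task_zone_info(): snapshot each zone under its lock, append the
 * fake (non-zone) allocators, and hand both arrays back to user space as
 * out-of-line regions created with vm_map_copyin().
 */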
kern_return_t
mach_zone_info(
	host_t			host,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	mach_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	mach_zone_name_t	*names;
	vm_offset_t		names_addr;
	vm_size_t		names_size;
	mach_zone_info_t	*info;
	vm_offset_t		info_addr;
	vm_size_t		info_size;
	unsigned int		max_zones, i;
	zone_t			z;
	mach_zone_name_t	*zn;
	mach_zone_info_t	*zi;
	kern_return_t		kr;

	vm_size_t		used;
	vm_map_copy_t		copy;
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif
	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	z = first_zone;
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (mach_zone_info_t *) info_addr;
	zn = &names[0];
	zi = &info[0];

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->mzn_name, zcopy.zone_name,
			       sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';

		zi->mzi_count = (uint64_t)zcopy.count;
		zi->mzi_cur_size = (uint64_t)zcopy.cur_size;
		zi->mzi_max_size = (uint64_t)zcopy.max_size;
		zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
		zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
		zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
		zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
		zi->mzi_collectable = (uint64_t)zcopy.collectable;

		zn++;
		zi++;
	}
	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int count, collectable, exhaustible, caller_acct;
		vm_size_t cur_size, max_size, elem_size, alloc_size;
		uint64_t sum_size;

		strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
		fake_zones[i].query(&count, &cur_size,
				    &max_size, &elem_size,
				    &alloc_size, &sum_size,
				    &collectable, &exhaustible, &caller_acct);
		zi->mzi_count = (uint64_t)count;
		zi->mzi_cur_size = (uint64_t)cur_size;
		zi->mzi_max_size = (uint64_t)max_size;
		zi->mzi_elem_size = (uint64_t)elem_size;
		zi->mzi_alloc_size = (uint64_t)alloc_size;
		zi->mzi_sum_size = sum_size;
		zi->mzi_collectable = (uint64_t)collectable;
		zi->mzi_exhaustible = (uint64_t)exhaustible;

		zn++;
		zi++;
	}
	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;

	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (mach_zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}
/*
 *	host_zone_info - LEGACY user interface for Mach zone information
 *	Should use mach_zone_info() instead!
 */
kern_return_t
host_zone_info(
	host_t			host,
	zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	zone_name_t	*names;
	vm_offset_t	names_addr;
	vm_size_t	names_size;
	zone_info_t	*info;
	vm_offset_t	info_addr;
	vm_size_t	info_size;
	unsigned int	max_zones, i;
	zone_t		z;
	zone_name_t	*zn;
	zone_info_t	*zi;
	kern_return_t	kr;

	vm_size_t	used;
	vm_map_copy_t	copy;
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
#if CONFIG_DEBUGGER_FOR_ZONE_INFO
	if (!PE_i_can_has_debugger(NULL))
		return KERN_INVALID_HOST;
#endif

#if defined(__LP64__)
	if (!thread_is_64bit(current_thread()))
		return KERN_NOT_SUPPORTED;
#else
	if (thread_is_64bit(current_thread()))
		return KERN_NOT_SUPPORTED;
#endif
	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	z = first_zone;
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (zone_info_t *) info_addr;
	zn = &names[0];
	zi = &info[0];

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->zn_name, zcopy.zone_name,
			       sizeof zn->zn_name);
		zn->zn_name[sizeof zn->zn_name - 1] = '\0';

		zi->zi_count = zcopy.count;
		zi->zi_cur_size = zcopy.cur_size;
		zi->zi_max_size = zcopy.max_size;
		zi->zi_elem_size = zcopy.elem_size;
		zi->zi_alloc_size = zcopy.alloc_size;
		zi->zi_exhaustible = zcopy.exhaustible;
		zi->zi_collectable = zcopy.collectable;

		zn++;
		zi++;
	}
	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int		caller_acct;
		uint64_t	sum_space;

		strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
		zn->zn_name[sizeof zn->zn_name - 1] = '\0';
		fake_zones[i].query(&zi->zi_count, &zi->zi_cur_size,
				    &zi->zi_max_size, &zi->zi_elem_size,
				    &zi->zi_alloc_size, &sum_space,
				    &zi->zi_collectable, &zi->zi_exhaustible, &caller_acct);
		zn++;
		zi++;
	}
	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;
	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}
	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	consider_zone_gc(TRUE);

	return (KERN_SUCCESS);
}
extern unsigned int stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
extern long long alloc_ptepages_count;
#endif
void zone_display_zprint()
{
	unsigned int	i;
	zone_t		the_zone;

	if(first_zone!=NULL) {
		the_zone = first_zone;
		for (i = 0; i < num_zones; i++) {
			if(the_zone->cur_size > (1024*1024)) {
				printf("%.20s:\t%lu\n",the_zone->zone_name,(uintptr_t)the_zone->cur_size);
			}

			if(the_zone->next_zone == NULL) {
				break;
			}

			the_zone = the_zone->next_zone;
		}
	}

	printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
	printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

	printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
}
#if	ZONE_DEBUG

/* should we care about locks here ? */

#define zone_in_use(z)	( z->count || z->free_elements )

void
zone_debug_enable(zone_t z)
{
	if (zone_debug_enabled(z) || zone_in_use(z) ||
	    z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
		return;
	queue_init(&z->active_zones);
	z->elem_size += ZONE_DEBUG_OFFSET;
}

void
zone_debug_disable(zone_t z)
{
	if (!zone_debug_enabled(z) || zone_in_use(z))
		return;
	z->elem_size -= ZONE_DEBUG_OFFSET;
	z->active_zones.next = z->active_zones.prev = NULL;
}

#endif	/* ZONE_DEBUG */