/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author: Avadis Tevanian, Jr.
 *
 *	Zone-based memory allocator.  A zone is a collection of fixed size
 *	data blocks for which quick allocation/deallocation is possible.
 */
#include <zone_debug.h>
#include <zone_alias_addr.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/macro_help.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/thread_call.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
/*
 * Zone Corruption Debugging
 *
 * We provide three methods to detect use of a zone element after it's been freed.  These
 * checks are enabled by specifying "-zc" and/or "-zp" in the boot-args:
 *
 * (1) Range-check the free-list "next" ptr for sanity.
 * (2) Store the ptr in two different words, and compare them against
 *     each other when re-using the zone element, to detect modifications.
 * (3) Poison the freed memory by overwriting it with 0xdeadbeef.
 *
 * The first two checks are fairly lightweight and are enabled by specifying "-zc"
 * in the boot-args.  If you want more aggressive checking for use-after-free bugs
 * and you don't mind the additional overhead, then turn on poisoning by adding
 * "-zp" to the boot-args in addition to "-zc".  If you specify -zp without -zc,
 * it still poisons the memory when it's freed, but doesn't check if the memory
 * has been altered later when it's reallocated.
 */
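
/*
 * Illustrative usage (example only, not taken from the surrounding code):
 * to get both the free-list checks and poisoning, boot with something like
 *
 *	boot-args="-zc -zp"
 *
 * "-zc" alone gives the two lightweight free-list checks; adding "-zp"
 * also poisons freed elements with 0xdeadbeef so later modification of
 * freed memory can be caught when the element is reallocated.
 */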
boolean_t check_freed_element = FALSE;		/* enabled by -zc in boot-args */
boolean_t zfree_clear = FALSE;			/* enabled by -zp in boot-args */
/*
 * Fake zones for things that want to report via zprint but are not actually zones.
 */
struct fake_zone_info {
	const char	*name;
	void		(*init)(int);
	void		(*query)(int *,
				 vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
				 uint64_t *, int *, int *, int *);
};
static struct fake_zone_info fake_zones[] = {
	{
		.name = "kernel_stacks",
		.init = stack_fake_zone_init,
		.query = stack_fake_zone_info,
	},
#if defined(__i386__) || defined (__x86_64__)
	{
		.name = "page_tables",
		.init = pt_fake_zone_init,
		.query = pt_fake_zone_info,
	},
#endif
	{
		.name = "kalloc.large",
		.init = kalloc_fake_zone_init,
		.query = kalloc_fake_zone_info,
	},
};
unsigned int num_fake_zones = sizeof(fake_zones)/sizeof(fake_zones[0]);
boolean_t zinfo_per_task = FALSE;		/* enabled by -zinfop in boot-args */
#define ZINFO_SLOTS 200				/* for now */
#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
/*
 *	Allocation helper macros
 */
#define is_kernel_data_addr(a)	(!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))

#define ADD_TO_ZONE(zone, element)					\
MACRO_BEGIN								\
	if (zfree_clear) {						\
	    unsigned int i;						\
	    for (i = 0;							\
		 i < zone->elem_size/sizeof(uint32_t);			\
		 i++)							\
		((uint32_t *)(element))[i] = 0xdeadbeef;		\
	}								\
	*((vm_offset_t *)(element)) = (zone)->free_elements;		\
	if (check_freed_element) {					\
		if ((zone)->elem_size >= (2 * sizeof(vm_offset_t)))	\
			((vm_offset_t *)(element))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
				(zone)->free_elements;			\
	}								\
	(zone)->free_elements = (vm_offset_t) (element);		\
	(zone)->count--;						\
MACRO_END
#define REMOVE_FROM_ZONE(zone, ret, type)					\
MACRO_BEGIN									\
	(ret) = (type) (zone)->free_elements;					\
	if ((ret) != (type) 0) {						\
	    if (check_freed_element) {						\
		if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0]) ||		\
		    ((zone)->elem_size >= (2 * sizeof(vm_offset_t)) &&		\
		    ((vm_offset_t *)(ret))[((zone)->elem_size/sizeof(vm_offset_t))-1] != \
		    ((vm_offset_t *)(ret))[0]))					\
			panic("a freed zone element has been modified");	\
		if (zfree_clear) {						\
		    unsigned int ii;						\
		    for (ii = sizeof(vm_offset_t) / sizeof(uint32_t);		\
			 ii < (zone)->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
			 ii++)							\
			if (((uint32_t *)(ret))[ii] != (uint32_t)0xdeadbeef)	\
				panic("a freed zone element has been modified"); \
		}								\
	    }									\
	    (zone)->count++;							\
	    (zone)->sum_count++;						\
	    (zone)->free_elements = *((vm_offset_t *)(ret));			\
	}									\
MACRO_END
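
/*
 * Illustrative sketch of how these macros fit together (the real call sites
 * are zcram(), zalloc_canblock() and zfree() later in this file); "some_zone"
 * is a hypothetical zone pointer used only for this example:
 *
 *	vm_offset_t elem;
 *
 *	REMOVE_FROM_ZONE(some_zone, elem, vm_offset_t);	// pop head of free list, 0 if empty
 *	if (elem != 0) {
 *		... use the element ...
 *		ADD_TO_ZONE(some_zone, elem);		// push it back on the free list
 *	}
 *
 * Note that the macros do no locking of their own.
 */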
#if	ZONE_DEBUG
#define zone_debug_enabled(z) z->active_zones.next
#define	ROUNDUP(x,y)		((((x)+(y)-1)/(y))*(y))
#define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
#endif	/* ZONE_DEBUG */
/*
 * Support for garbage collection of unused zone pages
 *
 * The kernel virtually allocates the "zone map" submap of the kernel
 * map. When an individual zone needs more storage, memory is allocated
 * out of the zone map, and the two-level "zone_page_table" is
 * on-demand expanded so that it has entries for those pages.
 * zone_page_init()/zone_page_alloc() initialize "alloc_count"
 * to the number of zone elements that occupy the zone page (which may
 * be a minimum of 1, including if a zone element spans multiple
 * pages).
 *
 * Asynchronously, the zone_gc() logic attempts to walk zone free
 * lists to see if all the elements on a zone page are free. If
 * "collect_count" (which it increments during the scan) matches
 * "alloc_count", the zone page is a candidate for collection and the
 * physical page is returned to the VM system. During this process, the
 * first word of the zone page is re-used to maintain a linked list of
 * to-be-collected zone pages.
 */
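
/*
 * Illustrative sketch of the collection test described above (the actual
 * zone_gc() implementation appears later in this file); "pindex" is a
 * hypothetical page index within the zone map:
 *
 *	struct zone_page_table_entry *zp = zone_page_table_lookup(pindex);
 *
 *	if (zp->alloc_count == zp->collect_count) {
 *		// every element on this page was found on a free list,
 *		// so the physical page can be returned to the VM system
 *	}
 */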
typedef uint32_t zone_page_index_t;
#define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)

struct zone_page_table_entry {
	volatile	uint16_t	alloc_count;
	volatile	uint16_t	collect_count;
};

#define	ZONE_PAGE_USED		0
#define ZONE_PAGE_UNUSED	0xffff
void		zone_page_alloc(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_free_element(
				zone_page_index_t	*free_page_list,
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_collect(
				vm_offset_t	addr,
				vm_size_t	size);

boolean_t	zone_page_collectable(
				vm_offset_t	addr,
				vm_size_t	size);

void		zalloc_async(
				thread_call_param_t	p0,
				thread_call_param_t	p1);

void		zone_display_zprint( void );

#if	ZONE_DEBUG && MACH_KDB
#endif	/* ZONE_DEBUG && MACH_KDB */
vm_map_t	zone_map = VM_MAP_NULL;

zone_t		zone_zone = ZONE_NULL;	/* the zone containing other zones */

zone_t		zinfo_zone = ZONE_NULL;	/* zone of per-task zone info */

/*
 *	The VM system gives us an initial chunk of memory.
 *	It has to be big enough to allocate the zone_zone
 *	all the way through the pmap zone.
 */

vm_offset_t	zdata;
vm_size_t	zdata_size;
#define lock_zone(zone)					\
MACRO_BEGIN						\
	lck_mtx_lock_spin(&(zone)->lock);		\
MACRO_END

#define unlock_zone(zone)				\
MACRO_BEGIN						\
	lck_mtx_unlock(&(zone)->lock);			\
MACRO_END

#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone)				\
	(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);


#define lock_zone_init(zone)					\
MACRO_BEGIN							\
	char _name[32];						\
	(void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
	lck_grp_attr_setdefault(&(zone)->lock_grp_attr);	\
	lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
	lck_attr_setdefault(&(zone)->lock_attr);		\
	lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext,	\
	    &(zone)->lock_grp, &(zone)->lock_attr);		\
MACRO_END

#define lock_try_zone(zone)	lck_mtx_try_lock_spin(&zone->lock)
/*
 *	Garbage collection map information
 */
#define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
vm_size_t	zone_page_table_used_size;
vm_offset_t	zone_map_min_address;
vm_offset_t	zone_map_max_address;
unsigned int	zone_pages;
unsigned int	zone_page_table_second_level_size;		/* power of 2 */
unsigned int	zone_page_table_second_level_shift_amount;

#define zone_page_table_first_level_slot(x)  ((x) >> zone_page_table_second_level_shift_amount)
#define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))

void	zone_page_table_expand(zone_page_index_t pindex);
struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
/*
 *	Exclude more than one concurrent garbage collection
 */
decl_lck_mtx_data(,		zone_gc_lock)

lck_attr_t      zone_lck_attr;
lck_grp_t       zone_lck_grp;
lck_grp_attr_t  zone_lck_grp_attr;
lck_mtx_ext_t   zone_lck_ext;
#if	!ZONE_ALIAS_ADDR
#define from_zone_map(addr, size) \
	((vm_offset_t)(addr) >= zone_map_min_address && \
	 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
#else
#define from_zone_map(addr, size) \
	((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) >= zone_map_min_address && \
	 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) + size -1) < zone_map_max_address)
#endif
/*
 *	Protects first_zone, last_zone, num_zones,
 *	and the next_zone field of zones.
 */
decl_simple_lock_data(,	all_zones_lock)
zone_t			first_zone;
zone_t			*last_zone;
unsigned int		num_zones;

boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
boolean_t panic_include_zprint = FALSE;
boolean_t zone_gc_allowed_by_time_throttle = TRUE;
/*
 * Zone leak debugging code
 *
 * When enabled, this code keeps a log to track allocations to a particular zone that have not
 * yet been freed.  Examining this log will reveal the source of a zone leak.  The log is allocated
 * only when logging is enabled, so there is no effect on the system when it's turned off.  Logging is
 * done on a per-zone basis.
 *
 * Enable the logging via the boot-args.  Add the parameter "zlog=<zone>" to boot-args where <zone>
 * is the name of the zone you wish to log.
 *
 * This code only tracks one zone, so you need to identify which one is leaking first.
 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
 * garbage collector.  Note that the zone name printed in the panic message is not necessarily the one
 * containing the leak.  So do a zprint from gdb and locate the zone with the bloated size.  This
 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test.  The
 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
 * See the help in the kgmacros for usage info.
 *
 *
 * Zone corruption logging
 *
 * Logging can also be used to help identify the source of a zone corruption.  First, identify the zone
 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args.  When -zc is used in conjunction
 * with zlog, it changes the logging style to track both allocations and frees to the zone.  So when the
 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
 * and freed any particular element in the zone.  Use the findelem kgmacro with the address of the element that's been
 * corrupted to examine its history.  This should lead to the source of the corruption.
 */
static int log_records;	/* size of the log, expressed in number of records */

#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */

static char zone_name_to_log[MAX_ZONE_NAME] = "";	/* the zone name we're logging, if any */
/*
 * The number of records in the log is configurable via the zrecs parameter in boot-args.  Set this to
 * the number of records you want in the log.  For example, "zrecs=1000" sets it to 1000 records.  Note
 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
 * but one doesn't generally care about performance when tracking down a leak.  The log is capped at 8000
 * records since going much larger than this tends to make the system unresponsive and unbootable on small
 * memory configurations.  The default value is 4000 records.
 */

#if	defined(__LP64__)
#define ZRECORDS_MAX		16000		/* Max records allowed in the log */
#else
#define ZRECORDS_MAX		8000		/* Max records allowed in the log */
#endif
#define ZRECORDS_DEFAULT	4000		/* default records in log if zrecs is not specified in boot-args */
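
/*
 * Illustrative boot-args usage (example only):
 *
 *	zlog=<zone name> zrecs=8000
 *
 * requests a log of 8000 records for the named zone; if zrecs is omitted,
 * ZRECORDS_DEFAULT is used, and anything above ZRECORDS_MAX is clamped
 * (see the zrecs handling in zone_bootstrap() below).
 */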
/*
 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
 * the records to be ordered chronologically, and a small array to hold the pc's from the stack trace.  A
 * record is added to the log each time a zalloc() is done in the zone_of_interest.  For leak debugging,
 * the record is cleared when a zfree() is done.  For corruption debugging, the log tracks both allocs and frees.
 * If the log fills, old records are replaced as if it were a circular buffer.
 */

struct zrecord {
	void		*z_element;		/* the element that was zalloc'ed or zfree'ed */
	uint32_t	z_opcode:1,		/* whether it was a zalloc or zfree */
			z_time:31;		/* time index when operation was done */
	void		*z_pc[MAX_ZTRACE_DEPTH];	/* stack trace of caller */
};
/*
 * Opcodes for the z_opcode field:
 */

#define ZOP_ALLOC	1
#define ZOP_FREE	0

/*
 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
 */

static struct zrecord *zrecords;		/* the log itself, dynamically allocated when logging is enabled */
static int zcurrent  = 0;			/* index of the next slot in the log to use */
static int zrecorded = 0;			/* number of allocations recorded in the log */
static unsigned int ztime = 0;			/* a timestamp of sorts */
static zone_t zone_of_interest = NULL;		/* the zone being watched; corresponds to zone_name_to_log */
/*
 * Decide if we want to log this zone by doing a string compare between a zone name and the name
 * of the zone to log. Return true if the strings are equal, false otherwise.  Because it's not
 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
 * match a space in the zone name.
 */
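
/*
 * For example (illustrative zone name), "zlog=vm.objects" in the boot-args
 * would be treated as a request to log a zone named "vm objects", since the
 * '.' stands in for the space that boot-args cannot carry.
 */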
static int
log_this_zone(const char *zonename, const char *logname)
{
	int len;
	const char *zc = zonename;
	const char *lc = logname;

	/*
	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
	 */

	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {

		/*
		 * If the current characters don't match, check for a space in
		 * the zone name and a corresponding period in the log name.
		 * If that's not there, then the strings don't match.
		 */

		if (*zc != *lc && !(*zc == ' ' && *lc == '.'))
			break;

		/*
		 * The strings are equal so far.  If we're at the end, then it's a match.
		 */

		if (*zc == '\0')
			return TRUE;
	}

	return FALSE;
}

/*
 * Test if we want to log this zalloc/zfree event.  We log if this is the zone we're interested in and
 * the buffer for the records has been allocated.
 */

#define DO_LOGGING(z)		(zrecords && (z) == zone_of_interest)
extern boolean_t zlog_ready;
#if	CONFIG_ZLEAKS
#pragma mark Zone Leak Detection

/*
 * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
 * allocations made by the zone allocator.  Every z_sample_factor allocations in each zone, we capture a
 * backtrace.  Every free, we examine the table and determine if the allocation was being tracked,
 * and stop tracking it if it was being tracked.
 *
 * We track the allocations in the zallocations hash table, which stores the address that was returned from
 * the zone allocator.  Each stored entry in the zallocations table points to an entry in the ztraces table, which
 * stores the backtrace associated with that allocation.  This provides uniquing for the relatively large
 * backtraces - we don't store them more than once.
 *
 * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
 * a large amount of virtual space.
 */
#define ZLEAK_STATE_ENABLED		0x01	/* Zone leak monitoring should be turned on if zone_map fills up. */
#define ZLEAK_STATE_ACTIVE		0x02	/* We are actively collecting traces. */
#define ZLEAK_STATE_ACTIVATING		0x04	/* Some thread is doing setup; others should move along. */
#define ZLEAK_STATE_FAILED		0x08	/* Attempt to allocate tables failed.  We will not try again. */
uint32_t	zleak_state = 0;		/* State of collection, as above */

boolean_t	panic_include_ztrace	= FALSE;	/* Enable zleak logging on panic */
vm_size_t	zleak_global_tracking_threshold;	/* Size of zone map at which to start collecting data */
vm_size_t	zleak_per_zone_tracking_threshold;	/* Size a zone will have before we will collect data on it */
unsigned int	z_sample_factor	= 1000;			/* Allocations per sample attempt */
/*
 * Counters for allocation statistics.
 */

/* Times two active records want to occupy the same spot */
unsigned int z_alloc_collisions = 0;
unsigned int z_trace_collisions = 0;

/* Times a new record lands on a spot previously occupied by a freed allocation */
unsigned int z_alloc_overwrites = 0;
unsigned int z_trace_overwrites = 0;

/* Times a new alloc or trace is put into the hash table */
unsigned int z_alloc_recorded	= 0;
unsigned int z_trace_recorded	= 0;

/* Times zleak_log returned false due to not being able to acquire the lock */
unsigned int z_total_conflicts	= 0;
#pragma mark struct zallocation
/*
 * Structure for keeping track of an allocation
 * An allocation bucket is in use if its element is not NULL
 */
struct zallocation {
	uintptr_t	za_element;		/* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
	vm_size_t	za_size;		/* how much memory did this allocation take up? */
	uint32_t	za_trace_index;		/* index into ztraces for backtrace associated with allocation */
	/* TODO: #if this out */
	uint32_t	za_hit_count;		/* for determining effectiveness of hash function */
};
/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
#define ZLEAK_ALLOCATION_MAP_NUM	16384
#define ZLEAK_TRACE_MAP_NUM		8192

uint32_t zleak_alloc_buckets = ZLEAK_ALLOCATION_MAP_NUM;
uint32_t zleak_trace_buckets = ZLEAK_TRACE_MAP_NUM;

vm_size_t zleak_max_zonemap_size;

/* Hashmaps of allocations and their corresponding traces */
static struct zallocation*	zallocations;
static struct ztrace*		ztraces;

/* not static so that panic can see this, see kern/debug.c */
struct ztrace*			top_ztrace;

/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
static lck_mtx_t		zleak_lock;
static lck_attr_t		zleak_lock_attr;
static lck_grp_t		zleak_lock_grp;
static lck_grp_attr_t		zleak_lock_grp_attr;
/*
 * Initializes the zone leak monitor.  Called from zone_init()
 */
void
zleak_init(vm_size_t max_zonemap_size)
{
	char			scratch_buf[16];
	boolean_t		zleak_enable_flag = FALSE;

	zleak_max_zonemap_size = max_zonemap_size;
	zleak_global_tracking_threshold = max_zonemap_size / 2;
	zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;

	/* -zleakoff (flag to disable zone leak monitor) */
	if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
		zleak_enable_flag = FALSE;
		printf("zone leak detection disabled\n");
	} else {
		zleak_enable_flag = TRUE;
		printf("zone leak detection enabled\n");
	}

	/* zfactor=XXXX (override how often to sample the zone allocator) */
	if (PE_parse_boot_argn("zfactor", &z_sample_factor, sizeof(z_sample_factor))) {
		printf("Zone leak factor override:%u\n", z_sample_factor);
	}

	/* zleak-allocs=XXXX (override number of buckets in zallocations) */
	if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
		printf("Zone leak alloc buckets override:%u\n", zleak_alloc_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!");
		}
	}

	/* zleak-traces=XXXX (override number of buckets in ztraces) */
	if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
		printf("Zone leak trace buckets override:%u\n", zleak_trace_buckets);
		/* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
		if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
			printf("Override isn't a power of two, bad things might happen!");
		}
	}

	/* allocate the zleak_lock */
	lck_grp_attr_setdefault(&zleak_lock_grp_attr);
	lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
	lck_attr_setdefault(&zleak_lock_attr);
	lck_mtx_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);

	if (zleak_enable_flag) {
		zleak_state = ZLEAK_STATE_ENABLED;
	}
}
/*
 * Support for kern.zleak.active sysctl - a simplified
 * version of the zleak_state variable.
 */
int
get_zleak_state(void)
{
	if (zleak_state & ZLEAK_STATE_FAILED)
		return (-1);
	if (zleak_state & ZLEAK_STATE_ACTIVE)
		return (1);
	return (0);
}
677 kern_return_t retval
;
678 vm_size_t z_alloc_size
= zleak_alloc_buckets
* sizeof(struct zallocation
);
679 vm_size_t z_trace_size
= zleak_trace_buckets
* sizeof(struct ztrace
);
680 void *allocations_ptr
= NULL
;
681 void *traces_ptr
= NULL
;
683 /* Only one thread attempts to activate at a time */
684 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
688 /* Indicate that we're doing the setup */
689 lck_mtx_lock_spin(&zleak_lock
);
690 if (zleak_state
& (ZLEAK_STATE_ACTIVE
| ZLEAK_STATE_ACTIVATING
| ZLEAK_STATE_FAILED
)) {
691 lck_mtx_unlock(&zleak_lock
);
695 zleak_state
|= ZLEAK_STATE_ACTIVATING
;
696 lck_mtx_unlock(&zleak_lock
);
698 /* Allocate and zero tables */
699 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&allocations_ptr
, z_alloc_size
);
700 if (retval
!= KERN_SUCCESS
) {
704 retval
= kmem_alloc_kobject(kernel_map
, (vm_offset_t
*)&traces_ptr
, z_trace_size
);
705 if (retval
!= KERN_SUCCESS
) {
709 bzero(allocations_ptr
, z_alloc_size
);
710 bzero(traces_ptr
, z_trace_size
);
712 /* Everything's set. Install tables, mark active. */
713 zallocations
= allocations_ptr
;
714 ztraces
= traces_ptr
;
717 * Initialize the top_ztrace to the first entry in ztraces,
718 * so we don't have to check for null in zleak_log
720 top_ztrace
= &ztraces
[0];
723 * Note that we do need a barrier between installing
724 * the tables and setting the active flag, because the zfree()
725 * path accesses the table without a lock if we're active.
727 lck_mtx_lock_spin(&zleak_lock
);
728 zleak_state
|= ZLEAK_STATE_ACTIVE
;
729 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
730 lck_mtx_unlock(&zleak_lock
);
736 * If we fail to allocate memory, don't further tax
737 * the system by trying again.
739 lck_mtx_lock_spin(&zleak_lock
);
740 zleak_state
|= ZLEAK_STATE_FAILED
;
741 zleak_state
&= ~ZLEAK_STATE_ACTIVATING
;
742 lck_mtx_unlock(&zleak_lock
);
744 if (allocations_ptr
!= NULL
) {
745 kmem_free(kernel_map
, (vm_offset_t
)allocations_ptr
, z_alloc_size
);
748 if (traces_ptr
!= NULL
) {
749 kmem_free(kernel_map
, (vm_offset_t
)traces_ptr
, z_trace_size
);
756 * TODO: What about allocations that never get deallocated,
757 * especially ones with unique backtraces? Should we wait to record
758 * until after boot has completed?
759 * (How many persistent zallocs are there?)
763 * This function records the allocation in the allocations table,
764 * and stores the associated backtrace in the traces table
765 * (or just increments the refcount if the trace is already recorded)
766 * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
767 * the associated trace's refcount is decremented.
768 * If the trace slot is in use, it returns.
769 * The refcount is incremented by the amount of memory the allocation consumes.
770 * The return value indicates whether to try again next time.
773 zleak_log(uintptr_t* bt
,
776 vm_size_t allocation_size
)
778 /* Quit if there's someone else modifying the hash tables */
779 if (!lck_mtx_try_lock_spin(&zleak_lock
)) {
784 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
786 uint32_t trace_index
= hashbacktrace(bt
, depth
, zleak_trace_buckets
);
787 struct ztrace
* trace
= &ztraces
[trace_index
];
789 allocation
->za_hit_count
++;
790 trace
->zt_hit_count
++;
793 * If the allocation bucket we want to be in is occupied, and if the occupier
794 * has the same trace as us, just bail.
796 if (allocation
->za_element
!= (uintptr_t) 0 && trace_index
== allocation
->za_trace_index
) {
797 z_alloc_collisions
++;
799 lck_mtx_unlock(&zleak_lock
);
803 /* STEP 1: Store the backtrace in the traces array. */
804 /* A size of zero indicates that the trace bucket is free. */
806 if (trace
->zt_size
> 0 && bcmp(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t))) != 0 ) {
808 * Different unique trace with same hash!
809 * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
810 * and get out of the way for later chances
812 trace
->zt_collisions
++;
813 z_trace_collisions
++;
815 lck_mtx_unlock(&zleak_lock
);
817 } else if (trace
->zt_size
> 0) {
818 /* Same trace, already added, so increment refcount */
819 trace
->zt_size
+= allocation_size
;
821 /* Found an unused trace bucket, record the trace here! */
822 if (trace
->zt_depth
!= 0) /* if this slot was previously used but not currently in use */
823 z_trace_overwrites
++;
826 trace
->zt_size
= allocation_size
;
827 memcpy(trace
->zt_stack
, bt
, (depth
* sizeof(uintptr_t)) );
829 trace
->zt_depth
= depth
;
830 trace
->zt_collisions
= 0;
833 /* STEP 2: Store the allocation record in the allocations array. */
835 if (allocation
->za_element
!= (uintptr_t) 0) {
837 * Straight up replace any allocation record that was there. We don't want to do the work
838 * to preserve the allocation entries that were there, because we only record a subset of the
839 * allocations anyways.
842 z_alloc_collisions
++;
844 struct ztrace
* associated_trace
= &ztraces
[allocation
->za_trace_index
];
845 /* Knock off old allocation's size, not the new allocation */
846 associated_trace
->zt_size
-= allocation
->za_size
;
847 } else if (allocation
->za_trace_index
!= 0) {
848 /* Slot previously used but not currently in use */
849 z_alloc_overwrites
++;
852 allocation
->za_element
= addr
;
853 allocation
->za_trace_index
= trace_index
;
854 allocation
->za_size
= allocation_size
;
858 if (top_ztrace
->zt_size
< trace
->zt_size
)
861 lck_mtx_unlock(&zleak_lock
);
866 * Free the allocation record and release the stacktrace.
867 * This should be as fast as possible because it will be called for every free.
870 zleak_free(uintptr_t addr
,
871 vm_size_t allocation_size
)
873 if (addr
== (uintptr_t) 0)
876 struct zallocation
* allocation
= &zallocations
[hashaddr(addr
, zleak_alloc_buckets
)];
878 /* Double-checked locking: check to find out if we're interested, lock, check to make
879 * sure it hasn't changed, then modify it, and release the lock.
882 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
883 /* if the allocation was the one, grab the lock, check again, then delete it */
884 lck_mtx_lock_spin(&zleak_lock
);
886 if (allocation
->za_element
== addr
&& allocation
->za_trace_index
< zleak_trace_buckets
) {
887 struct ztrace
*trace
;
889 /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
890 if (allocation
->za_size
!= allocation_size
) {
891 panic("Freeing as size %lu memory that was allocated with size %lu\n",
892 (uintptr_t)allocation_size
, (uintptr_t)allocation
->za_size
);
895 trace
= &ztraces
[allocation
->za_trace_index
];
897 /* size of 0 indicates trace bucket is unused */
898 if (trace
->zt_size
> 0) {
899 trace
->zt_size
-= allocation_size
;
902 /* A NULL element means the allocation bucket is unused */
903 allocation
->za_element
= 0;
905 lck_mtx_unlock(&zleak_lock
);
909 #endif /* CONFIG_ZLEAKS */
911 /* These functions outside of CONFIG_ZLEAKS because they are also used in
912 * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
916 * This function captures a backtrace from the current stack and
917 * returns the number of frames captured, limited by max_frames.
918 * It's fast because it does no checking to make sure there isn't bad data.
919 * Since it's only called from threads that we're going to keep executing,
920 * if there's bad data we were going to die eventually.
921 * This seems to work for x86 and X86_64.
922 * ARMTODO: Test it on ARM, I think it will work but I can't test it. If it works, remove the ifdef.
923 * If this function is inlined, it doesn't record the frame of the function it's inside.
924 * (because there's no stack frame!)
927 fastbacktrace(uintptr_t* bt
, uint32_t max_frames
)
929 #if defined(__x86_64__) || defined(__i386__)
930 uintptr_t* frameptr
= NULL
, *frameptr_next
= NULL
;
931 uintptr_t retaddr
= 0;
932 uint32_t frame_index
= 0, frames
= 0;
933 uintptr_t kstackb
, kstackt
;
935 kstackb
= current_thread()->kernel_stack
;
936 kstackt
= kstackb
+ kernel_stack_size
;
937 /* Load stack frame pointer (EBP on x86) into frameptr */
938 frameptr
= __builtin_frame_address(0);
940 while (frameptr
!= NULL
&& frame_index
< max_frames
) {
941 /* Next frame pointer is pointed to by the previous one */
942 frameptr_next
= (uintptr_t*) *frameptr
;
944 /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
945 /* That also means the return address is worthless, so don't record it */
946 if (frameptr_next
== NULL
)
948 /* Verify thread stack bounds */
949 if (((uintptr_t)frameptr_next
> kstackt
) || ((uintptr_t)frameptr_next
< kstackb
))
951 /* Pull return address from one spot above the frame pointer */
952 retaddr
= *(frameptr
+ 1);
954 /* Store it in the backtrace array */
955 bt
[frame_index
++] = retaddr
;
957 frameptr
= frameptr_next
;
960 /* Save the number of frames captured for return value */
961 frames
= frame_index
;
963 /* Fill in the rest of the backtrace with zeros */
964 while (frame_index
< max_frames
)
965 bt
[frame_index
++] = 0;
969 return OSBacktrace((void*)bt
, max_frames
);
973 /* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
975 hash_mix(uintptr_t x
)
998 hashbacktrace(uintptr_t* bt
, uint32_t depth
, uint32_t max_size
)
1002 uintptr_t mask
= max_size
- 1;
1008 hash
= hash_mix(hash
) & mask
;
1010 assert(hash
< max_size
);
1012 return (uint32_t) hash
;
1016 * TODO: Determine how well distributed this is
1017 * max_size must be a power of 2. i.e 0x10000 because 0x10000-1 is 0x0FFFF which is a great bitmask
1020 hashaddr(uintptr_t pt
, uint32_t max_size
)
1023 uintptr_t mask
= max_size
- 1;
1025 hash
= hash_mix(pt
) & mask
;
1027 assert(hash
< max_size
);
1029 return (uint32_t) hash
;
1032 /* End of all leak-detection code */
1036 * zinit initializes a new zone. The zone data structures themselves
1037 * are stored in a zone, which is initially a static structure that
1038 * is initialized by zone_init.
1042 vm_size_t size
, /* the size of an element */
1043 vm_size_t max
, /* maximum memory to use */
1044 vm_size_t alloc
, /* allocation size */
1045 const char *name
) /* a name for the zone */
1049 if (zone_zone
== ZONE_NULL
) {
1051 z
= (struct zone
*)zdata
;
1052 zdata
+= sizeof(*z
);
1053 zdata_size
-= sizeof(*z
);
1055 z
= (zone_t
) zalloc(zone_zone
);
1060 * Round off all the parameters appropriately.
1062 if (size
< sizeof(z
->free_elements
))
1063 size
= sizeof(z
->free_elements
);
1064 size
= ((size
-1) + sizeof(z
->free_elements
)) -
1065 ((size
-1) % sizeof(z
->free_elements
));
1068 alloc
= round_page(alloc
);
1069 max
= round_page(max
);
1071 * we look for an allocation size with less than 1% waste
1072 * up to 5 pages in size...
1073 * otherwise, we look for an allocation size with least fragmentation
1074 * in the range of 1 - 5 pages
1075 * This size will be used unless
1076 * the user suggestion is larger AND has less fragmentation
1079 if ((size
< PAGE_SIZE
) && (PAGE_SIZE
% size
<= PAGE_SIZE
/ 10))
1083 #if defined(__LP64__)
1084 if (((alloc
% size
) != 0) || (alloc
> PAGE_SIZE
* 8))
1087 vm_size_t best
, waste
; unsigned int i
;
1089 waste
= best
% size
;
1091 for (i
= 1; i
<= 5; i
++) {
1092 vm_size_t tsize
, twaste
;
1094 tsize
= i
* PAGE_SIZE
;
1096 if ((tsize
% size
) < (tsize
/ 100)) {
1098 goto use_this_allocation
;
1100 twaste
= tsize
% size
;
1102 best
= tsize
, waste
= twaste
;
1104 if (alloc
<= best
|| (alloc
% size
>= waste
))
1107 use_this_allocation
:
1108 if (max
&& (max
< alloc
))
1111 z
->free_elements
= 0;
1114 z
->elem_size
= size
;
1115 z
->alloc_size
= alloc
;
1116 z
->zone_name
= name
;
1119 z
->doing_alloc
= FALSE
;
1120 z
->doing_gc
= FALSE
;
1121 z
->exhaustible
= FALSE
;
1122 z
->collectable
= TRUE
;
1123 z
->allows_foreign
= FALSE
;
1124 z
->expandable
= TRUE
;
1126 z
->async_pending
= FALSE
;
1127 z
->caller_acct
= TRUE
;
1128 z
->noencrypt
= FALSE
;
1129 z
->no_callout
= FALSE
;
1130 z
->async_prio_refill
= FALSE
;
1131 z
->prio_refill_watermark
= 0;
1132 z
->zone_replenish_thread
= NULL
;
1136 z
->zleak_capture
= 0;
1137 z
->zleak_on
= FALSE
;
1138 #endif /* CONFIG_ZLEAKS */
1141 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
1142 zone_debug_enable(z
);
1143 #endif /* ZONE_DEBUG */
1147 * Add the zone to the all-zones list.
1148 * If we are tracking zone info per task, and we have
1149 * already used all the available stat slots, then keep
1150 * using the overflow zone slot.
1152 z
->next_zone
= ZONE_NULL
;
1153 thread_call_setup(&z
->call_async_alloc
, zalloc_async
, z
);
1154 simple_lock(&all_zones_lock
);
1156 last_zone
= &z
->next_zone
;
1157 z
->index
= num_zones
;
1158 if (zinfo_per_task
) {
1159 if (num_zones
> ZONES_MAX
)
1160 z
->index
= ZONES_MAX
;
1163 simple_unlock(&all_zones_lock
);
1166 * Check if we should be logging this zone. If so, remember the zone pointer.
1169 if (log_this_zone(z
->zone_name
, zone_name_to_log
)) {
1170 zone_of_interest
= z
;
1174 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
1175 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
1176 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
1177 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
1178 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
1182 if (zone_of_interest
!= NULL
&& zrecords
== NULL
&& zlog_ready
) {
1183 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&zrecords
, log_records
* sizeof(struct zrecord
)) == KERN_SUCCESS
) {
1186 * We got the memory for the log. Zero it out since the code needs this to identify unused records.
1187 * At this point, everything is set up and we're ready to start logging this zone.
1190 bzero((void *)zrecords
, log_records
* sizeof(struct zrecord
));
1191 printf("zone: logging started for zone %s (%p)\n", zone_of_interest
->zone_name
, zone_of_interest
);
1194 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
1195 zone_of_interest
= NULL
;
1201 unsigned zone_replenish_loops
, zone_replenish_wakeups
, zone_replenish_wakeups_initiated
;
1203 static void zone_replenish_thread(zone_t
);
1205 /* High priority VM privileged thread used to asynchronously refill a designated
1206 * zone, such as the reserved VM map entry zone.
1208 static void zone_replenish_thread(zone_t z
) {
1209 vm_size_t free_size
;
1210 current_thread()->options
|= TH_OPT_VMPRIV
;
1214 assert(z
->prio_refill_watermark
!= 0);
1215 while ((free_size
= (z
->cur_size
- (z
->count
* z
->elem_size
))) < (z
->prio_refill_watermark
* z
->elem_size
)) {
1216 assert(z
->doing_alloc
== FALSE
);
1217 assert(z
->async_prio_refill
== TRUE
);
1220 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
1221 vm_offset_t space
, alloc_size
;
1225 alloc_size
= round_page(z
->elem_size
);
1227 alloc_size
= z
->alloc_size
;
1230 zflags
|= KMA_NOENCRYPT
;
1232 kr
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
);
1234 if (kr
== KERN_SUCCESS
) {
1236 if (alloc_size
== PAGE_SIZE
)
1237 space
= zone_alias_addr(space
);
1239 zcram(z
, space
, alloc_size
);
1240 } else if (kr
== KERN_RESOURCE_SHORTAGE
) {
1242 } else if (kr
== KERN_NO_SPACE
) {
1243 kr
= kernel_memory_allocate(kernel_map
, &space
, alloc_size
, 0, zflags
);
1244 if (kr
== KERN_SUCCESS
) {
1246 if (alloc_size
== PAGE_SIZE
)
1247 space
= zone_alias_addr(space
);
1249 zcram(z
, space
, alloc_size
);
1251 assert_wait_timeout(&z
->zone_replenish_thread
, THREAD_UNINT
, 1, 100 * NSEC_PER_USEC
);
1252 thread_block(THREAD_CONTINUE_NULL
);
1257 zone_replenish_loops
++;
1261 assert_wait(&z
->zone_replenish_thread
, THREAD_UNINT
);
1262 thread_block(THREAD_CONTINUE_NULL
);
1263 zone_replenish_wakeups
++;
1268 zone_prio_refill_configure(zone_t z
, vm_size_t low_water_mark
) {
1269 z
->prio_refill_watermark
= low_water_mark
;
1271 z
->async_prio_refill
= TRUE
;
1273 kern_return_t tres
= kernel_thread_start_priority((thread_continue_t
)zone_replenish_thread
, z
, MAXPRI_KERNEL
, &z
->zone_replenish_thread
);
1275 if (tres
!= KERN_SUCCESS
) {
1276 panic("zone_prio_refill_configure, thread create: 0x%x", tres
);
1279 thread_deallocate(z
->zone_replenish_thread
);
1283 * Cram the given memory into the specified zone.
1291 vm_size_t elem_size
;
1292 boolean_t from_zm
= FALSE
;
1294 /* Basic sanity checks */
1295 assert(zone
!= ZONE_NULL
&& newmem
!= (vm_offset_t
)0);
1296 assert(!zone
->collectable
|| zone
->allows_foreign
1297 || (from_zone_map(newmem
, size
)));
1299 elem_size
= zone
->elem_size
;
1301 if (from_zone_map(newmem
, size
))
1305 zone_page_init(newmem
, size
);
1308 while (size
>= elem_size
) {
1309 ADD_TO_ZONE(zone
, newmem
);
1311 zone_page_alloc(newmem
, elem_size
);
1312 zone
->count
++; /* compensate for ADD_TO_ZONE */
1314 newmem
+= elem_size
;
1315 zone
->cur_size
+= elem_size
;
1322 * Steal memory for the zone package. Called from
1323 * vm_page_bootstrap().
1326 zone_steal_memory(void)
1328 /* Request enough early memory to get to the pmap zone */
1329 zdata_size
= 12 * sizeof(struct zone
);
1330 zdata
= (vm_offset_t
)pmap_steal_memory(round_page(zdata_size
));
1335 * Fill a zone with enough memory to contain at least nelem elements.
1336 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
1337 * Return the number of elements actually put into the zone, which may
1338 * be more than the caller asked for since the memory allocation is
1339 * rounded up to a full page.
1354 size
= nelem
* zone
->elem_size
;
1355 size
= round_page(size
);
1356 kr
= kmem_alloc_kobject(kernel_map
, &memory
, size
);
1357 if (kr
!= KERN_SUCCESS
)
1360 zone_change(zone
, Z_FOREIGN
, TRUE
);
1361 zcram(zone
, memory
, size
);
1362 nalloc
= (int)(size
/ zone
->elem_size
);
1363 assert(nalloc
>= nelem
);
1369 * Initialize the "zone of zones" which uses fixed memory allocated
1370 * earlier in memory initialization. zone_bootstrap is called
1374 zone_bootstrap(void)
1379 /* enable zone checks by default, to try and catch offenders... */
1381 /* 7968354: turn "-zc" back off */
1382 check_freed_element
= TRUE
;
1383 /* 7995202: turn "-zp" back off */
	/* ... but allow them to be turned off explicitly */
1388 if (PE_parse_boot_argn("-no_zc", temp_buf
, sizeof (temp_buf
))) {
1389 check_freed_element
= FALSE
;
1391 if (PE_parse_boot_argn("-no_zp", temp_buf
, sizeof (temp_buf
))) {
1392 zfree_clear
= FALSE
;
1396 /* see if we want freed zone element checking and/or poisoning */
1397 if (PE_parse_boot_argn("-zc", temp_buf
, sizeof (temp_buf
))) {
1398 check_freed_element
= TRUE
;
1401 if (PE_parse_boot_argn("-zp", temp_buf
, sizeof (temp_buf
))) {
1405 if (PE_parse_boot_argn("-zinfop", temp_buf
, sizeof (temp_buf
))) {
1406 zinfo_per_task
= TRUE
;
1410 * Check for and set up zone leak detection if requested via boot-args. We recognized two
1413 * zlog=<zone_to_log>
1414 * zrecs=<num_records_in_log>
1416 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
1417 * control the size of the log. If zrecs is not specified, a default value is used.
1420 if (PE_parse_boot_argn("zlog", zone_name_to_log
, sizeof(zone_name_to_log
)) == TRUE
) {
1421 if (PE_parse_boot_argn("zrecs", &log_records
, sizeof(log_records
)) == TRUE
) {
1424 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
1425 * This prevents accidentally hogging too much kernel memory and making the system
1429 log_records
= MIN(ZRECORDS_MAX
, log_records
);
1432 log_records
= ZRECORDS_DEFAULT
;
1436 simple_lock_init(&all_zones_lock
, 0);
1438 first_zone
= ZONE_NULL
;
1439 last_zone
= &first_zone
;
1442 /* assertion: nobody else called zinit before us */
1443 assert(zone_zone
== ZONE_NULL
);
1444 zone_zone
= zinit(sizeof(struct zone
), 128 * sizeof(struct zone
),
1445 sizeof(struct zone
), "zones");
1446 zone_change(zone_zone
, Z_COLLECT
, FALSE
);
1447 zone_change(zone_zone
, Z_CALLERACCT
, FALSE
);
1448 zone_change(zone_zone
, Z_NOENCRYPT
, TRUE
);
1450 zcram(zone_zone
, zdata
, zdata_size
);
1452 /* initialize fake zones and zone info if tracking by task */
1453 if (zinfo_per_task
) {
1454 vm_size_t zisize
= sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
;
1457 for (i
= 0; i
< num_fake_zones
; i
++)
1458 fake_zones
[i
].init(ZINFO_SLOTS
- num_fake_zones
+ i
);
1459 zinfo_zone
= zinit(zisize
, zisize
* CONFIG_TASK_MAX
,
1460 zisize
, "per task zinfo");
1461 zone_change(zinfo_zone
, Z_CALLERACCT
, FALSE
);
1466 zinfo_task_init(task_t task
)
1468 if (zinfo_per_task
) {
1469 task
->tkm_zinfo
= zalloc(zinfo_zone
);
1470 memset(task
->tkm_zinfo
, 0, sizeof(zinfo_usage_store_t
) * ZINFO_SLOTS
);
1472 task
->tkm_zinfo
= NULL
;
1477 zinfo_task_free(task_t task
)
1479 assert(task
!= kernel_task
);
1480 if (task
->tkm_zinfo
!= NULL
) {
1481 zfree(zinfo_zone
, task
->tkm_zinfo
);
1482 task
->tkm_zinfo
= NULL
;
1488 vm_size_t max_zonemap_size
)
1490 kern_return_t retval
;
1491 vm_offset_t zone_min
;
1492 vm_offset_t zone_max
;
1494 retval
= kmem_suballoc(kernel_map
, &zone_min
, max_zonemap_size
,
1495 FALSE
, VM_FLAGS_ANYWHERE
| VM_FLAGS_PERMANENT
,
1498 if (retval
!= KERN_SUCCESS
)
1499 panic("zone_init: kmem_suballoc failed");
1500 zone_max
= zone_min
+ round_page(max_zonemap_size
);
1502 * Setup garbage collection information:
1504 zone_map_min_address
= zone_min
;
1505 zone_map_max_address
= zone_max
;
1507 zone_pages
= (unsigned int)atop_kernel(zone_max
- zone_min
);
1508 zone_page_table_used_size
= sizeof(zone_page_table
);
1510 zone_page_table_second_level_size
= 1;
1511 zone_page_table_second_level_shift_amount
= 0;
1514 * Find the power of 2 for the second level that allows
1515 * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
1518 while ((zone_page_table_first_level_slot(zone_pages
-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
) {
1519 zone_page_table_second_level_size
<<= 1;
1520 zone_page_table_second_level_shift_amount
++;
1523 lck_grp_attr_setdefault(&zone_lck_grp_attr
);
1524 lck_grp_init(&zone_lck_grp
, "zones", &zone_lck_grp_attr
);
1525 lck_attr_setdefault(&zone_lck_attr
);
1526 lck_mtx_init_ext(&zone_gc_lock
, &zone_lck_ext
, &zone_lck_grp
, &zone_lck_attr
);
1530 * Initialize the zone leak monitor
1532 zleak_init(max_zonemap_size
);
1533 #endif /* CONFIG_ZLEAKS */
1537 zone_page_table_expand(zone_page_index_t pindex
)
1539 unsigned int first_index
;
1540 struct zone_page_table_entry
* volatile * first_level_ptr
;
1542 assert(pindex
< zone_pages
);
1544 first_index
= zone_page_table_first_level_slot(pindex
);
1545 first_level_ptr
= &zone_page_table
[first_index
];
1547 if (*first_level_ptr
== NULL
) {
1549 * We were able to verify the old first-level slot
1550 * had NULL, so attempt to populate it.
1553 vm_offset_t second_level_array
= 0;
1554 vm_size_t second_level_size
= round_page(zone_page_table_second_level_size
* sizeof(struct zone_page_table_entry
));
1555 zone_page_index_t i
;
1556 struct zone_page_table_entry
*entry_array
;
1558 if (kmem_alloc_kobject(zone_map
, &second_level_array
,
1559 second_level_size
) != KERN_SUCCESS
) {
1560 panic("zone_page_table_expand");
1564 * zone_gc() may scan the "zone_page_table" directly,
1565 * so make sure any slots have a valid unused state.
1567 entry_array
= (struct zone_page_table_entry
*)second_level_array
;
1568 for (i
=0; i
< zone_page_table_second_level_size
; i
++) {
1569 entry_array
[i
].alloc_count
= ZONE_PAGE_UNUSED
;
1570 entry_array
[i
].collect_count
= 0;
1573 if (OSCompareAndSwapPtr(NULL
, entry_array
, first_level_ptr
)) {
1574 /* Old slot was NULL, replaced with expanded level */
1575 OSAddAtomicLong(second_level_size
, &zone_page_table_used_size
);
1577 /* Old slot was not NULL, someone else expanded first */
1578 kmem_free(zone_map
, second_level_array
, second_level_size
);
1581 /* Old slot was not NULL, already been expanded */
1585 struct zone_page_table_entry
*
1586 zone_page_table_lookup(zone_page_index_t pindex
)
1588 unsigned int first_index
= zone_page_table_first_level_slot(pindex
);
1589 struct zone_page_table_entry
*second_level
= zone_page_table
[first_index
];
1592 return &second_level
[zone_page_table_second_level_slot(pindex
)];
1598 extern volatile SInt32 kfree_nop_count
;
1601 #pragma mark zalloc_canblock
1604 * zalloc returns an element from the specified zone.
1608 register zone_t zone
,
1612 kern_return_t retval
;
1613 uintptr_t zbt
[MAX_ZTRACE_DEPTH
]; /* used in zone leak logging and zone leak detection */
1616 boolean_t zone_replenish_wakeup
= FALSE
;
1619 uint32_t zleak_tracedepth
= 0; /* log this allocation if nonzero */
1620 #endif /* CONFIG_ZLEAKS */
1622 assert(zone
!= ZONE_NULL
);
1627 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1630 if (DO_LOGGING(zone
))
1631 numsaved
= OSBacktrace((void*) zbt
, MAX_ZTRACE_DEPTH
);
1635 * Zone leak detection: capture a backtrace every z_sample_factor
1636 * allocations in this zone.
1638 if (zone
->zleak_on
&& (zone
->zleak_capture
++ % z_sample_factor
== 0)) {
1639 zone
->zleak_capture
= 1;
1641 /* Avoid backtracing twice if zone logging is on */
1643 zleak_tracedepth
= fastbacktrace(zbt
, MAX_ZTRACE_DEPTH
);
1645 zleak_tracedepth
= numsaved
;
1647 #endif /* CONFIG_ZLEAKS */
1649 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1651 if (zone
->async_prio_refill
&&
1652 ((zone
->cur_size
- (zone
->count
* zone
->elem_size
)) < (zone
->prio_refill_watermark
* zone
->elem_size
))) {
1653 zone_replenish_wakeup
= TRUE
;
1654 zone_replenish_wakeups_initiated
++;
1657 while ((addr
== 0) && canblock
) {
1659 * If nothing was there, try to get more
1661 if (zone
->doing_alloc
) {
1663 * Someone is allocating memory for this zone.
1664 * Wait for it to show up, then try again.
1666 zone
->waiting
= TRUE
;
1668 } else if (zone
->doing_gc
) {
1669 /* zone_gc() is running. Since we need an element
1670 * from the free list that is currently being
1671 * collected, set the waiting bit and try to
1672 * interrupt the GC process, and try again
1673 * when we obtain the lock.
1675 zone
->waiting
= TRUE
;
1679 vm_size_t alloc_size
;
1682 if ((zone
->cur_size
+ zone
->elem_size
) >
1684 if (zone
->exhaustible
)
1686 if (zone
->expandable
) {
1688 * We're willing to overflow certain
1689 * zones, but not without complaining.
1691 * This is best used in conjunction
1692 * with the collectable flag. What we
1693 * want is an assurance we can get the
1694 * memory back, assuming there's no
1697 zone
->max_size
+= (zone
->max_size
>> 1);
1701 panic("zalloc: zone \"%s\" empty.", zone
->zone_name
);
1704 zone
->doing_alloc
= TRUE
;
1708 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
1710 if (vm_pool_low() || retry
>= 1)
1712 round_page(zone
->elem_size
);
1714 alloc_size
= zone
->alloc_size
;
1716 if (zone
->noencrypt
)
1717 zflags
|= KMA_NOENCRYPT
;
1719 retval
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
);
1720 if (retval
== KERN_SUCCESS
) {
1722 if (alloc_size
== PAGE_SIZE
)
1723 space
= zone_alias_addr(space
);
1727 if ((zleak_state
& (ZLEAK_STATE_ENABLED
| ZLEAK_STATE_ACTIVE
)) == ZLEAK_STATE_ENABLED
) {
1728 if (zone_map
->size
>= zleak_global_tracking_threshold
) {
1731 kr
= zleak_activate();
1732 if (kr
!= KERN_SUCCESS
) {
1733 printf("Failed to activate live zone leak debugging (%d).\n", kr
);
1738 if ((zleak_state
& ZLEAK_STATE_ACTIVE
) && !(zone
->zleak_on
)) {
1739 if (zone
->cur_size
> zleak_per_zone_tracking_threshold
) {
1740 zone
->zleak_on
= TRUE
;
1743 #endif /* CONFIG_ZLEAKS */
1745 zcram(zone
, space
, alloc_size
);
1748 } else if (retval
!= KERN_RESOURCE_SHORTAGE
) {
1753 printf("zalloc did gc\n");
1754 zone_display_zprint();
1757 panic_include_zprint
= TRUE
;
1759 if ((zleak_state
& ZLEAK_STATE_ACTIVE
)) {
1760 panic_include_ztrace
= TRUE
;
1762 #endif /* CONFIG_ZLEAKS */
1763 /* TODO: Change this to something more descriptive, perhaps
1764 * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
1766 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone
->zone_name
, zone
->count
, retval
, (int)kfree_nop_count
);
1773 zone
->doing_alloc
= FALSE
;
1774 if (zone
->waiting
) {
1775 zone
->waiting
= FALSE
;
1778 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1780 retval
== KERN_RESOURCE_SHORTAGE
) {
1788 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1792 /* Zone leak detection:
1793 * If we're sampling this allocation, add it to the zleaks hash table.
1795 if (addr
&& zleak_tracedepth
> 0) {
1796 /* Sampling can fail if another sample is happening at the same time in a different zone. */
1797 if (!zleak_log(zbt
, addr
, zleak_tracedepth
, zone
->elem_size
)) {
1798 /* If it failed, roll back the counter so we sample the next allocation instead. */
1799 zone
->zleak_capture
= z_sample_factor
;
1802 #endif /* CONFIG_ZLEAKS */
1806 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1807 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
1808 * since it protects the various log related data structures.
1811 if (DO_LOGGING(zone
) && addr
) {
1814 * Look for a place to record this new allocation. We implement two different logging strategies
1815 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1816 * for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
1817 * among all the records. So we look for an unused slot in the log and fill that in before overwriting
	 * an old entry.  When looking for a corruption however, it's better to have a chronological log of all
1819 * the allocations and frees done in the zone so that the history of operations for a specific zone
1820 * element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
1821 * oldest entry whenever a new one needs to be added.
1823 * The check_freed_element flag tells us what style of logging to do. It's set if we're supposed to be
1824 * doing corruption style logging (indicated via -zc in the boot-args).
1827 if (!check_freed_element
&& zrecords
[zcurrent
].z_element
&& zrecorded
< log_records
) {
1830 * If we get here, we're doing leak style logging and there's still some unused entries in
1831 * the log (since zrecorded is smaller than the size of the log). Look for an unused slot
1832 * starting at zcurrent and wrap-around if we reach the end of the buffer. If the buffer
1833 * is already full, we just fall through and overwrite the element indexed by zcurrent.
1836 for (i
= zcurrent
; i
< log_records
; i
++) {
1837 if (zrecords
[i
].z_element
== NULL
) {
1843 for (i
= 0; i
< zcurrent
; i
++) {
1844 if (zrecords
[i
].z_element
== NULL
) {
1852 * Save a record of this allocation
1856 if (zrecords
[zcurrent
].z_element
== NULL
)
1859 zrecords
[zcurrent
].z_element
= (void *)addr
;
1860 zrecords
[zcurrent
].z_time
= ztime
++;
1861 zrecords
[zcurrent
].z_opcode
= ZOP_ALLOC
;
1863 for (i
= 0; i
< numsaved
; i
++)
1864 zrecords
[zcurrent
].z_pc
[i
] = (void*) zbt
[i
];
1866 for (; i
< MAX_ZTRACE_DEPTH
; i
++)
1867 zrecords
[zcurrent
].z_pc
[i
] = 0;
1871 if (zcurrent
>= log_records
)
1875 if ((addr
== 0) && !canblock
&& (zone
->async_pending
== FALSE
) && (zone
->no_callout
== FALSE
) && (zone
->exhaustible
== FALSE
) && (!vm_pool_low())) {
1876 zone
->async_pending
= TRUE
;
1878 thread_call_enter(&zone
->call_async_alloc
);
1880 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1884 if (addr
&& zone_debug_enabled(zone
)) {
1885 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)addr
);
1886 addr
+= ZONE_DEBUG_OFFSET
;
1894 #endif /* CONFIG_ZLEAKS */
1898 if (zone_replenish_wakeup
)
1899 thread_wakeup(&zone
->zone_replenish_thread
);
1901 TRACE_MACHLEAKS(ZALLOC_CODE
, ZALLOC_CODE_2
, zone
->elem_size
, addr
);
1904 thread_t thr
= current_thread();
1906 zinfo_usage_t zinfo
;
1908 if (zone
->caller_acct
)
1909 thr
->tkm_private
.alloc
+= zone
->elem_size
;
1911 thr
->tkm_shared
.alloc
+= zone
->elem_size
;
1913 if ((task
= thr
->task
) != NULL
&& (zinfo
= task
->tkm_zinfo
) != NULL
)
1914 OSAddAtomic64(zone
->elem_size
, (int64_t *)&zinfo
[zone
->index
].alloc
);
1916 return((void *)addr
);
1922 register zone_t zone
)
1924 return( zalloc_canblock(zone
, TRUE
) );
1929 register zone_t zone
)
1931 return( zalloc_canblock(zone
, FALSE
) );
1936 thread_call_param_t p0
,
1937 __unused thread_call_param_t p1
)
1941 elt
= zalloc_canblock((zone_t
)p0
, TRUE
);
1942 zfree((zone_t
)p0
, elt
);
1943 lock_zone(((zone_t
)p0
));
1944 ((zone_t
)p0
)->async_pending
= FALSE
;
1945 unlock_zone(((zone_t
)p0
));
1950 * zget returns an element from the specified zone
1951 * and immediately returns nothing if there is nothing there.
1953 * This form should be used when you can not block (like when
1954 * processing an interrupt).
1956 * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
1957 * friend vm_page_more_fictitious can block, so it doesn't seem like
1958 * this is used for interrupts any more....
void *
zget(
	register zone_t	zone)
{
	register vm_offset_t	addr;

#if	CONFIG_ZLEAKS
	uintptr_t	zbt[MAX_ZTRACE_DEPTH];		/* used for zone leak detection */
	uint32_t	zleak_tracedepth = 0;		/* log this allocation if nonzero */
#endif /* CONFIG_ZLEAKS */

	assert( zone != ZONE_NULL );

	if (!lock_try_zone(zone))
		return NULL;

#if	CONFIG_ZLEAKS
	/*
	 * Zone leak detection: capture a backtrace
	 */
	if (zone->zleak_on && (zone->zleak_capture++ % z_sample_factor == 0)) {
		zone->zleak_capture = 1;
		zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
	}
#endif /* CONFIG_ZLEAKS */

	REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
#if	ZONE_DEBUG
	if (addr && zone_debug_enabled(zone)) {
		enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
		addr += ZONE_DEBUG_OFFSET;
	}
#endif	/* ZONE_DEBUG */

#if	CONFIG_ZLEAKS
	/*
	 * Zone leak detection: record the allocation
	 */
	if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
		/* Sampling can fail if another sample is happening at the same time in a different zone. */
		if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
			/* If it failed, roll back the counter so we sample the next allocation instead. */
			zone->zleak_capture = z_sample_factor;
		}
	}
#endif /* CONFIG_ZLEAKS */

	unlock_zone(zone);

	return((void *) addr);
}
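
/*
 * Illustrative sketch, not compiled (guarded by #if 0): the non-blocking
 * zget() pattern.  Callers must be prepared for a NULL return when the zone
 * has no free elements, since zget() will not grow the zone.  The zone and
 * element type are hypothetical.
 */
#if 0
	struct foo *f = (struct foo *) zget(foo_zone);
	if (f == NULL) {
		/* nothing available right now; retry later or fall back to zalloc() */
		return;
	}
#endif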

/* Keep this FALSE by default.  Large memory machines run orders of magnitude
   slower in debug mode when true.  Use the debugger to enable if needed. */
/* static */ boolean_t zone_check = FALSE;

static zone_t zone_last_bogus_zone = ZONE_NULL;
static vm_offset_t zone_last_bogus_elem = 0;

void
zfree(
	register zone_t	zone,
	void		*addr)
{
	vm_offset_t	elem = (vm_offset_t) addr;
	void		*zbt[MAX_ZTRACE_DEPTH];	/* only used if zone logging is enabled via boot-args */
	int		numsaved = 0;

	assert(zone != ZONE_NULL);

	/*
	 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
	 */
	if (DO_LOGGING(zone))
		numsaved = OSBacktrace(&zbt[0], MAX_ZTRACE_DEPTH);

	/* Basic sanity checks */
	if (zone == ZONE_NULL || elem == (vm_offset_t)0)
		panic("zfree: NULL");
	/* zone_gc assumes zones are never freed */
	if (zone == zone_zone)
		panic("zfree: freeing to zone_zone breaks zone_gc!");

	TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);

	if (zone->collectable && !zone->allows_foreign &&
	    !from_zone_map(elem, zone->elem_size)) {
#if MACH_ASSERT
		panic("zfree: non-allocated memory in collectable zone!");
#endif
		zone_last_bogus_zone = zone;
		zone_last_bogus_elem = elem;
		return;
	}

	/*
	 * See if we're doing logging on this zone.  There are two styles of logging used depending on
	 * whether we're trying to catch a leak or corruption.  See comments above in zalloc for details.
	 */

	if (DO_LOGGING(zone)) {
		int i;

		if (check_freed_element) {

			/*
			 * We're logging to catch a corruption.  Add a record of this zfree operation
			 * to the log.
			 */

			if (zrecords[zcurrent].z_element == NULL)
				zrecorded++;

			zrecords[zcurrent].z_element = (void *)addr;
			zrecords[zcurrent].z_time = ztime++;
			zrecords[zcurrent].z_opcode = ZOP_FREE;

			for (i = 0; i < numsaved; i++)
				zrecords[zcurrent].z_pc[i] = zbt[i];

			for (; i < MAX_ZTRACE_DEPTH; i++)
				zrecords[zcurrent].z_pc[i] = 0;

			if (zcurrent >= log_records)
				zcurrent = 0;

		} else {

			/*
			 * We're logging to catch a leak. Remove any record we might have for this
			 * element since it's being freed.  Note that we may not find it if the buffer
			 * overflowed and that's OK.  Since the log is of a limited size, old records
			 * get overwritten if there are more zallocs than zfrees.
			 */

			for (i = 0; i < log_records; i++) {
				if (zrecords[i].z_element == addr) {
					zrecords[i].z_element = NULL;
					break;
				}
			}
		}
	}

#if	ZONE_DEBUG
	if (zone_debug_enabled(zone)) {
		queue_t tmp_elem;

		elem -= ZONE_DEBUG_OFFSET;
		if (zone_check) {
			/* check the zone's consistency */
			for (tmp_elem = queue_first(&zone->active_zones);
			     !queue_end(tmp_elem, &zone->active_zones);
			     tmp_elem = queue_next(tmp_elem))
				if (elem == (vm_offset_t)tmp_elem)
					break;
			if (elem != (vm_offset_t)tmp_elem)
				panic("zfree()ing element from wrong zone");
		}
		remqueue((queue_t) elem);
	}
#endif	/* ZONE_DEBUG */

	if (zone_check) {
		vm_offset_t this;

		/* check the zone's consistency */
		for (this = zone->free_elements;
		     this != 0;
		     this = * (vm_offset_t *) this)
			if (!pmap_kernel_va(this) || this == elem)
				panic("zfree");
	}

	ADD_TO_ZONE(zone, elem);

	if (zone->count < 0)
		panic("zfree: count < 0!");

	/*
	 * Zone leak detection: un-track the allocation
	 */
	if (zone->zleak_on) {
		zleak_free(elem, zone->elem_size);
	}
#endif /* CONFIG_ZLEAKS */

	/*
	 * If elements have one or more pages, and memory is low,
	 * request to run the garbage collection in the zone the next
	 * time the pageout thread runs.
	 */
	if (zone->elem_size >= PAGE_SIZE &&
	    vm_pool_low()) {
		zone_gc_forced = TRUE;
	}

	{
		thread_t thr = current_thread();
		task_t task;
		zinfo_usage_t zinfo;

		if (zone->caller_acct)
			thr->tkm_private.free += zone->elem_size;
		else
			thr->tkm_shared.free += zone->elem_size;

		if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
			OSAddAtomic64(zone->elem_size,
				      (int64_t *)&zinfo[zone->index].free);
	}

/*	Change a zone's flags.
 *	This routine must be called immediately after zinit.
 */
void
zone_change(
	zone_t		zone,
	unsigned int	item,
	boolean_t	value)
{
	assert( zone != ZONE_NULL );
	assert( value == TRUE || value == FALSE );

	switch(item){
		case Z_NOENCRYPT:
			zone->noencrypt = value;
			break;
		case Z_EXHAUST:
			zone->exhaustible = value;
			break;
		case Z_COLLECT:
			zone->collectable = value;
			break;
		case Z_EXPAND:
			zone->expandable = value;
			break;
		case Z_FOREIGN:
			zone->allows_foreign = value;
			break;
		case Z_CALLERACCT:
			zone->caller_acct = value;
			break;
		case Z_NOCALLOUT:
			zone->no_callout = value;
			break;
		default:
			panic("Zone_change: Wrong Item Type!");
			break;
	}
}
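
/*
 * Illustrative sketch, not compiled (guarded by #if 0): how a client would
 * tune a freshly created zone with zone_change().  The "widget" zone and the
 * particular flag choices are hypothetical; the Z_* item names are the ones
 * handled by the switch above.
 */
#if 0
static zone_t widget_zone;

static void
widget_zone_init(void)
{
	widget_zone = zinit(sizeof (struct widget),
			    1024 * sizeof (struct widget),
			    PAGE_SIZE, "example.widget");

	/* must follow zinit() immediately, per the comment above */
	zone_change(widget_zone, Z_EXHAUST, TRUE);	/* never grow past max */
	zone_change(widget_zone, Z_CALLERACCT, FALSE);	/* bill to the shared pool */
}
#endif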

/*
 * Return the expected number of free elements in the zone.
 * This calculation will be incorrect if items are zfree'd that
 * were never zalloc'd/zget'd. The correct way to stuff memory
 * into a zone is by zcram.
 */
integer_t
zone_free_count(zone_t zone)
{
	integer_t free_count;

	free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);

	assert(free_count >= 0);

	return(free_count);
}
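
/*
 * Worked example of the arithmetic above (hypothetical numbers): a zone whose
 * cur_size is 64KB with an elem_size of 256 bytes holds 65536 / 256 = 256
 * elements in total; if count (elements currently allocated) is 200, the
 * expected number of free elements is 256 - 200 = 56.
 */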

/*
 *	zprealloc preallocates wired memory, expanding the specified
 *	zone to the specified size
 */
	if (kmem_alloc_kobject(zone_map, &addr, size) != KERN_SUCCESS)
		panic("zprealloc");
	zcram(zone, addr, size);
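
/*
 * Illustrative sketch, not compiled (guarded by #if 0): pre-wiring a zone so
 * early allocations never have to expand it.  The zone and size are
 * hypothetical, and zprealloc's prototype is assumed to take the zone plus a
 * byte count, matching the definition above.
 */
#if 0
	zprealloc(widget_zone, 16 * PAGE_SIZE);	/* back the zone with 16 wired pages up front */
#endif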

/*
 *	Zone garbage collection subroutines
 */

boolean_t
zone_page_collectable(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_collectable");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		if (zp->collect_count == zp->alloc_count)
			return (TRUE);
	}

	return (FALSE);
}
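
/*
 * A page is "collectable" when every element carved out of it (alloc_count)
 * has also been seen on the free list during the current GC scan
 * (collect_count, incremented by zone_page_collect() below and reset by
 * zone_page_keep()).  Only then can the whole page be handed back to the VM.
 */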

void
zone_page_keep(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_keep");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		zp->collect_count = 0;
	}
}

void
zone_page_collect(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_collect");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);
		++zp->collect_count;
	}
}

void
zone_page_init(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_init");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		/* make sure entry exists before marking unused */
		zone_page_table_expand(i);

		zp = zone_page_table_lookup(i);
		zp->alloc_count = ZONE_PAGE_UNUSED;
		zp->collect_count = 0;
	}
}

void
zone_page_alloc(
	vm_offset_t	addr,
	vm_size_t	size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_alloc");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);

		/*
		 * Set alloc_count to ZONE_PAGE_USED if
		 * it was previously set to ZONE_PAGE_UNUSED.
		 */
		if (zp->alloc_count == ZONE_PAGE_UNUSED)
			zp->alloc_count = ZONE_PAGE_USED;
		else
			++zp->alloc_count;
	}
}

void
zone_page_free_element(
	zone_page_index_t	*free_page_list,
	vm_offset_t		addr,
	vm_size_t		size)
{
	struct zone_page_table_entry	*zp;
	zone_page_index_t		i, j;

	addr = zone_virtual_addr(addr);

	if (!from_zone_map(addr, size))
		panic("zone_page_free_element");

	i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
	j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);

	for (; i <= j; i++) {
		zp = zone_page_table_lookup(i);

		if (zp->collect_count > 0)
			--zp->collect_count;

		if (--zp->alloc_count == 0) {
			vm_address_t	free_page_address;

			zp->alloc_count = ZONE_PAGE_UNUSED;
			zp->collect_count = 0;

			/*
			 * This element was the last one on this page, re-use the page's
			 * storage for a page freelist
			 */
			free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
			*(zone_page_index_t *)free_page_address = *free_page_list;
			*free_page_list = i;
		}
	}
}

/* This is used for walking through a zone's free element list.
 */
struct zone_free_element {
	struct zone_free_element *next;
};

/*
 * Add a linked list of pages starting at base back into the zone
 * free list. Tail points to the last element on the list.
 */
#define ADD_LIST_TO_ZONE(zone, base, tail)				\
MACRO_BEGIN								\
	(tail)->next = (void *)((zone)->free_elements);			\
	if (check_freed_element) {					\
		if ((zone)->elem_size >= (2 * sizeof(vm_offset_t)))	\
			((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
				(zone)->free_elements;			\
	}								\
	(zone)->free_elements = (unsigned long)(base);			\
MACRO_END

/*
 * Add an element to the chain pointed to by prev.
 */
#define ADD_ELEMENT(zone, prev, elem)					\
MACRO_BEGIN								\
	(prev)->next = (elem);						\
	if (check_freed_element) {					\
		if ((zone)->elem_size >= (2 * sizeof(vm_offset_t)))	\
			((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
				(vm_offset_t)(elem);			\
	}								\
MACRO_END
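
/*
 * When "-zc" corruption checking is enabled (check_freed_element), the two
 * macros above keep a second copy of each free element's next pointer in the
 * last pointer-sized word of the element, provided the element is at least
 * two pointers long.  The allocator can then compare the two copies when the
 * element is later pulled off the free list; a mismatch indicates that freed
 * memory was written to after it was released.
 */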

struct {
	uint32_t	pgs_freed;

	uint32_t	elems_collected,
			elems_freed,
			elems_kept;
} zgc_stats;

/*	Zone garbage collection
 *
 *	zone_gc will walk through all the free elements in all the
 *	zones that are marked collectable looking for reclaimable
 *	pages.  zone_gc is called by consider_zone_gc when the system
 *	begins to run out of memory.
 */
void
zone_gc(void)
{
	unsigned int		max_zones;
	zone_t			z;
	unsigned int		i;
	zone_page_index_t	zone_free_page_head;

	lck_mtx_lock(&zone_gc_lock);

	simple_lock(&all_zones_lock);
	max_zones = num_zones;
	z = first_zone;
	simple_unlock(&all_zones_lock);

#if MACH_ASSERT
	for (i = 0; i < zone_pages; i++) {
		struct zone_page_table_entry	*zp;

		zp = zone_page_table_lookup(i);
		assert(!zp || (zp->collect_count == 0));
	}
#endif /* MACH_ASSERT */

	zone_free_page_head = ZONE_PAGE_INDEX_INVALID;

	for (i = 0; i < max_zones; i++, z = z->next_zone) {
		unsigned int			n, m;
		vm_size_t			elt_size, size_freed;
		struct zone_free_element	*elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;

		assert(z != ZONE_NULL);

		if (!z->collectable)
			continue;

		lock_zone(z);

		elt_size = z->elem_size;

		/*
		 * Do a quick feasibility check before we scan the zone:
		 * skip unless there is likelihood of getting pages back
		 * (i.e. we need a whole allocation block's worth of free
		 * elements before we can garbage collect) and
		 * the zone has more than 10 percent of its elements free
		 * or the element size is a multiple of the PAGE_SIZE
		 */
		if ((elt_size & PAGE_MASK) &&
		    (((z->cur_size - z->count * elt_size) <= (2 * z->alloc_size)) ||
		     ((z->cur_size - z->count * elt_size) <= (z->cur_size / 10)))) {
			unlock_zone(z);
			continue;
		}

		/*
		 * Snatch all of the free elements away from the zone.
		 */

		scan = (void *)z->free_elements;
		z->free_elements = 0;

		unlock_zone(z);

		/*
		 * Determine which elements we can attempt to collect
		 * and count them up in the page table.  Foreign elements
		 * are returned to the zone.
		 */

		prev = (void *)&scan;
		elt = scan;
		n = 0; tail = keep = NULL;
		while (elt != NULL) {
			if (from_zone_map(elt, elt_size)) {
				zone_page_collect((vm_offset_t)elt, elt_size);

				prev = elt;
				elt = elt->next;

				++zgc_stats.elems_collected;
			} else {
				/* foreign element: unlink it and keep it, to hand back to the zone */
				if (keep == NULL)
					keep = tail = elt;
				else {
					ADD_ELEMENT(z, tail, elt);
					tail = elt;
				}

				ADD_ELEMENT(z, prev, elt->next);
				elt = elt->next;
				ADD_ELEMENT(z, tail, NULL);

				/*
				 * Dribble back the elements we are keeping.
				 */

				if (++n >= 50) {
					if (z->waiting == TRUE) {
						/* z->waiting checked without lock held, rechecked below after locking */
						lock_zone(z);

						if (keep != NULL) {
							ADD_LIST_TO_ZONE(z, keep, tail);
							tail = keep = NULL;
						} else {
							m = 0;
							base_elt = elt;
							base_prev = prev;
							while ((elt != NULL) && (++m < 50)) {
								prev = elt;
								elt = elt->next;
							}
							ADD_LIST_TO_ZONE(z, base_elt, prev);
							ADD_ELEMENT(z, base_prev, elt);
							prev = base_prev;
						}

						unlock_zone(z);
					}
					n = 0;
				}
			}
		}

		/*
		 * Return any remaining elements.
		 */

		if (keep != NULL) {
			lock_zone(z);
			ADD_LIST_TO_ZONE(z, keep, tail);
			unlock_zone(z);
		}

		/*
		 * Determine which pages we can reclaim and
		 * free those elements.
		 */

		size_freed = 0;
		elt = scan;
		n = 0; tail = keep = NULL;
		while (elt != NULL) {
			if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
				struct zone_free_element *next_elt = elt->next;

				size_freed += elt_size;

				/*
				 * If this is the last allocation on the page(s),
				 * we may use their storage to maintain the linked
				 * list of free-able pages. So store elt->next because
				 * "elt" may be scribbled over.
				 */
				zone_page_free_element(&zone_free_page_head,
						       (vm_offset_t)elt, elt_size);

				elt = next_elt;

				++zgc_stats.elems_freed;
			} else {
				zone_page_keep((vm_offset_t)elt, elt_size);

				if (keep == NULL)
					keep = tail = elt;
				else {
					ADD_ELEMENT(z, tail, elt);
					tail = elt;
				}

				elt = elt->next;
				ADD_ELEMENT(z, tail, NULL);

				++zgc_stats.elems_kept;

				/*
				 * Dribble back the elements we are keeping,
				 * and update the zone size info.
				 */

				if (++n >= 50) {
					lock_zone(z);

					z->cur_size -= size_freed;
					size_freed = 0;

					if (keep != NULL) {
						ADD_LIST_TO_ZONE(z, keep, tail);
					}

					unlock_zone(z);

					n = 0; tail = keep = NULL;
				}
			}
		}

		/*
		 * Return any remaining elements, and update
		 * the zone size info.
		 */

		lock_zone(z);

		if (size_freed > 0 || keep != NULL) {

			z->cur_size -= size_freed;

			if (keep != NULL) {
				ADD_LIST_TO_ZONE(z, keep, tail);
			}
		}

		z->doing_gc = FALSE;
		unlock_zone(z);
	}

	/*
	 * Reclaim the pages we are freeing.
	 */

	while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
		zone_page_index_t	zind = zone_free_page_head;
		vm_address_t		free_page_address;

		z = (zone_t)zone_virtual_addr((vm_map_address_t)z);

		/* Use the first word of the page about to be freed to find the next free page */
		free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
		zone_free_page_head = *(zone_page_index_t *)free_page_address;

		kmem_free(zone_map, free_page_address, PAGE_SIZE);
		++zgc_stats.pgs_freed;
	}

	lck_mtx_unlock(&zone_gc_lock);
}

/*
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_zone_gc(boolean_t force)
{
	if (zone_gc_allowed &&
	    (zone_gc_allowed_by_time_throttle ||
	     zone_gc_forced ||
	     force)) {
		zone_gc_forced = FALSE;
		zone_gc_allowed_by_time_throttle = FALSE; /* reset periodically */
		zone_gc();
	}
}

/*
 *	By default, don't attempt zone GC more frequently
 *	than once per minute.
 */
void
compute_zone_gc_throttle(void *arg __unused)
{
	zone_gc_allowed_by_time_throttle = TRUE;
}

kern_return_t
task_zone_info(
	task_t			task,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	task_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	mach_zone_name_t	*names;
	vm_offset_t		names_addr;
	vm_size_t		names_size;
	task_zone_info_t	*info;
	vm_offset_t		info_addr;
	vm_size_t		info_size;
	unsigned int		max_zones, i;
	mach_zone_name_t	*zn;
	task_zone_info_t	*zi;
	kern_return_t		kr;

	if (task == TASK_NULL)
		return KERN_INVALID_TASK;

	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (task_zone_info_t *) info_addr;

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->mzn_name, zcopy.zone_name,
			       sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';

		zi->tzi_count = (uint64_t)zcopy.count;
		zi->tzi_cur_size = (uint64_t)zcopy.cur_size;
		zi->tzi_max_size = (uint64_t)zcopy.max_size;
		zi->tzi_elem_size = (uint64_t)zcopy.elem_size;
		zi->tzi_alloc_size = (uint64_t)zcopy.alloc_size;
		zi->tzi_sum_size = zcopy.sum_count * zcopy.elem_size;
		zi->tzi_exhaustible = (uint64_t)zcopy.exhaustible;
		zi->tzi_collectable = (uint64_t)zcopy.collectable;
		zi->tzi_caller_acct = (uint64_t)zcopy.caller_acct;

		if (task->tkm_zinfo != NULL) {
			zi->tzi_task_alloc = task->tkm_zinfo[zcopy.index].alloc;
			zi->tzi_task_free = task->tkm_zinfo[zcopy.index].free;
		} else {
			zi->tzi_task_alloc = 0;
			zi->tzi_task_free = 0;
		}

		zn++;
		zi++;
	}

	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int count, collectable, exhaustible, caller_acct, index;
		vm_size_t cur_size, max_size, elem_size, alloc_size;
		uint64_t sum_size;

		strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
		fake_zones[i].query(&count, &cur_size,
				    &max_size, &elem_size,
				    &alloc_size, &sum_size,
				    &collectable, &exhaustible, &caller_acct);
		zi->tzi_count = (uint64_t)count;
		zi->tzi_cur_size = (uint64_t)cur_size;
		zi->tzi_max_size = (uint64_t)max_size;
		zi->tzi_elem_size = (uint64_t)elem_size;
		zi->tzi_alloc_size = (uint64_t)alloc_size;
		zi->tzi_sum_size = sum_size;
		zi->tzi_collectable = (uint64_t)collectable;
		zi->tzi_exhaustible = (uint64_t)exhaustible;
		zi->tzi_caller_acct = (uint64_t)caller_acct;

		if (task->tkm_zinfo != NULL) {
			index = ZINFO_SLOTS - num_fake_zones + i;
			zi->tzi_task_alloc = task->tkm_zinfo[index].alloc;
			zi->tzi_task_free = task->tkm_zinfo[index].free;
		} else {
			zi->tzi_task_alloc = 0;
			zi->tzi_task_free = 0;
		}

		zn++;
		zi++;
	}

	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;

	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (task_zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}

kern_return_t
mach_zone_info(
	host_t			host,
	mach_zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	mach_zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	mach_zone_name_t	*names;
	vm_offset_t		names_addr;
	vm_size_t		names_size;
	mach_zone_info_t	*info;
	vm_offset_t		info_addr;
	vm_size_t		info_size;
	unsigned int		max_zones, i;
	mach_zone_name_t	*zn;
	mach_zone_info_t	*zi;
	kern_return_t		kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];

	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (mach_zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (mach_zone_info_t *) info_addr;

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->mzn_name, zcopy.zone_name,
			       sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';

		zi->mzi_count = (uint64_t)zcopy.count;
		zi->mzi_cur_size = (uint64_t)zcopy.cur_size;
		zi->mzi_max_size = (uint64_t)zcopy.max_size;
		zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
		zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
		zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
		zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
		zi->mzi_collectable = (uint64_t)zcopy.collectable;

		zn++;
		zi++;
	}

	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int count, collectable, exhaustible, caller_acct;
		vm_size_t cur_size, max_size, elem_size, alloc_size;
		uint64_t sum_size;

		strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
		zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
		fake_zones[i].query(&count, &cur_size,
				    &max_size, &elem_size,
				    &alloc_size, &sum_size,
				    &collectable, &exhaustible, &caller_acct);
		zi->mzi_count = (uint64_t)count;
		zi->mzi_cur_size = (uint64_t)cur_size;
		zi->mzi_max_size = (uint64_t)max_size;
		zi->mzi_elem_size = (uint64_t)elem_size;
		zi->mzi_alloc_size = (uint64_t)alloc_size;
		zi->mzi_sum_size = sum_size;
		zi->mzi_collectable = (uint64_t)collectable;
		zi->mzi_exhaustible = (uint64_t)exhaustible;

		zn++;
		zi++;
	}

	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (mach_zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;

	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (mach_zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}
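
/*
 * Illustrative sketch, not compiled (guarded by #if 0): a user-level caller
 * of the mach_zone_info() routine exported above.  This assumes the
 * MIG-generated user prototype mirrors the kernel entry point (a host port
 * plus two out-of-line arrays); the exact header and host-port requirements
 * are not spelled out in this file.
 */
#if 0
	mach_zone_name_t	*names;
	mach_zone_info_t	*info;
	mach_msg_type_number_t	nameCnt, infoCnt;
	unsigned int		i;

	if (mach_zone_info(mach_host_self(), &names, &nameCnt,
			   &info, &infoCnt) == KERN_SUCCESS) {
		for (i = 0; i < nameCnt; i++)
			printf("%s: cur_size=%llu count=%llu\n",
			       names[i].mzn_name,
			       info[i].mzi_cur_size, info[i].mzi_count);
	}
#endif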

/*
 *	host_zone_info - LEGACY user interface for Mach zone information
 *			 Should use mach_zone_info() instead!
 */
kern_return_t
host_zone_info(
	host_t			host,
	zone_name_array_t	*namesp,
	mach_msg_type_number_t	*namesCntp,
	zone_info_array_t	*infop,
	mach_msg_type_number_t	*infoCntp)
{
	zone_name_t	*names;
	vm_offset_t	names_addr;
	vm_size_t	names_size;
	zone_info_t	*info;
	vm_offset_t	info_addr;
	vm_size_t	info_size;
	unsigned int	max_zones, i;
	kern_return_t	kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

#if defined(__LP64__)
	if (!thread_is_64bit(current_thread()))
		return KERN_NOT_SUPPORTED;
#else
	if (thread_is_64bit(current_thread()))
		return KERN_NOT_SUPPORTED;
#endif

	num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];

	/*
	 *	We assume that zones aren't freed once allocated.
	 *	We won't pick up any zones that are allocated later.
	 */

	simple_lock(&all_zones_lock);
	max_zones = (unsigned int)(num_zones + num_fake_zones);
	simple_unlock(&all_zones_lock);

	names_size = round_page(max_zones * sizeof *names);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &names_addr, names_size);
	if (kr != KERN_SUCCESS)
		return kr;
	names = (zone_name_t *) names_addr;

	info_size = round_page(max_zones * sizeof *info);
	kr = kmem_alloc_pageable(ipc_kernel_map,
				 &info_addr, info_size);
	if (kr != KERN_SUCCESS) {
		kmem_free(ipc_kernel_map,
			  names_addr, names_size);
		return kr;
	}

	info = (zone_info_t *) info_addr;

	for (i = 0; i < max_zones - num_fake_zones; i++) {
		struct zone zcopy;

		assert(z != ZONE_NULL);

		lock_zone(z);
		zcopy = *z;
		unlock_zone(z);

		simple_lock(&all_zones_lock);
		z = z->next_zone;
		simple_unlock(&all_zones_lock);

		/* assuming here the name data is static */
		(void) strncpy(zn->zn_name, zcopy.zone_name,
			       sizeof zn->zn_name);
		zn->zn_name[sizeof zn->zn_name - 1] = '\0';

		zi->zi_count = zcopy.count;
		zi->zi_cur_size = zcopy.cur_size;
		zi->zi_max_size = zcopy.max_size;
		zi->zi_elem_size = zcopy.elem_size;
		zi->zi_alloc_size = zcopy.alloc_size;
		zi->zi_exhaustible = zcopy.exhaustible;
		zi->zi_collectable = zcopy.collectable;

		zn++;
		zi++;
	}

	/*
	 * loop through the fake zones and fill them using the specialized
	 * functions
	 */
	for (i = 0; i < num_fake_zones; i++) {
		int		caller_acct;
		uint64_t	sum_space;

		strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
		zn->zn_name[sizeof zn->zn_name - 1] = '\0';
		fake_zones[i].query(&zi->zi_count, &zi->zi_cur_size,
				    &zi->zi_max_size, &zi->zi_elem_size,
				    &zi->zi_alloc_size, &sum_space,
				    &zi->zi_collectable, &zi->zi_exhaustible, &caller_acct);

		zn++;
		zi++;
	}

	used = max_zones * sizeof *names;
	if (used != names_size)
		bzero((char *) (names_addr + used), names_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
			   (vm_map_size_t)names_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*namesp = (zone_name_t *) copy;
	*namesCntp = max_zones;

	used = max_zones * sizeof *info;
	if (used != info_size)
		bzero((char *) (info_addr + used), info_size - used);

	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
			   (vm_map_size_t)info_size, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	*infop = (zone_info_t *) copy;
	*infoCntp = max_zones;

	return KERN_SUCCESS;
}

extern unsigned int	stack_total;
extern unsigned long long stack_allocs;

#if defined(__i386__) || defined (__x86_64__)
extern unsigned int	inuse_ptepages_count;
extern long long	alloc_ptepages_count;
#endif

void zone_display_zprint()
{
	unsigned int	i;
	zone_t		the_zone;

	if(first_zone!=NULL) {
		the_zone = first_zone;
		for (i = 0; i < num_zones; i++) {
			if(the_zone->cur_size > (1024*1024)) {
				printf("%.20s:\t%lu\n",the_zone->zone_name,(uintptr_t)the_zone->cur_size);
			}

			if(the_zone->next_zone == NULL) {
				break;
			}

			the_zone = the_zone->next_zone;
		}
	}

	printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size * stack_total));

#if defined(__i386__) || defined (__x86_64__)
	printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count));
#endif

	printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
}

#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <kern/kern_print.h>

const char *zone_labels =
"ENTRY       COUNT   TOT_SZ   MAX_SZ ELT_SZ ALLOC_SZ NAME";

#if	ZONE_DEBUG
void	db_zone_check_active(
		zone_t		zone);
void	db_zone_print_active(
		zone_t		zone);
#endif	/* ZONE_DEBUG */
void	db_zone_print_free(
		zone_t		zone);

	db_printf("%8x %8x %8x %8x %6x %8x %s ",
		  addr, zcopy.count, zcopy.cur_size,
		  zcopy.max_size, zcopy.elem_size,
		  zcopy.alloc_size, zcopy.zone_name);
	if (zcopy.exhaustible)
		db_printf("H");
	if (zcopy.collectable)
		db_printf("C");
	if (zcopy.expandable)
		db_printf("X");
	if (zcopy.caller_acct)
		db_printf("A");

void
db_show_one_zone(db_expr_t addr, boolean_t have_addr,
		 __unused db_expr_t count, __unused char *modif)
{
	struct zone *z = (zone_t)((char *)0 + addr);

	if (z == ZONE_NULL || !have_addr){
		db_error("No Zone\n");
	}

	db_printf("%s\n", zone_labels);
	db_print_zone(z);
}

void
db_show_all_zones(__unused db_expr_t addr, boolean_t have_addr, db_expr_t count,
		  __unused char *modif)
{
	zone_t		z;
	unsigned	total = 0;

	/*
	 * Don't risk hanging by unconditionally locking,
	 * risk of incoherent data is small (zones aren't freed).
	 */
	have_addr = simple_lock_try(&all_zones_lock);
	count = num_zones;
	z = first_zone;
	if (have_addr) {
		simple_unlock(&all_zones_lock);
	}

	db_printf("%s\n", zone_labels);
	for ( ; count > 0; count--) {
		if (!z) {
			db_error("Mangled Zone List\n");
		}
		db_print_zone(z);
		total += z->cur_size;

		have_addr = simple_lock_try(&all_zones_lock);
		z = z->next_zone;
		if (have_addr) {
			simple_unlock(&all_zones_lock);
		}
	}
	db_printf("\nTotal      %8x", total);
	db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed);
}

#if	ZONE_DEBUG
void
db_zone_check_active(
	zone_t	zone)
{
	int	count = 0;
	queue_t	tmp_elem;

	if (!zone_debug_enabled(zone) || !zone_check)
		return;
	tmp_elem = queue_first(&zone->active_zones);
	while (count < zone->count) {
		count++;
		if (tmp_elem == 0) {
			printf("unexpected zero element, zone=%p, count=%d\n",
				zone, count);
			break;
		}
		if (queue_end(tmp_elem, &zone->active_zones)) {
			printf("unexpected queue_end, zone=%p, count=%d\n",
				zone, count);
			break;
		}
		tmp_elem = queue_next(tmp_elem);
	}
	if (!queue_end(tmp_elem, &zone->active_zones)) {
		printf("not at queue_end, zone=%p, tmp_elem=%p\n",
			zone, tmp_elem);
	}
}

void
db_zone_print_active(
	zone_t	zone)
{
	int	count = 0;
	queue_t	tmp_elem;

	if (!zone_debug_enabled(zone)) {
		printf("zone %p debug not enabled\n", zone);
		return;
	}
	if (!zone_check) {
		printf("zone_check FALSE\n");
		return;
	}

	printf("zone %p, active elements %d\n", zone, zone->count);
	printf("active list:\n");
	tmp_elem = queue_first(&zone->active_zones);
	while (count < zone->count) {
		printf("  %p", tmp_elem);
		count++;
		if ((count % 6) == 0)
			printf("\n");
		if (tmp_elem == 0) {
			printf("\nunexpected zero element, count=%d\n", count);
			break;
		}
		if (queue_end(tmp_elem, &zone->active_zones)) {
			printf("\nunexpected queue_end, count=%d\n", count);
			break;
		}
		tmp_elem = queue_next(tmp_elem);
	}
	if (!queue_end(tmp_elem, &zone->active_zones))
		printf("\nnot at queue_end, tmp_elem=%p\n", tmp_elem);
}
#endif	/* ZONE_DEBUG */

void
db_zone_print_free(
	zone_t	zone)
{
	int		count = 0;
	int		freecount;
	vm_offset_t	elem;

	freecount = zone_free_count(zone);
	printf("zone %p, free elements %d\n", zone, freecount);
	printf("free list:\n");
	elem = zone->free_elements;
	while (count < freecount) {
		printf("  0x%x", elem);
		count++;
		if ((count % 6) == 0)
			printf("\n");
		if (elem == 0) {
			printf("\nunexpected zero element, count=%d\n", count);
			break;
		}
		elem = *((vm_offset_t *)elem);
	}
	if (elem != 0)
		printf("\nnot at end of free list, elem=0x%x\n", elem);
}

#endif /* MACH_KDB */

#if	ZONE_DEBUG

/* should we care about locks here ? */

#if	MACH_KDB
void *
next_element(
	zone_t	z,
	void	*prev)
{
	char	*elt = (char *)prev;

	if (!zone_debug_enabled(z))
		return(NULL);
	elt -= ZONE_DEBUG_OFFSET;
	elt = (char *) queue_next((queue_t) elt);
	if ((queue_t) elt == &z->active_zones)
		return(NULL);
	elt += ZONE_DEBUG_OFFSET;
	return(elt);
}

void *
first_element(
	zone_t	z)
{
	char	*elt;

	if (!zone_debug_enabled(z))
		return(NULL);
	if (queue_empty(&z->active_zones))
		return(NULL);
	elt = (char *)queue_first(&z->active_zones);
	elt += ZONE_DEBUG_OFFSET;
	return(elt);
}

/*
 * Second arg controls how many zone elements are printed:
 *    n, n > 0 => last n on active list
 */
int
zone_count(
	zone_t	z,
	int	tail)
{
	void		*elt;
	int		count = 0;
	boolean_t	print = (tail != 0);

	if (z->count < tail)
		tail = 0;
	tail = z->count - tail;
	for (elt = first_element(z); elt; elt = next_element(z, elt)) {
		if (print && tail <= count)
			db_printf("%8x\n", elt);
		count++;
	}
	assert(count == z->count);
	return(count);
}
#endif	/* MACH_KDB */

#define zone_in_use(z)	( z->count || z->free_elements )

void
zone_debug_enable(
	zone_t	z)
{
	if (zone_debug_enabled(z) || zone_in_use(z) ||
	    z->alloc_size < (z->elem_size + ZONE_DEBUG_OFFSET))
		return;
	queue_init(&z->active_zones);
	z->elem_size += ZONE_DEBUG_OFFSET;
}

void
zone_debug_disable(
	zone_t	z)
{
	if (!zone_debug_enabled(z) || zone_in_use(z))
		return;
	z->elem_size -= ZONE_DEBUG_OFFSET;
	z->active_zones.next = z->active_zones.prev = NULL;
}

#endif	/* ZONE_DEBUG */