2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
60 * Author: Avadis Tevanian, Jr.
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
65 #include <zone_debug.h>
66 #include <zone_alias_addr.h>
70 #include <mach/mach_types.h>
71 #include <mach/vm_param.h>
72 #include <mach/kern_return.h>
73 #include <mach/mach_host_server.h>
74 #include <mach/machine/vm_types.h>
75 #include <mach_debug/zone_info.h>
77 #include <kern/kern_types.h>
78 #include <kern/assert.h>
79 #include <kern/host.h>
80 #include <kern/macro_help.h>
81 #include <kern/sched.h>
82 #include <kern/locks.h>
83 #include <kern/sched_prim.h>
84 #include <kern/misc_protos.h>
85 #include <kern/thread_call.h>
86 #include <kern/zalloc.h>
87 #include <kern/kalloc.h>
90 #include <vm/vm_map.h>
91 #include <vm/vm_kern.h>
92 #include <vm/vm_page.h>
94 #include <machine/machparam.h>
96 #include <libkern/OSDebug.h>
97 #include <sys/kdebug.h>
100 /* for fake zone stat routines */
101 #include <ppc/savearea.h>
102 #include <ppc/mappings.h>
107 * Zone Corruption Debugging
109 * We provide three methods to detect use of a zone element after it's been freed. These
110 * checks are enabled by specifying "-zc" and/or "-zp" in the boot-args:
112 * (1) Range-check the free-list "next" ptr for sanity.
113 * (2) Store the ptr in two different words, and compare them against
114 * each other when re-using the zone element, to detect modifications.
115 * (3) poison the freed memory by overwriting it with 0xdeadbeef.
117 * The first two checks are fairly light weight and are enabled by specifying "-zc"
118 * in the boot-args. If you want more aggressive checking for use-after-free bugs
119 * and you don't mind the additional overhead, then turn on poisoning by adding
120 * "-zp" to the boot-args in addition to "-zc". If you specify -zp without -zc,
121 * it still poisons the memory when it's freed, but doesn't check if the memory
122 * has been altered later when it's reallocated.
/*
 * Zone element use-after-free debugging flags (see the boot-arg notes above).
 * NOTE(review): this region is a mangled extract — the original source line
 * numbers are embedded at the start of each line, and several lines of the
 * ADD_TO_ZONE macro body (its wrapper and loop header) are missing here.
 */
125 boolean_t check_freed_element
= FALSE
; /* enabled by -zc in boot-args */
126 boolean_t zfree_clear
= FALSE
; /* enabled by -zp in boot-args */
/*
 * Sanity filter for a stored free-list pointer: accepts 0, or any 4-byte
 * aligned value at or above vm_min_kernel_address.
 */
128 #define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
/*
 * Push (element) onto (zone)'s free list.  The visible fragment poisons the
 * element words with 0xdeadbeef, stores the old free-list head in the first
 * word, and — when check_freed_element is set and the element holds at least
 * two pointers — duplicates that head in the last word so REMOVE_FROM_ZONE
 * can detect modification on reuse.
 * NOTE(review): the loop header and enclosing braces of this macro are
 * missing from this extract — confirm against the full source before use.
 */
130 #define ADD_TO_ZONE(zone, element) \
135 i < zone->elem_size/sizeof(uint32_t); \
137 ((uint32_t *)(element))[i] = 0xdeadbeef; \
139 *((vm_offset_t *)(element)) = (zone)->free_elements; \
140 if (check_freed_element) { \
141 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
142 ((vm_offset_t *)(element))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
143 (zone)->free_elements; \
145 (zone)->free_elements = (vm_offset_t) (element); \
/*
 * Pop the head of (zone)'s free list into (ret).  With check_freed_element
 * set, the stored head pointer is validated via is_kernel_data_addr() and,
 * for elements of at least two pointers, compared against the duplicate
 * copy kept in the element's last word; any mismatch panics.  The uint32_t
 * loop re-checks the 0xdeadbeef poison laid down by ADD_TO_ZONE.
 * NOTE(review): several lines of this macro (its wrapper and some
 * conditionals) are missing from this extract, and the trailing
 * zone_debug_enabled/ROUNDUP/ZONE_DEBUG_OFFSET defines plus the
 * "#endif ZONE_DEBUG" belong to a conditional block whose "#if" is not
 * visible here.
 */
149 #define REMOVE_FROM_ZONE(zone, ret, type) \
151 (ret) = (type) (zone)->free_elements; \
152 if ((ret) != (type) 0) { \
153 if (check_freed_element) { \
154 if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0]) || \
155 ((zone)->elem_size >= (2 * sizeof(vm_offset_t)) && \
156 ((vm_offset_t *)(ret))[((zone)->elem_size/sizeof(vm_offset_t))-1] != \
157 ((vm_offset_t *)(ret))[0])) \
158 panic("a freed zone element has been modified");\
161 for (ii = sizeof(vm_offset_t) / sizeof(uint32_t); \
162 ii < zone->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
164 if (((uint32_t *)(ret))[ii] != (uint32_t)0xdeadbeef) \
165 panic("a freed zone element has been modified");\
169 (zone)->free_elements = *((vm_offset_t *)(ret)); \
174 #define zone_debug_enabled(z) z->active_zones.next
175 #define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
176 #define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
177 #endif /* ZONE_DEBUG */
180 * Support for garbage collection of unused zone pages:
183 struct zone_page_table_entry
{
184 struct zone_page_table_entry
*link
;
195 void zone_page_alloc(
199 void zone_page_free_element(
200 struct zone_page_table_entry
**free_pages
,
204 void zone_page_collect(
208 boolean_t
zone_page_collectable(
217 thread_call_param_t p0
,
218 thread_call_param_t p1
);
220 void zone_display_zprint( void );
222 #if ZONE_DEBUG && MACH_KDB
226 #endif /* ZONE_DEBUG && MACH_KDB */
228 vm_map_t zone_map
= VM_MAP_NULL
;
230 zone_t zone_zone
= ZONE_NULL
; /* the zone containing other zones */
233 * The VM system gives us an initial chunk of memory.
234 * It has to be big enough to allocate the zone_zone
238 vm_size_t zdata_size
;
/*
 * Per-zone locking primitives built on spin-mode lck_mtx: lock/unlock,
 * wakeup/sleep (zone_sleep drops the lock while waiting, spin-mode), and
 * one-time lock/group/attr initialization that names the lock "zone.<name>".
 * NOTE(review): the MACRO_BEGIN/MACRO_END wrapper lines and the _name local
 * declaration of these macros are missing from this extract.
 */
240 #define lock_zone(zone) \
242 lck_mtx_lock_spin(&(zone)->lock); \
245 #define unlock_zone(zone) \
247 lck_mtx_unlock(&(zone)->lock); \
250 #define zone_wakeup(zone) thread_wakeup((event_t)(zone))
251 #define zone_sleep(zone) \
252 (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
255 #define lock_zone_init(zone) \
258 (void) snprintf(_name, sizeof (_name), "zone.%s", (zone)->zone_name); \
259 lck_grp_attr_setdefault(&(zone)->lock_grp_attr); \
260 lck_grp_init(&(zone)->lock_grp, _name, &(zone)->lock_grp_attr); \
261 lck_attr_setdefault(&(zone)->lock_attr); \
262 lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
263 &(zone)->lock_grp, &(zone)->lock_attr); \
266 #define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
/*
 * Forward declaration of the contiguous-space allocator used before the
 * zone_zone is fully set up.  NOTE(review): the parameter list is truncated
 * in this extract — zget_space() is invoked below as (zone, size, &result).
 */
268 kern_return_t
zget_space(
271 vm_offset_t
*result
);
/* Serializes the bump-pointer state below (taken in zget_space()). */
273 decl_simple_lock_data(,zget_space_lock
)
/*
 * Bump-pointer region for zget_space(); bytes discarded when a freshly
 * allocated region is not contiguous are tallied in zalloc_wasted_space.
 */
274 vm_offset_t zalloc_next_space
;
275 vm_offset_t zalloc_end_of_space
;
276 vm_size_t zalloc_wasted_space
;
279 * Garbage collection map information
/* One entry per page of the zone submap; sized and allocated in zone_init(). */
281 struct zone_page_table_entry
* zone_page_table
;
282 vm_offset_t zone_map_min_address
;
283 vm_offset_t zone_map_max_address
;
284 unsigned int zone_pages
;
287 * Exclude more than one concurrent garbage collection
289 decl_lck_mtx_data(, zone_gc_lock
)
/* Lock group/attr/ext state backing zone_gc_lock (set up in zone_init()). */
291 lck_attr_t zone_lck_attr
;
292 lck_grp_t zone_lck_grp
;
293 lck_grp_attr_t zone_lck_grp_attr
;
294 lck_mtx_ext_t zone_lck_ext
;
/*
 * Test whether [addr, addr+size) lies wholly inside the zone submap.
 * NOTE(review): from_zone_map is defined twice below; in the full source the
 * second (zone_virtual_addr) variant is selected by a preprocessor
 * conditional (presumably ZONE_ALIAS_ADDR, included at the top of the file)
 * whose #if/#else/#endif lines are missing from this extract.
 */
298 #define from_zone_map(addr, size) \
299 ((vm_offset_t)(addr) >= zone_map_min_address && \
300 ((vm_offset_t)(addr) + size -1) < zone_map_max_address)
302 #define from_zone_map(addr, size) \
303 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) >= zone_map_min_address && \
304 ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) + size -1) < zone_map_max_address)
/* Page-state markers passed to zone_page_init() (see calls below). */
307 #define ZONE_PAGE_USED 0
308 #define ZONE_PAGE_UNUSED -1
312 * Protects first_zone, last_zone, num_zones,
313 * and the next_zone field of zones.
315 decl_simple_lock_data(, all_zones_lock
)
318 unsigned int num_zones
;
320 boolean_t zone_gc_allowed
= TRUE
;
321 boolean_t zone_gc_forced
= FALSE
;
322 boolean_t panic_include_zprint
= FALSE
;
323 unsigned zone_gc_last_tick
= 0;
324 unsigned zone_gc_max_rate
= 0; /* in ticks */
327 * Zone leak debugging code
329 * When enabled, this code keeps a log to track allocations to a particular zone that have not
330 * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
331 * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
334 * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
335 * is the name of the zone you wish to log.
337 * This code only tracks one zone, so you need to identify which one is leaking first.
338 * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
339 * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
340 * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
341 * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
342 * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
343 * See the help in the kgmacros for usage info.
346 * Zone corruption logging
348 * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
349 * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
350 * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
351 * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
352 * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
353 * corrupted to examine its history. This should lead to the source of the corruption.
356 static int log_records
; /* size of the log, expressed in number of records */
358 #define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */
360 static char zone_name_to_log
[MAX_ZONE_NAME
] = ""; /* the zone name we're logging, if any */
363 * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
364 * the number of records you want in the log. For example, "zrecs=1000" sets it to 1000 records. Note
365 * that the larger the size of the log, the slower the system will run due to linear searching in the log,
366 * but one doesn't generally care about performance when tracking down a leak. The log is capped at 8000
367 * records since going much larger than this tends to make the system unresponsive and unbootable on small
368 * memory configurations. The default value is 4000 records.
370 * MAX_DEPTH configures how deep of a stack trace is taken on each zalloc in the zone of interest. 15
371 * levels is usually enough to get past all the layers of code in kalloc and IOKit and see who the actual
372 * caller is up above these lower levels.
375 #define ZRECORDS_MAX 8000 /* Max records allowed in the log */
376 #define ZRECORDS_DEFAULT 4000 /* default records in log if zrecs is not specified in boot-args */
377 #define MAX_DEPTH 15 /* number of levels of the stack trace to record */
380 * Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
381 * the records to be ordered chronologically, and a small array to hold the pc's from the stack trace. A
382 * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging,
383 * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees.
384 * If the log fills, old records are replaced as if it were a circular buffer.
388 void *z_element
; /* the element that was zalloc'ed or zfree'ed */
389 uint32_t z_opcode
:1, /* whether it was a zalloc or zfree */
390 z_time
:31; /* time index when operation was done */
391 void *z_pc
[MAX_DEPTH
]; /* stack trace of caller */
395 * Opcodes for the z_opcode field:
402 * The allocation log and all the related variables are protected by the zone lock for the zone_of_interest
405 static struct zrecord
*zrecords
; /* the log itself, dynamically allocated when logging is enabled */
406 static int zcurrent
= 0; /* index of the next slot in the log to use */
407 static int zrecorded
= 0; /* number of allocations recorded in the log */
408 static unsigned int ztime
= 0; /* a timestamp of sorts */
409 static zone_t zone_of_interest
= NULL
; /* the zone being watched; corresponds to zone_name_to_log */
412 * Decide if we want to log this zone by doing a string compare between a zone name and the name
413 * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
414 * possible to include spaces in strings passed in via the boot-args, a period in the logname will
415 * match a space in the zone name.
/*
 * log_this_zone: compare a zone's name against the boot-args log name,
 * bounded by MAX_ZONE_NAME.  A '.' in the logname matches a ' ' in the zone
 * name, because spaces cannot be passed via boot-args.
 * NOTE(review): the function's opening brace, loop body tail, return
 * statements and closing brace are missing from this extract.
 */
419 log_this_zone(const char *zonename
, const char *logname
)
422 const char *zc
= zonename
;
423 const char *lc
= logname
;
426 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
429 for (len
= 1; len
<= MAX_ZONE_NAME
; zc
++, lc
++, len
++) {
432 * If the current characters don't match, check for a space in
433 * the zone name and a corresponding period in the log name.
434 * If that's not there, then the strings don't match.
437 if (*zc
!= *lc
&& !(*zc
== ' ' && *lc
== '.'))
441 * The strings are equal so far. If we're at the end, then it's a match.
453 * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
454 * the buffer for the records has been allocated.
457 #define DO_LOGGING(z) (zrecords && (z) == zone_of_interest)
459 extern boolean_t zlog_ready
;
463 * zinit initializes a new zone. The zone data structures themselves
464 * are stored in a zone, which is initially a static structure that
465 * is initialized by zone_init.
469 vm_size_t size
, /* the size of an element */
470 vm_size_t max
, /* maximum memory to use */
471 vm_size_t alloc
, /* allocation size */
472 const char *name
) /* a name for the zone */
476 if (zone_zone
== ZONE_NULL
) {
477 if (zget_space(NULL
, sizeof(struct zone
), (vm_offset_t
*)&z
)
481 z
= (zone_t
) zalloc(zone_zone
);
486 * Round off all the parameters appropriately.
488 if (size
< sizeof(z
->free_elements
))
489 size
= sizeof(z
->free_elements
);
490 size
= ((size
-1) + sizeof(z
->free_elements
)) -
491 ((size
-1) % sizeof(z
->free_elements
));
494 alloc
= round_page(alloc
);
495 max
= round_page(max
);
497 * we look for an allocation size with less than 1% waste
498 * up to 5 pages in size...
499 * otherwise, we look for an allocation size with least fragmentation
500 * in the range of 1 - 5 pages
501 * This size will be used unless
502 * the user suggestion is larger AND has less fragmentation
505 if ((size
< PAGE_SIZE
) && (PAGE_SIZE
% size
<= PAGE_SIZE
/ 10))
509 { vm_size_t best
, waste
; unsigned int i
;
513 for (i
= 1; i
<= 5; i
++) {
514 vm_size_t tsize
, twaste
;
516 tsize
= i
* PAGE_SIZE
;
518 if ((tsize
% size
) < (tsize
/ 100)) {
520 goto use_this_allocation
;
522 twaste
= tsize
% size
;
524 best
= tsize
, waste
= twaste
;
526 if (alloc
<= best
|| (alloc
% size
>= waste
))
530 if (max
&& (max
< alloc
))
533 z
->free_elements
= 0;
537 z
->alloc_size
= alloc
;
540 z
->doing_alloc
= FALSE
;
542 z
->exhaustible
= FALSE
;
543 z
->collectable
= TRUE
;
544 z
->allows_foreign
= FALSE
;
545 z
->expandable
= TRUE
;
547 z
->async_pending
= FALSE
;
548 z
->noencrypt
= FALSE
;
551 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
552 zone_debug_enable(z
);
553 #endif /* ZONE_DEBUG */
557 * Add the zone to the all-zones list.
560 z
->next_zone
= ZONE_NULL
;
561 thread_call_setup(&z
->call_async_alloc
, zalloc_async
, z
);
562 simple_lock(&all_zones_lock
);
564 last_zone
= &z
->next_zone
;
566 simple_unlock(&all_zones_lock
);
569 * Check if we should be logging this zone. If so, remember the zone pointer.
572 if (log_this_zone(z
->zone_name
, zone_name_to_log
)) {
573 zone_of_interest
= z
;
577 * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
578 * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
579 * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
580 * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
581 * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
585 if (zone_of_interest
!= NULL
&& zrecords
== NULL
&& zlog_ready
) {
586 if (kmem_alloc(kernel_map
, (vm_offset_t
*)&zrecords
, log_records
* sizeof(struct zrecord
)) == KERN_SUCCESS
) {
589 * We got the memory for the log. Zero it out since the code needs this to identify unused records.
590 * At this point, everything is set up and we're ready to start logging this zone.
593 bzero((void *)zrecords
, log_records
* sizeof(struct zrecord
));
594 printf("zone: logging started for zone %s (%p)\n", zone_of_interest
->zone_name
, zone_of_interest
);
597 printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
598 zone_of_interest
= NULL
;
/*
 * zcram: carve the region [newaddr, newaddr+size) into elements of
 * zone->elem_size and push each onto the zone's free list via ADD_TO_ZONE,
 * registering pages that belong to the zone submap with zone_page_alloc().
 * The count++ offsets ADD_TO_ZONE's accounting so crammed elements read as
 * free, and cur_size is grown per element.
 * NOTE(review): the function header, locking calls, and the loop-increment
 * lines are missing from this extract — the shown lines are a fragment.
 */
606 * Cram the given memory into the specified zone.
610 register zone_t zone
,
614 register vm_size_t elem_size
;
615 vm_offset_t newmem
= (vm_offset_t
) newaddr
;
617 /* Basic sanity checks */
618 assert(zone
!= ZONE_NULL
&& newmem
!= (vm_offset_t
)0);
619 assert(!zone
->collectable
|| zone
->allows_foreign
620 || (from_zone_map(newmem
, size
)));
622 elem_size
= zone
->elem_size
;
625 while (size
>= elem_size
) {
626 ADD_TO_ZONE(zone
, newmem
);
627 if (from_zone_map(newmem
, elem_size
))
628 zone_page_alloc(newmem
, elem_size
);
629 zone
->count
++; /* compensate for ADD_TO_ZONE */
632 zone
->cur_size
+= elem_size
;
638 * Contiguous space allocator for non-paged zones. Allocates "size" amount
639 * of memory from zone_map.
648 vm_offset_t new_space
= 0;
649 vm_size_t space_to_add
= 0;
651 simple_lock(&zget_space_lock
);
652 while ((zalloc_next_space
+ size
) > zalloc_end_of_space
) {
654 * Add at least one page to allocation area.
657 space_to_add
= round_page(size
);
659 if (new_space
== 0) {
660 kern_return_t retval
;
661 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
664 * Memory cannot be wired down while holding
665 * any locks that the pageout daemon might
666 * need to free up pages. [Making the zget_space
667 * lock a complex lock does not help in this
670 * Unlock and allocate memory. Because several
671 * threads might try to do this at once, don't
672 * use the memory before checking for available
676 simple_unlock(&zget_space_lock
);
678 if (zone
== NULL
|| zone
->noencrypt
)
679 zflags
|= KMA_NOENCRYPT
;
681 retval
= kernel_memory_allocate(zone_map
, &new_space
, space_to_add
, 0, zflags
);
682 if (retval
!= KERN_SUCCESS
)
685 if (space_to_add
== PAGE_SIZE
)
686 new_space
= zone_alias_addr(new_space
);
688 zone_page_init(new_space
, space_to_add
,
690 simple_lock(&zget_space_lock
);
696 * Memory was allocated in a previous iteration.
698 * Check whether the new region is contiguous
702 if (new_space
!= zalloc_end_of_space
) {
704 * Throw away the remainder of the
705 * old space, and start a new one.
707 zalloc_wasted_space
+=
708 zalloc_end_of_space
- zalloc_next_space
;
709 zalloc_next_space
= new_space
;
712 zalloc_end_of_space
= new_space
+ space_to_add
;
716 *result
= zalloc_next_space
;
717 zalloc_next_space
+= size
;
718 simple_unlock(&zget_space_lock
);
721 kmem_free(zone_map
, new_space
, space_to_add
);
723 return(KERN_SUCCESS
);
728 * Steal memory for the zone package. Called from
729 * vm_page_bootstrap().
/*
 * zone_steal_memory: grab enough early-boot memory via pmap_steal_memory()
 * for about 128 struct zones; the resulting zdata/zdata_size seed the
 * zget_space() bump-pointer region in zone_bootstrap().
 * NOTE(review): the function's braces are missing from this extract.
 */
732 zone_steal_memory(void)
734 zdata_size
= round_page(128*sizeof(struct zone
));
735 zdata
= (vm_offset_t
)((char *)pmap_steal_memory(zdata_size
) - (char *)0);
740 * Fill a zone with enough memory to contain at least nelem elements.
741 * Memory is obtained with kmem_alloc_kobject from the kernel_map.
742 * Return the number of elements actually put into the zone, which may
743 * be more than the caller asked for since the memory allocation is
744 * rounded up to a full page.
759 size
= nelem
* zone
->elem_size
;
760 size
= round_page(size
);
761 kr
= kmem_alloc_kobject(kernel_map
, &memory
, size
);
762 if (kr
!= KERN_SUCCESS
)
765 zone_change(zone
, Z_FOREIGN
, TRUE
);
766 zcram(zone
, (void *)memory
, size
);
767 nalloc
= (int)(size
/ zone
->elem_size
);
768 assert(nalloc
>= nelem
);
774 * Initialize the "zone of zones" which uses fixed memory allocated
775 * earlier in memory initialization. zone_bootstrap is called
781 vm_size_t zone_zone_size
;
782 vm_offset_t zone_zone_space
;
785 /* see if we want freed zone element checking and/or poisoning */
786 if (PE_parse_boot_argn("-zc", temp_buf
, sizeof (temp_buf
))) {
787 check_freed_element
= TRUE
;
790 if (PE_parse_boot_argn("-zp", temp_buf
, sizeof (temp_buf
))) {
795 * Check for and set up zone leak detection if requested via boot-args. We recognize two
799 * zrecs=<num_records_in_log>
801 * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
802 * control the size of the log. If zrecs is not specified, a default value is used.
805 if (PE_parse_boot_argn("zlog", zone_name_to_log
, sizeof(zone_name_to_log
)) == TRUE
) {
806 if (PE_parse_boot_argn("zrecs", &log_records
, sizeof(log_records
)) == TRUE
) {
809 * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
810 * This prevents accidentally hogging too much kernel memory and making the system
814 log_records
= MIN(ZRECORDS_MAX
, log_records
);
817 log_records
= ZRECORDS_DEFAULT
;
821 simple_lock_init(&all_zones_lock
, 0);
823 first_zone
= ZONE_NULL
;
824 last_zone
= &first_zone
;
827 simple_lock_init(&zget_space_lock
, 0);
828 zalloc_next_space
= zdata
;
829 zalloc_end_of_space
= zdata
+ zdata_size
;
830 zalloc_wasted_space
= 0;
832 /* assertion: nobody else called zinit before us */
833 assert(zone_zone
== ZONE_NULL
);
834 zone_zone
= zinit(sizeof(struct zone
), 128 * sizeof(struct zone
),
835 sizeof(struct zone
), "zones");
836 zone_change(zone_zone
, Z_COLLECT
, FALSE
);
837 zone_change(zone_zone
, Z_NOENCRYPT
, TRUE
);
839 zone_zone_size
= zalloc_end_of_space
- zalloc_next_space
;
840 zget_space(NULL
, zone_zone_size
, &zone_zone_space
);
841 zcram(zone_zone
, (void *)zone_zone_space
, zone_zone_size
);
846 vm_size_t max_zonemap_size
)
848 kern_return_t retval
;
849 vm_offset_t zone_min
;
850 vm_offset_t zone_max
;
851 vm_size_t zone_table_size
;
853 retval
= kmem_suballoc(kernel_map
, &zone_min
, max_zonemap_size
,
854 FALSE
, VM_FLAGS_ANYWHERE
| VM_FLAGS_PERMANENT
,
857 if (retval
!= KERN_SUCCESS
)
858 panic("zone_init: kmem_suballoc failed");
859 zone_max
= zone_min
+ round_page(max_zonemap_size
);
861 * Setup garbage collection information:
863 zone_table_size
= atop_kernel(zone_max
- zone_min
) *
864 sizeof(struct zone_page_table_entry
);
865 if (kmem_alloc_kobject(zone_map
, (vm_offset_t
*) &zone_page_table
,
866 zone_table_size
) != KERN_SUCCESS
)
868 zone_min
= (vm_offset_t
)zone_page_table
+ round_page(zone_table_size
);
869 zone_pages
= (unsigned int)atop_kernel(zone_max
- zone_min
);
870 zone_map_min_address
= zone_min
;
871 zone_map_max_address
= zone_max
;
873 lck_grp_attr_setdefault(&zone_lck_grp_attr
);
874 lck_grp_init(&zone_lck_grp
, "zones", &zone_lck_grp_attr
);
875 lck_attr_setdefault(&zone_lck_attr
);
876 lck_mtx_init_ext(&zone_gc_lock
, &zone_lck_ext
, &zone_lck_grp
, &zone_lck_attr
);
878 zone_page_init(zone_min
, zone_max
- zone_min
, ZONE_PAGE_UNUSED
);
881 extern volatile SInt32 kfree_nop_count
;
884 * zalloc returns an element from the specified zone.
888 register zone_t zone
,
892 kern_return_t retval
;
893 void *bt
[MAX_DEPTH
]; /* only used if zone logging is enabled */
897 assert(zone
!= ZONE_NULL
);
900 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
903 if (DO_LOGGING(zone
))
904 numsaved
= OSBacktrace(&bt
[0], MAX_DEPTH
);
908 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
910 while ((addr
== 0) && canblock
&& (zone
->doing_gc
)) {
911 zone
->waiting
= TRUE
;
913 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
916 while ((addr
== 0) && canblock
) {
918 * If nothing was there, try to get more
920 if (zone
->doing_alloc
) {
922 * Someone is allocating memory for this zone.
923 * Wait for it to show up, then try again.
925 zone
->waiting
= TRUE
;
929 if ((zone
->cur_size
+ zone
->elem_size
) >
931 if (zone
->exhaustible
)
933 if (zone
->expandable
) {
935 * We're willing to overflow certain
936 * zones, but not without complaining.
938 * This is best used in conjunction
939 * with the collectable flag. What we
940 * want is an assurance we can get the
941 * memory back, assuming there's no
944 zone
->max_size
+= (zone
->max_size
>> 1);
948 panic("zalloc: zone \"%s\" empty.", zone
->zone_name
);
951 zone
->doing_alloc
= TRUE
;
954 if (zone
->collectable
) {
956 vm_size_t alloc_size
;
960 int zflags
= KMA_KOBJECT
|KMA_NOPAGEWAIT
;
962 if (vm_pool_low() || retry
>= 1)
964 round_page(zone
->elem_size
);
966 alloc_size
= zone
->alloc_size
;
969 zflags
|= KMA_NOENCRYPT
;
971 retval
= kernel_memory_allocate(zone_map
, &space
, alloc_size
, 0, zflags
);
972 if (retval
== KERN_SUCCESS
) {
974 if (alloc_size
== PAGE_SIZE
)
975 space
= zone_alias_addr(space
);
977 zone_page_init(space
, alloc_size
,
979 zcram(zone
, (void *)space
, alloc_size
);
982 } else if (retval
!= KERN_RESOURCE_SHORTAGE
) {
987 printf("zalloc did gc\n");
988 zone_display_zprint();
991 panic_include_zprint
= TRUE
;
992 panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone
->zone_name
, zone
->count
, retval
, (int)kfree_nop_count
);
999 zone
->doing_alloc
= FALSE
;
1000 if (zone
->waiting
) {
1001 zone
->waiting
= FALSE
;
1004 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1006 retval
== KERN_RESOURCE_SHORTAGE
) {
1014 retval
= zget_space(zone
, zone
->elem_size
, &space
);
1017 zone
->doing_alloc
= FALSE
;
1018 if (zone
->waiting
) {
1019 zone
->waiting
= FALSE
;
1020 thread_wakeup((event_t
)zone
);
1022 if (retval
== KERN_SUCCESS
) {
1024 zone
->cur_size
+= zone
->elem_size
;
1026 if (zone_debug_enabled(zone
)) {
1027 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)space
);
1031 zone_page_alloc(space
, zone
->elem_size
);
1033 if (zone_debug_enabled(zone
))
1034 space
+= ZONE_DEBUG_OFFSET
;
1039 if (retval
== KERN_RESOURCE_SHORTAGE
) {
1045 panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone
->zone_name
, zone
->count
, retval
);
1050 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1054 * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
1055 * suspected, so this code rarely executes. We need to do this code while still holding the zone lock
1056 * since it protects the various log related data structures.
1059 if (DO_LOGGING(zone
) && addr
) {
1062 * Look for a place to record this new allocation. We implement two different logging strategies
1063 * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
1064 * for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
1065 * among all the records. So we look for an unused slot in the log and fill that in before overwriting
1066 * an old entry. When looking for a corruption however, it's better to have a chronological log of all
1067 * the allocations and frees done in the zone so that the history of operations for a specific zone
1068 * element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
1069 * oldest entry whenever a new one needs to be added.
1071 * The check_freed_element flag tells us what style of logging to do. It's set if we're supposed to be
1072 * doing corruption style logging (indicated via -zc in the boot-args).
1075 if (!check_freed_element
&& zrecords
[zcurrent
].z_element
&& zrecorded
< log_records
) {
1078 * If we get here, we're doing leak style logging and there's still some unused entries in
1079 * the log (since zrecorded is smaller than the size of the log). Look for an unused slot
1080 * starting at zcurrent and wrap-around if we reach the end of the buffer. If the buffer
1081 * is already full, we just fall through and overwrite the element indexed by zcurrent.
1084 for (i
= zcurrent
; i
< log_records
; i
++) {
1085 if (zrecords
[i
].z_element
== NULL
) {
1091 for (i
= 0; i
< zcurrent
; i
++) {
1092 if (zrecords
[i
].z_element
== NULL
) {
1100 * Save a record of this allocation
1104 if (zrecords
[zcurrent
].z_element
== NULL
)
1107 zrecords
[zcurrent
].z_element
= (void *)addr
;
1108 zrecords
[zcurrent
].z_time
= ztime
++;
1109 zrecords
[zcurrent
].z_opcode
= ZOP_ALLOC
;
1111 for (i
= 0; i
< numsaved
; i
++)
1112 zrecords
[zcurrent
].z_pc
[i
] = bt
[i
];
1114 for (; i
< MAX_DEPTH
; i
++)
1115 zrecords
[zcurrent
].z_pc
[i
] = 0;
1119 if (zcurrent
>= log_records
)
1123 if ((addr
== 0) && !canblock
&& (zone
->async_pending
== FALSE
) && (zone
->exhaustible
== FALSE
) && (!vm_pool_low())) {
1124 zone
->async_pending
= TRUE
;
1126 thread_call_enter(&zone
->call_async_alloc
);
1128 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1132 if (addr
&& zone_debug_enabled(zone
)) {
1133 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)addr
);
1134 addr
+= ZONE_DEBUG_OFFSET
;
1141 TRACE_MACHLEAKS(ZALLOC_CODE
, ZALLOC_CODE_2
, zone
->elem_size
, addr
);
1143 return((void *)addr
);
1149 register zone_t zone
)
1151 return( zalloc_canblock(zone
, TRUE
) );
1156 register zone_t zone
)
1158 return( zalloc_canblock(zone
, FALSE
) );
1163 thread_call_param_t p0
,
1164 __unused thread_call_param_t p1
)
1168 elt
= zalloc_canblock((zone_t
)p0
, TRUE
);
1169 zfree((zone_t
)p0
, elt
);
1170 lock_zone(((zone_t
)p0
));
1171 ((zone_t
)p0
)->async_pending
= FALSE
;
1172 unlock_zone(((zone_t
)p0
));
1177 * zget returns an element from the specified zone
1178 * and immediately returns nothing if there is nothing there.
1180 * This form should be used when you can not block (like when
1181 * processing an interrupt).
1185 register zone_t zone
)
1187 register vm_offset_t addr
;
1189 assert( zone
!= ZONE_NULL
);
1191 if (!lock_try_zone(zone
))
1194 REMOVE_FROM_ZONE(zone
, addr
, vm_offset_t
);
1196 if (addr
&& zone_debug_enabled(zone
)) {
1197 enqueue_tail(&zone
->active_zones
, (queue_entry_t
)addr
);
1198 addr
+= ZONE_DEBUG_OFFSET
;
1200 #endif /* ZONE_DEBUG */
1203 return((void *) addr
);
1206 /* Keep this FALSE by default. Large memory machine run orders of magnitude
1207 slower in debug mode when true. Use debugger to enable if needed */
1208 /* static */ boolean_t zone_check
= FALSE
;
1210 static zone_t zone_last_bogus_zone
= ZONE_NULL
;
1211 static vm_offset_t zone_last_bogus_elem
= 0;
1215 register zone_t zone
,
1218 vm_offset_t elem
= (vm_offset_t
) addr
;
1219 void *bt
[MAX_DEPTH
]; /* only used if zone logging is enable via boot-args */
1222 assert(zone
!= ZONE_NULL
);
1225 * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
1228 if (DO_LOGGING(zone
))
1229 numsaved
= OSBacktrace(&bt
[0], MAX_DEPTH
);
1232 /* Basic sanity checks */
1233 if (zone
== ZONE_NULL
|| elem
== (vm_offset_t
)0)
1234 panic("zfree: NULL");
1235 /* zone_gc assumes zones are never freed */
1236 if (zone
== zone_zone
)
1237 panic("zfree: freeing to zone_zone breaks zone_gc!");
1240 TRACE_MACHLEAKS(ZFREE_CODE
, ZFREE_CODE_2
, zone
->elem_size
, (uintptr_t)addr
);
1242 if (zone
->collectable
&& !zone
->allows_foreign
&&
1243 !from_zone_map(elem
, zone
->elem_size
)) {
1245 panic("zfree: non-allocated memory in collectable zone!");
1247 zone_last_bogus_zone
= zone
;
1248 zone_last_bogus_elem
= elem
;
1255 * See if we're doing logging on this zone. There are two styles of logging used depending on
1256 * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
1259 if (DO_LOGGING(zone
)) {
1262 if (check_freed_element
) {
1265 * We're logging to catch a corruption. Add a record of this zfree operation
1269 if (zrecords
[zcurrent
].z_element
== NULL
)
1272 zrecords
[zcurrent
].z_element
= (void *)addr
;
1273 zrecords
[zcurrent
].z_time
= ztime
++;
1274 zrecords
[zcurrent
].z_opcode
= ZOP_FREE
;
1276 for (i
= 0; i
< numsaved
; i
++)
1277 zrecords
[zcurrent
].z_pc
[i
] = bt
[i
];
1279 for (; i
< MAX_DEPTH
; i
++)
1280 zrecords
[zcurrent
].z_pc
[i
] = 0;
1284 if (zcurrent
>= log_records
)
1290 * We're logging to catch a leak. Remove any record we might have for this
1291 * element since it's being freed. Note that we may not find it if the buffer
1292 * overflowed and that's OK. Since the log is of a limited size, old records
1293 * get overwritten if there are more zallocs than zfrees.
1296 for (i
= 0; i
< log_records
; i
++) {
1297 if (zrecords
[i
].z_element
== addr
) {
1298 zrecords
[i
].z_element
= NULL
;
1309 if (zone_debug_enabled(zone
)) {
1312 elem
-= ZONE_DEBUG_OFFSET
;
1314 /* check the zone's consistency */
1316 for (tmp_elem
= queue_first(&zone
->active_zones
);
1317 !queue_end(tmp_elem
, &zone
->active_zones
);
1318 tmp_elem
= queue_next(tmp_elem
))
1319 if (elem
== (vm_offset_t
)tmp_elem
)
1321 if (elem
!= (vm_offset_t
)tmp_elem
)
1322 panic("zfree()ing element from wrong zone");
1324 remqueue(&zone
->active_zones
, (queue_t
) elem
);
1326 #endif /* ZONE_DEBUG */
1330 /* check the zone's consistency */
1332 for (this = zone
->free_elements
;
1334 this = * (vm_offset_t
*) this)
1335 if (!pmap_kernel_va(this) || this == elem
)
1338 ADD_TO_ZONE(zone
, elem
);
1340 if (zone
->count
< 0)
1341 panic("zfree: count < 0!");
1345 * If elements have one or more pages, and memory is low,
1346 * request to run the garbage collection in the zone the next
1347 * time the pageout thread runs.
1349 if (zone
->elem_size
>= PAGE_SIZE
&&
1351 zone_gc_forced
= TRUE
;
1357 /* Change a zone's flags.
1358 * This routine must be called immediately after zinit.
1366 assert( zone
!= ZONE_NULL
);
1367 assert( value
== TRUE
|| value
== FALSE
);
1371 zone
->noencrypt
= value
;
1374 zone
->exhaustible
= value
;
1377 zone
->collectable
= value
;
1380 zone
->expandable
= value
;
1383 zone
->allows_foreign
= value
;
1387 panic("Zone_change: Wrong Item Type!");
1394 * Return the expected number of free elements in the zone.
1395 * This calculation will be incorrect if items are zfree'd that
1396 * were never zalloc'd/zget'd. The correct way to stuff memory
1397 * into a zone is by zcram.
1401 zone_free_count(zone_t zone
)
1403 integer_t free_count
;
1406 free_count
= (integer_t
)(zone
->cur_size
/zone
->elem_size
- zone
->count
);
1409 assert(free_count
>= 0);
1415 * zprealloc preallocates wired memory, exanding the specified
1416 * zone to the specified size
1426 if (kmem_alloc_kobject(zone_map
, &addr
, size
) != KERN_SUCCESS
)
1428 zone_page_init(addr
, size
, ZONE_PAGE_USED
);
1429 zcram(zone
, (void *)addr
, size
);
1434 * Zone garbage collection subroutines
1438 zone_page_collectable(
1442 struct zone_page_table_entry
*zp
;
1446 addr
= zone_virtual_addr(addr
);
1449 if (!from_zone_map(addr
, size
))
1450 panic("zone_page_collectable");
1453 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1454 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1456 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++)
1457 if (zp
->collect_count
== zp
->alloc_count
)
1468 struct zone_page_table_entry
*zp
;
1472 addr
= zone_virtual_addr(addr
);
1475 if (!from_zone_map(addr
, size
))
1476 panic("zone_page_keep");
1479 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1480 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1482 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++)
1483 zp
->collect_count
= 0;
1491 struct zone_page_table_entry
*zp
;
1495 addr
= zone_virtual_addr(addr
);
1498 if (!from_zone_map(addr
, size
))
1499 panic("zone_page_collect");
1502 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1503 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1505 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++)
1506 ++zp
->collect_count
;
1515 struct zone_page_table_entry
*zp
;
1519 addr
= zone_virtual_addr(addr
);
1522 if (!from_zone_map(addr
, size
))
1523 panic("zone_page_init");
1526 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1527 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1529 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++) {
1530 zp
->alloc_count
= value
;
1531 zp
->collect_count
= 0;
1540 struct zone_page_table_entry
*zp
;
1544 addr
= zone_virtual_addr(addr
);
1547 if (!from_zone_map(addr
, size
))
1548 panic("zone_page_alloc");
1551 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1552 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1554 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++) {
1556 * Set alloc_count to (ZONE_PAGE_USED + 1) if
1557 * it was previously set to ZONE_PAGE_UNUSED.
1559 if (zp
->alloc_count
== ZONE_PAGE_UNUSED
)
1560 zp
->alloc_count
= 1;
1567 zone_page_free_element(
1568 struct zone_page_table_entry
**free_pages
,
1572 struct zone_page_table_entry
*zp
;
1576 addr
= zone_virtual_addr(addr
);
1579 if (!from_zone_map(addr
, size
))
1580 panic("zone_page_free_element");
1583 i
= (natural_t
)atop_kernel(addr
-zone_map_min_address
);
1584 j
= (natural_t
)atop_kernel((addr
+size
-1) - zone_map_min_address
);
1586 for (zp
= zone_page_table
+ i
; i
<= j
; zp
++, i
++) {
1587 if (zp
->collect_count
> 0)
1588 --zp
->collect_count
;
1589 if (--zp
->alloc_count
== 0) {
1590 zp
->alloc_count
= ZONE_PAGE_UNUSED
;
1591 zp
->collect_count
= 0;
1593 zp
->link
= *free_pages
;
1600 /* This is used for walking through a zone's free element list.
1602 struct zone_free_element
{
1603 struct zone_free_element
* next
;
1607 * Add a linked list of pages starting at base back into the zone
1608 * free list. Tail points to the last element on the list.
1611 #define ADD_LIST_TO_ZONE(zone, base, tail) \
1613 (tail)->next = (void *)((zone)->free_elements); \
1614 if (check_freed_element) { \
1615 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
1616 ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
1617 (zone)->free_elements; \
1619 (zone)->free_elements = (unsigned long)(base); \
1623 * Add an element to the chain pointed to by prev.
1626 #define ADD_ELEMENT(zone, prev, elem) \
1628 (prev)->next = (elem); \
1629 if (check_freed_element) { \
1630 if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
1631 ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
1632 (vm_offset_t)(elem); \
1639 uint32_t elems_collected
,
1644 /* Zone garbage collection
1646 * zone_gc will walk through all the free elements in all the
1647 * zones that are marked collectable looking for reclaimable
1648 * pages. zone_gc is called by consider_zone_gc when the system
1649 * begins to run out of memory.
1654 unsigned int max_zones
;
1657 struct zone_page_table_entry
*zp
, *zone_free_pages
;
1659 lck_mtx_lock(&zone_gc_lock
);
1661 simple_lock(&all_zones_lock
);
1662 max_zones
= num_zones
;
1664 simple_unlock(&all_zones_lock
);
1667 for (i
= 0; i
< zone_pages
; i
++)
1668 assert(zone_page_table
[i
].collect_count
== 0);
1669 #endif /* MACH_ASSERT */
1671 zone_free_pages
= NULL
;
1673 for (i
= 0; i
< max_zones
; i
++, z
= z
->next_zone
) {
1675 vm_size_t elt_size
, size_freed
;
1676 struct zone_free_element
*elt
, *base_elt
, *base_prev
, *prev
, *scan
, *keep
, *tail
;
1678 assert(z
!= ZONE_NULL
);
1680 if (!z
->collectable
)
1685 elt_size
= z
->elem_size
;
1688 * Do a quick feasability check before we scan the zone:
1689 * skip unless there is likelihood of getting pages back
1690 * (i.e we need a whole allocation block's worth of free
1691 * elements before we can garbage collect) and
1692 * the zone has more than 10 percent of it's elements free
1693 * or the element size is a multiple of the PAGE_SIZE
1695 if ((elt_size
& PAGE_MASK
) &&
1696 (((z
->cur_size
- z
->count
* elt_size
) <= (2 * z
->alloc_size
)) ||
1697 ((z
->cur_size
- z
->count
* elt_size
) <= (z
->cur_size
/ 10)))) {
1705 * Snatch all of the free elements away from the zone.
1708 scan
= (void *)z
->free_elements
;
1709 z
->free_elements
= 0;
1716 * Determine which elements we can attempt to collect
1717 * and count them up in the page table. Foreign elements
1718 * are returned to the zone.
1721 prev
= (void *)&scan
;
1723 n
= 0; tail
= keep
= NULL
;
1724 while (elt
!= NULL
) {
1725 if (from_zone_map(elt
, elt_size
)) {
1726 zone_page_collect((vm_offset_t
)elt
, elt_size
);
1731 ++zgc_stats
.elems_collected
;
1737 ADD_ELEMENT(z
, tail
, elt
);
1741 ADD_ELEMENT(z
, prev
, elt
->next
);
1743 ADD_ELEMENT(z
, tail
, NULL
);
1747 * Dribble back the elements we are keeping.
1751 if (z
->waiting
== TRUE
) {
1755 ADD_LIST_TO_ZONE(z
, keep
, tail
);
1761 while ((elt
!= NULL
) && (++m
< 50)) {
1766 ADD_LIST_TO_ZONE(z
, base_elt
, prev
);
1767 ADD_ELEMENT(z
, base_prev
, elt
);
1784 * Return any remaining elements.
1790 ADD_LIST_TO_ZONE(z
, keep
, tail
);
1798 * Determine which pages we can reclaim and
1799 * free those elements.
1804 n
= 0; tail
= keep
= NULL
;
1805 while (elt
!= NULL
) {
1806 if (zone_page_collectable((vm_offset_t
)elt
, elt_size
)) {
1807 size_freed
+= elt_size
;
1808 zone_page_free_element(&zone_free_pages
,
1809 (vm_offset_t
)elt
, elt_size
);
1813 ++zgc_stats
.elems_freed
;
1816 zone_page_keep((vm_offset_t
)elt
, elt_size
);
1821 ADD_ELEMENT(z
, tail
, elt
);
1826 ADD_ELEMENT(z
, tail
, NULL
);
1828 ++zgc_stats
.elems_kept
;
1832 * Dribble back the elements we are keeping,
1833 * and update the zone size info.
1839 z
->cur_size
-= size_freed
;
1843 ADD_LIST_TO_ZONE(z
, keep
, tail
);
1853 n
= 0; tail
= keep
= NULL
;
1858 * Return any remaining elements, and update
1859 * the zone size info.
1864 if (size_freed
> 0 || keep
!= NULL
) {
1866 z
->cur_size
-= size_freed
;
1869 ADD_LIST_TO_ZONE(z
, keep
, tail
);
1874 z
->doing_gc
= FALSE
;
1883 * Reclaim the pages we are freeing.
1886 while ((zp
= zone_free_pages
) != NULL
) {
1887 zone_free_pages
= zp
->link
;
1889 z
= zone_virtual_addr((vm_map_address_t
)z
);
1891 kmem_free(zone_map
, zone_map_min_address
+ PAGE_SIZE
*
1892 (zp
- zone_page_table
), PAGE_SIZE
);
1893 ++zgc_stats
.pgs_freed
;
1896 lck_mtx_unlock(&zone_gc_lock
);
1902 * Called by the pageout daemon when the system needs more free pages.
1906 consider_zone_gc(boolean_t force
)
1909 * By default, don't attempt zone GC more frequently
1910 * than once / 1 minutes.
1913 if (zone_gc_max_rate
== 0)
1914 zone_gc_max_rate
= (60 << SCHED_TICK_SHIFT
) + 1;
1916 if (zone_gc_allowed
&&
1917 ((sched_tick
> (zone_gc_last_tick
+ zone_gc_max_rate
)) ||
1920 zone_gc_forced
= FALSE
;
1921 zone_gc_last_tick
= sched_tick
;
1926 struct fake_zone_info
{
1928 void (*func
)(int *, vm_size_t
*, vm_size_t
*, vm_size_t
*, vm_size_t
*,
1932 static struct fake_zone_info fake_zones
[] = {
1934 .name
= "kernel_stacks",
1935 .func
= stack_fake_zone_info
,
1939 .name
= "save_areas",
1940 .func
= save_fake_zone_info
,
1943 .name
= "pmap_mappings",
1944 .func
= mapping_fake_zone_info
,
1947 #if defined(__i386__) || defined (__x86_64__)
1949 .name
= "page_tables",
1950 .func
= pt_fake_zone_info
,
1954 .name
= "kalloc.large",
1955 .func
= kalloc_fake_zone_info
,
1962 zone_name_array_t
*namesp
,
1963 mach_msg_type_number_t
*namesCntp
,
1964 zone_info_array_t
*infop
,
1965 mach_msg_type_number_t
*infoCntp
)
1968 vm_offset_t names_addr
;
1969 vm_size_t names_size
;
1971 vm_offset_t info_addr
;
1972 vm_size_t info_size
;
1973 unsigned int max_zones
, i
;
1978 size_t num_fake_zones
;
1981 if (host
== HOST_NULL
)
1982 return KERN_INVALID_HOST
;
1984 #if defined(__LP64__)
1985 if (!thread_is_64bit(current_thread()))
1986 return KERN_NOT_SUPPORTED
;
1988 if (thread_is_64bit(current_thread()))
1989 return KERN_NOT_SUPPORTED
;
1992 num_fake_zones
= sizeof fake_zones
/ sizeof fake_zones
[0];
1995 * We assume that zones aren't freed once allocated.
1996 * We won't pick up any zones that are allocated later.
1999 simple_lock(&all_zones_lock
);
2000 max_zones
= (unsigned int)(num_zones
+ num_fake_zones
);
2002 simple_unlock(&all_zones_lock
);
2004 if (max_zones
<= *namesCntp
) {
2005 /* use in-line memory */
2006 names_size
= *namesCntp
* sizeof *names
;
2009 names_size
= round_page(max_zones
* sizeof *names
);
2010 kr
= kmem_alloc_pageable(ipc_kernel_map
,
2011 &names_addr
, names_size
);
2012 if (kr
!= KERN_SUCCESS
)
2014 names
= (zone_name_t
*) names_addr
;
2017 if (max_zones
<= *infoCntp
) {
2018 /* use in-line memory */
2019 info_size
= *infoCntp
* sizeof *info
;
2022 info_size
= round_page(max_zones
* sizeof *info
);
2023 kr
= kmem_alloc_pageable(ipc_kernel_map
,
2024 &info_addr
, info_size
);
2025 if (kr
!= KERN_SUCCESS
) {
2026 if (names
!= *namesp
)
2027 kmem_free(ipc_kernel_map
,
2028 names_addr
, names_size
);
2032 info
= (zone_info_t
*) info_addr
;
2037 for (i
= 0; i
< num_zones
; i
++) {
2040 assert(z
!= ZONE_NULL
);
2046 simple_lock(&all_zones_lock
);
2048 simple_unlock(&all_zones_lock
);
2050 /* assuming here the name data is static */
2051 (void) strncpy(zn
->zn_name
, zcopy
.zone_name
,
2052 sizeof zn
->zn_name
);
2053 zn
->zn_name
[sizeof zn
->zn_name
- 1] = '\0';
2055 zi
->zi_count
= zcopy
.count
;
2056 zi
->zi_cur_size
= zcopy
.cur_size
;
2057 zi
->zi_max_size
= zcopy
.max_size
;
2058 zi
->zi_elem_size
= zcopy
.elem_size
;
2059 zi
->zi_alloc_size
= zcopy
.alloc_size
;
2060 zi
->zi_exhaustible
= zcopy
.exhaustible
;
2061 zi
->zi_collectable
= zcopy
.collectable
;
2068 * loop through the fake zones and fill them using the specialized
2071 for (i
= 0; i
< num_fake_zones
; i
++) {
2072 strncpy(zn
->zn_name
, fake_zones
[i
].name
, sizeof zn
->zn_name
);
2073 zn
->zn_name
[sizeof zn
->zn_name
- 1] = '\0';
2074 fake_zones
[i
].func(&zi
->zi_count
, &zi
->zi_cur_size
,
2075 &zi
->zi_max_size
, &zi
->zi_elem_size
,
2076 &zi
->zi_alloc_size
, &zi
->zi_collectable
,
2077 &zi
->zi_exhaustible
);
2082 if (names
!= *namesp
) {
2086 used
= max_zones
* sizeof *names
;
2088 if (used
!= names_size
)
2089 bzero((char *) (names_addr
+ used
), names_size
- used
);
2091 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)names_addr
,
2092 (vm_map_size_t
)names_size
, TRUE
, ©
);
2093 assert(kr
== KERN_SUCCESS
);
2095 *namesp
= (zone_name_t
*) copy
;
2097 *namesCntp
= max_zones
;
2099 if (info
!= *infop
) {
2103 used
= max_zones
* sizeof *info
;
2105 if (used
!= info_size
)
2106 bzero((char *) (info_addr
+ used
), info_size
- used
);
2108 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)info_addr
,
2109 (vm_map_size_t
)info_size
, TRUE
, ©
);
2110 assert(kr
== KERN_SUCCESS
);
2112 *infop
= (zone_info_t
*) copy
;
2114 *infoCntp
= max_zones
;
2116 return KERN_SUCCESS
;
2119 extern unsigned int stack_total
;
2121 #if defined(__i386__) || defined (__x86_64__)
2122 extern unsigned int inuse_ptepages_count
;
2125 void zone_display_zprint()
2130 if(first_zone
!=NULL
) {
2131 the_zone
= first_zone
;
2132 for (i
= 0; i
< num_zones
; i
++) {
2133 if(the_zone
->cur_size
> (1024*1024)) {
2134 printf("%.20s:\t%lu\n",the_zone
->zone_name
,(uintptr_t)the_zone
->cur_size
);
2137 if(the_zone
->next_zone
== NULL
) {
2141 the_zone
= the_zone
->next_zone
;
2145 printf("Kernel Stacks:\t%lu\n",(uintptr_t)(kernel_stack_size
* stack_total
));
2147 #if defined(__i386__) || defined (__x86_64__)
2148 printf("PageTables:\t%lu\n",(uintptr_t)(PAGE_SIZE
* inuse_ptepages_count
));
2151 printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total
);
2157 #include <ddb/db_command.h>
2158 #include <ddb/db_output.h>
2159 #include <kern/kern_print.h>
2161 const char *zone_labels
=
2162 "ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME";
2169 void db_zone_check_active(
2171 void db_zone_print_active(
2173 #endif /* ZONE_DEBUG */
2174 void db_zone_print_free(
2184 db_printf("%8x %8x %8x %8x %6x %8x %s ",
2185 addr
, zcopy
.count
, zcopy
.cur_size
,
2186 zcopy
.max_size
, zcopy
.elem_size
,
2187 zcopy
.alloc_size
, zcopy
.zone_name
);
2188 if (zcopy
.exhaustible
)
2190 if (zcopy
.collectable
)
2192 if (zcopy
.expandable
)
2199 db_show_one_zone(db_expr_t addr
, boolean_t have_addr
,
2200 __unused db_expr_t count
, __unused
char *modif
)
2202 struct zone
*z
= (zone_t
)((char *)0 + addr
);
2204 if (z
== ZONE_NULL
|| !have_addr
){
2205 db_error("No Zone\n");
2209 db_printf("%s\n", zone_labels
);
2215 db_show_all_zones(__unused db_expr_t addr
, boolean_t have_addr
, db_expr_t count
,
2216 __unused
char *modif
)
2222 * Don't risk hanging by unconditionally locking,
2223 * risk of incoherent data is small (zones aren't freed).
2225 have_addr
= simple_lock_try(&all_zones_lock
);
2229 simple_unlock(&all_zones_lock
);
2232 db_printf("%s\n", zone_labels
);
2233 for ( ; count
> 0; count
--) {
2235 db_error("Mangled Zone List\n");
2239 total
+= z
->cur_size
,
2241 have_addr
= simple_lock_try(&all_zones_lock
);
2244 simple_unlock(&all_zones_lock
);
2247 db_printf("\nTotal %8x", total
);
2248 db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats
.pgs_freed
);
2253 db_zone_check_active(
2259 if (!zone_debug_enabled(zone
) || !zone_check
)
2261 tmp_elem
= queue_first(&zone
->active_zones
);
2262 while (count
< zone
->count
) {
2264 if (tmp_elem
== 0) {
2265 printf("unexpected zero element, zone=%p, count=%d\n",
2270 if (queue_end(tmp_elem
, &zone
->active_zones
)) {
2271 printf("unexpected queue_end, zone=%p, count=%d\n",
2276 tmp_elem
= queue_next(tmp_elem
);
2278 if (!queue_end(tmp_elem
, &zone
->active_zones
)) {
2279 printf("not at queue_end, zone=%p, tmp_elem=%p\n",
2286 db_zone_print_active(
2292 if (!zone_debug_enabled(zone
)) {
2293 printf("zone %p debug not enabled\n", zone
);
2297 printf("zone_check FALSE\n");
2301 printf("zone %p, active elements %d\n", zone
, zone
->count
);
2302 printf("active list:\n");
2303 tmp_elem
= queue_first(&zone
->active_zones
);
2304 while (count
< zone
->count
) {
2305 printf(" %p", tmp_elem
);
2307 if ((count
% 6) == 0)
2309 if (tmp_elem
== 0) {
2310 printf("\nunexpected zero element, count=%d\n", count
);
2313 if (queue_end(tmp_elem
, &zone
->active_zones
)) {
2314 printf("\nunexpected queue_end, count=%d\n", count
);
2317 tmp_elem
= queue_next(tmp_elem
);
2319 if (!queue_end(tmp_elem
, &zone
->active_zones
))
2320 printf("\nnot at queue_end, tmp_elem=%p\n", tmp_elem
);
2324 #endif /* ZONE_DEBUG */
2334 freecount
= zone_free_count(zone
);
2335 printf("zone %p, free elements %d\n", zone
, freecount
);
2336 printf("free list:\n");
2337 elem
= zone
->free_elements
;
2338 while (count
< freecount
) {
2339 printf(" 0x%x", elem
);
2341 if ((count
% 6) == 0)
2344 printf("\nunexpected zero element, count=%d\n", count
);
2347 elem
= *((vm_offset_t
*)elem
);
2350 printf("\nnot at end of free list, elem=0x%x\n", elem
);
2355 #endif /* MACH_KDB */
2360 /* should we care about locks here ? */
2368 char *elt
= (char *)prev
;
2370 if (!zone_debug_enabled(z
))
2372 elt
-= ZONE_DEBUG_OFFSET
;
2373 elt
= (char *) queue_next((queue_t
) elt
);
2374 if ((queue_t
) elt
== &z
->active_zones
)
2376 elt
+= ZONE_DEBUG_OFFSET
;
2386 if (!zone_debug_enabled(z
))
2388 if (queue_empty(&z
->active_zones
))
2390 elt
= (char *)queue_first(&z
->active_zones
);
2391 elt
+= ZONE_DEBUG_OFFSET
;
2396 * Second arg controls how many zone elements are printed:
2399 * n, n > 0 => last n on active list
2408 boolean_t print
= (tail
!= 0);
2412 if (z
->count
< tail
)
2414 tail
= z
->count
- tail
;
2415 for (elt
= first_element(z
); elt
; elt
= next_element(z
, elt
)) {
2416 if (print
&& tail
<= count
)
2417 db_printf("%8x\n", elt
);
2420 assert(count
== z
->count
);
2423 #endif /* MACH_KDB */
2425 #define zone_in_use(z) ( z->count || z->free_elements )
2431 if (zone_debug_enabled(z
) || zone_in_use(z
) ||
2432 z
->alloc_size
< (z
->elem_size
+ ZONE_DEBUG_OFFSET
))
2434 queue_init(&z
->active_zones
);
2435 z
->elem_size
+= ZONE_DEBUG_OFFSET
;
2442 if (!zone_debug_enabled(z
) || zone_in_use(z
))
2444 z
->elem_size
-= ZONE_DEBUG_OFFSET
;
2445 z
->active_zones
.next
= z
->active_zones
.prev
= NULL
;
2449 #endif /* ZONE_DEBUG */