/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#include <zone_debug.h>
#include <zone_alias_addr.h>
-#include <norma_vm.h>
-#include <mach_kdb.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
+#include <mach/vm_map.h>
#include <kern/kern_types.h>
#include <kern/assert.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
+#include <pexpert/pexpert.h>
+
#include <machine/machparam.h>
#include <libkern/OSDebug.h>
+#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
/*
* Zone Corruption Debugging
*
- * We provide three methods to detect use of a zone element after it's been freed. These
- * checks are enabled by specifying "-zc" and/or "-zp" in the boot-args:
+ * We use three methods to detect use of a zone element after it's been freed. These
+ * checks are enabled for every N'th element (counted per-zone) by specifying
+ * "zp-factor=N" as a boot-arg. To turn this feature off, set "zp-factor=0" or "-no-zp".
+ *
+ * (1) Range-check the free-list "next" pointer for sanity.
+ * (2) Store the pointer in two different words, one at the beginning of the freed element
+ * and one at the end, and compare them against each other when re-using the element,
+ * to detect modifications.
+ * (3) Poison the freed memory by overwriting it with 0xdeadbeef, and check it when the
+ * memory is being reused to make sure it is still poisoned.
+ *
+ * As a result, each element (that is large enough to hold this data inside) must be marked
+ * as either "ZP_POISONED" or "ZP_NOT_POISONED" in the first integer within the would-be
+ * poisoned segment after the first free-list pointer.
+ *
+ * Performance slowdown is proportional to the frequency with which you check
+ * (as would be expected), with a 4-5% hit around N=1, down to ~0.3% at N=16 and just
+ * "noise" at N=32 and higher. You can expect to find a 100% reproducible
+ * bug in an average of N tries, with a standard deviation of about N, but you will probably
+ * want to set "zp-factor=1" or "-zp" if you are attempting to reproduce a known bug.
*
- * (1) Range-check the free-list "next" ptr for sanity.
- * (2) Store the ptr in two different words, and compare them against
- * each other when re-using the zone element, to detect modifications.
- * (3) poison the freed memory by overwriting it with 0xdeadbeef.
*
- * The first two checks are fairly light weight and are enabled by specifying "-zc"
- * in the boot-args. If you want more aggressive checking for use-after-free bugs
- * and you don't mind the additional overhead, then turn on poisoning by adding
- * "-zp" to the boot-args in addition to "-zc". If you specify -zp without -zc,
- * it still poisons the memory when it's freed, but doesn't check if the memory
- * has been altered later when it's reallocated.
+ * Zone corruption logging
+ *
+ * You can also track where corruptions come from by using the boot-arguments:
+ * "zlog=<zone name to log> -zc". Search for "Zone corruption logging" later in this
+ * document for more implementation and usage information.
+ */
+
+#define ZP_POISON 0xdeadbeef
+#define ZP_POISONED 0xfeedface
+#define ZP_NOT_POISONED 0xbaddecaf
+
+#if CONFIG_EMBEDDED
+ #define ZP_DEFAULT_SAMPLING_FACTOR 0
+#else /* CONFIG_EMBEDDED */
+ #define ZP_DEFAULT_SAMPLING_FACTOR 16
+#endif /* CONFIG_EMBEDDED */
+
+uint32_t free_check_sample_factor = 0; /* set by zp-factor=N boot arg */
+boolean_t corruption_debug_flag = FALSE; /* enabled by "-zc" boot-arg */
+
+/*
+ * Zone checking helper macro.
+ */
+#define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
+
+/*
+ * Frees the specified element, which is within the specified zone. If this
+ * element should be poisoned and its free list checker should be set, both are
+ * done here. These checks will only be enabled if the element size is at least
+ * large enough to hold two vm_offset_t's and one uint32_t (to enable both types
+ * of checks).
*/
+static inline void
+free_to_zone(zone_t zone, void *elem) {
+	/* get the index of the first uint32_t beyond the 'next' pointer */
+	unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);
+
+	/* should we run checks on this piece of memory? */
+	if (free_check_sample_factor != 0 &&
+	    zone->free_check_count++ % free_check_sample_factor == 0 &&
+	    zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
+		/* restart the per-zone sample counter at 1 so the next check
+		 * happens free_check_sample_factor frees from now */
+		zone->free_check_count = 1;
+		/* tag the element so alloc_from_zone() knows to verify it */
+		((uint32_t *) elem)[i] = ZP_POISONED;
+		for (i++; i < zone->elem_size / sizeof(uint32_t); i++) {
+			((uint32_t *) elem)[i] = ZP_POISON;
+		}
+		/* the loop above poisoned the last word too; overwrite it with
+		 * a backup copy of the free-list pointer, which
+		 * alloc_from_zone() will compare against word 0 */
+		((vm_offset_t *) elem)[((zone->elem_size)/sizeof(vm_offset_t))-1] = zone->free_elements;
+	} else {
+		((uint32_t *) elem)[i] = ZP_NOT_POISONED;
+	}
+
+	/* maintain free list and decrement number of active objects in zone */
+	/* NOTE(review): caller is expected to hold the zone lock -- see the
+	 * lock_zone()/unlock_zone() bracket around the call in zcram() */
+	((vm_offset_t *) elem)[0] = zone->free_elements;
+	zone->free_elements = (vm_offset_t) elem;
+	zone->count--;
+}
+
+/*
+ * Allocates an element from the specified zone, storing its address in the
+ * return arg. This function will look for corruptions revealed through zone
+ * poisoning and free list checks.
+ */
+static inline void
+alloc_from_zone(zone_t zone, void **ret) {
+	void *elem = (void *) zone->free_elements;
+	if (elem != NULL) {
+		/* get the index of the first uint32_t beyond the 'next' pointer */
+		unsigned int i = sizeof(vm_offset_t) / sizeof(uint32_t);
+
+		/* first int in data section must be ZP_POISONED or ZP_NOT_POISONED */
+		if (((uint32_t *) elem)[i] == ZP_POISONED &&
+		    zone->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) {
+			/* check the free list pointers: word 0 must look like a
+			 * kernel pointer and must match the backup copy stored
+			 * in the element's last word by free_to_zone() */
+			if (!is_kernel_data_addr(((vm_offset_t *) elem)[0]) ||
+			    ((vm_offset_t *) elem)[0] !=
+			    ((vm_offset_t *) elem)[(zone->elem_size/sizeof(vm_offset_t))-1]) {
+				panic("a freed zone element has been modified in zone: %s",
+				      zone->zone_name);
+			}
+
+			/* check for poisoning in free space; the loop stops one
+			 * vm_offset_t short of the end, where the backup
+			 * free-list pointer (not poison) lives */
+			for (i++;
+			     i < zone->elem_size / sizeof(uint32_t) -
+			         sizeof(vm_offset_t) / sizeof(uint32_t);
+			     i++) {
+				if (((uint32_t *) elem)[i] != ZP_POISON) {
+					panic("a freed zone element has been modified in zone: %s",
+					      zone->zone_name);
+				}
+			}
+		} else if (((uint32_t *) elem)[i] != ZP_NOT_POISONED) {
+			panic("a freed zone element has been modified in zone: %s",
+			      zone->zone_name);
+		}
+
+		zone->count++;
+		zone->sum_count++;
+		/* pop the element off the free list */
+		zone->free_elements = ((vm_offset_t *) elem)[0];
+	}
+	/* *ret is NULL when the free list was empty; caller must refill */
+	*ret = elem;
+}
-boolean_t check_freed_element = FALSE; /* enabled by -zc in boot-args */
-boolean_t zfree_clear = FALSE; /* enabled by -zp in boot-args */
/*
* Fake zones for things that want to report via zprint but are not actually zones.
uint64_t *, int *, int *, int *);
};
-static struct fake_zone_info fake_zones[] = {
+static const struct fake_zone_info fake_zones[] = {
{
.name = "kernel_stacks",
.init = stack_fake_zone_init,
.query = stack_fake_zone_info,
},
-#if defined(__i386__) || defined (__x86_64__)
{
.name = "page_tables",
.init = pt_fake_zone_init,
.query = pt_fake_zone_info,
},
-#endif /* i386 */
{
.name = "kalloc.large",
.init = kalloc_fake_zone_init,
.query = kalloc_fake_zone_info,
},
};
-unsigned int num_fake_zones = sizeof(fake_zones)/sizeof(fake_zones[0]);
+static const unsigned int num_fake_zones =
+ sizeof (fake_zones) / sizeof (fake_zones[0]);
/*
* Zone info options
#define ZINFO_SLOTS 200 /* for now */
#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
-/*
- * Allocation helper macros
- */
-#define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
-
-#define ADD_TO_ZONE(zone, element) \
-MACRO_BEGIN \
- if (zfree_clear) \
- { unsigned int i; \
- for (i=0; \
- i < zone->elem_size/sizeof(uint32_t); \
- i++) \
- ((uint32_t *)(element))[i] = 0xdeadbeef; \
- } \
- *((vm_offset_t *)(element)) = (zone)->free_elements; \
- if (check_freed_element) { \
- if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
- ((vm_offset_t *)(element))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
- (zone)->free_elements; \
- } \
- (zone)->free_elements = (vm_offset_t) (element); \
- (zone)->count--; \
-MACRO_END
-
-#define REMOVE_FROM_ZONE(zone, ret, type) \
-MACRO_BEGIN \
- (ret) = (type) (zone)->free_elements; \
- if ((ret) != (type) 0) { \
- if (check_freed_element) { \
- if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0]) || \
- ((zone)->elem_size >= (2 * sizeof(vm_offset_t)) && \
- ((vm_offset_t *)(ret))[((zone)->elem_size/sizeof(vm_offset_t))-1] != \
- ((vm_offset_t *)(ret))[0])) \
- panic("a freed zone element has been modified");\
- if (zfree_clear) { \
- unsigned int ii; \
- for (ii = sizeof(vm_offset_t) / sizeof(uint32_t); \
- ii < (zone)->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
- ii++) \
- if (((uint32_t *)(ret))[ii] != (uint32_t)0xdeadbeef) \
- panic("a freed zone element has been modified");\
- } \
- } \
- (zone)->count++; \
- (zone)->sum_count++; \
- (zone)->free_elements = *((vm_offset_t *)(ret)); \
- } \
-MACRO_END
-
-#if ZONE_DEBUG
-#define zone_debug_enabled(z) z->active_zones.next
-#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
-#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
-#endif /* ZONE_DEBUG */
-
/*
- * Support for garbage collection of unused zone pages:
+ * Support for garbage collection of unused zone pages
+ *
+ * The kernel virtually allocates the "zone map" submap of the kernel
+ * map. When an individual zone needs more storage, memory is allocated
+ * out of the zone map, and the two-level "zone_page_table" is
+ * on-demand expanded so that it has entries for those pages.
+ * zone_page_init()/zone_page_alloc() initialize "alloc_count"
+ * to the number of zone elements that occupy the zone page (which may
+ * be a minimum of 1, including if a zone element spans multiple
+ * pages).
+ *
+ * Asynchronously, the zone_gc() logic attempts to walk zone free
+ * lists to see if all the elements on a zone page are free. If
+ * "collect_count" (which it increments during the scan) matches
+ * "alloc_count", the zone page is a candidate for collection and the
+ * physical page is returned to the VM system. During this process, the
+ * first word of the zone page is re-used to maintain a linked list of
+ * to-be-collected zone pages.
*/
+typedef uint32_t zone_page_index_t;
+#define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)
struct zone_page_table_entry {
- struct zone_page_table_entry *link;
- short alloc_count;
- short collect_count;
+ volatile uint16_t alloc_count;
+ volatile uint16_t collect_count;
};
+#define ZONE_PAGE_USED 0
+#define ZONE_PAGE_UNUSED 0xffff
+
/* Forwards */
void zone_page_init(
vm_offset_t addr,
- vm_size_t size,
- int value);
+ vm_size_t size);
void zone_page_alloc(
vm_offset_t addr,
vm_size_t size);
void zone_page_free_element(
- struct zone_page_table_entry **free_pages,
+ zone_page_index_t *free_page_head,
+ zone_page_index_t *free_page_tail,
vm_offset_t addr,
vm_size_t size);
void zone_display_zprint( void );
-#if ZONE_DEBUG && MACH_KDB
-int zone_count(
- zone_t z,
- int tail);
-#endif /* ZONE_DEBUG && MACH_KDB */
-
vm_map_t zone_map = VM_MAP_NULL;
zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
/*
* The VM system gives us an initial chunk of memory.
* It has to be big enough to allocate the zone_zone
+ * and everything up through the pmap zone.
*/
vm_offset_t zdata;
vm_size_t zdata_size;
-#define lock_zone(zone) \
-MACRO_BEGIN \
- lck_mtx_lock_spin(&(zone)->lock); \
-MACRO_END
-
-#define unlock_zone(zone) \
-MACRO_BEGIN \
- lck_mtx_unlock(&(zone)->lock); \
-MACRO_END
-
#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
#define zone_sleep(zone) \
(void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN, (event_t)(zone), THREAD_UNINT);
#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
-kern_return_t zget_space(
- zone_t zone,
- vm_offset_t size,
- vm_offset_t *result);
-
-decl_simple_lock_data(,zget_space_lock)
-vm_offset_t zalloc_next_space;
-vm_offset_t zalloc_end_of_space;
-vm_size_t zalloc_wasted_space;
-
/*
* Garbage collection map information
*/
-struct zone_page_table_entry * zone_page_table;
+#define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
+struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
+vm_size_t zone_page_table_used_size;
vm_offset_t zone_map_min_address;
vm_offset_t zone_map_max_address;
unsigned int zone_pages;
+unsigned int zone_page_table_second_level_size; /* power of 2 */
+unsigned int zone_page_table_second_level_shift_amount;
+
+#define zone_page_table_first_level_slot(x) ((x) >> zone_page_table_second_level_shift_amount)
+#define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))
+
+void zone_page_table_expand(zone_page_index_t pindex);
+struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
/*
* Exclude more than one concurrent garbage collection
lck_grp_attr_t zone_lck_grp_attr;
lck_mtx_ext_t zone_lck_ext;
-
#if !ZONE_ALIAS_ADDR
#define from_zone_map(addr, size) \
((vm_offset_t)(addr) >= zone_map_min_address && \
((vm_offset_t)(addr) + size -1) < zone_map_max_address)
#else
#define from_zone_map(addr, size) \
- ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) >= zone_map_min_address && \
- ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) + size -1) < zone_map_max_address)
+ ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) >= zone_map_min_address && \
+ ((vm_offset_t)(zone_virtual_addr((vm_map_address_t)(uintptr_t)addr)) + size -1) < zone_map_max_address)
#endif
-#define ZONE_PAGE_USED 0
-#define ZONE_PAGE_UNUSED -1
-
-
/*
* Protects first_zone, last_zone, num_zones,
* and the next_zone field of zones.
* records since going much larger than this tends to make the system unresponsive and unbootable on small
* memory configurations. The default value is 4000 records.
*/
+
#if defined(__LP64__)
-#define ZRECORDS_MAX 16000 /* Max records allowed in the log */
+#define ZRECORDS_MAX 128000 /* Max records allowed in the log */
#else
#define ZRECORDS_MAX 8000 /* Max records allowed in the log */
#endif
/*
* The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
- * allocations made by the zone allocator. Every z_sample_factor allocations in each zone, we capture a
+ * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
* backtrace. Every free, we examine the table and determine if the allocation was being tracked,
* and stop tracking it if it was being tracked.
*
boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */
vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
-unsigned int z_sample_factor = 1000; /* Allocations per sample attempt */
+unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */
/*
* Counters for allocation statistics.
};
/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
-#define ZLEAK_ALLOCATION_MAP_NUM 16384
-#define ZLEAK_TRACE_MAP_NUM 8192
-
-uint32_t zleak_alloc_buckets = ZLEAK_ALLOCATION_MAP_NUM;
-uint32_t zleak_trace_buckets = ZLEAK_TRACE_MAP_NUM;
+uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
+uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;
vm_size_t zleak_max_zonemap_size;
struct ztrace* top_ztrace;
/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
-static lck_mtx_t zleak_lock;
+static lck_spin_t zleak_lock;
static lck_attr_t zleak_lock_attr;
static lck_grp_t zleak_lock_grp;
static lck_grp_attr_t zleak_lock_grp_attr;
zleak_global_tracking_threshold = max_zonemap_size / 2;
zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;
+#if CONFIG_EMBEDDED
+ if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) {
+ zleak_enable_flag = TRUE;
+ printf("zone leak detection enabled\n");
+ } else {
+ zleak_enable_flag = FALSE;
+ printf("zone leak detection disabled\n");
+ }
+#else /* CONFIG_EMBEDDED */
/* -zleakoff (flag to disable zone leak monitor) */
if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
zleak_enable_flag = FALSE;
zleak_enable_flag = TRUE;
printf("zone leak detection enabled\n");
}
+#endif /* CONFIG_EMBEDDED */
/* zfactor=XXXX (override how often to sample the zone allocator) */
- if (PE_parse_boot_argn("zfactor", &z_sample_factor, sizeof(z_sample_factor))) {
- printf("Zone leak factor override:%u\n", z_sample_factor);
+ if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
+ printf("Zone leak factor override:%u\n", zleak_sample_factor);
}
-
+
/* zleak-allocs=XXXX (override number of buckets in zallocations) */
if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
printf("Zone leak alloc buckets override:%u\n", zleak_alloc_buckets);
lck_grp_attr_setdefault(&zleak_lock_grp_attr);
lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
lck_attr_setdefault(&zleak_lock_attr);
- lck_mtx_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);
+ lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);
if (zleak_enable_flag) {
zleak_state = ZLEAK_STATE_ENABLED;
/*
* Support for kern.zleak.active sysctl - a simplified
- * simplified version of the zleak_state variable.
+ * version of the zleak_state variable.
*/
int
get_zleak_state(void)
}
/* Indicate that we're doing the setup */
- lck_mtx_lock_spin(&zleak_lock);
+ lck_spin_lock(&zleak_lock);
if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
return KERN_SUCCESS;
}
zleak_state |= ZLEAK_STATE_ACTIVATING;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
/* Allocate and zero tables */
retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size);
* the tables and setting the active flag, because the zfree()
* path accesses the table without a lock if we're active.
*/
- lck_mtx_lock_spin(&zleak_lock);
+ lck_spin_lock(&zleak_lock);
zleak_state |= ZLEAK_STATE_ACTIVE;
zleak_state &= ~ZLEAK_STATE_ACTIVATING;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
return 0;
* If we fail to allocate memory, don't further tax
* the system by trying again.
*/
- lck_mtx_lock_spin(&zleak_lock);
+ lck_spin_lock(&zleak_lock);
zleak_state |= ZLEAK_STATE_FAILED;
zleak_state &= ~ZLEAK_STATE_ACTIVATING;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
if (allocations_ptr != NULL) {
kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
vm_size_t allocation_size)
{
/* Quit if there's someone else modifying the hash tables */
- if (!lck_mtx_try_lock_spin(&zleak_lock)) {
+ if (!lck_spin_try_lock(&zleak_lock)) {
z_total_conflicts++;
return FALSE;
}
if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
z_alloc_collisions++;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
return TRUE;
}
trace->zt_collisions++;
z_trace_collisions++;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
return TRUE;
} else if (trace->zt_size > 0) {
/* Same trace, already added, so increment refcount */
if (top_ztrace->zt_size < trace->zt_size)
top_ztrace = trace;
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
return TRUE;
}
if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
/* if the allocation was the one, grab the lock, check again, then delete it */
- lck_mtx_lock_spin(&zleak_lock);
+ lck_spin_lock(&zleak_lock);
if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
struct ztrace *trace;
/* A NULL element means the allocation bucket is unused */
allocation->za_element = 0;
}
- lck_mtx_unlock(&zleak_lock);
+ lck_spin_unlock(&zleak_lock);
}
}
* It's fast because it does no checking to make sure there isn't bad data.
* Since it's only called from threads that we're going to keep executing,
* if there's bad data we were going to die eventually.
- * This seems to work for x86 and X86_64.
- * ARMTODO: Test it on ARM, I think it will work but I can't test it. If it works, remove the ifdef.
* If this function is inlined, it doesn't record the frame of the function it's inside.
* (because there's no stack frame!)
*/
+
uint32_t
fastbacktrace(uintptr_t* bt, uint32_t max_frames)
{
-#if defined(__x86_64__) || defined(__i386__)
uintptr_t* frameptr = NULL, *frameptr_next = NULL;
uintptr_t retaddr = 0;
uint32_t frame_index = 0, frames = 0;
uintptr_t kstackb, kstackt;
+ thread_t cthread = current_thread();
+
+ if (__improbable(cthread == NULL))
+ return 0;
- kstackb = current_thread()->kernel_stack;
+ kstackb = cthread->kernel_stack;
kstackt = kstackb + kernel_stack_size;
/* Load stack frame pointer (EBP on x86) into frameptr */
frameptr = __builtin_frame_address(0);
bt[frame_index++] = 0;
return frames;
-#else
- return OSBacktrace((void*)bt, max_frames);
-#endif
}
/* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
uintptr_t hash = 0;
uintptr_t mask = max_size - 1;
- while (--depth) {
- hash += bt[depth];
+ while (depth) {
+ hash += bt[--depth];
}
hash = hash_mix(hash) & mask;
zone_t z;
if (zone_zone == ZONE_NULL) {
- if (zget_space(NULL, sizeof(struct zone), (vm_offset_t *)&z)
- != KERN_SUCCESS)
- return(ZONE_NULL);
+
+ z = (struct zone *)zdata;
+ zdata += sizeof(*z);
+ zdata_size -= sizeof(*z);
} else
z = (zone_t) zalloc(zone_zone);
+
if (z == ZONE_NULL)
return(ZONE_NULL);
alloc = PAGE_SIZE;
else
#endif
- { vm_size_t best, waste; unsigned int i;
+#if defined(__LP64__)
+ if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
+#endif
+ {
+ vm_size_t best, waste; unsigned int i;
best = PAGE_SIZE;
waste = best % size;
z->async_pending = FALSE;
z->caller_acct = TRUE;
z->noencrypt = FALSE;
-
+ z->no_callout = FALSE;
+ z->async_prio_refill = FALSE;
+ z->gzalloc_exempt = FALSE;
+ z->alignment_required = FALSE;
+ z->prio_refill_watermark = 0;
+ z->zone_replenish_thread = NULL;
#if CONFIG_ZLEAKS
z->num_allocs = 0;
z->num_frees = 0;
/*
* Check if we should be logging this zone. If so, remember the zone pointer.
*/
-
- if (log_this_zone(z->zone_name, zone_name_to_log)) {
+ if (log_this_zone(z->zone_name, zone_name_to_log)) {
zone_of_interest = z;
}
* later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
* right now.
*/
-
if (zone_of_interest != NULL && zrecords == NULL && zlog_ready) {
if (kmem_alloc(kernel_map, (vm_offset_t *)&zrecords, log_records * sizeof(struct zrecord)) == KERN_SUCCESS) {
zone_of_interest = NULL;
}
}
-
+#if CONFIG_GZALLOC
+ gzalloc_zone_init(z);
+#endif
return(z);
}
+unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated;
+
+static void zone_replenish_thread(zone_t);
+
+/* High priority VM privileged thread used to asynchronously refill a designated
+ * zone, such as the reserved VM map entry zone.
+ */
+static void zone_replenish_thread(zone_t z) {
+	vm_size_t free_size;
+	/* mark ourselves VM-privileged so page allocation can dip into reserves */
+	current_thread()->options |= TH_OPT_VMPRIV;
+
+	for (;;) {
+		lock_zone(z);
+		assert(z->prio_refill_watermark != 0);
+		/* keep cramming memory until free space reaches the watermark */
+		while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
+			assert(z->doing_alloc == FALSE);
+			assert(z->async_prio_refill == TRUE);
+
+			unlock_zone(z);
+			int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+			vm_offset_t space, alloc_size;
+			kern_return_t kr;
+
+			/* under memory pressure, grow by the smallest amount (one
+			 * element rounded up to a page) instead of a full chunk */
+			if (vm_pool_low())
+				alloc_size = round_page(z->elem_size);
+			else
+				alloc_size = z->alloc_size;
+
+			if (z->noencrypt)
+				zflags |= KMA_NOENCRYPT;
+
+			kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+
+			if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+				if (alloc_size == PAGE_SIZE)
+					space = zone_alias_addr(space);
+#endif
+				zcram(z, space, alloc_size);
+			} else if (kr == KERN_RESOURCE_SHORTAGE) {
+				VM_PAGE_WAIT();
+			} else if (kr == KERN_NO_SPACE) {
+				/* zone map exhausted: fall back to the general kernel map */
+				kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags);
+				if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+					if (alloc_size == PAGE_SIZE)
+						space = zone_alias_addr(space);
+#endif
+					zcram(z, space, alloc_size);
+				} else {
+					/* no memory anywhere; back off for 100us and retry */
+					assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
+					thread_block(THREAD_CONTINUE_NULL);
+				}
+			}
+
+			lock_zone(z);
+			zone_replenish_loops++;
+		}
+
+		unlock_zone(z);
+		/* watermark satisfied: sleep until a consumer wakes us */
+		assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
+		thread_block(THREAD_CONTINUE_NULL);
+		zone_replenish_wakeups++;
+	}
+}
+
+/*
+ * Configure zone "z" for asynchronous refill: whenever its free space
+ * drops below low_water_mark elements, the dedicated replenish thread
+ * (started here) crams in more memory.
+ */
+void
+zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
+	z->prio_refill_watermark = low_water_mark;
+
+	z->async_prio_refill = TRUE;
+	/* publish watermark/flag before the replenish thread can observe them */
+	OSMemoryBarrier();
+	kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
+
+	if (tres != KERN_SUCCESS) {
+		panic("zone_prio_refill_configure, thread create: 0x%x", tres);
+	}
+
+	/* drop the creation reference; z->zone_replenish_thread keeps the pointer */
+	thread_deallocate(z->zone_replenish_thread);
+}
/*
* Cram the given memory into the specified zone.
*/
void
zcram(
- register zone_t zone,
- void *newaddr,
+ zone_t zone,
+ vm_offset_t newmem,
vm_size_t size)
{
- register vm_size_t elem_size;
- vm_offset_t newmem = (vm_offset_t) newaddr;
+ vm_size_t elem_size;
+ boolean_t from_zm = FALSE;
/* Basic sanity checks */
assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
elem_size = zone->elem_size;
+ if (from_zone_map(newmem, size))
+ from_zm = TRUE;
+
+ if (from_zm)
+ zone_page_init(newmem, size);
+
lock_zone(zone);
while (size >= elem_size) {
- ADD_TO_ZONE(zone, newmem);
- if (from_zone_map(newmem, elem_size))
+ free_to_zone(zone, (void *) newmem);
+ if (from_zm)
zone_page_alloc(newmem, elem_size);
- zone->count++; /* compensate for ADD_TO_ZONE */
+ zone->count++; /* compensate for free_to_zone */
size -= elem_size;
newmem += elem_size;
zone->cur_size += elem_size;
unlock_zone(zone);
}
-/*
- * Contiguous space allocator for non-paged zones. Allocates "size" amount
- * of memory from zone_map.
- */
-
-kern_return_t
-zget_space(
- zone_t zone,
- vm_offset_t size,
- vm_offset_t *result)
-{
- vm_offset_t new_space = 0;
- vm_size_t space_to_add = 0;
-
- simple_lock(&zget_space_lock);
- while ((zalloc_next_space + size) > zalloc_end_of_space) {
- /*
- * Add at least one page to allocation area.
- */
-
- space_to_add = round_page(size);
-
- if (new_space == 0) {
- kern_return_t retval;
- int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
-
- /*
- * Memory cannot be wired down while holding
- * any locks that the pageout daemon might
- * need to free up pages. [Making the zget_space
- * lock a complex lock does not help in this
- * regard.]
- *
- * Unlock and allocate memory. Because several
- * threads might try to do this at once, don't
- * use the memory before checking for available
- * space again.
- */
-
- simple_unlock(&zget_space_lock);
-
- if (zone == NULL || zone->noencrypt)
- zflags |= KMA_NOENCRYPT;
-
- retval = kernel_memory_allocate(zone_map, &new_space, space_to_add, 0, zflags);
- if (retval != KERN_SUCCESS)
- return(retval);
-#if ZONE_ALIAS_ADDR
- if (space_to_add == PAGE_SIZE)
- new_space = zone_alias_addr(new_space);
-#endif
- zone_page_init(new_space, space_to_add,
- ZONE_PAGE_USED);
- simple_lock(&zget_space_lock);
- continue;
- }
-
-
- /*
- * Memory was allocated in a previous iteration.
- *
- * Check whether the new region is contiguous
- * with the old one.
- */
-
- if (new_space != zalloc_end_of_space) {
- /*
- * Throw away the remainder of the
- * old space, and start a new one.
- */
- zalloc_wasted_space +=
- zalloc_end_of_space - zalloc_next_space;
- zalloc_next_space = new_space;
- }
-
- zalloc_end_of_space = new_space + space_to_add;
-
- new_space = 0;
- }
- *result = zalloc_next_space;
- zalloc_next_space += size;
- simple_unlock(&zget_space_lock);
-
- if (new_space != 0)
- kmem_free(zone_map, new_space, space_to_add);
-
- return(KERN_SUCCESS);
-}
-
/*
* Steal memory for the zone package. Called from
void
zone_steal_memory(void)
{
-	zdata_size = round_page(128*sizeof(struct zone));
-	zdata = (vm_offset_t)((char *)pmap_steal_memory(zdata_size) - (char *)0);
+#if CONFIG_GZALLOC
+	gzalloc_configure();
+#endif
+	/* Request enough early memory to get to the pmap zone */
+	zdata_size = 12 * sizeof(struct zone);
+	/* NOTE(review): zdata_size is deliberately not page-rounded; zinit()
+	 * carves early zone structures from zdata and zone_bootstrap() crams
+	 * the remainder into zone_zone -- confirm against those call sites */
+	zdata = (vm_offset_t)pmap_steal_memory(round_page(zdata_size));
}
return 0;
zone_change(zone, Z_FOREIGN, TRUE);
- zcram(zone, (void *)memory, size);
+ zcram(zone, memory, size);
nalloc = (int)(size / zone->elem_size);
assert(nalloc >= nelem);
void
zone_bootstrap(void)
{
- vm_size_t zone_zone_size;
- vm_offset_t zone_zone_space;
char temp_buf[16];
-#if 6094439
- /* enable zone checks by default, to try and catch offenders... */
-#if 0
- /* 7968354: turn "-zc" back off */
- check_freed_element = TRUE;
- /* 7995202: turn "-zp" back off */
- zfree_clear = TRUE;
-#endif
-
- /* ... but allow them to be turned off explicitely */
- if (PE_parse_boot_argn("-no_zc", temp_buf, sizeof (temp_buf))) {
- check_freed_element = FALSE;
- }
- if (PE_parse_boot_argn("-no_zp", temp_buf, sizeof (temp_buf))) {
- zfree_clear = FALSE;
+ if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof(temp_buf))) {
+ zinfo_per_task = TRUE;
}
-#endif
- /* see if we want freed zone element checking and/or poisoning */
- if (PE_parse_boot_argn("-zc", temp_buf, sizeof (temp_buf))) {
- check_freed_element = TRUE;
+ /* do we want corruption-style debugging with zlog? */
+ if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
+ corruption_debug_flag = TRUE;
}
+
+ /* Set up zone poisoning */
- if (PE_parse_boot_argn("-zp", temp_buf, sizeof (temp_buf))) {
- zfree_clear = TRUE;
+ free_check_sample_factor = ZP_DEFAULT_SAMPLING_FACTOR;
+
+ /* support for old zone poisoning boot-args */
+ if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
+ free_check_sample_factor = 1;
+ }
+ if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
+ free_check_sample_factor = 0;
}
- if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof (temp_buf))) {
- zinfo_per_task = TRUE;
+ /* zp-factor=XXXX (override how often to poison freed zone elements) */
+ if (PE_parse_boot_argn("zp-factor", &free_check_sample_factor, sizeof(free_check_sample_factor))) {
+ printf("Zone poisoning factor override:%u\n", free_check_sample_factor);
}
/*
last_zone = &first_zone;
num_zones = 0;
- simple_lock_init(&zget_space_lock, 0);
- zalloc_next_space = zdata;
- zalloc_end_of_space = zdata + zdata_size;
- zalloc_wasted_space = 0;
-
/* assertion: nobody else called zinit before us */
assert(zone_zone == ZONE_NULL);
zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
zone_change(zone_zone, Z_CALLERACCT, FALSE);
zone_change(zone_zone, Z_NOENCRYPT, TRUE);
- zone_zone_size = zalloc_end_of_space - zalloc_next_space;
- zget_space(NULL, zone_zone_size, &zone_zone_space);
- zcram(zone_zone, (void *)zone_zone_space, zone_zone_size);
+ zcram(zone_zone, zdata, zdata_size);
/* initialize fake zones and zone info if tracking by task */
if (zinfo_per_task) {
kern_return_t retval;
vm_offset_t zone_min;
vm_offset_t zone_max;
- vm_size_t zone_table_size;
retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
if (retval != KERN_SUCCESS)
panic("zone_init: kmem_suballoc failed");
zone_max = zone_min + round_page(max_zonemap_size);
+#if CONFIG_GZALLOC
+ gzalloc_init(max_zonemap_size);
+#endif
/*
* Setup garbage collection information:
*/
- zone_table_size = atop_kernel(zone_max - zone_min) *
- sizeof(struct zone_page_table_entry);
- if (kmem_alloc_kobject(zone_map, (vm_offset_t *) &zone_page_table,
- zone_table_size) != KERN_SUCCESS)
- panic("zone_init");
- zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
- zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
zone_map_min_address = zone_min;
zone_map_max_address = zone_max;
+
+ zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
+ zone_page_table_used_size = sizeof(zone_page_table);
+
+ zone_page_table_second_level_size = 1;
+ zone_page_table_second_level_shift_amount = 0;
+
+ /*
+ * Find the power of 2 for the second level that allows
+ * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
+ * slots.
+ */
+ while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
+ zone_page_table_second_level_size <<= 1;
+ zone_page_table_second_level_shift_amount++;
+ }
lck_grp_attr_setdefault(&zone_lck_grp_attr);
lck_grp_init(&zone_lck_grp, "zones", &zone_lck_grp_attr);
lck_attr_setdefault(&zone_lck_attr);
lck_mtx_init_ext(&zone_gc_lock, &zone_lck_ext, &zone_lck_grp, &zone_lck_attr);
- zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
-
#if CONFIG_ZLEAKS
/*
* Initialize the zone leak monitor
#endif /* CONFIG_ZLEAKS */
}
+/*
+ * zone_page_table_expand:
+ *
+ * Ensure the second-level entry array covering page index 'pindex'
+ * exists, allocating and installing it if the first-level slot is
+ * still NULL.  Concurrency-safe without a lock: installation uses a
+ * compare-and-swap, and the loser of a racing expansion simply frees
+ * its now-redundant array.  Panics if the backing allocation fails.
+ */
+void
+zone_page_table_expand(zone_page_index_t pindex)
+{
+ unsigned int first_index;
+ struct zone_page_table_entry * volatile * first_level_ptr;
+
+ assert(pindex < zone_pages);
+
+ first_index = zone_page_table_first_level_slot(pindex);
+ first_level_ptr = &zone_page_table[first_index];
+
+ if (*first_level_ptr == NULL) {
+ /*
+ * We were able to verify the old first-level slot
+ * had NULL, so attempt to populate it.
+ */
+
+ vm_offset_t second_level_array = 0;
+ vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
+ zone_page_index_t i;
+ struct zone_page_table_entry *entry_array;
+
+ if (kmem_alloc_kobject(zone_map, &second_level_array,
+ second_level_size) != KERN_SUCCESS) {
+ panic("zone_page_table_expand");
+ }
+
+ /*
+ * zone_gc() may scan the "zone_page_table" directly,
+ * so make sure any slots have a valid unused state.
+ */
+ entry_array = (struct zone_page_table_entry *)second_level_array;
+ for (i=0; i < zone_page_table_second_level_size; i++) {
+ entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
+ entry_array[i].collect_count = 0;
+ }
+
+ if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
+ /* Old slot was NULL, replaced with expanded level */
+ OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
+ } else {
+ /* Old slot was not NULL, someone else expanded first */
+ kmem_free(zone_map, second_level_array, second_level_size);
+ }
+ } else {
+ /* Old slot was not NULL, already been expanded */
+ }
+}
+
+/*
+ * zone_page_table_lookup:
+ *
+ * Return the zone_page_table_entry for page index 'pindex', or NULL
+ * if the second-level array covering that index has not yet been
+ * populated by zone_page_table_expand().
+ */
+struct zone_page_table_entry *
+zone_page_table_lookup(zone_page_index_t pindex)
+{
+ unsigned int first_index = zone_page_table_first_level_slot(pindex);
+ struct zone_page_table_entry *second_level = zone_page_table[first_index];
+
+ if (second_level) {
+ return &second_level[zone_page_table_second_level_slot(pindex)];
+ }
+
+ return NULL;
+}
+
extern volatile SInt32 kfree_nop_count;
#pragma mark -
register zone_t zone,
boolean_t canblock)
{
- vm_offset_t addr;
- kern_return_t retval;
+ vm_offset_t addr = 0;
+ kern_return_t retval;
uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
int numsaved = 0;
- int i;
+ int i;
+ boolean_t zone_replenish_wakeup = FALSE;
+ boolean_t did_gzalloc;
+ did_gzalloc = FALSE;
#if CONFIG_ZLEAKS
uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
#endif /* CONFIG_ZLEAKS */
assert(zone != ZONE_NULL);
-
+
+#if CONFIG_GZALLOC
+ addr = gzalloc_alloc(zone, canblock);
+ did_gzalloc = (addr != 0);
+#endif
+
lock_zone(zone);
/*
#if CONFIG_ZLEAKS
/*
- * Zone leak detection: capture a backtrace every z_sample_factor
+ * Zone leak detection: capture a backtrace every zleak_sample_factor
* allocations in this zone.
*/
- if (zone->zleak_on && (zone->zleak_capture++ % z_sample_factor == 0)) {
+ if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
zone->zleak_capture = 1;
/* Avoid backtracing twice if zone logging is on */
}
#endif /* CONFIG_ZLEAKS */
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ if (__probable(addr == 0))
+ alloc_from_zone(zone, (void **) &addr);
- while ((addr == 0) && canblock && (zone->doing_gc)) {
- zone->waiting = TRUE;
- zone_sleep(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ if (zone->async_prio_refill &&
+ ((zone->cur_size - (zone->count * zone->elem_size)) <
+ (zone->prio_refill_watermark * zone->elem_size))) {
+ zone_replenish_wakeup = TRUE;
+ zone_replenish_wakeups_initiated++;
}
while ((addr == 0) && canblock) {
*/
zone->waiting = TRUE;
zone_sleep(zone);
- }
- else {
+ } else if (zone->doing_gc) {
+ /* zone_gc() is running. Since we need an element
+ * from the free list that is currently being
+ * collected, set the waiting bit and try to
+ * interrupt the GC process, and try again
+ * when we obtain the lock.
+ */
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ } else {
+ vm_offset_t space;
+ vm_size_t alloc_size;
+ int retry = 0;
+
if ((zone->cur_size + zone->elem_size) >
zone->max_size) {
if (zone->exhaustible)
} else {
unlock_zone(zone);
+ panic_include_zprint = TRUE;
+#if CONFIG_ZLEAKS
+ if (zleak_state & ZLEAK_STATE_ACTIVE)
+ panic_include_ztrace = TRUE;
+#endif /* CONFIG_ZLEAKS */
panic("zalloc: zone \"%s\" empty.", zone->zone_name);
}
}
zone->doing_alloc = TRUE;
unlock_zone(zone);
- if (zone->collectable) {
- vm_offset_t space;
- vm_size_t alloc_size;
- int retry = 0;
-
- for (;;) {
- int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
-
- if (vm_pool_low() || retry >= 1)
- alloc_size =
- round_page(zone->elem_size);
- else
- alloc_size = zone->alloc_size;
-
- if (zone->noencrypt)
- zflags |= KMA_NOENCRYPT;
-
- retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
- if (retval == KERN_SUCCESS) {
+ for (;;) {
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+
+ if (vm_pool_low() || retry >= 1)
+ alloc_size =
+ round_page(zone->elem_size);
+ else
+ alloc_size = zone->alloc_size;
+
+ if (zone->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+ if (retval == KERN_SUCCESS) {
#if ZONE_ALIAS_ADDR
- if (alloc_size == PAGE_SIZE)
- space = zone_alias_addr(space);
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
#endif
-
+
#if CONFIG_ZLEAKS
- if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
- if (zone_map->size >= zleak_global_tracking_threshold) {
- kern_return_t kr;
-
- kr = zleak_activate();
- if (kr != KERN_SUCCESS) {
- printf("Failed to activate live zone leak debugging (%d).\n", kr);
- }
+ if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
+ if (zone_map->size >= zleak_global_tracking_threshold) {
+ kern_return_t kr;
+
+ kr = zleak_activate();
+ if (kr != KERN_SUCCESS) {
+ printf("Failed to activate live zone leak debugging (%d).\n", kr);
}
}
-
- if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
- if (zone->cur_size > zleak_per_zone_tracking_threshold) {
- zone->zleak_on = TRUE;
- }
- }
-#endif /* CONFIG_ZLEAKS */
-
- zone_page_init(space, alloc_size,
- ZONE_PAGE_USED);
- zcram(zone, (void *)space, alloc_size);
-
- break;
- } else if (retval != KERN_RESOURCE_SHORTAGE) {
- retry++;
-
- if (retry == 2) {
- zone_gc();
- printf("zalloc did gc\n");
- zone_display_zprint();
- }
- if (retry == 3) {
- panic_include_zprint = TRUE;
-#if CONFIG_ZLEAKS
- if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
- panic_include_ztrace = TRUE;
- }
-#endif /* CONFIG_ZLEAKS */
- /* TODO: Change this to something more descriptive, perhaps
- * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
- */
- panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
- }
- } else {
- break;
}
- }
- lock_zone(zone);
- zone->doing_alloc = FALSE;
- if (zone->waiting) {
- zone->waiting = FALSE;
- zone_wakeup(zone);
- }
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- if (addr == 0 &&
- retval == KERN_RESOURCE_SHORTAGE) {
- unlock_zone(zone);
- VM_PAGE_WAIT();
- lock_zone(zone);
- }
- } else {
- vm_offset_t space;
- retval = zget_space(zone, zone->elem_size, &space);
-
- lock_zone(zone);
- zone->doing_alloc = FALSE;
- if (zone->waiting) {
- zone->waiting = FALSE;
- thread_wakeup((event_t)zone);
- }
- if (retval == KERN_SUCCESS) {
- zone->count++;
- zone->sum_count++;
- zone->cur_size += zone->elem_size;
-#if ZONE_DEBUG
- if (zone_debug_enabled(zone)) {
- enqueue_tail(&zone->active_zones, (queue_entry_t)space);
+ if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
+ if (zone->cur_size > zleak_per_zone_tracking_threshold) {
+ zone->zleak_on = TRUE;
+ }
}
-#endif
- unlock_zone(zone);
- zone_page_alloc(space, zone->elem_size);
-#if ZONE_DEBUG
- if (zone_debug_enabled(zone))
- space += ZONE_DEBUG_OFFSET;
-#endif
- addr = space;
- goto success;
- }
- if (retval == KERN_RESOURCE_SHORTAGE) {
- unlock_zone(zone);
+#endif /* CONFIG_ZLEAKS */
- VM_PAGE_WAIT();
- lock_zone(zone);
- } else {
- /*
- * Equivalent to a 'retry fail 3', we're out of address space in the zone_map
- * (if it returned KERN_NO_SPACE)
- */
- if (retval == KERN_NO_SPACE) {
+ zcram(zone, space, alloc_size);
+
+ break;
+ } else if (retval != KERN_RESOURCE_SHORTAGE) {
+ retry++;
+
+ if (retry == 2) {
+ zone_gc(TRUE);
+ printf("zalloc did gc\n");
+ zone_display_zprint();
+ }
+ if (retry == 3) {
panic_include_zprint = TRUE;
#if CONFIG_ZLEAKS
- if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
+ if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
panic_include_ztrace = TRUE;
}
-#endif /* CONFIG_ZLEAKS */
+#endif /* CONFIG_ZLEAKS */
+ /* TODO: Change this to something more descriptive, perhaps
+ * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
+ */
+ panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
}
- panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
+ } else {
+ break;
}
}
+ lock_zone(zone);
+ zone->doing_alloc = FALSE;
+ if (zone->waiting) {
+ zone->waiting = FALSE;
+ zone_wakeup(zone);
+ }
+ alloc_from_zone(zone, (void **) &addr);
+ if (addr == 0 &&
+ retval == KERN_RESOURCE_SHORTAGE) {
+ unlock_zone(zone);
+
+ VM_PAGE_WAIT();
+ lock_zone(zone);
+ }
}
if (addr == 0)
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ alloc_from_zone(zone, (void **) &addr);
}
#if CONFIG_ZLEAKS
/* Sampling can fail if another sample is happening at the same time in a different zone. */
if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
/* If it failed, roll back the counter so we sample the next allocation instead. */
- zone->zleak_capture = z_sample_factor;
+ zone->zleak_capture = zleak_sample_factor;
}
}
#endif /* CONFIG_ZLEAKS */
* depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
* for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
* among all the records. So we look for an unused slot in the log and fill that in before overwriting
- * an old entry. When looking for a corrution however, it's better to have a chronological log of all
+ * an old entry. When looking for a corruption however, it's better to have a chronological log of all
* the allocations and frees done in the zone so that the history of operations for a specific zone
* element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
* oldest entry whenever a new one needs to be added.
*
- * The check_freed_element flag tells us what style of logging to do. It's set if we're supposed to be
+ * The corruption_debug_flag flag tells us what style of logging to do. It's set if we're supposed to be
* doing corruption style logging (indicated via -zc in the boot-args).
*/
- if (!check_freed_element && zrecords[zcurrent].z_element && zrecorded < log_records) {
+ if (!corruption_debug_flag && zrecords[zcurrent].z_element && zrecorded < log_records) {
/*
* If we get here, we're doing leak style logging and there's still some unused entries in
* starting at zcurrent and wrap-around if we reach the end of the buffer. If the buffer
* is already full, we just fall through and overwrite the element indexed by zcurrent.
*/
-
- for (i = zcurrent; i < log_records; i++) {
+
+ for (i = zcurrent; i < log_records; i++) {
if (zrecords[i].z_element == NULL) {
zcurrent = i;
goto empty_slot;
zcurrent = 0;
}
- if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
+ if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
zone->async_pending = TRUE;
unlock_zone(zone);
thread_call_enter(&zone->call_async_alloc);
lock_zone(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ alloc_from_zone(zone, (void **) &addr);
}
#if ZONE_DEBUG
- if (addr && zone_debug_enabled(zone)) {
+ if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
addr += ZONE_DEBUG_OFFSET;
}
unlock_zone(zone);
-success:
+ if (zone_replenish_wakeup)
+ thread_wakeup(&zone->zone_replenish_thread);
+
TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
if (addr) {
thread_t thr = current_thread();
task_t task;
zinfo_usage_t zinfo;
+ vm_size_t sz = zone->elem_size;
if (zone->caller_acct)
- thr->tkm_private.alloc += zone->elem_size;
+ ledger_credit(thr->t_ledger, task_ledgers.tkm_private, sz);
else
- thr->tkm_shared.alloc += zone->elem_size;
+ ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, sz);
if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(zone->elem_size, (int64_t *)&zinfo[zone->index].alloc);
+ OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
}
return((void *)addr);
}
unlock_zone(((zone_t)p0));
}
-
/*
* zget returns an element from the specified zone
* and immediately returns nothing if there is nothing there.
zget(
register zone_t zone)
{
- register vm_offset_t addr;
+ vm_offset_t addr;
#if CONFIG_ZLEAKS
uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
/*
* Zone leak detection: capture a backtrace
*/
- if (zone->zleak_on && (zone->zleak_capture++ % z_sample_factor == 0)) {
+ if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
zone->zleak_capture = 1;
zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
}
#endif /* CONFIG_ZLEAKS */
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ alloc_from_zone(zone, (void **) &addr);
#if ZONE_DEBUG
if (addr && zone_debug_enabled(zone)) {
enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
/* Sampling can fail if another sample is happening at the same time in a different zone. */
if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
/* If it failed, roll back the counter so we sample the next allocation instead. */
- zone->zleak_capture = z_sample_factor;
+ zone->zleak_capture = zleak_sample_factor;
}
}
void *addr)
{
vm_offset_t elem = (vm_offset_t) addr;
- void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
+ void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
int numsaved = 0;
+ boolean_t gzfreed = FALSE;
assert(zone != ZONE_NULL);
panic("zfree: freeing to zone_zone breaks zone_gc!");
#endif
+#if CONFIG_GZALLOC
+ gzfreed = gzalloc_free(zone, addr);
+#endif
+
TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
- if (zone->collectable && !zone->allows_foreign &&
- !from_zone_map(elem, zone->elem_size)) {
+ if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
+ !from_zone_map(elem, zone->elem_size))) {
#if MACH_ASSERT
panic("zfree: non-allocated memory in collectable zone!");
#endif
if (DO_LOGGING(zone)) {
int i;
- if (check_freed_element) {
+ if (corruption_debug_flag) {
/*
* We're logging to catch a corruption. Add a record of this zfree operation
#if ZONE_DEBUG
- if (zone_debug_enabled(zone)) {
+ if (!gzfreed && zone_debug_enabled(zone)) {
queue_t tmp_elem;
elem -= ZONE_DEBUG_OFFSET;
if (!pmap_kernel_va(this) || this == elem)
panic("zfree");
}
- ADD_TO_ZONE(zone, elem);
+
+ if (__probable(!gzfreed))
+ free_to_zone(zone, (void *) elem);
+
#if MACH_ASSERT
if (zone->count < 0)
panic("zfree: count < 0!");
thread_t thr = current_thread();
task_t task;
zinfo_usage_t zinfo;
+ vm_size_t sz = zone->elem_size;
if (zone->caller_acct)
- thr->tkm_private.free += zone->elem_size;
+ ledger_debit(thr->t_ledger, task_ledgers.tkm_private, sz);
else
- thr->tkm_shared.free += zone->elem_size;
+ ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, sz);
+
if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
- OSAddAtomic64(zone->elem_size,
- (int64_t *)&zinfo[zone->index].free);
+ OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
}
}
case Z_CALLERACCT:
zone->caller_acct = value;
break;
-#if MACH_ASSERT
+ case Z_NOCALLOUT:
+ zone->no_callout = value;
+ break;
+ case Z_GZALLOC_EXEMPT:
+ zone->gzalloc_exempt = value;
+#if CONFIG_GZALLOC
+ gzalloc_reconfigure(zone);
+#endif
+ break;
+ case Z_ALIGNMENT_REQUIRED:
+ zone->alignment_required = value;
+#if ZONE_DEBUG
+ zone_debug_disable(zone);
+#endif
+#if CONFIG_GZALLOC
+ gzalloc_reconfigure(zone);
+#endif
+ break;
default:
panic("Zone_change: Wrong Item Type!");
/* break; */
-#endif
}
}
return(free_count);
}
-/*
- * zprealloc preallocates wired memory, exanding the specified
- * zone to the specified size
- */
-void
-zprealloc(
- zone_t zone,
- vm_size_t size)
-{
- vm_offset_t addr;
-
- if (size != 0) {
- if (kmem_alloc_kobject(zone_map, &addr, size) != KERN_SUCCESS)
- panic("zprealloc");
- zone_page_init(addr, size, ZONE_PAGE_USED);
- zcram(zone, (void *)addr, size);
- }
-}
-
/*
* Zone garbage collection subroutines
*/
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_collectable");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
if (zp->collect_count == zp->alloc_count)
return (TRUE);
+ }
return (FALSE);
}
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_keep");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
zp->collect_count = 0;
+ }
}
void
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_collect");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
++zp->collect_count;
+ }
}
void
zone_page_init(
vm_offset_t addr,
- vm_size_t size,
- int value)
+ vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_init");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
- zp->alloc_count = value;
+ for (; i <= j; i++) {
+ /* make sure entry exists before marking unused */
+ zone_page_table_expand(i);
+
+ zp = zone_page_table_lookup(i);
+ assert(zp);
+ zp->alloc_count = ZONE_PAGE_UNUSED;
zp->collect_count = 0;
}
}
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_alloc");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
+ assert(zp);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
/*
- * Set alloc_count to (ZONE_PAGE_USED + 1) if
+ * Set alloc_count to ZONE_PAGE_USED if
* it was previously set to ZONE_PAGE_UNUSED.
*/
if (zp->alloc_count == ZONE_PAGE_UNUSED)
- zp->alloc_count = 1;
- else
- ++zp->alloc_count;
+ zp->alloc_count = ZONE_PAGE_USED;
+
+ ++zp->alloc_count;
}
}
void
zone_page_free_element(
- struct zone_page_table_entry **free_pages,
+ zone_page_index_t *free_page_head,
+ zone_page_index_t *free_page_tail,
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_free_element");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
if (zp->collect_count > 0)
--zp->collect_count;
if (--zp->alloc_count == 0) {
+ vm_address_t free_page_address;
+ vm_address_t prev_free_page_address;
+
zp->alloc_count = ZONE_PAGE_UNUSED;
zp->collect_count = 0;
- zp->link = *free_pages;
- *free_pages = zp;
+
+ /*
+ * This element was the last one on this page, re-use the page's
+ * storage for a page freelist
+ */
+ free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
+ *(zone_page_index_t *)free_page_address = ZONE_PAGE_INDEX_INVALID;
+
+ if (*free_page_head == ZONE_PAGE_INDEX_INVALID) {
+ *free_page_head = i;
+ *free_page_tail = i;
+ } else {
+ prev_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)(*free_page_tail));
+ *(zone_page_index_t *)prev_free_page_address = i;
+ *free_page_tail = i;
+ }
}
}
}
* Add a linked list of pages starting at base back into the zone
* free list. Tail points to the last element on the list.
*/
-
#define ADD_LIST_TO_ZONE(zone, base, tail) \
MACRO_BEGIN \
(tail)->next = (void *)((zone)->free_elements); \
- if (check_freed_element) { \
- if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
- ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
- (zone)->free_elements; \
+ if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
+ ((vm_offset_t *)(tail))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
+ (zone)->free_elements; \
} \
(zone)->free_elements = (unsigned long)(base); \
MACRO_END
/*
* Add an element to the chain pointed to by prev.
*/
-
-#define ADD_ELEMENT(zone, prev, elem) \
+#define ADD_ELEMENT(zone, prev, elem) \
MACRO_BEGIN \
(prev)->next = (elem); \
- if (check_freed_element) { \
- if ((zone)->elem_size >= (2 * sizeof(vm_offset_t))) \
- ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
- (vm_offset_t)(elem); \
- } \
+ if ((zone)->elem_size >= (2 * sizeof(vm_offset_t) + sizeof(uint32_t))) { \
+ ((vm_offset_t *)(prev))[((zone)->elem_size/sizeof(vm_offset_t))-1] = \
+ (vm_offset_t)(elem); \
+ } \
MACRO_END
struct {
* begins to run out of memory.
*/
void
-zone_gc(void)
+zone_gc(boolean_t all_zones)
{
unsigned int max_zones;
zone_t z;
unsigned int i;
- struct zone_page_table_entry *zp, *zone_free_pages;
+ zone_page_index_t zone_free_page_head;
+ zone_page_index_t zone_free_page_tail;
+ thread_t mythread = current_thread();
lck_mtx_lock(&zone_gc_lock);
z = first_zone;
simple_unlock(&all_zones_lock);
+
+ /*
+ * it's ok to allow eager kernel preemption while
+ * holding a zone lock since it's taken
+ * as a spin lock (which prevents preemption)
+ */
+ thread_set_eager_preempt(mythread);
+
#if MACH_ASSERT
- for (i = 0; i < zone_pages; i++)
- assert(zone_page_table[i].collect_count == 0);
+ for (i = 0; i < zone_pages; i++) {
+ struct zone_page_table_entry *zp;
+
+ zp = zone_page_table_lookup(i);
+ assert(!zp || (zp->collect_count == 0));
+ }
#endif /* MACH_ASSERT */
- zone_free_pages = NULL;
-
for (i = 0; i < max_zones; i++, z = z->next_zone) {
- unsigned int n, m;
- vm_size_t elt_size, size_freed;
+ unsigned int n, m;
+ vm_size_t elt_size, size_freed;
struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
+ int kmem_frees = 0;
assert(z != ZONE_NULL);
if (!z->collectable)
continue;
+ if (all_zones == FALSE && z->elem_size < PAGE_SIZE)
+ continue;
+
lock_zone(z);
elt_size = z->elem_size;
/*
- * Do a quick feasability check before we scan the zone:
+ * Do a quick feasibility check before we scan the zone:
* skip unless there is likelihood of getting pages back
* (i.e we need a whole allocation block's worth of free
* elements before we can garbage collect) and
prev = (void *)&scan;
elt = scan;
n = 0; tail = keep = NULL;
+
+ zone_free_page_head = ZONE_PAGE_INDEX_INVALID;
+ zone_free_page_tail = ZONE_PAGE_INDEX_INVALID;
+
+
while (elt != NULL) {
if (from_zone_map(elt, elt_size)) {
zone_page_collect((vm_offset_t)elt, elt_size);
if (++n >= 50) {
if (z->waiting == TRUE) {
+ /* z->waiting checked without lock held, rechecked below after locking */
lock_zone(z);
if (keep != NULL) {
ADD_LIST_TO_ZONE(z, keep, tail);
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
+
unlock_zone(z);
}
size_freed = 0;
elt = scan;
n = 0; tail = keep = NULL;
+
while (elt != NULL) {
if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
+ struct zone_free_element *next_elt = elt->next;
+
size_freed += elt_size;
- zone_page_free_element(&zone_free_pages,
- (vm_offset_t)elt, elt_size);
- elt = elt->next;
+ /*
+ * If this is the last allocation on the page(s),
+ * we may use their storage to maintain the linked
+ * list of free-able pages. So store elt->next because
+ * "elt" may be scribbled over.
+ */
+ zone_page_free_element(&zone_free_page_head, &zone_free_page_tail, (vm_offset_t)elt, elt_size);
+
+ elt = next_elt;
++zgc_stats.elems_freed;
}
zone_wakeup(z);
}
unlock_zone(z);
- }
- /*
- * Reclaim the pages we are freeing.
- */
- while ((zp = zone_free_pages) != NULL) {
- zone_free_pages = zp->link;
-#if ZONE_ALIAS_ADDR
- z = (zone_t)zone_virtual_addr((vm_map_address_t)z);
-#endif
- kmem_free(zone_map, zone_map_min_address + PAGE_SIZE *
- (zp - zone_page_table), PAGE_SIZE);
- ++zgc_stats.pgs_freed;
+ if (zone_free_page_head == ZONE_PAGE_INDEX_INVALID)
+ continue;
+
+ /*
+ * we don't want to allow eager kernel preemption while holding the
+ * various locks taken in the kmem_free path of execution
+ */
+ thread_clear_eager_preempt(mythread);
+
+ /*
+ * Reclaim the pages we are freeing.
+ */
+ while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
+ zone_page_index_t zind = zone_free_page_head;
+ vm_address_t free_page_address;
+ int page_count;
+
+ /*
+ * Use the first word of the page about to be freed to find the next free page
+ */
+ free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
+ zone_free_page_head = *(zone_page_index_t *)free_page_address;
+
+ page_count = 1;
+
+ while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
+ zone_page_index_t next_zind = zone_free_page_head;
+ vm_address_t next_free_page_address;
+
+ next_free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)next_zind);
+
+ if (next_free_page_address == (free_page_address - PAGE_SIZE)) {
+ free_page_address = next_free_page_address;
+ } else if (next_free_page_address != (free_page_address + (PAGE_SIZE * page_count)))
+ break;
+
+ zone_free_page_head = *(zone_page_index_t *)next_free_page_address;
+ page_count++;
+ }
+ kmem_free(zone_map, free_page_address, page_count * PAGE_SIZE);
+
+ zgc_stats.pgs_freed += page_count;
+
+ if (++kmem_frees == 32) {
+ thread_yield_internal(1);
+ kmem_frees = 0;
+ }
+ }
+ thread_set_eager_preempt(mythread);
}
+ thread_clear_eager_preempt(mythread);
lck_mtx_unlock(&zone_gc_lock);
+
}
+extern vm_offset_t kmapoff_kaddr;
+extern unsigned int kmapoff_pgcnt;
+
/*
* consider_zone_gc:
*
void
consider_zone_gc(boolean_t force)
{
+ boolean_t all_zones = FALSE;
+
+ if (kmapoff_kaddr != 0) {
+ /*
+ * One-time reclaim of kernel_map resources we allocated in
+ * early boot.
+ */
+ (void) vm_deallocate(kernel_map,
+ kmapoff_kaddr, kmapoff_pgcnt * PAGE_SIZE_64);
+ kmapoff_kaddr = 0;
+ }
if (zone_gc_allowed &&
(zone_gc_allowed_by_time_throttle ||
zone_gc_forced ||
force)) {
+ if (zone_gc_allowed_by_time_throttle == TRUE) {
+ zone_gc_allowed_by_time_throttle = FALSE;
+ all_zones = TRUE;
+ }
zone_gc_forced = FALSE;
- zone_gc_allowed_by_time_throttle = FALSE; /* reset periodically */
- zone_gc();
+
+ zone_gc(all_zones);
}
}
}
+#if CONFIG_TASK_ZONE_INFO
+
kern_return_t
task_zone_info(
task_t task,
return KERN_SUCCESS;
}
+#else /* CONFIG_TASK_ZONE_INFO */
+
+/*
+ * task_zone_info stub: per-task zone info accounting is compiled out
+ * (CONFIG_TASK_ZONE_INFO undefined), so this entry point always fails.
+ */
+kern_return_t
+task_zone_info(
+ __unused task_t task,
+ __unused mach_zone_name_array_t *namesp,
+ __unused mach_msg_type_number_t *namesCntp,
+ __unused task_zone_info_array_t *infop,
+ __unused mach_msg_type_number_t *infoCntp)
+{
+ return KERN_FAILURE;
+}
+
+#endif /* CONFIG_TASK_ZONE_INFO */
+
kern_return_t
mach_zone_info(
- host_t host,
+ host_priv_t host,
mach_zone_name_array_t *namesp,
mach_msg_type_number_t *namesCntp,
mach_zone_info_array_t *infop,
if (host == HOST_NULL)
return KERN_INVALID_HOST;
-
- num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];
+#if CONFIG_DEBUGGER_FOR_ZONE_INFO
+ if (!PE_i_can_has_debugger(NULL))
+ return KERN_INVALID_HOST;
+#endif
/*
* We assume that zones aren't freed once allocated.
*/
kern_return_t
host_zone_info(
- host_t host,
+ host_priv_t host,
zone_name_array_t *namesp,
mach_msg_type_number_t *namesCntp,
zone_info_array_t *infop,
if (host == HOST_NULL)
return KERN_INVALID_HOST;
+#if CONFIG_DEBUGGER_FOR_ZONE_INFO
+ if (!PE_i_can_has_debugger(NULL))
+ return KERN_INVALID_HOST;
+#endif
#if defined(__LP64__)
if (!thread_is_64bit(current_thread()))
return KERN_NOT_SUPPORTED;
#endif
- num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];
-
/*
* We assume that zones aren't freed once allocated.
* We won't pick up any zones that are allocated later.
return KERN_SUCCESS;
}
+/*
+ * mach_zone_force_gc:
+ *
+ * MIG entry point allowing user space to trigger an immediate, forced
+ * zone garbage collection via consider_zone_gc(TRUE).
+ *
+ * NOTE(review): unlike mach_zone_info()/host_zone_info() in this same
+ * change, this takes a plain host_t and has no debugger-entitlement
+ * gate, so any task holding a host port can force a GC — confirm this
+ * is intentional (it is at worst a performance, not a security, issue).
+ */
+kern_return_t
+mach_zone_force_gc(
+	host_t host)
+{
+
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	consider_zone_gc(TRUE);
+
+	return (KERN_SUCCESS);
+}
+
extern unsigned int stack_total;
extern unsigned long long stack_allocs;
printf("Kalloc.Large:\t%lu\n",(uintptr_t)kalloc_large_total);
}
-
-
-#if MACH_KDB
-#include <ddb/db_command.h>
-#include <ddb/db_output.h>
-#include <kern/kern_print.h>
-
-const char *zone_labels =
-"ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME";
-
-/* Forwards */
-void db_print_zone(
- zone_t addr);
-
-#if ZONE_DEBUG
-void db_zone_check_active(
- zone_t zone);
-void db_zone_print_active(
- zone_t zone);
-#endif /* ZONE_DEBUG */
-void db_zone_print_free(
- zone_t zone);
-void
-db_print_zone(
- zone_t addr)
-{
- struct zone zcopy;
-
- zcopy = *addr;
-
- db_printf("%8x %8x %8x %8x %6x %8x %s ",
- addr, zcopy.count, zcopy.cur_size,
- zcopy.max_size, zcopy.elem_size,
- zcopy.alloc_size, zcopy.zone_name);
- if (zcopy.exhaustible)
- db_printf("H");
- if (zcopy.collectable)
- db_printf("C");
- if (zcopy.expandable)
- db_printf("X");
- if (zcopy.caller_acct)
- db_printf("A");
- db_printf("\n");
-}
-
-/*ARGSUSED*/
-void
-db_show_one_zone(db_expr_t addr, boolean_t have_addr,
- __unused db_expr_t count, __unused char *modif)
-{
- struct zone *z = (zone_t)((char *)0 + addr);
-
- if (z == ZONE_NULL || !have_addr){
- db_error("No Zone\n");
- /*NOTREACHED*/
- }
-
- db_printf("%s\n", zone_labels);
- db_print_zone(z);
-}
-
-/*ARGSUSED*/
-void
-db_show_all_zones(__unused db_expr_t addr, boolean_t have_addr, db_expr_t count,
- __unused char *modif)
-{
- zone_t z;
- unsigned total = 0;
-
- /*
- * Don't risk hanging by unconditionally locking,
- * risk of incoherent data is small (zones aren't freed).
- */
- have_addr = simple_lock_try(&all_zones_lock);
- count = num_zones;
- z = first_zone;
- if (have_addr) {
- simple_unlock(&all_zones_lock);
- }
-
- db_printf("%s\n", zone_labels);
- for ( ; count > 0; count--) {
- if (!z) {
- db_error("Mangled Zone List\n");
- /*NOTREACHED*/
- }
- db_print_zone(z);
- total += z->cur_size,
-
- have_addr = simple_lock_try(&all_zones_lock);
- z = z->next_zone;
- if (have_addr) {
- simple_unlock(&all_zones_lock);
- }
- }
- db_printf("\nTotal %8x", total);
- db_printf("\n\nzone_gc() has reclaimed %d pages\n", zgc_stats.pgs_freed);
-}
-
-#if ZONE_DEBUG
-void
-db_zone_check_active(
- zone_t zone)
-{
- int count = 0;
- queue_t tmp_elem;
-
- if (!zone_debug_enabled(zone) || !zone_check)
- return;
- tmp_elem = queue_first(&zone->active_zones);
- while (count < zone->count) {
- count++;
- if (tmp_elem == 0) {
- printf("unexpected zero element, zone=%p, count=%d\n",
- zone, count);
- assert(FALSE);
- break;
- }
- if (queue_end(tmp_elem, &zone->active_zones)) {
- printf("unexpected queue_end, zone=%p, count=%d\n",
- zone, count);
- assert(FALSE);
- break;
- }
- tmp_elem = queue_next(tmp_elem);
- }
- if (!queue_end(tmp_elem, &zone->active_zones)) {
- printf("not at queue_end, zone=%p, tmp_elem=%p\n",
- zone, tmp_elem);
- assert(FALSE);
- }
-}
-
-void
-db_zone_print_active(
- zone_t zone)
-{
- int count = 0;
- queue_t tmp_elem;
-
- if (!zone_debug_enabled(zone)) {
- printf("zone %p debug not enabled\n", zone);
- return;
- }
- if (!zone_check) {
- printf("zone_check FALSE\n");
- return;
- }
-
- printf("zone %p, active elements %d\n", zone, zone->count);
- printf("active list:\n");
- tmp_elem = queue_first(&zone->active_zones);
- while (count < zone->count) {
- printf(" %p", tmp_elem);
- count++;
- if ((count % 6) == 0)
- printf("\n");
- if (tmp_elem == 0) {
- printf("\nunexpected zero element, count=%d\n", count);
- break;
- }
- if (queue_end(tmp_elem, &zone->active_zones)) {
- printf("\nunexpected queue_end, count=%d\n", count);
- break;
- }
- tmp_elem = queue_next(tmp_elem);
- }
- if (!queue_end(tmp_elem, &zone->active_zones))
- printf("\nnot at queue_end, tmp_elem=%p\n", tmp_elem);
- else
- printf("\n");
-}
-#endif /* ZONE_DEBUG */
-
-void
-db_zone_print_free(
- zone_t zone)
-{
- int count = 0;
- int freecount;
- vm_offset_t elem;
-
- freecount = zone_free_count(zone);
- printf("zone %p, free elements %d\n", zone, freecount);
- printf("free list:\n");
- elem = zone->free_elements;
- while (count < freecount) {
- printf(" 0x%x", elem);
- count++;
- if ((count % 6) == 0)
- printf("\n");
- if (elem == 0) {
- printf("\nunexpected zero element, count=%d\n", count);
- break;
- }
- elem = *((vm_offset_t *)elem);
- }
- if (elem != 0)
- printf("\nnot at end of free list, elem=0x%x\n", elem);
- else
- printf("\n");
-}
-
-#endif /* MACH_KDB */
-
-
#if ZONE_DEBUG
/* should we care about locks here ? */
-#if MACH_KDB
-void *
-next_element(
- zone_t z,
- void *prev)
-{
- char *elt = (char *)prev;
-
- if (!zone_debug_enabled(z))
- return(NULL);
- elt -= ZONE_DEBUG_OFFSET;
- elt = (char *) queue_next((queue_t) elt);
- if ((queue_t) elt == &z->active_zones)
- return(NULL);
- elt += ZONE_DEBUG_OFFSET;
- return(elt);
-}
-
-void *
-first_element(
- zone_t z)
-{
- char *elt;
-
- if (!zone_debug_enabled(z))
- return(NULL);
- if (queue_empty(&z->active_zones))
- return(NULL);
- elt = (char *)queue_first(&z->active_zones);
- elt += ZONE_DEBUG_OFFSET;
- return(elt);
-}
-
-/*
- * Second arg controls how many zone elements are printed:
- * 0 => none
- * n, n < 0 => all
- * n, n > 0 => last n on active list
- */
-int
-zone_count(
- zone_t z,
- int tail)
-{
- void *elt;
- int count = 0;
- boolean_t print = (tail != 0);
-
- if (tail < 0)
- tail = z->count;
- if (z->count < tail)
- tail = 0;
- tail = z->count - tail;
- for (elt = first_element(z); elt; elt = next_element(z, elt)) {
- if (print && tail <= count)
- db_printf("%8x\n", elt);
- count++;
- }
- assert(count == z->count);
- return(count);
-}
-#endif /* MACH_KDB */
-
#define zone_in_use(z) ( z->count || z->free_elements )
void