/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
+#include <mach/task_server.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <machine/machparam.h>
#include <libkern/OSDebug.h>
+#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
-#if defined(__ppc__)
-/* for fake zone stat routines */
-#include <ppc/savearea.h>
-#include <ppc/mappings.h>
-#endif
-
-
/*
* Zone Corruption Debugging
*
* each other when re-using the zone element, to detect modifications.
* (3) poison the freed memory by overwriting it with 0xdeadbeef.
*
- * The first two checks are farily light weight and are enabled by specifying "-zc"
+ * The first two checks are fairly light weight and are enabled by specifying "-zc"
* in the boot-args. If you want more aggressive checking for use-after-free bugs
* and you don't mind the additional overhead, then turn on poisoning by adding
* "-zp" to the boot-args in addition to "-zc". If you specify -zp without -zc,
boolean_t check_freed_element = FALSE; /* enabled by -zc in boot-args */
boolean_t zfree_clear = FALSE; /* enabled by -zp in boot-args */
+/*
+ * Fake zones for things that want to report via zprint but are not actually zones.
+ */
+struct fake_zone_info {
+ const char* name;
+ void (*init)(int);
+ void (*query)(int *,
+ vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
+ uint64_t *, int *, int *, int *);
+};
+
+static struct fake_zone_info fake_zones[] = {
+ {
+ .name = "kernel_stacks",
+ .init = stack_fake_zone_init,
+ .query = stack_fake_zone_info,
+ },
+#if defined(__i386__) || defined (__x86_64__)
+ {
+ .name = "page_tables",
+ .init = pt_fake_zone_init,
+ .query = pt_fake_zone_info,
+ },
+#endif /* i386 */
+ {
+ .name = "kalloc.large",
+ .init = kalloc_fake_zone_init,
+ .query = kalloc_fake_zone_info,
+ },
+};
+unsigned int num_fake_zones = sizeof(fake_zones)/sizeof(fake_zones[0]);
+
+/*
+ * Zone info options
+ */
+boolean_t zinfo_per_task = FALSE; /* enabled by -zinfop in boot-args */
+#define ZINFO_SLOTS 200 /* for now */
+#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
+
+/*
+ * Allocation helper macros
+ */
#define is_kernel_data_addr(a) (!(a) || ((a) >= vm_min_kernel_address && !((a) & 0x3)))
#define ADD_TO_ZONE(zone, element) \
if (zfree_clear) { \
unsigned int ii; \
for (ii = sizeof(vm_offset_t) / sizeof(uint32_t); \
- ii < zone->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
+ ii < (zone)->elem_size/sizeof(uint32_t) - sizeof(vm_offset_t) / sizeof(uint32_t); \
ii++) \
if (((uint32_t *)(ret))[ii] != (uint32_t)0xdeadbeef) \
panic("a freed zone element has been modified");\
} \
} \
(zone)->count++; \
+ (zone)->sum_count++; \
(zone)->free_elements = *((vm_offset_t *)(ret)); \
} \
MACRO_END
#endif /* ZONE_DEBUG */
/*
- * Support for garbage collection of unused zone pages:
+ * Support for garbage collection of unused zone pages
+ *
+ * The kernel virtually allocates the "zone map" submap of the kernel
+ * map. When an individual zone needs more storage, memory is allocated
+ * out of the zone map, and the two-level "zone_page_table" is
+ * on-demand expanded so that it has entries for those pages.
+ * zone_page_init()/zone_page_alloc() initialize "alloc_count"
+ * to the number of zone elements that occupy the zone page (at least
+ * 1, even when a single zone element spans multiple pages).
+ *
+ * Asynchronously, the zone_gc() logic attempts to walk zone free
+ * lists to see if all the elements on a zone page are free. If
+ * "collect_count" (which it increments during the scan) matches
+ * "alloc_count", the zone page is a candidate for collection and the
+ * physical page is returned to the VM system. During this process, the
+ * first word of the zone page is re-used to maintain a linked list of
+ * to-be-collected zone pages.
*/
+typedef uint32_t zone_page_index_t;
+#define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)
struct zone_page_table_entry {
- struct zone_page_table_entry *link;
- short alloc_count;
- short collect_count;
+ volatile uint16_t alloc_count;
+ volatile uint16_t collect_count;
};
+#define ZONE_PAGE_USED 0
+#define ZONE_PAGE_UNUSED 0xffff
+
/* Forwards */
void zone_page_init(
vm_offset_t addr,
- vm_size_t size,
- int value);
+ vm_size_t size);
void zone_page_alloc(
vm_offset_t addr,
vm_size_t size);
void zone_page_free_element(
- struct zone_page_table_entry **free_pages,
+ zone_page_index_t *free_page_list,
vm_offset_t addr,
vm_size_t size);
zone_t zone_zone = ZONE_NULL; /* the zone containing other zones */
+zone_t zinfo_zone = ZONE_NULL; /* zone of per-task zone info */
+
/*
* The VM system gives us an initial chunk of memory.
* It has to be big enough to allocate the zone_zone
+ * all the way through the pmap zone.
*/
vm_offset_t zdata;
#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
-kern_return_t zget_space(
- vm_offset_t size,
- vm_offset_t *result);
-
-decl_simple_lock_data(,zget_space_lock)
-vm_offset_t zalloc_next_space;
-vm_offset_t zalloc_end_of_space;
-vm_size_t zalloc_wasted_space;
-
/*
* Garbage collection map information
*/
-struct zone_page_table_entry * zone_page_table;
+#define ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE (32)
+struct zone_page_table_entry * volatile zone_page_table[ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE];
+vm_size_t zone_page_table_used_size;
vm_offset_t zone_map_min_address;
vm_offset_t zone_map_max_address;
unsigned int zone_pages;
+unsigned int zone_page_table_second_level_size; /* power of 2 */
+unsigned int zone_page_table_second_level_shift_amount;
+
+#define zone_page_table_first_level_slot(x) ((x) >> zone_page_table_second_level_shift_amount)
+#define zone_page_table_second_level_slot(x) ((x) & (zone_page_table_second_level_size - 1))
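+
+/*
+ * Worked example (illustrative numbers only): if
+ * zone_page_table_second_level_size were 1024 (shift amount 10), zone page
+ * index 5000 would map to first-level slot 4 (5000 >> 10) and second-level
+ * slot 904 (5000 & 1023).
+ */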
+
+void zone_page_table_expand(zone_page_index_t pindex);
+struct zone_page_table_entry *zone_page_table_lookup(zone_page_index_t pindex);
/*
* Exclude more than one concurrent garbage collection
((vm_offset_t)(zone_virtual_addr((vm_map_address_t)addr)) + size -1) < zone_map_max_address)
#endif
-#define ZONE_PAGE_USED 0
-#define ZONE_PAGE_UNUSED -1
-
-
/*
* Protects first_zone, last_zone, num_zones,
* and the next_zone field of zones.
boolean_t zone_gc_allowed = TRUE;
boolean_t zone_gc_forced = FALSE;
boolean_t panic_include_zprint = FALSE;
-unsigned zone_gc_last_tick = 0;
-unsigned zone_gc_max_rate = 0; /* in ticks */
+boolean_t zone_gc_allowed_by_time_throttle = TRUE;
/*
* Zone leak debugging code
* but one doesn't generally care about performance when tracking down a leak. The log is capped at 8000
* records since going much larger than this tends to make the system unresponsive and unbootable on small
* memory configurations. The default value is 4000 records.
- *
- * MAX_DEPTH configures how deep of a stack trace is taken on each zalloc in the zone of interrest. 15
- * levels is usually enough to get past all the layers of code in kalloc and IOKit and see who the actual
- * caller is up above these lower levels.
*/
-
+#if defined(__LP64__)
+#define ZRECORDS_MAX 16000 /* Max records allowed in the log */
+#else
#define ZRECORDS_MAX 8000 /* Max records allowed in the log */
+#endif
#define ZRECORDS_DEFAULT 4000 /* default records in log if zrecs is not specified in boot-args */
-#define MAX_DEPTH 15 /* number of levels of the stack trace to record */
/*
* Each record in the log contains a pointer to the zone element it refers to, a "time" number that allows
void *z_element; /* the element that was zalloc'ed or zfree'ed */
uint32_t z_opcode:1, /* whether it was a zalloc or zfree */
z_time:31; /* time index when operation was done */
- void *z_pc[MAX_DEPTH]; /* stack trace of caller */
+ void *z_pc[MAX_ZTRACE_DEPTH]; /* stack trace of caller */
};
/*
extern boolean_t zlog_ready;
+#if CONFIG_ZLEAKS
+#pragma mark -
+#pragma mark Zone Leak Detection
+
+/*
+ * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
+ * allocations made by the zone allocator. Every z_sample_factor allocations in each zone, we capture a
+ * backtrace. On every free, we examine the table to determine whether the allocation was being
+ * tracked and, if so, stop tracking it.
+ *
+ * We track the allocations in the zallocations hash table, which stores the address that was returned from
+ * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
+ * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
+ * backtraces - we don't store them more than once.
+ *
+ * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
+ * a large amount of virtual space.
+ */
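+
+/*
+ * Rough data flow, as implemented by zleak_log()/zleak_free() below:
+ * an address returned by zalloc is hashed (hashaddr) into zallocations[],
+ * whose entry records za_size and za_trace_index; za_trace_index selects
+ * the entry in ztraces[] whose zt_size accumulates the outstanding bytes
+ * for that backtrace. top_ztrace tracks the trace with the largest zt_size.
+ */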
+#define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */
+#define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */
+#define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */
+#define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */
+uint32_t zleak_state = 0; /* State of collection, as above */
+
+boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */
+vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
+vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
+unsigned int z_sample_factor = 1000; /* Allocations per sample attempt */
+
+/*
+ * Counters for allocation statistics.
+ */
+
+/* Times two active records want to occupy the same spot */
+unsigned int z_alloc_collisions = 0;
+unsigned int z_trace_collisions = 0;
+
+/* Times a new record lands on a spot previously occupied by a freed allocation */
+unsigned int z_alloc_overwrites = 0;
+unsigned int z_trace_overwrites = 0;
+
+/* Times a new alloc or trace is put into the hash table */
+unsigned int z_alloc_recorded = 0;
+unsigned int z_trace_recorded = 0;
+
+/* Times zleak_log returned false due to not being able to acquire the lock */
+unsigned int z_total_conflicts = 0;
+
+
+#pragma mark struct zallocation
+/*
+ * Structure for keeping track of an allocation
+ * An allocation bucket is in use if its element is not NULL
+ */
+struct zallocation {
+ uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
+ vm_size_t za_size; /* how much memory did this allocation take up? */
+ uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */
+ /* TODO: #if this out */
+ uint32_t za_hit_count; /* for determining effectiveness of hash function */
+};
+
+/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
+#define ZLEAK_ALLOCATION_MAP_NUM 16384
+#define ZLEAK_TRACE_MAP_NUM 8192
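+
+/*
+ * For example, with ZLEAK_ALLOCATION_MAP_NUM = 16384 the hash functions can
+ * reduce a value to a bucket index with "hash & (16384 - 1)" rather than the
+ * more expensive "hash % 16384"; see hashaddr()/hashbacktrace() below.
+ */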
+
+uint32_t zleak_alloc_buckets = ZLEAK_ALLOCATION_MAP_NUM;
+uint32_t zleak_trace_buckets = ZLEAK_TRACE_MAP_NUM;
+
+vm_size_t zleak_max_zonemap_size;
+
+/* Hashmaps of allocations and their corresponding traces */
+static struct zallocation* zallocations;
+static struct ztrace* ztraces;
+
+/* not static so that panic can see this, see kern/debug.c */
+struct ztrace* top_ztrace;
+
+/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
+static lck_mtx_t zleak_lock;
+static lck_attr_t zleak_lock_attr;
+static lck_grp_t zleak_lock_grp;
+static lck_grp_attr_t zleak_lock_grp_attr;
+
+/*
+ * Initializes the zone leak monitor. Called from zone_init()
+ */
+static void
+zleak_init(vm_size_t max_zonemap_size)
+{
+ char scratch_buf[16];
+ boolean_t zleak_enable_flag = FALSE;
+
+ zleak_max_zonemap_size = max_zonemap_size;
+ zleak_global_tracking_threshold = max_zonemap_size / 2;
+ zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;
+
+ /* -zleakoff (flag to disable zone leak monitor) */
+ if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
+ zleak_enable_flag = FALSE;
+ printf("zone leak detection disabled\n");
+ } else {
+ zleak_enable_flag = TRUE;
+ printf("zone leak detection enabled\n");
+ }
+
+ /* zfactor=XXXX (override how often to sample the zone allocator) */
+ if (PE_parse_boot_argn("zfactor", &z_sample_factor, sizeof(z_sample_factor))) {
+ printf("Zone leak factor override:%u\n", z_sample_factor);
+ }
+ /* zleak-allocs=XXXX (override number of buckets in zallocations) */
+ if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
+ printf("Zone leak alloc buckets override:%u\n", zleak_alloc_buckets);
+ /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
+ if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) {
+ printf("Override isn't a power of two, bad things might happen!\n");
+ }
+ }
+
+ /* zleak-traces=XXXX (override number of buckets in ztraces) */
+ if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
+ printf("Zone leak trace buckets override:%u\n", zleak_trace_buckets);
+ /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
+ if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) {
+ printf("Override isn't a power of two, bad things might happen!\n");
+ }
+ }
+
+ /* allocate the zleak_lock */
+ lck_grp_attr_setdefault(&zleak_lock_grp_attr);
+ lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr);
+ lck_attr_setdefault(&zleak_lock_attr);
+ lck_mtx_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr);
+
+ if (zleak_enable_flag) {
+ zleak_state = ZLEAK_STATE_ENABLED;
+ }
+}
+
+#if CONFIG_ZLEAKS
+
+/*
+ * Support for kern.zleak.active sysctl - a simplified
+ * version of the zleak_state variable.
+ */
+int
+get_zleak_state(void)
+{
+ if (zleak_state & ZLEAK_STATE_FAILED)
+ return (-1);
+ if (zleak_state & ZLEAK_STATE_ACTIVE)
+ return (1);
+ return (0);
+}
+
+#endif
+
+
+kern_return_t
+zleak_activate(void)
+{
+ kern_return_t retval;
+ vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
+ vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
+ void *allocations_ptr = NULL;
+ void *traces_ptr = NULL;
+
+ /* Only one thread attempts to activate at a time */
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ return KERN_SUCCESS;
+ }
+
+ /* Indicate that we're doing the setup */
+ lck_mtx_lock_spin(&zleak_lock);
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ lck_mtx_unlock(&zleak_lock);
+ return KERN_SUCCESS;
+ }
+
+ zleak_state |= ZLEAK_STATE_ACTIVATING;
+ lck_mtx_unlock(&zleak_lock);
+
+ /* Allocate and zero tables */
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ bzero(allocations_ptr, z_alloc_size);
+ bzero(traces_ptr, z_trace_size);
+
+ /* Everything's set. Install tables, mark active. */
+ zallocations = allocations_ptr;
+ ztraces = traces_ptr;
+
+ /*
+ * Initialize the top_ztrace to the first entry in ztraces,
+ * so we don't have to check for null in zleak_log
+ */
+ top_ztrace = &ztraces[0];
+
+ /*
+ * Note that we do need a barrier between installing
+ * the tables and setting the active flag, because the zfree()
+ * path accesses the table without a lock if we're active.
+ */
+ lck_mtx_lock_spin(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_ACTIVE;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_mtx_unlock(&zleak_lock);
+
+ return 0;
+
+fail:
+ /*
+ * If we fail to allocate memory, don't further tax
+ * the system by trying again.
+ */
+ lck_mtx_lock_spin(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_FAILED;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_mtx_unlock(&zleak_lock);
+
+ if (allocations_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
+ }
+
+ if (traces_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
+ }
+
+ return retval;
+}
+
+/*
+ * TODO: What about allocations that never get deallocated,
+ * especially ones with unique backtraces? Should we wait to record
+ * until after boot has completed?
+ * (How many persistent zallocs are there?)
+ */
+
+/*
+ * This function records the allocation in the allocations table,
+ * and stores the associated backtrace in the traces table
+ * (or just increments the refcount if the trace is already recorded).
+ * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
+ * the associated trace's refcount is decremented.
+ * If the trace slot is occupied by a different trace, it just returns.
+ * The refcount is incremented by the amount of memory the allocation consumes.
+ * The return value indicates whether to try again next time.
+ */
+static boolean_t
+zleak_log(uintptr_t* bt,
+ uintptr_t addr,
+ uint32_t depth,
+ vm_size_t allocation_size)
+{
+ /* Quit if there's someone else modifying the hash tables */
+ if (!lck_mtx_try_lock_spin(&zleak_lock)) {
+ z_total_conflicts++;
+ return FALSE;
+ }
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
+ struct ztrace* trace = &ztraces[trace_index];
+
+ allocation->za_hit_count++;
+ trace->zt_hit_count++;
+
+ /*
+ * If the allocation bucket we want to be in is occupied, and if the occupier
+ * has the same trace as us, just bail.
+ */
+ if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
+ z_alloc_collisions++;
+
+ lck_mtx_unlock(&zleak_lock);
+ return TRUE;
+ }
+
+ /* STEP 1: Store the backtrace in the traces array. */
+ /* A size of zero indicates that the trace bucket is free. */
+
+ if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) {
+ /*
+ * Different unique trace with same hash!
+ * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
+ * and get out of the way for later chances
+ */
+ trace->zt_collisions++;
+ z_trace_collisions++;
+
+ lck_mtx_unlock(&zleak_lock);
+ return TRUE;
+ } else if (trace->zt_size > 0) {
+ /* Same trace, already added, so increment refcount */
+ trace->zt_size += allocation_size;
+ } else {
+ /* Found an unused trace bucket, record the trace here! */
+ if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
+ z_trace_overwrites++;
+
+ z_trace_recorded++;
+ trace->zt_size = allocation_size;
+ memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) );
+
+ trace->zt_depth = depth;
+ trace->zt_collisions = 0;
+ }
+
+ /* STEP 2: Store the allocation record in the allocations array. */
+
+ if (allocation->za_element != (uintptr_t) 0) {
+ /*
+ * Straight up replace any allocation record that was there. We don't want to do the work
+ * to preserve the allocation entries that were there, because we only record a subset of the
+ * allocations anyways.
+ */
+
+ z_alloc_collisions++;
+
+ struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
+ /* Knock off old allocation's size, not the new allocation */
+ associated_trace->zt_size -= allocation->za_size;
+ } else if (allocation->za_trace_index != 0) {
+ /* Slot previously used but not currently in use */
+ z_alloc_overwrites++;
+ }
+
+ allocation->za_element = addr;
+ allocation->za_trace_index = trace_index;
+ allocation->za_size = allocation_size;
+
+ z_alloc_recorded++;
+
+ if (top_ztrace->zt_size < trace->zt_size)
+ top_ztrace = trace;
+
+ lck_mtx_unlock(&zleak_lock);
+ return TRUE;
+}
+
+/*
+ * Free the allocation record and release the stacktrace.
+ * This should be as fast as possible because it will be called for every free.
+ */
+static void
+zleak_free(uintptr_t addr,
+ vm_size_t allocation_size)
+{
+ if (addr == (uintptr_t) 0)
+ return;
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ /* Double-checked locking: check to find out if we're interested, lock, check to make
+ * sure it hasn't changed, then modify it, and release the lock.
+ */
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ /* if the allocation was the one, grab the lock, check again, then delete it */
+ lck_mtx_lock_spin(&zleak_lock);
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ struct ztrace *trace;
+
+ /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
+ if (allocation->za_size != allocation_size) {
+ panic("Freeing as size %lu memory that was allocated with size %lu\n",
+ (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
+ }
+
+ trace = &ztraces[allocation->za_trace_index];
+
+ /* size of 0 indicates trace bucket is unused */
+ if (trace->zt_size > 0) {
+ trace->zt_size -= allocation_size;
+ }
+
+ /* A NULL element means the allocation bucket is unused */
+ allocation->za_element = 0;
+ }
+ lck_mtx_unlock(&zleak_lock);
+ }
+}
+
+#endif /* CONFIG_ZLEAKS */
+
+/* These functions live outside of CONFIG_ZLEAKS because they are also used in
+ * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
+ */
+
+/*
+ * This function captures a backtrace from the current stack and
+ * returns the number of frames captured, limited by max_frames.
+ * It's fast because it does no checking to make sure there isn't bad data.
+ * Since it's only called from threads that we're going to keep executing,
+ * if there's bad data we were going to die eventually.
+ * This seems to work for x86 and X86_64.
+ * ARMTODO: Test it on ARM, I think it will work but I can't test it. If it works, remove the ifdef.
+ * If this function is inlined, it doesn't record the frame of the function it's inside.
+ * (because there's no stack frame!)
+ */
+uint32_t
+fastbacktrace(uintptr_t* bt, uint32_t max_frames)
+{
+#if defined(__x86_64__) || defined(__i386__)
+ uintptr_t* frameptr = NULL, *frameptr_next = NULL;
+ uintptr_t retaddr = 0;
+ uint32_t frame_index = 0, frames = 0;
+ uintptr_t kstackb, kstackt;
+
+ kstackb = current_thread()->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+
+ while (frameptr != NULL && frame_index < max_frames ) {
+ /* Next frame pointer is pointed to by the previous one */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
+ /* That also means the return address is worthless, so don't record it */
+ if (frameptr_next == NULL)
+ break;
+ /* Verify thread stack bounds */
+ if (((uintptr_t)frameptr_next > kstackt) || ((uintptr_t)frameptr_next < kstackb))
+ break;
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+ /* Store it in the backtrace array */
+ bt[frame_index++] = retaddr;
+
+ frameptr = frameptr_next;
+ }
+
+ /* Save the number of frames captured for return value */
+ frames = frame_index;
+
+ /* Fill in the rest of the backtrace with zeros */
+ while (frame_index < max_frames)
+ bt[frame_index++] = 0;
+
+ return frames;
+#else
+ return OSBacktrace((void*)bt, max_frames);
+#endif
+}
+
+/* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
+uintptr_t
+hash_mix(uintptr_t x)
+{
+#ifndef __LP64__
+ x += ~(x << 15);
+ x ^= (x >> 10);
+ x += (x << 3 );
+ x ^= (x >> 6 );
+ x += ~(x << 11);
+ x ^= (x >> 16);
+#else
+ x += ~(x << 32);
+ x ^= (x >> 22);
+ x += ~(x << 13);
+ x ^= (x >> 8 );
+ x += (x << 3 );
+ x ^= (x >> 15);
+ x += ~(x << 27);
+ x ^= (x >> 31);
+#endif
+ return x;
+}
+
+uint32_t
+hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
+{
+
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ while (--depth) {
+ hash += bt[depth];
+ }
+
+ hash = hash_mix(hash) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/*
+ * TODO: Determine how well distributed this is
+ * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF, which is a great bitmask
+ */
+uint32_t
+hashaddr(uintptr_t pt, uint32_t max_size)
+{
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ hash = hash_mix(pt) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/* End of all leak-detection code */
+#pragma mark -
+
/*
* zinit initializes a new zone. The zone data structures themselves
* are stored in a zone, which is initially a static structure that
zone_t z;
if (zone_zone == ZONE_NULL) {
- if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
- != KERN_SUCCESS)
- return(ZONE_NULL);
+
+ z = (struct zone *)zdata;
+ zdata += sizeof(*z);
+ zdata_size -= sizeof(*z);
} else
z = (zone_t) zalloc(zone_zone);
if (z == ZONE_NULL)
alloc = PAGE_SIZE;
else
#endif
- { vm_size_t best, waste; unsigned int i;
+#if defined(__LP64__)
+ if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
+#endif
+ {
+ vm_size_t best, waste; unsigned int i;
best = PAGE_SIZE;
waste = best % size;
z->alloc_size = alloc;
z->zone_name = name;
z->count = 0;
+ z->sum_count = 0LL;
z->doing_alloc = FALSE;
z->doing_gc = FALSE;
z->exhaustible = FALSE;
z->expandable = TRUE;
z->waiting = FALSE;
z->async_pending = FALSE;
+ z->caller_acct = TRUE;
+ z->noencrypt = FALSE;
+ z->no_callout = FALSE;
+ z->async_prio_refill = FALSE;
+ z->prio_refill_watermark = 0;
+ z->zone_replenish_thread = NULL;
+#if CONFIG_ZLEAKS
+ z->num_allocs = 0;
+ z->num_frees = 0;
+ z->zleak_capture = 0;
+ z->zleak_on = FALSE;
+#endif /* CONFIG_ZLEAKS */
#if ZONE_DEBUG
z->active_zones.next = z->active_zones.prev = NULL;
/*
* Add the zone to the all-zones list.
+ * If we are tracking zone info per task, and we have
+ * already used all the available stat slots, then keep
+ * using the overflow zone slot.
*/
-
z->next_zone = ZONE_NULL;
thread_call_setup(&z->call_async_alloc, zalloc_async, z);
simple_lock(&all_zones_lock);
*last_zone = z;
last_zone = &z->next_zone;
+ z->index = num_zones;
+ if (zinfo_per_task) {
+ if (num_zones > ZONES_MAX)
+ z->index = ZONES_MAX;
+ }
num_zones++;
simple_unlock(&all_zones_lock);
return(z);
}
+unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated;
+
+static void zone_replenish_thread(zone_t);
+
+/* High priority VM privileged thread used to asynchronously refill a designated
+ * zone, such as the reserved VM map entry zone.
+ */
+static void zone_replenish_thread(zone_t z) {
+ vm_size_t free_size;
+ current_thread()->options |= TH_OPT_VMPRIV;
+
+ for (;;) {
+ lock_zone(z);
+ assert(z->prio_refill_watermark != 0);
+ while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
+ assert(z->doing_alloc == FALSE);
+ assert(z->async_prio_refill == TRUE);
+
+ unlock_zone(z);
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+ vm_offset_t space, alloc_size;
+ kern_return_t kr;
+
+ if (vm_pool_low())
+ alloc_size = round_page(z->elem_size);
+ else
+ alloc_size = z->alloc_size;
+
+ if (z->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else if (kr == KERN_RESOURCE_SHORTAGE) {
+ VM_PAGE_WAIT();
+ } else if (kr == KERN_NO_SPACE) {
+ kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags);
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else {
+ assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+ }
+
+ lock_zone(z);
+ zone_replenish_loops++;
+ }
+
+ unlock_zone(z);
+ assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
+ thread_block(THREAD_CONTINUE_NULL);
+ zone_replenish_wakeups++;
+ }
+}
+
+void
+zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
+ z->prio_refill_watermark = low_water_mark;
+
+ z->async_prio_refill = TRUE;
+ OSMemoryBarrier();
+ kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
+
+ if (tres != KERN_SUCCESS) {
+ panic("zone_prio_refill_configure, thread create: 0x%x", tres);
+ }
+
+ thread_deallocate(z->zone_replenish_thread);
+}
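+
+/*
+ * Usage sketch (hypothetical zone name and sizes, for illustration only):
+ * create a zone, then ask for it to be kept topped up to at least 64 free
+ * elements by the replenish thread:
+ *
+ *	zone_t z = zinit(sizeof(struct foo), 4096 * sizeof(struct foo),
+ *			 PAGE_SIZE, "foo.objects");
+ *	zone_change(z, Z_NOCALLOUT, TRUE);
+ *	zone_prio_refill_configure(z, 64);
+ */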
/*
* Cram the given memory into the specified zone.
*/
void
zcram(
- register zone_t zone,
- void *newaddr,
+ zone_t zone,
+ vm_offset_t newmem,
vm_size_t size)
{
- register vm_size_t elem_size;
- vm_offset_t newmem = (vm_offset_t) newaddr;
+ vm_size_t elem_size;
+ boolean_t from_zm = FALSE;
/* Basic sanity checks */
assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
elem_size = zone->elem_size;
+ if (from_zone_map(newmem, size))
+ from_zm = TRUE;
+
+ if (from_zm)
+ zone_page_init(newmem, size);
+
lock_zone(zone);
while (size >= elem_size) {
ADD_TO_ZONE(zone, newmem);
- if (from_zone_map(newmem, elem_size))
+ if (from_zm)
zone_page_alloc(newmem, elem_size);
zone->count++; /* compensate for ADD_TO_ZONE */
size -= elem_size;
unlock_zone(zone);
}
-/*
- * Contiguous space allocator for non-paged zones. Allocates "size" amount
- * of memory from zone_map.
- */
-
-kern_return_t
-zget_space(
- vm_offset_t size,
- vm_offset_t *result)
-{
- vm_offset_t new_space = 0;
- vm_size_t space_to_add = 0;
-
- simple_lock(&zget_space_lock);
- while ((zalloc_next_space + size) > zalloc_end_of_space) {
- /*
- * Add at least one page to allocation area.
- */
-
- space_to_add = round_page(size);
-
- if (new_space == 0) {
- kern_return_t retval;
- /*
- * Memory cannot be wired down while holding
- * any locks that the pageout daemon might
- * need to free up pages. [Making the zget_space
- * lock a complex lock does not help in this
- * regard.]
- *
- * Unlock and allocate memory. Because several
- * threads might try to do this at once, don't
- * use the memory before checking for available
- * space again.
- */
-
- simple_unlock(&zget_space_lock);
-
- retval = kernel_memory_allocate(zone_map, &new_space,
- space_to_add, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
- if (retval != KERN_SUCCESS)
- return(retval);
-#if ZONE_ALIAS_ADDR
- if (space_to_add == PAGE_SIZE)
- new_space = zone_alias_addr(new_space);
-#endif
- zone_page_init(new_space, space_to_add,
- ZONE_PAGE_USED);
- simple_lock(&zget_space_lock);
- continue;
- }
-
-
- /*
- * Memory was allocated in a previous iteration.
- *
- * Check whether the new region is contiguous
- * with the old one.
- */
-
- if (new_space != zalloc_end_of_space) {
- /*
- * Throw away the remainder of the
- * old space, and start a new one.
- */
- zalloc_wasted_space +=
- zalloc_end_of_space - zalloc_next_space;
- zalloc_next_space = new_space;
- }
-
- zalloc_end_of_space = new_space + space_to_add;
-
- new_space = 0;
- }
- *result = zalloc_next_space;
- zalloc_next_space += size;
- simple_unlock(&zget_space_lock);
-
- if (new_space != 0)
- kmem_free(zone_map, new_space, space_to_add);
-
- return(KERN_SUCCESS);
-}
-
/*
* Steal memory for the zone package. Called from
void
zone_steal_memory(void)
{
- zdata_size = round_page(128*sizeof(struct zone));
- zdata = (vm_offset_t)((char *)pmap_steal_memory(zdata_size) - (char *)0);
+ /* Request enough early memory to get to the pmap zone */
+ zdata_size = 12 * sizeof(struct zone);
+ zdata = (vm_offset_t)pmap_steal_memory(round_page(zdata_size));
}
return 0;
zone_change(zone, Z_FOREIGN, TRUE);
- zcram(zone, (void *)memory, size);
+ zcram(zone, memory, size);
nalloc = (int)(size / zone->elem_size);
assert(nalloc >= nelem);
void
zone_bootstrap(void)
{
- vm_size_t zone_zone_size;
- vm_offset_t zone_zone_space;
char temp_buf[16];
+#if 6094439
+ /* enable zone checks by default, to try and catch offenders... */
+#if 0
+ /* 7968354: turn "-zc" back off */
+ check_freed_element = TRUE;
+ /* 7995202: turn "-zp" back off */
+ zfree_clear = TRUE;
+#endif
+
+ /* ... but allow them to be turned off explicitly */
+ if (PE_parse_boot_argn("-no_zc", temp_buf, sizeof (temp_buf))) {
+ check_freed_element = FALSE;
+ }
+ if (PE_parse_boot_argn("-no_zp", temp_buf, sizeof (temp_buf))) {
+ zfree_clear = FALSE;
+ }
+#endif
+
/* see if we want freed zone element checking and/or poisoning */
if (PE_parse_boot_argn("-zc", temp_buf, sizeof (temp_buf))) {
check_freed_element = TRUE;
zfree_clear = TRUE;
}
+ if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof (temp_buf))) {
+ zinfo_per_task = TRUE;
+ }
+
/*
 * Check for and set up zone leak detection if requested via boot-args. We recognize two
* boot-args:
last_zone = &first_zone;
num_zones = 0;
- simple_lock_init(&zget_space_lock, 0);
- zalloc_next_space = zdata;
- zalloc_end_of_space = zdata + zdata_size;
- zalloc_wasted_space = 0;
-
/* assertion: nobody else called zinit before us */
assert(zone_zone == ZONE_NULL);
zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
sizeof(struct zone), "zones");
zone_change(zone_zone, Z_COLLECT, FALSE);
- zone_zone_size = zalloc_end_of_space - zalloc_next_space;
- zget_space(zone_zone_size, &zone_zone_space);
- zcram(zone_zone, (void *)zone_zone_space, zone_zone_size);
+ zone_change(zone_zone, Z_CALLERACCT, FALSE);
+ zone_change(zone_zone, Z_NOENCRYPT, TRUE);
+
+ zcram(zone_zone, zdata, zdata_size);
+
+ /* initialize fake zones and zone info if tracking by task */
+ if (zinfo_per_task) {
+ vm_size_t zisize = sizeof(zinfo_usage_store_t) * ZINFO_SLOTS;
+ unsigned int i;
+
+ for (i = 0; i < num_fake_zones; i++)
+ fake_zones[i].init(ZINFO_SLOTS - num_fake_zones + i);
+ zinfo_zone = zinit(zisize, zisize * CONFIG_TASK_MAX,
+ zisize, "per task zinfo");
+ zone_change(zinfo_zone, Z_CALLERACCT, FALSE);
+ }
}
+void
+zinfo_task_init(task_t task)
+{
+ if (zinfo_per_task) {
+ task->tkm_zinfo = zalloc(zinfo_zone);
+ memset(task->tkm_zinfo, 0, sizeof(zinfo_usage_store_t) * ZINFO_SLOTS);
+ } else {
+ task->tkm_zinfo = NULL;
+ }
+}
+
+void
+zinfo_task_free(task_t task)
+{
+ assert(task != kernel_task);
+ if (task->tkm_zinfo != NULL) {
+ zfree(zinfo_zone, task->tkm_zinfo);
+ task->tkm_zinfo = NULL;
+ }
+}
+
void
zone_init(
vm_size_t max_zonemap_size)
kern_return_t retval;
vm_offset_t zone_min;
vm_offset_t zone_max;
- vm_size_t zone_table_size;
retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
/*
* Setup garbage collection information:
*/
- zone_table_size = atop_kernel(zone_max - zone_min) *
- sizeof(struct zone_page_table_entry);
- if (kmem_alloc_kobject(zone_map, (vm_offset_t *) &zone_page_table,
- zone_table_size) != KERN_SUCCESS)
- panic("zone_init");
- zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
- zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
zone_map_min_address = zone_min;
zone_map_max_address = zone_max;
+
+ zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
+ zone_page_table_used_size = sizeof(zone_page_table);
+
+ zone_page_table_second_level_size = 1;
+ zone_page_table_second_level_shift_amount = 0;
+
+ /*
+ * Find the power of 2 for the second level that allows
+ * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
+ * slots.
+ */
+ while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
+ zone_page_table_second_level_size <<= 1;
+ zone_page_table_second_level_shift_amount++;
+ }
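+
+ /*
+ * For example (illustrative numbers): with zone_pages = 100000, the loop
+ * above settles on zone_page_table_second_level_size = 4096 (shift 12),
+ * since 99999 >> 12 == 24, which fits within the 32 first-level slots
+ * (ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE).
+ */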
lck_grp_attr_setdefault(&zone_lck_grp_attr);
lck_grp_init(&zone_lck_grp, "zones", &zone_lck_grp_attr);
lck_attr_setdefault(&zone_lck_attr);
lck_mtx_init_ext(&zone_gc_lock, &zone_lck_ext, &zone_lck_grp, &zone_lck_attr);
- zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
+#if CONFIG_ZLEAKS
+ /*
+ * Initialize the zone leak monitor
+ */
+ zleak_init(max_zonemap_size);
+#endif /* CONFIG_ZLEAKS */
+}
+
+void
+zone_page_table_expand(zone_page_index_t pindex)
+{
+ unsigned int first_index;
+ struct zone_page_table_entry * volatile * first_level_ptr;
+
+ assert(pindex < zone_pages);
+
+ first_index = zone_page_table_first_level_slot(pindex);
+ first_level_ptr = &zone_page_table[first_index];
+
+ if (*first_level_ptr == NULL) {
+ /*
+ * We were able to verify the old first-level slot
+ * had NULL, so attempt to populate it.
+ */
+
+ vm_offset_t second_level_array = 0;
+ vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
+ zone_page_index_t i;
+ struct zone_page_table_entry *entry_array;
+
+ if (kmem_alloc_kobject(zone_map, &second_level_array,
+ second_level_size) != KERN_SUCCESS) {
+ panic("zone_page_table_expand");
+ }
+
+ /*
+ * zone_gc() may scan the "zone_page_table" directly,
+ * so make sure any slots have a valid unused state.
+ */
+ entry_array = (struct zone_page_table_entry *)second_level_array;
+ for (i=0; i < zone_page_table_second_level_size; i++) {
+ entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
+ entry_array[i].collect_count = 0;
+ }
+
+ if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
+ /* Old slot was NULL, replaced with expanded level */
+ OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
+ } else {
+ /* Old slot was not NULL, someone else expanded first */
+ kmem_free(zone_map, second_level_array, second_level_size);
+ }
+ } else {
+ /* Old slot was not NULL, already been expanded */
+ }
+}
+
+struct zone_page_table_entry *
+zone_page_table_lookup(zone_page_index_t pindex)
+{
+ unsigned int first_index = zone_page_table_first_level_slot(pindex);
+ struct zone_page_table_entry *second_level = zone_page_table[first_index];
+
+ if (second_level) {
+ return &second_level[zone_page_table_second_level_slot(pindex)];
+ }
+
+ return NULL;
}
extern volatile SInt32 kfree_nop_count;
+#pragma mark -
+#pragma mark zalloc_canblock
+
/*
* zalloc returns an element from the specified zone.
*/
{
vm_offset_t addr;
kern_return_t retval;
- void *bt[MAX_DEPTH]; /* only used if zone logging is enabled */
+ uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
int numsaved = 0;
- int i;
+ int i;
+ boolean_t zone_replenish_wakeup = FALSE;
+
+#if CONFIG_ZLEAKS
+ uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
+#endif /* CONFIG_ZLEAKS */
assert(zone != ZONE_NULL);
+
+ lock_zone(zone);
/*
* If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
*/
-
+
if (DO_LOGGING(zone))
- numsaved = OSBacktrace(&bt[0], MAX_DEPTH);
-
- lock_zone(zone);
+ numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace every z_sample_factor
+ * allocations in this zone.
+ */
+ if (zone->zleak_on && (zone->zleak_capture++ % z_sample_factor == 0)) {
+ zone->zleak_capture = 1;
+
+ /* Avoid backtracing twice if zone logging is on */
+ if (numsaved == 0 )
+ zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
+ else
+ zleak_tracedepth = numsaved;
+ }
+#endif /* CONFIG_ZLEAKS */
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- while ((addr == 0) && canblock && (zone->doing_gc)) {
- zone->waiting = TRUE;
- zone_sleep(zone);
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ if (zone->async_prio_refill &&
+ ((zone->cur_size - (zone->count * zone->elem_size)) < (zone->prio_refill_watermark * zone->elem_size))) {
+ zone_replenish_wakeup = TRUE;
+ zone_replenish_wakeups_initiated++;
}
while ((addr == 0) && canblock) {
*/
zone->waiting = TRUE;
zone_sleep(zone);
- }
- else {
+ } else if (zone->doing_gc) {
+ /* zone_gc() is running. Since we need an element
+ * from the free list that is currently being
+ * collected, set the waiting bit and try to
+ * interrupt the GC process, and try again
+ * when we obtain the lock.
+ */
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ } else {
+ vm_offset_t space;
+ vm_size_t alloc_size;
+ int retry = 0;
+
if ((zone->cur_size + zone->elem_size) >
zone->max_size) {
if (zone->exhaustible)
zone->doing_alloc = TRUE;
unlock_zone(zone);
- if (zone->collectable) {
- vm_offset_t space;
- vm_size_t alloc_size;
- int retry = 0;
-
- for (;;) {
-
- if (vm_pool_low() || retry >= 1)
- alloc_size =
- round_page(zone->elem_size);
- else
- alloc_size = zone->alloc_size;
-
- retval = kernel_memory_allocate(zone_map,
- &space, alloc_size, 0,
- KMA_KOBJECT|KMA_NOPAGEWAIT);
- if (retval == KERN_SUCCESS) {
+ for (;;) {
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+
+ if (vm_pool_low() || retry >= 1)
+ alloc_size =
+ round_page(zone->elem_size);
+ else
+ alloc_size = zone->alloc_size;
+
+ if (zone->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+ if (retval == KERN_SUCCESS) {
#if ZONE_ALIAS_ADDR
- if (alloc_size == PAGE_SIZE)
- space = zone_alias_addr(space);
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
#endif
- zone_page_init(space, alloc_size,
- ZONE_PAGE_USED);
- zcram(zone, (void *)space, alloc_size);
-
- break;
- } else if (retval != KERN_RESOURCE_SHORTAGE) {
- retry++;
-
- if (retry == 2) {
- zone_gc();
- printf("zalloc did gc\n");
- zone_display_zprint();
- }
- if (retry == 3) {
- panic_include_zprint = TRUE;
- panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
- }
- } else {
- break;
- }
- }
- lock_zone(zone);
- zone->doing_alloc = FALSE;
- if (zone->waiting) {
- zone->waiting = FALSE;
- zone_wakeup(zone);
- }
- REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
- if (addr == 0 &&
- retval == KERN_RESOURCE_SHORTAGE) {
- unlock_zone(zone);
- VM_PAGE_WAIT();
- lock_zone(zone);
- }
- } else {
- vm_offset_t space;
- retval = zget_space(zone->elem_size, &space);
-
- lock_zone(zone);
- zone->doing_alloc = FALSE;
- if (zone->waiting) {
- zone->waiting = FALSE;
- thread_wakeup((event_t)zone);
- }
- if (retval == KERN_SUCCESS) {
- zone->count++;
- zone->cur_size += zone->elem_size;
-#if ZONE_DEBUG
- if (zone_debug_enabled(zone)) {
- enqueue_tail(&zone->active_zones, (queue_entry_t)space);
+#if CONFIG_ZLEAKS
+ if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
+ if (zone_map->size >= zleak_global_tracking_threshold) {
+ kern_return_t kr;
+
+ kr = zleak_activate();
+ if (kr != KERN_SUCCESS) {
+ printf("Failed to activate live zone leak debugging (%d).\n", kr);
+ }
+ }
}
-#endif
- unlock_zone(zone);
- zone_page_alloc(space, zone->elem_size);
-#if ZONE_DEBUG
- if (zone_debug_enabled(zone))
- space += ZONE_DEBUG_OFFSET;
-#endif
- addr = space;
- goto success;
- }
- if (retval == KERN_RESOURCE_SHORTAGE) {
- unlock_zone(zone);
- VM_PAGE_WAIT();
- lock_zone(zone);
+ if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
+ if (zone->cur_size > zleak_per_zone_tracking_threshold) {
+ zone->zleak_on = TRUE;
+ }
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ zcram(zone, space, alloc_size);
+
+ break;
+ } else if (retval != KERN_RESOURCE_SHORTAGE) {
+ retry++;
+
+ if (retry == 2) {
+ zone_gc();
+ printf("zalloc did gc\n");
+ zone_display_zprint();
+ }
+ if (retry == 3) {
+ panic_include_zprint = TRUE;
+#if CONFIG_ZLEAKS
+ if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
+ panic_include_ztrace = TRUE;
+ }
+#endif /* CONFIG_ZLEAKS */
+ /* TODO: Change this to something more descriptive, perhaps
+ * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
+ */
+ panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
+ }
} else {
- panic("zalloc: \"%s\" (%d elements) zget_space returned %d", zone->zone_name, zone->count, retval);
+ break;
}
}
+ lock_zone(zone);
+ zone->doing_alloc = FALSE;
+ if (zone->waiting) {
+ zone->waiting = FALSE;
+ zone_wakeup(zone);
+ }
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ if (addr == 0 &&
+ retval == KERN_RESOURCE_SHORTAGE) {
+ unlock_zone(zone);
+
+ VM_PAGE_WAIT();
+ lock_zone(zone);
+ }
}
if (addr == 0)
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
}
+#if CONFIG_ZLEAKS
+ /* Zone leak detection:
+ * If we're sampling this allocation, add it to the zleaks hash table.
+ */
+ if (addr && zleak_tracedepth > 0) {
+ /* Sampling can fail if another sample is happening at the same time in a different zone. */
+ if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
+ /* If it failed, roll back the counter so we sample the next allocation instead. */
+ zone->zleak_capture = z_sample_factor;
+ }
+ }
+#endif /* CONFIG_ZLEAKS */
+
+
/*
* See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
* suspected, so this code rarely executes. We need to do this code while still holding the zone lock
zrecords[zcurrent].z_opcode = ZOP_ALLOC;
for (i = 0; i < numsaved; i++)
- zrecords[zcurrent].z_pc[i] = bt[i];
+ zrecords[zcurrent].z_pc[i] = (void*) zbt[i];
- for (; i < MAX_DEPTH; i++)
+ for (; i < MAX_ZTRACE_DEPTH; i++)
zrecords[zcurrent].z_pc[i] = 0;
zcurrent++;
zcurrent = 0;
}
- if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
+ if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
zone->async_pending = TRUE;
unlock_zone(zone);
thread_call_enter(&zone->call_async_alloc);
addr += ZONE_DEBUG_OFFSET;
}
#endif
+
+#if CONFIG_ZLEAKS
+ if (addr != 0) {
+ zone->num_allocs++;
+ }
+#endif /* CONFIG_ZLEAKS */
unlock_zone(zone);
-success:
+ if (zone_replenish_wakeup)
+ thread_wakeup(&zone->zone_replenish_thread);
+
TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
+ if (addr) {
+ thread_t thr = current_thread();
+ task_t task;
+ zinfo_usage_t zinfo;
+
+ if (zone->caller_acct)
+ thr->tkm_private.alloc += zone->elem_size;
+ else
+ thr->tkm_shared.alloc += zone->elem_size;
+
+ if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(zone->elem_size, (int64_t *)&zinfo[zone->index].alloc);
+ }
return((void *)addr);
}
*
* This form should be used when you can not block (like when
* processing an interrupt).
+ *
+ * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
+ * friend vm_page_more_fictitious can block, so it doesn't seem like
+ * this is used for interrupts any more....
*/
void *
zget(
register zone_t zone)
{
register vm_offset_t addr;
+
+#if CONFIG_ZLEAKS
+ uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
+ uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
+#endif /* CONFIG_ZLEAKS */
assert( zone != ZONE_NULL );
if (!lock_try_zone(zone))
return NULL;
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace
+ */
+ if (zone->zleak_on && (zone->zleak_capture++ % z_sample_factor == 0)) {
+ zone->zleak_capture = 1;
+ zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
+ }
+#endif /* CONFIG_ZLEAKS */
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
#if ZONE_DEBUG
addr += ZONE_DEBUG_OFFSET;
}
#endif /* ZONE_DEBUG */
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: record the allocation
+ */
+ if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
+ /* Sampling can fail if another sample is happening at the same time in a different zone. */
+ if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
+ /* If it failed, roll back the counter so we sample the next allocation instead. */
+ zone->zleak_capture = z_sample_factor;
+ }
+ }
+
+ if (addr != 0) {
+ zone->num_allocs++;
+ }
+#endif /* CONFIG_ZLEAKS */
+
unlock_zone(zone);
return((void *) addr);
void *addr)
{
vm_offset_t elem = (vm_offset_t) addr;
- void *bt[MAX_DEPTH]; /* only used if zone logging is enable via boot-args */
+ void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
int numsaved = 0;
assert(zone != ZONE_NULL);
*/
if (DO_LOGGING(zone))
- numsaved = OSBacktrace(&bt[0], MAX_DEPTH);
+ numsaved = OSBacktrace(&zbt[0], MAX_ZTRACE_DEPTH);
#if MACH_ASSERT
/* Basic sanity checks */
zrecords[zcurrent].z_opcode = ZOP_FREE;
for (i = 0; i < numsaved; i++)
- zrecords[zcurrent].z_pc[i] = bt[i];
+ zrecords[zcurrent].z_pc[i] = zbt[i];
- for (; i < MAX_DEPTH; i++)
+ for (; i < MAX_ZTRACE_DEPTH; i++)
zrecords[zcurrent].z_pc[i] = 0;
zcurrent++;
if (elem != (vm_offset_t)tmp_elem)
panic("zfree()ing element from wrong zone");
}
- remqueue(&zone->active_zones, (queue_t) elem);
+ remqueue((queue_t) elem);
}
#endif /* ZONE_DEBUG */
if (zone_check) {
if (zone->count < 0)
panic("zfree: count < 0!");
#endif
+
+#if CONFIG_ZLEAKS
+ zone->num_frees++;
+
+ /*
+ * Zone leak detection: un-track the allocation
+ */
+ if (zone->zleak_on) {
+ zleak_free(elem, zone->elem_size);
+ }
+#endif /* CONFIG_ZLEAKS */
+
/*
* If elements have one or more pages, and memory is low,
* request to run the garbage collection in the zone the next
zone_gc_forced = TRUE;
}
unlock_zone(zone);
+
+ {
+ thread_t thr = current_thread();
+ task_t task;
+ zinfo_usage_t zinfo;
+
+ if (zone->caller_acct)
+ thr->tkm_private.free += zone->elem_size;
+ else
+ thr->tkm_shared.free += zone->elem_size;
+ if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(zone->elem_size,
+ (int64_t *)&zinfo[zone->index].free);
+ }
}
assert( value == TRUE || value == FALSE );
switch(item){
+ case Z_NOENCRYPT:
+ zone->noencrypt = value;
+ break;
case Z_EXHAUST:
zone->exhaustible = value;
break;
case Z_FOREIGN:
zone->allows_foreign = value;
break;
+ case Z_CALLERACCT:
+ zone->caller_acct = value;
+ break;
+ case Z_NOCALLOUT:
+ zone->no_callout = value;
+ break;
#if MACH_ASSERT
default:
panic("Zone_change: Wrong Item Type!");
if (size != 0) {
if (kmem_alloc_kobject(zone_map, &addr, size) != KERN_SUCCESS)
panic("zprealloc");
- zone_page_init(addr, size, ZONE_PAGE_USED);
- zcram(zone, (void *)addr, size);
+ zcram(zone, addr, size);
}
}
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_collectable");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
if (zp->collect_count == zp->alloc_count)
return (TRUE);
+ }
return (FALSE);
}
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_keep");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
zp->collect_count = 0;
+ }
}
void
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_collect");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
- for (zp = zone_page_table + i; i <= j; zp++, i++)
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
++zp->collect_count;
+ }
}
void
zone_page_init(
vm_offset_t addr,
- vm_size_t size,
- int value)
+ vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_init");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ /* make sure entry exists before marking unused */
+ zone_page_table_expand(i);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
- zp->alloc_count = value;
+ zp = zone_page_table_lookup(i);
+ assert(zp);
+ zp->alloc_count = ZONE_PAGE_UNUSED;
zp->collect_count = 0;
}
}
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_alloc");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
+ assert(zp);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
/*
- * Set alloc_count to (ZONE_PAGE_USED + 1) if
+ * Set alloc_count to ZONE_PAGE_USED if
* it was previously set to ZONE_PAGE_UNUSED.
*/
if (zp->alloc_count == ZONE_PAGE_UNUSED)
- zp->alloc_count = 1;
- else
- ++zp->alloc_count;
+ zp->alloc_count = ZONE_PAGE_USED;
+
+ ++zp->alloc_count;
}
}
void
zone_page_free_element(
- struct zone_page_table_entry **free_pages,
+ zone_page_index_t *free_page_list,
vm_offset_t addr,
vm_size_t size)
{
struct zone_page_table_entry *zp;
- natural_t i, j;
+ zone_page_index_t i, j;
#if ZONE_ALIAS_ADDR
addr = zone_virtual_addr(addr);
panic("zone_page_free_element");
#endif
- i = (natural_t)atop_kernel(addr-zone_map_min_address);
- j = (natural_t)atop_kernel((addr+size-1) - zone_map_min_address);
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
- for (zp = zone_page_table + i; i <= j; zp++, i++) {
if (zp->collect_count > 0)
--zp->collect_count;
if (--zp->alloc_count == 0) {
+ vm_address_t free_page_address;
+
zp->alloc_count = ZONE_PAGE_UNUSED;
zp->collect_count = 0;
- zp->link = *free_pages;
- *free_pages = zp;
+
+ /*
+ * This element was the last one on this page, re-use the page's
+ * storage for a page freelist
+ */
+ free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)i);
+ *(zone_page_index_t *)free_page_address = *free_page_list;
+ *free_page_list = i;
}
}
}
unsigned int max_zones;
zone_t z;
unsigned int i;
- struct zone_page_table_entry *zp, *zone_free_pages;
+ zone_page_index_t zone_free_page_head;
lck_mtx_lock(&zone_gc_lock);
simple_unlock(&all_zones_lock);
#if MACH_ASSERT
- for (i = 0; i < zone_pages; i++)
- assert(zone_page_table[i].collect_count == 0);
+ for (i = 0; i < zone_pages; i++) {
+ struct zone_page_table_entry *zp;
+
+ zp = zone_page_table_lookup(i);
+ assert(!zp || (zp->collect_count == 0));
+ }
#endif /* MACH_ASSERT */
- zone_free_pages = NULL;
+ zone_free_page_head = ZONE_PAGE_INDEX_INVALID;
for (i = 0; i < max_zones; i++, z = z->next_zone) {
unsigned int n, m;
if (++n >= 50) {
if (z->waiting == TRUE) {
+ /* z->waiting checked without lock held, rechecked below after locking */
lock_zone(z);
if (keep != NULL) {
ADD_LIST_TO_ZONE(z, keep, tail);
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
+
unlock_zone(z);
}
n = 0; tail = keep = NULL;
while (elt != NULL) {
if (zone_page_collectable((vm_offset_t)elt, elt_size)) {
+ struct zone_free_element *next_elt = elt->next;
+
size_freed += elt_size;
- zone_page_free_element(&zone_free_pages,
+
+ /*
+ * If this is the last allocation on the page(s),
+ * we may use their storage to maintain the linked
+ * list of free-able pages. So store elt->next because
+ * "elt" may be scribbled over.
+ */
+ zone_page_free_element(&zone_free_page_head,
(vm_offset_t)elt, elt_size);
- elt = elt->next;
+ elt = next_elt;
++zgc_stats.elems_freed;
}
* Reclaim the pages we are freeing.
*/
- while ((zp = zone_free_pages) != NULL) {
- zone_free_pages = zp->link;
+ while (zone_free_page_head != ZONE_PAGE_INDEX_INVALID) {
+ zone_page_index_t zind = zone_free_page_head;
+ vm_address_t free_page_address;
#if ZONE_ALIAS_ADDR
- z = zone_virtual_addr((vm_map_address_t)z);
+ z = (zone_t)zone_virtual_addr((vm_map_address_t)z);
#endif
- kmem_free(zone_map, zone_map_min_address + PAGE_SIZE *
- (zp - zone_page_table), PAGE_SIZE);
+ /* Use the first word of the page about to be freed to find the next free page */
+ free_page_address = zone_map_min_address + PAGE_SIZE * ((vm_size_t)zind);
+ zone_free_page_head = *(zone_page_index_t *)free_page_address;
+
+ kmem_free(zone_map, free_page_address, PAGE_SIZE);
++zgc_stats.pgs_freed;
}
void
consider_zone_gc(boolean_t force)
{
- /*
- * By default, don't attempt zone GC more frequently
- * than once / 1 minutes.
- */
-
- if (zone_gc_max_rate == 0)
- zone_gc_max_rate = (60 << SCHED_TICK_SHIFT) + 1;
if (zone_gc_allowed &&
- ((sched_tick > (zone_gc_last_tick + zone_gc_max_rate)) ||
+ (zone_gc_allowed_by_time_throttle ||
zone_gc_forced ||
force)) {
zone_gc_forced = FALSE;
- zone_gc_last_tick = sched_tick;
+ zone_gc_allowed_by_time_throttle = FALSE; /* reset periodically */
zone_gc();
}
}
-struct fake_zone_info {
- const char* name;
- void (*func)(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
- int *, int *);
-};
+/*
+ * By default, don't attempt zone GC more frequently
+ * than once per minute.
+ */
+void
+compute_zone_gc_throttle(void *arg __unused)
+{
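+	/*
+	 * Assumed to be called from a periodic callout; re-arms the throttle
+	 * flag that consider_zone_gc() clears after each collection.
+	 */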
+ zone_gc_allowed_by_time_throttle = TRUE;
+}
-static struct fake_zone_info fake_zones[] = {
- {
- .name = "kernel_stacks",
- .func = stack_fake_zone_info,
- },
-#ifdef ppc
- {
- .name = "save_areas",
- .func = save_fake_zone_info,
- },
- {
- .name = "pmap_mappings",
- .func = mapping_fake_zone_info,
- },
-#endif /* ppc */
-#if defined(__i386__) || defined (__x86_64__)
- {
- .name = "page_tables",
- .func = pt_fake_zone_info,
- },
-#endif /* i386 */
- {
- .name = "kalloc.large",
- .func = kalloc_fake_zone_info,
- },
-};
+kern_return_t
+task_zone_info(
+ task_t task,
+ mach_zone_name_array_t *namesp,
+ mach_msg_type_number_t *namesCntp,
+ task_zone_info_array_t *infop,
+ mach_msg_type_number_t *infoCntp)
+{
+ mach_zone_name_t *names;
+ vm_offset_t names_addr;
+ vm_size_t names_size;
+ task_zone_info_t *info;
+ vm_offset_t info_addr;
+ vm_size_t info_size;
+ unsigned int max_zones, i;
+ zone_t z;
+ mach_zone_name_t *zn;
+ task_zone_info_t *zi;
+ kern_return_t kr;
+
+ vm_size_t used;
+ vm_map_copy_t copy;
+
+
+ if (task == TASK_NULL)
+ return KERN_INVALID_TASK;
+
+ /*
+ * We assume that zones aren't freed once allocated.
+ * We won't pick up any zones that are allocated later.
+ */
+
+ simple_lock(&all_zones_lock);
+ max_zones = (unsigned int)(num_zones + num_fake_zones);
+ z = first_zone;
+ simple_unlock(&all_zones_lock);
+
+ names_size = round_page(max_zones * sizeof *names);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &names_addr, names_size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ names = (mach_zone_name_t *) names_addr;
+
+ info_size = round_page(max_zones * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &info_addr, info_size);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map,
+ names_addr, names_size);
+ return kr;
+ }
+
+ info = (task_zone_info_t *) info_addr;
+
+ zn = &names[0];
+ zi = &info[0];
+
+ for (i = 0; i < max_zones - num_fake_zones; i++) {
+ struct zone zcopy;
+
+ assert(z != ZONE_NULL);
+
+ lock_zone(z);
+ zcopy = *z;
+ unlock_zone(z);
+
+ simple_lock(&all_zones_lock);
+ z = z->next_zone;
+ simple_unlock(&all_zones_lock);
+
+ /* assuming here the name data is static */
+ (void) strncpy(zn->mzn_name, zcopy.zone_name,
+ sizeof zn->mzn_name);
+ zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
+
+ zi->tzi_count = (uint64_t)zcopy.count;
+ zi->tzi_cur_size = (uint64_t)zcopy.cur_size;
+ zi->tzi_max_size = (uint64_t)zcopy.max_size;
+ zi->tzi_elem_size = (uint64_t)zcopy.elem_size;
+ zi->tzi_alloc_size = (uint64_t)zcopy.alloc_size;
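+		/* sum_count accumulates allocations, so this reports cumulative bytes allocated from the zone */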
+ zi->tzi_sum_size = zcopy.sum_count * zcopy.elem_size;
+ zi->tzi_exhaustible = (uint64_t)zcopy.exhaustible;
+ zi->tzi_collectable = (uint64_t)zcopy.collectable;
+ zi->tzi_caller_acct = (uint64_t)zcopy.caller_acct;
+ if (task->tkm_zinfo != NULL) {
+ zi->tzi_task_alloc = task->tkm_zinfo[zcopy.index].alloc;
+ zi->tzi_task_free = task->tkm_zinfo[zcopy.index].free;
+ } else {
+ zi->tzi_task_alloc = 0;
+ zi->tzi_task_free = 0;
+ }
+ zn++;
+ zi++;
+ }
+
+ /*
+ * loop through the fake zones and fill them using the specialized
+ * functions
+ */
+ for (i = 0; i < num_fake_zones; i++) {
+ int count, collectable, exhaustible, caller_acct, index;
+ vm_size_t cur_size, max_size, elem_size, alloc_size;
+ uint64_t sum_size;
+
+ strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
+ zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
+ fake_zones[i].query(&count, &cur_size,
+ &max_size, &elem_size,
+ &alloc_size, &sum_size,
+ &collectable, &exhaustible, &caller_acct);
+ zi->tzi_count = (uint64_t)count;
+ zi->tzi_cur_size = (uint64_t)cur_size;
+ zi->tzi_max_size = (uint64_t)max_size;
+ zi->tzi_elem_size = (uint64_t)elem_size;
+ zi->tzi_alloc_size = (uint64_t)alloc_size;
+ zi->tzi_sum_size = sum_size;
+ zi->tzi_collectable = (uint64_t)collectable;
+ zi->tzi_exhaustible = (uint64_t)exhaustible;
+ zi->tzi_caller_acct = (uint64_t)caller_acct;
+ if (task->tkm_zinfo != NULL) {
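+			/* Fake zones occupy the last num_fake_zones slots of the per-task zinfo array */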
+ index = ZINFO_SLOTS - num_fake_zones + i;
+ zi->tzi_task_alloc = task->tkm_zinfo[index].alloc;
+ zi->tzi_task_free = task->tkm_zinfo[index].free;
+ } else {
+ zi->tzi_task_alloc = 0;
+ zi->tzi_task_free = 0;
+ }
+ zn++;
+ zi++;
+ }
+
+ used = max_zones * sizeof *names;
+ if (used != names_size)
+ bzero((char *) (names_addr + used), names_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
+	    (vm_map_size_t)names_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *namesp = (mach_zone_name_t *) copy;
+ *namesCntp = max_zones;
+
+ used = max_zones * sizeof *info;
+
+ if (used != info_size)
+ bzero((char *) (info_addr + used), info_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
+	    (vm_map_size_t)info_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (task_zone_info_t *) copy;
+ *infoCntp = max_zones;
+
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+mach_zone_info(
+ host_t host,
+ mach_zone_name_array_t *namesp,
+ mach_msg_type_number_t *namesCntp,
+ mach_zone_info_array_t *infop,
+ mach_msg_type_number_t *infoCntp)
+{
+ mach_zone_name_t *names;
+ vm_offset_t names_addr;
+ vm_size_t names_size;
+ mach_zone_info_t *info;
+ vm_offset_t info_addr;
+ vm_size_t info_size;
+ unsigned int max_zones, i;
+ zone_t z;
+ mach_zone_name_t *zn;
+ mach_zone_info_t *zi;
+ kern_return_t kr;
+
+ vm_size_t used;
+ vm_map_copy_t copy;
+
+
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ num_fake_zones = sizeof fake_zones / sizeof fake_zones[0];
+
+ /*
+ * We assume that zones aren't freed once allocated.
+ * We won't pick up any zones that are allocated later.
+ */
+
+ simple_lock(&all_zones_lock);
+ max_zones = (unsigned int)(num_zones + num_fake_zones);
+ z = first_zone;
+ simple_unlock(&all_zones_lock);
+
+ names_size = round_page(max_zones * sizeof *names);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &names_addr, names_size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ names = (mach_zone_name_t *) names_addr;
+
+ info_size = round_page(max_zones * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &info_addr, info_size);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map,
+ names_addr, names_size);
+ return kr;
+ }
+
+ info = (mach_zone_info_t *) info_addr;
+
+ zn = &names[0];
+ zi = &info[0];
+
+ for (i = 0; i < max_zones - num_fake_zones; i++) {
+ struct zone zcopy;
+
+ assert(z != ZONE_NULL);
+
+ lock_zone(z);
+ zcopy = *z;
+ unlock_zone(z);
+
+ simple_lock(&all_zones_lock);
+ z = z->next_zone;
+ simple_unlock(&all_zones_lock);
+
+ /* assuming here the name data is static */
+ (void) strncpy(zn->mzn_name, zcopy.zone_name,
+ sizeof zn->mzn_name);
+ zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
+
+ zi->mzi_count = (uint64_t)zcopy.count;
+ zi->mzi_cur_size = (uint64_t)zcopy.cur_size;
+ zi->mzi_max_size = (uint64_t)zcopy.max_size;
+ zi->mzi_elem_size = (uint64_t)zcopy.elem_size;
+ zi->mzi_alloc_size = (uint64_t)zcopy.alloc_size;
+ zi->mzi_sum_size = zcopy.sum_count * zcopy.elem_size;
+ zi->mzi_exhaustible = (uint64_t)zcopy.exhaustible;
+ zi->mzi_collectable = (uint64_t)zcopy.collectable;
+ zn++;
+ zi++;
+ }
+
+ /*
+ * loop through the fake zones and fill them using the specialized
+ * functions
+ */
+ for (i = 0; i < num_fake_zones; i++) {
+ int count, collectable, exhaustible, caller_acct;
+ vm_size_t cur_size, max_size, elem_size, alloc_size;
+ uint64_t sum_size;
+
+ strncpy(zn->mzn_name, fake_zones[i].name, sizeof zn->mzn_name);
+ zn->mzn_name[sizeof zn->mzn_name - 1] = '\0';
+ fake_zones[i].query(&count, &cur_size,
+ &max_size, &elem_size,
+ &alloc_size, &sum_size,
+ &collectable, &exhaustible, &caller_acct);
+ zi->mzi_count = (uint64_t)count;
+ zi->mzi_cur_size = (uint64_t)cur_size;
+ zi->mzi_max_size = (uint64_t)max_size;
+ zi->mzi_elem_size = (uint64_t)elem_size;
+ zi->mzi_alloc_size = (uint64_t)alloc_size;
+ zi->mzi_sum_size = sum_size;
+ zi->mzi_collectable = (uint64_t)collectable;
+ zi->mzi_exhaustible = (uint64_t)exhaustible;
+
+ zn++;
+ zi++;
+ }
+
+ used = max_zones * sizeof *names;
+ if (used != names_size)
+ bzero((char *) (names_addr + used), names_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
+	    (vm_map_size_t)names_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *namesp = (mach_zone_name_t *) copy;
+ *namesCntp = max_zones;
+
+ used = max_zones * sizeof *info;
+
+ if (used != info_size)
+ bzero((char *) (info_addr + used), info_size - used);
+
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
+	    (vm_map_size_t)info_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+
+ *infop = (mach_zone_info_t *) copy;
+ *infoCntp = max_zones;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * host_zone_info - LEGACY user interface for Mach zone information
+ * Should use mach_zone_info() instead!
+ */
kern_return_t
host_zone_info(
host_t host,
zone_name_t *zn;
zone_info_t *zi;
kern_return_t kr;
- size_t num_fake_zones;
+
+ vm_size_t used;
+ vm_map_copy_t copy;
if (host == HOST_NULL)
z = first_zone;
simple_unlock(&all_zones_lock);
- if (max_zones <= *namesCntp) {
- /* use in-line memory */
- names_size = *namesCntp * sizeof *names;
- names = *namesp;
- } else {
- names_size = round_page(max_zones * sizeof *names);
- kr = kmem_alloc_pageable(ipc_kernel_map,
- &names_addr, names_size);
- if (kr != KERN_SUCCESS)
- return kr;
- names = (zone_name_t *) names_addr;
- }
-
- if (max_zones <= *infoCntp) {
- /* use in-line memory */
- info_size = *infoCntp * sizeof *info;
- info = *infop;
- } else {
- info_size = round_page(max_zones * sizeof *info);
- kr = kmem_alloc_pageable(ipc_kernel_map,
- &info_addr, info_size);
- if (kr != KERN_SUCCESS) {
- if (names != *namesp)
- kmem_free(ipc_kernel_map,
- names_addr, names_size);
- return kr;
- }
-
- info = (zone_info_t *) info_addr;
+ names_size = round_page(max_zones * sizeof *names);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &names_addr, names_size);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ names = (zone_name_t *) names_addr;
+
+ info_size = round_page(max_zones * sizeof *info);
+ kr = kmem_alloc_pageable(ipc_kernel_map,
+ &info_addr, info_size);
+ if (kr != KERN_SUCCESS) {
+ kmem_free(ipc_kernel_map,
+ names_addr, names_size);
+ return kr;
}
+
+ info = (zone_info_t *) info_addr;
+
zn = &names[0];
zi = &info[0];
- for (i = 0; i < num_zones; i++) {
+ for (i = 0; i < max_zones - num_fake_zones; i++) {
struct zone zcopy;
assert(z != ZONE_NULL);
* functions
*/
for (i = 0; i < num_fake_zones; i++) {
+ int caller_acct;
+ uint64_t sum_space;
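+		/* The legacy zone_info_t has no fields for these, so the extra query results are discarded */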
strncpy(zn->zn_name, fake_zones[i].name, sizeof zn->zn_name);
zn->zn_name[sizeof zn->zn_name - 1] = '\0';
- fake_zones[i].func(&zi->zi_count, &zi->zi_cur_size,
- &zi->zi_max_size, &zi->zi_elem_size,
- &zi->zi_alloc_size, &zi->zi_collectable,
- &zi->zi_exhaustible);
+ fake_zones[i].query(&zi->zi_count, &zi->zi_cur_size,
+ &zi->zi_max_size, &zi->zi_elem_size,
+ &zi->zi_alloc_size, &sum_space,
+ &zi->zi_collectable, &zi->zi_exhaustible, &caller_acct);
zn++;
zi++;
}
- if (names != *namesp) {
- vm_size_t used;
- vm_map_copy_t copy;
-
- used = max_zones * sizeof *names;
-
- if (used != names_size)
- bzero((char *) (names_addr + used), names_size - used);
+ used = max_zones * sizeof *names;
+ if (used != names_size)
+ bzero((char *) (names_addr + used), names_size - used);
- kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
-		(vm_map_size_t)names_size, TRUE, &copy);
- assert(kr == KERN_SUCCESS);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
+	    (vm_map_size_t)names_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
- *namesp = (zone_name_t *) copy;
- }
+ *namesp = (zone_name_t *) copy;
*namesCntp = max_zones;
- if (info != *infop) {
- vm_size_t used;
- vm_map_copy_t copy;
-
- used = max_zones * sizeof *info;
+ used = max_zones * sizeof *info;
+ if (used != info_size)
+ bzero((char *) (info_addr + used), info_size - used);
- if (used != info_size)
- bzero((char *) (info_addr + used), info_size - used);
+ kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
+	    (vm_map_size_t)info_size, TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
- kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
-		(vm_map_size_t)info_size, TRUE, &copy);
- assert(kr == KERN_SUCCESS);
-
- *infop = (zone_info_t *) copy;
- }
+ *infop = (zone_info_t *) copy;
*infoCntp = max_zones;
return KERN_SUCCESS;
}
extern unsigned int stack_total;
+extern unsigned long long stack_allocs;
#if defined(__i386__) || defined (__x86_64__)
extern unsigned int inuse_ptepages_count;
+extern long long alloc_ptepages_count;
#endif
void zone_display_zprint()
db_printf("C");
if (zcopy.expandable)
db_printf("X");
+ if (zcopy.caller_acct)
+ db_printf("A");
db_printf("\n");
}