+ kern_return_t kr;
+ vm_size_t size;
+ vm_offset_t memory;
+ int nalloc;
+
+ assert(nelem > 0);
+ if (nelem <= 0)
+ return 0;
+ size = nelem * zone->elem_size;
+ size = round_page(size);
+ kr = kmem_alloc_kobject(kernel_map, &memory, size);
+ if (kr != KERN_SUCCESS)
+ return 0;
+
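+	/*
+	 * The backing memory comes from kernel_map rather than zone_map, so
+	 * mark the zone as accepting foreign memory before cramming it in.
+	 */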
+ zone_change(zone, Z_FOREIGN, TRUE);
+ zcram(zone, memory, size);
+ nalloc = (int)(size / zone->elem_size);
+ assert(nalloc >= nelem);
+
+ return nalloc;
+}
+
+/*
+ * Initialize the "zone of zones" which uses fixed memory allocated
+ * earlier in memory initialization. zone_bootstrap is called
+ * before zone_init.
+ */
+void
+zone_bootstrap(void)
+{
+ char temp_buf[16];
+
+ if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof(temp_buf))) {
+ zinfo_per_task = TRUE;
+ }
+
+ /* do we want corruption-style debugging with zlog? */
+ if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
+ corruption_debug_flag = TRUE;
+ }
+
+ /* Set up zone poisoning */
+
+ free_check_sample_factor = ZP_DEFAULT_SAMPLING_FACTOR;
+
+ /* support for old zone poisoning boot-args */
+ if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
+ free_check_sample_factor = 1;
+ }
+ if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
+ free_check_sample_factor = 0;
+ }
+
+ /* zp-factor=XXXX (override how often to poison freed zone elements) */
+ if (PE_parse_boot_argn("zp-factor", &free_check_sample_factor, sizeof(free_check_sample_factor))) {
+ printf("Zone poisoning factor override:%u\n", free_check_sample_factor);
+ }
+
+ /*
+ * Check for and set up zone leak detection if requested via boot-args. We recognize two
+ * boot-args:
+ *
+ * zlog=<zone_to_log>
+ * zrecs=<num_records_in_log>
+ *
+ * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
+ * control the size of the log. If zrecs is not specified, a default value is used.
+ */
+
+ if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
+ if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
+
+ /*
+ * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
+ * This prevents accidentally hogging too much kernel memory and making the system
+ * unusable.
+ */
+
+ log_records = MIN(ZRECORDS_MAX, log_records);
+
+ } else {
+ log_records = ZRECORDS_DEFAULT;
+ }
+ }
+
+ simple_lock_init(&all_zones_lock, 0);
+
+ first_zone = ZONE_NULL;
+ last_zone = &first_zone;
+ num_zones = 0;
+
+ /* assertion: nobody else called zinit before us */
+ assert(zone_zone == ZONE_NULL);
+ zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
+ sizeof(struct zone), "zones");
+ zone_change(zone_zone, Z_COLLECT, FALSE);
+ zone_change(zone_zone, Z_CALLERACCT, FALSE);
+ zone_change(zone_zone, Z_NOENCRYPT, TRUE);
+
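+	/* Seed the zone of zones with the fixed zdata region set aside earlier in memory initialization */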
+ zcram(zone_zone, zdata, zdata_size);
+
+ /* initialize fake zones and zone info if tracking by task */
+ if (zinfo_per_task) {
+ vm_size_t zisize = sizeof(zinfo_usage_store_t) * ZINFO_SLOTS;
+ unsigned int i;
+
+ for (i = 0; i < num_fake_zones; i++)
+ fake_zones[i].init(ZINFO_SLOTS - num_fake_zones + i);
+ zinfo_zone = zinit(zisize, zisize * CONFIG_TASK_MAX,
+ zisize, "per task zinfo");
+ zone_change(zinfo_zone, Z_CALLERACCT, FALSE);
+ }
+}
+
+void
+zinfo_task_init(task_t task)
+{
+ if (zinfo_per_task) {
+ task->tkm_zinfo = zalloc(zinfo_zone);
+ memset(task->tkm_zinfo, 0, sizeof(zinfo_usage_store_t) * ZINFO_SLOTS);
+ } else {
+ task->tkm_zinfo = NULL;
+ }
+}
+
+void
+zinfo_task_free(task_t task)
+{
+ assert(task != kernel_task);
+ if (task->tkm_zinfo != NULL) {
+ zfree(zinfo_zone, task->tkm_zinfo);
+ task->tkm_zinfo = NULL;
+ }
+}
+
+void
+zone_init(
+ vm_size_t max_zonemap_size)
+{
+ kern_return_t retval;
+ vm_offset_t zone_min;
+ vm_offset_t zone_max;
+
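+	/* Create the zone_map submap of kernel_map; zalloc_canblock grows zones out of this map */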
+ retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
+ FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
+ &zone_map);
+
+ if (retval != KERN_SUCCESS)
+ panic("zone_init: kmem_suballoc failed");
+ zone_max = zone_min + round_page(max_zonemap_size);
+#if CONFIG_GZALLOC
+ gzalloc_init(max_zonemap_size);
+#endif
+ /*
+ * Setup garbage collection information:
+ */
+ zone_map_min_address = zone_min;
+ zone_map_max_address = zone_max;
+
+ zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
+ zone_page_table_used_size = sizeof(zone_page_table);
+
+ zone_page_table_second_level_size = 1;
+ zone_page_table_second_level_shift_amount = 0;
+
+ /*
+ * Find the power of 2 for the second level that allows
+ * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
+ * slots.
+ */
+ while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
+ zone_page_table_second_level_size <<= 1;
+ zone_page_table_second_level_shift_amount++;
+ }
+
+ lck_grp_attr_setdefault(&zone_lck_grp_attr);
+ lck_grp_init(&zone_lck_grp, "zones", &zone_lck_grp_attr);
+ lck_attr_setdefault(&zone_lck_attr);
+ lck_mtx_init_ext(&zone_gc_lock, &zone_lck_ext, &zone_lck_grp, &zone_lck_attr);
+
+#if CONFIG_ZLEAKS
+ /*
+ * Initialize the zone leak monitor
+ */
+ zleak_init(max_zonemap_size);
+#endif /* CONFIG_ZLEAKS */
+}
+
+void
+zone_page_table_expand(zone_page_index_t pindex)
+{
+ unsigned int first_index;
+ struct zone_page_table_entry * volatile * first_level_ptr;
+
+ assert(pindex < zone_pages);
+
+ first_index = zone_page_table_first_level_slot(pindex);
+ first_level_ptr = &zone_page_table[first_index];
+
+ if (*first_level_ptr == NULL) {
+ /*
+ * We were able to verify that the old first-level slot
+ * was NULL, so attempt to populate it.
+ */
+
+ vm_offset_t second_level_array = 0;
+ vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
+ zone_page_index_t i;
+ struct zone_page_table_entry *entry_array;
+
+ if (kmem_alloc_kobject(zone_map, &second_level_array,
+ second_level_size) != KERN_SUCCESS) {
+ panic("zone_page_table_expand");
+ }
+
+ /*
+ * zone_gc() may scan the "zone_page_table" directly,
+ * so make sure any slots have a valid unused state.
+ */
+ entry_array = (struct zone_page_table_entry *)second_level_array;
+ for (i=0; i < zone_page_table_second_level_size; i++) {
+ entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
+ entry_array[i].collect_count = 0;
+ }
+
+ if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
+ /* Old slot was NULL, replaced with expanded level */
+ OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
+ } else {
+ /* Old slot was not NULL, someone else expanded first */
+ kmem_free(zone_map, second_level_array, second_level_size);
+ }
+ } else {
+ /* Old slot was not NULL; it has already been expanded */
+ }
+}
+
+struct zone_page_table_entry *
+zone_page_table_lookup(zone_page_index_t pindex)
+{
+ unsigned int first_index = zone_page_table_first_level_slot(pindex);
+ struct zone_page_table_entry *second_level = zone_page_table[first_index];
+
+ if (second_level) {
+ return &second_level[zone_page_table_second_level_slot(pindex)];
+ }
+
+ return NULL;
+}
+
+extern volatile SInt32 kfree_nop_count;
+
+#pragma mark -
+#pragma mark zalloc_canblock
+
+/*
+ * zalloc returns an element from the specified zone.
+ */
+void *
+zalloc_canblock(
+ register zone_t zone,
+ boolean_t canblock)
+{
+ vm_offset_t addr = 0;
+ kern_return_t retval;
+ uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
+ int numsaved = 0;
+ int i;
+ boolean_t zone_replenish_wakeup = FALSE;
+ boolean_t did_gzalloc;
+
+ did_gzalloc = FALSE;
+#if CONFIG_ZLEAKS
+ uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
+#endif /* CONFIG_ZLEAKS */
+
+ assert(zone != ZONE_NULL);
+
+#if CONFIG_GZALLOC
+ addr = gzalloc_alloc(zone, canblock);
+ did_gzalloc = (addr != 0);
+#endif
+
+ lock_zone(zone);
+
+ /*
+ * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
+ */
+
+ if (DO_LOGGING(zone))
+ numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH);
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace every zleak_sample_factor
+ * allocations in this zone.
+ */
+ if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
+ zone->zleak_capture = 1;
+
+ /* Avoid backtracing twice if zone logging is on */
+ if (numsaved == 0)
+ zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
+ else
+ zleak_tracedepth = numsaved;
+ }
+#endif /* CONFIG_ZLEAKS */
+
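+	/* gzalloc may have already satisfied the request; only consult the zone free list if it did not */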
+ if (__probable(addr == 0))
+ alloc_from_zone(zone, (void **) &addr);
+
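+	/*
+	 * Note whether the zone has dropped below its refill watermark; the
+	 * replenish thread is woken once the zone lock has been released.
+	 */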
+ if (zone->async_prio_refill &&
+ ((zone->cur_size - (zone->count * zone->elem_size)) <
+ (zone->prio_refill_watermark * zone->elem_size))) {
+ zone_replenish_wakeup = TRUE;
+ zone_replenish_wakeups_initiated++;
+ }
+
+ while ((addr == 0) && canblock) {
+ /*
+ * If nothing was there, try to get more
+ */
+ if (zone->doing_alloc) {
+ /*
+ * Someone is allocating memory for this zone.
+ * Wait for it to show up, then try again.
+ */
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ } else if (zone->doing_gc) {
+ /* zone_gc() is running. Since we need an element
+ * from the free list that is currently being
+ * collected, set the waiting bit and try to
+ * interrupt the GC process, and try again
+ * when we obtain the lock.
+ */
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ } else {
+ vm_offset_t space;
+ vm_size_t alloc_size;
+ int retry = 0;
+
+ if ((zone->cur_size + zone->elem_size) >
+ zone->max_size) {
+ if (zone->exhaustible)
+ break;
+ if (zone->expandable) {
+ /*
+ * We're willing to overflow certain
+ * zones, but not without complaining.
+ *
+ * This is best used in conjunction
+ * with the collectable flag. What we
+ * want is an assurance we can get the
+ * memory back, assuming there's no
+ * leak.
+ */
+ zone->max_size += (zone->max_size >> 1);
+ } else {
+ unlock_zone(zone);
+
+ panic_include_zprint = TRUE;
+#if CONFIG_ZLEAKS
+ if (zleak_state & ZLEAK_STATE_ACTIVE)
+ panic_include_ztrace = TRUE;
+#endif /* CONFIG_ZLEAKS */
+ panic("zalloc: zone \"%s\" empty.", zone->zone_name);
+ }
+ }
+ zone->doing_alloc = TRUE;
+ unlock_zone(zone);
+
+ for (;;) {
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+
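+				/* When the VM pool is low, or after a previous hard failure, fall back to a single-page allocation rather than the zone's full alloc_size */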
+ if (vm_pool_low() || retry >= 1)
+ alloc_size =
+ round_page(zone->elem_size);
+ else
+ alloc_size = zone->alloc_size;
+
+ if (zone->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ retval = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+ if (retval == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+
+#if CONFIG_ZLEAKS
+ if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) {
+ if (zone_map->size >= zleak_global_tracking_threshold) {
+ kern_return_t kr;
+
+ kr = zleak_activate();
+ if (kr != KERN_SUCCESS) {
+ printf("Failed to activate live zone leak debugging (%d).\n", kr);
+ }
+ }
+ }
+
+ if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) {
+ if (zone->cur_size > zleak_per_zone_tracking_threshold) {
+ zone->zleak_on = TRUE;
+ }
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ zcram(zone, space, alloc_size);
+
+ break;
+ } else if (retval != KERN_RESOURCE_SHORTAGE) {
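+				/*
+				 * Hard failure (not a transient shortage): force a garbage
+				 * collection on the second failed attempt and panic on the third.
+				 */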
+ retry++;
+
+ if (retry == 2) {
+ zone_gc(TRUE);
+ printf("zalloc did gc\n");
+ zone_display_zprint();
+ }
+ if (retry == 3) {
+ panic_include_zprint = TRUE;
+#if CONFIG_ZLEAKS
+ if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
+ panic_include_ztrace = TRUE;
+ }
+#endif /* CONFIG_ZLEAKS */
+ /* TODO: Change this to something more descriptive, perhaps
+ * 'zone_map exhausted' only if we get retval 3 (KERN_NO_SPACE).
+ */
+ panic("zalloc: \"%s\" (%d elements) retry fail %d, kfree_nop_count: %d", zone->zone_name, zone->count, retval, (int)kfree_nop_count);
+ }
+ } else {
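+				/* KERN_RESOURCE_SHORTAGE: drop out of the loop and wait for pages (VM_PAGE_WAIT below) before retrying */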
+ break;
+ }
+ }
+ lock_zone(zone);
+ zone->doing_alloc = FALSE;
+ if (zone->waiting) {
+ zone->waiting = FALSE;
+ zone_wakeup(zone);
+ }
+ alloc_from_zone(zone, (void **) &addr);
+ if (addr == 0 &&
+ retval == KERN_RESOURCE_SHORTAGE) {
+ unlock_zone(zone);
+
+ VM_PAGE_WAIT();
+ lock_zone(zone);
+ }
+ }
+ if (addr == 0)
+ alloc_from_zone(zone, (void **) &addr);
+ }
+
+#if CONFIG_ZLEAKS
+ /* Zone leak detection:
+ * If we're sampling this allocation, add it to the zleaks hash table.
+ */
+ if (addr && zleak_tracedepth > 0) {
+ /* Sampling can fail if another sample is happening at the same time in a different zone. */
+ if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
+ /* If it failed, roll back the counter so we sample the next allocation instead. */
+ zone->zleak_capture = zleak_sample_factor;
+ }
+ }
+#endif /* CONFIG_ZLEAKS */
+
+
+ /*
+ * See if we should be logging allocations in this zone. Logging is rarely done except when a leak is
+ * suspected, so this code rarely executes. We need to do this while still holding the zone lock
+ * since it protects the various log related data structures.
+ */
+
+ if (DO_LOGGING(zone) && addr) {
+
+ /*
+ * Look for a place to record this new allocation. We implement two different logging strategies
+ * depending on whether we're looking for the source of a zone leak or a zone corruption. When looking
+ * for a leak, we want to log as many allocations as possible in order to clearly identify the leaker
+ * among all the records. So we look for an unused slot in the log and fill that in before overwriting
+ * an old entry. When looking for a corruption however, it's better to have a chronological log of all
+ * the allocations and frees done in the zone so that the history of operations for a specific zone
+ * element can be inspected. So in this case, we treat the log as a circular buffer and overwrite the
+ * oldest entry whenever a new one needs to be added.
+ *
+ * The corruption_debug_flag flag tells us what style of logging to do. It's set if we're supposed to be
+ * doing corruption style logging (indicated via -zc in the boot-args).
+ */
+
+ if (!corruption_debug_flag && zrecords[zcurrent].z_element && zrecorded < log_records) {
+
+ /*
+ * If we get here, we're doing leak-style logging and there are still unused entries in
+ * the log (since zrecorded is smaller than the size of the log). Look for an unused slot
+ * starting at zcurrent and wrap around if we reach the end of the buffer. If the buffer
+ * is already full, we just fall through and overwrite the element indexed by zcurrent.
+ */
+
+ for (i = zcurrent; i < log_records; i++) {
+ if (zrecords[i].z_element == NULL) {
+ zcurrent = i;
+ goto empty_slot;
+ }
+ }
+
+ for (i = 0; i < zcurrent; i++) {
+ if (zrecords[i].z_element == NULL) {
+ zcurrent = i;
+ goto empty_slot;
+ }
+ }
+ }
+
+ /*
+ * Save a record of this allocation
+ */
+
+empty_slot:
+ if (zrecords[zcurrent].z_element == NULL)
+ zrecorded++;
+
+ zrecords[zcurrent].z_element = (void *)addr;
+ zrecords[zcurrent].z_time = ztime++;
+ zrecords[zcurrent].z_opcode = ZOP_ALLOC;
+
+ for (i = 0; i < numsaved; i++)
+ zrecords[zcurrent].z_pc[i] = (void*) zbt[i];
+
+ for (; i < MAX_ZTRACE_DEPTH; i++)
+ zrecords[zcurrent].z_pc[i] = 0;
+
+ zcurrent++;
+
+ if (zcurrent >= log_records)
+ zcurrent = 0;
+ }
+
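+	/*
+	 * A non-blocking caller came up empty: schedule the async allocation
+	 * callout to grow the zone, then try the free list one more time.
+	 */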
+ if ((addr == 0) && !canblock && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) {
+ zone->async_pending = TRUE;
+ unlock_zone(zone);
+ thread_call_enter(&zone->call_async_alloc);
+ lock_zone(zone);
+ alloc_from_zone(zone, (void **) &addr);
+ }
+
+#if ZONE_DEBUG
+ if (!did_gzalloc && addr && zone_debug_enabled(zone)) {
+ enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
+ addr += ZONE_DEBUG_OFFSET;
+ }
+#endif
+
+#if CONFIG_ZLEAKS
+ if (addr != 0) {
+ zone->num_allocs++;
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ unlock_zone(zone);
+
+ if (zone_replenish_wakeup)
+ thread_wakeup(&zone->zone_replenish_thread);
+
+ TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, zone->elem_size, addr);
+
+ if (addr) {
+ thread_t thr = current_thread();
+ task_t task;
+ zinfo_usage_t zinfo;
+ vm_size_t sz = zone->elem_size;
+
+ if (zone->caller_acct)
+ ledger_credit(thr->t_ledger, task_ledgers.tkm_private, sz);
+ else
+ ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, sz);
+
+ if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].alloc);
+ }
+ return((void *)addr);
+}
+
+
+void *
+zalloc(
+ register zone_t zone)
+{
+ return( zalloc_canblock(zone, TRUE) );
+}
+
+void *
+zalloc_noblock(
+ register zone_t zone)
+{
+ return( zalloc_canblock(zone, FALSE) );
+}
+
+void
+zalloc_async(
+ thread_call_param_t p0,
+ __unused thread_call_param_t p1)
+{
+ void *elt;
+
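+	/*
+	 * Allocate one element with blocking allowed and free it straight back;
+	 * this grows the zone on behalf of the non-blocking caller that
+	 * scheduled the callout, then clears the zone's async_pending flag.
+	 */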
+ elt = zalloc_canblock((zone_t)p0, TRUE);
+ zfree((zone_t)p0, elt);
+ lock_zone(((zone_t)p0));
+ ((zone_t)p0)->async_pending = FALSE;
+ unlock_zone(((zone_t)p0));
+}
+
+/*
+ * zget returns an element from the specified zone, or NULL
+ * immediately if none is available.
+ *
+ * This form should be used when you cannot block (for example,
+ * when processing an interrupt).
+ *
+ * XXX: It seems like only vm_page_grab_fictitious_common uses this, and its
+ * friend vm_page_more_fictitious can block, so it doesn't seem like
+ * this is used for interrupts any more....
+ */
+void *
+zget(
+ register zone_t zone)
+{
+ vm_offset_t addr;
+
+#if CONFIG_ZLEAKS
+ uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used for zone leak detection */
+ uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */
+#endif /* CONFIG_ZLEAKS */
+
+ assert( zone != ZONE_NULL );
+
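+	/* zget must not block, so give up immediately rather than contend for the zone lock */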
+ if (!lock_try_zone(zone))
+ return NULL;
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace
+ */
+ if (zone->zleak_on && (zone->zleak_capture++ % zleak_sample_factor == 0)) {
+ zone->zleak_capture = 1;
+ zleak_tracedepth = fastbacktrace(zbt, MAX_ZTRACE_DEPTH);
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ alloc_from_zone(zone, (void **) &addr);
+#if ZONE_DEBUG
+ if (addr && zone_debug_enabled(zone)) {
+ enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
+ addr += ZONE_DEBUG_OFFSET;
+ }
+#endif /* ZONE_DEBUG */
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: record the allocation
+ */
+ if (zone->zleak_on && zleak_tracedepth > 0 && addr) {
+ /* Sampling can fail if another sample is happening at the same time in a different zone. */
+ if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) {
+ /* If it failed, roll back the counter so we sample the next allocation instead. */
+ zone->zleak_capture = zleak_sample_factor;
+ }
+ }
+
+ if (addr != 0) {
+ zone->num_allocs++;
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ unlock_zone(zone);
+
+ return((void *) addr);
+}
+
+/* Keep this FALSE by default. Large-memory machines run orders of magnitude
+ slower in debug mode when this is TRUE. Use the debugger to enable it if needed */
+/* static */ boolean_t zone_check = FALSE;
+
+static zone_t zone_last_bogus_zone = ZONE_NULL;
+static vm_offset_t zone_last_bogus_elem = 0;
+
+void
+zfree(
+ register zone_t zone,
+ void *addr)
+{
+ vm_offset_t elem = (vm_offset_t) addr;
+ void *zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */
+ int numsaved = 0;
+ boolean_t gzfreed = FALSE;
+
+ assert(zone != ZONE_NULL);
+
+ /*
+ * If zone logging is turned on and this is the zone we're tracking, grab a backtrace.
+ */
+
+ if (DO_LOGGING(zone))
+ numsaved = OSBacktrace(&zbt[0], MAX_ZTRACE_DEPTH);
+
+#if MACH_ASSERT
+ /* Basic sanity checks */
+ if (zone == ZONE_NULL || elem == (vm_offset_t)0)
+ panic("zfree: NULL");
+ /* zone_gc assumes zones are never freed */
+ if (zone == zone_zone)
+ panic("zfree: freeing to zone_zone breaks zone_gc!");
+#endif
+
+#if CONFIG_GZALLOC
+ gzfreed = gzalloc_free(zone, addr);
+#endif
+
+ TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr);
+
+ if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign &&
+ !from_zone_map(elem, zone->elem_size))) {
+#if MACH_ASSERT
+ panic("zfree: non-allocated memory in collectable zone!");
+#endif
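+		/* Remember the offending zone and element for debugging and bail out without touching the free list */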
+ zone_last_bogus_zone = zone;
+ zone_last_bogus_elem = elem;
+ return;
+ }
+
+ lock_zone(zone);
+
+ /*
+ * See if we're doing logging on this zone. There are two styles of logging used depending on
+ * whether we're trying to catch a leak or corruption. See comments above in zalloc for details.
+ */
+
+ if (DO_LOGGING(zone)) {
+ int i;
+
+ if (corruption_debug_flag) {
+
+ /*
+ * We're logging to catch a corruption. Add a record of this zfree operation
+ * to the log.
+ */
+
+ if (zrecords[zcurrent].z_element == NULL)
+ zrecorded++;
+
+ zrecords[zcurrent].z_element = (void *)addr;
+ zrecords[zcurrent].z_time = ztime++;
+ zrecords[zcurrent].z_opcode = ZOP_FREE;
+
+ for (i = 0; i < numsaved; i++)
+ zrecords[zcurrent].z_pc[i] = zbt[i];
+
+ for (; i < MAX_ZTRACE_DEPTH; i++)
+ zrecords[zcurrent].z_pc[i] = 0;
+
+ zcurrent++;
+
+ if (zcurrent >= log_records)
+ zcurrent = 0;
+
+ } else {
+
+ /*
+ * We're logging to catch a leak. Remove any record we might have for this
+ * element since it's being freed. Note that we may not find it if the buffer
+ * overflowed and that's OK. Since the log is of a limited size, old records
+ * get overwritten if there are more zallocs than zfrees.
+ */
+
+ for (i = 0; i < log_records; i++) {
+ if (zrecords[i].z_element == addr) {
+ zrecords[i].z_element = NULL;
+ zcurrent = i;
+ zrecorded--;
+ break;
+ }
+ }
+ }
+ }
+
+
+#if ZONE_DEBUG
+ if (!gzfreed && zone_debug_enabled(zone)) {
+ queue_t tmp_elem;
+
+ elem -= ZONE_DEBUG_OFFSET;
+ if (zone_check) {
+ /* check the zone's consistency */
+
+ for (tmp_elem = queue_first(&zone->active_zones);
+ !queue_end(tmp_elem, &zone->active_zones);
+ tmp_elem = queue_next(tmp_elem))
+ if (elem == (vm_offset_t)tmp_elem)
+ break;
+ if (elem != (vm_offset_t)tmp_elem)
+ panic("zfree()ing element from wrong zone");
+ }
+ remqueue((queue_t) elem);
+ }
+#endif /* ZONE_DEBUG */
+ if (zone_check) {
+ vm_offset_t this;
+
+ /* check the zone's consistency */
+
+ for (this = zone->free_elements;
+ this != 0;
+ this = * (vm_offset_t *) this)
+ if (!pmap_kernel_va(this) || this == elem)
+ panic("zfree");
+ }
+
+ if (__probable(!gzfreed))
+ free_to_zone(zone, (void *) elem);
+
+#if MACH_ASSERT
+ if (zone->count < 0)
+ panic("zfree: count < 0!");
+#endif
+
+
+#if CONFIG_ZLEAKS
+ zone->num_frees++;
+
+ /*
+ * Zone leak detection: un-track the allocation
+ */
+ if (zone->zleak_on) {
+ zleak_free(elem, zone->elem_size);
+ }
+#endif /* CONFIG_ZLEAKS */
+
+ /*
+ * If elements are a page or larger and memory is low,
+ * request that the zone garbage collector run the next
+ * time the pageout thread runs.
+ */
+ if (zone->elem_size >= PAGE_SIZE &&
+ vm_pool_low()){
+ zone_gc_forced = TRUE;
+ }
+ unlock_zone(zone);
+
+ {
+ thread_t thr = current_thread();
+ task_t task;
+ zinfo_usage_t zinfo;
+ vm_size_t sz = zone->elem_size;
+
+ if (zone->caller_acct)
+ ledger_debit(thr->t_ledger, task_ledgers.tkm_private, sz);
+ else
+ ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, sz);
+
+ if ((task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
+ OSAddAtomic64(sz, (int64_t *)&zinfo[zone->index].free);
+ }
+}
+
+
+/* Change a zone's flags.
+ * This routine must be called immediately after zinit.
+ */
+void
+zone_change(
+ zone_t zone,
+ unsigned int item,
+ boolean_t value)
+{
+ assert( zone != ZONE_NULL );
+ assert( value == TRUE || value == FALSE );
+
+ switch(item){
+ case Z_NOENCRYPT:
+ zone->noencrypt = value;
+ break;
+ case Z_EXHAUST:
+ zone->exhaustible = value;
+ break;
+ case Z_COLLECT:
+ zone->collectable = value;
+ break;
+ case Z_EXPAND:
+ zone->expandable = value;
+ break;
+ case Z_FOREIGN:
+ zone->allows_foreign = value;
+ break;
+ case Z_CALLERACCT:
+ zone->caller_acct = value;
+ break;
+ case Z_NOCALLOUT:
+ zone->no_callout = value;
+ break;
+ case Z_GZALLOC_EXEMPT:
+ zone->gzalloc_exempt = value;
+#if CONFIG_GZALLOC
+ gzalloc_reconfigure(zone);
+#endif
+ break;
+ case Z_ALIGNMENT_REQUIRED:
+ zone->alignment_required = value;
+#if ZONE_DEBUG
+ zone_debug_disable(zone);
+#endif
+#if CONFIG_GZALLOC
+ gzalloc_reconfigure(zone);
+#endif
+ break;
+ default:
+ panic("Zone_change: Wrong Item Type!");
+ /* break; */
+ }
+}
+
+/*
+ * Return the expected number of free elements in the zone.
+ * This calculation will be incorrect if items are zfree'd that
+ * were never zalloc'd/zget'd. The correct way to stuff memory
+ * into a zone is by zcram.
+ */
+
+integer_t
+zone_free_count(zone_t zone)
+{
+ integer_t free_count;
+
+ lock_zone(zone);
+ free_count = (integer_t)(zone->cur_size/zone->elem_size - zone->count);
+ unlock_zone(zone);
+
+ assert(free_count >= 0);
+
+ return(free_count);
+}
+
+/*
+ * Zone garbage collection subroutines
+ */
+
+boolean_t
+zone_page_collectable(
+ vm_offset_t addr,
+ vm_size_t size)
+{
+ struct zone_page_table_entry *zp;
+ zone_page_index_t i, j;
+
+#if ZONE_ALIAS_ADDR
+ addr = zone_virtual_addr(addr);
+#endif
+#if MACH_ASSERT
+ if (!from_zone_map(addr, size))
+ panic("zone_page_collectable");
+#endif
+
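+	/* Convert [addr, addr + size) into an inclusive range of zone page indices */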
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
+ if (zp->collect_count == zp->alloc_count)
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+void
+zone_page_keep(
+ vm_offset_t addr,
+ vm_size_t size)
+{
+ struct zone_page_table_entry *zp;
+ zone_page_index_t i, j;
+
+#if ZONE_ALIAS_ADDR
+ addr = zone_virtual_addr(addr);
+#endif
+#if MACH_ASSERT
+ if (!from_zone_map(addr, size))
+ panic("zone_page_keep");
+#endif
+
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
+ zp->collect_count = 0;
+ }
+}
+
+void
+zone_page_collect(
+ vm_offset_t addr,
+ vm_size_t size)
+{
+ struct zone_page_table_entry *zp;
+ zone_page_index_t i, j;
+
+#if ZONE_ALIAS_ADDR
+ addr = zone_virtual_addr(addr);
+#endif
+#if MACH_ASSERT
+ if (!from_zone_map(addr, size))
+ panic("zone_page_collect");
+#endif
+
+ i = (zone_page_index_t)atop_kernel(addr-zone_map_min_address);
+ j = (zone_page_index_t)atop_kernel((addr+size-1) - zone_map_min_address);
+
+ for (; i <= j; i++) {
+ zp = zone_page_table_lookup(i);
+ ++zp->collect_count;
+ }
+}
+
+void
+zone_page_init(
+ vm_offset_t addr,
+ vm_size_t size)
+{
+ struct zone_page_table_entry *zp;
+ zone_page_index_t i, j;