+ * Support for kern.zleak.active sysctl - a simplified
+ * version of the zleak_state variable.
+ */
+int
+get_zleak_state(void)
+{
+ if (zleak_state & ZLEAK_STATE_FAILED)
+ return (-1);
+ if (zleak_state & ZLEAK_STATE_ACTIVE)
+ return (1);
+ return (0);
+}
+
+#endif
+
+
+kern_return_t
+zleak_activate(void)
+{
+ kern_return_t retval;
+ vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
+ vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
+ void *allocations_ptr = NULL;
+ void *traces_ptr = NULL;
+
+ /* Only one thread attempts to activate at a time; quick unlocked check first, re-checked under the lock below */
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ return KERN_SUCCESS;
+ }
+
+ /* Indicate that we're doing the setup */
+ lck_spin_lock(&zleak_lock);
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ lck_spin_unlock(&zleak_lock);
+ return KERN_SUCCESS;
+ }
+
+ zleak_state |= ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ /* Allocate and zero tables */
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ bzero(allocations_ptr, z_alloc_size);
+ bzero(traces_ptr, z_trace_size);
+
+ /* Everything's set. Install tables, mark active. */
+ zallocations = allocations_ptr;
+ ztraces = traces_ptr;
+
+ /*
+ * Initialize the top_ztrace to the first entry in ztraces,
+ * so we don't have to check for null in zleak_log
+ */
+ top_ztrace = &ztraces[0];
+
+ /*
+ * Note that we do need a barrier between installing
+ * the tables and setting the active flag, because the zfree()
+ * path accesses the table without a lock if we're active.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_ACTIVE;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ return KERN_SUCCESS;
+
+fail:
+ /*
+ * If we fail to allocate memory, don't further tax
+ * the system by trying again.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_FAILED;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ if (allocations_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
+ }
+
+ if (traces_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
+ }
+
+ return retval;
+}
+
+/*
+ * TODO: What about allocations that never get deallocated,
+ * especially ones with unique backtraces? Should we wait to record
+ * until after boot has completed?
+ * (How many persistent zallocs are there?)
+ */
+
+/*
+ * This function records the allocation in the allocations table
+ * and stores the associated backtrace in the traces table
+ * (or just bumps the trace's refcount if that trace is already recorded).
+ * If the allocation slot is in use, the old allocation is replaced with the new one
+ * and the old allocation's trace has its refcount decremented.
+ * If the trace slot is in use by a different trace, it bails without recording.
+ * The "refcount" is weighted: it is adjusted by the number of bytes the allocation consumes.
+ * The return value indicates whether the caller should try again later
+ * (FALSE means someone else was holding the hash-table lock).
+ */
+static boolean_t
+zleak_log(uintptr_t* bt,
+ uintptr_t addr,
+ uint32_t depth,
+ vm_size_t allocation_size)
+{
+ /* Quit if there's someone else modifying the hash tables */
+ if (!lck_spin_try_lock(&zleak_lock)) {
+ z_total_conflicts++;
+ return FALSE;
+ }
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
+ struct ztrace* trace = &ztraces[trace_index];
+
+ allocation->za_hit_count++;
+ trace->zt_hit_count++;
+
+ /*
+ * If the allocation bucket we want to be in is occupied, and if the occupier
+ * has the same trace as us, just bail.
+ */
+ if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
+ z_alloc_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ }
+
+ /* STEP 1: Store the backtrace in the traces array. */
+ /* A size of zero indicates that the trace bucket is free. */
+
+ if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) {
+ /*
+ * Different unique trace with same hash!
+ * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
+ * and get out of the way for a later attempt.
+ */
+ trace->zt_collisions++;
+ z_trace_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ } else if (trace->zt_size > 0) {
+ /* Same trace, already added, so increment refcount */
+ trace->zt_size += allocation_size;
+ } else {
+ /* Found an unused trace bucket, record the trace here! */
+ if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
+ z_trace_overwrites++;
+
+ z_trace_recorded++;
+ trace->zt_size = allocation_size;
+ memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) );
+
+ trace->zt_depth = depth;
+ trace->zt_collisions = 0;
+ }
+
+ /* STEP 2: Store the allocation record in the allocations array. */
+
+ if (allocation->za_element != (uintptr_t) 0) {
+ /*
+ * Straight up replace any allocation record that was there. We don't want to do the work
+ * to preserve the allocation entries that were there, because we only record a subset of the
+ * allocations anyways.
+ */
+
+ z_alloc_collisions++;
+
+ struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
+ /* Knock off the old allocation's size, not the new allocation's */
+ associated_trace->zt_size -= allocation->za_size;
+ } else if (allocation->za_trace_index != 0) {
+ /* Slot previously used but not currently in use */
+ z_alloc_overwrites++;
+ }
+
+ allocation->za_element = addr;
+ allocation->za_trace_index = trace_index;
+ allocation->za_size = allocation_size;
+
+ z_alloc_recorded++;
+
+ if (top_ztrace->zt_size < trace->zt_size)
+ top_ztrace = trace;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+}
+
+/*
+ * Free the allocation record and release the stacktrace.
+ * This should be as fast as possible because it will be called for every free.
+ */
+static void
+zleak_free(uintptr_t addr,
+ vm_size_t allocation_size)
+{
+ if (addr == (uintptr_t) 0)
+ return;
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ /* Double-checked locking: check to find out if we're interested, lock, check to make
+ * sure it hasn't changed, then modify it, and release the lock.
+ */
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ /* if the allocation was the one, grab the lock, check again, then delete it */
+ lck_spin_lock(&zleak_lock);
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ struct ztrace *trace;
+
+ /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
+ if (allocation->za_size != allocation_size) {
+ panic("Freeing as size %lu memory that was allocated with size %lu\n",
+ (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
+ }
+
+ trace = &ztraces[allocation->za_trace_index];
+
+ /* size of 0 indicates trace bucket is unused */
+ if (trace->zt_size > 0) {
+ trace->zt_size -= allocation_size;
+ }
+
+ /* A NULL element means the allocation bucket is unused */
+ allocation->za_element = 0;
+ }
+ lck_spin_unlock(&zleak_lock);
+ }
+}
+
+#endif /* CONFIG_ZLEAKS */
+
+/* These functions live outside of CONFIG_ZLEAKS because they are also used in
+ * mbuf.c for mbuf leak detection. That is why they lack the z_ prefix.
+ */
+
+/*
+ * This function captures a backtrace from the current stack and
+ * returns the number of frames captured, limited by max_frames.
+ * It's fast because it does no validity checking on the frame data;
+ * since it's only called from threads that we're going to keep executing,
+ * bad data would have taken the system down eventually anyway.
+ * If this function is inlined, it doesn't record the frame of the function it's inlined into
+ * (because there's no separate stack frame for fastbacktrace itself).
+ */
+
+uint32_t
+fastbacktrace(uintptr_t* bt, uint32_t max_frames)
+{
+ uintptr_t* frameptr = NULL, *frameptr_next = NULL;
+ uintptr_t retaddr = 0;
+ uint32_t frame_index = 0, frames = 0;
+ uintptr_t kstackb, kstackt;
+ thread_t cthread = current_thread();
+
+ if (__improbable(cthread == NULL))
+ return 0;
+
+ kstackb = cthread->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+
+ while (frameptr != NULL && frame_index < max_frames ) {
+ /* Next frame pointer is pointed to by the previous one */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Bail if we see a zero in the stack frame; that means we've reached the top of the stack. */
+ /* It also means the return address is worthless, so don't record it. */
+ if (frameptr_next == NULL)
+ break;
+ /* Verify thread stack bounds */
+ if (((uintptr_t)frameptr_next > kstackt) || ((uintptr_t)frameptr_next < kstackb))
+ break;
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+ /* Store it in the backtrace array */
+ bt[frame_index++] = retaddr;
+
+ frameptr = frameptr_next;
+ }
+
+ /* Save the number of frames captured for return value */
+ frames = frame_index;
+
+ /* Fill in the rest of the backtrace with zeros */
+ while (frame_index < max_frames)
+ bt[frame_index++] = 0;
+
+ return frames;
+}
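+
+/*
+ * Usage sketch (hypothetical caller; the depth constant is assumed): the zleak
+ * code above captures and records a trace along these lines:
+ *
+ *     uintptr_t bt[MAX_ZTRACE_DEPTH];
+ *     uint32_t depth = fastbacktrace(bt, MAX_ZTRACE_DEPTH);
+ *     (void) zleak_log(bt, addr, depth, allocation_size);
+ *
+ * Any unused tail of bt[] is zero-filled by fastbacktrace.
+ */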
+
+/* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
+uintptr_t
+hash_mix(uintptr_t x)
+{
+#ifndef __LP64__
+ x += ~(x << 15);
+ x ^= (x >> 10);
+ x += (x << 3 );
+ x ^= (x >> 6 );
+ x += ~(x << 11);
+ x ^= (x >> 16);
+#else
+ x += ~(x << 32);
+ x ^= (x >> 22);
+ x += ~(x << 13);
+ x ^= (x >> 8 );
+ x += (x << 3 );
+ x ^= (x >> 15);
+ x += ~(x << 27);
+ x ^= (x >> 31);
+#endif
+ return x;
+}
+
+uint32_t
+hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
+{
+
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ while (depth) {
+ hash += bt[--depth];
+ }
+
+ hash = hash_mix(hash) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/*
+ * TODO: Determine how well distributed this is.
+ * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0xFFFF, which makes a great bitmask.
+ */
+uint32_t
+hashaddr(uintptr_t pt, uint32_t max_size)
+{
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ hash = hash_mix(pt) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/* End of all leak-detection code */
+#pragma mark -
+
+/*
+ * zinit initializes a new zone. The zone data structures themselves
+ * are stored in a zone, which is initially a static structure that
+ * is initialized by zone_init.
+ */
+zone_t
+zinit(
+ vm_size_t size, /* the size of an element */
+ vm_size_t max, /* maximum memory to use */
+ vm_size_t alloc, /* allocation size */
+ const char *name) /* a name for the zone */
+{
+ zone_t z;
+
+ if (zone_zone == ZONE_NULL) {
+
+ z = (struct zone *)zdata;
+ zdata += sizeof(*z);
+ zdata_size -= sizeof(*z);
+ } else
+ z = (zone_t) zalloc(zone_zone);
+
+ if (z == ZONE_NULL)
+ return(ZONE_NULL);
+
+ /*
+ * Round off all the parameters appropriately.
+ */
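+ /*
+ * The rounding below makes size a multiple of sizeof(z->free_elements), the free-list
+ * link that has to fit in every element. For example, assuming an 8-byte free_elements
+ * field, a 20-byte request becomes ((19 + 8) - (19 % 8)) = 24 bytes.
+ */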
+ if (size < sizeof(z->free_elements))
+ size = sizeof(z->free_elements);
+ size = ((size-1) + sizeof(z->free_elements)) -
+ ((size-1) % sizeof(z->free_elements));
+ if (alloc == 0)
+ alloc = PAGE_SIZE;
+ alloc = round_page(alloc);
+ max = round_page(max);
+ /*
+ * We look for an allocation size with less than 1% waste
+ * up to 5 pages in size...
+ * Otherwise, we look for the allocation size with the least fragmentation
+ * in the range of 1 - 5 pages.
+ * This size will be used unless
+ * the user's suggestion is larger AND has less fragmentation.
+ */
+#if ZONE_ALIAS_ADDR
+ if ((size < PAGE_SIZE) && (PAGE_SIZE % size <= PAGE_SIZE / 10))
+ alloc = PAGE_SIZE;
+ else
+#endif
+#if defined(__LP64__)
+ if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
+#endif
+ {
+ vm_size_t best, waste; unsigned int i;
+ best = PAGE_SIZE;
+ waste = best % size;
+
+ for (i = 1; i <= 5; i++) {
+ vm_size_t tsize, twaste;
+
+ tsize = i * PAGE_SIZE;
+
+ if ((tsize % size) < (tsize / 100)) {
+ alloc = tsize;
+ goto use_this_allocation;
+ }
+ twaste = tsize % size;
+ if (twaste < waste)
+ best = tsize, waste = twaste;
+ }
+ if (alloc <= best || (alloc % size >= waste))
+ alloc = best;
+ }
+use_this_allocation:
+ if (max && (max < alloc))
+ max = alloc;
+
+ z->free_elements = 0;
+ z->cur_size = 0;
+ z->max_size = max;
+ z->elem_size = size;
+ z->alloc_size = alloc;
+ z->zone_name = name;
+ z->count = 0;
+ z->sum_count = 0LL;
+ z->doing_alloc = FALSE;
+ z->doing_gc = FALSE;
+ z->exhaustible = FALSE;
+ z->collectable = TRUE;
+ z->allows_foreign = FALSE;
+ z->expandable = TRUE;
+ z->waiting = FALSE;
+ z->async_pending = FALSE;
+ z->caller_acct = TRUE;
+ z->noencrypt = FALSE;
+ z->no_callout = FALSE;
+ z->async_prio_refill = FALSE;
+ z->gzalloc_exempt = FALSE;
+ z->alignment_required = FALSE;
+ z->prio_refill_watermark = 0;
+ z->zone_replenish_thread = NULL;
+#if CONFIG_ZLEAKS
+ z->num_allocs = 0;
+ z->num_frees = 0;
+ z->zleak_capture = 0;
+ z->zleak_on = FALSE;
+#endif /* CONFIG_ZLEAKS */
+
+#if ZONE_DEBUG
+ z->active_zones.next = z->active_zones.prev = NULL;
+ zone_debug_enable(z);
+#endif /* ZONE_DEBUG */
+ lock_zone_init(z);
+
+ /*
+ * Add the zone to the all-zones list.
+ * If we are tracking zone info per task, and we have
+ * already used all the available stat slots, then keep
+ * using the overflow zone slot.
+ */
+ z->next_zone = ZONE_NULL;
+ thread_call_setup(&z->call_async_alloc, zalloc_async, z);
+ simple_lock(&all_zones_lock);
+ *last_zone = z;
+ last_zone = &z->next_zone;
+ z->index = num_zones;
+ if (zinfo_per_task) {
+ if (num_zones > ZONES_MAX)
+ z->index = ZONES_MAX;
+ }
+ num_zones++;
+ simple_unlock(&all_zones_lock);
+
+ /*
+ * Check if we should be logging this zone. If so, remember the zone pointer.
+ */
+ if (log_this_zone(z->zone_name, zone_name_to_log)) {
+ zone_of_interest = z;
+ }
+
+ /*
+ * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
+ * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. zlog_ready is set to
+ * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
+ * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
+ * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
+ * right now.
+ */
+ if (zone_of_interest != NULL && zrecords == NULL && zlog_ready) {
+ if (kmem_alloc(kernel_map, (vm_offset_t *)&zrecords, log_records * sizeof(struct zrecord)) == KERN_SUCCESS) {
+
+ /*
+ * We got the memory for the log. Zero it out since the code needs this to identify unused records.
+ * At this point, everything is set up and we're ready to start logging this zone.
+ */
+
+ bzero((void *)zrecords, log_records * sizeof(struct zrecord));
+ printf("zone: logging started for zone %s (%p)\n", zone_of_interest->zone_name, zone_of_interest);
+
+ } else {
+ printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
+ zone_of_interest = NULL;
+ }
+ }
+#if CONFIG_GZALLOC
+ gzalloc_zone_init(z);
+#endif
+ return(z);
+}
+unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated;
+
+static void zone_replenish_thread(zone_t);
+
+/* High priority VM privileged thread used to asynchronously refill a designated
+ * zone, such as the reserved VM map entry zone.
+ */
+static void zone_replenish_thread(zone_t z) {
+ vm_size_t free_size;
+ current_thread()->options |= TH_OPT_VMPRIV;
+
+ for (;;) {
+ lock_zone(z);
+ assert(z->prio_refill_watermark != 0);
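+ /*
+ * free_size is (total zone backing memory) - (bytes in use), i.e. roughly the
+ * bytes sitting on the free list; keep refilling while it is below the
+ * configured watermark's worth of elements.
+ */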
+ while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
+ assert(z->doing_alloc == FALSE);
+ assert(z->async_prio_refill == TRUE);
+
+ unlock_zone(z);
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+ vm_offset_t space, alloc_size;
+ kern_return_t kr;
+
+ if (vm_pool_low())
+ alloc_size = round_page(z->elem_size);
+ else
+ alloc_size = z->alloc_size;
+
+ if (z->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags);
+
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else if (kr == KERN_RESOURCE_SHORTAGE) {
+ VM_PAGE_WAIT();
+ } else if (kr == KERN_NO_SPACE) {
+ kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags);
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else {
+ assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+ }
+
+ lock_zone(z);
+ zone_replenish_loops++;
+ }
+
+ unlock_zone(z);
+ assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
+ thread_block(THREAD_CONTINUE_NULL);
+ zone_replenish_wakeups++;
+ }
+}
+
+void
+zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
+ z->prio_refill_watermark = low_water_mark;
+
+ z->async_prio_refill = TRUE;
+ OSMemoryBarrier();
+ kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
+
+ if (tres != KERN_SUCCESS) {
+ panic("zone_prio_refill_configure, thread create: 0x%x", tres);
+ }
+
+ thread_deallocate(z->zone_replenish_thread);
+}
+
+/*
+ * Cram the given memory into the specified zone.
+ */
+void
+zcram(
+ zone_t zone,
+ vm_offset_t newmem,
+ vm_size_t size)
+{
+ vm_size_t elem_size;
+ boolean_t from_zm = FALSE;
+
+ /* Basic sanity checks */
+ assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
+ assert(!zone->collectable || zone->allows_foreign
+ || (from_zone_map(newmem, size)));
+
+ elem_size = zone->elem_size;
+
+ if (from_zone_map(newmem, size))
+ from_zm = TRUE;
+
+ if (from_zm)
+ zone_page_init(newmem, size);
+
+ lock_zone(zone);
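+ /* Carve the region into elem_size chunks; any trailing remainder smaller than one element is left unused. */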
+ while (size >= elem_size) {
+ free_to_zone(zone, (void *) newmem);
+ if (from_zm)
+ zone_page_alloc(newmem, elem_size);
+ zone->count++; /* compensate for free_to_zone */
+ size -= elem_size;
+ newmem += elem_size;
+ zone->cur_size += elem_size;
+ }
+ unlock_zone(zone);
+}
+
+
+/*
+ * Steal memory for the zone package. Called from
+ * vm_page_bootstrap().
+ */
+void
+zone_steal_memory(void)
+{
+#if CONFIG_GZALLOC
+ gzalloc_configure();
+#endif
+ /* Request enough early memory to get to the pmap zone */
+ zdata_size = 12 * sizeof(struct zone);
+ zdata = (vm_offset_t)pmap_steal_memory(round_page(zdata_size));
+}
+
+
+/*
+ * Fill a zone with enough memory to contain at least nelem elements.
+ * Memory is obtained with kmem_alloc_kobject from the kernel_map.
+ * Return the number of elements actually put into the zone, which may
+ * be more than the caller asked for since the memory allocation is
+ * rounded up to a full page.