+get_zleak_state(void)
+{
+ if (zleak_state & ZLEAK_STATE_FAILED)
+ return (-1);
+ if (zleak_state & ZLEAK_STATE_ACTIVE)
+ return (1);
+ return (0);
+}
+
+#endif
+
+
+kern_return_t
+zleak_activate(void)
+{
+ kern_return_t retval;
+ vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
+ vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
+ void *allocations_ptr = NULL;
+ void *traces_ptr = NULL;
+
+ /* Only one thread attempts to activate at a time */
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ return KERN_SUCCESS;
+ }
+
+ /* Indicate that we're doing the setup */
+ lck_spin_lock(&zleak_lock);
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ lck_spin_unlock(&zleak_lock);
+ return KERN_SUCCESS;
+ }
+
+ zleak_state |= ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ /* Allocate and zero tables */
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_OSFMK);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_OSFMK);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ bzero(allocations_ptr, z_alloc_size);
+ bzero(traces_ptr, z_trace_size);
+
+ /* Everything's set. Install tables, mark active. */
+ zallocations = allocations_ptr;
+ ztraces = traces_ptr;
+
+ /*
+ * Initialize the top_ztrace to the first entry in ztraces,
+ * so we don't have to check for null in zleak_log
+ */
+ top_ztrace = &ztraces[0];
+
+ /*
+ * Note that we do need a barrier between installing
+ * the tables and setting the active flag, because the zfree()
+ * path accesses the table without a lock if we're active.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_ACTIVE;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+	return KERN_SUCCESS;
+
+fail:
+ /*
+ * If we fail to allocate memory, don't further tax
+ * the system by trying again.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_FAILED;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ if (allocations_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
+ }
+
+ if (traces_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
+ }
+
+ return retval;
+}
+
+/*
+ * TODO: What about allocations that never get deallocated,
+ * especially ones with unique backtraces? Should we wait to record
+ * until after boot has completed?
+ * (How many persistent zallocs are there?)
+ */
+
+/*
+ * Records the allocation in the allocations table, and stores the associated
+ * backtrace in the traces table (or just bumps the trace's refcount if the
+ * trace is already recorded).
+ * If the allocation slot is in use, the old allocation is replaced with the
+ * new one and the old trace's refcount is decremented.
+ * If the trace slot is occupied by a different trace (a hash collision), the
+ * function bails out without recording.
+ * The trace refcount is weighted by size: it is incremented by the number of
+ * bytes the allocation consumes.
+ * Returns FALSE if the hash tables were busy and the caller should retry on a
+ * later allocation, TRUE otherwise.
+ */
+static boolean_t
+zleak_log(uintptr_t* bt,
+ uintptr_t addr,
+ uint32_t depth,
+ vm_size_t allocation_size)
+{
+ /* Quit if there's someone else modifying the hash tables */
+ if (!lck_spin_try_lock(&zleak_lock)) {
+ z_total_conflicts++;
+ return FALSE;
+ }
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
+ struct ztrace* trace = &ztraces[trace_index];
+
+ allocation->za_hit_count++;
+ trace->zt_hit_count++;
+
+ /*
+ * If the allocation bucket we want to be in is occupied, and if the occupier
+ * has the same trace as us, just bail.
+ */
+ if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
+ z_alloc_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ }
+
+ /* STEP 1: Store the backtrace in the traces array. */
+ /* A size of zero indicates that the trace bucket is free. */
+
+	if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0) {
+ /*
+ * Different unique trace with same hash!
+ * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
+ * and get out of the way for later chances
+ */
+ trace->zt_collisions++;
+ z_trace_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ } else if (trace->zt_size > 0) {
+ /* Same trace, already added, so increment refcount */
+ trace->zt_size += allocation_size;
+ } else {
+ /* Found an unused trace bucket, record the trace here! */
+ if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */
+ z_trace_overwrites++;
+
+ z_trace_recorded++;
+ trace->zt_size = allocation_size;
+		memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)));
+
+ trace->zt_depth = depth;
+ trace->zt_collisions = 0;
+ }
+
+ /* STEP 2: Store the allocation record in the allocations array. */
+
+ if (allocation->za_element != (uintptr_t) 0) {
+ /*
+ * Straight up replace any allocation record that was there. We don't want to do the work
+ * to preserve the allocation entries that were there, because we only record a subset of the
+ * allocations anyways.
+ */
+
+ z_alloc_collisions++;
+
+ struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
+ /* Knock off old allocation's size, not the new allocation */
+ associated_trace->zt_size -= allocation->za_size;
+ } else if (allocation->za_trace_index != 0) {
+ /* Slot previously used but not currently in use */
+ z_alloc_overwrites++;
+ }
+
+ allocation->za_element = addr;
+ allocation->za_trace_index = trace_index;
+ allocation->za_size = allocation_size;
+
+ z_alloc_recorded++;
+
+ if (top_ztrace->zt_size < trace->zt_size)
+ top_ztrace = trace;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+}
+
+/*
+ * Free the allocation record and release the stacktrace.
+ * This should be as fast as possible because it will be called for every free.
+ */
+static void
+zleak_free(uintptr_t addr,
+ vm_size_t allocation_size)
+{
+ if (addr == (uintptr_t) 0)
+ return;
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ /* Double-checked locking: check to find out if we're interested, lock, check to make
+ * sure it hasn't changed, then modify it, and release the lock.
+ */
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ /* if the allocation was the one, grab the lock, check again, then delete it */
+ lck_spin_lock(&zleak_lock);
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ struct ztrace *trace;
+
+ /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
+ if (allocation->za_size != allocation_size) {
+ panic("Freeing as size %lu memory that was allocated with size %lu\n",
+ (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
+ }
+
+ trace = &ztraces[allocation->za_trace_index];
+
+ /* size of 0 indicates trace bucket is unused */
+ if (trace->zt_size > 0) {
+ trace->zt_size -= allocation_size;
+ }
+
+ /* A NULL element means the allocation bucket is unused */
+ allocation->za_element = 0;
+ }
+ lck_spin_unlock(&zleak_lock);
+ }
+}
+
+#endif /* CONFIG_ZLEAKS */
+
+/* These functions are outside CONFIG_ZLEAKS because they are also used in
+ * mbuf.c for mbuf leak detection. That is why they lack the z_ prefix.
+ */
+
+/*
+ * This function captures a backtrace from the current stack and
+ * returns the number of frames captured, limited by max_frames.
+ * It is fast because it does not validate the frames it walks; since it is
+ * only called from threads that we intend to keep executing, bad stack data
+ * would have crashed the thread eventually anyway.
+ * If this function is inlined, it doesn't record the frame of the function it's inside.
+ * (because there's no stack frame!)
+ */
+
+uint32_t
+fastbacktrace(uintptr_t* bt, uint32_t max_frames)
+{
+ uintptr_t* frameptr = NULL, *frameptr_next = NULL;
+ uintptr_t retaddr = 0;
+ uint32_t frame_index = 0, frames = 0;
+ uintptr_t kstackb, kstackt;
+ thread_t cthread = current_thread();
+
+ if (__improbable(cthread == NULL))
+ return 0;
+
+ kstackb = cthread->kernel_stack;
+ kstackt = kstackb + kernel_stack_size;
+ /* Load stack frame pointer (EBP on x86) into frameptr */
+ frameptr = __builtin_frame_address(0);
+ if (((uintptr_t)frameptr > kstackt) || ((uintptr_t)frameptr < kstackb))
+ frameptr = NULL;
+
+ while (frameptr != NULL && frame_index < max_frames ) {
+ /* Next frame pointer is pointed to by the previous one */
+ frameptr_next = (uintptr_t*) *frameptr;
+
+ /* Bail if we see a zero in the stack frame, that means we've reached the top of the stack */
+ /* That also means the return address is worthless, so don't record it */
+ if (frameptr_next == NULL)
+ break;
+ /* Verify thread stack bounds */
+ if (((uintptr_t)frameptr_next > kstackt) || ((uintptr_t)frameptr_next < kstackb))
+ break;
+ /* Pull return address from one spot above the frame pointer */
+ retaddr = *(frameptr + 1);
+
+ /* Store it in the backtrace array */
+ bt[frame_index++] = retaddr;
+
+ frameptr = frameptr_next;
+ }
+
+ /* Save the number of frames captured for return value */
+ frames = frame_index;
+
+ /* Fill in the rest of the backtrace with zeros */
+ while (frame_index < max_frames)
+ bt[frame_index++] = 0;
+
+ return frames;
+}
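+
+/*
+ * Illustrative usage sketch (not from the original source): callers typically
+ * capture into a fixed-size array and then hash or log the result, e.g.
+ *
+ *	uintptr_t bt[MAX_ZTRACE_DEPTH];
+ *	uint32_t depth = fastbacktrace(bt, MAX_ZTRACE_DEPTH);
+ *	uint32_t slot = hashbacktrace(bt, depth, zleak_trace_buckets);
+ */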
+
+/* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
+uintptr_t
+hash_mix(uintptr_t x)
+{
+#ifndef __LP64__
+ x += ~(x << 15);
+ x ^= (x >> 10);
+ x += (x << 3 );
+ x ^= (x >> 6 );
+ x += ~(x << 11);
+ x ^= (x >> 16);
+#else
+ x += ~(x << 32);
+ x ^= (x >> 22);
+ x += ~(x << 13);
+ x ^= (x >> 8 );
+ x += (x << 3 );
+ x ^= (x >> 15);
+ x += ~(x << 27);
+ x ^= (x >> 31);
+#endif
+ return x;
+}
+
+uint32_t
+hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
+{
+
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ while (depth) {
+ hash += bt[--depth];
+ }
+
+ hash = hash_mix(hash) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/*
+ * TODO: Determine how well distributed this is.
+ * max_size must be a power of 2, e.g. 0x10000, because 0x10000 - 1 is 0x0FFFF,
+ * which makes a good bitmask.
+ */
+uint32_t
+hashaddr(uintptr_t pt, uint32_t max_size)
+{
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ hash = hash_mix(pt) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/* End of all leak-detection code */
+#pragma mark -
+
+/*
+ * zinit initializes a new zone. The zone data structures themselves
+ * are stored in a zone, which is initially a static structure that
+ * is initialized by zone_init.
+ */
+zone_t
+zinit(
+ vm_size_t size, /* the size of an element */
+ vm_size_t max, /* maximum memory to use */
+ vm_size_t alloc, /* allocation size */
+ const char *name) /* a name for the zone */
+{
+ zone_t z;
+ boolean_t use_page_list = FALSE;
+
+ if (zone_zone == ZONE_NULL) {
+
+ z = (struct zone *)zdata;
+ /* special handling in zcram() because the first element is being used */
+ } else
+ z = (zone_t) zalloc(zone_zone);
+
+ if (z == ZONE_NULL)
+ return(ZONE_NULL);
+
+ /* Zone elements must fit both a next pointer and a backup pointer */
+ vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2;
+ if (size < minimum_element_size)
+ size = minimum_element_size;
+
+ /*
+ * Round element size to a multiple of sizeof(pointer)
+ * This also enforces that allocations will be aligned on pointer boundaries
+ */
+ size = ((size-1) + sizeof(vm_offset_t)) -
+ ((size-1) % sizeof(vm_offset_t));
+
+ if (alloc == 0)
+ alloc = PAGE_SIZE;
+
+ alloc = round_page(alloc);
+ max = round_page(max);
+
+	/*
+	 * We look for an allocation size with less than 1% waste, up to
+	 * 5 pages in size; otherwise, we pick the allocation size with the
+	 * least fragmentation in the range of 1 - 5 pages.
+	 * That size is used unless the caller's suggested size is larger
+	 * AND has less fragmentation.
+	 */
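+	/*
+	 * Illustrative example (assuming 4K pages, and ignoring the
+	 * ZONE_ALIAS_ADDR fast path): for a 72-byte element, one page wastes
+	 * 4096 % 72 = 64 bytes (~1.6%), while two pages waste
+	 * 8192 % 72 = 56 bytes (<1%), so an 8K allocation size would be chosen.
+	 */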
+#if ZONE_ALIAS_ADDR
+ /* Favor PAGE_SIZE allocations unless we waste >10% space */
+ if ((size < PAGE_SIZE) && (PAGE_SIZE % size <= PAGE_SIZE / 10))
+ alloc = PAGE_SIZE;
+ else
+#endif
+#if defined(__LP64__)
+ if (((alloc % size) != 0) || (alloc > PAGE_SIZE * 8))
+#endif
+ {
+ vm_size_t best, waste; unsigned int i;
+ best = PAGE_SIZE;
+ waste = best % size;
+
+ for (i = 1; i <= 5; i++) {
+ vm_size_t tsize, twaste;
+
+ tsize = i * PAGE_SIZE;
+
+ if ((tsize % size) < (tsize / 100)) {
+ alloc = tsize;
+ goto use_this_allocation;
+ }
+ twaste = tsize % size;
+ if (twaste < waste)
+ best = tsize, waste = twaste;
+ }
+ if (alloc <= best || (alloc % size >= waste))
+ alloc = best;
+ }
+use_this_allocation:
+ if (max && (max < alloc))
+ max = alloc;
+
+ /*
+ * Opt into page list tracking if we can reliably map an allocation
+ * to its page_metadata, and if the wastage in the tail of
+ * the allocation is not too large
+ */
+
+ /* zone_zone can't use page metadata since the page metadata will overwrite zone metadata */
+ if (alloc == PAGE_SIZE && zone_zone != ZONE_NULL) {
+ vm_offset_t first_element_offset;
+ size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);
+
+ if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) {
+ first_element_offset = zone_page_metadata_size;
+ } else {
+ first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
+ }
+
+ if (((PAGE_SIZE - first_element_offset) % size) <= PAGE_SIZE / 100) {
+ use_page_list = TRUE;
+ }
+ }
+
+ z->free_elements = NULL;
+ queue_init(&z->pages.any_free_foreign);
+ queue_init(&z->pages.all_free);
+ queue_init(&z->pages.intermediate);
+ queue_init(&z->pages.all_used);
+ z->cur_size = 0;
+ z->page_count = 0;
+ z->max_size = max;
+ z->elem_size = size;
+ z->alloc_size = alloc;
+ z->zone_name = name;
+ z->count = 0;
+ z->countfree = 0;
+ z->sum_count = 0LL;
+ z->doing_alloc_without_vm_priv = FALSE;
+ z->doing_alloc_with_vm_priv = FALSE;
+ z->doing_gc = FALSE;
+ z->exhaustible = FALSE;
+ z->collectable = TRUE;
+ z->allows_foreign = FALSE;
+ z->expandable = TRUE;
+ z->waiting = FALSE;
+ z->async_pending = FALSE;
+ z->caller_acct = TRUE;
+ z->noencrypt = FALSE;
+ z->no_callout = FALSE;
+ z->async_prio_refill = FALSE;
+ z->gzalloc_exempt = FALSE;
+ z->alignment_required = FALSE;
+ z->use_page_list = use_page_list;
+ z->prio_refill_watermark = 0;
+ z->zone_replenish_thread = NULL;
+ z->zp_count = 0;
+#if CONFIG_ZLEAKS
+ z->zleak_capture = 0;
+ z->zleak_on = FALSE;
+#endif /* CONFIG_ZLEAKS */
+
+#if ZONE_DEBUG
+ z->active_zones.next = z->active_zones.prev = NULL;
+ zone_debug_enable(z);
+#endif /* ZONE_DEBUG */
+ lock_zone_init(z);
+
+ /*
+ * Add the zone to the all-zones list.
+ * If we are tracking zone info per task, and we have
+ * already used all the available stat slots, then keep
+ * using the overflow zone slot.
+ */
+ z->next_zone = ZONE_NULL;
+ simple_lock(&all_zones_lock);
+ *last_zone = z;
+ last_zone = &z->next_zone;
+ z->index = num_zones;
+ if (zinfo_per_task) {
+ if (num_zones > ZONES_MAX)
+ z->index = ZONES_MAX;
+ }
+ num_zones++;
+ simple_unlock(&all_zones_lock);
+
+ /*
+ * Check if we should be logging this zone. If so, remember the zone pointer.
+ */
+ if (log_this_zone(z->zone_name, zone_name_to_log)) {
+ zone_of_interest = z;
+ }
+
+ /*
+ * If we want to log a zone, see if we need to allocate buffer space for the log. Some vm related zones are
+ * zinit'ed before we can do a kmem_alloc, so we have to defer allocation in that case. kmem_alloc_ready is set to
+ * TRUE once enough of the VM system is up and running to allow a kmem_alloc to work. If we want to log one
+ * of the VM related zones that's set up early on, we will skip allocation of the log until zinit is called again
+ * later on some other zone. So note we may be allocating a buffer to log a zone other than the one being initialized
+ * right now.
+ */
+ if (zone_of_interest != NULL && zlog_btlog == NULL && kmem_alloc_ready) {
+ zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, NULL, NULL, NULL);
+ if (zlog_btlog) {
+ printf("zone: logging started for zone %s\n", zone_of_interest->zone_name);
+ } else {
+ printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
+ zone_of_interest = NULL;
+ }
+ }
+#if CONFIG_GZALLOC
+ gzalloc_zone_init(z);
+#endif
+ return(z);
+}
+unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count;
+
+static void zone_replenish_thread(zone_t);
+
+/* High-priority, VM-privileged thread used to asynchronously refill a
+ * designated zone, such as the reserved VM map entry zone.
+ */
+static void zone_replenish_thread(zone_t z) {
+ vm_size_t free_size;
+ current_thread()->options |= TH_OPT_VMPRIV;
+
+ for (;;) {
+ lock_zone(z);
+ assert(z->prio_refill_watermark != 0);
+ while ((free_size = (z->cur_size - (z->count * z->elem_size))) < (z->prio_refill_watermark * z->elem_size)) {
+ assert(z->doing_alloc_without_vm_priv == FALSE);
+ assert(z->doing_alloc_with_vm_priv == FALSE);
+ assert(z->async_prio_refill == TRUE);
+
+ unlock_zone(z);
+ int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT;
+ vm_offset_t space, alloc_size;
+ kern_return_t kr;
+
+ if (vm_pool_low())
+ alloc_size = round_page(z->elem_size);
+ else
+ alloc_size = z->alloc_size;
+
+ if (z->noencrypt)
+ zflags |= KMA_NOENCRYPT;
+
+ kr = kernel_memory_allocate(zone_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
+
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else if (kr == KERN_RESOURCE_SHORTAGE) {
+ VM_PAGE_WAIT();
+ } else if (kr == KERN_NO_SPACE) {
+ kr = kernel_memory_allocate(kernel_map, &space, alloc_size, 0, zflags, VM_KERN_MEMORY_ZONE);
+ if (kr == KERN_SUCCESS) {
+#if ZONE_ALIAS_ADDR
+ if (alloc_size == PAGE_SIZE)
+ space = zone_alias_addr(space);
+#endif
+ zcram(z, space, alloc_size);
+ } else {
+ assert_wait_timeout(&z->zone_replenish_thread, THREAD_UNINT, 1, 100 * NSEC_PER_USEC);
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+ }
+
+ lock_zone(z);
+ zone_replenish_loops++;
+ }
+
+ unlock_zone(z);
+ /* Signal any potential throttled consumers, terminating
+ * their timer-bounded waits.
+ */
+ thread_wakeup(z);
+
+ assert_wait(&z->zone_replenish_thread, THREAD_UNINT);
+ thread_block(THREAD_CONTINUE_NULL);
+ zone_replenish_wakeups++;
+ }
+}
+
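+/*
+ * Configure asynchronous priority refill for a zone: record the low-water
+ * mark and start a dedicated replenish thread for it.
+ */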
+void
+zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) {
+ z->prio_refill_watermark = low_water_mark;
+
+ z->async_prio_refill = TRUE;
+ OSMemoryBarrier();
+ kern_return_t tres = kernel_thread_start_priority((thread_continue_t)zone_replenish_thread, z, MAXPRI_KERNEL, &z->zone_replenish_thread);
+
+ if (tres != KERN_SUCCESS) {
+ panic("zone_prio_refill_configure, thread create: 0x%x", tres);
+ }
+
+ thread_deallocate(z->zone_replenish_thread);
+}
+
+/*
+ * Boolean Random Number Generator for generating booleans to randomize
+ * the order of elements in newly zcram()'ed memory. The algorithm is a
+ * modified version of the KISS RNG proposed in the paper:
+ * http://stat.fsu.edu/techreports/M802.pdf
+ * The modifications are documented in the technical paper from UCL:
+ * http://www0.cs.ucl.ac.uk/staff/d.jones/GoodPracticeRNG.pdf
+ */
+
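+/*
+ * Fill 'buffer' with 'count' words of pseudo-random data from the modified
+ * KISS generator, holding bool_gen_lock across the update of the shared seed.
+ */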
+static void random_bool_gen_entropy(
+ int *buffer,
+ int count)
+{
+
+ int i, t;
+ simple_lock(&bool_gen_lock);
+ for (i = 0; i < count; i++) {
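+		/*
+		 * One round of the generator: an xorshift step on seed[1], an
+		 * add-with-carry step across seed[2]/seed[3], and an additive
+		 * (Weyl-style) counter in seed[0]; the three are combined below.
+		 */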
+ bool_gen_seed[1] ^= (bool_gen_seed[1] << 5);
+ bool_gen_seed[1] ^= (bool_gen_seed[1] >> 7);
+ bool_gen_seed[1] ^= (bool_gen_seed[1] << 22);
+ t = bool_gen_seed[2] + bool_gen_seed[3] + bool_gen_global;
+ bool_gen_seed[2] = bool_gen_seed[3];
+ bool_gen_global = t < 0;
+		bool_gen_seed[3] = t & 2147483647;
+ bool_gen_seed[0] += 1411392427;
+ buffer[i] = (bool_gen_seed[0] + bool_gen_seed[1] + bool_gen_seed[3]);
+ }
+ simple_unlock(&bool_gen_lock);
+}
+
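+/*
+ * Return one pseudo-random boolean: bit (index % bits-per-int) of the word at
+ * slot (index / bits-per-int) % bufsize in the entropy buffer.
+ */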
+static boolean_t random_bool_gen(
+ int *buffer,
+ int index,
+ int bufsize)
+{
+ int valindex, bitpos;
+ valindex = (index / (8 * sizeof(int))) % bufsize;
+ bitpos = index % (8 * sizeof(int));
+ return (boolean_t)(buffer[valindex] & (1 << bitpos));
+}
+
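+/*
+ * Free 'element_count' elements of the newly crammed memory into the zone in
+ * a randomized order: each entropy bit decides whether the next element is
+ * taken from the low end or the high end of the remaining range.
+ */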
+static void
+random_free_to_zone(
+ zone_t zone,
+ vm_offset_t newmem,
+ vm_offset_t first_element_offset,
+ int element_count,
+ boolean_t from_zm,
+ int *entropy_buffer)
+{
+ vm_offset_t last_element_offset;
+ vm_offset_t element_addr;
+ vm_size_t elem_size;
+ int index;
+
+ elem_size = zone->elem_size;
+ last_element_offset = first_element_offset + ((element_count * elem_size) - elem_size);
+ for (index = 0; index < element_count; index++) {
+ assert(first_element_offset <= last_element_offset);
+ if (random_bool_gen(entropy_buffer, index, MAX_ENTROPY_PER_ZCRAM)) {
+ element_addr = newmem + first_element_offset;
+ first_element_offset += elem_size;
+ } else {
+ element_addr = newmem + last_element_offset;
+ last_element_offset -= elem_size;
+ }
+ if (element_addr != (vm_offset_t)zone) {
+ zone->count++; /* compensate for free_to_zone */
+ free_to_zone(zone, element_addr, FALSE);
+ }
+ if (!zone->use_page_list && from_zm) {
+ zone_page_alloc(element_addr, elem_size);
+ }
+ zone->cur_size += elem_size;
+ }
+}
+
+/*
+ * Cram the given memory into the specified zone. Update the zone page count accordingly.
+ */
+void
+zcram(
+ zone_t zone,
+ vm_offset_t newmem,
+ vm_size_t size)
+{
+ vm_size_t elem_size;
+ boolean_t from_zm = FALSE;
+ vm_offset_t first_element_offset;
+ int element_count;
+ int entropy_buffer[MAX_ENTROPY_PER_ZCRAM];
+
+ /* Basic sanity checks */
+ assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
+ assert(!zone->collectable || zone->allows_foreign
+ || (from_zone_map(newmem, size)));
+
+ elem_size = zone->elem_size;
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, VM_KERNEL_ADDRPERM(zone), size, 0, 0, 0);
+
+ if (from_zone_map(newmem, size))
+ from_zm = TRUE;
+
+ if (zalloc_debug & ZALLOC_DEBUG_ZCRAM)
+ kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name,
+ (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size);
+
+ if (from_zm && !zone->use_page_list)
+ zone_page_init(newmem, size);
+
+ ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE));
+
+ random_bool_gen_entropy(entropy_buffer, MAX_ENTROPY_PER_ZCRAM);
+
+ lock_zone(zone);
+
+ if (zone->use_page_list) {
+ struct zone_page_metadata *page_metadata;
+ size_t zone_page_metadata_size = sizeof(struct zone_page_metadata);
+
+ assert((newmem & PAGE_MASK) == 0);
+ assert((size & PAGE_MASK) == 0);
+ for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) {
+
+ page_metadata = (struct zone_page_metadata *)(newmem);
+
+ page_metadata->pages.next = NULL;
+ page_metadata->pages.prev = NULL;
+ page_metadata->elements = NULL;
+ page_metadata->zone = zone;
+ page_metadata->alloc_count = 0;
+ page_metadata->free_count = 0;
+
+ enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_metadata);
+
+ if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0){
+ first_element_offset = zone_page_metadata_size;
+ } else {
+ first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT));
+ }
+ element_count = (int)((PAGE_SIZE - first_element_offset) / elem_size);
+ page_metadata->alloc_count += element_count;
+ random_free_to_zone(zone, newmem, first_element_offset, element_count, from_zm, entropy_buffer);
+ }
+ } else {
+ first_element_offset = 0;
+ element_count = (int)((size - first_element_offset) / elem_size);
+ random_free_to_zone(zone, newmem, first_element_offset, element_count, from_zm, entropy_buffer);
+ }
+ unlock_zone(zone);
+
+ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, VM_KERNEL_ADDRPERM(zone), 0, 0, 0, 0);
+
+}
+
+
+/*
+ * Steal memory for the zone package. Called from
+ * vm_page_bootstrap().
+ */
+void
+zone_steal_memory(void)
+{
+#if CONFIG_GZALLOC
+ gzalloc_configure();
+#endif
+ /* Request enough early memory to get to the pmap zone */
+ zdata_size = 12 * sizeof(struct zone);
+ zdata_size = round_page(zdata_size);
+ zdata = (vm_offset_t)pmap_steal_memory(zdata_size);
+}
+
+
+/*
+ * Fill a zone with enough memory to contain at least nelem elements.
+ * Memory is obtained with kmem_alloc_kobject from the kernel_map.
+ * Return the number of elements actually put into the zone, which may
+ * be more than the caller asked for since the memory allocation is
+ * rounded up to a full page.
+ */
+int
+zfill(
+ zone_t zone,
+ int nelem)
+{
+ kern_return_t kr;
+ vm_size_t size;
+ vm_offset_t memory;
+ int nalloc;
+
+ assert(nelem > 0);
+ if (nelem <= 0)
+ return 0;
+ size = nelem * zone->elem_size;
+ size = round_page(size);
+ kr = kmem_alloc_kobject(kernel_map, &memory, size, VM_KERN_MEMORY_ZONE);
+ if (kr != KERN_SUCCESS)
+ return 0;
+
+ zone_change(zone, Z_FOREIGN, TRUE);
+ zcram(zone, memory, size);
+ nalloc = (int)(size / zone->elem_size);
+ assert(nalloc >= nelem);
+
+ return nalloc;
+}
+
+/*
+ * Initialize the "zone of zones" which uses fixed memory allocated
+ * earlier in memory initialization. zone_bootstrap is called
+ * before zone_init.
+ */
+void
+zone_bootstrap(void)
+{
+ char temp_buf[16];
+ unsigned int i;
+
+ if (PE_parse_boot_argn("-zinfop", temp_buf, sizeof(temp_buf))) {
+ zinfo_per_task = TRUE;
+ }
+
+ if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug)))
+ zalloc_debug = 0;
+
+ /* Set up zone element poisoning */
+ zp_init();
+
+ /* Seed the random boolean generator for elements in zone free list */
+ for (i = 0; i < RANDOM_BOOL_GEN_SEED_COUNT; i++) {
+ bool_gen_seed[i] = (unsigned int)early_random();
+ }
+ simple_lock_init(&bool_gen_lock, 0);
+
+ /* should zlog log to debug zone corruption instead of leaks? */
+ if (PE_parse_boot_argn("-zc", temp_buf, sizeof(temp_buf))) {
+ corruption_debug_flag = TRUE;
+ }
+
+ /*
+	 * Check for and set up zone leak detection if requested via boot-args. We recognize two
+ * boot-args:
+ *
+ * zlog=<zone_to_log>
+ * zrecs=<num_records_in_log>
+ *
+ * The zlog arg is used to specify the zone name that should be logged, and zrecs is used to
+ * control the size of the log. If zrecs is not specified, a default value is used.
+ */
+
+ if (PE_parse_boot_argn("zlog", zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) {
+ if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) {
+
+ /*
+ * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
+ * This prevents accidentally hogging too much kernel memory and making the system
+ * unusable.
+ */
+
+ log_records = MIN(ZRECORDS_MAX, log_records);
+
+ } else {
+ log_records = ZRECORDS_DEFAULT;
+ }
+ }
+
+ simple_lock_init(&all_zones_lock, 0);
+
+ first_zone = ZONE_NULL;
+ last_zone = &first_zone;
+ num_zones = 0;
+ thread_call_setup(&call_async_alloc, zalloc_async, NULL);
+
+ /* assertion: nobody else called zinit before us */
+ assert(zone_zone == ZONE_NULL);
+
+ /* initializing global lock group for zones */
+ lck_grp_attr_setdefault(&zone_locks_grp_attr);
+ lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr);
+
+ zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
+ sizeof(struct zone), "zones");
+ zone_change(zone_zone, Z_COLLECT, FALSE);
+ zone_change(zone_zone, Z_CALLERACCT, FALSE);
+ zone_change(zone_zone, Z_NOENCRYPT, TRUE);
+
+ zcram(zone_zone, zdata, zdata_size);
+ VM_PAGE_MOVE_STOLEN(atop_64(zdata_size));
+
+ /* initialize fake zones and zone info if tracking by task */
+ if (zinfo_per_task) {
+ vm_size_t zisize = sizeof(zinfo_usage_store_t) * ZINFO_SLOTS;
+
+ for (i = 0; i < num_fake_zones; i++)
+ fake_zones[i].init(ZINFO_SLOTS - num_fake_zones + i);
+ zinfo_zone = zinit(zisize, zisize * CONFIG_TASK_MAX,
+ zisize, "per task zinfo");
+ zone_change(zinfo_zone, Z_CALLERACCT, FALSE);
+ }
+}
+
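+/*
+ * Allocate and zero the per-task zone accounting slots for a new task when
+ * zinfo_per_task is enabled; otherwise leave the pointer NULL.
+ */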
+void
+zinfo_task_init(task_t task)
+{
+ if (zinfo_per_task) {
+ task->tkm_zinfo = zalloc(zinfo_zone);
+ memset(task->tkm_zinfo, 0, sizeof(zinfo_usage_store_t) * ZINFO_SLOTS);
+ } else {
+ task->tkm_zinfo = NULL;
+ }
+}
+
+void
+zinfo_task_free(task_t task)
+{
+ assert(task != kernel_task);
+ if (task->tkm_zinfo != NULL) {
+ zfree(zinfo_zone, task->tkm_zinfo);
+ task->tkm_zinfo = NULL;
+ }
+}
+
+/* Global initialization of Zone Allocator.
+ * Runs after zone_bootstrap.
+ */
+void
+zone_init(
+ vm_size_t max_zonemap_size)
+{
+ kern_return_t retval;
+ vm_offset_t zone_min;
+ vm_offset_t zone_max;
+
+ retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
+ FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(VM_KERN_MEMORY_ZONE),
+ &zone_map);
+
+ if (retval != KERN_SUCCESS)
+ panic("zone_init: kmem_suballoc failed");
+ zone_max = zone_min + round_page(max_zonemap_size);
+#if CONFIG_GZALLOC
+ gzalloc_init(max_zonemap_size);
+#endif
+ /*
+ * Setup garbage collection information:
+ */
+ zone_map_min_address = zone_min;
+ zone_map_max_address = zone_max;
+
+#if defined(__LP64__)
+ /*
+ * ensure that any vm_page_t that gets created from
+ * the vm_page zone can be packed properly (see vm_page.h
+	 * for the packing requirements).
+ */
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_min_address)) != (vm_page_t)zone_map_min_address)
+ panic("VM_PAGE_PACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address);
+
+ if (VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address)) != (vm_page_t)zone_map_max_address)
+ panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address);
+#endif
+
+ zone_pages = (unsigned int)atop_kernel(zone_max - zone_min);
+ zone_page_table_used_size = sizeof(zone_page_table);
+
+ zone_page_table_second_level_size = 1;
+ zone_page_table_second_level_shift_amount = 0;
+
+ /*
+ * Find the power of 2 for the second level that allows
+ * the first level to fit in ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE
+ * slots.
+ */
+ while ((zone_page_table_first_level_slot(zone_pages-1)) >= ZONE_PAGE_TABLE_FIRST_LEVEL_SIZE) {
+ zone_page_table_second_level_size <<= 1;
+ zone_page_table_second_level_shift_amount++;
+ }
+
+ lck_grp_attr_setdefault(&zone_gc_lck_grp_attr);
+ lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr);
+ lck_attr_setdefault(&zone_gc_lck_attr);
+ lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr);
+
+#if CONFIG_ZLEAKS
+ /*
+ * Initialize the zone leak monitor
+ */
+ zleak_init(max_zonemap_size);
+#endif /* CONFIG_ZLEAKS */
+}
+
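+/*
+ * Ensure the second-level zone_page_table array covering 'pindex' exists,
+ * allocating and initializing it if necessary. A compare-and-swap install
+ * handles the race where another thread expands the same slot first.
+ */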
+void
+zone_page_table_expand(zone_page_index_t pindex)
+{
+ unsigned int first_index;
+ struct zone_page_table_entry * volatile * first_level_ptr;
+
+ assert(pindex < zone_pages);
+
+ first_index = zone_page_table_first_level_slot(pindex);
+ first_level_ptr = &zone_page_table[first_index];
+
+ if (*first_level_ptr == NULL) {
+ /*
+ * We were able to verify the old first-level slot
+ * had NULL, so attempt to populate it.
+ */
+
+ vm_offset_t second_level_array = 0;
+ vm_size_t second_level_size = round_page(zone_page_table_second_level_size * sizeof(struct zone_page_table_entry));
+ zone_page_index_t i;
+ struct zone_page_table_entry *entry_array;
+
+ if (kmem_alloc_kobject(zone_map, &second_level_array,
+ second_level_size, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) {
+ panic("zone_page_table_expand");
+ }
+ zone_map_table_page_count += (second_level_size / PAGE_SIZE);
+
+ /*
+ * zone_gc() may scan the "zone_page_table" directly,
+ * so make sure any slots have a valid unused state.
+ */
+ entry_array = (struct zone_page_table_entry *)second_level_array;
+ for (i=0; i < zone_page_table_second_level_size; i++) {
+ entry_array[i].alloc_count = ZONE_PAGE_UNUSED;
+ entry_array[i].collect_count = 0;
+ }
+
+ if (OSCompareAndSwapPtr(NULL, entry_array, first_level_ptr)) {
+ /* Old slot was NULL, replaced with expanded level */
+ OSAddAtomicLong(second_level_size, &zone_page_table_used_size);
+ } else {
+ /* Old slot was not NULL, someone else expanded first */
+ kmem_free(zone_map, second_level_array, second_level_size);
+ zone_map_table_page_count -= (second_level_size / PAGE_SIZE);
+ }
+ } else {
+ /* Old slot was not NULL, already been expanded */
+ }
+}
+
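+/*
+ * Look up the zone_page_table_entry for a page index, or return NULL if the
+ * covering second-level array has not been allocated yet.
+ */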
+struct zone_page_table_entry *
+zone_page_table_lookup(zone_page_index_t pindex)
+{
+ unsigned int first_index = zone_page_table_first_level_slot(pindex);
+ struct zone_page_table_entry *second_level = zone_page_table[first_index];
+
+ if (second_level) {
+ return &second_level[zone_page_table_second_level_slot(pindex)];
+ }
+
+ return NULL;
+}
+
+extern volatile SInt32 kfree_nop_count;
+
+#pragma mark -
+#pragma mark zalloc_canblock
+
+/*
+ * zalloc returns an element from the specified zone.
+ */
+static void *
+zalloc_internal(