+ return -1U;
+}
+
+__startup_func
+static void
+zone_tagging_init(vm_size_t max_zonemap_size)
+{
+ kern_return_t ret;
+ vm_map_kernel_flags_t vmk_flags;
+ uint32_t idx;
+
+ // allocate submaps VM_KERN_MEMORY_DIAG
+
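+	// one 32-bit tag-base slot is reserved per page of the zone map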
+ zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tagbase_map);
+
+ if (ret != KERN_SUCCESS) {
+ panic("zone_init: kmem_suballoc failed");
+ }
+ zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size);
+
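+	// sized for 2M vm_tag_t entries; backs the ztBlocks array used for out-of-line tags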
+ zone_tags_map_size = 2048 * 1024 * sizeof(vm_tag_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tags_map);
+
+ if (ret != KERN_SUCCESS) {
+ panic("zone_init: kmem_suballoc failed");
+ }
+ zone_tags_max = zone_tags_min + round_page(zone_tags_map_size);
+
+ ztBlocks = (ztBlock *) zone_tags_min;
+ ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));
+
+ // initialize the qheads
+ lck_mtx_lock(&ztLock);
+
+ ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0);
+ for (idx = 0; idx < ztFreeIndexCount; idx++) {
+ ztBlocks[idx].free = TRUE;
+ ztBlocks[idx].next = idx;
+ ztBlocks[idx].prev = idx;
+ ztBlocks[idx].size = 0;
+ }
+ // free remaining space
+ ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);
+
+ lck_mtx_unlock(&ztLock);
+}
+
+static void
+ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+
+ lck_mtx_lock(&ztLock);
+
+ // fault tagbase
+ ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0);
+
+ if (!zone->tags_inline) {
+ // allocate tags
+ count = (uint32_t)(size / zone_elem_size(zone));
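+		// round up to whole tag blocks of ztTagsPerBlock tags each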
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ block = ztAlloc(zone, blocks);
+ if (-1U == block) {
+ ztDump(false);
+ }
+ assert(-1U != block);
+ }
+
+ lck_mtx_unlock(&ztLock);
+
+ if (!zone->tags_inline) {
+ // set tag base for each page
+ block *= ztTagsPerBlock;
+ for (idx = 0; idx < pages; idx++) {
+ vm_offset_t esize = zone_elem_size(zone);
+ tagbase[idx] = block + (uint32_t)((ptoa(idx) + esize - 1) / esize);
+ }
+ }
+}
+
+static void
+ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+	// invalidate the tag base for each page
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+ block = tagbase[0];
+ for (idx = 0; idx < pages; idx++) {
+ tagbase[idx] = 0xFFFFFFFF;
+ }
+
+ lck_mtx_lock(&ztLock);
+ if (!zone->tags_inline) {
+ count = (uint32_t)(size / zone_elem_size(zone));
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ assert(block != 0xFFFFFFFF);
+ block /= ztTagsPerBlock;
+ ztFree(NULL /* zone is unlocked */, block, blocks);
+ }
+
+ lck_mtx_unlock(&ztLock);
+}
+
+uint32_t
+zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size)
+{
+ simple_lock(&all_zones_lock, &zone_locks_grp);
+
+ zone_index_foreach(idx) {
+ zone_t z = &zone_array[idx];
+ if (!z->tags) {
+ continue;
+ }
+ if (tag_zone_index != z->tag_zone_index) {
+ continue;
+ }
+
+ *elem_size = zone_elem_size(z);
+ simple_unlock(&all_zones_lock);
+ return idx;
+ }
+
+ simple_unlock(&all_zones_lock);
+
+ return -1U;
+}
+
+#endif /* VM_MAX_TAG_ZONES */
+#endif /* !ZALLOC_TEST */
+#pragma mark zalloc helpers
+#if !ZALLOC_TEST
+
+__pure2
+static inline uint16_t
+zc_mag_size(void)
+{
+ return zc_magazine_size;
+}
+
+__attribute__((noinline, cold))
+static void
+zone_lock_was_contended(zone_t zone, zone_cache_t zc)
+{
+ lck_spin_lock_nopreempt(&zone->z_lock);
+
+ /*
+ * If zone caching has been disabled due to memory pressure,
+	 * then recording contention is not useful; give the system
+ * time to recover.
+ */
+ if (__improbable(zone_caching_disabled)) {
+ return;
+ }
+
+ zone->z_contention_cur++;
+
+ if (zc == NULL || zc->zc_depot_max >= INT16_MAX * zc_mag_size()) {
+ return;
+ }
+
+ /*
+ * Let the depot grow based on how bad the contention is,
+ * and how populated the zone is.
+ */
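+	/*
+	 * With mild contention (wma < 2 units), stop growing once the per-CPU
+	 * depots already cover ~5% of the zone's available elements; with
+	 * moderate contention (wma < 4 units), stop at ~10%.
+	 */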
+ if (zone->z_contention_wma < 2 * Z_CONTENTION_WMA_UNIT) {
+ if (zc->zc_depot_max * zpercpu_count() * 20u >=
+ zone->z_elems_avail) {
+ return;
+ }
+ }
+ if (zone->z_contention_wma < 4 * Z_CONTENTION_WMA_UNIT) {
+ if (zc->zc_depot_max * zpercpu_count() * 10u >=
+ zone->z_elems_avail) {
+ return;
+ }
+ }
+ if (!zc_grow_threshold || zone->z_contention_wma <
+ zc_grow_threshold * Z_CONTENTION_WMA_UNIT) {
+ return;
+ }
+
+ zc->zc_depot_max++;
+}
+
+static inline void
+zone_lock_nopreempt_check_contention(zone_t zone, zone_cache_t zc)
+{
+ if (lck_spin_try_lock_nopreempt(&zone->z_lock)) {
+ return;
+ }
+
+ zone_lock_was_contended(zone, zc);
+}
+
+static inline void
+zone_lock_check_contention(zone_t zone, zone_cache_t zc)
+{
+ disable_preemption();
+ zone_lock_nopreempt_check_contention(zone, zc);
+}
+
+static inline void
+zone_unlock_nopreempt(zone_t zone)
+{
+ lck_spin_unlock_nopreempt(&zone->z_lock);
+}
+
+static inline void
+zone_depot_lock_nopreempt(zone_cache_t zc)
+{
+ hw_lock_bit_nopreempt(&zc->zc_depot_lock, 0, &zone_locks_grp);
+}
+
+static inline void
+zone_depot_unlock_nopreempt(zone_cache_t zc)
+{
+ hw_unlock_bit_nopreempt(&zc->zc_depot_lock, 0);
+}
+
+static inline void
+zone_depot_lock(zone_cache_t zc)
+{
+ hw_lock_bit(&zc->zc_depot_lock, 0, &zone_locks_grp);
+}
+
+static inline void
+zone_depot_unlock(zone_cache_t zc)
+{
+ hw_unlock_bit(&zc->zc_depot_lock, 0);
+}
+
+const char *
+zone_name(zone_t z)
+{
+ return z->z_name;
+}
+
+const char *
+zone_heap_name(zone_t z)
+{
+ if (__probable(z->kalloc_heap < KHEAP_ID_COUNT)) {
+ return kalloc_heap_names[z->kalloc_heap];
+ }
+ return "invalid";
+}
+
+static uint32_t
+zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
+{
+ vm_size_t elem_count, chunks;
+
+ elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) / zone_elem_size(z);
+ chunks = (max_elems + elem_count - 1) / elem_count;
+
+ return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
+}
+
+static inline vm_size_t
+zone_submaps_approx_size(void)
+{
+ vm_size_t size = 0;
+
+ for (unsigned idx = 0; idx <= zone_last_submap_idx; idx++) {
+ size += zone_submaps[idx]->size;
+ }
+
+ return size;
+}
+
+static void
+zone_cache_swap_magazines(zone_cache_t cache)
+{
+ uint16_t count_a = cache->zc_alloc_cur;
+ uint16_t count_f = cache->zc_free_cur;
+ zone_element_t *elems_a = cache->zc_alloc_elems;
+ zone_element_t *elems_f = cache->zc_free_elems;
+
+ z_debug_assert(count_a <= zc_mag_size());
+ z_debug_assert(count_f <= zc_mag_size());
+
+ cache->zc_alloc_cur = count_f;
+ cache->zc_free_cur = count_a;
+ cache->zc_alloc_elems = elems_f;
+ cache->zc_free_elems = elems_a;
+}
+
+/*!
+ * @function zone_magazine_load
+ *
+ * @brief
+ * Cache the value of @c zm_cur on the cache to avoid a dependent load
+ * on the allocation fastpath.
+ */
+static void
+zone_magazine_load(uint16_t *count, zone_element_t **elems, zone_magazine_t mag)
+{
+ z_debug_assert(mag->zm_cur <= zc_mag_size());
+ *count = mag->zm_cur;
+ *elems = mag->zm_elems;
+}
+
+/*!
+ * @function zone_magazine_replace
+ *
+ * @brief
+ * Unload a magazine and load a new one in its place.
+ */
+static zone_magazine_t
+zone_magazine_replace(uint16_t *count, zone_element_t **elems,
+ zone_magazine_t mag)
+{
+ zone_magazine_t old;
+
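+	/* recover the enclosing magazine from its zm_elems array pointer */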
+ old = (zone_magazine_t)((uintptr_t)*elems -
+ offsetof(struct zone_magazine, zm_elems));
+ old->zm_cur = *count;
+ z_debug_assert(old->zm_cur <= zc_mag_size());
+ zone_magazine_load(count, elems, mag);
+
+ return old;
+}
+
+static zone_magazine_t
+zone_magazine_alloc(zalloc_flags_t flags)
+{
+ return zalloc_ext(zc_magazine_zone, zc_magazine_zone->z_stats,
+ flags | Z_ZERO);
+}
+
+static void
+zone_magazine_free(zone_magazine_t mag)
+{
+ zfree_ext(zc_magazine_zone, zc_magazine_zone->z_stats, mag);
+}
+
+static void
+zone_enable_caching(zone_t zone)
+{
+ zone_cache_t caches;
+
+ caches = zalloc_percpu_permanent_type(struct zone_cache);
+ zpercpu_foreach(zc, caches) {
+ zone_magazine_load(&zc->zc_alloc_cur, &zc->zc_alloc_elems,
+ zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
+ zone_magazine_load(&zc->zc_free_cur, &zc->zc_free_elems,
+ zone_magazine_alloc(Z_WAITOK | Z_NOFAIL));
+ STAILQ_INIT(&zc->zc_depot);
+ }
+
+ if (os_atomic_xchg(&zone->z_pcpu_cache, caches, release)) {
+ panic("allocating caches for zone %s twice", zone->z_name);
+ }
+}
+
+bool
+zone_maps_owned(vm_address_t addr, vm_size_t size)
+{
+ return from_zone_map(addr, size, ZONE_ADDR_NATIVE);
+}
+
+void
+zone_map_sizes(
+ vm_map_size_t *psize,
+ vm_map_size_t *pfree,
+ vm_map_size_t *plargest_free)
+{
+ vm_map_size_t size, free, largest;
+
+ vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
+
+ for (uint32_t i = 1; i <= zone_last_submap_idx; i++) {
+ vm_map_sizes(zone_submaps[i], &size, &free, &largest);
+ *psize += size;
+ *pfree += free;
+ *plargest_free = MAX(*plargest_free, largest);
+ }
+}
+
+__attribute__((always_inline))
+vm_map_t
+zone_submap(zone_t zone)
+{
+ return zone_submaps[zone->z_submap_idx];
+}
+
+unsigned
+zpercpu_count(void)
+{
+ return zpercpu_early_count;
+}
+
+int
+track_this_zone(const char *zonename, const char *logname)
+{
+ unsigned int len;
+ const char *zc = zonename;
+ const char *lc = logname;
+
+ /*
+ * Compare the strings. We bound the compare by MAX_ZONE_NAME.
+ */
+
+ for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
+ /*
+		 * If the current characters don't match, check for a space
+		 * in the zone name and a corresponding period in the log name.
+ * If that's not there, then the strings don't match.
+ */
+
+ if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
+ break;
+ }
+
+ /*
+ * The strings are equal so far. If we're at the end, then it's a match.
+ */
+
+ if (*zc == '\0') {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+#if DEBUG || DEVELOPMENT
+
+vm_size_t
+zone_element_info(void *addr, vm_tag_t * ptag)
+{
+ vm_size_t size = 0;
+ vm_tag_t tag = VM_KERN_MEMORY_NONE;
+ struct zone *src_zone;
+
+ if (from_zone_map(addr, sizeof(void *), ZONE_ADDR_NATIVE) ||
+ from_zone_map(addr, sizeof(void *), ZONE_ADDR_FOREIGN)) {
+ src_zone = &zone_array[zone_index_from_ptr(addr)];
+#if VM_MAX_TAG_ZONES
+ if (__improbable(src_zone->tags)) {
+ tag = *ztSlot(src_zone, (vm_offset_t)addr) >> 1;
+ }
+#endif /* VM_MAX_TAG_ZONES */
+ size = zone_elem_size(src_zone);
+ } else {
+#if CONFIG_GZALLOC
+ gzalloc_element_size(addr, NULL, &size);
+#endif /* CONFIG_GZALLOC */
+ }
+ *ptag = tag;
+ return size;
+}
+
+#endif /* DEBUG || DEVELOPMENT */
+
+/* The backup pointer is stored in the last pointer-sized location in an element. */
+__header_always_inline vm_offset_t *
+get_primary_ptr(vm_offset_t elem)
+{
+ return (vm_offset_t *)elem;
+}
+
+__header_always_inline vm_offset_t *
+get_backup_ptr(vm_offset_t elem, vm_size_t elem_size)
+{
+ return (vm_offset_t *)(elem + elem_size - sizeof(vm_offset_t));
+}
+
+#endif /* !ZALLOC_TEST */
+#pragma mark Zone poisoning/zeroing and early random
+#if !ZALLOC_TEST
+
+#define ZONE_ENTROPY_CNT 2
+static struct zone_bool_gen {
+ struct bool_gen zbg_bg;
+ uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
+} zone_bool_gen[MAX_CPUS];
+
+/*
+ * Initialize zone poisoning
+ * called from zone_bootstrap before any allocations are made from zalloc
+ */
+__startup_func
+static void
+zp_bootstrap(void)
+{
+ char temp_buf[16];
+
+ /*
+ * Initialize canary random cookie.
+ *
+	 * Make sure that (zp_canary ^ pointer) has non-zero low bits (01),
+	 * different from ZONE_POISON (11).
+	 *
+	 * On LP64, also make the high bits of (zp_canary ^ pointer) equal 0xC0FFEE...
+ */
+ static_assert(ZONE_POISON % 4 == 3);
+ zp_canary = (uintptr_t)early_random();
+#if __LP64__
+ zp_canary &= 0x000000fffffffffc;
+ zp_canary |= 0xc0ffee0000000001 ^ 0xffffff0000000000;
+#else
+ zp_canary &= 0xfffffffc;
+ zp_canary |= 0x00000001;
+#endif
+
+ /* -zp: enable poisoning for every alloc and free */
+ if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
+ zp_factor = 1;
+ }
+
+ /* -no-zp: disable poisoning */
+ if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
+ zp_factor = 0;
+ printf("Zone poisoning disabled\n");
+ }
+
+ zpercpu_foreach_cpu(cpu) {
+ random_bool_init(&zone_bool_gen[cpu].zbg_bg);
+ }
+}
+
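+/*
+ * Countdown until the next poisoned free: zp_factor plus the element size
+ * scaled down by zp_scale, with a few low bits of mach_absolute_time()
+ * xor-ed in so that the poisoning cadence is not strictly periodic.
+ */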
+static inline uint32_t
+zone_poison_count_init(zone_t zone)
+{
+ return zp_factor + (((uint32_t)zone_elem_size(zone)) >> zp_scale) ^
+ (mach_absolute_time() & 0x7);
+}
+
+/*
+ * Zero the element if zone has z_free_zeroes flag set else poison
+ * the element if zs_poison_seqno hits 0.
+ */
+static zprot_mode_t
+zfree_clear_or_poison(zone_t zone, vm_offset_t addr, vm_offset_t elem_size)
+{
+ if (zone->z_free_zeroes) {
+ if (zone->z_percpu) {
+ zpercpu_foreach_cpu(i) {
+ bzero((void *)(addr + ptoa(i)), elem_size);
+ }
+ } else {
+ bzero((void *)addr, elem_size);
+ }
+ return ZPM_ZERO;
+ }
+
+ zprot_mode_t poison = ZPM_AUTO;
+#if ZALLOC_ENABLE_POISONING
+ if (__improbable(zp_factor == 1)) {
+ poison = ZPM_POISON;
+ } else if (__probable(zp_factor != 0)) {
+ uint32_t *seqnop = &zpercpu_get(zone->z_stats)->zs_poison_seqno;
+ uint32_t seqno = os_atomic_load(seqnop, relaxed);
+ if (seqno == 0) {
+ os_atomic_store(seqnop, zone_poison_count_init(zone), relaxed);
+ poison = ZPM_POISON;
+ } else {
+ os_atomic_store(seqnop, seqno - 1, relaxed);
+ }
+ }
+ if (poison == ZPM_POISON) {
+ /* memset_pattern{4|8} could help make this faster: <rdar://problem/4662004> */
+ for (size_t i = 0; i < elem_size / sizeof(vm_offset_t); i++) {
+ ((vm_offset_t *)addr)[i] = ZONE_POISON;
+ }
+ } else {
+ /*
+ * Set a canary at the extremities.
+ *
+ * Zero first zp_min_size bytes of elements that aren't being
+ * poisoned.
+ *
+ * Element size is larger than zp_min_size in this path,
+ * zones with smaller elements have z_free_zeroes set.
+ */
+ *get_primary_ptr(addr) = zp_canary ^ (uintptr_t)addr;
+ bzero((void *)addr + sizeof(vm_offset_t),
+ zp_min_size - sizeof(vm_offset_t));
+ *get_backup_ptr(addr, elem_size) = zp_canary ^ (uintptr_t)addr;
+
+ poison = ZPM_CANARY;
+ }
+#endif /* ZALLOC_ENABLE_POISONING */
+
+ return poison;
+}
+
+#if ZALLOC_ENABLE_POISONING
+
+__abortlike
+static void
+zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size, zprot_mode_t zpm)
+{
+ uint32_t esize = (uint32_t)zone_elem_size(z);
+ uint32_t first_offs = ~0u;
+ uintptr_t first_bits = 0, v;
+ char buf[1024];
+ int pos = 0;
+ const char *how;
+
+#if __LP64__
+#define ZPF "0x%016lx"
+#else
+#define ZPF "0x%08lx"
+#endif
+
+ buf[0] = '\0';
+
+ if (zpm == ZPM_CANARY) {
+ how = "canaries";
+
+ v = *get_primary_ptr(elem);
+ if (v != (elem ^ zp_canary)) {
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
+ "%5d: got "ZPF", want "ZPF" (xor: "ZPF")",
+ 0, v, (elem ^ zp_canary), (v ^ elem ^ zp_canary));
+ if (first_offs > 0) {
+ first_offs = 0;
+ first_bits = v;
+ }
+ }
+
+ v = *get_backup_ptr(elem, esize);
+ if (v != (elem ^ zp_canary)) {
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
+ "%5d: got "ZPF", want "ZPF" (xor: "ZPF")",
+ esize - (int)sizeof(v), v, (elem ^ zp_canary),
+ (v ^ elem ^ zp_canary));
+ if (first_offs > esize - sizeof(v)) {
+ first_offs = esize - sizeof(v);
+ first_bits = v;
+ }
+ }
+
+ for (uint32_t o = sizeof(v); o < zp_min_size; o += sizeof(v)) {
+ if ((v = *(uintptr_t *)(elem + o)) == 0) {
+ continue;
+ }
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
+ "%5d: "ZPF, o, v);
+ if (first_offs > o) {
+ first_offs = o;
+ first_bits = v;
+ }
+ }
+ } else if (zpm == ZPM_ZERO) {
+ how = "zero";
+
+ for (uint32_t o = 0; o < size; o += sizeof(v)) {
+ if ((v = *(uintptr_t *)(elem + o)) == 0) {
+ continue;
+ }
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
+ "%5d: "ZPF, o, v);
+ if (first_offs > o) {
+ first_offs = o;
+ first_bits = v;
+ }
+ }
+ } else {
+ how = "poison";
+
+ for (uint32_t o = 0; o < size; o += sizeof(v)) {
+ if ((v = *(uintptr_t *)(elem + o)) == ZONE_POISON) {
+ continue;
+ }
+ pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
+ "%5d: "ZPF" (xor: "ZPF")",
+ o, v, (v ^ ZONE_POISON));
+ if (first_offs > o) {
+ first_offs = o;
+ first_bits = v;
+ }
+ }
+ }
+
+ (panic)("[%s%s]: element modified after free "
+ "(off:%d, val:"ZPF", sz:%d, ptr:%p, prot:%s)%s",
+ zone_heap_name(z), zone_name(z),
+ first_offs, first_bits, esize, (void *)elem, how, buf);
+
+#undef ZPF
+}
+
+static void
+zalloc_validate_element_zero(zone_t zone, vm_offset_t elem, vm_size_t size)
+{
+ if (memcmp_zero_ptr_aligned((void *)elem, size)) {
+ zalloc_uaf_panic(zone, elem, size, ZPM_ZERO);
+ }
+ if (!zone->z_percpu) {
+ return;
+ }
+ for (size_t i = zpercpu_count(); --i > 0;) {
+ elem += PAGE_SIZE;
+ if (memcmp_zero_ptr_aligned((void *)elem, size)) {
+ zalloc_uaf_panic(zone, elem, size, ZPM_ZERO);
+ }
+ }
+}
+
+#if __arm64__ || __arm__
+typedef __attribute__((ext_vector_type(2))) vm_offset_t zpair_t;
+#else
+typedef struct {
+ vm_offset_t x;
+ vm_offset_t y;
+} zpair_t;
+#endif
+
+
+__attribute__((noinline))
+static void
+zalloc_validate_element_poison(zone_t zone, vm_offset_t elem, vm_size_t size)
+{
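+	/*
+	 * Accumulate (word ^ ZONE_POISON) over the element: check the
+	 * possibly unaligned first and last words directly, then scan the
+	 * double-wide aligned interior two pairs at a time.  Any non-zero
+	 * accumulator means the element was modified after free.
+	 */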
+ vm_offset_t p = elem;
+ vm_offset_t end = elem + size;
+
+ const zpair_t poison = { ZONE_POISON, ZONE_POISON };
+ zpair_t a, b;
+
+ a.x = *(const vm_offset_t *)p;
+ a.y = *(const vm_offset_t *)(end - sizeof(vm_offset_t));
+
+ a.x ^= poison.x;
+ a.y ^= poison.y;
+
+ /*
+ * align p to the next double-wide boundary
+ * align end to the previous double-wide boundary
+ */
+ p = (p + sizeof(zpair_t) - 1) & -sizeof(zpair_t);
+ end &= -sizeof(zpair_t);
+
+ if ((end - p) % (2 * sizeof(zpair_t)) == 0) {
+		b.x = 0;
+		b.y = 0;
+ } else {
+ end -= sizeof(zpair_t);
+ b.x = ((zpair_t *)end)[0].x ^ poison.x;
+ b.y = ((zpair_t *)end)[0].y ^ poison.y;
+ }
+
+ for (; p < end; p += 2 * sizeof(zpair_t)) {
+ a.x |= ((zpair_t *)p)[0].x ^ poison.x;
+ a.y |= ((zpair_t *)p)[0].y ^ poison.y;
+ b.x |= ((zpair_t *)p)[1].x ^ poison.x;
+ b.y |= ((zpair_t *)p)[1].y ^ poison.y;
+ }
+
+ a.x |= b.x;
+ a.y |= b.y;
+
+ if (a.x || a.y) {
+ zalloc_uaf_panic(zone, elem, size, ZPM_POISON);
+ }
+}
+
+static void
+zalloc_validate_element(zone_t zone, vm_offset_t elem, vm_size_t size,
+ zprot_mode_t zpm)
+{
+ vm_offset_t *primary = get_primary_ptr(elem);
+ vm_offset_t *backup = get_backup_ptr(elem, size);
+
+#if CONFIG_GZALLOC
+ if (zone->gzalloc_tracked) {
+ return;
+ }
+#endif /* CONFIG_GZALLOC */
+
+ if (zone->z_free_zeroes) {
+ return zalloc_validate_element_zero(zone, elem, size);
+ }
+
+ switch (zpm) {
+ case ZPM_AUTO:
+ if (*backup == 0) {
+ size -= sizeof(vm_size_t);
+ return zalloc_validate_element_zero(zone, elem, size);
+ }
+ if (*backup == ZONE_POISON) {
+ size -= sizeof(vm_size_t);
+ return zalloc_validate_element_poison(zone, elem, size);
+ }
+ OS_FALLTHROUGH;
+
+ case ZPM_CANARY:
+ if ((*primary ^ zp_canary) != elem || (*backup ^ zp_canary) != elem) {
+ zalloc_uaf_panic(zone, elem, size, ZPM_CANARY);
+ }
+ *primary = *backup = 0;
+ size = zp_min_size;
+ OS_FALLTHROUGH;
+
+ case ZPM_ZERO:
+ return zalloc_validate_element_zero(zone, elem, size);
+
+ case ZPM_POISON:
+ return zalloc_validate_element_poison(zone, elem, size);
+ }
+}
+
+#endif /* ZALLOC_ENABLE_POISONING */
+#if ZALLOC_EARLY_GAPS
+
+__attribute__((noinline))
+static void
+zone_early_gap_drop(int n)
+{
+ while (n-- > 0) {
+ zone_t zone0 = &zone_array[0];
+ struct zone_page_metadata *meta = NULL;
+ vm_offset_t addr;
+ uint16_t pages;
+ vm_map_t map;
+
+ lck_mtx_lock(&zone_metadata_region_lck);
+
+ if (!zone_pva_is_null(zone0->z_pageq_va)) {
+ meta = zone_meta_queue_pop_native(zone0,
+ &zone0->z_pageq_va, &addr);
+ map = zone_submaps[meta->zm_chunk_len];
+ pages = meta->zm_alloc_size;
+ __builtin_bzero(meta, sizeof(struct zone_page_metadata));
+ }
+
+ lck_mtx_unlock(&zone_metadata_region_lck);
+
+ if (!meta) {
+ break;
+ }
+
+ kmem_free(map, addr, ptoa(pages));
+ }
+}
+
+static void
+zone_early_gap_add(zone_t z, uint16_t pages)
+{
+ struct zone_page_metadata *meta = NULL;
+ zone_t zone0 = &zone_array[0];
+ kern_return_t kr;
+ vm_offset_t addr;
+
+ kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO | KMA_VAONLY;
+ if (z->z_submap_idx == Z_SUBMAP_IDX_GENERAL &&
+ z->kalloc_heap != KHEAP_ID_NONE) {
+ kmaflags |= KMA_KHEAP;
+ }
+
+ kr = kernel_memory_allocate(zone_submap(z), &addr, ptoa(pages), 0,
+ kmaflags, VM_KERN_MEMORY_ZONE);
+
+ if (kr != KERN_SUCCESS) {
+ panic("unable to allocate early gap (%d pages): %d", pages, kr);
+ }
+
+ zone_meta_populate(addr, ptoa(pages));
+
+ meta = zone_meta_from_addr(addr);
+ meta->zm_alloc_size = pages;
+ meta->zm_chunk_len = z->z_submap_idx;
+
+ lck_mtx_lock(&zone_metadata_region_lck);
+ zone_meta_queue_push(zone0, &zone0->z_pageq_va, meta);
+ lck_mtx_unlock(&zone_metadata_region_lck);
+}
+
+/*
+ * Roughly until pd1 is made, introduce random gaps
+ * between allocated pages.
+ *
+ * This way the early boot allocations are not in a completely
+ * predictable order and relative position.
+ *
+ * Those gaps are returned to the maps afterwards.
+ *
+ * We abuse the zone 0 (which is unused) "va" pageq to remember
+ * those ranges.
+ */
+__attribute__((noinline))
+static void
+zone_allocate_random_early_gap(zone_t z)
+{
+ int16_t pages = early_random() % 16;
+
+ /*
+ * 6% of the time: drop 2 gaps
+ * 25% of the time: drop 1 gap
+ * 37% of the time: do nothing
+ * 18% of the time: add 1 gap
+ * 12% of the time: add 2 gaps
+ */
+ if (pages > 10) {
+ zone_early_gap_drop(pages == 15 ? 2 : 1);
+ }
+ if (pages < 5) {
+		/* values are 6 through 14 */
+ zone_early_gap_add(z, 6 + 2 * pages);
+ }
+ if (pages < 2) {
+ zone_early_gap_add(z, 6 + early_random() % 16);
+ }
+}
+
+static inline void
+zone_cleanup_early_gaps_if_needed(void)
+{
+ if (__improbable(!zone_pva_is_null(zone_array[0].z_pageq_va))) {
+ zone_early_gap_drop(10);
+ }
+}
+
+#endif /* ZALLOC_EARLY_GAPS */
+
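+/*
+ * Perturb the per-CPU round-robin allocation cursor with a few random bits
+ * so that early boot allocations do not start at predictable offsets within
+ * a chunk.
+ */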
+static void
+zone_early_scramble_rr(zone_t zone, zone_stats_t zstats)
+{
+ int cpu = cpu_number();
+ zone_stats_t zs = zpercpu_get_cpu(zstats, cpu);
+ uint32_t bits;
+
+ bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
+ zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
+
+ zs->zs_alloc_rr += bits;
+ zs->zs_alloc_rr %= zone->z_chunk_elems;
+}
+
+#endif /* !ZALLOC_TEST */
+#pragma mark Zone Leak Detection
+#if !ZALLOC_TEST
+
+/*
+ * Zone leak debugging code
+ *
+ * When enabled, this code keeps a log to track allocations to a particular zone that have not
+ * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
+ * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
+ * off by default.
+ *
+ * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
+ * is the name of the zone you wish to log.
+ *
+ * This code only tracks one zone, so you need to identify which one is leaking first.
+ * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
+ * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
+ * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
+ * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
+ * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
+ * See the help in the kgmacros for usage info.
+ *
+ *
+ * Zone corruption logging
+ *
+ * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
+ * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
+ * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
+ * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
+ * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
+ * corrupted to examine its history. This should lead to the source of the corruption.
+ */
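+/*
+ * Example boot-args (zone name shown is illustrative):
+ *
+ *   zlog=vm.objects zrecs=2000      leak-style logging of the "vm objects" zone
+ *   -zc zlog=vm.objects             corruption-style logging (allocs and frees)
+ *
+ * A period in the zlog value matches a space in the zone name, since spaces
+ * cannot be passed through boot-args.
+ */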
+
+/* Returns TRUE if we rolled over the counter at factor */
+__header_always_inline bool
+sample_counter(volatile uint32_t *count_p, uint32_t factor)
+{
+ uint32_t old_count, new_count = 0;
+ if (count_p != NULL) {
+ os_atomic_rmw_loop(count_p, old_count, new_count, relaxed, {
+ new_count = old_count + 1;
+ if (new_count >= factor) {
+ new_count = 0;
+ }
+ });
+ }
+
+ return new_count == 0;
+}
+
+#if ZONE_ENABLE_LOGGING
+/* Log allocations and frees to help debug a zone element corruption */
+static TUNABLE(bool, corruption_debug_flag, "-zc", false);
+
+#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */
+
+static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING;
+static int num_zones_logged = 0;
+
+/*
+ * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
+ * the number of records you want in the log. For example, "zrecs=10" sets it to 10 records. Since this
+ * is the number of stacks suspected of leaking, we don't need many records.
+ */
+
+#if defined(__LP64__)
+#define ZRECORDS_MAX 2560 /* Max records allowed in the log */
+#else
+#define ZRECORDS_MAX 1536 /* Max records allowed in the log */
+#endif
+#define ZRECORDS_DEFAULT 1024            /* default records in log if zrecs is not specified in boot-args */
+
+static TUNABLE(uint32_t, log_records, "zrecs", ZRECORDS_DEFAULT);
+
+static void
+zone_enable_logging(zone_t z)
+{
+ z->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH,
+ (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? */);
+
+ if (z->zlog_btlog) {
+ printf("zone: logging started for zone %s%s\n",
+ zone_heap_name(z), z->z_name);
+ } else {
+ printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n");
+ z->zone_logging = false;
+ }
+}
+
+/**
+ * @function zone_setup_logging
+ *
+ * @abstract
+ * Optionally sets up a zone for logging.
+ *
+ * @discussion
+ * We recognize two boot-args:
+ *
+ * zlog=<zone_to_log>
+ * zrecs=<num_records_in_log>
+ *
+ * The zlog arg is used to specify the zone name that should be logged,
+ * and zrecs is used to control the size of the log.
+ *
+ * If zrecs is not specified, a default value is used.
+ */
+static void
+zone_setup_logging(zone_t z)
+{
+ char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
+ char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
+ char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
+
+ /*
+ * Don't allow more than ZRECORDS_MAX records even if the user asked for more.
+ *
+ * This prevents accidentally hogging too much kernel memory
+ * and making the system unusable.
+ */
+ if (log_records > ZRECORDS_MAX) {
+ log_records = ZRECORDS_MAX;
+ }
+
+ /*
+ * Append kalloc heap name to zone name (if zone is used by kalloc)
+ */
+ snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
+
+ /* zlog0 isn't allowed. */
+ for (int i = 1; i <= max_num_zones_to_log; i++) {
+ snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
+
+ if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val)) &&
+ track_this_zone(zone_name, zlog_val)) {
+ z->zone_logging = true;
+ num_zones_logged++;
+ break;
+ }
+ }
+
+ /*
+	 * Backwards compat. with the old boot-arg used to specify single zone
+	 * logging, i.e. "zlog".  This needs to happen after the newer zlogN
+	 * checks because the "zlog" prefix will match all of the zlogN
+	 * boot-args.
+ */
+ if (!z->zone_logging &&
+ PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val)) &&
+ track_this_zone(zone_name, zlog_val)) {
+ z->zone_logging = true;
+ num_zones_logged++;
+ }
+
+
+ /*
+ * If we want to log a zone, see if we need to allocate buffer space for
+ * the log.
+ *
+ * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
+ * we have to defer allocation in that case.
+ *
+ * zone_init() will finish the job.
+ *
+ * If we want to log one of the VM related zones that's set up early on,
+ * we will skip allocation of the log until zinit is called again later
+ * on some other zone.
+ */
+ if (z->zone_logging && startup_phase >= STARTUP_SUB_KMEM_ALLOC) {
+ zone_enable_logging(z);
+ }
+}
+
+/*
+ * Each record in the log contains a pointer to the zone element it refers to,
+ * and a small array to hold the pc's from the stack trace. A
+ * record is added to the log each time a zalloc() is done in the zone_of_interest. For leak debugging,
+ * the record is cleared when a zfree() is done. For corruption debugging, the log tracks both allocs and frees.
+ * If the log fills, old records are replaced as if it were a circular buffer.
+ */
+
+
+/*
+ * Decide if we want to log this zone by doing a string compare between a zone name and the name
+ * of the zone to log. Return true if the strings are equal, false otherwise. Because it's not
+ * possible to include spaces in strings passed in via the boot-args, a period in the logname will
+ * match a space in the zone name.
+ */
+
+/*
+ * Test if we want to log this zalloc/zfree event. We log if this is the zone we're interested in and
+ * the buffer for the records has been allocated.
+ */
+
+#define DO_LOGGING(z) (z->zlog_btlog != NULL)
+#else /* !ZONE_ENABLE_LOGGING */
+#define DO_LOGGING(z) 0
+#endif /* !ZONE_ENABLE_LOGGING */
+#if CONFIG_ZLEAKS
+
+/*
+ * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding
+ * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a
+ * backtrace. Every free, we examine the table and determine if the allocation was being tracked,
+ * and stop tracking it if it was being tracked.
+ *
+ * We track the allocations in the zallocations hash table, which stores the address that was returned from
+ * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which
+ * stores the backtrace associated with that allocation. This provides uniquing for the relatively large
+ * backtraces - we don't store them more than once.
+ *
+ * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up
+ * a large amount of virtual space.
+ */
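+/*
+ * Related boot-args (parsed in zleak_init() below):
+ *
+ *   -zleakon        enable on platforms where zleaks are off by default
+ *   -zleakoff       disable on platforms where zleaks are on by default
+ *   zfactor=N       sample every N allocations (default 1000)
+ *   zleak-allocs=N  number of buckets in the allocations hash (power of 2)
+ *   zleak-traces=N  number of buckets in the traces hash (power of 2)
+ */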
+#define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */
+#define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */
+#define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */
+#define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */
+static uint32_t zleak_state = 0; /* State of collection, as above */
+static unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */
+
+bool panic_include_ztrace = FALSE; /* Enable zleak logging on panic */
+vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */
+vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */
+
+/*
+ * Counters for allocation statistics.
+ */
+
+/* Times two active records want to occupy the same spot */
+static unsigned int z_alloc_collisions = 0;
+static unsigned int z_trace_collisions = 0;
+
+/* Times a new record lands on a spot previously occupied by a freed allocation */
+static unsigned int z_alloc_overwrites = 0;
+static unsigned int z_trace_overwrites = 0;
+
+/* Times a new alloc or trace is put into the hash table */
+static unsigned int z_alloc_recorded = 0;
+static unsigned int z_trace_recorded = 0;
+
+/* Times zleak_log returned false due to not being able to acquire the lock */
+static unsigned int z_total_conflicts = 0;
+
+/*
+ * Structure for keeping track of an allocation
+ * An allocation bucket is in use if its element is not NULL
+ */
+struct zallocation {
+ uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */
+ vm_size_t za_size; /* how much memory did this allocation take up? */
+ uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */
+ /* TODO: #if this out */
+ uint32_t za_hit_count; /* for determining effectiveness of hash function */
+};
+
+/* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */
+static uint32_t zleak_alloc_buckets = CONFIG_ZLEAK_ALLOCATION_MAP_NUM;
+static uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM;
+
+vm_size_t zleak_max_zonemap_size;
+
+/* Hashmaps of allocations and their corresponding traces */
+static struct zallocation* zallocations;
+static struct ztrace* ztraces;
+
+/* not static so that panic can see this, see kern/debug.c */
+struct ztrace* top_ztrace;
+
+/* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */
+static LCK_GRP_DECLARE(zleak_lock_grp, "zleak_lock");
+static LCK_SPIN_DECLARE(zleak_lock, &zleak_lock_grp);
+
+/*
+ * Initializes the zone leak monitor. Called from zone_init()
+ */
+__startup_func
+static void
+zleak_init(vm_size_t max_zonemap_size)
+{
+ char scratch_buf[16];
+ boolean_t zleak_enable_flag = FALSE;
+
+ zleak_max_zonemap_size = max_zonemap_size;
+ zleak_global_tracking_threshold = max_zonemap_size / 2;
+ zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8;
+
+#if CONFIG_EMBEDDED
+ if (PE_parse_boot_argn("-zleakon", scratch_buf, sizeof(scratch_buf))) {
+ zleak_enable_flag = TRUE;
+ printf("zone leak detection enabled\n");
+ } else {
+ zleak_enable_flag = FALSE;
+ printf("zone leak detection disabled\n");
+ }
+#else /* CONFIG_EMBEDDED */
+ /* -zleakoff (flag to disable zone leak monitor) */
+ if (PE_parse_boot_argn("-zleakoff", scratch_buf, sizeof(scratch_buf))) {
+ zleak_enable_flag = FALSE;
+ printf("zone leak detection disabled\n");
+ } else {
+ zleak_enable_flag = TRUE;
+ printf("zone leak detection enabled\n");
+ }
+#endif /* CONFIG_EMBEDDED */
+
+ /* zfactor=XXXX (override how often to sample the zone allocator) */
+ if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) {
+ printf("Zone leak factor override: %u\n", zleak_sample_factor);
+ }
+
+ /* zleak-allocs=XXXX (override number of buckets in zallocations) */
+ if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) {
+ printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets);
+ /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
+ if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets - 1))) {
+ printf("Override isn't a power of two, bad things might happen!\n");
+ }
+ }
+
+ /* zleak-traces=XXXX (override number of buckets in ztraces) */
+ if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) {
+ printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets);
+ /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */
+ if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets - 1))) {
+ printf("Override isn't a power of two, bad things might happen!\n");
+ }
+ }
+
+ if (zleak_enable_flag) {
+ zleak_state = ZLEAK_STATE_ENABLED;
+ }
+}
+
+/*
+ * Support for kern.zleak.active sysctl - a simplified
+ * version of the zleak_state variable.
+ */
+int
+get_zleak_state(void)
+{
+ if (zleak_state & ZLEAK_STATE_FAILED) {
+ return -1;
+ }
+ if (zleak_state & ZLEAK_STATE_ACTIVE) {
+ return 1;
+ }
+ return 0;
+}
+
+kern_return_t
+zleak_activate(void)
+{
+ kern_return_t retval;
+ vm_size_t z_alloc_size = zleak_alloc_buckets * sizeof(struct zallocation);
+ vm_size_t z_trace_size = zleak_trace_buckets * sizeof(struct ztrace);
+ void *allocations_ptr = NULL;
+ void *traces_ptr = NULL;
+
+ /* Only one thread attempts to activate at a time */
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ return KERN_SUCCESS;
+ }
+
+ /* Indicate that we're doing the setup */
+ lck_spin_lock(&zleak_lock);
+ if (zleak_state & (ZLEAK_STATE_ACTIVE | ZLEAK_STATE_ACTIVATING | ZLEAK_STATE_FAILED)) {
+ lck_spin_unlock(&zleak_lock);
+ return KERN_SUCCESS;
+ }
+
+ zleak_state |= ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ /* Allocate and zero tables */
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&allocations_ptr, z_alloc_size, VM_KERN_MEMORY_DIAG);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ retval = kmem_alloc_kobject(kernel_map, (vm_offset_t*)&traces_ptr, z_trace_size, VM_KERN_MEMORY_DIAG);
+ if (retval != KERN_SUCCESS) {
+ goto fail;
+ }
+
+ bzero(allocations_ptr, z_alloc_size);
+ bzero(traces_ptr, z_trace_size);
+
+ /* Everything's set. Install tables, mark active. */
+ zallocations = allocations_ptr;
+ ztraces = traces_ptr;
+
+ /*
+ * Initialize the top_ztrace to the first entry in ztraces,
+ * so we don't have to check for null in zleak_log
+ */
+ top_ztrace = &ztraces[0];
+
+ /*
+ * Note that we do need a barrier between installing
+ * the tables and setting the active flag, because the zfree()
+ * path accesses the table without a lock if we're active.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_ACTIVE;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ return 0;
+
+fail:
+ /*
+ * If we fail to allocate memory, don't further tax
+ * the system by trying again.
+ */
+ lck_spin_lock(&zleak_lock);
+ zleak_state |= ZLEAK_STATE_FAILED;
+ zleak_state &= ~ZLEAK_STATE_ACTIVATING;
+ lck_spin_unlock(&zleak_lock);
+
+ if (allocations_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)allocations_ptr, z_alloc_size);
+ }
+
+ if (traces_ptr != NULL) {
+ kmem_free(kernel_map, (vm_offset_t)traces_ptr, z_trace_size);
+ }
+
+ return retval;
+}
+
+static inline void
+zleak_activate_if_needed(void)
+{
+ if (__probable((zleak_state & ZLEAK_STATE_ENABLED) == 0)) {
+ return;
+ }
+ if (zleak_state & ZLEAK_STATE_ACTIVE) {
+ return;
+ }
+ if (zone_submaps_approx_size() < zleak_global_tracking_threshold) {
+ return;
+ }
+
+ kern_return_t kr = zleak_activate();
+ if (kr != KERN_SUCCESS) {
+ printf("Failed to activate live zone leak debugging (%d).\n", kr);
+ }
+}
+
+static inline void
+zleak_track_if_needed(zone_t z)
+{
+ if (__improbable(zleak_state & ZLEAK_STATE_ACTIVE)) {
+ if (!z->zleak_on &&
+ zone_size_wired(z) >= zleak_per_zone_tracking_threshold) {
+ z->zleak_on = true;
+ }
+ }
+}
+
+/*
+ * TODO: What about allocations that never get deallocated,
+ * especially ones with unique backtraces? Should we wait to record
+ * until after boot has completed?
+ * (How many persistent zallocs are there?)
+ */
+
+/*
+ * This function records the allocation in the allocations table,
+ * and stores the associated backtrace in the traces table
+ * (or just increments the refcount if the trace is already recorded)
+ * If the allocation slot is in use, the old allocation is replaced with the new allocation, and
+ * the associated trace's refcount is decremented.
+ * If the trace slot is in use, it returns.
+ * The refcount is incremented by the amount of memory the allocation consumes.
+ * The return value indicates whether to try again next time.
+ */
+static boolean_t
+zleak_log(uintptr_t* bt,
+ uintptr_t addr,
+ uint32_t depth,
+ vm_size_t allocation_size)
+{
+ /* Quit if there's someone else modifying the hash tables */
+ if (!lck_spin_try_lock(&zleak_lock)) {
+ z_total_conflicts++;
+ return FALSE;
+ }
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets);
+ struct ztrace* trace = &ztraces[trace_index];
+
+ allocation->za_hit_count++;
+ trace->zt_hit_count++;
+
+ /*
+ * If the allocation bucket we want to be in is occupied, and if the occupier
+ * has the same trace as us, just bail.
+ */
+ if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) {
+ z_alloc_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ }
+
+ /* STEP 1: Store the backtrace in the traces array. */
+ /* A size of zero indicates that the trace bucket is free. */
+
+ if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0) {
+ /*
+ * Different unique trace with same hash!
+ * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated
+ * and get out of the way for later chances
+ */
+ trace->zt_collisions++;
+ z_trace_collisions++;
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+ } else if (trace->zt_size > 0) {
+ /* Same trace, already added, so increment refcount */
+ trace->zt_size += allocation_size;
+ } else {
+ /* Found an unused trace bucket, record the trace here! */
+ if (trace->zt_depth != 0) { /* if this slot was previously used but not currently in use */
+ z_trace_overwrites++;
+ }
+
+ z_trace_recorded++;
+ trace->zt_size = allocation_size;
+ memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)));
+
+ trace->zt_depth = depth;
+ trace->zt_collisions = 0;
+ }
+
+ /* STEP 2: Store the allocation record in the allocations array. */
+
+ if (allocation->za_element != (uintptr_t) 0) {
+ /*
+ * Straight up replace any allocation record that was there. We don't want to do the work
+ * to preserve the allocation entries that were there, because we only record a subset of the
+ * allocations anyways.
+ */
+
+ z_alloc_collisions++;
+
+ struct ztrace* associated_trace = &ztraces[allocation->za_trace_index];
+ /* Knock off old allocation's size, not the new allocation */
+ associated_trace->zt_size -= allocation->za_size;
+ } else if (allocation->za_trace_index != 0) {
+ /* Slot previously used but not currently in use */
+ z_alloc_overwrites++;
+ }
+
+ allocation->za_element = addr;
+ allocation->za_trace_index = trace_index;
+ allocation->za_size = allocation_size;
+
+ z_alloc_recorded++;
+
+ if (top_ztrace->zt_size < trace->zt_size) {
+ top_ztrace = trace;
+ }
+
+ lck_spin_unlock(&zleak_lock);
+ return TRUE;
+}
+
+/*
+ * Free the allocation record and release the stacktrace.
+ * This should be as fast as possible because it will be called for every free.
+ */
+__attribute__((noinline))
+static void
+zleak_free(uintptr_t addr,
+ vm_size_t allocation_size)
+{
+ if (addr == (uintptr_t) 0) {
+ return;
+ }
+
+ struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)];
+
+ /* Double-checked locking: check to find out if we're interested, lock, check to make
+ * sure it hasn't changed, then modify it, and release the lock.
+ */
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ /* if the allocation was the one, grab the lock, check again, then delete it */
+ lck_spin_lock(&zleak_lock);
+
+ if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) {
+ struct ztrace *trace;
+
+ /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */
+ if (allocation->za_size != allocation_size) {
+ panic("Freeing as size %lu memory that was allocated with size %lu\n",
+ (uintptr_t)allocation_size, (uintptr_t)allocation->za_size);
+ }
+
+ trace = &ztraces[allocation->za_trace_index];
+
+ /* size of 0 indicates trace bucket is unused */
+ if (trace->zt_size > 0) {
+ trace->zt_size -= allocation_size;
+ }
+
+ /* A NULL element means the allocation bucket is unused */
+ allocation->za_element = 0;
+ }
+ lck_spin_unlock(&zleak_lock);
+ }
+}
+
+#else
+static inline void
+zleak_activate_if_needed(void)
+{
+}
+
+static inline void
+zleak_track_if_needed(__unused zone_t z)
+{
+}
+#endif /* CONFIG_ZLEAKS */
+#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
+
+__attribute__((noinline))
+static void
+zalloc_log_or_trace_leaks(zone_t zone, vm_offset_t addr, void *fp)
+{
+ uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */
+ unsigned int numsaved = 0;
+
+#if ZONE_ENABLE_LOGGING
+ if (DO_LOGGING(zone)) {
+ numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL);
+ btlog_add_entry(zone->zlog_btlog, (void *)addr,
+ ZOP_ALLOC, (void **)zbt, numsaved);
+ }
+#endif /* ZONE_ENABLE_LOGGING */
+
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace every zleak_sample_factor
+ * allocations in this zone.
+ */
+ if (__improbable(zone->zleak_on)) {
+ if (sample_counter(&zone->zleak_capture, zleak_sample_factor)) {
+ /* Avoid backtracing twice if zone logging is on */
+ if (numsaved == 0) {
+ numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH, fp, NULL);
+ }
+ /* Sampling can fail if another sample is happening at the same time in a different zone. */
+ if (!zleak_log(zbt, addr, numsaved, zone_elem_size(zone))) {
+ /* If it failed, roll back the counter so we sample the next allocation instead. */
+ zone->zleak_capture = zleak_sample_factor;
+ }
+ }
+ }
+
+ if (__improbable(zone_leaks_scan_enable &&
+ !(zone_elem_size(zone) & (sizeof(uintptr_t) - 1)))) {
+ unsigned int count, idx;
+ /* Fill element, from tail, with backtrace in reverse order */
+ if (numsaved == 0) {
+ numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH, fp, NULL);
+ }
+ count = (unsigned int)(zone_elem_size(zone) / sizeof(uintptr_t));
+ if (count >= numsaved) {
+ count = numsaved - 1;
+ }
+ for (idx = 0; idx < count; idx++) {
+ ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1];
+ }
+ }
+#endif /* CONFIG_ZLEAKS */
+}
+
+static inline bool
+zalloc_should_log_or_trace_leaks(zone_t zone, vm_size_t elem_size)
+{
+#if ZONE_ENABLE_LOGGING
+ if (DO_LOGGING(zone)) {
+ return true;
+ }
+#endif /* ZONE_ENABLE_LOGGING */
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: capture a backtrace every zleak_sample_factor
+ * allocations in this zone.
+ */
+ if (zone->zleak_on) {
+ return true;
+ }
+ if (zone_leaks_scan_enable && !(elem_size & (sizeof(uintptr_t) - 1))) {
+ return true;
+ }
+#endif /* CONFIG_ZLEAKS */
+ return false;
+}
+
+#endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
+#if ZONE_ENABLE_LOGGING
+
+__attribute__((noinline))
+static void
+zfree_log_trace(zone_t zone, vm_offset_t addr, void *fp)
+{
+ /*
+ * See if we're doing logging on this zone.
+ *
+ * There are two styles of logging used depending on
+ * whether we're trying to catch a leak or corruption.
+ */
+ if (__improbable(DO_LOGGING(zone))) {
+ if (corruption_debug_flag) {
+ uintptr_t zbt[MAX_ZTRACE_DEPTH];
+ unsigned int numsaved;
+ /*
+ * We're logging to catch a corruption.
+ *
+ * Add a record of this zfree operation to log.
+ */
+ numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH, fp, NULL);
+ btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE,
+ (void **)zbt, numsaved);
+ } else {
+ /*
+ * We're logging to catch a leak.
+ *
+ * Remove any record we might have for this element
+ * since it's being freed. Note that we may not find it
+ * if the buffer overflowed and that's OK.
+ *
+ * Since the log is of a limited size, old records get
+ * overwritten if there are more zallocs than zfrees.
+ */
+ btlog_remove_entries_for_element(zone->zlog_btlog, (void *)addr);
+ }
+ }
+}
+
+#endif /* ZONE_ENABLE_LOGGING */
+
+/* These functions live outside of CONFIG_ZLEAKS because they are also used in
+ * mbuf.c for mbuf leak-detection. This is why they lack the z_ prefix.
+ */
+
+/* "Thomas Wang's 32/64 bit mix functions." http://www.concentric.net/~Ttwang/tech/inthash.htm */
+uintptr_t
+hash_mix(uintptr_t x)
+{
+#ifndef __LP64__
+ x += ~(x << 15);
+ x ^= (x >> 10);
+ x += (x << 3);
+ x ^= (x >> 6);
+ x += ~(x << 11);
+ x ^= (x >> 16);
+#else
+ x += ~(x << 32);
+ x ^= (x >> 22);
+ x += ~(x << 13);
+ x ^= (x >> 8);
+ x += (x << 3);
+ x ^= (x >> 15);
+ x += ~(x << 27);
+ x ^= (x >> 31);
+#endif
+ return x;
+}
+
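+/*
+ * Hash a backtrace into [0, max_size): sum the return addresses, mix,
+ * and mask.  max_size must be a power of two.
+ */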
+uint32_t
+hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size)
+{
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ while (depth) {
+ hash += bt[--depth];
+ }
+
+ hash = hash_mix(hash) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+/*
+ * TODO: Determine how well distributed this is
+ * max_size must be a power of 2, e.g. 0x10000, because 0x10000-1 is 0x0FFFF which makes a good bitmask.
+ */
+uint32_t
+hashaddr(uintptr_t pt, uint32_t max_size)
+{
+ uintptr_t hash = 0;
+ uintptr_t mask = max_size - 1;
+
+ hash = hash_mix(pt) & mask;
+
+ assert(hash < max_size);
+
+ return (uint32_t) hash;
+}
+
+#endif /* !ZALLOC_TEST */
+#pragma mark zone (re)fill
+#if !ZALLOC_TEST
+
+/*!
+ * @defgroup Zone Refill
+ * @{
+ *
+ * @brief
+ * Functions handling the zone refill machinery.
+ *
+ * @discussion
+ * Zones are refilled based on 3 mechanisms: direct expansion, async expansion,
+ * VM-specific replenishment. Zones using VM-specific replenishment are marked
+ * with the @c z_replenishes property set.
+ *
+ * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
+ * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
+ *
+ * - call @c zone_expand_locked() directly if the caller is allowed to block,
+ *
+ * - wake up the asynchronous expansion thread call if the caller is not allowed
+ * to block.
+ *
+ * - call @c zone_replenish_locked() to kick the replenish state machine.
+ *
+ *
+ * <h2>Synchronous expansion</h2>
+ *
+ * This mechanism is actually the only one that may refill a zone, and all the
+ * other ones funnel through this one eventually.
+ *
+ * @c zone_expand_locked() implements the core of the expansion mechanism,
+ * and will do so while a caller specified predicate is true.
+ *
+ * Zone expansion allows for up to 2 threads to concurrently refill the zone:
+ * - one VM privileged thread,
+ * - one regular thread.
+ *
+ * Regular threads that refill will put down their identity in @c z_expander,
+ * so that priority inversion avoidance can be implemented.
+ *
+ * However, VM privileged threads are allowed to use VM page reserves,
+ * which allows for the system to recover from extreme memory pressure
+ * situations, allowing for the few allocations that @c zone_gc() or
+ * killing processes require.
+ *
+ * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
+ * is set. @c z_expander is not necessarily the identity of this VM privileged
+ * thread (it is if the VM privileged thread came in first; otherwise it holds
+ * the identity of the regular expander, or may even be @c THREAD_NULL).
+ *
+ * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
+ * spending a whole pointer on priority inheritance for VM privileged threads
+ * (and other issues related to having two owners), we use the rwlock boost as
+ * a stop gap to avoid priority inversions.
+ *
+ *
+ * <h2>Chunk wiring policies</h2>
+ *
+ * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
+ * to try to minimize fragmentation relative to element sizes not aligning with
+ * a chunk size well. However, this can grow large and be hard to fulfill on
+ * a system under a lot of memory pressure (chunks can be as long as 8 pages on
+ * 4k page systems).
+ *
+ * This is why, when under memory pressure the system allows chunks to be
+ * partially populated. The metadata of the first page in the chunk maintains
+ * the count of actually populated pages.
+ *
+ * The metadata for addresses assigned to a zone are found on 4 queues:
+ * - @c z_pageq_empty has chunk heads with populated pages and no allocated
+ * elements (those can be targeted by @c zone_gc()),
+ * - @c z_pageq_partial has chunk heads with populated pages that are partially
+ * used,
+ * - @c z_pageq_full has chunk heads with populated pages with no free elements
+ * left,
+ * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
+ * the zone forever (if @c z_va_sequester is enabled), or the first secondary
+ * metadata for a chunk whose corresponding page is not populated in the
+ * chunk.
+ *
+ * When new pages need to be wired/populated, chunks from the @c z_pageq_va
+ * queues are preferred.
+ *
+ *
+ * <h2>Asynchronous expansion</h2>
+ *
+ * This mechanism allows for refilling zones used mostly with non blocking
+ * callers. It relies on a thread call (@c zone_expand_callout) which will
+ * iterate all zones and refill the ones marked with @c z_async_refilling.
+ *
+ * NOTE: If the calling thread for zalloc_noblock is lower priority than
+ * the thread_call, then zalloc_noblock to an empty zone may succeed.
+ *
+ *
+ * <h2>Dealing with zone allocations from the mach VM code</h2>
+ *
+ * The implementation of the mach VM itself uses the zone allocator
+ * for things like the vm_map_entry data structure. In order to prevent
+ * an infinite recursion problem when adding more pages to a zone, @c zalloc
+ * uses a replenish thread to refill the VM layer's zones before they have
+ * too few remaining free entries. The reserved remaining free entries
+ * guarantee that the VM routines can get entries from already mapped pages.
+ *
+ * In order for that to work, the amount of allocations in the nested
+ * case have to be bounded. There are currently 2 replenish zones, and
+ * if each needs 1 element of each zone to add a new page to itself, that
+ * gives us a minimum reserve of 2 elements.
+ *
+ * There is also a deadlock issue with the zone garbage collection thread,
+ * or any thread that is trying to free zone pages. While holding
+ * the kernel's map lock they may need to allocate new VM map entries, hence
+ * we need enough reserve to allow them to get past the point of holding the
+ * map lock. After freeing that page, the GC thread will wait in
+ * @c zone_reclaim() until the replenish threads can finish.
+ * Since there's only 1 GC thread at a time, that adds a minimum of 1 to the
+ * reserve size.
+ *
+ * Since the minimum amount you can add to a zone is 1 page,
+ * we'll use 16K (from ARM) as the refill size on all platforms.
+ *
+ * When a refill zone drops to half that available, i.e. REFILL_SIZE / 2,
+ * @c zalloc_ext() will wake the replenish thread. The replenish thread runs
+ * until at least REFILL_SIZE worth of free elements exist, before sleeping again.
+ * In the meantime threads may continue to use the reserve until there are only
+ * REFILL_SIZE / 4 elements left. Below that point only the replenish threads
+ * themselves and the GC thread may continue to use from the reserve.
+ */
+
+static thread_call_data_t zone_expand_callout;
+
+static inline kma_flags_t
+zone_kma_flags(zone_t z, zalloc_flags_t flags)
+{
+ kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
+
+ if (z->z_noencrypt) {
+ kmaflags |= KMA_NOENCRYPT;
+ }
+ if (flags & Z_NOPAGEWAIT) {
+ kmaflags |= KMA_NOPAGEWAIT;
+ }
+ if (z->z_permanent || (!z->z_destructible && z->z_va_sequester)) {
+ kmaflags |= KMA_PERMANENT;
+ }
+ if (z->z_submap_idx == Z_SUBMAP_IDX_GENERAL &&
+ z->kalloc_heap != KHEAP_ID_NONE) {
+ kmaflags |= KMA_KHEAP;
+ }
+
+ return kmaflags;
+}
+
+/*!
+ * @function zcram_and_lock()
+ *
+ * @brief
+ * Prepare some memory for being usable for allocation purposes.
+ *
+ * @discussion
+ * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
+ * to be usable in the zone.
+ *
+ * This function assumes the metadata is already populated for the range.
+ *
+ * Calling this function with @c pg_start being 0 means that the memory
+ * is either a partial chunk, or a full chunk, that isn't published anywhere
+ * and the initialization can happen without locks held.
+ *
+ * Calling this function with a non zero @c pg_start means that we are extending
+ * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
+ * is already usable and published in the zone, so extending it requires holding
+ * the zone lock.
+ *
+ * @param zone The zone to cram new populated pages into
+ * @param addr The base address for the chunk(s)
+ * @param pg_va_new The number of virtual pages newly assigned to the zone
+ * @param pg_start The first newly populated page relative to @a addr.
+ * @param pg_end The after-last newly populated page relative to @a addr.
+ * @param kind The kind of memory assigned to the zone.
+ */
+static void
+zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
+ uint32_t pg_start, uint32_t pg_end, zone_addr_kind_t kind)
+{
+ zone_id_t zindex = zone_index(zone);
+ vm_offset_t elem_size = zone_elem_size(zone);
+ uint32_t free_start = 0, free_end = 0;
+
+ struct zone_page_metadata *meta = zone_meta_from_addr(addr);
+ uint32_t chunk_pages = zone->z_chunk_pages;
+
+ assert(pg_start < pg_end && pg_end <= chunk_pages);
+
+ if (pg_start == 0) {
+ uint16_t chunk_len = (uint16_t)pg_end;
+ uint16_t secondary_len = ZM_SECONDARY_PAGE;
+ bool inline_bitmap = false;
+
+ if (zone->z_percpu) {
+ chunk_len = 1;
+ secondary_len = ZM_SECONDARY_PCPU_PAGE;
+ assert(pg_end == zpercpu_count());
+ }
+ if (!zone->z_permanent) {
+ inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
+ }
+
+ meta[0] = (struct zone_page_metadata){
+ .zm_index = zindex,
+ .zm_inline_bitmap = inline_bitmap,
+ .zm_chunk_len = chunk_len,
+ };
+ if (kind == ZONE_ADDR_FOREIGN) {
+ /* Never hit z_pageq_empty */
+ meta[0].zm_alloc_size = ZM_ALLOC_SIZE_LOCK;
+ }
+
+ for (uint16_t i = 1; i < chunk_pages; i++) {
+ meta[i] = (struct zone_page_metadata){
+ .zm_index = zindex,
+ .zm_inline_bitmap = inline_bitmap,
+ .zm_chunk_len = secondary_len,
+ .zm_page_index = i,
+ };
+ }
+
+ free_end = (uint32_t)ptoa(chunk_len) / elem_size;
+ if (!zone->z_permanent) {
+ zone_meta_bits_init(meta, free_end, zone->z_chunk_elems);
+ }
+ } else {
+ assert(!zone->z_percpu && !zone->z_permanent);
+
+ free_end = (uint32_t)ptoa(pg_end) / elem_size;
+ free_start = (uint32_t)ptoa(pg_start) / elem_size;
+ }
+
+#if VM_MAX_TAG_ZONES
+ if (__improbable(zone->tags)) {
+ assert(kind == ZONE_ADDR_NATIVE && !zone->z_percpu);
+ ztMemoryAdd(zone, addr + ptoa(pg_start),
+ ptoa(pg_end - pg_start));
+ }
+#endif /* VM_MAX_TAG_ZONES */
+
+ /*
+ * Insert the initialized pages / metadatas into the right lists.
+ */
+
+ zone_lock(zone);
+ assert(zone->z_self == zone);
+
+ if (pg_start != 0) {
+ assert(meta->zm_chunk_len == pg_start);
+
+ zone_meta_bits_merge(meta, free_start, free_end);
+ meta->zm_chunk_len = (uint16_t)pg_end;
+
+ /*
+ * consume the zone_meta_lock_in_partial()
+ * done in zone_expand_locked()
+ */
+ zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
+ zone_meta_remqueue(zone, meta);
+ }
+
+ if (zone->z_permanent || meta->zm_alloc_size) {
+ zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
+ } else {
+ zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
+ zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
+ }
+ if (pg_end < chunk_pages) {
+ /* push any non populated residual VA on z_pageq_va */
+ zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
+ }
+
+ zone_elems_free_add(zone, free_end - free_start);
+ zone->z_elems_avail += free_end - free_start;
+ zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
+ if (pg_va_new) {
+ zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
+ }
+ if (zone->z_wired_hwm < zone->z_wired_cur) {
+ zone->z_wired_hwm = zone->z_wired_cur;
+ }
+
+ os_atomic_add(&zones_phys_page_mapped_count, pg_end - pg_start, relaxed);
+}
+
+static void
+zcram(zone_t zone, vm_offset_t addr, uint32_t pages, zone_addr_kind_t kind)
+{
+ uint32_t chunk_pages = zone->z_chunk_pages;
+
+ assert(pages % chunk_pages == 0);
+ for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
+ zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, kind);
+ zone_unlock(zone);
+ }
+}
+
+void
+zone_cram_foreign(zone_t zone, vm_offset_t newmem, vm_size_t size)
+{
+ uint32_t pages = (uint32_t)atop(size);
+
+ if (!from_zone_map(newmem, size, ZONE_ADDR_FOREIGN)) {
+ panic("zone_cram_foreign: foreign memory [%p] being crammed is "
+ "outside of expected range", (void *)newmem);
+ }
+ if (!zone->z_allows_foreign) {
+ panic("zone_cram_foreign: foreign memory [%p] being crammed in "
+ "zone '%s%s' not expecting it", (void *)newmem,
+ zone_heap_name(zone), zone_name(zone));
+ }
+ if (size % ptoa(zone->z_chunk_pages)) {
+ panic("zone_cram_foreign: foreign memory [%p] being crammed has "
+ "invalid size %zx", (void *)newmem, (size_t)size);
+ }
+ if (startup_phase >= STARTUP_SUB_ZALLOC) {
+ panic("zone_cram_foreign: foreign memory [%p] being crammed "
+ "after zalloc is initialized", (void *)newmem);
+ }
+
+ bzero((void *)newmem, size);
+ zcram(zone, newmem, pages, ZONE_ADDR_FOREIGN);
+}
+
+void
+zone_fill_initially(zone_t zone, vm_size_t nelems)
+{
+ kma_flags_t kmaflags;
+ kern_return_t kr;
+ vm_offset_t addr;
+ uint32_t pages;
+
+ assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
+ assert(zone->z_elems_avail == 0);
+
+ kmaflags = zone_kma_flags(zone, Z_WAITOK) | KMA_PERMANENT;
+ pages = zone_alloc_pages_for_nelems(zone, nelems);
+ kr = kernel_memory_allocate(zone_submap(zone), &addr, ptoa(pages),
+ 0, kmaflags, VM_KERN_MEMORY_ZONE);
+ if (kr != KERN_SUCCESS) {
+ panic("kernel_memory_allocate() of %u pages failed", pages);
+ }
+
+ zone_meta_populate(addr, ptoa(pages));
+ zcram(zone, addr, pages, ZONE_ADDR_NATIVE);
+}
+
+static vm_offset_t
+zone_allocate_va(zone_t z, zalloc_flags_t flags)
+{
+ kma_flags_t kmaflags = zone_kma_flags(z, flags) | KMA_VAONLY;
+ vm_size_t size = ptoa(z->z_chunk_pages);
+ kern_return_t kr;
+ vm_offset_t addr;
+
+ kr = kernel_memory_allocate(zone_submap(z), &addr, size, 0,
+ kmaflags, VM_KERN_MEMORY_ZONE);
+
+#if !__LP64__
+ if (kr == KERN_NO_SPACE && z->z_replenishes) {
+ /*
+ * On 32-bit the zone submaps do not have as much VA
+ * available, so use the VA reserved map for this
+ * purpose.
+ */
+ vm_map_t map = zone_submaps[Z_SUBMAP_IDX_VA_RESERVE];
+ kr = kernel_memory_allocate(map, &addr, size, 0,
+ kmaflags, VM_KERN_MEMORY_ZONE);
+ }
+#endif
+
+ if (kr == KERN_SUCCESS) {
+#if ZALLOC_EARLY_GAPS
+ if (__improbable(zone_caching_disabled < 0)) {
+ zone_allocate_random_early_gap(z);
+ }
+#endif /* ZALLOC_EARLY_GAPS */
+ zone_meta_populate(addr, size);
+ return addr;
+ }
+
+ panic_include_zprint = TRUE;
+#if CONFIG_ZLEAKS
+ if ((zleak_state & ZLEAK_STATE_ACTIVE)) {
+ panic_include_ztrace = TRUE;
+ }
+#endif /* CONFIG_ZLEAKS */
+ zone_t zone_largest = zone_find_largest();
+ panic("zalloc: zone map exhausted while allocating from zone [%s%s], "
+ "likely due to memory leak in zone [%s%s] "
+ "(%luM, %d elements allocated)",
+ zone_heap_name(z), zone_name(z),
+ zone_heap_name(zone_largest), zone_name(zone_largest),
+ (unsigned long)zone_size_wired(zone_largest) >> 20,
+ zone_count_allocated(zone_largest));
+}
+
+static bool
+zone_expand_pred_nope(__unused zone_t z)
+{
+ return false;
+}
+
+static inline void
+ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
+{
+#if DEBUG || DEVELOPMENT
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
+ size, 0, 0, 0);
+#else
+ (void)size;
+#endif
+}
+
+static inline void
+ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
+{
+#if DEBUG || DEVELOPMENT
+ task_t task = current_task();
+ if (pages && task) {
+ ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
+ }
+ VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
+ pages, 0, 0, 0);
+#else
+ (void)pages;
+#endif
+}
+
+static void
+zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
+{
+ thread_t self = current_thread();
+ bool vm_priv = (self->options & TH_OPT_VMPRIV);
+ bool clear_vm_priv;
+
+ for (;;) {
+ if (!pred) {
+ /* NULL pred means "try just once" */
+ pred = zone_expand_pred_nope;
+ } else if (!pred(z)) {
+ return;
+ }
+
+ if (vm_priv && !z->z_expander_vm_priv) {
+ /*
+ * Claim the vm priv overcommit slot
+ *
+ * We do not track exact ownership for VM privileged
+ * threads, so use the rwlock boost as a stop-gap
+ * just in case.
+ */
+ set_thread_rwlock_boost();
+ z->z_expander_vm_priv = true;
+ clear_vm_priv = true;
+ } else {
+ clear_vm_priv = false;
+ }
+
+ if (z->z_expander == NULL) {
+ z->z_expander = self;
+ break;
+ }
+ if (clear_vm_priv) {
+ break;
+ }
+
+ if (flags & Z_NOPAGEWAIT) {
+ return;
+ }
+
+ z->z_expanding_wait = true;
+ lck_spin_sleep_with_inheritor(&z->z_lock, LCK_SLEEP_DEFAULT,
+ &z->z_expander, z->z_expander,
+ TH_UNINT, TIMEOUT_WAIT_FOREVER);
+ }
+
+ do {
+ struct zone_page_metadata *meta = NULL;
+ uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
+ vm_page_t page_list = NULL;
+ vm_offset_t addr = 0;
+ int waited = 0;
+
+ /*
+ * While we hold the zone lock, check whether there's VA we can:
+ * - complete from partial pages,
+ * - reuse from the sequester list.
+ *
+ * When the page is being populated we pretend we allocated
+ * an extra element so that zone_gc() can't attempt to free
+ * the chunk (as it could become empty while we wait for pages).
+ */
+ if (!zone_pva_is_null(z->z_pageq_va)) {
+ meta = zone_meta_queue_pop_native(z,
+ &z->z_pageq_va, &addr);
+ if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
+ cur_pages = meta->zm_page_index;
+ meta -= cur_pages;
+ addr -= ptoa(cur_pages);
+ zone_meta_lock_in_partial(z, meta, cur_pages);
+ }
+ }
+ zone_unlock(z);
+
+ /*
+ * Do the zone leak activation here because zleak_activate()
+ * may block, and can't be done on the way out.
+ *
+ * Trigger jetsams via the vm_pageout_garbage_collect thread if
+ * we're running out of zone memory.
+ */
+ zleak_activate_if_needed();
+ if (zone_map_nearing_exhaustion()) {
+ thread_wakeup((event_t)&vm_pageout_garbage_collect);
+ }
+
+ /*
+ * And now allocate pages to populate our VA.
+ */
+ if (z->z_percpu) {
+ min_pages = z->z_chunk_pages;
+ } else {
+ min_pages = (uint32_t)atop(round_page(zone_elem_size(z)));
+ }
+
+ ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
+
+ while (pages < z->z_chunk_pages - cur_pages) {
+ vm_page_t m = vm_page_grab();
+
+ if (m) {
+ pages++;
+ m->vmp_snext = page_list;
+ page_list = m;
+ vm_page_zero_fill(m);
+ continue;
+ }
+
+ if (pages >= min_pages && (vm_pool_low() || waited)) {
+ break;
+ }
+
+ if ((flags & Z_NOPAGEWAIT) == 0) {
+ waited++;
+ VM_PAGE_WAIT();
+ continue;
+ }
+
+ /*
+ * Undo everything and bail out:
+ *
+ * - free pages
+ * - undo the fake allocation if any
+ * - put the VA back on the VA page queue.
+ */
+ vm_page_free_list(page_list, FALSE);
+ ZONE_TRACE_VM_KERN_REQUEST_END(pages);
+
+ zone_lock(z);
+
+ if (cur_pages) {
+ zone_meta_unlock_from_partial(z, meta, cur_pages);
+ }
+ if (meta) {
+ zone_meta_queue_push(z, &z->z_pageq_va,
+ meta + cur_pages);
+ }
+ goto page_shortage;
+ }
+
+ /*
+ * If we didn't find pre-allocated VA, then allocate a chunk
+ * of VA here.
+ */
+ if (addr == 0) {
+ addr = zone_allocate_va(z, flags);
+ meta = zone_meta_from_addr(addr);
+ new_va = z->z_chunk_pages;
+ }
+
+ kernel_memory_populate_with_pages(zone_submap(z),
+ addr + ptoa(cur_pages), ptoa(pages), page_list,
+ zone_kma_flags(z, flags), VM_KERN_MEMORY_ZONE);
+
+ ZONE_TRACE_VM_KERN_REQUEST_END(pages);
+
+ zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages,
+ ZONE_ADDR_NATIVE);
+ } while (pred(z));
+
+page_shortage:
+ zleak_track_if_needed(z);
+
+ if (clear_vm_priv) {
+ z->z_expander_vm_priv = false;
+ clear_thread_rwlock_boost();
+ }
+ if (z->z_expander == self) {
+ z->z_expander = THREAD_NULL;
+ }
+ if (z->z_expanding_wait) {
+ z->z_expanding_wait = false;
+ wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
+ }
+}
+
+static bool
+zalloc_needs_refill(zone_t zone)
+{
+ if (zone->z_elems_free > zone->z_elems_rsv) {
+ return false;
+ }
+ if (zone->z_wired_cur < zone->z_wired_max) {
+ return true;
+ }
+ if (zone->exhaustible) {
+ return false;
+ }
+ if (zone->expandable) {
+ /*
+ * If we're expandable, just don't go through this again.
+ */
+ zone->z_wired_max = ~0u;
+ return true;
+ }
+ zone_unlock(zone);
+
+ panic_include_zprint = true;
+#if CONFIG_ZLEAKS
+ if (zleak_state & ZLEAK_STATE_ACTIVE) {
+ panic_include_ztrace = true;
+ }
+#endif /* CONFIG_ZLEAKS */
+ panic("zone '%s%s' exhausted", zone_heap_name(zone), zone_name(zone));
+}
+
+static void
+zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
+{
+ zone_foreach(z) {
+ if (z->no_callout) {
+ /* z_async_refilling will never be set */
+ continue;
+ }
+
+ if (z->z_replenishes) {
+ /* those use the zone_replenish_thread */
+ continue;
+ }
+
+ zone_lock(z);
+ if (z->z_self && z->z_async_refilling) {
+ z->z_async_refilling = false;
+ zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
+ }
+ zone_unlock(z);
+ }
+}
+
+static inline void
+zone_expand_async_schedule_if_needed(zone_t zone)
+{
+ if (zone->z_elems_free > zone->z_elems_rsv || zone->z_async_refilling ||
+ zone->no_callout) {
+ return;
+ }
+
+ if (!zone->expandable && zone->z_wired_cur >= zone->z_wired_max) {
+ return;
+ }
+
+ if (zone->z_elems_free == 0 || !vm_pool_low()) {
+ zone->z_async_refilling = true;
+ thread_call_enter(&zone_expand_callout);
+ }
+}
+
+#endif /* !ZALLOC_TEST */
+#pragma mark zone replenishing (VM allocations)
+#if !ZALLOC_TEST
+
+/*
+ * Tracks how many zone_replenish threads are active, because zone_gc()
+ * needs them to be finished before it proceeds.
+ *
+ * This counts how many replenish threads are active in
+ * ZONE_REPLENISH_ACTIVE_INC increments,
+ * and uses the low bit to track if there are any waiters.
+ */
+#define ZONE_REPLENISH_ACTIVE_NONE 0u
+#define ZONE_REPLENISH_ACTIVE_WAITER_BIT 1u
+#define ZONE_REPLENISH_ACTIVE_INC 2u
+#define ZONE_REPLENISH_ACTIVE_MASK (~ZONE_REPLENISH_ACTIVE_WAITER_BIT)
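+
+/*
+ * For illustration (hypothetical state): with two replenish threads active
+ * and one waiter registered, the counter holds
+ * 2 * ZONE_REPLENISH_ACTIVE_INC + ZONE_REPLENISH_ACTIVE_WAITER_BIT == 5;
+ * (value & ZONE_REPLENISH_ACTIVE_MASK) / ZONE_REPLENISH_ACTIVE_INC recovers
+ * the number of active threads, and the waiter bit records whether threads
+ * sleeping on &zone_replenish_active must be woken once the count returns
+ * to ZONE_REPLENISH_ACTIVE_NONE.
+ */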
+static unsigned _Atomic zone_replenish_active;
+static unsigned zone_replenish_wakeups;
+static unsigned zone_replenish_wakeups_initiated;
+static unsigned zone_replenish_throttle_count;
+
+#define ZONE_REPLENISH_TARGET (16 * 1024)
+
+static void
+zone_replenish_wait_if_needed(void)
+{
+ /*
+ * This check can be racy, the reserves ought to be enough
+ * to compensate for a little race
+ */
+ while (os_atomic_load(&zone_replenish_active, relaxed) !=
+ ZONE_REPLENISH_ACTIVE_NONE) {
+ unsigned o_active, n_active;
+
+ assert_wait(&zone_replenish_active, THREAD_UNINT);
+
+ os_atomic_rmw_loop(&zone_replenish_active, o_active, n_active, relaxed, {
+ if (o_active == ZONE_REPLENISH_ACTIVE_NONE) {
+ os_atomic_rmw_loop_give_up({
+ clear_wait(current_thread(), THREAD_AWAKENED);
+ return;
+ });
+ }
+ if (o_active & ZONE_REPLENISH_ACTIVE_WAITER_BIT) {
+ os_atomic_rmw_loop_give_up(break);
+ }
+ n_active = o_active | ZONE_REPLENISH_ACTIVE_WAITER_BIT;
+ });
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+}
+
+__attribute__((noinline))
+static void
+zone_replenish_locked(zone_t zone)
+{
+ thread_t thr = current_thread();
+ uint32_t min_free;
+
+ zone_replenish_wakeups++;
+
+ /*
+ * We'll let threads continue to allocate under the reserve:
+ * - until it is depleted to 50% for regular threads,
+ * - until it is depleted to 25% for VM_PRIV threads.
+ *
+ * After that only TH_OPT_ZONE_PRIV threads may continue.
+ */
+ if (thr->options & TH_OPT_VMPRIV) {
+ min_free = zone->z_elems_rsv / 4;
+ } else {
+ min_free = zone->z_elems_rsv / 2;
+ }
+
+ while (zone->z_elems_free <= zone->z_elems_rsv) {
+ /*
+ * Wakeup the replenish thread if not running.
+ */
+ if (!zone->z_async_refilling) {
+ os_atomic_add(&zone_replenish_active,
+ ZONE_REPLENISH_ACTIVE_INC, relaxed);
+ zone->z_async_refilling = true;
+ zone_replenish_wakeups_initiated++;
+ thread_wakeup(&zone->z_elems_rsv);
+ }
+
+ if (zone->z_elems_free > min_free) {
+ break;
+ }
+
+ /*
+ * TH_OPT_ZONE_PRIV threads are the GC thread and a replenish
+ * thread itself.
+ *
+ * Replenish threads *need* to use the reserve. GC threads need
+ * to get through the current allocation, but then will wait at
+ * a higher level after they've dropped any locks which would
+ * deadlock the replenish thread.
+ *
+ * The value of (refill_level / 2) in the previous bit of code
+ * should have given us headroom even though this thread didn't
+ * wait.
+ */
+ if (thr->options & TH_OPT_ZONE_PRIV) {
+ assert(zone->z_elems_free != 0);
+ break;
+ }
+
+ if (startup_phase < STARTUP_SUB_MACH_IPC) {
+ panic("vm_map_steal_memory didn't steal enough memory: "
+ "trying to grow [%s%s] before the scheduler has started",
+ zone_heap_name(zone), zone_name(zone));
+ }
+
+ /*
+ * Wait for the replenish threads to add more elements
+ * for us to allocate from.
+ */
+ zone_replenish_throttle_count++;
+ zone->z_replenish_wait = true;
+ assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC);
+ zone_unlock(zone);
+ thread_block(THREAD_CONTINUE_NULL);
+ zone_lock(zone);
+ zone->z_replenish_wait = false;
+
+ assert(zone->z_self == zone);
+ }
+}
+
+static bool
+zone_replenish_needed(zone_t z)
+{
+ return z->z_elems_free <= z->z_elems_rsv;
+}
+
+/*
+ * High priority VM privileged threads used to asynchronously refill a given zone.
+ * These are needed for data structures used by the lower level VM itself. The
+ * replenish thread maintains a reserve of elements, so that the VM will never
+ * block in the zone allocator.
+ */
+__dead2
+static void
+zone_replenish_thread(void *_z, wait_result_t __unused wr)
+{
+ unsigned o_active, n_active;
+ zone_t z = _z;
+
+ zone_lock(z);
+ assert(z->z_self == z);
+ assert(z->z_async_refilling && z->z_replenishes);
+
+ zone_expand_locked(z, Z_WAITOK, zone_replenish_needed);
+
+ if (z->z_replenish_wait) {
+ /* Wakeup any potentially throttled allocations */
+ z->z_replenish_wait = false;
+ thread_wakeup(z);
+ }
+
+ /* wakeup zone_reclaim() callers that were possibly waiting */
+ os_atomic_rmw_loop(&zone_replenish_active, o_active, n_active, relaxed, {
+ if (os_sub_overflow(o_active, ZONE_REPLENISH_ACTIVE_INC, &n_active)) {
+ panic("zone_replenish_active corrupt: %d", o_active);
+ }
+ if ((n_active & ZONE_REPLENISH_ACTIVE_MASK) == 0) {
+ n_active = ZONE_REPLENISH_ACTIVE_NONE;
+ }
+ });
+
+ if (n_active == ZONE_REPLENISH_ACTIVE_NONE &&
+ (o_active & ZONE_REPLENISH_ACTIVE_WAITER_BIT)) {
+ thread_wakeup(&zone_replenish_active);
+ }
+
+ z->z_async_refilling = false;
+ assert_wait(&z->z_elems_rsv, THREAD_UNINT);
+
+ zone_unlock(z);
+
+ thread_block_parameter(zone_replenish_thread, z);
+ __builtin_unreachable();
+}
+
+void
+zone_replenish_configure(zone_t z)
+{
+ thread_t th;
+ kern_return_t kr;
+ char name[MAXTHREADNAMESIZE];
+
+ zone_lock(z);
+ assert(!z->z_replenishes && !z->z_destructible);
+ z->z_elems_rsv = (uint16_t)(ZONE_REPLENISH_TARGET / zone_elem_size(z));
+ z->z_replenishes = true;
+ os_atomic_add(&zone_replenish_active, ZONE_REPLENISH_ACTIVE_INC, relaxed);
+ z->z_async_refilling = true;
+ zone_unlock(z);
+
+ kr = kernel_thread_create(zone_replenish_thread, z, MAXPRI_KERNEL, &th);
+ if (kr != KERN_SUCCESS) {
+ panic("zone_replenish_configure, thread create: 0x%x", kr);
+ }
+ /* make sure this thread can't lose its stack */
+ assert(th->reserved_stack == th->kernel_stack);
+
+ snprintf(name, sizeof(name), "z_replenish(%s)", zone_name(z));
+ thread_set_thread_name(th, name);
+
+ thread_mtx_lock(th);
+ th->options |= TH_OPT_VMPRIV | TH_OPT_ZONE_PRIV;
+ thread_start(th);
+ thread_mtx_unlock(th);
+
+ thread_deallocate(th);
+}
+
+/*! @} */
+#endif /* !ZALLOC_TEST */
+#pragma mark zone jetsam integration
+#if !ZALLOC_TEST
+
+/*
+ * We're being very conservative here and picking a value of 95%. We might need to lower this if
+ * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
+ */
+#define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
+
+/*
+ * Trigger zone-map-exhaustion jetsams if the zone map is X% full, where X=zone_map_jetsam_limit.
+ * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
+ */
+TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
+ ZONE_MAP_JETSAM_LIMIT_DEFAULT);
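+
+/*
+ * Worked example (numbers made up for illustration): with the default limit
+ * of 95 and a zone map sized for 1,000,000 physical pages
+ * (zone_phys_mapped_max_pages), zone_map_nearing_exhaustion() starts
+ * returning true, and jetsams start being triggered, once more than
+ * 950,000 pages are actually mapped.
+ */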
+
+void
+get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
+{
+ vm_offset_t phys_pages = os_atomic_load(&zones_phys_page_mapped_count, relaxed);
+ *current_size = ptoa_64(phys_pages);
+ *capacity = ptoa_64(zone_phys_mapped_max_pages);
+}
+
+void
+get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
+{
+ zone_t largest_zone = zone_find_largest();
+
+ /*
+ * Append kalloc heap name to zone name (if zone is used by kalloc)
+ */
+ snprintf(zone_name, zone_name_len, "%s%s",
+ zone_heap_name(largest_zone), largest_zone->z_name);
+
+ *zone_size = zone_size_wired(largest_zone);
+}
+
+bool
+zone_map_nearing_exhaustion(void)
+{
+ uint64_t phys_pages = os_atomic_load(&zones_phys_page_mapped_count, relaxed);
+ return phys_pages * 100 > zone_phys_mapped_max_pages * zone_map_jetsam_limit;
+}
+
+
+#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
+
+/*
+ * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
+ * to walk through the jetsam priority bands and kill processes.
+ */
+static void
+kill_process_in_largest_zone(void)
+{
+ pid_t pid = -1;
+ zone_t largest_zone = zone_find_largest();
+
+ printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
+ ptoa_64(os_atomic_load(&zones_phys_page_mapped_count, relaxed)),
+ ptoa_64(zone_phys_mapped_max_pages),
+ (uint64_t)zone_submaps_approx_size(),
+ (uint64_t)(zone_foreign_size() + zone_native_size()),
+ zone_map_jetsam_limit);
+ printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
+ largest_zone->z_name, (uintptr_t)zone_size_wired(largest_zone));
+
+ /*
+ * We want to make sure we don't call this function from userspace.
+ * Or we could end up trying to synchronously kill the process
+ * whose context we're in, causing the system to hang.
+ */
+ assert(current_task() == kernel_task);
+
+ /*
+ * If vm_object_zone is the largest, check to see if the number of
+ * elements in vm_map_entry_zone is comparable.
+ *
+ * If so, consider vm_map_entry_zone as the largest. This lets us target
+ * a specific process to jetsam to quickly recover from the zone map
+ * bloat.
+ */
+ if (largest_zone == vm_object_zone) {
+ unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
+ unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
+ /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
+ if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
+ largest_zone = vm_map_entry_zone;
+ printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
+ (uintptr_t)zone_size_wired(largest_zone));
+ }
+ }
+
+ /* TODO: Extend this to check for the largest process in other zones as well. */
+ if (largest_zone == vm_map_entry_zone) {
+ pid = find_largest_process_vm_map_entries();
+ } else {
+ printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
+ "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
+ largest_zone->z_name);
+ }
+ if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
+ printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
+ }
+}
+
+#endif /* !ZALLOC_TEST */
+#pragma mark zfree
+#if !ZALLOC_TEST
+#if KASAN_ZALLOC
+
+/*!
+ * @defgroup zfree
+ * @{
+ *
+ * @brief
+ * The codepath for zone frees.
+ *
+ * @discussion
+ * There are 4 major ways to free memory that end up in the zone allocator:
+ * - @c zfree()
+ * - @c zfree_percpu()
+ * - @c kfree*()
+ * - @c zfree_permanent()
+ *
+ * While permanent zones have their own allocation scheme, all other codepaths
+ * will eventually go through the @c zfree_ext() choking point.
+ *
+ * Ignoring the @c gzalloc_free() codepath, the decision tree looks like this:
+ * <code>
+ * zfree_ext()
+ * ├───> zfree_cached() ────────────────╮
+ * │ │ │
+ * │ │ │
+ * │ ├───> zfree_cached_slow() ───┤
+ * │ │ │ │
+ * │ │ v │
+ * ╰───────┴───> zfree_item() ──────────┴───>
+ * </code>
+ *
+ * @c zfree_ext() takes care of all the generic work to perform on an element
+ * before it is freed (zeroing, logging, tagging, ...) then will hand it off to:
+ * - @c zfree_item() if zone caching is off
+ * - @c zfree_cached() if zone caching is on.
+ *
+ * @c zfree_cached() can make a number of decisions:
+ * - a fast path if the (f) or (a) magazines have space (preemption disabled),
+ * - using the cpu local or recirculation depot calling @c zfree_cached_slow(),
+ * - falling back to @c zfree_item() when CPU caching has been disabled.
+ */
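+
+/*
+ * For scale (hypothetical zc_mag_size() of 8, see zfree_cached() below):
+ * once its per-cpu cache is set up, a CPU that only frees can absorb
+ * 16 elements, filling one (f) and one (a) magazine, without ever taking
+ * the zone lock; the 17th free on that CPU then goes through
+ * zfree_cached_slow().
+ */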
+
+/*
+ * Called from zfree() to add the element being freed to the KASan quarantine.
+ *
+ * Returns true if the newly-freed element made it into the quarantine without
+ * displacing another, false otherwise. In the latter case, addrp points to the
+ * address of the displaced element, which will be freed by the zone.
+ */
+static bool
+kasan_quarantine_freed_element(
+ zone_t *zonep, /* the zone the element is being freed to */
+ void **addrp) /* address of the element being freed */
+{
+ zone_t zone = *zonep;
+ void *addr = *addrp;
+
+ /*
+ * Resize back to the real allocation size and hand off to the KASan
+ * quarantine. `addr` may then point to a different allocation, if the
+ * current element replaced another in the quarantine. The zone then
+ * takes ownership of the swapped out free element.
+ */
+ vm_size_t usersz = zone_elem_size(zone) - 2 * zone->z_kasan_redzone;
+ vm_size_t sz = usersz;
+
+ if (addr && zone->z_kasan_redzone) {
+ kasan_check_free((vm_address_t)addr, usersz, KASAN_HEAP_ZALLOC);
+ addr = (void *)kasan_dealloc((vm_address_t)addr, &sz);
+ assert(sz == zone_elem_size(zone));
+ }
+ if (addr && !zone->kasan_noquarantine) {
+ kasan_free(&addr, &sz, KASAN_HEAP_ZALLOC, zonep, usersz, true);
+ if (!addr) {
+ return TRUE;
+ }
+ }
+ if (addr && zone->kasan_noquarantine) {
+ kasan_unpoison(addr, zone_elem_size(zone));
+ }
+ *addrp = addr;
+ return FALSE;
+}
+
+#endif /* KASAN_ZALLOC */
+
+__header_always_inline void
+zfree_drop(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze,
+ bool recirc)
+{
+ vm_offset_t esize = zone_elem_size(zone);
+
+ if (zone_meta_mark_free(meta, ze) == recirc) {
+ zone_meta_double_free_panic(zone, ze, __func__);
+ }
+
+ vm_offset_t old_size = meta->zm_alloc_size;
+ vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
+ vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
+
+ if (new_size == 0) {
+ /* whether the page was on the intermediate or all_used queue, move it to free */
+ zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
+ zone->z_wired_empty += meta->zm_chunk_len;
+ } else if (old_size + esize > max_size) {
+ /* first free element on page, move from all_used */
+ zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
+ }
+}
+
+static void
+zfree_item(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze)
+{
+ /* transfer preemption count to lock */
+ zone_lock_nopreempt_check_contention(zone, NULL);
+
+ zfree_drop(zone, meta, ze, false);
+ zone_elems_free_add(zone, 1);
+
+ zone_unlock(zone);
+}
+
+__attribute__((noinline))
+static void
+zfree_cached_slow(zone_t zone, struct zone_page_metadata *meta,
+ zone_element_t ze, zone_cache_t cache)
+{
+ struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
+ zone_magazine_t mag = NULL;
+ uint16_t n = 0;
+
+ if (zone_meta_is_free(meta, ze)) {
+ zone_meta_double_free_panic(zone, ze, __func__);
+ }
+
+ if (zone == zc_magazine_zone) {
+ mag = (zone_magazine_t)zone_element_addr(ze,
+ zone_elem_size(zone));
+#if KASAN_ZALLOC
+ kasan_poison_range((vm_offset_t)mag, zone_elem_size(zone),
+ ASAN_VALID);
+#endif
+ } else {
+ mag = zone_magazine_alloc(Z_NOWAIT);
+ if (__improbable(mag == NULL)) {
+ return zfree_item(zone, meta, ze);
+ }
+ mag->zm_cur = 1;
+ mag->zm_elems[0] = ze;
+ }
+
+ mag = zone_magazine_replace(&cache->zc_free_cur,
+ &cache->zc_free_elems, mag);
+
+ z_debug_assert(cache->zc_free_cur <= 1);
+ z_debug_assert(mag->zm_cur == zc_mag_size());
+
+ STAILQ_INSERT_HEAD(&mags, mag, zm_link);
+ n = 1;
+
+ if (cache->zc_depot_max >= 2 * zc_mag_size()) {
+ /*
+ * If we can use the local depot (zc_depot_max allows for
+ * 2 magazines worth of elements) then:
+ *
+ * 1. if we have space for an extra magazine locally,
+ * push it, and leave.
+ *
+ * 2. if we overflow, then take (1 / zc_recirc_denom)
+ * of the depot out, in order to migrate it to the
+ * recirculation depot.
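+ *
+ * Worked example (tunable values made up for illustration): with
+ * zc_mag_size() == 8, zc_depot_max == 64 and zc_recirc_denom == 3,
+ * case 1 accepts magazines while the depot holds 6 or fewer, and the
+ * next overflow then drains magazines into the recirculation depot
+ * until only 5 remain, i.e. roughly a third of the cached elements
+ * get migrated back.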
+ */
+ zone_depot_lock_nopreempt(cache);
+
+ if ((cache->zc_depot_cur + 2) * zc_mag_size() <=
+ cache->zc_depot_max) {
+ cache->zc_depot_cur++;
+ STAILQ_INSERT_TAIL(&cache->zc_depot, mag, zm_link);
+ return zone_depot_unlock(cache);
+ }
+
+ while (zc_recirc_denom * cache->zc_depot_cur * zc_mag_size() >=
+ (zc_recirc_denom - 1) * cache->zc_depot_max) {
+ mag = STAILQ_FIRST(&cache->zc_depot);
+ STAILQ_REMOVE_HEAD(&cache->zc_depot, zm_link);
+ STAILQ_INSERT_TAIL(&mags, mag, zm_link);
+ cache->zc_depot_cur--;
+ n++;
+ }
+
+ zone_depot_unlock(cache);
+ } else {
+ enable_preemption();
+ }
+
+ /*
+ * Preflight validity of all the elements before we touch the zone
+ * metadata, and then insert them into the recirculation depot.
+ */
+ STAILQ_FOREACH(mag, &mags, zm_link) {
+ for (uint16_t i = 0; i < zc_mag_size(); i++) {
+ zone_element_validate(zone, mag->zm_elems[i]);
+ }
+ }
+
+ zone_lock_check_contention(zone, cache);
+
+ STAILQ_FOREACH(mag, &mags, zm_link) {
+ for (uint16_t i = 0; i < zc_mag_size(); i++) {
+ zone_element_t e = mag->zm_elems[i];
+
+ if (!zone_meta_mark_free(zone_meta_from_element(e), e)) {
+ zone_meta_double_free_panic(zone, e, __func__);
+ }
+ }
+ }
+ STAILQ_CONCAT(&zone->z_recirc, &mags);
+ zone->z_recirc_cur += n;
+
+ zone_elems_free_add(zone, n * zc_mag_size());
+
+ zone_unlock(zone);
+}
+
+static void
+zfree_cached(zone_t zone, struct zone_page_metadata *meta, zone_element_t ze)
+{
+ zone_cache_t cache = zpercpu_get(zone->z_pcpu_cache);
+
+ if (cache->zc_free_cur >= zc_mag_size()) {
+ if (cache->zc_alloc_cur >= zc_mag_size()) {
+ return zfree_cached_slow(zone, meta, ze, cache);
+ }
+ zone_cache_swap_magazines(cache);
+ }
+
+ if (__improbable(cache->zc_alloc_elems == NULL)) {
+ return zfree_item(zone, meta, ze);
+ }
+
+ if (zone_meta_is_free(meta, ze)) {
+ zone_meta_double_free_panic(zone, ze, __func__);
+ }
+
+ uint16_t idx = cache->zc_free_cur++;
+ if (idx >= zc_mag_size()) {
+ zone_accounting_panic(zone, "zc_free_cur overflow");
+ }
+ cache->zc_free_elems[idx] = ze;
+
+ enable_preemption();
+}
+
+/*
+ * The function is noinline when zlog can be used so that backtracing can
+ * reliably skip the boring zfree_ext() and zfree_log_trace() frames.
+ */
+#if ZONE_ENABLE_LOGGING
+__attribute__((noinline))
+#endif /* ZONE_ENABLE_LOGGING */
+void
+zfree_ext(zone_t zone, zone_stats_t zstats, void *addr)
+{
+ struct zone_page_metadata *page_meta;
+ vm_offset_t elem = (vm_offset_t)addr;
+ vm_size_t elem_size = zone_elem_size(zone);
+ zone_element_t ze;
+
+ DTRACE_VM2(zfree, zone_t, zone, void*, addr);
+ TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, elem_size, elem);
+#if VM_MAX_TAG_ZONES
+ if (__improbable(zone->tags)) {
+ vm_tag_t tag = *ztSlot(zone, elem) >> 1;
+ // set the tag with b0 clear so the block remains inuse
+ *ztSlot(zone, elem) = 0xFFFE;
+ vm_tag_update_zone_size(tag, zone->tag_zone_index,
+ -(long)elem_size);
+ }
+#endif /* VM_MAX_TAG_ZONES */
+
+#if KASAN_ZALLOC
+ if (kasan_quarantine_freed_element(&zone, &addr)) {
+ return;
+ }
+ /*
+ * kasan_quarantine_freed_element() might return a different
+ * {zone, addr} than the one being freed for kalloc heaps.
+ *
+ * Make sure we reload everything.
+ */
+ elem = (vm_offset_t)addr;
+ elem_size = zone_elem_size(zone);
+#endif
+#if CONFIG_ZLEAKS
+ /*
+ * Zone leak detection: un-track the allocation
+ */
+ if (__improbable(zone->zleak_on)) {
+ zleak_free(elem, elem_size);
+ }
+#endif /* CONFIG_ZLEAKS */
+#if ZONE_ENABLE_LOGGING
+ if (__improbable(DO_LOGGING(zone))) {
+ zfree_log_trace(zone, elem, __builtin_frame_address(0));
+ }
+#endif /* ZONE_ENABLE_LOGGING */
+#if CONFIG_GZALLOC
+ if (__improbable(zone->gzalloc_tracked)) {
+ return gzalloc_free(zone, zstats, addr);
+ }
+#endif /* CONFIG_GZALLOC */
+
+ page_meta = zone_element_resolve(zone, elem, elem_size, &ze);
+ ze.ze_value |= zfree_clear_or_poison(zone, elem, elem_size);
+#if KASAN_ZALLOC
+ if (zone->z_percpu) {
+ zpercpu_foreach_cpu(i) {
+ kasan_poison_range(elem + ptoa(i), elem_size,
+ ASAN_HEAP_FREED);
+ }
+ } else {
+ kasan_poison_range(elem, elem_size, ASAN_HEAP_FREED);
+ }
+#endif
+
+ disable_preemption();
+ zpercpu_get(zstats)->zs_mem_freed += elem_size;
+
+ if (zone->z_pcpu_cache) {
+ return zfree_cached(zone, page_meta, ze);
+ }
+
+ return zfree_item(zone, page_meta, ze);
+}
+
+void
+(zfree)(union zone_or_view zov, void *addr)
+{
+ zone_t zone = zov.zov_view->zv_zone;
+ zone_stats_t zstats = zov.zov_view->zv_stats;
+ assert(!zone->z_percpu);
+ zfree_ext(zone, zstats, addr);
+}
+
+void
+zfree_percpu(union zone_or_view zov, void *addr)
+{
+ zone_t zone = zov.zov_view->zv_zone;
+ zone_stats_t zstats = zov.zov_view->zv_stats;
+ assert(zone->z_percpu);
+ zfree_ext(zone, zstats, (void *)__zpcpu_demangle(addr));
+}
+
+/*! @} */
+#endif /* !ZALLOC_TEST */
+#pragma mark zalloc
+#if !ZALLOC_TEST
+
+/*!
+ * @defgroup zalloc
+ * @{
+ *
+ * @brief
+ * The codepath for zone allocations.
+ *
+ * @discussion
+ * There are 4 major ways to allocate memory that end up in the zone allocator:
+ * - @c zalloc(), @c zalloc_flags(), ...
+ * - @c zalloc_percpu()
+ * - @c kalloc*()
+ * - @c zalloc_permanent()
+ *
+ * While permanent zones have their own allocation scheme, all other codepaths
+ * will eventually go through the @c zalloc_ext() choking point.
+ *
+ * Ignoring the @c zalloc_gz() codepath, the decision tree looks like this:
+ * <code>
+ * zalloc_ext()
+ * │
+ * ├───> zalloc_cached() ──────> zalloc_cached_fast() ───╮
+ * │ │ ^ │
+ * │ │ │ │
+ * │ ╰───> zalloc_cached_slow() ───╯ │
+ * │ │ │
+ * │<─────────────────╮ ├─────────────╮ │
+ * │ │ │ │ │
+ * │ │ v │ │
+ * │<───────╮ ╭──> zalloc_item_slow() ────┤ │
+ * │ │ │ │ │
+ * │ │ │ v │
+ * ╰───> zalloc_item() ──────────> zalloc_item_fast() ───┤
+ * │
+ * v
+ * zalloc_return()
+ * </code>
+ *
+ *
+ * The @c zalloc_item() track is used when zone caching is off:
+ * - @c zalloc_item_fast() is used when there are enough elements available,
+ * - @c zalloc_item_slow() is used when a refill is needed, which can cause
+ * the zone to grow. This is the only codepath that refills.
+ *
+ * This track uses the zone lock for serialization:
+ * - taken in @c zalloc_item(),
+ * - maintained during @c zalloc_item_slow() (possibly dropped and re-taken),
+ * - dropped in @c zalloc_item_fast().
+ *
+ *
+ * The @c zalloc_cached() track is used when zone caching is on:
+ * - @c zalloc_cached_fast() is taken when the cache has elements,
+ * - @c zalloc_cached_slow() is taken if a cache refill is needed.
+ * It can choose among several strategies:
+ * ~ @c zalloc_cached_from_depot() to try to reuse cpu stashed magazines,
+ * ~ using the global recirculation depot @c z_recirc,
+ * ~ using zalloc_import() if the zone has enough elements,
+ * ~ falling back to the @c zalloc_item() track if zone caching is disabled
+ * due to VM pressure or the zone has no available elements.
+ *
+ * This track disables preemption for serialization:
+ * - preemption is disabled in @c zalloc_cached(),
+ * - kept disabled during @c zalloc_cached_slow(), converted into a zone lock
+ * if switching to @c zalloc_item_slow(),
+ * - preemption is reenabled in @c zalloc_cached_fast().
+ *
+ * @c zalloc_cached_from_depot() also takes depot locks (taken by the caller,
+ * released by @c zalloc_cached_from_depot()).
+ *
+ * In general the @c zalloc_*_slow() codepaths deal with refilling and will
+ * tail call into the @c zalloc_*_fast() code to perform the actual allocation.
+ *
+ * @c zalloc_return() is the final function everyone tail calls into,
+ * which prepares the element for consumption by the caller and deals with
+ * common treatment (zone logging, tags, kasan, validation, ...).
+ */
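+
+/*
+ * Caller-side sketch (client code, not part of this file; the zone and the
+ * element type are made up for illustration). A typical round trip through
+ * the entry points listed above looks like:
+ *
+ *     ZONE_DECLARE(thing_zone, "example.things", sizeof(struct thing), ZC_NONE);
+ *
+ *     struct thing *t = zalloc_flags(thing_zone, Z_WAITOK | Z_ZERO);
+ *     ...
+ *     zfree(thing_zone, t);
+ *
+ * zalloc_flags() funnels into zalloc_ext() below, and zfree() into
+ * zfree_ext() above.
+ */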
+
+/*!
+ * @function zalloc_import
+ *
+ * @brief
+ * Import @c n elements in the specified array, opposite of @c zfree_drop().
+ *
+ * @param zone The zone to import elements from
+ * @param elems The array to import into
+ * @param n The number of elements to import. Must be non-zero,
+ * and smaller than @c zone->z_elems_free.
+ */
+__header_always_inline void
+zalloc_import(zone_t zone, zone_element_t *elems, uint32_t n)
+{
+ vm_size_t esize = zone_elem_size(zone);
+ uint32_t i = 0;
+
+ assertf(STAILQ_EMPTY(&zone->z_recirc),
+ "Trying to import from zone %p [%s%s] with non empty recirc",
+ zone, zone_heap_name(zone), zone_name(zone));
+
+ do {
+ vm_offset_t page, eidx, size = 0;
+ struct zone_page_metadata *meta;
+
+ if (!zone_pva_is_null(zone->z_pageq_partial)) {
+ meta = zone_pva_to_meta(zone->z_pageq_partial);
+ page = zone_pva_to_addr(zone->z_pageq_partial);
+ } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
+ meta = zone_pva_to_meta(zone->z_pageq_empty);
+ page = zone_pva_to_addr(zone->z_pageq_empty);
+ zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
+ } else {
+ zone_accounting_panic(zone, "z_elems_free corruption");
+ }
+
+ if (!zone_has_index(zone, meta->zm_index)) {
+ zone_page_metadata_index_confusion_panic(zone, page, meta);
+ }
+
+ vm_offset_t old_size = meta->zm_alloc_size;
+ vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
+
+ do {
+ eidx = zone_meta_find_and_clear_bit(zone, meta);
+ elems[i++] = zone_element_encode(page, eidx, ZPM_AUTO);
+ size += esize;
+ } while (i < n && old_size + size + esize <= max_size);
+
+ vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
+
+ if (new_size + esize > max_size) {
+ zone_meta_requeue(zone, &zone->z_pageq_full, meta);
+ } else if (old_size == 0) {
+ /* remove from free, move to intermediate */
+ zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
+ }
+ } while (i < n);
+}
+
+/*!
+ * @function zalloc_return
+ *
+ * @brief
+ * Performs the tail-end of the work required on allocations before the caller
+ * uses them.
+ *
+ * @discussion
+ * This function is called without any zone lock held, and with preemption
+ * restored to the state it had when @c zalloc_ext() was called.
+ *
+ * @param zone The zone we're allocating from.
+ * @param ze The encoded element we just allocated.
+ * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
+ * @param elem_size The element size for this zone.
+ * @param freemag An optional magazine that needs to be freed.
+ */
+__attribute__((noinline))
+static void *
+zalloc_return(zone_t zone, zone_element_t ze, zalloc_flags_t flags,
+ vm_offset_t elem_size, zone_magazine_t freemag)
+{
+ vm_offset_t addr = zone_element_addr(ze, elem_size);
+
+#if KASAN_ZALLOC
+ if (zone->z_percpu) {
+ zpercpu_foreach_cpu(i) {
+ kasan_poison_range(addr + ptoa(i), elem_size,
+ ASAN_VALID);
+ }
+ } else {
+ kasan_poison_range(addr, elem_size, ASAN_VALID);
+ }
+#endif
+#if ZALLOC_ENABLE_POISONING
+ zalloc_validate_element(zone, addr, elem_size, zone_element_prot(ze));
+#endif /* ZALLOC_ENABLE_POISONING */
+#if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS
+ if (__improbable(zalloc_should_log_or_trace_leaks(zone, elem_size))) {
+ zalloc_log_or_trace_leaks(zone, addr, __builtin_frame_address(0));
+ }
+#endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
+#if KASAN_ZALLOC
+ if (zone->z_kasan_redzone) {
+ addr = kasan_alloc(addr, elem_size,
+ elem_size - 2 * zone->z_kasan_redzone,
+ zone->z_kasan_redzone);
+ elem_size -= 2 * zone->z_kasan_redzone;
+ }
+ /*
+ * Initialize buffer with unique pattern only if memory
+ * wasn't expected to be zeroed.
+ */
+ if (!zone->z_free_zeroes && !(flags & Z_ZERO)) {
+ kasan_leak_init(addr, elem_size);
+ }
+#endif /* KASAN_ZALLOC */
+ if ((flags & Z_ZERO) && !zone->z_free_zeroes) {
+ bzero((void *)addr, elem_size);
+ }
+
+#if VM_MAX_TAG_ZONES
+ if (__improbable(zone->tags)) {
+ vm_tag_t tag = zalloc_flags_get_tag(flags);
+ if (tag == VM_KERN_MEMORY_NONE) {
+ tag = VM_KERN_MEMORY_KALLOC;
+ }
+ // set the tag with b0 clear so the block remains inuse
+ *ztSlot(zone, addr) = (vm_tag_t)(tag << 1);
+ vm_tag_update_zone_size(tag, zone->tag_zone_index,
+ (long)elem_size);
+ }
+#endif /* VM_MAX_TAG_ZONES */
+
+ TRACE_MACHLEAKS(ZALLOC_CODE, ZALLOC_CODE_2, elem_size, addr);
+ DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
+ if (freemag) {
+ zone_magazine_free(freemag);
+ }
+ return (void *)addr;
+}
+
+#if CONFIG_GZALLOC
+/*!
+ * @function zalloc_gz
+ *
+ * @brief
+ * Performs allocations for zones using gzalloc.
+ *
+ * @discussion
+ * This function is noinline so that it doesn't affect the codegen
+ * of the fastpath.
+ */
+__attribute__((noinline))
+static void *
+zalloc_gz(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
+{
+ vm_offset_t addr = gzalloc_alloc(zone, zstats, flags);
+ return zalloc_return(zone, zone_element_encode(addr, 0, ZPM_AUTO),
+ flags, zone_elem_size(zone), NULL);
+}
+#endif /* CONFIG_GZALLOC */
+
+static void *
+zalloc_item_fast(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
+{
+ vm_size_t esize = zone_elem_size(zone);
+ zone_element_t ze;
+
+ zalloc_import(zone, &ze, 1);
+ zone_elems_free_sub(zone, 1);
+ zpercpu_get(zstats)->zs_mem_allocated += esize;
+ zone_unlock(zone);
+
+ return zalloc_return(zone, ze, flags, esize, NULL);
+}
+
+/*!
+ * @function zalloc_item_slow
+ *
+ * @brief
+ * Performs allocations when the zone is out of elements.
+ *
+ * @discussion
+ * This function might drop the lock and reenable preemption,
+ * which means the per-CPU caching layer or recirculation depot
+ * might have received elements.
+ */
+__attribute__((noinline))
+static void *
+zalloc_item_slow(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
+{
+ if (zone->z_replenishes) {
+ zone_replenish_locked(zone);
+ } else {
+ if ((flags & Z_NOWAIT) == 0) {
+ zone_expand_locked(zone, flags, zalloc_needs_refill);
+ }
+ if (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) {
+ zone_expand_async_schedule_if_needed(zone);
+ }
+ if (__improbable(zone->z_elems_free == 0)) {
+ zone_unlock(zone);
+ if (__improbable(flags & Z_NOFAIL)) {
+ zone_nofail_panic(zone);
+ }
+ DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
+ return NULL;
+ }
+ }
+
+ /*
+ * We might have changed cores or been preempted/blocked while expanding
+ * the zone. Allocating from the zone when the recirculation depot
+ * is not empty is not allowed.
+ *
+ * It will be rare but possible for the depot to refill while we were
+ * waiting for pages. If that happens we need to start over.
+ */
+ if (!STAILQ_EMPTY(&zone->z_recirc)) {
+ zone_unlock(zone);
+ return zalloc_ext(zone, zstats, flags);
+ }
+
+ return zalloc_item_fast(zone, zstats, flags);
+}
+
+/*!
+ * @function zalloc_item
+ *
+ * @brief
+ * Performs allocations when zone caching is off.
+ *
+ * @discussion
+ * This function calls @c zalloc_item_slow() when refilling the zone
+ * is needed, or @c zalloc_item_fast() if the zone has enough free elements.
+ */
+static void *
+zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
+{
+ zone_lock_check_contention(zone, NULL);
+
+ /*
+ * When we committed to the zalloc_item() path,
+ * zone caching might have been flipped/enabled.
+ *
+ * If we got preempted for long enough, the recirculation layer
+ * may have been populated, and allocating from the zone would be
+ * incorrect.
+ *
+ * So double check for this extremely rare race here.
+ */
+ if (__improbable(!STAILQ_EMPTY(&zone->z_recirc))) {
+ zone_unlock(zone);
+ return zalloc_ext(zone, zstats, flags);
+ }
+
+ if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
+ return zalloc_item_slow(zone, zstats, flags);
+ }
+
+ return zalloc_item_fast(zone, zstats, flags);
+}
+
+static void *
+zalloc_cached_fast(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
+ zone_cache_t cache, zone_magazine_t freemag)
+{
+ vm_offset_t esize = zone_elem_size(zone);
+ zone_element_t ze;
+ uint32_t index;
+
+ index = --cache->zc_alloc_cur;
+ if (index >= zc_mag_size()) {
+ zone_accounting_panic(zone, "zc_alloc_cur wrap around");
+ }
+ ze = cache->zc_alloc_elems[index];
+ cache->zc_alloc_elems[index].ze_value = 0;
+
+ zpercpu_get(zstats)->zs_mem_allocated += esize;
+ enable_preemption();
+
+ if (zone_meta_is_free(zone_meta_from_element(ze), ze)) {
+ zone_meta_double_free_panic(zone, ze, __func__);
+ }
+
+ return zalloc_return(zone, ze, flags, esize, freemag);
+}
+
+static void *
+zalloc_cached_from_depot(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
+ zone_cache_t cache, zone_cache_t depot, zone_magazine_t mag)
+{
+ STAILQ_REMOVE_HEAD(&depot->zc_depot, zm_link);
+ if (depot->zc_depot_cur-- == 0) {
+ zone_accounting_panic(zone, "zc_depot_cur wrap-around");
+ }
+ zone_depot_unlock_nopreempt(depot);
+
+ mag = zone_magazine_replace(&cache->zc_alloc_cur,
+ &cache->zc_alloc_elems, mag);
+
+ z_debug_assert(cache->zc_alloc_cur == zc_mag_size());
+ z_debug_assert(mag->zm_cur == 0);
+
+ if (zone == zc_magazine_zone) {
+ enable_preemption();
+ bzero(mag, zone_elem_size(zone));
+ return mag;
+ }
+
+ return zalloc_cached_fast(zone, zstats, flags, cache, mag);
+}
+
+__attribute__((noinline))
+static void *
+zalloc_cached_slow(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags,
+ zone_cache_t cache)
+{
+ zone_magazine_t mag = NULL;
+ struct zone_depot mags = STAILQ_HEAD_INITIALIZER(mags);
+
+ /*
+ * Try to allocate from our local depot, if there's one.
+ */
+ if (STAILQ_FIRST(&cache->zc_depot)) {
+ zone_depot_lock_nopreempt(cache);
+
+ if ((mag = STAILQ_FIRST(&cache->zc_depot)) != NULL) {
+ return zalloc_cached_from_depot(zone, zstats, flags,
+ cache, cache, mag);
+ }
+
+ zone_depot_unlock_nopreempt(cache);
+ }
+
+ zone_lock_nopreempt_check_contention(zone, cache);
+
+ /*
+ * If the recirculation depot is empty, we'll need to import.
+ * The system is tuned for this to be extremely rare.
+ */
+ if (__improbable(STAILQ_EMPTY(&zone->z_recirc))) {
+ uint16_t n_elems = zc_mag_size();
+
+ if (zone->z_elems_free < n_elems + zone->z_elems_rsv / 2 &&
+ os_sub_overflow(zone->z_elems_free,
+ zone->z_elems_rsv / 2, &n_elems)) {
+ n_elems = 0;
+ }
+
+ z_debug_assert(n_elems <= zc_mag_size());
+
+ if (__improbable(n_elems == 0)) {
+ /*
+ * If importing elements would deplete the zone,
+ * call zalloc_item_slow()
+ */
+ return zalloc_item_slow(zone, zstats, flags);
+ }
+
+ if (__improbable(zone_caching_disabled)) {
+ if (__improbable(zone_caching_disabled < 0)) {
+ /*
+ * In the first 10s after boot, mess with
+ * the scan position in order to make early
+ * allocation patterns less predictable.
+ */
+ zone_early_scramble_rr(zone, zstats);
+ }
+ return zalloc_item_fast(zone, zstats, flags);
+ }
+
+ zalloc_import(zone, cache->zc_alloc_elems, n_elems);
+
+ cache->zc_alloc_cur = n_elems;
+ zone_elems_free_sub(zone, n_elems);
+
+ zone_unlock_nopreempt(zone);
+
+ return zalloc_cached_fast(zone, zstats, flags, cache, NULL);
+ }
+
+ uint16_t n_mags = 0;
+
+ /*
+ * If the recirculation depot has elements, then try to fill
+ * the local per-cpu depot to (1 / zc_recirc_denom) of its capacity.
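+ * For illustration (hypothetical tunables, assuming z_recirc holds
+ * enough magazines): with zc_mag_size() == 8, zc_depot_max == 96 and
+ * zc_recirc_denom == 3, the loop below pulls 5 magazines (40 elements)
+ * out of z_recirc, stopping just after crossing the 32-element
+ * (one third of zc_depot_max) mark.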
+ */
+ do {
+ mag = STAILQ_FIRST(&zone->z_recirc);
+ STAILQ_REMOVE_HEAD(&zone->z_recirc, zm_link);
+ STAILQ_INSERT_TAIL(&mags, mag, zm_link);
+ n_mags++;
+
+ for (uint16_t i = 0; i < zc_mag_size(); i++) {
+ zone_element_t e = mag->zm_elems[i];
+
+ if (!zone_meta_mark_used(zone_meta_from_element(e), e)) {
+ zone_meta_double_free_panic(zone, e, __func__);
+ }
+ }
+ } while (!STAILQ_EMPTY(&zone->z_recirc) &&
+ zc_recirc_denom * n_mags * zc_mag_size() <= cache->zc_depot_max);
+
+ zone_elems_free_sub(zone, n_mags * zc_mag_size());
+ zone_counter_sub(zone, z_recirc_cur, n_mags);
+
+ zone_unlock_nopreempt(zone);
+
+ /*
+ * And then incorporate everything into our per-cpu layer.