+ /*
+ * Not sure which is the corrupted one.
+ * It's less likely that the backup pointer was overwritten with
+ * ( (sane address) ^ (valid cookie) ), so we'll guess that the
+ * primary pointer has been overwritten with a sane but incorrect address.
+ */
+ if (sane_primary && sane_backup) {
+ zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);
+ }
+
+ /* Neither is sane, so just guess. */
+ zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0);
+}
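+
+/*
+ * For reference, the decision table the checks above implement (the
+ * single-corruption cases, where exactly one pointer is insane, are
+ * presumably handled earlier in this function, outside this hunk):
+ *
+ *   sane_primary   sane_backup   pointer blamed in the panic
+ *   yes            yes           primary (forging backup ^ cookie is less likely)
+ *   no             no            primary (pure guess)
+ */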
+
+/*
+ * Adds the element to the head of the zone's free list.
+ * Keeps a backup next-pointer at the end of the element.
+ */
+static inline void
+free_to_zone(zone_t zone,
+ vm_offset_t element,
+ boolean_t poison)
+{
+ vm_offset_t old_head;
+ struct zone_page_metadata *page_meta;
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ page_meta = get_zone_page_metadata((struct zone_free_element *)element, FALSE);
+ assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
+ old_head = (vm_offset_t)page_metadata_get_freelist(page_meta);
+
+ if (__improbable(!is_sane_zone_element(zone, old_head))) {
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) old_head, zone->zone_name);
+ }
+
+ if (__improbable(!is_sane_zone_element(zone, element))) {
+ panic("zfree: freeing invalid pointer %p to zone %s\n",
+ (void *) element, zone->zone_name);
+ }
+
+ if (__improbable(old_head == element)) {
+ panic("zfree: double free of %p to zone %s\n",
+ (void *) element, zone->zone_name);
+ }
+ /*
+ * Always write a redundant next pointer.
+ * So that it is more difficult to forge, xor it with a random cookie:
+ * a poisoned element is indicated by using zp_poisoned_cookie
+ * instead of zp_nopoison_cookie.
+ */
+
+ *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);
+
+ /*
+ * Insert this element at the head of the free list. We also xor the
+ * primary pointer with the zp_nopoison_cookie to make sure a free
+ * element does not provide the location of the next free element directly.
+ */
+ *primary = old_head ^ zp_nopoison_cookie;
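+
+ /*
+ * Round-trip sketch with made-up values: if old_head is
+ * 0xffffff8012345600 and zp_nopoison_cookie is 0x5555aaaa33cc0f0f,
+ * then *primary holds their xor, and the alloc path recovers old_head
+ * by xoring with the same cookie:
+ *
+ *   *primary ^ zp_nopoison_cookie == old_head
+ *   *backup  ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie) == old_head
+ *
+ * A stray write to either word breaks the corresponding equality.
+ */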
+ page_metadata_set_freelist(page_meta, (struct zone_free_element *)element);
+ page_meta->free_count++;
+ if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
+ if (page_meta->free_count == 1) {
+ /* first foreign element freed on page, move from all_used */
+ re_queue_tail(&zone->pages.any_free_foreign, &(page_meta->pages));
+ } else {
+ /* no other list transitions */
+ }
+ } else if (page_meta->free_count == get_metadata_alloc_count(page_meta)) {
+ /* whether the page was on the intermediate or the all_used queue, move it to free */
+ re_queue_tail(&zone->pages.all_free, &(page_meta->pages));
+ zone->count_all_free_pages += page_meta->page_count;
+ } else if (page_meta->free_count == 1) {
+ /* first free element on page, move from all_used */
+ re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
+ }
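+ /*
+ * Summary of the queue transitions above:
+ *   foreign element, first free on its page:  all_used -> any_free_foreign
+ *   native page now fully free:               intermediate/all_used -> all_free
+ *   native page, first free element:          all_used -> intermediate
+ */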
+ zone->count--;
+ zone->countfree++;
+
+#if KASAN_ZALLOC
+ kasan_poison_range(element, zone->elem_size, ASAN_HEAP_FREED);
+#endif
+}
+
+
+/*
+ * Removes an element from the zone's free list, returning 0 if the free list is empty.
+ * Verifies that the next-pointer and backup next-pointer are intact,
+ * and verifies that a poisoned element hasn't been modified.
+ */
+static inline vm_offset_t
+try_alloc_from_zone(zone_t zone,
+ vm_tag_t tag __unused,
+ boolean_t* check_poison)
+{
+ vm_offset_t element;
+ struct zone_page_metadata *page_meta;
+
+ *check_poison = FALSE;
+
+ /* if zone is empty, bail */
+ if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign)) {
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
+ } else if (!queue_empty(&zone->pages.intermediate)) {
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
+ } else if (!queue_empty(&zone->pages.all_free)) {
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
+ assert(zone->count_all_free_pages >= page_meta->page_count);
+ zone->count_all_free_pages -= page_meta->page_count;
+ } else {
+ return 0;
+ }
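+ /*
+ * Note the preference order implemented above: foreign pages first
+ * (when allowed), then partially-used (intermediate) pages, then
+ * fully-free pages. Drawing from all_free last presumably keeps whole
+ * pages free for as long as possible so the garbage collector can
+ * reclaim them.
+ */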
+ /* Check that page_meta passes is_sane_zone_page_metadata */
+ if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta))) {
+ panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
+ (void *) page_meta, zone->zone_name);
+ }
+ assert(PAGE_METADATA_GET_ZONE(page_meta) == zone);
+ element = (vm_offset_t)page_metadata_get_freelist(page_meta);
+
+ if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size))) {
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) element, zone->zone_name);
+ }
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ /*
+ * Since the primary next pointer is xor'ed with zp_nopoison_cookie
+ * for obfuscation, xor it again to retrieve the original value
+ */
+ vm_offset_t next_element = *primary ^ zp_nopoison_cookie;
+ vm_offset_t next_element_primary = *primary;
+ vm_offset_t next_element_backup = *backup;
+
+ /*
+ * backup_ptr_mismatch_panic will determine what next_element
+ * should have been, and print it appropriately
+ */
+ if (__improbable(!is_sane_zone_element(zone, next_element))) {
+ backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);
+ }
+
+ /* Check the backup pointer for the regular cookie */
+ if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {
+ /* Check for the poisoned cookie instead */
+ if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) {
+ /* Neither cookie is valid, corruption has occurred */
+ backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup);
+ }
+
+ /*
+ * Element was marked as poisoned, so check its integrity before using it.
+ */
+ *check_poison = TRUE;
+ }
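+ /*
+ * To summarize, the checks above distinguish three states of the
+ * backup pointer:
+ *   backup ^ zp_nopoison_cookie == next   clean element, use as-is
+ *   backup ^ zp_poisoned_cookie == next   poisoned; verify before use
+ *   neither                               corrupted; panicked above
+ */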
+
+ /* Make sure the page_meta is at the correct offset from the start of the page */
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE))) {
+ panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n",
+ page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE));
+ }
+
+ /* Make sure next_element belongs to the same page as page_meta */
+ if (next_element) {
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE))) {
+ panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
+ (void *)next_element, (void *)element, zone->zone_name);
+ }
+ }
+
+ /* Remove this element from the free list */
+ page_metadata_set_freelist(page_meta, (struct zone_free_element *)next_element);
+ page_meta->free_count--;
+
+ if (page_meta->free_count == 0) {
+ /* move to all used */
+ re_queue_tail(&zone->pages.all_used, &(page_meta->pages));
+ } else {
+ if (!zone->allows_foreign || from_zone_map(element, zone->elem_size)) {
+ if (get_metadata_alloc_count(page_meta) == page_meta->free_count + 1) {
+ /* remove from free, move to intermediate */
+ re_queue_tail(&zone->pages.intermediate, &(page_meta->pages));
+ }
+ }
+ }
+ zone->countfree--;
+ zone->count++;
+ zone->sum_count++;
+
+#if VM_MAX_TAG_ZONES
+ if (__improbable(zone->tags)) {
+ // set the tag with bit 0 clear so the block remains marked in-use
+ ZTAG(zone, element)[0] = (tag << 1);
+ }
+#endif /* VM_MAX_TAG_ZONES */
+
+
+#if KASAN_ZALLOC
+ kasan_poison_range(element, zone->elem_size, ASAN_VALID);
+#endif
+
+ return element;
+}
+
+/*
+ * End of zone poisoning
+ */
+
+/*
+ * Zone info options
+ */
+#define ZINFO_SLOTS MAX_ZONES /* for now */
+
+zone_t zone_find_largest(void);
+
+/*
+ * Async allocation of zones
+ * This mechanism allows for bootstrapping an empty zone which is setup with
+ * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call
+ * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free.
+ * This will prime the zone for the next use.
+ *
+ * Currently the thread_callout function (zalloc_async) will loop through all zones
+ * looking for any zone with async_pending set and do the work for it.
+ *
+ * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call,
+ * then zalloc_noblock to an empty zone may succeed.
+ */
+void zalloc_async(
+ thread_call_param_t p0,
+ thread_call_param_t p1);
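+
+/*
+ * A minimal sketch of the priming step described above (illustrative,
+ * not the actual zalloc_async body):
+ *
+ *   void *tmp = zalloc(z);   // may block while the zone grows
+ *   zfree(z, tmp);           // leaves a primed element on the free list
+ */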
+
+static thread_call_data_t call_async_alloc;
+
+/*
+ * Align elements that use the zone page list to 32 byte boundaries.
+ */
+#define ZONE_ELEMENT_ALIGNMENT 32
+
+#define zone_wakeup(zone) thread_wakeup((event_t)(zone))
+#define zone_sleep(zone) \
+ (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN_ALWAYS, (event_t)(zone), THREAD_UNINT);
+
+
+#define lock_zone_init(zone) \
+MACRO_BEGIN \
+ lck_attr_setdefault(&(zone)->lock_attr); \
+ lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \
+ &zone_locks_grp, &(zone)->lock_attr); \
+MACRO_END
+
+#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock)
+
+/*
+ * Exclude more than one concurrent garbage collection
+ */
+decl_lck_mtx_data(, zone_gc_lock);
+
+lck_attr_t zone_gc_lck_attr;
+lck_grp_t zone_gc_lck_grp;
+lck_grp_attr_t zone_gc_lck_grp_attr;
+lck_mtx_ext_t zone_gc_lck_ext;
+
+boolean_t zone_gc_allowed = TRUE;
+boolean_t panic_include_zprint = FALSE;
+
+mach_memory_info_t *panic_kext_memory_info = NULL;
+vm_size_t panic_kext_memory_size = 0;
+
+#define ZALLOC_DEBUG_ZONEGC 0x00000001
+#define ZALLOC_DEBUG_ZCRAM 0x00000002
+
+#if DEBUG || DEVELOPMENT
+static uint32_t zalloc_debug = 0;
+#endif
+
+/*
+ * Zone leak debugging code
+ *
+ * When enabled, this code keeps a log to track allocations to a particular zone that have not
+ * yet been freed. Examining this log will reveal the source of a zone leak. The log is allocated
+ * only when logging is enabled, so there is no effect on the system when it's turned off. Logging is
+ * off by default.
+ *
+ * Enable the logging via the boot-args. Add the parameter "zlog=<zone>" to boot-args where <zone>
+ * is the name of the zone you wish to log.
+ *
+ * This code tracks only a limited number of zones at a time (see
+ * MAX_NUM_ZONES_ALLOWED_LOGGING), so you need to identify which one is leaking first.
+ * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone
+ * garbage collector. Note that the zone name printed in the panic message is not necessarily the one
+ * containing the leak. So do a zprint from gdb and locate the zone with the bloated size. This
+ * is most likely the problem zone, so set zlog in boot-args to this zone name, reboot and re-run the test. The
+ * next time it panics with this message, examine the log using the kgmacros zstack, findoldest and countpcs.
+ * See the help in the kgmacros for usage info.
+ *
+ *
+ * Zone corruption logging
+ *
+ * Logging can also be used to help identify the source of a zone corruption. First, identify the zone
+ * that is being corrupted, then add "-zc zlog=<zone name>" to the boot-args. When -zc is used in conjunction
+ * with zlog, it changes the logging style to track both allocations and frees to the zone. So when the
+ * corruption is detected, examining the log will show you the stack traces of the callers who last allocated
+ * and freed any particular element in the zone. Use the findelem kgmacro with the address of the element that's been
+ * corrupted to examine its history. This should lead to the source of the corruption.
+ */
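+
+/*
+ * Example boot-args (the zone name is illustrative):
+ *
+ *   leak debugging:         zlog=kalloc.512 zrecs=2048
+ *   corruption debugging:   -zc zlog=kalloc.512
+ */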
+
+static boolean_t log_records_init = FALSE;
+static int log_records; /* size of the log, expressed in number of records */
+
+#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */
+
+static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING;
+static int num_zones_logged = 0;
+
+static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */
+
+/* Log allocations and frees to help debug a zone element corruption */
+boolean_t corruption_debug_flag = DEBUG; /* enabled by "-zc" boot-arg */
+/* Make pointer-scanning leak detection possible for all zones */
+
+#if DEBUG || DEVELOPMENT
+boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-arg */
+#endif /* DEBUG || DEVELOPMENT */
+
+
+/*
+ * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to
+ * the number of records you want in the log. For example, "zrecs=10" sets it to 10 records. Since this
+ * is the number of stacks suspected of leaking, we don't need many records.
+ */
+
+#if defined(__LP64__)
+#define ZRECORDS_MAX 2560 /* Max records allowed in the log */
+#else
+#define ZRECORDS_MAX 1536 /* Max records allowed in the log */
+#endif
+#define ZRECORDS_DEFAULT 1024 /* default records in log if zrecs is not specified in boot-args */
+
+/*
+ * Each record in the log contains a pointer to the zone element it refers to,
+ * and a small array to hold the PCs from the stack trace. A record is added
+ * to the log each time a zalloc() is done in the zone_of_interest. For leak
+ * debugging, the record is cleared when a zfree() is done. For corruption
+ * debugging, the log tracks both allocs and frees.
+ * If the log fills, old records are replaced as if it were a circular buffer.
+ */
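+
+/*
+ * Hypothetical shape of one record, per the description above (names
+ * and field sizes are illustrative, not the actual definition):
+ *
+ *   struct zrecord_sketch {
+ *       vm_offset_t z_element;   // zone element this record tracks
+ *       uintptr_t   z_pc[16];    // PCs from the recorded stack trace
+ *   };
+ */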