+static void /* noreturn */
+backup_ptr_mismatch_panic(zone_t zone,
+ vm_offset_t element,
+ vm_offset_t primary,
+ vm_offset_t backup)
+{
+ vm_offset_t likely_backup;
+
+ boolean_t sane_backup;
+ boolean_t sane_primary = is_sane_zone_element(zone, primary);
+ boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE;
+
+#if defined(__LP64__)
+ /* We can inspect the tag in the upper bits for additional confirmation */
+ if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000)
+ element_was_poisoned = TRUE;
+ else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
+ element_was_poisoned = FALSE;
+#endif
+
+ if (element_was_poisoned) {
+ likely_backup = backup ^ zp_poisoned_cookie;
+ sane_backup = is_sane_zone_element(zone, likely_backup);
+ } else {
+ likely_backup = backup ^ zp_nopoison_cookie;
+ sane_backup = is_sane_zone_element(zone, likely_backup);
+ }
+
+ /* The primary is definitely the corrupted one */
+ if (!sane_primary && sane_backup)
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+
+ /* The backup is definitely the corrupted one */
+ if (sane_primary && !sane_backup)
+ zone_element_was_modified_panic(zone, element, backup,
+ (primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)),
+ zone->elem_size - sizeof(vm_offset_t));
+
+ /*
+ * Not sure which is the corrupted one.
+ * It's less likely that the backup pointer was overwritten with
+ * ( (sane address) ^ (valid cookie) ), so we'll guess that the
+ * primary pointer has been overwritten with a sane but incorrect address.
+ */
+ if (sane_primary && sane_backup)
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+
+ /* Neither is sane, so just guess. */
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+}
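+
+/*
+ * Worked example of the recovery attempted above (hypothetical values):
+ * for an unpoisoned element whose true next pointer is N, the backup
+ * slot holds N ^ zp_nopoison_cookie, so
+ *
+ * likely_backup = (N ^ zp_nopoison_cookie) ^ zp_nopoison_cookie = N
+ *
+ * round-trips to a sane zone address. When neither cookie round-trips
+ * to a sane address, both words were damaged and the code above can
+ * only guess which value to report.
+ */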
+
+/*
+ * Sets the next element of tail to elem.
+ * elem can be NULL.
+ * Preserves the poisoning state of the element.
+ */
+static inline void
+append_zone_element(zone_t zone,
+ struct zone_free_element *tail,
+ struct zone_free_element *elem)
+{
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, (vm_offset_t *) tail);
+
+ vm_offset_t old_backup = *backup;
+
+ vm_offset_t old_next = (vm_offset_t) tail->next;
+ vm_offset_t new_next = (vm_offset_t) elem;
+
+ if (old_next == (old_backup ^ zp_nopoison_cookie))
+ *backup = new_next ^ zp_nopoison_cookie;
+ else if (old_next == (old_backup ^ zp_poisoned_cookie))
+ *backup = new_next ^ zp_poisoned_cookie;
+ else
+ backup_ptr_mismatch_panic(zone,
+ (vm_offset_t) tail,
+ old_next,
+ old_backup);
+
+ tail->next = elem;
+}
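+
+/*
+ * Invariant sketch for the update above (not compiled; for illustration).
+ * Whichever cookie matched on entry is reused for the store, so afterwards
+ *
+ * *backup == (vm_offset_t) tail->next ^ cookie
+ *
+ * where cookie is zp_poisoned_cookie for a poisoned tail and
+ * zp_nopoison_cookie otherwise, i.e. the poisoning state survives.
+ */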
+
+
+/*
+ * Insert a linked list of elements (delineated by head and tail) at the head of
+ * the zone free list. Every element in the list being added has already gone
+ * through append_zone_element, so their backup pointers are already
+ * set properly.
+ * Precondition: There should be no elements after tail
+ */
+static inline void
+add_list_to_zone(zone_t zone,
+ struct zone_free_element *head,
+ struct zone_free_element *tail)
+{
+ assert(tail->next == NULL);
+ assert(!zone->use_page_list);
+
+ append_zone_element(zone, tail, zone->free_elements);
+
+ zone->free_elements = head;
+}
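+
+/*
+ * Usage sketch (hypothetical helper; the real callers are the batching
+ * free paths elsewhere in this file, and only zones without use_page_list
+ * qualify). Both elements must already carry valid backup pointers, or
+ * append_zone_element() panics.
+ */
+#if 0 /* example only */
+static void
+splice_two_elements(zone_t z,
+ struct zone_free_element *e1,
+ struct zone_free_element *e2)
+{
+ append_zone_element(z, e1, e2); /* chain e1 -> e2 */
+ append_zone_element(z, e2, NULL); /* satisfy tail->next == NULL */
+ add_list_to_zone(z, e1, e2); /* splice chain at the head */
+}
+#endif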
+
+
+/*
+ * Adds the element to the head of the zone's free list.
+ * Keeps a backup next-pointer at the end of the element.
+ */
+static inline void
+free_to_zone(zone_t zone,
+ vm_offset_t element,
+ boolean_t poison)
+{
+ vm_offset_t old_head;
+ struct zone_page_metadata *page_meta;
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ if (zone->use_page_list) {
+ page_meta = get_zone_page_metadata((struct zone_free_element *)element);
+ assert(page_meta->zone == zone);
+ old_head = (vm_offset_t)page_meta->elements;
+ } else {
+ old_head = (vm_offset_t)zone->free_elements;
+ }
+
+#if MACH_ASSERT
+ if (__improbable(!is_sane_zone_element(zone, old_head)))
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) old_head, zone->zone_name);
+#endif
+
+ if (__improbable(!is_sane_zone_element(zone, element)))
+ panic("zfree: freeing invalid pointer %p to zone %s\n",
+ (void *) element, zone->zone_name);
+
+ /*
+ * Always write a redundant next pointer.
+ * To make it harder to forge, XOR it with a random cookie:
+ * a poisoned element uses zp_poisoned_cookie instead of
+ * zp_nopoison_cookie.
+ */
+
+ *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);
+
+ /* Insert this element at the head of the free list */
+ *primary = old_head;
+ if (zone->use_page_list) {
+ page_meta->elements = (struct zone_free_element *)element;
+ page_meta->free_count++;
+ if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
+ if (page_meta->free_count == 1) {
+ /* first foreign element freed on page, move from all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.any_free_foreign, (queue_entry_t)page_meta);
+ } else {
+ /* no other list transitions */
+ }
+ } else if (page_meta->free_count == page_meta->alloc_count) {
+ /* whether the page was on the intermediate or all_used queue, move it to free */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_free, (queue_entry_t)page_meta);
+ } else if (page_meta->free_count == 1) {
+ /* first free element on page, move from all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
+ }
+ } else {
+ zone->free_elements = (struct zone_free_element *)element;
+ }
+ zone->count--;
+ zone->countfree++;
+}
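+
+/*
+ * Resulting element layout (offsets assumed from how get_backup_ptr()
+ * is invoked with zone->elem_size; shown for illustration):
+ *
+ * offset 0 : next pointer (primary)
+ * offset elem_size - sizeof(vm_offset_t) : next ^ cookie (backup)
+ *
+ * with cookie = zp_poisoned_cookie when the body was poisoned,
+ * zp_nopoison_cookie otherwise.
+ */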
+
+
+/*
+ * Removes an element from the zone's free list, returning 0 if the free list is empty.
+ * Verifies that the next-pointer and backup next-pointer are intact,
+ * and verifies that a poisoned element hasn't been modified.
+ */
+static inline vm_offset_t
+try_alloc_from_zone(zone_t zone,
+ boolean_t* check_poison)
+{
+ vm_offset_t element;
+ struct zone_page_metadata *page_meta;
+
+ *check_poison = FALSE;
+
+ /* if zone is empty, bail */
+ if (zone->use_page_list) {
+ if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
+ else if (!queue_empty(&zone->pages.intermediate))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
+ else if (!queue_empty(&zone->pages.all_free))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
+ else {
+ return 0;
+ }
+
+ /* Check that page_meta passes is_sane_zone_page_metadata */
+ if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
+ panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
+ (void *) page_meta, zone->zone_name);
+ assert(page_meta->zone == zone);
+ element = (vm_offset_t)page_meta->elements;
+ } else {
+ if (zone->free_elements == NULL)
+ return 0;
+
+ element = (vm_offset_t)zone->free_elements;
+ }
+
+#if MACH_ASSERT
+ if (__improbable(!is_sane_zone_element(zone, element)))
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) element, zone->zone_name);
+#endif
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ vm_offset_t next_element = *primary;
+ vm_offset_t next_element_backup = *backup;
+
+ /*
+ * backup_ptr_mismatch_panic will determine what next_element
+ * should have been, and print it appropriately
+ */
+ if (__improbable(!is_sane_zone_element(zone, next_element)))
+ backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);
+
+ /* Check the backup pointer for the regular cookie */
+ if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {
+
+ /* Check for the poisoned cookie instead */
+ if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie)))
+ /* Neither cookie is valid, corruption has occurred */
+ backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);
+
+ /*
+ * Element was marked as poisoned, so check its integrity before using it.
+ */
+ *check_poison = TRUE;
+ }
+
+ if (zone->use_page_list) {
+
+ /* Make sure the page_meta is at the correct offset from the start of the page */
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element)))
+ panic("zalloc: metadata located at incorrect location on page of zone %s\n",
+ zone->zone_name);
+
+ /* Make sure next_element belongs to the same page as page_meta */
+ if (next_element) {
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element)))
+ panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
+ (void *)next_element, (void *)element, zone->zone_name);
+ }
+ }
+
+ /* Remove this element from the free list */
+ if (zone->use_page_list) {
+
+ page_meta->elements = (struct zone_free_element *)next_element;
+ page_meta->free_count--;
+
+ if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
+ if (page_meta->free_count == 0) {
+ /* move to all used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
+ } else {
+ /* no other list transitions */
+ }
+ } else if (page_meta->free_count == 0) {
+ /* remove from intermediate or free, move to all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
+ } else if (page_meta->alloc_count == page_meta->free_count + 1) {
+ /* remove from free, move to intermediate */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
+ }
+ } else {
+ zone->free_elements = (struct zone_free_element *)next_element;
+ }
+ zone->countfree--;
+ zone->count++;
+ zone->sum_count++;
+
+ return element;
+}
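+
+/*
+ * Caller-side sketch (hypothetical; the real zalloc() path adds locking,
+ * refill, and the actual poison verification): check_poison tells the
+ * caller to validate the element body before reuse.
+ */
+#if 0 /* example only */
+ boolean_t check_poison = FALSE;
+ vm_offset_t addr = try_alloc_from_zone(zone, &check_poison);
+ if (addr != 0 && check_poison) {
+ /* scan the words between the primary and backup pointers for
+ * the poison pattern and panic on any modified word */
+ }
+#endif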
+
+
+/*
+ * End of zone poisoning
+ */
+
+/*
+ * Fake zones for things that want to report via zprint but are not actually zones.
+ */
+struct fake_zone_info {
+ const char* name;
+ void (*init)(int);
+ void (*query)(int *,
+ vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *,
+ uint64_t *, int *, int *, int *);
+};
+
+static const struct fake_zone_info fake_zones[] = {
+};
+static const unsigned int num_fake_zones =
+ sizeof (fake_zones) / sizeof (fake_zones[0]);
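+
+/*
+ * A hypothetical entry, purely to illustrate the callback shapes (the
+ * table is empty here, and these names do not exist):
+ *
+ * { "example.fake", example_fake_init, example_fake_query },
+ */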
+
+/*
+ * Zone info options
+ */
+boolean_t zinfo_per_task = FALSE; /* enabled by -zinfop in boot-args */
+#define ZINFO_SLOTS 200 /* for now */
+#define ZONES_MAX (ZINFO_SLOTS - num_fake_zones - 1)
+
+/*
+ * Support for garbage collection of unused zone pages
+ *
+ * The kernel virtually allocates the "zone map" submap of the kernel
+ * map. When an individual zone needs more storage, memory is allocated
+ * out of the zone map, and the two-level "zone_page_table" is
+ * on-demand expanded so that it has entries for those pages.
+ * zone_page_init()/zone_page_alloc() initialize "alloc_count"
+ * to the number of zone elements that occupy the zone page (at
+ * least 1, even when a single zone element spans multiple
+ * pages).
+ *
+ * Asynchronously, the zone_gc() logic attempts to walk zone free
+ * lists to see if all the elements on a zone page are free. If
+ * "collect_count" (which it increments during the scan) matches
+ * "alloc_count", the zone page is a candidate for collection and the
+ * physical page is returned to the VM system. During this process, the
+ * first word of the zone page is re-used to maintain a linked list of
+ * to-be-collected zone pages.
+ */
+typedef uint32_t zone_page_index_t;
+#define ZONE_PAGE_INDEX_INVALID ((zone_page_index_t)0xFFFFFFFFU)
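+
+/*
+ * Shape of the reclaim test described above (a sketch; the real
+ * zone_gc() bookkeeping appears later in this file):
+ */
+#if 0 /* example only */
+ if (zp->collect_count == zp->alloc_count) {
+ /* every resident element is free: unlink the page and
+ * queue it for return to the VM system */
+ }
+#endif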