+ if (++new_count >= factor) {
+ rolled_over = TRUE;
+ new_count = 0;
+ } else {
+ rolled_over = FALSE;
+ }
+
+ } while (!OSCompareAndSwap(old_count, new_count, count_p));
+
+ return rolled_over;
+}
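+/*
+ * (The loop above is the tail of a lock-free sampling counter: it retries
+ * the compare-and-swap until the increment lands, and reports TRUE once
+ * every `factor' increments.)
+ */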
+
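+/*
+ * ZP_POISON is the fill pattern written over a poisoned element's payload
+ * on free; the fill is checked for modification before the element is
+ * handed out again (the fill and check paths are outside this section).
+ * The width matches the native word size so every byte is covered.
+ */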
+#if defined(__LP64__)
+#define ZP_POISON 0xdeadbeefdeadbeef
+#else
+#define ZP_POISON 0xdeadbeef
+#endif
+
+#define ZP_DEFAULT_SAMPLING_FACTOR 16
+#define ZP_DEFAULT_SCALE_FACTOR 4
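+/*
+ * With the defaults, roughly 1 in 16 frees poisons the freed element.
+ * zp_scale makes poisoning rarer for larger elements: the free path
+ * (outside this section) combines them roughly as
+ * zp_factor + (elem_size >> zp_scale), so bigger elements get a larger
+ * sampling interval. (Sketch of the intent, not a definitive formula.)
+ */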
+
+/*
+ * A zp_factor of 0 indicates zone poisoning is disabled;
+ * however, we still poison zones smaller than zp_tiny_zone_limit (a cacheline).
+ * Passing the -no-zp boot-arg disables even this behavior.
+ * In all cases, we record and check the integrity of a backup pointer.
+ */
+
+/* set by zp-factor=N boot arg; zero disables poisoning for all but tiny zones */
+uint32_t zp_factor = 0;
+
+/* set by zp-scale=N boot arg, scales zp_factor by zone size */
+uint32_t zp_scale = 0;
+
+/* set in zp_init, zero indicates -no-zp boot-arg */
+vm_size_t zp_tiny_zone_limit = 0;
+
+/* initialized to a per-boot random value in zp_init */
+uintptr_t zp_poisoned_cookie = 0;
+uintptr_t zp_nopoison_cookie = 0;
+
+
+/*
+ * Initialize zone poisoning.
+ * Called from zone_bootstrap before any allocations are made from zalloc.
+ */
+static inline void
+zp_init(void)
+{
+ char temp_buf[16];
+
+ /*
+ * Initialize backup pointer random cookie for poisoned elements.
+ * Try not to call early_random() back to back; it may return
+ * the same value if mach_absolute_time doesn't have sufficient time
+ * to tick over between calls. <rdar://problem/11597395>
+ * (This is only a problem on embedded devices.)
+ */
+ zp_poisoned_cookie = (uintptr_t) early_random();
+
+ /*
+ * Always poison zones smaller than a cacheline,
+ * because it's pretty close to free
+ */
+ ml_cpu_info_t cpu_info;
+ ml_cpu_get_info(&cpu_info);
+ zp_tiny_zone_limit = (vm_size_t) cpu_info.cache_line_size;
+
+ zp_factor = ZP_DEFAULT_SAMPLING_FACTOR;
+ zp_scale = ZP_DEFAULT_SCALE_FACTOR;
+
+ /* TODO: Bigger permutation? */
+ /*
+ * Permute the default factor +/- 1 to make it less predictable.
+ * This adds or subtracts ~4 poisoned objects per 1000 frees.
+ */
+ if (zp_factor != 0) {
+ uint32_t rand_bits = early_random() & 0x3;
+
+ if (rand_bits == 0x1)
+ zp_factor += 1;
+ else if (rand_bits == 0x2)
+ zp_factor -= 1;
+ /* if 0x0 or 0x3, leave it alone */
+ }
+
+ /* -zp: enable poisoning for every alloc and free */
+ if (PE_parse_boot_argn("-zp", temp_buf, sizeof(temp_buf))) {
+ zp_factor = 1;
+ }
+
+ /* -no-zp: disable poisoning completely even for tiny zones */
+ if (PE_parse_boot_argn("-no-zp", temp_buf, sizeof(temp_buf))) {
+ zp_factor = 0;
+ zp_tiny_zone_limit = 0;
+ printf("Zone poisoning disabled\n");
+ }
+
+ /* zp-factor=XXXX: override how often to poison freed zone elements */
+ if (PE_parse_boot_argn("zp-factor", &zp_factor, sizeof(zp_factor))) {
+ printf("Zone poisoning factor override: %u\n", zp_factor);
+ }
+
+ /* zp-scale=XXXX: override how much zone size scales zp-factor by */
+ if (PE_parse_boot_argn("zp-scale", &zp_scale, sizeof(zp_scale))) {
+ printf("Zone poisoning scale factor override: %u\n", zp_scale);
+ }
+
+ /* Initialize backup pointer random cookie for unpoisoned elements */
+ zp_nopoison_cookie = (uintptr_t) early_random();
+
+#if MACH_ASSERT
+ if (zp_poisoned_cookie == zp_nopoison_cookie)
+ panic("early_random() is broken: %p and %p are not random\n",
+ (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie);
+#endif
+
+ /*
+ * Use the last bit in the backup pointer to hint poisoning state
+ * to backup_ptr_mismatch_panic. Valid zone pointers are aligned, so
+ * the low bits are zero.
+ */
+ zp_poisoned_cookie |= (uintptr_t)0x1ULL;
+ zp_nopoison_cookie &= ~((uintptr_t)0x1ULL);
+
+#if defined(__LP64__)
+ /*
+ * Make backup pointers more obvious in GDB for 64-bit
+ * by making 0xFFFFFF... ^ cookie = 0xFACADE...
+ * (0xFACADE = 0xFFFFFF ^ 0x053521)
+ * (0xC0FFEE = 0xFFFFFF ^ 0x3f0011)
+ * The high 3 bytes of a zone pointer are always 0xFFFFFF, and are checked
+ * by the sanity check, so it's OK for that part of the cookie to be predictable.
+ *
+ * TODO: Use #defines, xors, and shifts
+ */
+
+ zp_poisoned_cookie &= 0x000000FFFFFFFFFF;
+ zp_poisoned_cookie |= 0x0535210000000000; /* 0xFACADE */
+
+ zp_nopoison_cookie &= 0x000000FFFFFFFFFF;
+ zp_nopoison_cookie |= 0x3f00110000000000; /* 0xC0FFEE */
+#endif
+}
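+/*
+ * Example of the resulting encoding (illustrative): a valid LP64 freelist
+ * pointer has 0xFFFFFF in its top 3 bytes, so its backup copy reads
+ * 0xFACADE... when XORed with zp_poisoned_cookie and 0xC0FFEE... when
+ * XORed with zp_nopoison_cookie. Because elements are pointer-aligned,
+ * the low bit of the backup is exactly the cookie's poison hint bit.
+ */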
+
+/* zone_map page count for page table structure */
+uint64_t zone_map_table_page_count = 0;
+
+/*
+ * These macros track the number of pages currently
+ * in use by a zone. z->page_count is protected by
+ * the zone lock.
+ */
+#define ZONE_PAGE_COUNT_INCR(z, count) \
+{ \
+ OSAddAtomic64((count), &((z)->page_count)); \
+}
+
+#define ZONE_PAGE_COUNT_DECR(z, count) \
+{ \
+ OSAddAtomic64(-(count), &((z)->page_count)); \
+}
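+/*
+ * Usage sketch (hypothetical caller):
+ *
+ * ZONE_PAGE_COUNT_INCR(z, (size / PAGE_SIZE));
+ * ...
+ * ZONE_PAGE_COUNT_DECR(z, (size / PAGE_SIZE));
+ */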
+
+/* for is_sane_zone_element and garbage collection */
+
+vm_offset_t zone_map_min_address = 0; /* initialized in zone_init */
+vm_offset_t zone_map_max_address = 0;
+
+/* Helpful for walking through a zone's free element list. */
+struct zone_free_element {
+ struct zone_free_element *next;
+ /* ... */
+ /* void *backup_ptr; */
+};
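+/*
+ * Layout of a free element (illustrative):
+ *
+ * +----------------+------------------------+------------------+
+ * | next (primary) | payload / poison fill  | backup pointer   |
+ * +----------------+------------------------+------------------+
+ * ^ element                                 ^ element + elem_size
+ *                                             - sizeof(vm_offset_t)
+ *
+ * where backup == next ^ zp_nopoison_cookie for a clean element and
+ * backup == next ^ zp_poisoned_cookie for a poisoned one.
+ */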
+
+struct zone_page_metadata {
+ queue_chain_t pages;
+ struct zone_free_element *elements;
+ zone_t zone;
+ uint16_t alloc_count;
+ uint16_t free_count;
+};
+
+/* The backup pointer is stored in the last pointer-sized location in an element. */
+static inline vm_offset_t *
+get_backup_ptr(vm_size_t elem_size,
+ vm_offset_t *element)
+{
+ return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t));
+}
+
+static inline struct zone_page_metadata *
+get_zone_page_metadata(struct zone_free_element *element)
+{
+ return (struct zone_page_metadata *)(trunc_page((vm_offset_t)element));
+}
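+/*
+ * Note: this assumes the metadata sits at the start of the element's page
+ * (trunc_page of any element address), which holds for use_page_list zones;
+ * try_alloc_from_zone re-checks this placement before trusting it.
+ */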
+
+/*
+ * Zone checking helper function.
+ * A pointer that satisfies these conditions is OK to be a freelist next pointer.
+ * A pointer that doesn't satisfy these conditions indicates corruption.
+ */
+static inline boolean_t
+is_sane_zone_ptr(zone_t zone,
+ vm_offset_t addr,
+ size_t obj_size)
+{
+ /* Must be aligned to pointer boundary */
+ if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0))
+ return FALSE;
+
+ /* Must be a kernel address */
+ if (__improbable(!pmap_kernel_va(addr)))
+ return FALSE;
+
+ /* Must be from zone map if the zone only uses memory from the zone_map */
+ /*
+ * TODO: Remove the zone->collectable check when every
+ * zone using foreign memory is properly tagged with allows_foreign
+ */
+ if (zone->collectable && !zone->allows_foreign) {
+#if ZONE_ALIAS_ADDR
+ /*
+ * If this address is in the static kernel region, it might be
+ * the alias address of a valid zone element.
+ * If we tried to find the zone_virtual_addr() of an invalid
+ * address in the static kernel region, it will panic, so don't
+ * check addresses in this region.
+ *
+ * TODO: Use a safe variant of zone_virtual_addr to
+ * make this check more accurate
+ *
+ * The static kernel region is mapped at:
+ * [gVirtBase, gVirtBase + gPhysSize]
+ */
+ if ((addr - gVirtBase) < gPhysSize)
+ return TRUE;
+#endif
+ /* check if addr is from zone map */
+ if (addr >= zone_map_min_address &&
+ (addr + obj_size - 1) < zone_map_max_address )
+ return TRUE;
+
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static inline boolean_t
+is_sane_zone_page_metadata(zone_t zone,
+ vm_offset_t page_meta)
+{
+ /* NULL page metadata structures are invalid */
+ if (page_meta == 0)
+ return FALSE;
+ return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata));
+}
+
+static inline boolean_t
+is_sane_zone_element(zone_t zone,
+ vm_offset_t addr)
+{
+ /* NULL is OK because it indicates the tail of the list */
+ if (addr == 0)
+ return TRUE;
+ return is_sane_zone_ptr(zone, addr, zone->elem_size);
+}
+
+/* Someone wrote to freed memory. */
+static inline void /* noreturn */
+zone_element_was_modified_panic(zone_t zone,
+ vm_offset_t element,
+ vm_offset_t found,
+ vm_offset_t expected,
+ vm_offset_t offset)
+{
+ panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p",
+ zone->zone_name,
+ (void *) expected,
+ (void *) found,
+ (void *) (expected ^ found),
+ (uint32_t) offset,
+ (uint32_t) zone->elem_size,
+ (void *) element,
+ (void *) zp_nopoison_cookie,
+ (void *) zp_poisoned_cookie);
+}
+
+/*
+ * The primary and backup pointers don't match.
+ * Determine which one was likely the corrupted pointer, find out what it
+ * probably should have been, and panic.
+ * I would like to mark this as noreturn, but panic() isn't marked noreturn.
+ */
+static void /* noreturn */
+backup_ptr_mismatch_panic(zone_t zone,
+ vm_offset_t element,
+ vm_offset_t primary,
+ vm_offset_t backup)
+{
+ vm_offset_t likely_backup;
+
+ boolean_t sane_backup;
+ boolean_t sane_primary = is_sane_zone_element(zone, primary);
+ boolean_t element_was_poisoned = (backup & 0x1) ? TRUE : FALSE;
+
+#if defined(__LP64__)
+ /* We can inspect the tag in the upper bits for additional confirmation */
+ if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000)
+ element_was_poisoned = TRUE;
+ else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000)
+ element_was_poisoned = FALSE;
+#endif
+
+ if (element_was_poisoned) {
+ likely_backup = backup ^ zp_poisoned_cookie;
+ sane_backup = is_sane_zone_element(zone, likely_backup);
+ } else {
+ likely_backup = backup ^ zp_nopoison_cookie;
+ sane_backup = is_sane_zone_element(zone, likely_backup);
+ }
+
+ /* The primary is definitely the corrupted one */
+ if (!sane_primary && sane_backup)
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+
+ /* The backup is definitely the corrupted one */
+ if (sane_primary && !sane_backup)
+ zone_element_was_modified_panic(zone, element, backup,
+ (primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)),
+ zone->elem_size - sizeof(vm_offset_t));
+
+ /*
+ * Not sure which is the corrupted one.
+ * It's less likely that the backup pointer was overwritten with
+ * ( (sane address) ^ (valid cookie) ), so we'll guess that the
+ * primary pointer has been overwritten with a sane but incorrect address.
+ */
+ if (sane_primary && sane_backup)
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+
+ /* Neither is sane, so just guess. */
+ zone_element_was_modified_panic(zone, element, primary, likely_backup, 0);
+}
+
+/*
+ * Sets the next element of tail to elem.
+ * elem can be NULL.
+ * Preserves the poisoning state of the element.
+ */
+static inline void
+append_zone_element(zone_t zone,
+ struct zone_free_element *tail,
+ struct zone_free_element *elem)
+{
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, (vm_offset_t *) tail);
+
+ vm_offset_t old_backup = *backup;
+
+ vm_offset_t old_next = (vm_offset_t) tail->next;
+ vm_offset_t new_next = (vm_offset_t) elem;
+
+ if (old_next == (old_backup ^ zp_nopoison_cookie))
+ *backup = new_next ^ zp_nopoison_cookie;
+ else if (old_next == (old_backup ^ zp_poisoned_cookie))
+ *backup = new_next ^ zp_poisoned_cookie;
+ else
+ backup_ptr_mismatch_panic(zone,
+ (vm_offset_t) tail,
+ old_next,
+ old_backup);
+
+ tail->next = elem;
+}
+
+
+/*
+ * Insert a linked list of elements (delineated by head and tail) at the head of
+ * the zone free list. Every element in the list being added has already gone
+ * through append_zone_element, so their backup pointers are already
+ * set properly.
+ * Precondition: There should be no elements after tail
+ */
+static inline void
+add_list_to_zone(zone_t zone,
+ struct zone_free_element *head,
+ struct zone_free_element *tail)
+{
+ assert(tail->next == NULL);
+ assert(!zone->use_page_list);
+
+ append_zone_element(zone, tail, zone->free_elements);
+
+ zone->free_elements = head;
+}
+
+
+/*
+ * Adds the element to the head of the zone's free list.
+ * Keeps a backup next-pointer at the end of the element.
+ */
+static inline void
+free_to_zone(zone_t zone,
+ vm_offset_t element,
+ boolean_t poison)
+{
+ vm_offset_t old_head;
+ struct zone_page_metadata *page_meta;
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ if (zone->use_page_list) {
+ page_meta = get_zone_page_metadata((struct zone_free_element *)element);
+ assert(page_meta->zone == zone);
+ old_head = (vm_offset_t)page_meta->elements;
+ } else {
+ old_head = (vm_offset_t)zone->free_elements;
+ }
+
+#if MACH_ASSERT
+ if (__improbable(!is_sane_zone_element(zone, old_head)))
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) old_head, zone->zone_name);
+#endif
+
+ if (__improbable(!is_sane_zone_element(zone, element)))
+ panic("zfree: freeing invalid pointer %p to zone %s\n",
+ (void *) element, zone->zone_name);
+
+ /*
+ * Always write a redundant next pointer.
+ * So that it is more difficult to forge, XOR it with a random cookie:
+ * a poisoned element is indicated by using zp_poisoned_cookie
+ * instead of zp_nopoison_cookie.
+ */
+
+ *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie);
+
+ /* Insert this element at the head of the free list */
+ *primary = old_head;
+ if (zone->use_page_list) {
+ page_meta->elements = (struct zone_free_element *)element;
+ page_meta->free_count++;
+ if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
+ if (page_meta->free_count == 1) {
+ /* first foreign element freed on page, move from all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.any_free_foreign, (queue_entry_t)page_meta);
+ } else {
+ /* no other list transitions */
+ }
+ } else if (page_meta->free_count == page_meta->alloc_count) {
+ /* whether the page was on the intermediate or all_used queue, move it to free */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_free, (queue_entry_t)page_meta);
+ } else if (page_meta->free_count == 1) {
+ /* first free element on page, move from all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
+ }
+ } else {
+ zone->free_elements = (struct zone_free_element *)element;
+ }
+ zone->count--;
+ zone->countfree++;
+}
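+/*
+ * Page queue transitions for use_page_list zones (foreign pages use
+ * any_free_foreign instead), as driven by free_to_zone above and
+ * try_alloc_from_zone below:
+ *
+ * free: all_used -> intermediate (first element freed on page)
+ * intermediate/all_used -> all_free (last element freed)
+ * alloc: all_free -> intermediate (first element taken from a free page)
+ * intermediate/all_free -> all_used (last free element taken)
+ */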
+
+
+/*
+ * Removes an element from the zone's free list, returning 0 if the free list is empty.
+ * Verifies that the next-pointer and backup next-pointer are intact, and
+ * sets *check_poison when the element was poisoned, so the caller can verify
+ * that its fill pattern has not been modified.
+ */
+static inline vm_offset_t
+try_alloc_from_zone(zone_t zone,
+ boolean_t* check_poison)
+{
+ vm_offset_t element;
+ struct zone_page_metadata *page_meta;
+
+ *check_poison = FALSE;
+
+ /* if zone is empty, bail */
+ if (zone->use_page_list) {
+ if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign);
+ else if (!queue_empty(&zone->pages.intermediate))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate);
+ else if (!queue_empty(&zone->pages.all_free))
+ page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free);
+ else {
+ return 0;
+ }
+
+ /* Check that page_meta passes is_sane_zone_page_metadata */
+ if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta)))
+ panic("zalloc: invalid metadata structure %p for freelist of zone %s\n",
+ (void *) page_meta, zone->zone_name);
+ assert(page_meta->zone == zone);
+ element = (vm_offset_t)page_meta->elements;
+ } else {
+ if (zone->free_elements == NULL)
+ return 0;
+
+ element = (vm_offset_t)zone->free_elements;
+ }
+
+#if MACH_ASSERT
+ if (__improbable(!is_sane_zone_element(zone, element)))
+ panic("zfree: invalid head pointer %p for freelist of zone %s\n",
+ (void *) element, zone->zone_name);
+#endif
+
+ vm_offset_t *primary = (vm_offset_t *) element;
+ vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary);
+
+ vm_offset_t next_element = *primary;
+ vm_offset_t next_element_backup = *backup;
+
+ /*
+ * backup_ptr_mismatch_panic will determine what next_element
+ * should have been, and print it appropriately
+ */
+ if (__improbable(!is_sane_zone_element(zone, next_element)))
+ backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);
+
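+ /*
+ * Decision table for the checks below:
+ * next == backup ^ zp_nopoison_cookie -> clean element, use as-is
+ * next == backup ^ zp_poisoned_cookie -> poisoned; caller verifies fill
+ * neither matches -> corruption, panic
+ */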
+ /* Check the backup pointer for the regular cookie */
+ if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) {
+
+ /* Check for the poisoned cookie instead */
+ if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie)))
+ /* Neither cookie is valid, corruption has occurred */
+ backup_ptr_mismatch_panic(zone, element, next_element, next_element_backup);
+
+ /*
+ * Element was marked as poisoned, so check its integrity before using it.
+ */
+ *check_poison = TRUE;
+ }
+
+ if (zone->use_page_list) {
+
+ /* Make sure the page_meta is at the correct offset from the start of page */
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element)))
+ panic("zalloc: metadata located at incorrect location on page of zone %s\n",
+ zone->zone_name);
+
+ /* Make sure next_element belongs to the same page as page_meta */
+ if (next_element) {
+ if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element)))
+ panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n",
+ (void *)next_element, (void *)element, zone->zone_name);
+ }
+ }
+
+ /* Remove this element from the free list */
+ if (zone->use_page_list) {
+
+ page_meta->elements = (struct zone_free_element *)next_element;
+ page_meta->free_count--;
+
+ if (zone->allows_foreign && !from_zone_map(element, zone->elem_size)) {
+ if (page_meta->free_count == 0) {
+ /* move to all used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
+ } else {
+ /* no other list transitions */
+ }
+ } else if (page_meta->free_count == 0) {
+ /* remove from intermediate or free, move to all_used */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.all_used, (queue_entry_t)page_meta);
+ } else if (page_meta->alloc_count == page_meta->free_count + 1) {
+ /* remove from free, move to intermediate */
+ remqueue((queue_entry_t)page_meta);
+ enqueue_tail(&zone->pages.intermediate, (queue_entry_t)page_meta);
+ }
+ } else {
+ zone->free_elements = (struct zone_free_element *)next_element;
+ }
+ zone->countfree--;
+ zone->count++;
+ zone->sum_count++;
+
+ return element;
+}
+
+
+/*
+ * End of zone poisoning
+ */