+ }
+}
+
+/*
+ * Routine to panic if a pointer is not mapped to the expected zone.
+ * This can be used as a means of pinning an object to the zone it is
+ * expected to be a part of. Causes a panic if the address belongs to a
+ * different zone, does not belong to any zone, has been freed and is
+ * therefore unmapped from its zone, or contains an uninitialized value
+ * that does not point into any zone.
+ */
+
+void
+zone_require(void *addr, zone_t expected_zone)
+{
+ struct zone *src_zone = NULL;
+ struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
+
+ src_zone = PAGE_METADATA_GET_ZONE(page_meta);
+ if (__improbable(src_zone == NULL)) {
+ panic("Address not in a zone for zone_require check (addr: %p)", addr);
+ }
+
+ if (__improbable(src_zone != expected_zone)) {
+ panic("Address not in expected zone for zone_require check (addr: %p, zone: %s)", addr, src_zone->zone_name);
+ }
+}
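+
+/*
+ * Usage sketch (illustrative; `obj` and `my_zone` are hypothetical names):
+ * a subsystem handing out objects from `my_zone` can pin a pointer before
+ * trusting it:
+ *
+ *     zone_require(obj, my_zone);
+ *
+ * A forged, stale, or uninitialized pointer then panics here rather than
+ * being dereferenced.
+ */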
+
+/*
+ * ZTAGS
+ */
+
+#if VM_MAX_TAG_ZONES
+
+// For zones with tagging enabled:
+
+// ZTAGBASE: calculate a pointer to the tag base entry, which holds either a
+// uint32_t with the first tag offset for a page in the zone map, or two
+// uint16_t tags if the page can hold only one or two elements.
+
+#define ZTAGBASE(zone, element) \
+ (&((uint32_t *)zone_tagbase_min)[atop((element) - zone_map_min_address)])
+
+// pointer to the tag for an element
+#define ZTAG(zone, element) \
+ ({ \
+ vm_tag_t * result; \
+ if ((zone)->tags_inline) { \
+ result = (vm_tag_t *) ZTAGBASE((zone), (element)); \
+ if ((page_mask & element) >= (zone)->elem_size) result++; \
+ } else { \
+ result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size]; \
+ } \
+ result; \
+ })
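+
+// Worked example (illustrative; assumes 4K pages and a 2-byte vm_tag_t): a
+// zone whose elem_size is at least 2K fits at most two elements per page,
+// so both tags live in the two uint16_t halves of the page's ZTAGBASE slot
+// (tags_inline). For smaller elements, the slot instead holds the index of
+// the page's first tag within the out-of-line zone_tags_map storage.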
+
+
+static vm_offset_t zone_tagbase_min;
+static vm_offset_t zone_tagbase_max;
+static vm_offset_t zone_tagbase_map_size;
+static vm_map_t zone_tagbase_map;
+
+static vm_offset_t zone_tags_min;
+static vm_offset_t zone_tags_max;
+static vm_offset_t zone_tags_map_size;
+static vm_map_t zone_tags_map;
+
+// simple heap allocator for allocating the tags for new memory
+
+decl_lck_mtx_data(, ztLock); /* heap lock */
+enum {
+ ztFreeIndexCount = 8,
+ ztFreeIndexMax = (ztFreeIndexCount - 1),
+ ztTagsPerBlock = 4
+};
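+
+// The first ztFreeIndexCount entries of ztBlocks are reserved as the heads
+// of circular free queues, one per power-of-two size class; the last queue
+// also takes every larger block.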
+
+struct ztBlock {
+#if __LITTLE_ENDIAN__
+ uint64_t free:1,
+ next:21,
+ prev:21,
+ size:21;
+#else
+// ztBlock layout requires the free bit to be the least significant bit
+#error !__LITTLE_ENDIAN__
+#endif
+};
+typedef struct ztBlock ztBlock;
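+
+// next/prev hold block indices and size holds a block count, so the 21-bit
+// fields cap the heap at 2^21 entries; the allocator depends on the free
+// flag occupying the least significant bit, hence the error on big-endian.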
+
+static ztBlock * ztBlocks;
+static uint32_t ztBlocksCount;
+static uint32_t ztBlocksFree;
+
+static uint32_t
+ztLog2up(uint32_t size)
+{
+ if (1 == size) {
+ size = 0;
+ } else {
+ size = 32 - __builtin_clz(size - 1);
+ }
+ return size;
+}
+
+static uint32_t
+ztLog2down(uint32_t size)
+{
+ size = 31 - __builtin_clz(size);
+ return size;
+}
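+
+// Examples: ztLog2up(1) == 0, ztLog2up(5) == 3 (rounds up), while
+// ztLog2down(5) == 2 (rounds down). __builtin_clz(0) is undefined, so
+// ztLog2down must not be passed 0.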
+
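+// Populate any pages backing [address, address + size) in the given tag
+// submap that are not yet resident; flags pass through to
+// kernel_memory_populate().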
+static void
+ztFault(vm_map_t map, const void * address, size_t size, uint32_t flags)
+{
+ vm_map_offset_t addr = (vm_map_offset_t) address;
+ vm_map_offset_t page, end;
+
+ page = trunc_page(addr);
+ end = round_page(addr + size);
+
+ for (; page < end; page += page_size) {
+ if (!pmap_find_phys(kernel_pmap, page)) {
+ kern_return_t __unused
+ ret = kernel_memory_populate(map, page, PAGE_SIZE,
+ KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG);
+ assert(ret == KERN_SUCCESS);
+ }
+ }
+}
+
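+// Returns TRUE iff every page backing [address, address + size) is resident.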
+static boolean_t
+ztPresent(const void * address, size_t size)
+{
+ vm_map_offset_t addr = (vm_map_offset_t) address;
+ vm_map_offset_t page, end;
+ boolean_t result;
+
+ page = trunc_page(addr);
+ end = round_page(addr + size);
+ for (result = TRUE; (page < end); page += page_size) {
+ result = (pmap_find_phys(kernel_pmap, page) != 0);
+ if (!result) {
+ break;
+ }
+ }
+ return result;
+}
+
+
+void __unused
+ztDump(boolean_t sanity);
+void __unused
+ztDump(boolean_t sanity)
+{
+ uint32_t q, cq, p;
+
+ for (q = 0; q <= ztFreeIndexMax; q++) {
+ p = q;
+ do {
+ if (sanity) {
+ // queue heads (p == q) have size 0 and no size class of their own
+ cq = (p == q) ? q : ztLog2down(ztBlocks[p].size);
+ if (cq > ztFreeIndexMax) {
+ cq = ztFreeIndexMax;
+ }
+ if (!ztBlocks[p].free
+ || ((p != q) && (q != cq))
+ || (ztBlocks[ztBlocks[p].next].prev != p)
+ || (ztBlocks[ztBlocks[p].prev].next != p)) {
+ kprintf("zterror at %d\n", p);
+ ztDump(FALSE);
+ kprintf("zterror at %d\n", p);
+ assert(FALSE);
+ }
+ // advance so the sanity pass walks the whole ring, not just its head
+ p = ztBlocks[p].next;
+ continue;
+ }
+ kprintf("zt[%03d]%c %d, %d, %d\n",
+ p, ztBlocks[p].free ? 'F' : 'A',
+ ztBlocks[p].next, ztBlocks[p].prev,
+ ztBlocks[p].size);
+ p = ztBlocks[p].next;
+ if (p == q) {
+ break;
+ }
+ } while (p != q);
+ if (!sanity) {
+ printf("\n");
+ }
+ }
+ if (!sanity) {
+ printf("-----------------------\n");
+ }
+}
+
+
+
+// Unlink block (idx) from its free queue
+#define ZTBDEQ(idx) do { \
+ ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next; \
+ ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev; \
+} while (0)
+
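+// Return blocks [index, index + count) to the tag heap: coalesce with any
+// free neighbor on either side, then insert into the size-class queue,
+// which is kept sorted by ascending size.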
+static void
+ztFree(zone_t zone __unused, uint32_t index, uint32_t count)
+{
+ uint32_t q, w, p, size, merge;
+
+ assert(count);
+ ztBlocksFree += count;
+
+ // merge with the following block (the entry at index + count)
+ merge = (index + count);
+ if ((merge < ztBlocksCount)
+ && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
+ && ztBlocks[merge].free) {
+ ZTBDEQ(merge);
+ count += ztBlocks[merge].size;
+ }
+
+ // merge with the preceding block (its last entry sits at index - 1)
+ merge = (index - 1);
+ if ((merge > ztFreeIndexMax)
+ && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
+ && ztBlocks[merge].free) {
+ size = ztBlocks[merge].size;
+ count += size;
+ index -= size;
+ ZTBDEQ(index);
+ }
+
+ q = ztLog2down(count);
+ if (q > ztFreeIndexMax) {
+ q = ztFreeIndexMax;
+ }
+ w = q;
+ // queue in order of size
+ while (TRUE) {
+ p = ztBlocks[w].next;
+ if (p == q) {
+ break;
+ }
+ if (ztBlocks[p].size >= count) {
+ break;
+ }
+ w = p;
+ }
+ ztBlocks[p].prev = index;
+ ztBlocks[w].next = index;
+
+ // fault in first
+ ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
+
+ // mark first & last with free flag and size
+ ztBlocks[index].free = TRUE;
+ ztBlocks[index].size = count;
+ ztBlocks[index].prev = w;
+ ztBlocks[index].next = p;
+ if (count > 1) {
+ index += (count - 1);
+ // fault in last
+ ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
+ ztBlocks[index].free = TRUE;
+ ztBlocks[index].size = count;
+ }
+}
+
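+// Allocate count contiguous heap entries: first-fit search of the queues
+// starting at size class ztLog2up(count); returns the block index, or -1U
+// if no free block is large enough.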
+static uint32_t
+ztAlloc(zone_t zone, uint32_t count)
+{
+ uint32_t q, w, p, leftover;
+
+ assert(count);
+
+ q = ztLog2up(count);
+ if (q > ztFreeIndexMax) {
+ q = ztFreeIndexMax;
+ }
+ do {
+ w = q;
+ while (TRUE) {
+ p = ztBlocks[w].next;
+ if (p == q) {
+ break;
+ }
+ if (ztBlocks[p].size >= count) {
+ // dequeue, mark both ends allocated
+ ztBlocks[w].next = ztBlocks[p].next;
+ ztBlocks[ztBlocks[p].next].prev = w;
+ ztBlocks[p].free = FALSE;
+ ztBlocksFree -= ztBlocks[p].size;
+ if (ztBlocks[p].size > 1) {
+ ztBlocks[p + ztBlocks[p].size - 1].free = FALSE;
+ }
+
+ // fault all the allocation
+ ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0);
+ // mark last as allocated
+ if (count > 1) {
+ ztBlocks[p + count - 1].free = FALSE;
+ }
+ // free remainder
+ leftover = ztBlocks[p].size - count;
+ if (leftover) {
+ ztFree(zone, p + ztBlocks[p].size - leftover, leftover);
+ }
+
+ return p;
+ }
+ w = p;
+ }
+ q++;
+ } while (q <= ztFreeIndexMax);
+
+ return -1U;
+}
+
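+// One-time setup: carve two permanent submaps out of kernel_map (one
+// uint32_t tagbase slot per page the zone map can ever cover, plus a
+// fixed-size heap for out-of-line tags) and seed the heap's free queues.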
+static void
+ztInit(vm_size_t max_zonemap_size, lck_grp_t * group)
+{
+ kern_return_t ret;
+ vm_map_kernel_flags_t vmk_flags;
+ uint32_t idx;
+
+ lck_mtx_init(&ztLock, group, LCK_ATTR_NULL);
+
+ // allocate submaps VM_KERN_MEMORY_DIAG
+
+ zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tagbase_map);
+
+ if (ret != KERN_SUCCESS) {
+ panic("zone_init: kmem_suballoc failed");
+ }
+ zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size);
+
+ zone_tags_map_size = 2048 * 1024 * sizeof(vm_tag_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tags_map);
+
+ if (ret != KERN_SUCCESS) {
+ panic("zone_init: kmem_suballoc failed");
+ }
+ zone_tags_max = zone_tags_min + round_page(zone_tags_map_size);
+
+ ztBlocks = (ztBlock *) zone_tags_min;
+ ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));
+
+ // initialize the qheads
+ lck_mtx_lock(&ztLock);
+
+ ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0);
+ for (idx = 0; idx < ztFreeIndexCount; idx++) {
+ ztBlocks[idx].free = TRUE;
+ ztBlocks[idx].next = idx;
+ ztBlocks[idx].prev = idx;
+ ztBlocks[idx].size = 0;
+ }
+ // free remaining space
+ ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);
+
+ lck_mtx_unlock(&ztLock);
+}
+
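+// Called when a zone grows: fault in tagbase slots for the new pages and,
+// for zones with out-of-line tags, allocate a tag block and record each
+// page's first tag index in its tagbase slot.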
+static void
+ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+
+ lck_mtx_lock(&ztLock);
+
+ // fault tagbase
+ ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0);
+
+ if (!zone->tags_inline) {
+ // allocate tags
+ count = (uint32_t)(size / zone->elem_size);
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ block = ztAlloc(zone, blocks);
+ if (-1U == block) {
+ ztDump(FALSE);
+ }
+ assert(-1U != block);
+ }
+
+ lck_mtx_unlock(&ztLock);
+
+ if (!zone->tags_inline) {
+ // set tag base for each page
+ block *= ztTagsPerBlock;
+ for (idx = 0; idx < pages; idx++) {
+ tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size);
+ }
+ }
+}
+
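+// Inverse of ztMemoryAdd: poison the pages' tagbase slots and, for zones
+// with out-of-line tags, return the tag block to the heap.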
+static void
+ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+ // set tag base for each page
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+ block = tagbase[0];
+ for (idx = 0; idx < pages; idx++) {
+ tagbase[idx] = 0xFFFFFFFF;
+ }
+
+ lck_mtx_lock(&ztLock);
+ if (!zone->tags_inline) {
+ count = (uint32_t)(size / zone->elem_size);
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ assert(block != 0xFFFFFFFF);
+ block /= ztTagsPerBlock;
+ ztFree(NULL /* zone is unlocked */, block, blocks);
+ }
+
+ lck_mtx_unlock(&ztLock);
+}
+
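+// Map a zone's tag_zone_index back to its index in zone_array and report
+// the zone's element size; returns -1U if no tagged zone matches.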
+uint32_t
+zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size)
+{
+ zone_t z;
+ uint32_t idx;
+
+ simple_lock(&all_zones_lock, &zone_locks_grp);
+
+ for (idx = 0; idx < num_zones; idx++) {
+ z = &(zone_array[idx]);
+ if (!z->tags) {
+ continue;
+ }
+ if (tag_zone_index != z->tag_zone_index) {
+ continue;
+ }
+ *elem_size = z->elem_size;
+ break;
+ }
+
+ simple_unlock(&all_zones_lock);
+
+ if (idx == num_zones) {
+ idx = -1U;
+ }
+
+ return idx;