+
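+/*
+ * Dequeue block 'idx' from its doubly-linked free queue by splicing its
+ * neighbors together. The block's own prev/next fields are left stale;
+ * callers either relink the block or mark it allocated afterwards.
+ */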
+#define ZTBDEQ(idx) \
+ do { \
+ ztBlocks[ztBlocks[(idx)].prev].next = ztBlocks[(idx)].next; \
+ ztBlocks[ztBlocks[(idx)].next].prev = ztBlocks[(idx)].prev; \
+ } while (0)
+
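+/*
+ * Return 'count' tag blocks starting at 'index' to the allocator. The run
+ * is coalesced with any free neighbors (found via the size markers kept in
+ * the first and last block of every free run), then enqueued on the free
+ * queue headed at ztBlocks[log2(size)], capped at ztFreeIndexMax. Each
+ * queue is kept sorted by ascending size so ztAlloc() can take the
+ * smallest run that fits.
+ */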
+static void
+ztFree(zone_t zone __unused, uint32_t index, uint32_t count)
+{
+ uint32_t q, w, p, size, merge;
+
+ assert(count);
+ ztBlocksFree += count;
+
+ // merge with the following free block (starts at index + count)
+ merge = (index + count);
+ if ((merge < ztBlocksCount)
+ && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
+ && ztBlocks[merge].free)
+ {
+ ZTBDEQ(merge);
+ count += ztBlocks[merge].size;
+ }
+
+ // merge with the preceding free block (its size is kept in its last entry)
+ merge = (index - 1);
+ if ((merge > ztFreeIndexMax)
+ && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge]))
+ && ztBlocks[merge].free)
+ {
+ size = ztBlocks[merge].size;
+ count += size;
+ index -= size;
+ ZTBDEQ(index);
+ }
+
+ q = ztLog2down(count);
+ if (q > ztFreeIndexMax) q = ztFreeIndexMax;
+ w = q;
+ // queue in order of size
+ while (TRUE)
+ {
+ p = ztBlocks[w].next;
+ if (p == q) break;
+ if (ztBlocks[p].size >= count) break;
+ w = p;
+ }
+ ztBlocks[p].prev = index;
+ ztBlocks[w].next = index;
+
+ // fault in first
+ ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
+
+ // mark first & last with free flag and size
+ ztBlocks[index].free = TRUE;
+ ztBlocks[index].size = count;
+ ztBlocks[index].prev = w;
+ ztBlocks[index].next = p;
+ if (count > 1)
+ {
+ index += (count - 1);
+ // fault in last
+ ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0);
+ ztBlocks[index].free = TRUE;
+ ztBlocks[index].size = count;
+ }
+}
+
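+/*
+ * Allocate 'count' contiguous tag blocks. Starting at the log2(count) size
+ * class, walk each queue for the first free run large enough (queues are
+ * size-ordered, so that is also the smallest), dequeue it, fault in the
+ * pages backing the allocation, and hand any leftover tail back to
+ * ztFree(). Returns the index of the first block, or -1U on failure.
+ */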
+static uint32_t
+ztAlloc(zone_t zone, uint32_t count)
+{
+ uint32_t q, w, p, leftover;
+
+ assert(count);
+
+ q = ztLog2up(count);
+ if (q > ztFreeIndexMax) q = ztFreeIndexMax;
+ do
+ {
+ w = q;
+ while (TRUE)
+ {
+ p = ztBlocks[w].next;
+ if (p == q) break;
+ if (ztBlocks[p].size >= count)
+ {
+ // dequeue, mark both ends allocated
+ ztBlocks[w].next = ztBlocks[p].next;
+ ztBlocks[ztBlocks[p].next].prev = w;
+ ztBlocks[p].free = FALSE;
+ ztBlocksFree -= ztBlocks[p].size;
+ if (ztBlocks[p].size > 1) ztBlocks[p + ztBlocks[p].size - 1].free = FALSE;
+
+ // fault in the whole allocation
+ ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0);
+ // mark last as allocated
+ if (count > 1) ztBlocks[p + count - 1].free = FALSE;
+ // free remainder
+ leftover = ztBlocks[p].size - count;
+ if (leftover) ztFree(zone, p + ztBlocks[p].size - leftover, leftover);
+
+ return (p);
+ }
+ w = p;
+ }
+ q++;
+ }
+ while (q <= ztFreeIndexMax);
+
+ return (-1U);
+}
+
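+/*
+ * One-time setup: create two permanent submaps -- zone_tagbase_map, holding
+ * one uint32_t per page of the zone map, and zone_tags_map, holding the tag
+ * storage managed as ztBlocks -- initialize the free-queue heads, and turn
+ * everything past them into a single free run.
+ */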
+static void
+ztInit(vm_size_t max_zonemap_size, lck_grp_t * group)
+{
+ kern_return_t ret;
+ vm_map_kernel_flags_t vmk_flags;
+ uint32_t idx;
+
+ lck_mtx_init(&ztLock, group, LCK_ATTR_NULL);
+
+ // allocate submaps, charged to VM_KERN_MEMORY_DIAG
+
+ zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tagbase_map);
+
+ if (ret != KERN_SUCCESS) panic("ztInit: kmem_suballoc(zone_tagbase_map) failed");
+ zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size);
+
+ zone_tags_map_size = 2048*1024 * sizeof(vm_tag_t);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_permanent = TRUE;
+ ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size,
+ FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG,
+ &zone_tags_map);
+
+ if (ret != KERN_SUCCESS) panic("ztInit: kmem_suballoc(zone_tags_map) failed");
+ zone_tags_max = zone_tags_min + round_page(zone_tags_map_size);
+
+ ztBlocks = (ztBlock *) zone_tags_min;
+ ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock));
+
+ // initialize the qheads
+ lck_mtx_lock(&ztLock);
+
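+ // fault in the first page; the queue heads below are assumed to fit within it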
+ ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0);
+ for (idx = 0; idx < ztFreeIndexCount; idx++)
+ {
+ ztBlocks[idx].free = TRUE;
+ ztBlocks[idx].next = idx;
+ ztBlocks[idx].prev = idx;
+ ztBlocks[idx].size = 0;
+ }
+ // free remaining space
+ ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount);
+
+ lck_mtx_unlock(&ztLock);
+}
+
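+/*
+ * Notify the tag allocator that 'size' bytes at 'mem' joined 'zone': fault
+ * in the tagbase entries covering the new pages and, for zones whose tags
+ * are not stored inline, allocate one tag slot per element (rounded up to
+ * whole ztBlocks) and point each page's tagbase entry at the tag of the
+ * first element beginning on that page.
+ */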
+static void
+ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+
+ lck_mtx_lock(&ztLock);
+
+ // fault tagbase
+ ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0);
+
+ if (!zone->tags_inline)
+ {
+ // allocate tags
+ count = (uint32_t)(size / zone->elem_size);
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ block = ztAlloc(zone, blocks);
+ if (-1U == block) ztDump(false);
+ assert(-1U != block);
+ }
+
+ lck_mtx_unlock(&ztLock);
+
+ if (!zone->tags_inline)
+ {
+ // for each page, record the tag index of the first element beginning at or after the page start
+ block *= ztTagsPerBlock;
+ for (idx = 0; idx < pages; idx++)
+ {
+ tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size);
+ }
+ }
+}
+
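+/*
+ * Inverse of ztMemoryAdd(): invalidate the tagbase entries for the pages
+ * leaving the zone and return any out-of-line tag blocks to the free
+ * queues.
+ */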
+static void
+ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size)
+{
+ uint32_t * tagbase;
+ uint32_t count, block, blocks, idx;
+ size_t pages;
+
+ // invalidate the tag base for each page
+ pages = atop(size);
+ tagbase = ZTAGBASE(zone, mem);
+ block = tagbase[0];
+ for (idx = 0; idx < pages; idx++)
+ {
+ tagbase[idx] = 0xFFFFFFFF;
+ }
+
+ lck_mtx_lock(&ztLock);
+ if (!zone->tags_inline)
+ {
+ count = (uint32_t)(size / zone->elem_size);
+ blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock);
+ assert(block != 0xFFFFFFFF);
+ block /= ztTagsPerBlock;
+ ztFree(NULL /* zone is unlocked */, block, blocks);
+ }
+
+ lck_mtx_unlock(&ztLock);
+}
+
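+/*
+ * Translate a tag_zone_index back to the owning zone's position in
+ * zone_array and report that zone's element size. Returns -1U if no
+ * tagged zone matches.
+ */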
+uint32_t
+zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size)
+{
+ zone_t z;
+ uint32_t idx;
+
+ simple_lock(&all_zones_lock);
+
+ for (idx = 0; idx < num_zones; idx++)
+ {
+ z = &(zone_array[idx]);
+ if (!z->tags) continue;
+ if (tag_zone_index != z->tag_zone_index) continue;
+ *elem_size = z->elem_size;
+ break;
+ }
+
+ simple_unlock(&all_zones_lock);
+
+ if (idx == num_zones) idx = -1U;
+
+ return (idx);
+}
+
+#endif /* VM_MAX_TAG_ZONES */
+
+/* Routine to get the size of a zone-allocated address. If the address doesn't belong to
+ * the zone_map, returns 0.
+ */
+vm_size_t
+zone_element_size(void *addr, zone_t *z)
+{
+ struct zone *src_zone;
+ if (from_zone_map(addr, sizeof(void *))) {
+ struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE);
+ src_zone = PAGE_METADATA_GET_ZONE(page_meta);
+ if (z) {
+ *z = src_zone;