/*
 * Adjust the running byte total for a VM tag by a signed delta.
 * On DEBUG/DEVELOPMENT kernels also tracks the high-water mark, and
 * lazily registers dynamic allocation sites on their first allocation.
 */
void
vm_tag_update_size(vm_tag_t tag, int64_t delta)
{
	vm_allocation_site_t * allocation;
	uint64_t prior;

	/* Caller must supply a valid, in-range tag. */
	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	allocation = vm_allocation_sites[tag];
	assert(allocation);

	if (delta < 0) {
		/* A decrement must never drive the total negative. */
		assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
	}
	prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT

	/*
	 * Maintain the peak with a CAS loop: retry while another thread
	 * raises allocation->peak underneath us, and stop as soon as our
	 * new total no longer exceeds the peak we observed.
	 */
	uint64_t new, peak;
	new = prior + delta;
	do
	{
		peak = allocation->peak;
		if (new <= peak) break;
	}
	while (!OSCompareAndSwap64(peak, new, &allocation->peak));

#endif /* DEBUG || DEVELOPMENT */

	/* Statically registered tags never need lazy registration. */
	if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) return;

	/* First bytes charged to a dynamic site: assign it a tag slot. */
	if (!prior && !allocation->tag) vm_tag_alloc(allocation);
}
+
/*
 * Adjust the running byte total for a named kernel allocation by a
 * signed delta; mirrors vm_tag_update_size() but takes the site
 * directly instead of looking it up by tag.  Tracks the peak on
 * DEBUG/DEVELOPMENT kernels and lazily assigns a tag on first use.
 */
void
kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
{
	uint64_t prior;

	if (delta < 0) {
		/* A decrement must never drive the total negative. */
		assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
	}
	prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT

	/*
	 * Peak update via CAS loop: retry while a concurrent thread raises
	 * allocation->peak; stop once our new total no longer exceeds it.
	 */
	uint64_t new, peak;
	new = prior + delta;
	do
	{
		peak = allocation->peak;
		if (new <= peak) break;
	}
	while (!OSCompareAndSwap64(peak, new, &allocation->peak));

#endif /* DEBUG || DEVELOPMENT */

	/* First allocation against this site: assign it a tag slot. */
	if (!prior && !allocation->tag) vm_tag_alloc(allocation);
}
+
+#if VM_MAX_TAG_ZONES
+
+void
+vm_allocation_zones_init(void)
+{
+ kern_return_t ret;
+ vm_offset_t addr;
+ vm_size_t size;
+
+ size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **)
+ + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+
+ ret = kernel_memory_allocate(kernel_map,
+ &addr, round_page(size), 0,
+ KMA_ZERO, VM_KERN_MEMORY_DIAG);
+ assert(KERN_SUCCESS == ret);
+
+ vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
+ addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **);
+
+ // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
+ // in vm_tag_update_zone_size() won't recurse
+ vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr;
+ addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
+ vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
+}
+
/*
 * Ensure the per-zone totals array for `tag` exists before a zone
 * allocation is charged against it.  Allocates the array on first use;
 * if another thread publishes one concurrently, the CAS loser frees
 * its copy and the winner's array stays installed.
 */
void
vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
{
	vm_allocation_zone_total_t * zone;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	/* Zones beyond the tracked range are simply not accounted. */
	if (zidx >= VM_MAX_TAG_ZONES) return;

	zone = vm_allocation_zone_totals[tag];
	if (!zone)
	{
		/* Charge the bookkeeping itself to the DIAG tag. */
		zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
		/* Best-effort: if allocation fails this tag just goes untracked. */
		if (!zone) return;
		bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
		if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag]))
		{
			/* Lost the publish race — another thread installed its array. */
			kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
		}
	}
}
+
/*
 * Account a zone allocation (or free, for negative delta) of `delta`
 * bytes against tag `tag` in zone index `zidx`, including `dwaste`
 * bytes of per-allocation waste.
 *
 * The caller holds the zone lock (see comment below), so the per-zone
 * counters are updated without atomics.
 */
void
vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
{
	vm_allocation_zone_total_t * zone;
	uint32_t new;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	/* Zones beyond the tracked range are not accounted. */
	if (zidx >= VM_MAX_TAG_ZONES) return;

	/* vm_tag_will_update_zone() must have populated this slot already. */
	zone = vm_allocation_zone_totals[tag];
	assert(zone);
	zone += zidx;

	/* the zone is locked */
	if (delta < 0)
	{
		/* A free must never drive the total negative. */
		assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
		zone->total += delta;
	}
	else
	{
		zone->total += delta;
		if (zone->total > zone->peak) zone->peak = zone->total;
		if (dwaste)
		{
			/*
			 * Waste is kept as a decaying accumulator: until 65536
			 * samples have been folded in, just grow the divisor;
			 * afterwards decay the accumulator by 1/65536 per sample
			 * so old waste ages out.  (Average waste is presumably
			 * waste/wastediv — reported elsewhere; confirm at the
			 * consumer.)
			 */
			new = zone->waste;
			if (zone->wastediv < 65536) zone->wastediv++;
			else new -= (new >> 16);
			__assert_only bool ov = os_add_overflow(new, dwaste, &new);
			assert(!ov);
			zone->waste = new;
		}
	}
}
+
+#endif /* VM_MAX_TAG_ZONES */
+
+void
+kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
+{
+ kern_allocation_name_t other;
+ struct vm_allocation_total * total;
+ uint32_t subidx;
+
+ subidx = 0;
+ assert(VM_KERN_MEMORY_NONE != subtag);
+ for (; subidx < allocation->subtotalscount; subidx++)
+ {
+ if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag)
+ {
+ allocation->subtotals[subidx].tag = subtag;
+ break;
+ }
+ if (subtag == allocation->subtotals[subidx].tag) break;
+ }
+ assert(subidx < allocation->subtotalscount);
+ if (subidx >= allocation->subtotalscount) return;
+
+ total = &allocation->subtotals[subidx];
+ other = vm_allocation_sites[subtag];
+ assert(other);
+
+ if (delta < 0)
+ {
+ assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
+ OSAddAtomic64(delta, &total->total);
+ assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
+ OSAddAtomic64(delta, &other->mapped);
+ }
+ else
+ {
+ OSAddAtomic64(delta, &other->mapped);
+ OSAddAtomic64(delta, &total->total);
+ }
+}
+
+const char *
+kern_allocation_get_name(kern_allocation_name_t allocation)
+{
+ return (KA_NAME(allocation));
+}
+
+kern_allocation_name_t
+kern_allocation_name_allocate(const char * name, uint32_t subtotalscount)
+{
+ uint32_t namelen;
+
+ namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
+
+ kern_allocation_name_t allocation;
+ allocation = kalloc(KA_SIZE(namelen, subtotalscount));
+ bzero(allocation, KA_SIZE(namelen, subtotalscount));
+
+ allocation->refcount = 1;
+ allocation->subtotalscount = subtotalscount;
+ allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT);
+ strlcpy(KA_NAME(allocation), name, namelen + 1);
+
+ return (allocation);
+}
+
+void
+kern_allocation_name_release(kern_allocation_name_t allocation)
+{
+ assert(allocation->refcount > 0);
+ if (1 == OSAddAtomic16(-1, &allocation->refcount))
+ {
+ kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
+ }
+}
+
+vm_tag_t
+kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
+{
+ return (vm_tag_alloc(allocation));
+}
+
+#if ! VM_TAG_ACTIVE_UPDATE