+/*
+ * Look up the VM map entry backing a large kalloc allocation and return the
+ * size of that entry.  The caller must hold the map lock.  Panics if the
+ * address does not begin an atomic (kalloc-owned) entry in the map.
+ */
+static vm_size_t
+vm_map_lookup_kalloc_entry_locked(
+	vm_map_t	map,
+	void		*addr)
+{
+	boolean_t	ret;
+	vm_map_entry_t	vm_entry = NULL;
+
+	ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry);
+	if (!ret) {
+		panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n",
+		    map, addr);
+	}
+	if (vm_entry->vme_start != (vm_map_offset_t)addr) {
+		panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n",
+		    map, addr, vm_entry);
+	}
+	if (!vm_entry->vme_atomic) {
+		panic("Attempting to lookup/free an address not managed by kalloc! (map: %p, addr: %p, entry: %p)\n",
+		    map, addr, vm_entry);
+	}
+	return (vm_entry->vme_end - vm_entry->vme_start);
+}
+
+#if KASAN_KALLOC
+/*
+ * KASAN kalloc stashes the original user-requested size away in the poisoned
+ * area. Return that directly.
+ */
+vm_size_t
+kalloc_size(void *addr)
+{
+	(void)vm_map_lookup_kalloc_entry_locked; /* silence warning */
+	return kasan_user_size((vm_offset_t)addr);
+}
+#else
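+/*
+ * Return the size of the allocation backing 'addr'.  Small allocations are
+ * resolved through their zone; larger ones are measured from the VM map
+ * entry that backs them.
+ */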
+vm_size_t
+kalloc_size(
+	void		*addr)
+{
+	vm_map_t	map;
+	vm_size_t	size;
+
+	size = zone_element_size(addr, NULL);
+	if (size) {
+		return size;
+	}
+	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
+		map = kalloc_map;
+	} else {
+		map = kernel_map;
+	}
+	vm_map_lock_read(map);
+	size = vm_map_lookup_kalloc_entry_locked(map, addr);
+	vm_map_unlock_read(map);
+	return size;
+}
+#endif
+
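+/*
+ * Return the size kalloc() would actually allocate for a request of 'size'
+ * bytes: the element size of the zone that would back it, or the
+ * page-rounded size for requests that go directly to a VM map.
+ */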
+vm_size_t
+kalloc_bucket_size(
+	vm_size_t	size)
+{
+	zone_t		z;
+	vm_map_t	map;
+
+	if (size < MAX_SIZE_ZDLUT) {
+		z = get_zone_dlut(size);
+		return z->elem_size;
+	}
+
+	if (size < kalloc_max_prerounded) {
+		z = get_zone_search(size, k_zindex_start);
+		return z->elem_size;
+	}
+
+	if (size >= kalloc_kernmap_size) {
+		map = kernel_map;
+	} else {
+		map = kalloc_map;
+	}
+
+	return vm_map_round_page(size, VM_MAP_PAGE_MASK(map));
+}
+
+#if KASAN_KALLOC
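+/*
+ * KASAN kalloc can recover the original request size (see kalloc_size()
+ * above), so just look it up and free through the sized kfree() path.
+ */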
+vm_size_t
+kfree_addr(void *addr)
+{
+	vm_size_t origsz = kalloc_size(addr);
+	kfree(addr, origsz);
+	return origsz;
+}
+#else
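+/*
+ * Free a kalloc'ed allocation whose size is not known to the caller.
+ * Zone-backed elements are returned to their zone; larger allocations are
+ * removed from the owning VM map and the large-allocation statistics are
+ * updated.  Returns the size that was freed.
+ */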
+vm_size_t
+kfree_addr(
+	void		*addr)
+{
+	vm_map_t	map;
+	vm_size_t	size = 0;
+	kern_return_t	ret;
+	zone_t		z;
+
+	size = zone_element_size(addr, &z);
+	if (size) {
+		DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, z->elem_size, void*, addr);
+		zfree(z, addr);
+		return size;
+	}
+
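+	/* Large allocation: determine which map owns the address. */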
+	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
+		map = kalloc_map;
+	} else {
+		map = kernel_map;
+	}
+	if ((vm_offset_t)addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
+		panic("kfree on an address not in the kernel & kext address range! addr: %p\n", addr);
+	}
+
+	vm_map_lock(map);
+	size = vm_map_lookup_kalloc_entry_locked(map, addr);
+	ret = vm_map_remove_locked(map,
+		vm_map_trunc_page((vm_map_offset_t)addr,
+			VM_MAP_PAGE_MASK(map)),
+		vm_map_round_page((vm_map_offset_t)addr + size,
+			VM_MAP_PAGE_MASK(map)),
+		VM_MAP_REMOVE_KUNWIRE);
+	if (ret != KERN_SUCCESS) {
+		panic("vm_map_remove_locked() failed for kalloc vm_entry! addr: %p, map: %p ret: %d\n",
+		    addr, map, ret);
+	}
+	vm_map_unlock(map);
+	DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, size, void*, addr);
+
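+	/* Update the large-allocation accounting. */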
+	kalloc_spin_lock();
+	kalloc_large_total -= size;
+	kalloc_large_inuse--;
+	kalloc_unlock();
+
+	KALLOC_ZINFO_SFREE(size);
+	return size;
+}
+#endif
+