+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_offset_t mask,
+ ppnum_t max_pnum,
+ ppnum_t pnum_mask,
+ int flags,
+ vm_tag_t tag);
+
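+/*
+ * As kmem_alloc, but honors KMA_* behavior flags (e.g. KMA_ZERO to
+ * return zero-filled memory).
+ */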
+extern kern_return_t kmem_alloc_flags(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag,
+ int flags);
+
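+/* Allocate pageable kernel virtual memory; pages are faulted in on first use. */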
+extern kern_return_t kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag) __XNU_INTERNAL(kmem_alloc_pageable);
+
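+/* Allocate wired memory aligned to its own size; size must be a power of two. */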
+extern kern_return_t kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag);
+
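+/*
+ * Resize an allocation made in `map'; the data may move, with the new
+ * address returned in *newaddrp.
+ */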
+extern kern_return_t kmem_realloc(
+ vm_map_t map,
+ vm_offset_t oldaddr,
+ vm_size_t oldsize,
+ vm_offset_t *newaddrp,
+ vm_size_t newsize,
+ vm_tag_t tag);
+
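+/*
+ * Free a range obtained from the kmem_alloc* family; addr/size must
+ * cover the original allocation.
+ */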
+extern void kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size);
+
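+/*
+ * Carve a submap of `size' bytes out of `parent' and return it in
+ * *new_map, so a subsystem can manage its own kernel VA range.
+ */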
+extern kern_return_t kmem_suballoc(
+ vm_map_t parent,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t pageable,
+ int flags,
+ vm_map_kernel_flags_t vmk_flags,
+ vm_tag_t tag,
+ vm_map_t *new_map);
+
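+/* Allocate wired memory backed by the kernel object instead of a new VM object. */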
+extern kern_return_t kmem_alloc_kobject(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size,
+ vm_tag_t tag) __XNU_INTERNAL(kmem_alloc_kobject);
+
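+/* Wire physical pages into an already-reserved kernel VA range. */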
+extern kern_return_t kernel_memory_populate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags,
+ vm_tag_t tag);
+
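+/* Undo kernel_memory_populate: strip the physical pages backing a range. */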
+extern void kernel_memory_depopulate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size,
+ int flags);
+
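+/*
+ * Build an I/O page list (UPL) against the memory object named by
+ * `port', pinning its pages for I/O.
+ */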
+extern kern_return_t memory_object_iopl_request(
+ ipc_port_t port,
+ memory_object_offset_t offset,
+ upl_size_t *upl_size,
+ upl_t *upl_ptr,
+ upl_page_info_array_t user_page_list,
+ unsigned int *page_list_count,
+ upl_control_flags_t *flags,
+ vm_tag_t tag);
+
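+/*
+ * Fill `info' with the kernel's per-tag memory accounting; size the
+ * array using vm_page_diagnose_estimate().
+ */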
+struct mach_memory_info;
+extern kern_return_t vm_page_diagnose(struct mach_memory_info * info,
+ unsigned int num_info, uint64_t zones_collectable_bytes);
+
+extern uint32_t vm_page_diagnose_estimate(void);
+
+#if DEBUG || DEVELOPMENT
+
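+/*
+ * Debug/development-only: self-check the accounting, and look up the
+ * size, tag, and zone element size recorded for a kernel address.
+ */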
+extern kern_return_t mach_memory_info_check(void);
+
+extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size);
+
+#endif /* DEBUG || DEVELOPMENT */
+
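+/*
+ * Allocation-tag support: vm_tag_bt() derives a tag from the caller's
+ * backtrace (used to attribute kext allocations), vm_tag_alloc()
+ * registers an allocation site, and vm_tag_update_size() adjusts the
+ * byte count charged to a tag.
+ */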
+extern vm_tag_t vm_tag_bt(void);
+
+extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site);
+
+extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP);
+
+extern void vm_tag_update_size(vm_tag_t tag, int64_t size);
+
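+/* Optional per-zone breakdown of tagged allocations, including waste tracking. */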
+#if VM_MAX_TAG_ZONES
+extern void vm_allocation_zones_init(void);
+extern void vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx);
+extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste);
+
+extern vm_allocation_zone_total_t ** vm_allocation_zone_totals;
+
+#endif /* VM_MAX_TAG_ZONES */
+
+extern vm_tag_t vm_tag_bt_debug(void);
+
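+/* Look up the kext behind `tag': copies its name and returns its kmod id. */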
+extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen);
+
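+/* TRUE when `map' is the kernel map or one of its submaps. */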
+extern boolean_t vm_kernel_map_is_kernel(vm_map_t map);
+
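+/*
+ * If the kernel pmap currently maps `vaddr', return its physical page
+ * (and optionally its physical address via `pvphysaddr').
+ */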
+extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);
+
+#else /* XNU_KERNEL_PRIVATE */
+
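+/*
+ * KPI variants exported outside XNU_KERNEL_PRIVATE: the same allocators
+ * without the explicit vm_tag_t (the kernel derives the tag itself,
+ * e.g. via vm_tag_bt()). Typical usage, as a sketch:
+ *
+ *     vm_offset_t addr;
+ *     if (kmem_alloc(kernel_map, &addr, PAGE_SIZE) == KERN_SUCCESS) {
+ *         ... use the memory ...
+ *         kmem_free(kernel_map, addr, PAGE_SIZE);
+ *     }
+ */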
+extern kern_return_t kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size);
+
+extern kern_return_t kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size);
+
+extern kern_return_t kmem_alloc_kobject(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size);
+
+extern void kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size);
+
+#endif /* !XNU_KERNEL_PRIVATE */