+typedef uint32_t *wpl_array_t;
+
+extern void vm_page_free_reserve(int pages);
+
+extern void vm_pageout_throttle_down(vm_page_t page);
+extern void vm_pageout_throttle_up(vm_page_t page);
+
+/*
+ * ENCRYPTED SWAP:
+ */
+extern void upl_encrypt(
+ upl_t upl,
+ upl_offset_t crypt_offset,
+ upl_size_t crypt_size);
+extern void vm_page_encrypt(
+ vm_page_t page,
+ vm_map_offset_t kernel_map_offset);
+extern boolean_t vm_pages_encrypted; /* are there encrypted pages? */
+extern void vm_page_decrypt(
+ vm_page_t page,
+ vm_map_offset_t kernel_map_offset);
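+
+/*
+ * Illustrative sketch only (the surrounding helper shape is hypothetical,
+ * not part of this interface): a swap-out path could encrypt a page in
+ * place before writing it, given the page's kernel mapping at "kva":
+ *
+ *	vm_page_encrypt(m, kva);	scramble the contents in place
+ *	vm_pages_encrypted = TRUE;	note that swap-ins may need decrypting
+ *	... write the page out to the swap file ...
+ *
+ * The matching swap-in path would call vm_page_decrypt(m, kva) once the
+ * page has been read back in.
+ */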
+extern kern_return_t vm_paging_map_object(
+ vm_page_t page,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_prot_t protection,
+ boolean_t can_unlock_object,
+ vm_map_size_t *size, /* IN/OUT */
+ vm_map_offset_t *address, /* OUT */
+ boolean_t *need_unmap); /* OUT */
+extern void vm_paging_unmap_object(
+ vm_object_t object,
+ vm_map_offset_t start,
+ vm_map_offset_t end);
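+
+/*
+ * Typical call pattern, sketched for illustration (locking and error
+ * handling elided; "page", "object" and "offset" are assumed to be set
+ * up by the caller):
+ *
+ *	vm_map_size_t size = PAGE_SIZE;		IN: bytes wanted / OUT: bytes mapped
+ *	vm_map_offset_t kva;
+ *	boolean_t need_unmap;
+ *
+ *	if (vm_paging_map_object(page, object, offset,
+ *	                         VM_PROT_READ | VM_PROT_WRITE,
+ *	                         FALSE, &size, &kva, &need_unmap) == KERN_SUCCESS) {
+ *		... access the page through kva ...
+ *		if (need_unmap)
+ *			vm_paging_unmap_object(object, kva, kva + size);
+ *	}
+ */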
+decl_simple_lock_data(extern, vm_paging_lock)
+
+/*
+ * Backing store throttle, engaged when the backing store is exhausted
+ */
+extern unsigned int vm_backing_store_low;
+
+extern void vm_pageout_steal_laundry(
+ vm_page_t page,
+ boolean_t queues_locked);
+
+extern boolean_t vm_page_is_slideable(vm_page_t m);
+
+extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
+#endif /* MACH_KERNEL_PRIVATE */
+
+#if UPL_DEBUG
+extern kern_return_t upl_ubc_alias_set(
+ upl_t upl,
+ uintptr_t alias1,
+ uintptr_t alias2);
+extern int upl_ubc_alias_get(
+ upl_t upl,
+ uintptr_t *al,
+ uintptr_t *al2);
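+
+/*
+ * Debug-only sketch: tag a UPL with two alias words when it is set up
+ * and read them back later (the values here are arbitrary examples):
+ *
+ *	upl_ubc_alias_set(upl, (uintptr_t) current_thread(), 0x1234);
+ *	...
+ *	uintptr_t who, tag;
+ *	upl_ubc_alias_get(upl, &who, &tag);
+ */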
+#endif /* UPL_DEBUG */
+
+extern void vm_countdirtypages(void);
+
+extern void vm_backing_store_disable(
+ boolean_t suspend);
+
+extern kern_return_t upl_transpose(
+ upl_t upl1,
+ upl_t upl2);
+
+extern kern_return_t mach_vm_pressure_monitor(
+ boolean_t wait_for_pressure,
+ unsigned int nsecs_monitored,
+ unsigned int *pages_reclaimed_p,
+ unsigned int *pages_wanted_p);
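+
+/*
+ * Illustrative use (a sketch, not prescriptive): block until the VM
+ * system reports memory pressure, then see how many pages were
+ * reclaimed over the monitoring window and how many are still wanted:
+ *
+ *	unsigned int reclaimed, wanted;
+ *
+ *	if (mach_vm_pressure_monitor(TRUE, 1,
+ *	                             &reclaimed, &wanted) == KERN_SUCCESS) {
+ *		... try to give back roughly "wanted" pages ...
+ *	}
+ */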
+
+extern kern_return_t
+vm_set_buffer_cleanup_callout(
+ boolean_t (*func)(int));
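+
+/*
+ * Sketch of registering a cleanup callout (the callback name is
+ * hypothetical): the VM system can later invoke it to ask that cached
+ * buffers be released, passing an int whose meaning is part of the
+ * caller/callee contract.
+ *
+ *	static boolean_t
+ *	my_buffer_cleanup(int arg)
+ *	{
+ *		... release some cached buffers ...
+ *		return TRUE;
+ *	}
+ *
+ *	(void) vm_set_buffer_cleanup_callout(my_buffer_cleanup);
+ */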
+
+struct vm_page_stats_reusable {
+ SInt32 reusable_count;
+ uint64_t reusable;
+ uint64_t reused;
+ uint64_t reused_wire;
+ uint64_t reused_remove;
+ uint64_t all_reusable_calls;
+ uint64_t partial_reusable_calls;
+ uint64_t all_reuse_calls;
+ uint64_t partial_reuse_calls;
+ uint64_t reusable_pages_success;
+ uint64_t reusable_pages_failure;
+ uint64_t reusable_pages_shared;
+ uint64_t reuse_pages_success;
+ uint64_t reuse_pages_failure;
+ uint64_t can_reuse_success;
+ uint64_t can_reuse_failure;
+ uint64_t reusable_reclaimed;
+};
+extern struct vm_page_stats_reusable vm_page_stats_reusable;
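+
+/*
+ * Example (illustrative only): deriving a reuse success ratio from the
+ * counters above, e.g. for a debugging printf:
+ *
+ *	uint64_t ok = vm_page_stats_reusable.reuse_pages_success;
+ *	uint64_t attempts = ok + vm_page_stats_reusable.reuse_pages_failure;
+ *
+ *	if (attempts != 0)
+ *		printf("reuse success: %llu of %llu\n", ok, attempts);
+ */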
+
+extern int hibernate_flush_memory(void);
+extern void hibernate_create_paddr_map(void);
+
+extern int vm_compressor_mode;
+extern int vm_compressor_thread_count;
+
+#define VM_PAGER_DEFAULT 0x1 /* Use default pager. */
+#define VM_PAGER_COMPRESSOR_NO_SWAP 0x2 /* In-core compressor only. */
+#define VM_PAGER_COMPRESSOR_WITH_SWAP 0x4 /* In-core compressor + swap backend. */
+#define VM_PAGER_FREEZER_DEFAULT 0x8 /* Freezer backed by default pager. */
+#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP 0x10 /* Freezer backed by in-core compressor only, i.e. frozen data remains in-core, compressed. */
+#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP 0x20 /* Freezer backed by in-core compressor with swap support too. */
+
+#define VM_PAGER_MAX_MODES 6 /* Total number of vm compressor modes supported */
+
+#define DEFAULT_PAGER_IS_ACTIVE ((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)
+
+#define COMPRESSED_PAGER_IS_ACTIVE (vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))
+
+#define DEFAULT_FREEZER_IS_ACTIVE ((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)
+
+#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE (vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))
+
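+/*
+ * The predicates above let callers branch on whichever pager backs
+ * anonymous memory; a pageout path might use them like this (sketch
+ * only):
+ *
+ *	if (COMPRESSED_PAGER_IS_ACTIVE)
+ *		... hand the page to the compressor ...
+ *	else if (DEFAULT_PAGER_IS_ACTIVE)
+ *		... queue the page for the default pager ...
+ */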