+ memory_object_size_t *total_p,
+ memory_object_size_t *avail_p,
+ vm_size_t *pagesize_p,
+ boolean_t *encrypted_p);
+
+extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
+extern void log_unnest_badness(
+ vm_map_t map,
+ vm_map_offset_t start_unnest,
+ vm_map_offset_t end_unnest,
+ boolean_t is_nested_map,
+ vm_map_offset_t lowest_unnestable_addr);
+
+struct proc;
+struct proc *current_proc(void);
+extern int cs_allow_invalid(struct proc *p);
+extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);
+
+#define CS_VALIDATE_TAINTED 0x00000001
+#define CS_VALIDATE_NX 0x00000002
+extern boolean_t cs_validate_range(struct vnode *vp,
+ memory_object_t pager,
+ memory_object_offset_t offset,
+ const void *data,
+ vm_size_t size,
+ unsigned *result);
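A minimal usage sketch for this entry point, assuming (from the names alone) that CS_VALIDATE_TAINTED marks data modified after signing and CS_VALIDATE_NX marks ranges that must not be executable:

```c
/* Sketch: validate pager-backed data against the vnode's code signature. */
static boolean_t
page_signature_ok(struct vnode *vp, memory_object_t pager,
    memory_object_offset_t offset, const void *data, vm_size_t size)
{
	unsigned result = 0;

	if (!cs_validate_range(vp, pager, offset, data, size, &result)) {
		return FALSE;                /* validation failed outright */
	}
	if (result & CS_VALIDATE_TAINTED) {
		return FALSE;                /* assumed: modified after signing */
	}
	if (result & CS_VALIDATE_NX) {
		/* assumed: range must not be mapped executable */
	}
	return TRUE;
}
```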
+#if PMAP_CS
+extern kern_return_t cs_associate_blob_with_mapping(
+ void *pmap,
+ vm_map_offset_t start,
+ vm_map_size_t size,
+ vm_object_offset_t offset,
+ void *blobs_p);
+#endif /* PMAP_CS */
+
+extern kern_return_t memory_entry_purgeable_control_internal(
+ ipc_port_t entry_port,
+ vm_purgable_t control,
+ int *state);
+
+extern kern_return_t memory_entry_access_tracking_internal(
+ ipc_port_t entry_port,
+ int *access_tracking,
+ uint32_t *access_tracking_reads,
+ uint32_t *access_tracking_writes);
+
+extern kern_return_t mach_memory_entry_purgable_control(
+ ipc_port_t entry_port,
+ vm_purgable_t control,
+ int *state);
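A sketch of driving the purgeable-control entry point with the VM_PURGABLE_* constants from <mach/vm_purgable.h>; it assumes entry_port names a purgeable named entry:

```c
/* Sketch: mark the entry's backing object volatile (reclaimable). */
static kern_return_t
make_entry_volatile(ipc_port_t entry_port)
{
	int state = VM_PURGABLE_VOLATILE;

	return mach_memory_entry_purgable_control(entry_port,
	    VM_PURGABLE_SET_STATE, &state);
}
```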
+
+extern kern_return_t mach_memory_entry_get_page_counts(
+ ipc_port_t entry_port,
+ unsigned int *resident_page_count,
+ unsigned int *dirty_page_count);
+
+extern kern_return_t mach_memory_entry_page_op(
+ ipc_port_t entry_port,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags);
+
+extern kern_return_t mach_memory_entry_range_op(
+ ipc_port_t entry_port,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ int *range);
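For the query-style entry points, a short sketch; on success the out-parameters describe the entry's backing object:

```c
/* Sketch: report resident and dirty page counts for a named entry. */
static void
report_entry_pages(ipc_port_t entry_port)
{
	unsigned int resident = 0, dirty = 0;

	if (mach_memory_entry_get_page_counts(entry_port,
	    &resident, &dirty) == KERN_SUCCESS) {
		/* resident/dirty now hold the counts */
	}
}
```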
+
+extern void mach_memory_entry_port_release(ipc_port_t port);
+extern void mach_destroy_memory_entry(ipc_port_t port);
+extern kern_return_t mach_memory_entry_allocate(
+ struct vm_named_entry **user_entry_p,
+ ipc_port_t *user_handle_p);
+
+extern void vm_paging_map_init(void);
+
+extern int macx_backing_store_compaction(int flags);
+extern unsigned int mach_vm_ctl_page_free_wanted(void);
+
+extern int no_paging_space_action(void);
+
+#define VM_TOGGLE_CLEAR 0
+#define VM_TOGGLE_SET 1
+#define VM_TOGGLE_GETVALUE 999
+int vm_toggle_entry_reuse(int, int*);
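The toggle interface appears to be query/set; a hedged sketch, treating VM_TOGGLE_GETVALUE as the query opcode and the int * as the value channel (the second argument's role for VM_TOGGLE_SET is an assumption):

```c
/* Sketch (assumed semantics): query the reuse toggle, then set it. */
int value = 0;

(void)vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &value);
if (value == VM_TOGGLE_CLEAR) {
	(void)vm_toggle_entry_reuse(VM_TOGGLE_SET, &value);
}
```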
+
+#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */
+#define SWAP_READ 0x00000001 /* Read buffer. */
+#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */
+
+extern void vm_compressor_pager_init(void);
+extern kern_return_t compressor_memory_object_create(
+ memory_object_size_t,
+ memory_object_t *);
+
+extern boolean_t vm_compressor_low_on_space(void);
+extern boolean_t vm_compressor_out_of_space(void);
+extern int vm_swap_low_on_space(void);
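These three predicates grade how tight compressed-memory and swap space are; a sketch of an escalating check:

```c
/* Sketch: react to compressor/swap pressure in order of severity. */
static void
check_compressor_pressure(void)
{
	if (vm_compressor_out_of_space()) {
		/* most severe: the compressor pool is exhausted */
	} else if (vm_compressor_low_on_space()) {
		/* approaching the limit */
	}
	if (vm_swap_low_on_space()) {
		/* swap files are nearly full */
	}
}
```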
+void do_fastwake_warmup_all(void);
+#if CONFIG_JETSAM
+extern int proc_get_memstat_priority(struct proc*, boolean_t);
+#endif /* CONFIG_JETSAM */
+
+/*
+ * The object purger: purges the next eligible object from memory.
+ * Returns TRUE if an object was purged, otherwise FALSE.
+ */
+boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
+void vm_purgeable_nonvolatile_owner_update(task_t owner,
+ int delta);
+void vm_purgeable_volatile_owner_update(task_t owner,
+ int delta);
+void vm_owned_objects_disown(task_t task);
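Given the comment above, the purger can be driven in a loop until it reports FALSE; the meaning of force_purge_below_group (a purgeable-group threshold) is assumed here:

```c
/* Sketch: purge eligible purgeable objects until none remain. */
static int
purge_all_eligible(int force_purge_below_group)
{
	int purged = 0;

	while (vm_purgeable_object_purge_one_unlocked(force_purge_below_group)) {
		purged++;
	}
	return purged;
}
```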
+
+
+struct trim_list {
+ uint64_t tl_offset;
+ uint64_t tl_length;
+ struct trim_list *tl_next;
+};
+
+u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);
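vnode_trim_list() takes a singly linked chain of extents; a sketch with two illustrative extents follows (the route_only argument's semantics are not documented here, so FALSE is an assumption):

```c
/* Sketch: TRIM two 64 KB extents of a vnode. */
static u_int32_t
trim_two_extents(struct vnode *vp)
{
	struct trim_list second = {
		.tl_offset = 1024 * 1024,   /* illustrative offsets */
		.tl_length = 64 * 1024,
		.tl_next   = NULL,
	};
	struct trim_list first = {
		.tl_offset = 0,
		.tl_length = 64 * 1024,
		.tl_next   = &second,
	};

	return vnode_trim_list(vp, &first, FALSE /* route_only: assumed */);
}
```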
+
+#define MAX_SWAPFILENAME_LEN 1024
+#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NUL character */
+
+extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];
+
+struct vm_counters {
+ unsigned int do_collapse_compressor;
+ unsigned int do_collapse_compressor_pages;
+ unsigned int do_collapse_terminate;
+ unsigned int do_collapse_terminate_failure;
+ unsigned int should_cow_but_wired;
+ unsigned int create_upl_extra_cow;
+ unsigned int create_upl_extra_cow_pages;
+ unsigned int create_upl_lookup_failure_write;
+ unsigned int create_upl_lookup_failure_copy;
+};
+extern struct vm_counters vm_counters;
+
+#if CONFIG_SECLUDED_MEMORY
+struct vm_page_secluded_data {
+ int eligible_for_secluded;
+ int grab_success_free;
+ int grab_success_other;
+ int grab_failure_locked;
+ int grab_failure_state;
+ int grab_failure_dirty;
+ int grab_for_iokit;
+ int grab_for_iokit_success;
+};
+extern struct vm_page_secluded_data vm_page_secluded;
+
+extern int num_tasks_can_use_secluded_mem;
+
+/* boot-args */
+extern int secluded_for_apps;
+extern int secluded_for_iokit;
+extern int secluded_for_filecache;
+#if 11
+extern int secluded_for_fbdp;
+#endif
+
+extern uint64_t vm_page_secluded_drain(void);
+extern void memory_object_mark_eligible_for_secluded(
+ memory_object_control_t control,
+ boolean_t eligible_for_secluded);
+
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */
+
+extern kern_return_t mach_make_memory_entry_internal(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ vm_named_entry_kernel_flags_t vmne_kflags,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_handle);
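A hedged call sketch: *size is in/out, and VM_NAMED_ENTRY_KERNEL_FLAGS_NONE and IPC_PORT_NULL are assumed to be the usual "no flags" and "no parent" values:

```c
/* Sketch: create a read/write named entry covering [offset, offset + *size). */
static kern_return_t
make_named_entry(vm_map_t map, memory_object_offset_t offset,
    memory_object_size_t *size, ipc_port_t *handle)
{
	return mach_make_memory_entry_internal(map, size, offset,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_NAMED_ENTRY_KERNEL_FLAGS_NONE,
	    handle, IPC_PORT_NULL /* no parent entry */);
}
```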
+
+#define roundup(x, y) ((((x) % (y)) == 0) ? \
+ (x) : ((x) + ((y) - ((x) % (y)))))
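Worked examples: roundup(10, 4) yields 10 + (4 - 2) = 12, while an already-aligned roundup(8, 4) returns 8 unchanged. Because the macro expands x three times and y twice, side-effecting arguments such as roundup(x++, 4) misbehave:

```c
/* Sketch: round a length up to a multiple of the alignment. */
vm_size_t len = 10;
vm_size_t aligned = roundup(len, 4);               /* 12 */
vm_size_t page_aligned = roundup(len, PAGE_SIZE);  /* one full page */
```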
+
+#ifdef __cplusplus
+}
+#endif
+
+/*
+ * Flags for the VM swapper/reclaimer.
+ * Used by vm_swap_consider_defragment()
+ * to force defrag/reclaim by the swap
+ * GC thread.
+ */
+#define VM_SWAP_FLAGS_NONE 0
+#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
+#define VM_SWAP_FLAGS_FORCE_RECLAIM 2
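vm_swap_consider_defragment()'s prototype is not declared in this header, so the caller below is hypothetical; it only illustrates how the flag bits would select defrag versus reclaim:

```c
/* Hypothetical consumer: the prototype below is an assumption. */
extern void vm_swap_consider_defragment(boolean_t force_defrag,
    boolean_t force_reclaim);

static void
force_swap_gc(int flags)
{
	vm_swap_consider_defragment(
	    (flags & VM_SWAP_FLAGS_FORCE_DEFRAG) != 0,
	    (flags & VM_SWAP_FLAGS_FORCE_RECLAIM) != 0);
}
```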
+
+#if __arm64__
+/*
+ * Flags to control the behavior of
+ * the legacy footprint entitlement.
+ */
+#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE (1)
+#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT (2)
+#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)
+
+#endif /* __arm64__ */