ipc_space_t space);
extern task_t port_name_to_task(
mach_port_name_t name);
+extern task_t port_name_to_task_inspect(
+ mach_port_name_t name);
+extern void ipc_port_release_send(
+ ipc_port_t port);
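A hedged usage sketch for the two declarations above; the TASK_NULL check and the task_deallocate() pairing follow the usual port_name_to_task() conventions and are assumptions here:

static void
inspect_task_sketch(mach_port_name_t name, ipc_port_t send_right)
{
        /* Convert a port name in the caller's space into an inspect-only
         * task reference; assumed to return TASK_NULL on failure. */
        task_t task = port_name_to_task_inspect(name);
        if (task != TASK_NULL) {
                /* ... read-only inspection of the task ... */
                task_deallocate(task);          /* drop the task reference */
        }

        /* Drop one send right held on a raw ipc_port_t. */
        ipc_port_release_send(send_right);
}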
#endif /* _IPC_IPC_PORT_H_ */
extern ipc_space_t get_task_ipcspace(
extern vm_map_offset_t vm_map_page_mask(vm_map_t);
+extern kern_return_t vm_map_purgable_control(
+ vm_map_t map,
+ vm_map_offset_t address,
+ vm_purgable_t control,
+ int *state);
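A hedged sketch of how the new kernel-internal vm_map_purgable_control() could be called, mirroring the user-level vm_purgable_control() interface; `map` and `addr` are illustrative and the VM_PURGABLE_* constants come from <mach/vm_purgable.h>:

static void
purgable_state_sketch(vm_map_t map, vm_map_offset_t addr)
{
        int state = VM_PURGABLE_NONVOLATILE;

        /* Query the current purgeable state of the object mapped at addr. */
        if (vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE,
            &state) != KERN_SUCCESS) {
                return;
        }

        /* Mark it volatile so the VM system may empty it under pressure. */
        state = VM_PURGABLE_VOLATILE;
        (void)vm_map_purgable_control(map, addr, VM_PURGABLE_SET_STATE, &state);
}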
+
+extern kern_return_t
+vnode_pager_get_object_vnode(
+ memory_object_t mem_obj,
+ uintptr_t * vnodeaddr,
+ uint32_t * vid);
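Illustrative only: looking up the vnode behind a vnode pager. The revalidation step via vnode_getwithvid() is an assumption about how the returned address/vid pair is meant to be used:

static void
pager_vnode_sketch(memory_object_t mem_obj)
{
        uintptr_t vnodeaddr = 0;
        uint32_t  vid = 0;

        if (vnode_pager_get_object_vnode(mem_obj, &vnodeaddr, &vid) == KERN_SUCCESS) {
                /* vnodeaddr/vid identify the backing vnode; it must be
                 * revalidated (e.g. via vnode_getwithvid()) before use. */
        }
}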
+
#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, vm_offset_t va);
#endif
vm_object_offset_t crypto_end);
#endif /* CONFIG_CODE_DECRYPTION */
+struct vm_shared_region_slide_info;
+extern kern_return_t vm_map_shared_region(
+ vm_map_t map,
+ vm_map_offset_t start,
+ vm_map_offset_t end,
+ vm_object_offset_t backing_offset,
+ struct vm_shared_region_slide_info *slide_info);
+extern void shared_region_pager_bootstrap(void);
+extern memory_object_t shared_region_pager_setup(
+ vm_object_t backing_object,
+ vm_object_offset_t backing_offset,
+ struct vm_shared_region_slide_info *slide_info);
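A hedged sketch of the new shared-region pager declarations; the bootstrap-then-setup ordering and the caller-owned slide_info are assumptions:

static memory_object_t
shared_region_pager_sketch(vm_object_t backing_object,
    vm_object_offset_t backing_offset,
    struct vm_shared_region_slide_info *slide_info)
{
        /* One-time initialization of the pager subsystem (assumed to run
         * once during boot, not per mapping). */
        shared_region_pager_bootstrap();

        /* Create a pager that presents slid pages of backing_object,
         * driven by the caller-provided slide_info. */
        return shared_region_pager_setup(backing_object, backing_offset,
            slide_info);
}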
+
struct vnode;
extern void swapfile_pager_bootstrap(void);
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */
+#if __arm64__
+#define FOURK_PAGE_SIZE 0x1000
+#define FOURK_PAGE_MASK 0xFFF
+#define FOURK_PAGE_SHIFT 12
+
+extern unsigned int page_shift_user32;
+
+#define VM_MAP_DEBUG_FOURK MACH_ASSERT
+#if VM_MAP_DEBUG_FOURK
+extern int vm_map_debug_fourk;
+#endif /* VM_MAP_DEBUG_FOURK */
+extern void fourk_pager_bootstrap(void);
+extern memory_object_t fourk_pager_create(void);
+extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
+extern kern_return_t fourk_pager_populate(
+ memory_object_t mem_obj,
+ boolean_t overwrite,
+ int index,
+ vm_object_t new_backing_object,
+ vm_object_offset_t new_backing_offset,
+ vm_object_t *old_backing_object,
+ vm_object_offset_t *old_backing_offset);
+#endif /* __arm64__ */
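A hedged sketch of the fourk pager interface above (arm64 only); treating `index` as a 4 KB slot within a larger native page and FALSE as "do not overwrite an already-populated slot" are assumptions:

static kern_return_t
fourk_populate_sketch(vm_object_t backing_object,
    vm_object_offset_t backing_offset)
{
        memory_object_t mo = fourk_pager_create();
        vm_object_t old_obj = VM_OBJECT_NULL;
        vm_object_offset_t old_off = 0;

        /* Back the first FOURK_PAGE_SIZE slot of the pager's object with
         * the given backing object/offset. */
        return fourk_pager_populate(mo, FALSE, 0,
            backing_object, backing_offset, &old_obj, &old_off);
}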
/*
* bsd
extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);
+extern void vnode_pager_was_dirtied(
+ struct vnode *,
+ vm_object_offset_t,
+ vm_object_offset_t);
+
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
struct vnode *, upl_t,
uintptr_t *);
#endif
+extern void vnode_pager_dirtied(
+ memory_object_t,
+ vm_object_offset_t,
+ vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
memory_object_t,
uint32_t *);
memory_object_t);
extern void vnode_pager_vrele(
struct vnode *vp);
-extern void vnode_pager_release_from_cache(
- int *);
extern struct vnode *vnode_pager_lookup_vnode(
memory_object_t);
const void *data,
vm_size_t size,
unsigned *result);
+#if PMAP_CS
+extern kern_return_t cs_associate_blob_with_mapping(
+ void *pmap,
+ vm_map_offset_t start,
+ vm_map_size_t size,
+ vm_object_offset_t offset,
+ void *blobs_p);
+#endif /* PMAP_CS */
+
+extern kern_return_t memory_entry_purgeable_control_internal(
+ ipc_port_t entry_port,
+ vm_purgable_t control,
+ int *state);
+
+extern kern_return_t memory_entry_access_tracking_internal(
+ ipc_port_t entry_port,
+ int *access_tracking,
+ uint32_t *access_tracking_reads,
+ uint32_t *access_tracking_writes);
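Illustrative sketch of the two *_internal entry points above; treating `access_tracking` as an in/out enable flag is an assumption:

static void
entry_port_sketch(ipc_port_t entry_port)
{
        int state = VM_PURGABLE_NONVOLATILE;
        int access_tracking = 1;        /* assumed: request tracking */
        uint32_t reads = 0, writes = 0;

        /* Query the purgeable state of the memory entry behind the port. */
        (void)memory_entry_purgeable_control_internal(entry_port,
            VM_PURGABLE_GET_STATE, &state);

        /* Enable access tracking and retrieve the current counters. */
        (void)memory_entry_access_tracking_internal(entry_port,
            &access_tracking, &reads, &writes);
}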
extern kern_return_t mach_memory_entry_purgable_control(
ipc_port_t entry_port,
memory_object_t *);
extern boolean_t vm_compressor_low_on_space(void);
+extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_disown(task_t task);
+void vm_purgeable_nonvolatile_owner_update(task_t owner,
+ int delta);
+void vm_purgeable_volatile_owner_update(task_t owner,
+ int delta);
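A minimal sketch, assuming these two helpers adjust the owning task's purgeable-object accounting by `delta`, e.g. when an object transitions from nonvolatile to volatile:

static void
purgeable_requeue_sketch(task_t owner)
{
        /* Assumed accounting transition: one object leaves the owner's
         * nonvolatile count and joins its volatile count. */
        vm_purgeable_nonvolatile_owner_update(owner, -1);
        vm_purgeable_volatile_owner_update(owner, +1);
}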
+
struct trim_list {
uint64_t tl_offset;
extern int secluded_for_fbdp;
#endif
-/*
- * "secluded_aging_policy" controls the aging of secluded pages:
- *
- * SECLUDED_AGING_FIFO
- * When a page eligible for the secluded queue is activated or
- * deactivated, it is inserted in the secluded queue.
- * When it get pushed out of the secluded queue, it gets freed.
- *
- * SECLUDED_AGING_ALONG_ACTIVE
- * When a page eligible for the secluded queue is activated, it is
- * inserted in the secluded queue.
- * When it gets pushed out of the secluded queue, its "referenced" bit
- * is reset and it is inserted in the inactive queue.
- *
- * SECLUDED_AGING_AFTER_INACTIVE
- * A page eligible for the secluded queue first makes its way through the
- * active and inactive queues.
- * When it is pushed out of the inactive queue without being re-activated,
- * it is inserted in the secluded queue instead of being reclaimed.
- * When it is pushed out of the secluded queue, it is either freed if it
- * hasn't been re-referenced, or re-activated if it has been re-referenced.
- *
- * SECLUDED_AGING_BEFORE_ACTIVE
- * A page eligible for the secluded queue will first make its way through
- * the secluded queue. When it gets pushed out of the secluded queue (by
- * new secluded pages), it goes back to the normal aging path, through the
- * active queue and then the inactive queue.
- */
-extern int secluded_aging_policy;
-#define SECLUDED_AGING_FIFO 0
-#define SECLUDED_AGING_ALONG_ACTIVE 1
-#define SECLUDED_AGING_AFTER_INACTIVE 2
-#define SECLUDED_AGING_BEFORE_ACTIVE 3
-
extern void memory_object_mark_eligible_for_secluded(
memory_object_control_t control,
boolean_t eligible_for_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
+#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */
+
+extern kern_return_t mach_make_memory_entry_internal(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_handle);
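A hedged sketch of the internal named-entry constructor; the read-only protection, the 16 KB size, and the absence of a parent entry are illustrative choices:

static kern_return_t
named_entry_sketch(vm_map_t map, memory_object_offset_t offset,
    ipc_port_t *entry_out)
{
        memory_object_size_t size = 16 * 1024;  /* in/out: may be rounded */

        return mach_make_memory_entry_internal(map, &size, offset,
            VM_PROT_READ, entry_out, IPC_PORT_NULL /* no parent entry */);
}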
+
+#define roundup(x, y) ((((x) % (y)) == 0) ? \
+ (x) : ((x) + ((y) - ((x) % (y)))))
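For example, roundup(0x1234, 0x1000) == 0x2000, while an already-aligned value is returned unchanged: roundup(0x2000, 0x1000) == 0x2000.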
+
#ifdef __cplusplus
}
#endif
+/*
+ * Flags for the VM swapper/reclaimer.
+ * Used by vm_swap_consider_defragment()
+ * to force defrag/reclaim by the swap
+ * GC thread.
+ */
+#define VM_SWAP_FLAGS_NONE 0
+#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
+#define VM_SWAP_FLAGS_FORCE_RECLAIM 2
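A hedged example of combining the flags; it assumes the vm_swap_consider_defragment() entry point named in the comment above accepts them OR'd together as a single int:

        /* Force both a defrag and a reclaim pass by the swap GC thread. */
        vm_swap_consider_defragment(VM_SWAP_FLAGS_FORCE_DEFRAG |
            VM_SWAP_FLAGS_FORCE_RECLAIM);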
+
#endif /* _VM_VM_PROTOS_H_ */
#endif /* XNU_KERNEL_PRIVATE */