X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..143464d58d2bd6378e74eec636961ceb0d32fb91:/osfmk/vm/vm_pageout.h

diff --git a/osfmk/vm/vm_pageout.h b/osfmk/vm/vm_pageout.h
index b76023182..7396460b2 100644
--- a/osfmk/vm/vm_pageout.h
+++ b/osfmk/vm/vm_pageout.h
@@ -81,6 +81,72 @@
 #include <vm/vm_options.h>
 
+#ifdef MACH_KERNEL_PRIVATE
+#include <vm/vm_page.h>
+#endif
+
+#include <sys/kdebug.h>
+
+#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))
+
+/* externally manipulated counters */
+extern unsigned int vm_pageout_cleaned_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated;
+
+#if CONFIG_JETSAM
+#define LATENCY_JETSAM	FALSE
+#if LATENCY_JETSAM
+#define JETSAM_LATENCY_TOKEN_AGE	3000	/* 3ms */
+#define NUM_OF_JETSAM_LATENCY_TOKENS	1000
+
+#define JETSAM_AGE_NOTIFY_CRITICAL	1500000	/* 1.5 secs */
+
+extern boolean_t jlp_init;
+extern uint64_t jlp_time, jlp_current;
+extern unsigned int latency_jetsam_wakeup;
+#endif /* LATENCY_JETSAM */
+#endif /* CONFIG_JETSAM */
+
+#if CONFIG_FREEZE
+extern boolean_t memorystatus_freeze_enabled;
+#define VM_DYNAMIC_PAGING_ENABLED(port) ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) || (memorystatus_freeze_enabled == FALSE && IP_VALID(port)))
+#else
+#define VM_DYNAMIC_PAGING_ENABLED(port) (COMPRESSED_PAGER_IS_ACTIVE || IP_VALID(port))
+#endif
+
+
+extern int	vm_debug_events;
+
+#define VMF_CHECK_ZFDELAY	0x100
+#define VMF_COWDELAY		0x101
+#define VMF_ZFDELAY		0x102
+#define VMF_COMPRESSORDELAY	0x103
+
+#define VM_PAGEOUT_SCAN		0x104
+#define VM_PAGEOUT_BALANCE	0x105
+#define VM_PAGEOUT_FREELIST	0x106
+#define VM_PAGEOUT_PURGEONE	0x107
+#define VM_PAGEOUT_CACHE_EVICT	0x108
+#define VM_PAGEOUT_THREAD_BLOCK	0x109
+#define VM_PAGEOUT_JETSAM	0x10A
+#define VM_PAGEOUT_PAGE_TOKEN	0x10B
+
+#define VM_UPL_PAGE_WAIT	0x120
+#define VM_IOPL_PAGE_WAIT	0x121
+#define VM_PAGE_WAIT_BLOCK	0x122
+
+#define VM_PRESSURE_EVENT	0x130
+#define VM_EXECVE		0x131
+#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132
+
+#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
+	MACRO_BEGIN							\
+	if (vm_debug_events) {						\
+		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
+	}								\
+	MACRO_END
+
+extern void inline memoryshot(unsigned int event, unsigned int control);
+
 extern kern_return_t vm_map_create_upl(
 	vm_map_t		map,
 	vm_map_address_t	offset,
@@ -96,43 +162,60 @@ extern ppnum_t upl_get_highest_page(
 extern upl_size_t upl_get_size(
 	upl_t			upl);
 
-#ifdef MACH_KERNEL_PRIVATE
-#include <vm/vm_page.h>
+#ifndef MACH_KERNEL_PRIVATE
+typedef struct vm_page	*vm_page_t;
+#endif
 
-extern unsigned int	vm_pageout_scan_event_counter;
-extern unsigned int	vm_zf_queue_count;
+extern void vm_page_free_list(
+	vm_page_t	mem,
+	boolean_t	prepare_object);
+extern kern_return_t vm_page_alloc_list(
+	int		page_count,
+	int		flags,
+	vm_page_t	*list);
 
-#if defined(__ppc__)	/* On ppc, vm statistics are still 32-bit */
+extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
+extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
+extern ppnum_t vm_page_get_phys_page(vm_page_t page);
+extern vm_page_t vm_page_get_next(vm_page_t page);
 
-extern unsigned int	vm_zf_count;
+extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);
 
-#define VM_ZF_COUNT_INCR()				\
-	MACRO_BEGIN					\
-	OSAddAtomic(1, (SInt32 *) &vm_zf_count);	\
-	MACRO_END					\
+#ifdef MACH_KERNEL_PRIVATE
 
-#define VM_ZF_COUNT_DECR()				\
-	MACRO_BEGIN					\
-	OSAddAtomic(-1, (SInt32 *) &vm_zf_count);	\
-	MACRO_END					\
+#include <vm/vm_page.h>
 
-#else /* !(defined(__ppc__)) */
+extern unsigned int	vm_pageout_scan_event_counter;
+extern unsigned int	vm_page_anonymous_count;
 
-extern uint64_t	vm_zf_count;
 
-#define VM_ZF_COUNT_INCR()				\
-	MACRO_BEGIN					\
-	OSAddAtomic64(1, (SInt64 *) &vm_zf_count);	\
-	MACRO_END					\
+/*
+ * must hold the page queues lock to
+ * manipulate this structure
+ */
+struct vm_pageout_queue {
+	queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
+	unsigned int	pgo_laundry;	/* current count of laundry pages on queue or in flight */
+	unsigned int	pgo_maxlaundry;
+	uint64_t	pgo_tid;	/* thread ID of I/O thread that services this queue */
+	uint8_t		pgo_lowpriority; /* iothread is set to use low priority I/O */
 
+	unsigned int	pgo_idle:1,	/* iothread is blocked waiting for work to do */
+			pgo_busy:1,	/* iothread is currently processing request from pgo_pending */
+			pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
+			pgo_draining:1,
+			pgo_inited:1,
+			:0;
+};
+
+#define VM_PAGE_Q_THROTTLED(q)		\
+	((q)->pgo_laundry >= (q)->pgo_maxlaundry)
 
-#define VM_ZF_COUNT_DECR()				\
-	MACRO_BEGIN					\
-	OSAddAtomic64(-1, (SInt64 *) &vm_zf_count);	\
-	MACRO_END					\
+extern struct vm_pageout_queue vm_pageout_queue_internal;
+extern struct vm_pageout_queue vm_pageout_queue_external;
 
-#endif /* !(defined(__ppc__)) */
 
 /*
  * Routines exported to Mach.
@@ -145,7 +228,8 @@ extern void vm_pageout_object_terminate(
 	vm_object_t	object);
 
 extern void vm_pageout_cluster(
-	vm_page_t	m);
+	vm_page_t	m,
+	boolean_t	pageout);
 
 extern void vm_pageout_initialize_page(
 	vm_page_t	m);
@@ -205,6 +289,7 @@ struct ucd {
 struct upl {
 	decl_lck_mtx_data(,	Lock)	/* Synchronization */
 	int		ref_count;
+	int		ext_ref_count;
 	int		flags;
 	vm_object_t	src_object; /* object derived from */
 	vm_object_offset_t offset;
@@ -243,6 +328,8 @@ struct upl {
 #define UPL_SHADOWED		0x1000
 #define UPL_KERNEL_OBJECT	0x2000
 #define UPL_VECTOR		0x4000
+#define UPL_SET_DIRTY		0x8000
+#define UPL_HAS_BUSY		0x10000
 
 /* flags for upl_create flags parameter */
 #define UPL_CREATE_EXTERNAL	0
@@ -262,6 +349,12 @@ extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_s
 extern upl_t vector_upl_subupl_byindex(upl_t , uint32_t);
 extern upl_t vector_upl_subupl_byoffset(upl_t , upl_offset_t*, upl_size_t*);
 
+extern void vm_object_set_pmap_cache_attr(
+		vm_object_t		object,
+		upl_page_info_array_t	user_page_list,
+		unsigned int		num_pages,
+		boolean_t		batch_pmap_op);
+
 extern kern_return_t vm_object_iopl_request(
 	vm_object_t		object,
 	vm_object_offset_t	offset,
@@ -295,10 +388,6 @@ extern kern_return_t vm_map_remove_upl(
 /* wired page list structure */
 typedef uint32_t *wpl_array_t;
 
-extern void vm_page_free_list(
-	vm_page_t	mem,
-	boolean_t	prepare_object);
-
 extern void vm_page_free_reserve(int pages);
 
 extern void vm_pageout_throttle_down(vm_page_t page);
@@ -319,13 +408,14 @@ extern void vm_page_decrypt(
 	vm_page_t	page,
 	vm_map_offset_t	kernel_map_offset);
 extern kern_return_t vm_paging_map_object(
-	vm_map_offset_t	*address,
 	vm_page_t	page,
 	vm_object_t	object,
 	vm_object_offset_t	offset,
-	vm_map_size_t	*size,
 	vm_prot_t	protection,
-	boolean_t	can_unlock_object);
+	boolean_t	can_unlock_object,
+	vm_map_size_t	*size,		/* IN/OUT */
+	vm_map_offset_t	*address,	/* OUT */
+	boolean_t	*need_unmap);	/* OUT */
 extern void vm_paging_unmap_object(
 	vm_object_t	object,
 	vm_map_offset_t	start,
@@ -337,10 +427,13 @@ decl_simple_lock_data(extern, vm_paging_lock)
  */
 extern unsigned int	vm_backing_store_low;
 
-extern void vm_pageout_queue_steal(
+extern void vm_pageout_steal_laundry(
 	vm_page_t	page,
 	boolean_t	queues_locked);
 
+extern boolean_t vm_page_is_slideable(vm_page_t m);
+
+extern kern_return_t vm_page_slide(vm_page_t page, vm_map_offset_t kernel_mapping_offset);
 
 #endif /* MACH_KERNEL_PRIVATE */
 
 #if UPL_DEBUG
@@ -371,7 +464,7 @@ extern kern_return_t mach_vm_pressure_monitor(
 
 extern kern_return_t vm_set_buffer_cleanup_callout(
-	boolean_t	(*func)(void));
+	boolean_t	(*func)(int));
 
 struct vm_page_stats_reusable {
 	SInt32		reusable_count;
@@ -390,9 +483,34 @@ struct vm_page_stats_reusable {
 	uint64_t	reuse_pages_failure;
 	uint64_t	can_reuse_success;
 	uint64_t	can_reuse_failure;
+	uint64_t	reusable_reclaimed;
 };
 extern struct vm_page_stats_reusable vm_page_stats_reusable;
 
+extern int hibernate_flush_memory(void);
+extern void hibernate_create_paddr_map(void);
+
+extern int vm_compressor_mode;
+extern int vm_compressor_thread_count;
+
+#define VM_PAGER_DEFAULT				0x1	/* Use default pager. */
+#define VM_PAGER_COMPRESSOR_NO_SWAP			0x2	/* In-core compressor only. */
+#define VM_PAGER_COMPRESSOR_WITH_SWAP			0x4	/* In-core compressor + swap backend. */
+#define VM_PAGER_FREEZER_DEFAULT			0x8	/* Freezer backed by default pager. */
+#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP		0x10	/* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
+#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP		0x20	/* Freezer backed by in-core compressor with swap support too. */
+
+#define VM_PAGER_MAX_MODES				6	/* Total number of vm compressor modes supported */
+
+#define DEFAULT_PAGER_IS_ACTIVE		((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)
+
+#define COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))
+
+#define DEFAULT_FREEZER_IS_ACTIVE	((vm_compressor_mode & VM_PAGER_FREEZER_DEFAULT) == VM_PAGER_FREEZER_DEFAULT)
+
+#define DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE	(vm_compressor_mode & (VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP | VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP))
+
+
 #endif  /* KERNEL_PRIVATE */
 
 #endif	/* _VM_VM_PAGEOUT_H_ */
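
The struct vm_pageout_queue added under MACH_KERNEL_PRIVATE is consumed through VM_PAGE_Q_THROTTLED(), which reports whether a queue has reached its laundry limit (the condition that, per the pgo_throttled comment, makes vm_pageout_scan wait for a wakeup). Below is a minimal stand-alone C sketch of just that check: the struct is reduced to the two fields the macro reads, and the numbers are illustrative, not kernel defaults.

#include <stdio.h>

/* Reduced model of the queue added in this diff; only the two fields
 * that VM_PAGE_Q_THROTTLED() reads are reproduced here. */
struct vm_pageout_queue {
	unsigned int pgo_laundry;    /* pages queued or in flight */
	unsigned int pgo_maxlaundry; /* limit before the queue throttles */
};

#define VM_PAGE_Q_THROTTLED(q) \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

int main(void)
{
	/* illustrative values, not kernel defaults */
	struct vm_pageout_queue q = { .pgo_laundry = 0, .pgo_maxlaundry = 128 };

	for (q.pgo_laundry = 120; q.pgo_laundry <= 130; q.pgo_laundry += 5)
		printf("laundry=%u throttled=%d\n",
		       q.pgo_laundry, VM_PAGE_Q_THROTTLED(&q));
	return 0;
}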
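
The VM_PAGER_* bits compose vm_compressor_mode, and the *_IS_ACTIVE macros are plain mask tests over it: DEFAULT_PAGER_IS_ACTIVE checks for the exact bit, while COMPRESSED_PAGER_IS_ACTIVE accepts either compressor bit. A stand-alone sketch of how those tests evaluate; the sample mode values are hypothetical, chosen only to exercise the macros.

#include <stdio.h>

/* Mode bits as defined in the diff. */
#define VM_PAGER_DEFAULT                      0x1
#define VM_PAGER_COMPRESSOR_NO_SWAP           0x2
#define VM_PAGER_COMPRESSOR_WITH_SWAP         0x4
#define VM_PAGER_FREEZER_DEFAULT              0x8
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP   0x10
#define VM_PAGER_FREEZER_COMPRESSOR_WITH_SWAP 0x20

static int vm_compressor_mode;

#define DEFAULT_PAGER_IS_ACTIVE \
	((vm_compressor_mode & VM_PAGER_DEFAULT) == VM_PAGER_DEFAULT)
#define COMPRESSED_PAGER_IS_ACTIVE \
	(vm_compressor_mode & (VM_PAGER_COMPRESSOR_NO_SWAP | VM_PAGER_COMPRESSOR_WITH_SWAP))

int main(void)
{
	/* hypothetical boot configurations, for illustration only */
	int modes[] = { VM_PAGER_DEFAULT,
	                VM_PAGER_COMPRESSOR_WITH_SWAP,
	                VM_PAGER_DEFAULT | VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP };

	for (unsigned int i = 0; i < sizeof modes / sizeof modes[0]; i++) {
		vm_compressor_mode = modes[i];
		printf("mode=0x%02x default=%d compressed=%d\n",
		       vm_compressor_mode,
		       DEFAULT_PAGER_IS_ACTIVE,
		       COMPRESSED_PAGER_IS_ACTIVE != 0);
	}
	return 0;
}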
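
VM_DEBUG_EVENT() hands KERNEL_DEBUG_CONSTANT a debugid built as MACHDBG_CODE(DBG_MACH_VM, event) | control. The sketch below shows that packing for the new VM_PAGEOUT_SCAN code; the DBG_MACH/DBG_MACH_VM values and the KDBG_CODE bit layout follow sys/kdebug.h but are reproduced here by hand, so treat them as assumptions rather than the authoritative header.

#include <stdio.h>
#include <stdint.h>

/* Hand-copied approximations of the sys/kdebug.h encoding:
 * class in bits 24-31, subclass in bits 16-23, code shifted left by 2
 * so the low two bits are free for the function qualifier. */
#define DBG_MACH    1
#define DBG_MACH_VM 0x30
#define KDBG_CODE(class, subclass, code) \
	((((uint32_t)(class)) << 24) | (((uint32_t)(subclass)) << 16) | \
	 (((uint32_t)(code)) << 2))
#define MACHDBG_CODE(subclass, code) KDBG_CODE(DBG_MACH, subclass, code)

#define VM_PAGEOUT_SCAN 0x104   /* event code added in this diff */
#define DBG_FUNC_START  1       /* "control" qualifier bits */
#define DBG_FUNC_END    2

int main(void)
{
	/* what VM_DEBUG_EVENT(..., VM_PAGEOUT_SCAN, DBG_FUNC_START, ...)
	 * would pass as the first KERNEL_DEBUG_CONSTANT argument */
	uint32_t debugid = MACHDBG_CODE(DBG_MACH_VM, VM_PAGEOUT_SCAN) | DBG_FUNC_START;
	printf("debugid = 0x%08x\n", debugid);   /* prints 0x01300411 */
	return 0;
}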