X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..39236c6e673c41db228275375ab7fdb0f837b292:/osfmk/vm/vm_object.h

diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h
index a093bf2f4..9c4fe0e32 100644
--- a/osfmk/vm/vm_object.h
+++ b/osfmk/vm/vm_object.h
@@ -66,6 +66,8 @@
 #ifndef	_VM_VM_OBJECT_H_
 #define _VM_VM_OBJECT_H_
 
+#include <debug.h>
+#include <mach_assert.h>
 #include <mach_pagemap.h>
 #include <task_swapper.h>
 
@@ -85,11 +87,12 @@
 #include <ipc/ipc_types.h>
 #include <vm/pmap.h>
 
-#if	MACH_PAGEMAP
 #include <vm/vm_external.h>
-#endif	/* MACH_PAGEMAP */
+
+#include <vm/vm_options.h>
 
 struct vm_page;
+struct vm_shared_region_slide_info;
 
 /*
  *	Types defined:
@@ -105,18 +108,35 @@ struct vm_object_fault_info {
 	vm_behavior_t	behavior;
 	vm_map_offset_t	lo_offset;
 	vm_map_offset_t	hi_offset;
-	boolean_t	no_cache;
+	unsigned int
+	/* boolean_t */	no_cache:1,
+	/* boolean_t */	stealth:1,
+	/* boolean_t */	io_sync:1,
+	/* boolean_t */	cs_bypass:1,
+	/* boolean_t */	mark_zf_absent:1,
+	/* boolean_t */	batch_pmap_op:1,
+		__vm_object_fault_info_unused_bits:26;
 };
 
 
+#define	vo_size				vo_un1.vou_size
+#define vo_cache_pages_to_scan		vo_un1.vou_cache_pages_to_scan
+#define vo_shadow_offset		vo_un2.vou_shadow_offset
+#define vo_cache_ts			vo_un2.vou_cache_ts
+#define vo_purgeable_owner		vo_un2.vou_purgeable_owner
+#define vo_slide_info			vo_un2.vou_slide_info
+
 struct vm_object {
 	queue_head_t		memq;		/* Resident memory */
 	lck_rw_t		Lock;		/* Synchronization */
 
-	vm_object_size_t	size;		/* Object size (only valid
-						 * if internal)
-						 */
+	union {
+		vm_object_size_t  vou_size;	/* Object size (only valid if internal) */
+		int		  vou_cache_pages_to_scan;	/* pages yet to be visited in an
+								 * external object in cache
+								 */
+	} vo_un1;
+
 	struct vm_page		*memq_hint;
 	int			ref_count;	/* Number of references */
 #if	TASK_SWAPPER
@@ -124,6 +144,8 @@ struct vm_object {
 #endif	/* TASK_SWAPPER */
 	unsigned int		resident_page_count;
 						/* number of resident pages */
+	unsigned int		wired_page_count; /* number of wired pages */
+	unsigned int		reusable_page_count;
 
 	struct vm_object	*copy;		/* Object that should receive
 						 * a copy of my changed pages,
@@ -133,7 +155,18 @@ struct vm_object {
 						 * copy_call.
 						 */
 	struct vm_object	*shadow;	/* My shadow */
-	vm_object_offset_t	shadow_offset;	/* Offset into shadow */
+
+	union {
+		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
+		clock_sec_t	vou_cache_ts;	/* age of an external object
+						 * present in cache
+						 */
+		task_t		vou_purgeable_owner;	/* If the purg'a'ble bits below are set
+							 * to volatile/empty, this is the task
+							 * that owns this purgeable object.
+							 */
+		struct vm_shared_region_slide_info *vou_slide_info;
+	} vo_un2;
 
 	memory_object_t		pager;		/* Where to get data */
 	vm_object_offset_t	paging_offset;	/* Offset into memory object */
@@ -142,7 +175,7 @@
 	memory_object_copy_strategy_t
 				copy_strategy;	/* How to handle data copy */
 
-	int			paging_in_progress;
+	short			paging_in_progress;
 						/* The memory object ports are
 						 * being used (e.g., for pagein
 						 * or pageout) -- don't change
@@ -150,6 +183,8 @@
 						 * any of these fields (i.e.
 						 * don't collapse, destroy or
 						 * terminate)	*/
+	short			activity_in_progress;
+
 	unsigned int
 	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
 						 * awakened" notations.  See
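
The vo_un1/vo_un2 unions introduced above overlay fields that are never live at the same time (an internal object has a size; a cached external object has pages left to scan), and the vo_* accessor macros keep call sites readable. A minimal user-space sketch of the same pattern, using hypothetical stand-in types rather than the kernel's:

#include <stdint.h>

/* Stand-in type for illustration only. */
typedef uint64_t obj_size_t;

struct obj {
	union {
		obj_size_t vou_size;                /* valid while the object is internal */
		int        vou_cache_pages_to_scan; /* valid while it sits in the cache */
	} vo_un1;
};

/* The accessor macros hide the union, so call sites stay unchanged. */
#define vo_size                vo_un1.vou_size
#define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan

int main(void)
{
	struct obj o;
	o.vo_size = 4096;   /* expands to o.vo_un1.vou_size = 4096 */
	return (o.vo_size == 4096) ? 0 : 1;
}
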
@@ -193,11 +228,10 @@ struct vm_object {
 	/* boolean_t */		purgable:2,	/* Purgable state.  See
 						 * VM_PURGABLE_*
 						 */
+	/* boolean_t */		purgeable_when_ripe:1, /* Purgeable when a token
+						 * becomes ripe.
+						 */
 	/* boolean_t */		shadowed:1,	/* Shadow may exist */
-	/* boolean_t */		silent_overwrite:1,
-						/* Allow full page overwrite
-						 * without data_request if
-						 * page is absent */
 	/* boolean_t */		advisory_pageout:1,
 						/* Instead of sending page
 						 * via OOL, just notify
@@ -288,9 +322,22 @@ struct vm_object {
 		code_signed:1,	/* pages are signed and should be
 				   validated; the signatures are stored
 				   with the pager */
-		not_in_use:23;	/* for expansion */
-
-#ifdef	UPL_DEBUG
+		hashed:1,	/* object/pager entered in hash */
+		transposed:1,	/* object was transposed with another */
+		mapping_in_progress:1, /* pager being mapped/unmapped */
+		volatile_empty:1,
+		volatile_fault:1,
+		all_reusable:1,
+		blocked_access:1,
+		set_cache_attr:1,
+		object_slid:1,
+		purgeable_queue_type:2,
+		purgeable_queue_group:3,
+		__object2_unused_bits:9;	/* for expansion */
+
+	uint32_t		scan_collisions;
+
+#if	UPL_DEBUG
 	queue_head_t		uplq;		/* List of outstanding upls */
 #endif /* UPL_DEBUG */
 
@@ -306,9 +353,14 @@
 	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
 #endif	/* VM_PIP_DEBUG */
 
-	queue_chain_t		objq;      /* object queue - currently used for purgable queues */
+        queue_chain_t		objq;      /* object queue - currently used for purgable queues */
 };
 
+#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)				\
+	((object)->volatile_fault &&					\
+	 ((object)->purgable == VM_PURGABLE_VOLATILE ||			\
+	  (object)->purgable == VM_PURGABLE_EMPTY))
+
 #define VM_PAGE_REMOVE(page)						\
 	MACRO_BEGIN							\
 	vm_page_t __page = (page);					\
@@ -337,10 +389,13 @@ struct vm_object {
 	__object->memq_hint = __page;					\
 	MACRO_END
 
-__private_extern__
+extern
 vm_object_t	kernel_object;		/* the single kernel object */
 
-__private_extern__
+extern
+vm_object_t	compressor_object;	/* the single compressor object */
+
+extern
 unsigned int	vm_object_absent_max;	/* maximum number of absent pages
 					   at a time for each object */
 
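
VM_OBJECT_PURGEABLE_FAULT_ERROR, added in a hunk above, flags a fault on a purgeable object whose contents may already have been discarded. A self-contained sketch of the predicate; the toy_object type and the TOY_* constants are stand-ins for the kernel's definitions, not the real ones:

#include <stdbool.h>

/* Stand-ins for the kernel's VM_PURGABLE_* states (illustration only). */
#define TOY_PURGABLE_NONVOLATILE 0
#define TOY_PURGABLE_VOLATILE    1
#define TOY_PURGABLE_EMPTY       2

struct toy_object {
	unsigned int purgable:2;       /* current purgeable state */
	unsigned int volatile_fault:1; /* object faulted while volatile */
};

/* Same logic as the macro: a fault is an error when the object
 * faulted while volatile and is still volatile or already emptied. */
static bool purgeable_fault_error(const struct toy_object *o)
{
	return o->volatile_fault &&
	       (o->purgable == TOY_PURGABLE_VOLATILE ||
	        o->purgable == TOY_PURGABLE_EMPTY);
}
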
@@ -355,33 +410,40 @@ struct msync_req {
 	vm_object_offset_t	offset;
 	vm_object_size_t	length;
 	vm_object_t		object;		/* back pointer */
-	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
+	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
 };
 
 typedef struct msync_req	*msync_req_t;
 #define MSYNC_REQ_NULL		((msync_req_t) 0)
 
+
+extern lck_grp_t		vm_map_lck_grp;
+extern lck_attr_t		vm_map_lck_attr;
+
 /*
  * Macros to allocate and free msync_reqs
  */
 #define msync_req_alloc(msr)						\
-	MACRO_BEGIN							\
+    MACRO_BEGIN							\
         (msr) = (msync_req_t)kalloc(sizeof(struct msync_req));		\
-        mutex_init(&(msr)->msync_req_lock, 0);				\
-        msr->flag = VM_MSYNC_INITIALIZED;				\
-        MACRO_END
+        lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr);	\
+        msr->flag = VM_MSYNC_INITIALIZED;				\
+    MACRO_END
 
 #define msync_req_free(msr)						\
-	(kfree((msr), sizeof(struct msync_req)))
+	MACRO_BEGIN							\
+		lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp); \
+		kfree((msr), sizeof(struct msync_req));			\
+	MACRO_END
 
-#define msr_lock(msr)   mutex_lock(&(msr)->msync_req_lock)
-#define msr_unlock(msr) mutex_unlock(&(msr)->msync_req_lock)
+#define msr_lock(msr)   lck_mtx_lock(&(msr)->msync_req_lock)
+#define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock)
 
 /*
  *	Declare procedures that operate on VM objects.
  */
 
-__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));
+__private_extern__ void		vm_object_bootstrap(void);
 
 __private_extern__ void		vm_object_init(void);
 
@@ -429,8 +491,8 @@ __private_extern__ void	vm_object_res_deallocate(
 	vm_object_t RLObject = (object);				\
 	vm_object_lock_assert_shared(object);				\
 	assert((RLObject)->ref_count > 0);				\
-	OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count);		\
-	assert((RLObject)->ref_count > 1);				\
+	OSAddAtomic(1, &(RLObject)->ref_count);				\
+	assert((RLObject)->ref_count > 0);				\
 	/* XXX we would need an atomic version of the following ... */	\
 	vm_object_res_reference(RLObject);				\
 	MACRO_END
@@ -445,8 +507,8 @@ __private_extern__ void	vm_object_reference(
 	MACRO_BEGIN							\
 	vm_object_t RObject = (object);					\
 	if (RObject) {							\
-		vm_object_lock(RObject);				\
-		vm_object_reference_locked(RObject);			\
+		vm_object_lock_shared(RObject);				\
+		vm_object_reference_shared(RObject);			\
 		vm_object_unlock(RObject);				\
 	}								\
 	MACRO_END
@@ -468,6 +530,15 @@ __private_extern__ void	vm_object_pmap_protect(
 	vm_map_offset_t		pmap_start,
 	vm_prot_t		prot);
 
+__private_extern__ void	vm_object_pmap_protect_options(
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	vm_object_size_t	size,
+	pmap_t			pmap,
+	vm_map_offset_t		pmap_start,
+	vm_prot_t		prot,
+	int			options);
+
 __private_extern__ void	vm_object_page_remove(
 	vm_object_t		object,
 	vm_object_offset_t	start,
@@ -477,9 +548,16 @@
 __private_extern__ void	vm_object_deactivate_pages(
 	vm_object_t		object,
 	vm_object_offset_t	offset,
 	vm_object_size_t	size,
-	boolean_t		kill_page);
+	boolean_t		kill_page,
+	boolean_t		reusable_page);
+
+__private_extern__ void	vm_object_reuse_pages(
+	vm_object_t		object,
+	vm_object_offset_t	start_offset,
+	vm_object_offset_t	end_offset,
+	boolean_t		allow_partial_reuse);
 
-__private_extern__ unsigned int	vm_object_purge(
+__private_extern__ void	vm_object_purge(
 	vm_object_t		object);
 
@@ -487,6 +565,13 @@ __private_extern__ kern_return_t vm_object_purgable_control(
 	vm_purgable_t	control,
 	int		*state);
 
+__private_extern__ kern_return_t vm_object_get_page_counts(
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	vm_object_size_t	size,
+	unsigned int		*resident_page_count,
+	unsigned int		*dirty_page_count);
+
 __private_extern__ boolean_t	vm_object_coalesce(
 	vm_object_t		prev_object,
 	vm_object_t		next_object,
@@ -524,7 +609,7 @@ __private_extern__ kern_return_t vm_object_copy_slowly(
 	vm_object_t		src_object,
 	vm_object_offset_t	src_offset,
 	vm_object_size_t	size,
-	int			interruptible,
+	boolean_t		interruptible,
 	vm_object_t		*_result_object);
 
 __private_extern__ vm_object_t	vm_object_copy_delayed(
@@ -542,6 +627,9 @@ __private_extern__ kern_return_t vm_object_destroy(
 __private_extern__ void		vm_object_pager_create(
 	vm_object_t	object);
 
+__private_extern__ void		vm_object_compressor_pager_create(
+	vm_object_t	object);
+
 __private_extern__ void		vm_object_page_map(
 	vm_object_t	object,
 	vm_object_offset_t	offset,
@@ -604,7 +692,8 @@ __private_extern__ void		vm_object_cluster_size(
 	vm_object_t		object,
 	vm_object_offset_t	*start,
 	vm_size_t		*length,
-	vm_object_fault_info_t	fault_info);
+	vm_object_fault_info_t	fault_info,
+	uint32_t		*io_streaming);
 
 __private_extern__ kern_return_t vm_object_populate_with_private(
 	vm_object_t		object,
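
Two earlier hunks are worth connecting: vm_object_reference() now takes only a shared (read) lock, which is safe because vm_object_reference_locked() bumps ref_count with an atomic OSAddAtomic, and the post-increment assertion was relaxed to ref_count > 0, since re-reading the counter after the add can observe other threads' concurrent updates. The shape of that pattern, sketched with C11 atomics in place of OSAddAtomic (toy types, not the kernel's):

#include <assert.h>
#include <stdatomic.h>

struct toy_object {
	atomic_int ref_count;
};

/* A shared lock only has to keep the object from being torn down;
 * the increment itself is atomic, so concurrent readers taking
 * references do not race one another. */
static void toy_object_reference(struct toy_object *o)
{
	int old = atomic_fetch_add(&o->ref_count, 1);
	assert(old > 0); /* caller must already hold a reference */
}
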
@@ -612,6 +701,10 @@ __private_extern__ kern_return_t vm_object_populate_with_private(
 	ppnum_t			phys_page,
 	vm_size_t		size);
 
+__private_extern__ void vm_object_change_wimg_mode(
+	vm_object_t		object,
+	unsigned int		wimg_mode);
+
 extern kern_return_t	adjust_vm_object_cache(
 	vm_size_t oval,
 	vm_size_t nval);
@@ -628,7 +721,48 @@ extern kern_return_t vm_object_range_op(
 	vm_object_offset_t	offset_beg,
 	vm_object_offset_t	offset_end,
 	int			ops,
-	int			*range);
+	uint32_t		*range);
+
+
+__private_extern__ void		vm_object_reap_pages(
+					vm_object_t object,
+					int	reap_type);
+#define REAP_REAP	0
+#define	REAP_TERMINATE	1
+#define REAP_PURGEABLE	2
+#define REAP_DATA_FLUSH	3
+
+#if CONFIG_FREEZE
+struct default_freezer_handle;
+
+__private_extern__ kern_return_t
+vm_object_pack(
+	unsigned int	*purgeable_count,
+	unsigned int	*wired_count,
+	unsigned int	*clean_count,
+	unsigned int	*dirty_count,
+	unsigned int	dirty_budget,
+	boolean_t	*shared,
+	vm_object_t	src_object,
+	struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pack_pages(
+	unsigned int	*wired_count,
+	unsigned int	*clean_count,
+	unsigned int	*dirty_count,
+	unsigned int	dirty_budget,
+	vm_object_t	src_object,
+	struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pageout(
+	vm_object_t	object);
+
+__private_extern__ kern_return_t
+vm_object_pagein(
+	vm_object_t	object);
+#endif /* CONFIG_FREEZE */
 
 /*
  * Event waiting handling
@@ -637,10 +771,13 @@ extern kern_return_t vm_object_range_op(
 #define	VM_OBJECT_EVENT_INITIALIZED		0
 #define	VM_OBJECT_EVENT_PAGER_READY		1
 #define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
+#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
 #define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
 #define	VM_OBJECT_EVENT_UNCACHING		5
 #define	VM_OBJECT_EVENT_COPY_CALL		6
 #define	VM_OBJECT_EVENT_CACHING			7
+#define VM_OBJECT_EVENT_UNBLOCKED		8
+#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9
 
 #define	vm_object_assert_wait(object, event, interruptible)		\
 	(((object)->all_wanted |= 1 << (event)),			\
@@ -681,8 +818,9 @@ extern kern_return_t vm_object_range_op(
 #include <libkern/OSDebug.h>
 #define VM_PIP_DEBUG_BEGIN(object)					\
 	MACRO_BEGIN							\
-	if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) {	\
-		int pip = (object)->paging_in_progress;			\
+	int pip = ((object)->paging_in_progress +			\
+		   (object)->activity_in_progress);			\
+	if (pip < VM_PIP_DEBUG_MAX_REFS) {				\
 		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
 				   VM_PIP_DEBUG_STACK_FRAMES);		\
 	}								\
@@ -691,7 +829,26 @@ extern kern_return_t vm_object_range_op(
 #define VM_PIP_DEBUG_BEGIN(object)
 #endif	/* VM_PIP_DEBUG */
 
-#define		vm_object_paging_begin(object) 				\
+#define		vm_object_activity_begin(object)			\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	assert((object)->paging_in_progress >= 0);			\
+	VM_PIP_DEBUG_BEGIN((object));					\
+	(object)->activity_in_progress++;				\
+	MACRO_END
+
+#define		vm_object_activity_end(object)				\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	assert((object)->activity_in_progress > 0);			\
+	(object)->activity_in_progress--;				\
+	if ((object)->paging_in_progress == 0 &&			\
+	    (object)->activity_in_progress == 0)			\
+		vm_object_wakeup((object),				\
+				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
+	MACRO_END
+
+#define		vm_object_paging_begin(object) 				\
 	MACRO_BEGIN							\
 	vm_object_lock_assert_exclusive((object));			\
 	assert((object)->paging_in_progress >= 0);			\
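
The activity_begin/activity_end macros above pair a second counter with the older paging one: VM_OBJECT_EVENT_PAGING_IN_PROGRESS waiters may only be woken once *both* paging_in_progress and activity_in_progress have drained. A minimal model of that wakeup rule in plain C, with a stub standing in for the kernel's vm_object_wakeup():

#include <assert.h>
#include <stdio.h>

struct toy_object {
	short paging_in_progress;   /* pager I/O in flight */
	short activity_in_progress; /* other activity pinning the object */
};

static void toy_wakeup(const char *event)
{
	printf("wakeup: %s\n", event);
}

/* Mirrors vm_object_activity_end(): the "all paging" event fires
 * only when both counters have reached zero. */
static void toy_activity_end(struct toy_object *o)
{
	assert(o->activity_in_progress > 0);
	o->activity_in_progress--;
	if (o->paging_in_progress == 0 && o->activity_in_progress == 0)
		toy_wakeup("PAGING_IN_PROGRESS");
}
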
@@ -699,20 +856,25 @@
 	(object)->paging_in_progress++;					\
 	MACRO_END
 
-#define		vm_object_paging_end(object) 				\
+#define		vm_object_paging_end(object)				\
 	MACRO_BEGIN							\
 	vm_object_lock_assert_exclusive((object));			\
 	assert((object)->paging_in_progress > 0);			\
-	if (--(object)->paging_in_progress == 0) {			\
-		vm_object_wakeup(object,				\
-				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS);	\
+	(object)->paging_in_progress--;					\
+	if ((object)->paging_in_progress == 0) {			\
+		vm_object_wakeup((object),				\
+				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
+		if ((object)->activity_in_progress == 0)		\
+			vm_object_wakeup((object),			\
+					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
 	}								\
 	MACRO_END
 
 #define	vm_object_paging_wait(object, interruptible)			\
 	MACRO_BEGIN							\
 	vm_object_lock_assert_exclusive((object));			\
-	while ((object)->paging_in_progress != 0) {			\
+	while ((object)->paging_in_progress != 0 ||			\
+	       (object)->activity_in_progress != 0) {			\
 		wait_result_t _wr;					\
 									\
 		_wr = vm_object_sleep((object),				\
@@ -724,6 +886,53 @@ extern kern_return_t vm_object_range_op(
 	}								\
 	MACRO_END
 
+#define	vm_object_paging_only_wait(object, interruptible)		\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	while ((object)->paging_in_progress != 0) {			\
+		wait_result_t _wr;					\
+									\
+		_wr = vm_object_sleep((object),				\
+				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
+				      (interruptible));			\
+									\
+		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
+		/*XXX break; */						\
+	}								\
+	MACRO_END
+
+
+#define vm_object_mapping_begin(object) 				\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	assert(! (object)->mapping_in_progress);			\
+	(object)->mapping_in_progress = TRUE;				\
+	MACRO_END
+
+#define vm_object_mapping_end(object)					\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	assert((object)->mapping_in_progress);				\
+	(object)->mapping_in_progress = FALSE;				\
+	vm_object_wakeup((object),					\
+			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);		\
+	MACRO_END
+
+#define vm_object_mapping_wait(object, interruptible)			\
+	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	while ((object)->mapping_in_progress) {				\
+		wait_result_t	_wr;					\
+									\
+		_wr = vm_object_sleep((object),				\
+				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
+				      (interruptible));			\
+		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
+		/*XXX break; */						\
+	}								\
+	assert(!(object)->mapping_in_progress);				\
+	MACRO_END
+
 
 
 #define OBJECT_LOCK_SHARED	0
@@ -733,11 +942,14 @@
 extern lck_grp_t	vm_object_lck_grp;
 extern lck_grp_attr_t	vm_object_lck_grp_attr;
 extern lck_attr_t	vm_object_lck_attr;
 extern lck_attr_t	kernel_object_lck_attr;
+extern lck_attr_t	compressor_object_lck_attr;
 
 extern vm_object_t	vm_pageout_scan_wants_object;
 
 extern void		vm_object_lock(vm_object_t);
 extern boolean_t	vm_object_lock_try(vm_object_t);
+extern boolean_t	_vm_object_lock_try(vm_object_t);
+extern boolean_t	vm_object_lock_avoid(vm_object_t);
 extern void		vm_object_lock_shared(vm_object_t);
 extern boolean_t	vm_object_lock_try_shared(vm_object_t);
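
The mapping_in_progress macros above implement a one-bit busy flag with event wakeup, all under the object lock. The same handshake modeled with a pthread mutex and condition variable; this is a user-space analogy, not the kernel's lock and sleep primitives:

#include <pthread.h>
#include <stdbool.h>

struct toy_object {
	pthread_mutex_t lock;
	pthread_cond_t  mapping_event; /* plays the role of VM_OBJECT_EVENT_MAPPING_IN_PROGRESS */
	bool            mapping_in_progress;
};

/* vm_object_mapping_wait() + vm_object_mapping_begin(), fused:
 * wait until no mapping is in progress, then claim the flag. */
static void toy_mapping_begin(struct toy_object *o)
{
	pthread_mutex_lock(&o->lock);
	while (o->mapping_in_progress)
		pthread_cond_wait(&o->mapping_event, &o->lock);
	o->mapping_in_progress = true;
	pthread_mutex_unlock(&o->lock);
}

/* vm_object_mapping_end(): clear the flag and wake every waiter. */
static void toy_mapping_end(struct toy_object *o)
{
	pthread_mutex_lock(&o->lock);
	o->mapping_in_progress = false;
	pthread_cond_broadcast(&o->mapping_event);
	pthread_mutex_unlock(&o->lock);
}
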
@@ -750,32 +962,38 @@ extern boolean_t	vm_object_lock_try_shared(vm_object_t);
 	(((object) == kernel_object ||					\
 	  (object) == vm_submap_object) ?				\
 	 &kernel_object_lck_attr :					\
-	 &vm_object_lck_attr))
+	 (((object) == compressor_object) ?				\
+	  &compressor_object_lck_attr :					\
+	  &vm_object_lck_attr)))
 #define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
 
 #define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
 #define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
-#define vm_object_lock_try_scan(object)	lck_rw_try_lock_exclusive(&(object)->Lock)
+#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
 
 /*
  * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
-#if DEBUG
+#if MACH_ASSERT || DEBUG
 #define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
 #define vm_object_lock_assert_shared(object)	\
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
 #define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
-#else  /* DEBUG */
+#else  /* MACH_ASSERT || DEBUG */
 #define vm_object_lock_assert_held(object)
 #define vm_object_lock_assert_shared(object)
 #define vm_object_lock_assert_exclusive(object)
-#endif /* DEBUG */
+#endif /* MACH_ASSERT || DEBUG */
 
 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
 
+extern void	vm_object_cache_add(vm_object_t);
+extern void	vm_object_cache_remove(vm_object_t);
+extern int	vm_object_cache_evict(int, int);
+
 #endif	/* _VM_VM_OBJECT_H_ */
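
For reference, the round/trunc macros near the end are the standard power-of-two alignment trick. Assuming 4 KiB pages (PAGE_MASK == 0xFFF) purely for illustration:

#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_MASK 0xFFFULL /* assumes 4 KiB pages, illustration only */

static uint64_t toy_round_page(uint64_t x) { return (x + TOY_PAGE_MASK) & ~TOY_PAGE_MASK; }
static uint64_t toy_trunc_page(uint64_t x) { return x & ~TOY_PAGE_MASK; }

int main(void)
{
	assert(toy_trunc_page(0x1234) == 0x1000); /* down to the page start */
	assert(toy_round_page(0x1234) == 0x2000); /* up to the next boundary */
	assert(toy_round_page(0x1000) == 0x1000); /* already aligned: unchanged */
	return 0;
}
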