X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..d9a64523371fa019c4575bb400cbbc3a50ac9903:/osfmk/vm/vm_object.h diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h index 94537ded9..ccd1de547 100644 --- a/osfmk/vm/vm_object.h +++ b/osfmk/vm/vm_object.h @@ -89,6 +89,7 @@ #include #include +#include #if VM_OBJECT_TRACKING #include @@ -96,6 +97,7 @@ extern void vm_object_tracking_init(void); extern boolean_t vm_object_tracking_inited; extern btlog_t *vm_object_tracking_btlog; +#define VM_OBJECT_TRACKING_NUM_RECORDS 50000 #define VM_OBJECT_TRACKING_BTDEPTH 7 #define VM_OBJECT_TRACKING_OP_CREATED 1 #define VM_OBJECT_TRACKING_OP_MODIFIED 2 @@ -103,7 +105,6 @@ extern btlog_t *vm_object_tracking_btlog; #endif /* VM_OBJECT_TRACKING */ struct vm_page; -struct vm_shared_region_slide_info; /* * Types defined: @@ -124,9 +125,10 @@ struct vm_object_fault_info { /* boolean_t */ stealth:1, /* boolean_t */ io_sync:1, /* boolean_t */ cs_bypass:1, + /* boolean_t */ pmap_cs_associated:1, /* boolean_t */ mark_zf_absent:1, /* boolean_t */ batch_pmap_op:1, - __vm_object_fault_info_unused_bits:26; + __vm_object_fault_info_unused_bits:25; int pmap_options; }; @@ -135,13 +137,28 @@ struct vm_object_fault_info { #define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan #define vo_shadow_offset vo_un2.vou_shadow_offset #define vo_cache_ts vo_un2.vou_cache_ts -#define vo_purgeable_owner vo_un2.vou_purgeable_owner -#define vo_slide_info vo_un2.vou_slide_info +#define vo_owner vo_un2.vou_owner struct vm_object { - queue_head_t memq; /* Resident memory */ + /* + * on 64 bit systems we pack the pointers hung off the memq. + * those pointers have to be able to point back to the memq. + * the packed pointers are required to be on a 64 byte boundary + * which means 2 things for the vm_object... (1) the memq + * struct has to be the first element of the structure so that + * we can control it's alignment... (2) the vm_object must be + * aligned on a 64 byte boundary... for static vm_object's + * this is accomplished via the 'aligned' attribute... for + * vm_object's in the zone pool, this is accomplished by + * rounding the size of the vm_object element to the nearest + * 64 byte size before creating the zone. + */ + vm_page_queue_head_t memq; /* Resident memory - must be first */ lck_rw_t Lock; /* Synchronization */ +#if DEVELOPMENT || DEBUG + thread_t Lock_owner; +#endif union { vm_object_size_t vou_size; /* Object size (only valid if internal) */ int vou_cache_pages_to_scan; /* pages yet to be visited in an @@ -151,12 +168,10 @@ struct vm_object { struct vm_page *memq_hint; int ref_count; /* Number of references */ -#if TASK_SWAPPER - int res_count; /* Residency references (swap)*/ -#endif /* TASK_SWAPPER */ unsigned int resident_page_count; /* number of resident pages */ - unsigned int wired_page_count; /* number of wired pages */ + unsigned int wired_page_count; /* number of wired pages + use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */ unsigned int reusable_page_count; struct vm_object *copy; /* Object that should receive @@ -173,11 +188,10 @@ struct vm_object { clock_sec_t vou_cache_ts; /* age of an external object * present in cache */ - task_t vou_purgeable_owner; /* If the purg'a'ble bits below are set - * to volatile/emtpy, this is the task - * that owns this purgeable object. - */ - struct vm_shared_region_slide_info *vou_slide_info; + task_t vou_owner; /* If the object is purgeable + * or has a "ledger_tag", this + * is the task that owns it. 
+ */ } vo_un2; memory_object_t pager; /* Where to get data */ @@ -240,15 +254,6 @@ struct vm_object { * therefore, managed by the * default memory manger) */ - /* boolean_t */ temporary:1, /* Permanent objects may be - * changed externally by the - * memory manager, and changes - * made in memory must be - * reflected back to the memory - * manager. Temporary objects - * lack both of these - * characteristics. - */ /* boolean_t */ private:1, /* magic device_pager object, * holds private pages only */ /* boolean_t */ pageout:1, /* pageout object. contains @@ -259,16 +264,11 @@ struct vm_object { /* boolean_t */ purgable:2, /* Purgable state. See * VM_PURGABLE_* */ + /* boolean_t */ purgeable_only_by_kernel:1, /* boolean_t */ purgeable_when_ripe:1, /* Purgeable when a token * becomes ripe. */ /* boolean_t */ shadowed:1, /* Shadow may exist */ - /* boolean_t */ advisory_pageout:1, - /* Instead of sending page - * via OOL, just notify - * pager that the kernel - * wants to discard it, page - * remains in object */ /* boolean_t */ true_share:1, /* This object is mapped * in more than one place @@ -308,24 +308,19 @@ struct vm_object { * memory rules w.r.t pmap * access bits. */ - /* boolean_t */ nophyscache:1; + /* boolean_t */ nophyscache:1, /* When mapped at the * pmap level, don't allow * primary caching. (for * I/O) */ - - + /* boolean_t */ _object5_unused_bits:1; queue_chain_t cached_list; /* Attachment point for the * list of objects cached as a * result of their can_persist * value */ - - queue_head_t msr_q; /* memory object synchronise - request queue */ - /* * the following fields are not protected by any locks * they are updated via atomic compare and swap @@ -335,25 +330,14 @@ struct vm_object { uint32_t pages_created; uint32_t pages_used; -#if MACH_PAGEMAP - vm_external_map_t existence_map; /* bitmap of pages written to - * backing storage */ -#endif /* MACH_PAGEMAP */ vm_offset_t cow_hint; /* last page present in */ /* shadow but not in object */ -#if MACH_ASSERT - struct vm_object *paging_object; /* object which pages to be - * swapped out are temporary - * put in current object - */ -#endif /* hold object lock when altering */ unsigned int wimg_bits:8, /* cache WIMG bits */ code_signed:1, /* pages are signed and should be validated; the signatures are stored with the pager */ - hashed:1, /* object/pager entered in hash */ transposed:1, /* object was transposed with another */ mapping_in_progress:1, /* pager being mapped/unmapped */ phantom_isssd:1, @@ -362,11 +346,29 @@ struct vm_object { all_reusable:1, blocked_access:1, set_cache_attr:1, - object_slid:1, + object_is_shared_cache:1, purgeable_queue_type:2, purgeable_queue_group:3, io_tracking:1, - __object2_unused_bits:7; /* for expansion */ + no_tag_update:1, /* */ +#if CONFIG_SECLUDED_MEMORY + eligible_for_secluded:1, + can_grab_secluded:1, +#else /* CONFIG_SECLUDED_MEMORY */ + __object3_unused_bits:2, +#endif /* CONFIG_SECLUDED_MEMORY */ +#if VM_OBJECT_ACCESS_TRACKING + access_tracking:1, +#else /* VM_OBJECT_ACCESS_TRACKING */ + __unused_access_tracking:1, +#endif /* VM_OBJECT_ACCESS_TRACKING */ + vo_ledger_tag:2, + __object2_unused_bits:2; /* for expansion */ + +#if VM_OBJECT_ACCESS_TRACKING + uint32_t access_tracking_reads; + uint32_t access_tracking_writes; +#endif /* VM_OBJECT_ACCESS_TRACKING */ uint8_t scan_collisions; vm_tag_t wire_tag; @@ -392,6 +394,11 @@ struct vm_object { #endif /* VM_PIP_DEBUG */ queue_chain_t objq; /* object queue - currently used for purgable queues */ + queue_chain_t task_objq; /* objects owned 
by task - protected by task lock */ + +#if !VM_TAG_ACTIVE_UPDATE + queue_chain_t wired_objq; +#endif /* !VM_TAG_ACTIVE_UPDATE */ #if DEBUG void *purgeable_owner_bt[16]; @@ -400,11 +407,26 @@ struct vm_object { #endif /* DEBUG */ }; +/* values for object->vo_ledger_tag */ +#define VM_OBJECT_LEDGER_TAG_NONE 0 +#define VM_OBJECT_LEDGER_TAG_NETWORK 1 +#define VM_OBJECT_LEDGER_TAG_MEDIA 2 +#define VM_OBJECT_LEDGER_TAG_RESERVED 3 + #define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \ ((object)->volatile_fault && \ ((object)->purgable == VM_PURGABLE_VOLATILE || \ (object)->purgable == VM_PURGABLE_EMPTY)) +#if VM_OBJECT_ACCESS_TRACKING +extern uint64_t vm_object_access_tracking_reads; +extern uint64_t vm_object_access_tracking_writes; +extern void vm_object_access_tracking(vm_object_t object, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *acess_tracking_writes); +#endif /* VM_OBJECT_ACCESS_TRACKING */ + extern vm_object_t kernel_object; /* the single kernel object */ @@ -419,64 +441,159 @@ unsigned int vm_object_absent_max; /* maximum number of absent pages # define VM_MSYNC_SYNCHRONIZING 1 # define VM_MSYNC_DONE 2 -struct msync_req { - queue_chain_t msr_q; /* object request queue */ - queue_chain_t req_q; /* vm_msync request queue */ - unsigned int flag; - vm_object_offset_t offset; - vm_object_size_t length; - vm_object_t object; /* back pointer */ - decl_lck_mtx_data(, msync_req_lock) /* Lock for this structure */ -}; - -typedef struct msync_req *msync_req_t; -#define MSYNC_REQ_NULL ((msync_req_t) 0) - extern lck_grp_t vm_map_lck_grp; extern lck_attr_t vm_map_lck_attr; -/* - * Macros to allocate and free msync_reqs - */ -#define msync_req_alloc(msr) \ - MACRO_BEGIN \ - (msr) = (msync_req_t)kalloc(sizeof(struct msync_req)); \ - lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \ - msr->flag = VM_MSYNC_INITIALIZED; \ - MACRO_END - -#define msync_req_free(msr) \ - MACRO_BEGIN \ - lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp); \ - kfree((msr), sizeof(struct msync_req)); \ - MACRO_END - -#define msr_lock(msr) lck_mtx_lock(&(msr)->msync_req_lock) -#define msr_unlock(msr) lck_mtx_unlock(&(msr)->msync_req_lock) +#ifndef VM_TAG_ACTIVE_UPDATE +#error VM_TAG_ACTIVE_UPDATE +#endif -#define VM_OBJECT_WIRED(object) \ - MACRO_BEGIN \ - if ((object)->purgable == VM_PURGABLE_DENY) \ - { \ +#if VM_TAG_ACTIVE_UPDATE +#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE") +#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE") +#else /* VM_TAG_ACTIVE_UPDATE */ +#define VM_OBJECT_WIRED_ENQUEUE(object) \ + MACRO_BEGIN \ lck_spin_lock(&vm_objects_wired_lock); \ - assert(!(object)->objq.next); \ - queue_enter(&vm_objects_wired, (object), vm_object_t, objq); \ + assert(!(object)->wired_objq.next); \ + assert(!(object)->wired_objq.prev); \ + queue_enter(&vm_objects_wired, (object), \ + vm_object_t, wired_objq); \ lck_spin_unlock(&vm_objects_wired_lock); \ + MACRO_END +#define VM_OBJECT_WIRED_DEQUEUE(object) \ + MACRO_BEGIN \ + if ((object)->wired_objq.next) { \ + lck_spin_lock(&vm_objects_wired_lock); \ + queue_remove(&vm_objects_wired, (object), \ + vm_object_t, wired_objq); \ + lck_spin_unlock(&vm_objects_wired_lock); \ + } \ + MACRO_END +#endif /* VM_TAG_ACTIVE_UPDATE */ + +#define VM_OBJECT_WIRED(object, tag) \ + MACRO_BEGIN \ + assert(VM_KERN_MEMORY_NONE != (tag)); \ + assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \ + (object)->wire_tag = (tag); \ + if (!VM_TAG_ACTIVE_UPDATE) { \ + VM_OBJECT_WIRED_ENQUEUE((object)); \ } 
\ MACRO_END -#define VM_OBJECT_UNWIRED(object) \ - MACRO_BEGIN \ - (object)->wire_tag = VM_KERN_MEMORY_NONE; \ - if (((object)->purgable == VM_PURGABLE_DENY) && (object)->objq.next) \ - { \ - lck_spin_lock(&vm_objects_wired_lock); \ - queue_remove(&vm_objects_wired, (object), vm_object_t, objq); \ - lck_spin_unlock(&vm_objects_wired_lock); \ - } \ +#define VM_OBJECT_UNWIRED(object) \ + MACRO_BEGIN \ + if (!VM_TAG_ACTIVE_UPDATE) { \ + VM_OBJECT_WIRED_DEQUEUE((object)); \ + } \ + if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \ + vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \ + (object)->wire_tag = VM_KERN_MEMORY_NONE; \ + } \ MACRO_END +// These two macros start & end a C block +#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \ + MACRO_BEGIN \ + { \ + int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag; + +#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \ + if (__wireddelta) { \ + boolean_t __overflow __assert_only = \ + os_add_overflow((object)->wired_page_count, __wireddelta, \ + &(object)->wired_page_count); \ + assert(!__overflow); \ + if (!(object)->pageout && !(object)->no_tag_update) { \ + if (__wireddelta > 0) { \ + assert (VM_KERN_MEMORY_NONE != (tag)); \ + if (VM_KERN_MEMORY_NONE == __waswired) { \ + VM_OBJECT_WIRED((object), (tag)); \ + } \ + vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ + } else if (VM_KERN_MEMORY_NONE != __waswired) { \ + assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \ + vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ + if (!(object)->wired_page_count) { \ + VM_OBJECT_UNWIRED((object)); \ + } \ + } \ + } \ + } \ + } \ + MACRO_END + +#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \ + __wireddelta += delta; \ + +#define VM_OBJECT_WIRED_PAGE_ADD(object, m) \ + if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++; + +#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m) \ + if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--; + + + +#define OBJECT_LOCK_SHARED 0 +#define OBJECT_LOCK_EXCLUSIVE 1 + +extern lck_grp_t vm_object_lck_grp; +extern lck_grp_attr_t vm_object_lck_grp_attr; +extern lck_attr_t vm_object_lck_attr; +extern lck_attr_t kernel_object_lck_attr; +extern lck_attr_t compressor_object_lck_attr; + +extern vm_object_t vm_pageout_scan_wants_object; + +extern void vm_object_lock(vm_object_t); +extern boolean_t vm_object_lock_try(vm_object_t); +extern boolean_t _vm_object_lock_try(vm_object_t); +extern boolean_t vm_object_lock_avoid(vm_object_t); +extern void vm_object_lock_shared(vm_object_t); +extern boolean_t vm_object_lock_yield_shared(vm_object_t); +extern boolean_t vm_object_lock_try_shared(vm_object_t); +extern void vm_object_unlock(vm_object_t); +extern boolean_t vm_object_lock_upgrade(vm_object_t); + +/* + * Object locking macros + */ + +#define vm_object_lock_init(object) \ + lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \ + (((object) == kernel_object || \ + (object) == vm_submap_object) ? \ + &kernel_object_lck_attr : \ + (((object) == compressor_object) ? \ + &compressor_object_lck_attr : \ + &vm_object_lck_attr))) +#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp) + +#define vm_object_lock_try_scan(object) _vm_object_lock_try(object) + +/* + * CAUTION: the following vm_object_lock_assert_held*() macros merely + * check if anyone is holding the lock, but the holder may not necessarily + * be the caller... 
+ */ +#if MACH_ASSERT || DEBUG +#define vm_object_lock_assert_held(object) \ + lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD) +#define vm_object_lock_assert_shared(object) \ + lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED) +#define vm_object_lock_assert_exclusive(object) \ + lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE) +#define vm_object_lock_assert_notheld(object) \ + lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD) +#else /* MACH_ASSERT || DEBUG */ +#define vm_object_lock_assert_held(object) +#define vm_object_lock_assert_shared(object) +#define vm_object_lock_assert_exclusive(object) +#define vm_object_lock_assert_notheld(object) +#endif /* MACH_ASSERT || DEBUG */ + /* * Declare procedures that operate on VM objects. @@ -597,7 +714,7 @@ __private_extern__ void vm_object_reuse_pages( vm_object_offset_t end_offset, boolean_t allow_partial_reuse); -__private_extern__ void vm_object_purge( +__private_extern__ uint64_t vm_object_purge( vm_object_t object, int flags); @@ -686,7 +803,8 @@ __private_extern__ kern_return_t vm_object_upl_request( upl_t *upl, upl_page_info_t *page_info, unsigned int *count, - upl_control_flags_t flags); + upl_control_flags_t flags, + vm_tag_t tag); __private_extern__ kern_return_t vm_object_transpose( vm_object_t object1, @@ -721,11 +839,10 @@ __private_extern__ kern_return_t vm_object_lock_request( -__private_extern__ vm_object_t vm_object_enter( +__private_extern__ vm_object_t vm_object_memory_object_associate( memory_object_t pager, + vm_object_t object, vm_object_size_t size, - boolean_t internal, - boolean_t init, boolean_t check_named); @@ -774,27 +891,6 @@ __private_extern__ void vm_object_reap_pages( #define REAP_DATA_FLUSH 3 #if CONFIG_FREEZE -struct default_freezer_handle; - -__private_extern__ kern_return_t -vm_object_pack( - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - unsigned int dirty_budget, - boolean_t *shared, - vm_object_t src_object, - struct default_freezer_handle *df_handle); - -__private_extern__ void -vm_object_pack_pages( - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - unsigned int dirty_budget, - vm_object_t src_object, - struct default_freezer_handle *df_handle); __private_extern__ void vm_object_compressed_freezer_pageout( @@ -804,9 +900,6 @@ __private_extern__ void vm_object_compressed_freezer_done( void); -__private_extern__ kern_return_t -vm_object_pagein( - vm_object_t object); #endif /* CONFIG_FREEZE */ __private_extern__ void @@ -841,37 +934,114 @@ extern void vm_io_reprioritize_init(void); #define VM_OBJECT_EVENT_UNBLOCKED 8 #define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9 -#define vm_object_assert_wait(object, event, interruptible) \ - (((object)->all_wanted |= 1 << (event)), \ - assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible))) - -#define vm_object_wait(object, event, interruptible) \ - (vm_object_assert_wait((object),(event),(interruptible)), \ - vm_object_unlock(object), \ - thread_block(THREAD_CONTINUE_NULL)) \ +#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */ -#define thread_sleep_vm_object(object, event, interruptible) \ - lck_rw_sleep(&(object)->Lock, LCK_SLEEP_PROMOTED_PRI, (event_t)(event), (interruptible)) +static __inline__ wait_result_t +vm_object_assert_wait( + vm_object_t object, + int event, + wait_interrupt_t interruptible) +{ + wait_result_t wr; + + vm_object_lock_assert_exclusive(object); + assert(event >= 0 && event <= 
VM_OBJECT_EVENT_MAX); + + object->all_wanted |= 1 << event; + wr = assert_wait((event_t)((vm_offset_t)object + event), + interruptible); + return wr; +} + +static __inline__ wait_result_t +vm_object_wait( + vm_object_t object, + int event, + wait_interrupt_t interruptible) +{ + wait_result_t wr; + + vm_object_assert_wait(object, event, interruptible); + vm_object_unlock(object); + wr = thread_block(THREAD_CONTINUE_NULL); + return wr; +} + +static __inline__ wait_result_t +thread_sleep_vm_object( + vm_object_t object, + event_t event, + wait_interrupt_t interruptible) +{ + wait_result_t wr; + +#if DEVELOPMENT || DEBUG + if (object->Lock_owner != current_thread()) + panic("thread_sleep_vm_object: now owner - %p\n", object); + object->Lock_owner = 0; +#endif + wr = lck_rw_sleep(&object->Lock, + LCK_SLEEP_PROMOTED_PRI, + event, + interruptible); +#if DEVELOPMENT || DEBUG + object->Lock_owner = current_thread(); +#endif + return wr; +} -#define vm_object_sleep(object, event, interruptible) \ - (((object)->all_wanted |= 1 << (event)), \ - thread_sleep_vm_object((object), \ - ((vm_offset_t)(object)+(event)), (interruptible))) +static __inline__ wait_result_t +vm_object_sleep( + vm_object_t object, + int event, + wait_interrupt_t interruptible) +{ + wait_result_t wr; + + vm_object_lock_assert_exclusive(object); + assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); + + object->all_wanted |= 1 << event; + wr = thread_sleep_vm_object(object, + (event_t)((vm_offset_t)object + event), + interruptible); + return wr; +} + +static __inline__ void +vm_object_wakeup( + vm_object_t object, + int event) +{ + vm_object_lock_assert_exclusive(object); + assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); + + if (object->all_wanted & (1 << event)) + thread_wakeup((event_t)((vm_offset_t)object + event)); + object->all_wanted &= ~(1 << event); +} + +static __inline__ void +vm_object_set_wanted( + vm_object_t object, + int event) +{ + vm_object_lock_assert_exclusive(object); + assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); -#define vm_object_wakeup(object, event) \ - MACRO_BEGIN \ - if ((object)->all_wanted & (1 << (event))) \ - thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \ - (object)->all_wanted &= ~(1 << (event)); \ - MACRO_END + object->all_wanted |= (1 << event); +} -#define vm_object_set_wanted(object, event) \ - MACRO_BEGIN \ - ((object)->all_wanted |= (1 << (event))); \ - MACRO_END +static __inline__ int +vm_object_wanted( + vm_object_t object, + int event) +{ + vm_object_lock_assert_held(object); + assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); -#define vm_object_wanted(object, event) \ - ((object)->all_wanted & (1 << (event))) + return object->all_wanted & (1 << event); +} /* * Routines implemented as macros @@ -1005,63 +1175,6 @@ extern void vm_io_reprioritize_init(void); -#define OBJECT_LOCK_SHARED 0 -#define OBJECT_LOCK_EXCLUSIVE 1 - -extern lck_grp_t vm_object_lck_grp; -extern lck_grp_attr_t vm_object_lck_grp_attr; -extern lck_attr_t vm_object_lck_attr; -extern lck_attr_t kernel_object_lck_attr; -extern lck_attr_t compressor_object_lck_attr; - -extern vm_object_t vm_pageout_scan_wants_object; - -extern void vm_object_lock(vm_object_t); -extern boolean_t vm_object_lock_try(vm_object_t); -extern boolean_t _vm_object_lock_try(vm_object_t); -extern boolean_t vm_object_lock_avoid(vm_object_t); -extern void vm_object_lock_shared(vm_object_t); -extern boolean_t vm_object_lock_try_shared(vm_object_t); - -/* - * Object locking macros - */ - -#define vm_object_lock_init(object) \ - 
lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \ - (((object) == kernel_object || \ - (object) == vm_submap_object) ? \ - &kernel_object_lck_attr : \ - (((object) == compressor_object) ? \ - &compressor_object_lck_attr : \ - &vm_object_lck_attr))) -#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp) - -#define vm_object_unlock(object) lck_rw_done(&(object)->Lock) -#define vm_object_lock_upgrade(object) lck_rw_lock_shared_to_exclusive(&(object)->Lock) -#define vm_object_lock_try_scan(object) _vm_object_lock_try(object) - -/* - * CAUTION: the following vm_object_lock_assert_held*() macros merely - * check if anyone is holding the lock, but the holder may not necessarily - * be the caller... - */ -#if MACH_ASSERT || DEBUG -#define vm_object_lock_assert_held(object) \ - lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD) -#define vm_object_lock_assert_shared(object) \ - lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED) -#define vm_object_lock_assert_exclusive(object) \ - lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE) -#define vm_object_lock_assert_notheld(object) \ - lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD) -#else /* MACH_ASSERT || DEBUG */ -#define vm_object_lock_assert_held(object) -#define vm_object_lock_assert_shared(object) -#define vm_object_lock_assert_exclusive(object) -#define vm_object_lock_assert_notheld(object) -#endif /* MACH_ASSERT || DEBUG */ - #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK)) #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK)) @@ -1069,4 +1182,27 @@ extern void vm_object_cache_add(vm_object_t); extern void vm_object_cache_remove(vm_object_t); extern int vm_object_cache_evict(int, int); +#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1) +#define VM_OBJECT_OWNER(object) \ + ((((object)->purgable == VM_PURGABLE_DENY && \ + (object)->vo_ledger_tag == 0) || \ + (object)->vo_owner == TASK_NULL) \ + ? TASK_NULL /* not owned */ \ + : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \ + ? kernel_task /* disowned -> kernel */ \ + : (object)->vo_owner)) /* explicit owner */ \ + +extern void vm_object_ledger_tag_ledgers( + vm_object_t object, + int *ledger_idx_volatile, + int *ledger_idx_nonvolatile, + int *ledger_idx_volatile_compressed, + int *ledger_idx_nonvolatile_compressed, + boolean_t *do_footprint); +extern kern_return_t vm_object_ownership_change( + vm_object_t object, + int ledger_tag, + task_t owner, + boolean_t task_objq_locked); + #endif /* _VM_VM_OBJECT_H_ */
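A minimal sketch of how a caller might bracket wired-page accounting with the VM_OBJECT_WIRED_PAGE_UPDATE_* macros added above. The function name and its arguments are hypothetical; the assumption is that the caller already holds the object lock exclusively, since the macros update wired_page_count and wire_tag in place.

static void
vm_object_wire_page_example(vm_object_t object, vm_page_t m, vm_tag_t tag)
{
	vm_object_lock_assert_exclusive(object);

	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
	/* ... wire the page at the pmap/page layer here ... */
	VM_OBJECT_WIRED_PAGE_ADD(object, m);		/* counts the page unless it is private or fictitious */
	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);	/* folds the delta into wired_page_count and the wire_tag ledgers */
}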
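The nested conditionals in the VM_OBJECT_OWNER() macro are easier to follow written out as an equivalent inline; this restatement is for illustration only and is not part of the header.

static inline task_t
vm_object_owner_example(vm_object_t object)
{
	if ((object->purgable == VM_PURGABLE_DENY &&
	     object->vo_ledger_tag == VM_OBJECT_LEDGER_TAG_NONE) ||
	    object->vo_owner == TASK_NULL) {
		return TASK_NULL;			/* not owned */
	}
	if (object->vo_owner == VM_OBJECT_OWNER_DISOWNED) {
		return kernel_task;			/* disowned objects are charged to the kernel */
	}
	return object->vo_owner;			/* explicit owner */
}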
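The inline vm_object_sleep()/vm_object_wakeup() pair replaces the old macros but keeps the same event protocol: set the wanted bit, sleep on (object + event), and have the waker clear the bit. A sketch of the usual wait loop follows, modeled on the vm_object_paging_wait()-style macros elsewhere in this header; the paging_in_progress field and the VM_OBJECT_EVENT_PAGING_IN_PROGRESS event are assumed from the full header rather than shown in this hunk.

static void
vm_object_paging_wait_example(vm_object_t object)
{
	vm_object_lock(object);		/* exclusive; vm_object_sleep() asserts this */
	while (object->paging_in_progress != 0) {
		/* drops the object lock while blocked and reacquires it before returning */
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
}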