X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ff6e181ae92fc6f1e89841290f461d1f2f9badd9..39236c6e673c41db228275375ab7fdb0f837b292:/osfmk/vm/vm_object.h

diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h
index 512bb1087..9c4fe0e32 100644
--- a/osfmk/vm/vm_object.h
+++ b/osfmk/vm/vm_object.h
@@ -1,14 +1,19 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -18,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -61,6 +66,8 @@
 #ifndef _VM_VM_OBJECT_H_
 #define _VM_VM_OBJECT_H_

+#include
+#include
 #include
 #include

@@ -73,31 +80,63 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#if MACH_PAGEMAP
 #include
-#endif /* MACH_PAGEMAP */
+
+#include

 struct vm_page;
+struct vm_shared_region_slide_info;

 /*
  *	Types defined:
  *
  *	vm_object_t		Virtual memory object.
+ *	vm_object_fault_info_t	Used to determine cluster size.
  */

+struct vm_object_fault_info {
+	int		interruptible;
+	uint32_t	user_tag;
+	vm_size_t	cluster_size;
+	vm_behavior_t	behavior;
+	vm_map_offset_t	lo_offset;
+	vm_map_offset_t	hi_offset;
+	unsigned int
+	/* boolean_t */	no_cache:1,
+	/* boolean_t */	stealth:1,
+	/* boolean_t */	io_sync:1,
+	/* boolean_t */	cs_bypass:1,
+	/* boolean_t */	mark_zf_absent:1,
+	/* boolean_t */	batch_pmap_op:1,
+		__vm_object_fault_info_unused_bits:26;
+};
+
+
+#define	vo_size			vo_un1.vou_size
+#define	vo_cache_pages_to_scan	vo_un1.vou_cache_pages_to_scan
+#define	vo_shadow_offset	vo_un2.vou_shadow_offset
+#define	vo_cache_ts		vo_un2.vou_cache_ts
+#define	vo_purgeable_owner	vo_un2.vou_purgeable_owner
+#define	vo_slide_info		vo_un2.vou_slide_info
+
 struct vm_object {
 	queue_head_t	memq;		/* Resident memory */
-	decl_mutex_data(, Lock)		/* Synchronization */
+	lck_rw_t	Lock;		/* Synchronization */
+
+	union {
+		vm_object_size_t vou_size;	/* Object size (only valid if internal) */
+		int		vou_cache_pages_to_scan;	/* pages yet to be visited in an
+								 * external object in cache
+								 */
+	} vo_un1;

-	vm_object_size_t	size;	/* Object size (only valid
-					 * if internal)
-					 */
 	struct vm_page	*memq_hint;
 	int		ref_count;	/* Number of references */
 #if	TASK_SWAPPER
@@ -105,6 +144,8 @@ struct vm_object {
 #endif	/* TASK_SWAPPER */
 	unsigned int	resident_page_count;
 					/* number of resident pages */
+	unsigned int	wired_page_count; /* number of wired pages */
+	unsigned int	reusable_page_count;

 	struct vm_object *copy;		/* Object that should receive
 					 * a copy of my changed pages,
@@ -114,7 +155,18 @@ struct vm_object {
 					 * copy_call.
 					 */
 	struct vm_object *shadow;	/* My shadow */
-	vm_object_offset_t shadow_offset; /* Offset into shadow */
+
+	union {
+		vm_object_offset_t vou_shadow_offset;	/* Offset into shadow */
+		clock_sec_t	vou_cache_ts;	/* age of an external object
+						 * present in cache
+						 */
+		task_t		vou_purgeable_owner;	/* If the purg'a'ble bits below are set
+							 * to volatile/emtpy, this is the task
+							 * that owns this purgeable object.
+							 */
+		struct vm_shared_region_slide_info *vou_slide_info;
+	} vo_un2;

 	memory_object_t	pager;		/* Where to get data */
 	vm_object_offset_t paging_offset; /* Offset into memory object */
@@ -123,15 +175,7 @@ struct vm_object {
 	memory_object_copy_strategy_t
 			copy_strategy;	/* How to handle data copy */

-	unsigned int	absent_count;	/* The number of pages that
-					 * have been requested but
-					 * not filled.  That is, the
-					 * number of pages for which
-					 * the "absent" attribute is
-					 * asserted.
-					 */
-
-	unsigned int	paging_in_progress;
+	short		paging_in_progress;
 					/* The memory object ports are
 					 * being used (e.g., for pagein
 					 * or pageout) -- don't change
@@ -139,6 +183,8 @@ struct vm_object {
 					 * don't collapse, destroy or
 					 * terminate)
 					 */
+	short		activity_in_progress;

 	unsigned int
 	/* boolean_t array */ all_wanted:11,	/* Bit array of "want to be
 					 * awakened" notations.  See
@@ -180,14 +226,12 @@ struct vm_object {
 	/* boolean_t */	alive:1,	/* Not yet terminated */

 	/* boolean_t */	purgable:2,	/* Purgable state.  See
-					 * VM_OBJECT_PURGABLE_*
-					 * items below.
+					 * VM_PURGABLE_*
 					 */
+	/* boolean_t */	purgeable_when_ripe:1, /* Purgeable when a token
+						  becomes ripe.
+						*/
 	/* boolean_t */	shadowed:1,	/* Shadow may exist */
-	/* boolean_t */	silent_overwrite:1,
-					/* Allow full page overwrite
-					 * without data_request if
-					 * page is absent */
 	/* boolean_t */	advisory_pageout:1,
 					/* Instead of sending page
 					 * via OOL, just notify
@@ -251,9 +295,15 @@ struct vm_object {
 	queue_head_t	msr_q;		/* memory object synchronise request queue */

+	/*
+	 * the following fields are not protected by any locks
+	 * they are updated via atomic compare and swap
+	 */
 	vm_object_offset_t last_alloc;	/* last allocation offset */
-	vm_object_offset_t sequential;	/* sequential access size */
-	vm_size_t	cluster_size;	/* size of paging cluster */
+	int		sequential;	/* sequential access size */
+
+	uint32_t	pages_created;
+	uint32_t	pages_used;
 #if	MACH_PAGEMAP
 	vm_external_map_t existence_map; /* bitmap of pages written to
 					  * backing storage */
@@ -266,15 +316,51 @@ struct vm_object {
 					 * put in current object
 					 */
 #endif
-	/* hold object lock when altering */
-	unsigned int			/* cache WIMG bits         */
-		wimg_bits:8,		/* wimg plus some expansion*/
-		not_in_use:24;
-#ifdef UPL_DEBUG
+	/* hold object lock when altering */
+	unsigned int
+		wimg_bits:8,		/* cache WIMG bits */
+		code_signed:1,		/* pages are signed and should be
+					   validated; the signatures are stored
+					   with the pager */
+		hashed:1,		/* object/pager entered in hash */
+		transposed:1,		/* object was transposed with another */
+		mapping_in_progress:1,	/* pager being mapped/unmapped */
+		volatile_empty:1,
+		volatile_fault:1,
+		all_reusable:1,
+		blocked_access:1,
+		set_cache_attr:1,
+		object_slid:1,
+		purgeable_queue_type:2,
+		purgeable_queue_group:3,
+		__object2_unused_bits:9;	/* for expansion */
+
+	uint32_t	scan_collisions;
+
+#if UPL_DEBUG
 	queue_head_t	uplq;		/* List of outstanding upls */
 #endif /* UPL_DEBUG */
+
+#ifdef VM_PIP_DEBUG
+/*
+ * Keep track of the stack traces for the first holders
+ * of a "paging_in_progress" reference for this VM object.
+ */
+#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
+#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
+	struct __pip_backtrace {
+		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
+	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
+#endif /* VM_PIP_DEBUG */
+
+	queue_chain_t	objq;	/* object queue - currently used for purgable queues */
 };

+#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \
+	((object)->volatile_fault && \
+	 ((object)->purgable == VM_PURGABLE_VOLATILE || \
+	  (object)->purgable == VM_PURGABLE_EMPTY))
+
 #define VM_PAGE_REMOVE(page) \
 	MACRO_BEGIN \
 	vm_page_t __page = (page); \
@@ -303,10 +389,13 @@ struct vm_object {
 		__object->memq_hint = __page; \
 	MACRO_END

-__private_extern__
+extern
 vm_object_t	kernel_object;		/* the single kernel object */

-__private_extern__
+extern
+vm_object_t	compressor_object;	/* the single compressor object */
+
+extern
 unsigned int	vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

@@ -321,27 +410,34 @@ struct msync_req {
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t		object;		/* back pointer */
-	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
+	decl_lck_mtx_data(,	msync_req_lock)	/* Lock for this structure */
 };

 typedef struct msync_req	*msync_req_t;
 #define MSYNC_REQ_NULL		((msync_req_t) 0)

+
+extern lck_grp_t	vm_map_lck_grp;
+extern lck_attr_t	vm_map_lck_attr;
+
 /*
  * Macros to allocate and free msync_reqs
  */
 #define msync_req_alloc(msr) \
-	MACRO_BEGIN \
+	MACRO_BEGIN \
	(msr) = (msync_req_t)kalloc(sizeof(struct msync_req)); \
-	mutex_init(&(msr)->msync_req_lock, 0); \
-	msr->flag = VM_MSYNC_INITIALIZED; \
-	MACRO_END
+	lck_mtx_init(&(msr)->msync_req_lock, &vm_map_lck_grp, &vm_map_lck_attr); \
+	msr->flag = VM_MSYNC_INITIALIZED; \
+	MACRO_END

 #define msync_req_free(msr) \
-	(kfree((msr), sizeof(struct msync_req)))
+	MACRO_BEGIN \
+	lck_mtx_destroy(&(msr)->msync_req_lock, &vm_map_lck_grp); \
+	kfree((msr), sizeof(struct msync_req)); \
+	MACRO_END

-#define msr_lock(msr)	mutex_lock(&(msr)->msync_req_lock)
-#define msr_unlock(msr)	mutex_unlock(&(msr)->msync_req_lock)
+#define msr_lock(msr)	lck_mtx_lock(&(msr)->msync_req_lock)
+#define msr_unlock(msr)	lck_mtx_unlock(&(msr)->msync_req_lock)

 /*
  *	Declare procedures that operate on VM objects.
@@ -351,6 +447,10 @@ __private_extern__ void	vm_object_bootstrap(void);

 __private_extern__ void	vm_object_init(void);

+__private_extern__ void	vm_object_init_lck_grp(void);
+
+__private_extern__ void	vm_object_reaper_init(void);
+
 __private_extern__ vm_object_t	vm_object_allocate(
					vm_object_size_t	size);

@@ -376,12 +476,26 @@ __private_extern__ void	vm_object_res_deallocate(
 #endif	/* TASK_SWAPPER */

 #define vm_object_reference_locked(object) \
-MACRO_BEGIN \
-	vm_object_t RLObject = (object); \
-	assert((RLObject)->ref_count > 0); \
-	(RLObject)->ref_count++; \
-	vm_object_res_reference(RLObject); \
-MACRO_END
+	MACRO_BEGIN \
+	vm_object_t RLObject = (object); \
+	vm_object_lock_assert_exclusive(object); \
+	assert((RLObject)->ref_count > 0); \
+	(RLObject)->ref_count++; \
+	assert((RLObject)->ref_count > 1); \
+	vm_object_res_reference(RLObject); \
+	MACRO_END
+
+
+#define vm_object_reference_shared(object) \
+	MACRO_BEGIN \
+	vm_object_t RLObject = (object); \
+	vm_object_lock_assert_shared(object); \
+	assert((RLObject)->ref_count > 0); \
+	OSAddAtomic(1, &(RLObject)->ref_count); \
+	assert((RLObject)->ref_count > 0); \
+	/* XXX we would need an atomic version of the following ... */ \
+	vm_object_res_reference(RLObject); \
+	MACRO_END

 __private_extern__ void	vm_object_reference(
@@ -393,8 +507,8 @@ __private_extern__ void	vm_object_reference(
	MACRO_BEGIN \
	vm_object_t RObject = (object); \
	if (RObject) { \
-		vm_object_lock(RObject); \
-		vm_object_reference_locked(RObject); \
+		vm_object_lock_shared(RObject); \
+		vm_object_reference_shared(RObject); \
		vm_object_unlock(RObject); \
	} \
	MACRO_END
@@ -416,6 +530,15 @@ __private_extern__ void	vm_object_pmap_protect(
					vm_map_offset_t		pmap_start,
					vm_prot_t		prot);

+__private_extern__ void	vm_object_pmap_protect_options(
+					vm_object_t		object,
+					vm_object_offset_t	offset,
+					vm_object_size_t	size,
+					pmap_t			pmap,
+					vm_map_offset_t		pmap_start,
+					vm_prot_t		prot,
+					int			options);
+
 __private_extern__ void	vm_object_page_remove(
					vm_object_t		object,
					vm_object_offset_t	start,
@@ -425,9 +548,16 @@ __private_extern__ void	vm_object_deactivate_pages(
					vm_object_t		object,
					vm_object_offset_t	offset,
					vm_object_size_t	size,
-					boolean_t		kill_page);
+					boolean_t		kill_page,
+					boolean_t		reusable_page);
+
+__private_extern__ void	vm_object_reuse_pages(
+					vm_object_t		object,
+					vm_object_offset_t	start_offset,
+					vm_object_offset_t	end_offset,
+					boolean_t		allow_partial_reuse);

-__private_extern__ unsigned int	vm_object_purge(
+__private_extern__ void	vm_object_purge(
					vm_object_t		object);

 __private_extern__ kern_return_t vm_object_purgable_control(
@@ -435,6 +565,13 @@ __private_extern__ kern_return_t vm_object_purgable_control(
					vm_purgable_t		control,
					int			*state);

+__private_extern__ kern_return_t vm_object_get_page_counts(
+					vm_object_t		object,
+					vm_object_offset_t	offset,
+					vm_object_size_t	size,
+					unsigned int		*resident_page_count,
+					unsigned int		*dirty_page_count);
+
 __private_extern__ boolean_t	vm_object_coalesce(
					vm_object_t		prev_object,
					vm_object_t		next_object,
@@ -450,7 +587,8 @@ __private_extern__ boolean_t	vm_object_shadow(

 __private_extern__ void	vm_object_collapse(
					vm_object_t		object,
-					vm_object_offset_t	offset);
+					vm_object_offset_t	offset,
+					boolean_t		can_bypass);

 __private_extern__ boolean_t	vm_object_copy_quickly(
				vm_object_t		*_object,
@@ -471,13 +609,14 @@ __private_extern__ kern_return_t	vm_object_copy_slowly(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
-				int			interruptible,
+				boolean_t		interruptible,
				vm_object_t		*_result_object);

 __private_extern__ vm_object_t	vm_object_copy_delayed(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
-				vm_object_size_t	size);
+				vm_object_size_t	size,
+				boolean_t		src_object_shared);

@@ -488,6 +627,9 @@ __private_extern__ kern_return_t	vm_object_destroy(
 __private_extern__ void	vm_object_pager_create(
					vm_object_t	object);

+__private_extern__ void	vm_object_compressor_pager_create(
+					vm_object_t	object);
+
 __private_extern__ void	vm_object_page_map(
				vm_object_t		object,
				vm_object_offset_t	offset,
@@ -546,14 +688,12 @@ __private_extern__ vm_object_t	vm_object_enter(
					boolean_t	check_named);

-/*
- * Purgable object state.
- */
-
-#define VM_OBJECT_NONPURGABLE		0	/* not a purgable object */
-#define VM_OBJECT_PURGABLE_NONVOLATILE	1	/* non-volatile purgable object */
-#define VM_OBJECT_PURGABLE_VOLATILE	2	/* volatile (but intact) purgable object */
-#define VM_OBJECT_PURGABLE_EMPTY	3	/* volatile purgable object that has been emptied */
+__private_extern__ void	vm_object_cluster_size(
+					vm_object_t		object,
+					vm_object_offset_t	*start,
+					vm_size_t		*length,
+					vm_object_fault_info_t	fault_info,
+					uint32_t		*io_streaming);

 __private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t		object,
@@ -561,10 +701,69 @@ __private_extern__ kern_return_t vm_object_populate_with_private(
	ppnum_t			phys_page,
	vm_size_t		size);

-__private_extern__ kern_return_t adjust_vm_object_cache(
+__private_extern__ void vm_object_change_wimg_mode(
+	vm_object_t		object,
+	unsigned int		wimg_mode);
+
+extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

+extern kern_return_t vm_object_page_op(
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	int			ops,
+	ppnum_t			*phys_entry,
+	int			*flags);
+
+extern kern_return_t vm_object_range_op(
+	vm_object_t		object,
+	vm_object_offset_t	offset_beg,
+	vm_object_offset_t	offset_end,
+	int			ops,
+	uint32_t		*range);
+
+
+__private_extern__ void	vm_object_reap_pages(
+					vm_object_t	object,
+					int		reap_type);
+#define REAP_REAP	0
+#define REAP_TERMINATE	1
+#define REAP_PURGEABLE	2
+#define REAP_DATA_FLUSH	3
+
+#if CONFIG_FREEZE
+struct default_freezer_handle;
+
+__private_extern__ kern_return_t
+vm_object_pack(
+	unsigned int	*purgeable_count,
+	unsigned int	*wired_count,
+	unsigned int	*clean_count,
+	unsigned int	*dirty_count,
+	unsigned int	dirty_budget,
+	boolean_t	*shared,
+	vm_object_t	src_object,
+	struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pack_pages(
+	unsigned int	*wired_count,
+	unsigned int	*clean_count,
+	unsigned int	*dirty_count,
+	unsigned int	dirty_budget,
+	vm_object_t	src_object,
+	struct default_freezer_handle *df_handle);
+
+__private_extern__ void
+vm_object_pageout(
+	vm_object_t	object);
+
+__private_extern__ kern_return_t
+vm_object_pagein(
+	vm_object_t	object);
+#endif /* CONFIG_FREEZE */
+
 /*
  *	Event waiting handling
  */
@@ -572,11 +771,13 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 #define	VM_OBJECT_EVENT_INITIALIZED		0
 #define	VM_OBJECT_EVENT_PAGER_READY		1
 #define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
-#define	VM_OBJECT_EVENT_ABSENT_COUNT		3
+#define	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS	3
 #define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
 #define	VM_OBJECT_EVENT_UNCACHING		5
 #define	VM_OBJECT_EVENT_COPY_CALL		6
 #define	VM_OBJECT_EVENT_CACHING			7
+#define VM_OBJECT_EVENT_UNBLOCKED		8
+#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS	9

 #define vm_object_assert_wait(object, event, interruptible) \
	(((object)->all_wanted |= 1 << (event)), \
@@ -588,7 +789,7 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
	 thread_block(THREAD_CONTINUE_NULL)) \

 #define thread_sleep_vm_object(object, event, interruptible) \
-	thread_sleep_mutex((event_t)(event), &(object)->Lock, (interruptible))
+	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))

 #define vm_object_sleep(object, event, interruptible) \
	(((object)->all_wanted |= 1 << (event)), \
@@ -613,24 +814,67 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 /*
  *	Routines implemented as macros
  */
+#ifdef VM_PIP_DEBUG
+#include
+#define VM_PIP_DEBUG_BEGIN(object) \
+	MACRO_BEGIN \
+	int pip = ((object)->paging_in_progress + \
+		   (object)->activity_in_progress); \
+	if (pip < VM_PIP_DEBUG_MAX_REFS) { \
+		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
+				   VM_PIP_DEBUG_STACK_FRAMES); \
+	} \
+	MACRO_END
+#else /* VM_PIP_DEBUG */
+#define VM_PIP_DEBUG_BEGIN(object)
+#endif /* VM_PIP_DEBUG */

-#define vm_object_paging_begin(object) \
+#define vm_object_activity_begin(object) \
	MACRO_BEGIN \
+	vm_object_lock_assert_exclusive((object)); \
+	assert((object)->paging_in_progress >= 0); \
+	VM_PIP_DEBUG_BEGIN((object)); \
+	(object)->activity_in_progress++; \
+	MACRO_END
+
+#define vm_object_activity_end(object) \
+	MACRO_BEGIN \
+	vm_object_lock_assert_exclusive((object)); \
+	assert((object)->activity_in_progress > 0); \
+	(object)->activity_in_progress--; \
+	if ((object)->paging_in_progress == 0 && \
+	    (object)->activity_in_progress == 0) \
+		vm_object_wakeup((object), \
+				 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
+	MACRO_END
+
+#define vm_object_paging_begin(object) \
+	MACRO_BEGIN \
+	vm_object_lock_assert_exclusive((object)); \
+	assert((object)->paging_in_progress >= 0); \
+	VM_PIP_DEBUG_BEGIN((object)); \
	(object)->paging_in_progress++; \
	MACRO_END

-#define vm_object_paging_end(object) \
+#define vm_object_paging_end(object) \
	MACRO_BEGIN \
-	assert((object)->paging_in_progress != 0); \
-	if (--(object)->paging_in_progress == 0) { \
-		vm_object_wakeup(object, \
-			VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
+	vm_object_lock_assert_exclusive((object)); \
+	assert((object)->paging_in_progress > 0); \
+	(object)->paging_in_progress--; \
+	if ((object)->paging_in_progress == 0) { \
+		vm_object_wakeup((object), \
+				 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
+		if ((object)->activity_in_progress == 0) \
+			vm_object_wakeup((object), \
+					 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	} \
	MACRO_END

 #define vm_object_paging_wait(object, interruptible) \
	MACRO_BEGIN \
-	while ((object)->paging_in_progress != 0) { \
+	vm_object_lock_assert_exclusive((object)); \
+	while ((object)->paging_in_progress != 0 || \
+	       (object)->activity_in_progress != 0) { \
		wait_result_t _wr; \
 \
		_wr = vm_object_sleep((object), \
@@ -642,31 +886,114 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
	} \
	MACRO_END

-#define vm_object_absent_assert_wait(object, interruptible) \
+#define vm_object_paging_only_wait(object, interruptible) \
	MACRO_BEGIN \
-	vm_object_assert_wait( (object), \
-			       VM_OBJECT_EVENT_ABSENT_COUNT, \
-			       (interruptible)); \
+	vm_object_lock_assert_exclusive((object)); \
+	while ((object)->paging_in_progress != 0) { \
+		wait_result_t _wr; \
+ \
+		_wr = vm_object_sleep((object), \
+				      VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
+				      (interruptible)); \
+ \
+		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
+		/*XXX break; */ \
+	} \
	MACRO_END

-#define vm_object_absent_release(object) \
+#define vm_object_mapping_begin(object) \
	MACRO_BEGIN \
-	(object)->absent_count--; \
+	vm_object_lock_assert_exclusive((object)); \
+	assert(! (object)->mapping_in_progress); \
+	(object)->mapping_in_progress = TRUE; \
+	MACRO_END
+
+#define vm_object_mapping_end(object) \
+	MACRO_BEGIN \
+	vm_object_lock_assert_exclusive((object)); \
+	assert((object)->mapping_in_progress); \
+	(object)->mapping_in_progress = FALSE; \
	vm_object_wakeup((object), \
-			 VM_OBJECT_EVENT_ABSENT_COUNT); \
+			 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \
	MACRO_END

+#define vm_object_mapping_wait(object, interruptible) \
+	MACRO_BEGIN \
+	vm_object_lock_assert_exclusive((object)); \
+	while ((object)->mapping_in_progress) { \
+		wait_result_t _wr; \
+ \
+		_wr = vm_object_sleep((object), \
+				      VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
+				      (interruptible)); \
+		/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\
+		/*XXX break; */ \
+	} \
+	assert(!(object)->mapping_in_progress); \
+	MACRO_END
+
+
+
+#define OBJECT_LOCK_SHARED	0
+#define OBJECT_LOCK_EXCLUSIVE	1
+
+extern lck_grp_t	vm_object_lck_grp;
+extern lck_grp_attr_t	vm_object_lck_grp_attr;
+extern lck_attr_t	vm_object_lck_attr;
+extern lck_attr_t	kernel_object_lck_attr;
+extern lck_attr_t	compressor_object_lck_attr;
+
+extern vm_object_t	vm_pageout_scan_wants_object;
+
+extern void		vm_object_lock(vm_object_t);
+extern boolean_t	vm_object_lock_try(vm_object_t);
+extern boolean_t	_vm_object_lock_try(vm_object_t);
+extern boolean_t	vm_object_lock_avoid(vm_object_t);
+extern void		vm_object_lock_shared(vm_object_t);
+extern boolean_t	vm_object_lock_try_shared(vm_object_t);
+
 /*
  *	Object locking macros
  */

-#define vm_object_lock_init(object)	mutex_init(&(object)->Lock, 0)
-#define vm_object_lock(object)		mutex_lock(&(object)->Lock)
-#define vm_object_unlock(object)	mutex_unlock(&(object)->Lock)
-#define vm_object_lock_try(object)	mutex_try(&(object)->Lock)
+#define vm_object_lock_init(object) \
+	lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
+		    (((object) == kernel_object || \
+		      (object) == vm_submap_object) ? \
+		     &kernel_object_lck_attr : \
+		     (((object) == compressor_object) ? \
+		      &compressor_object_lck_attr : \
+		      &vm_object_lck_attr)))
+#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
+
+#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
+#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
+#define vm_object_lock_try_scan(object)	_vm_object_lock_try(object)
+
+/*
+ * CAUTION: the following vm_object_lock_assert_held*() macros merely
+ * check if anyone is holding the lock, but the holder may not necessarily
+ * be the caller...
+ */
+#if MACH_ASSERT || DEBUG
+#define vm_object_lock_assert_held(object) \
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
+#define vm_object_lock_assert_shared(object) \
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
+#define vm_object_lock_assert_exclusive(object) \
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
+#else  /* MACH_ASSERT || DEBUG */
+#define vm_object_lock_assert_held(object)
+#define vm_object_lock_assert_shared(object)
+#define vm_object_lock_assert_exclusive(object)
+#endif /* MACH_ASSERT || DEBUG */

 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

+extern void	vm_object_cache_add(vm_object_t);
+extern void	vm_object_cache_remove(vm_object_t);
+extern int	vm_object_cache_evict(int, int);
+
 #endif	/* _VM_VM_OBJECT_H_ */
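
The diff above replaces the object's mutex with a reader/writer lock and splits the old paging_in_progress accounting into paired paging_in_progress / activity_in_progress counters; waiters on VM_OBJECT_EVENT_PAGING_IN_PROGRESS are only released once both counters have drained. The following is a minimal user-space sketch of that wait/wakeup protocol, not kernel code: the toy_* names and the pthread mutex/condition variable are stand-ins chosen only to make the example self-contained, with the mutex playing the role of the exclusively held object lock and the condition variable the role of vm_object_wakeup()/vm_object_sleep().

/* Toy model of the paired in-progress accounting; caller holds o->lock
 * throughout, mirroring vm_object_lock_assert_exclusive() in the macros. */
#include <assert.h>
#include <pthread.h>

struct toy_object {
	pthread_mutex_t	lock;		/* stands in for the object's lck_rw_t */
	pthread_cond_t	paging_done;	/* stands in for VM_OBJECT_EVENT_PAGING_IN_PROGRESS */
	short		paging_in_progress;
	short		activity_in_progress;
};

static void toy_activity_begin(struct toy_object *o)
{
	assert(o->activity_in_progress >= 0);
	o->activity_in_progress++;
}

static void toy_activity_end(struct toy_object *o)
{
	assert(o->activity_in_progress > 0);
	o->activity_in_progress--;
	/* Waiters are only released once BOTH counters have drained. */
	if (o->paging_in_progress == 0 && o->activity_in_progress == 0)
		pthread_cond_broadcast(&o->paging_done);
}

static void toy_paging_begin(struct toy_object *o)
{
	assert(o->paging_in_progress >= 0);
	o->paging_in_progress++;
}

static void toy_paging_end(struct toy_object *o)
{
	assert(o->paging_in_progress > 0);
	o->paging_in_progress--;
	if (o->paging_in_progress == 0 && o->activity_in_progress == 0)
		pthread_cond_broadcast(&o->paging_done);
}

/* Analogue of vm_object_paging_wait(): sleep until both counters drain. */
static void toy_paging_wait(struct toy_object *o)
{
	while (o->paging_in_progress != 0 || o->activity_in_progress != 0)
		pthread_cond_wait(&o->paging_done, &o->lock);
}

In the header itself the same logic lives in the vm_object_activity_begin/end, vm_object_paging_begin/end and vm_object_paging_wait/paging_only_wait macros, with lck_rw_sleep() and vm_object_wakeup() taking the place of the condition variable used here; the sketch omits the separate VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS wakeup for brevity.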