X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..cf7d32b81c573a0536dc4da4157f9c26f8d0bed3:/osfmk/vm/vm_object.h

diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h
index a598722ab..a093bf2f4 100644
--- a/osfmk/vm/vm_object.h
+++ b/osfmk/vm/vm_object.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -78,6 +78,7 @@
 #include <mach/machine/vm_types.h>
 #include <kern/queue.h>
 #include <kern/lock.h>
+#include <kern/locks.h>
 #include <kern/assert.h>
 #include <kern/misc_protos.h>
 #include <kern/macro_help.h>
@@ -94,11 +95,24 @@ struct vm_page;
  *	Types defined:
  *
  *	vm_object_t		Virtual memory object.
+ *	vm_object_fault_info_t	Used to determine cluster size.
  */
 
+struct vm_object_fault_info {
+	int		interruptible;
+	uint32_t	user_tag;
+	vm_size_t	cluster_size;
+	vm_behavior_t	behavior;
+	vm_map_offset_t	lo_offset;
+	vm_map_offset_t	hi_offset;
+	boolean_t	no_cache;
+};
+
+
+
 struct vm_object {
 	queue_head_t		memq;		/* Resident memory */
-	decl_mutex_data(,	Lock)		/* Synchronization */
+	lck_rw_t		Lock;		/* Synchronization */
 
 	vm_object_size_t	size;		/* Object size (only valid
 						 * if internal)
@@ -128,15 +142,7 @@ struct vm_object {
 	memory_object_copy_strategy_t
 				copy_strategy;	/* How to handle data copy */
 
-	unsigned int		absent_count;	/* The number of pages that
-						 * have been requested but
-						 * not filled.  That is, the
-						 * number of pages for which
-						 * the "absent" attribute is
-						 * asserted.
-						 */
-
-	unsigned int		paging_in_progress;
+	int			paging_in_progress;
 						/* The memory object ports are
 						 * being used (e.g., for pagein
 						 * or pageout) -- don't change
@@ -185,8 +191,7 @@ struct vm_object {
 	/* boolean_t */		alive:1,	/* Not yet terminated */
 
 	/* boolean_t */		purgable:2,	/* Purgable state.  See
-						 * VM_OBJECT_PURGABLE_*
-						 * items below.
+						 * VM_PURGABLE_*
 						 */
 	/* boolean_t */		shadowed:1,	/* Shadow may exist */
 	/* boolean_t */		silent_overwrite:1,
@@ -256,9 +261,15 @@ struct vm_object {
 	queue_head_t		msr_q;		/* memory object synchronise
 						   request queue */
 
+	/*
+	 * the following fields are not protected by any locks
+	 * they are updated via atomic compare and swap
+	 */
 	vm_object_offset_t	last_alloc;	/* last allocation offset */
-	vm_object_offset_t	sequential;	/* sequential access size */
-	vm_size_t		cluster_size;	/* size of paging cluster */
+	int			sequential;	/* sequential access size */
+
+	uint32_t		pages_created;
+	uint32_t		pages_used;
 #if	MACH_PAGEMAP
 	vm_external_map_t	existence_map;	/* bitmap of pages written to
 						 * backing storage */
@@ -271,13 +282,31 @@ struct vm_object {
 						 * put in current object
 						 */
 #endif
-	/* hold object lock when altering */
-	unsigned	int			/* cache WIMG bits */
-			wimg_bits:8,		/* wimg plus some expansion*/
-			not_in_use:24;
+	/* hold object lock when altering */
+	unsigned	int
+		wimg_bits:8,		/* cache WIMG bits */
+		code_signed:1,		/* pages are signed and should be
+					   validated; the signatures are stored
+					   with the pager */
+		not_in_use:23;		/* for expansion */
+
 #ifdef	UPL_DEBUG
 	queue_head_t		uplq;		/* List of outstanding upls */
 #endif /* UPL_DEBUG */
+
+#ifdef VM_PIP_DEBUG
+/*
+ * Keep track of the stack traces for the first holders
+ * of a "paging_in_progress" reference for this VM object.
+ */
+#define VM_PIP_DEBUG_STACK_FRAMES	25	/* depth of each stack trace */
+#define VM_PIP_DEBUG_MAX_REFS		10	/* track that many references */
+	struct __pip_backtrace {
+		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
+	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
+#endif	/* VM_PIP_DEBUG */
+
+	queue_chain_t		objq;	/* object queue - currently used for purgable queues */
 };
 
 #define VM_PAGE_REMOVE(page)						\
@@ -352,10 +381,12 @@ typedef struct msync_req	*msync_req_t;
  *	Declare procedures that operate on VM objects.
  */
 
-__private_extern__ void		vm_object_bootstrap(void);
+__private_extern__ void		vm_object_bootstrap(void) __attribute__((section("__TEXT, initcode")));
 
 __private_extern__ void		vm_object_init(void);
 
+__private_extern__ void		vm_object_init_lck_grp(void);
+
 __private_extern__ void		vm_object_reaper_init(void);
 
 __private_extern__ vm_object_t	vm_object_allocate(
@@ -383,12 +414,26 @@ __private_extern__ void	vm_object_res_deallocate(
 #endif	/* TASK_SWAPPER */
 
 #define vm_object_reference_locked(object)		\
-MACRO_BEGIN						\
-	vm_object_t RLObject = (object);		\
-	assert((RLObject)->ref_count > 0);		\
-	(RLObject)->ref_count++;			\
-	vm_object_res_reference(RLObject);		\
-MACRO_END
+	MACRO_BEGIN					\
+	vm_object_t RLObject = (object);		\
+	vm_object_lock_assert_exclusive(object);	\
+	assert((RLObject)->ref_count > 0);		\
+	(RLObject)->ref_count++;			\
+	assert((RLObject)->ref_count > 1);		\
+	vm_object_res_reference(RLObject);		\
+	MACRO_END
+
+
+#define vm_object_reference_shared(object)				\
+	MACRO_BEGIN							\
+	vm_object_t RLObject = (object);				\
+	vm_object_lock_assert_shared(object);				\
+	assert((RLObject)->ref_count > 0);				\
+	OSAddAtomic(1, (SInt32 *)&(RLObject)->ref_count);		\
+	assert((RLObject)->ref_count > 1);				\
+	/* XXX we would need an atomic version of the following ... */	\
+	vm_object_res_reference(RLObject);				\
+	MACRO_END
 
 
 __private_extern__ void		vm_object_reference(
@@ -457,7 +502,8 @@ __private_extern__ boolean_t	vm_object_shadow(
 
 __private_extern__ void		vm_object_collapse(
 				vm_object_t	object,
-				vm_object_offset_t offset);
+				vm_object_offset_t offset,
+				boolean_t	can_bypass);
 
 __private_extern__ boolean_t	vm_object_copy_quickly(
 				vm_object_t	*_object,
@@ -484,7 +530,8 @@ __private_extern__ kern_return_t	vm_object_copy_slowly(
 __private_extern__ vm_object_t	vm_object_copy_delayed(
 				vm_object_t	src_object,
 				vm_object_offset_t src_offset,
-				vm_object_size_t size);
+				vm_object_size_t size,
+				boolean_t	src_object_shared);
 
 
 
@@ -553,14 +600,11 @@ __private_extern__ vm_object_t	vm_object_enter(
 				boolean_t	check_named);
 
 
-/*
- * Purgable object state.
- */
-
-#define VM_OBJECT_NONPURGABLE		0 /* not a purgable object */
-#define VM_OBJECT_PURGABLE_NONVOLATILE	1 /* non-volatile purgable object */
-#define VM_OBJECT_PURGABLE_VOLATILE	2 /* volatile (but intact) purgable object */
-#define VM_OBJECT_PURGABLE_EMPTY	3 /* volatile purgable object that has been emptied */
+__private_extern__ void	vm_object_cluster_size(
+				vm_object_t		object,
+				vm_object_offset_t	*start,
+				vm_size_t		*length,
+				vm_object_fault_info_t	fault_info);
 
 __private_extern__ kern_return_t vm_object_populate_with_private(
 	vm_object_t		object,
@@ -568,10 +612,24 @@ __private_extern__ kern_return_t vm_object_populate_with_private(
 	ppnum_t			phys_page,
 	vm_size_t		size);
 
-__private_extern__ kern_return_t adjust_vm_object_cache(
+extern kern_return_t adjust_vm_object_cache(
 	vm_size_t oval,
 	vm_size_t nval);
 
+extern kern_return_t vm_object_page_op(
+	vm_object_t		object,
+	vm_object_offset_t	offset,
+	int			ops,
+	ppnum_t			*phys_entry,
+	int			*flags);
+
+extern kern_return_t vm_object_range_op(
+	vm_object_t		object,
+	vm_object_offset_t	offset_beg,
+	vm_object_offset_t	offset_end,
+	int			ops,
+	int			*range);
+
 /*
  *	Event waiting handling
  */
@@ -579,7 +637,6 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 #define	VM_OBJECT_EVENT_INITIALIZED		0
 #define	VM_OBJECT_EVENT_PAGER_READY		1
 #define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
-#define	VM_OBJECT_EVENT_ABSENT_COUNT		3
 #define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
 #define	VM_OBJECT_EVENT_UNCACHING		5
 #define	VM_OBJECT_EVENT_COPY_CALL		6
@@ -595,7 +652,7 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 			thread_block(THREAD_CONTINUE_NULL))		\
 
 #define thread_sleep_vm_object(object, event, interruptible)		\
-	thread_sleep_mutex((event_t)(event), &(object)->Lock, (interruptible))
+	lck_rw_sleep(&(object)->Lock, LCK_SLEEP_DEFAULT, (event_t)(event), (interruptible))
 
 #define vm_object_sleep(object, event, interruptible)			\
 	(((object)->all_wanted |= 1 << (event)),			\
@@ -620,15 +677,32 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 /*
  *	Routines implemented as macros
  */
+#ifdef VM_PIP_DEBUG
+#include <libkern/OSDebug.h>
+#define VM_PIP_DEBUG_BEGIN(object)					\
+	MACRO_BEGIN							\
+	if ((object)->paging_in_progress < VM_PIP_DEBUG_MAX_REFS) {	\
+		int pip = (object)->paging_in_progress;			\
+		(void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
+				   VM_PIP_DEBUG_STACK_FRAMES);		\
+	}								\
+	MACRO_END
+#else	/* VM_PIP_DEBUG */
+#define VM_PIP_DEBUG_BEGIN(object)
+#endif	/* VM_PIP_DEBUG */
 
 #define		vm_object_paging_begin(object) 				\
 	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
+	assert((object)->paging_in_progress >= 0);			\
+	VM_PIP_DEBUG_BEGIN((object));					\
 	(object)->paging_in_progress++;					\
 	MACRO_END
 
 #define		vm_object_paging_end(object) 				\
 	MACRO_BEGIN							\
-	assert((object)->paging_in_progress != 0);			\
+	vm_object_lock_assert_exclusive((object));			\
+	assert((object)->paging_in_progress > 0);			\
 	if (--(object)->paging_in_progress == 0) {			\
 		vm_object_wakeup(object,				\
 			VM_OBJECT_EVENT_PAGING_IN_PROGRESS);		\
@@ -637,6 +711,7 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 
 #define		vm_object_paging_wait(object, interruptible)		\
 	MACRO_BEGIN							\
+	vm_object_lock_assert_exclusive((object));			\
 	while ((object)->paging_in_progress != 0) {			\
 		wait_result_t	_wr;					\
 									\
@@ -649,29 +724,56 @@ __private_extern__ kern_return_t adjust_vm_object_cache(
 	}								\
 	MACRO_END
 
-#define vm_object_absent_assert_wait(object, interruptible)		\
-	MACRO_BEGIN							\
-	vm_object_assert_wait(	(object),				\
-			VM_OBJECT_EVENT_ABSENT_COUNT,			\
-			(interruptible));				\
-	MACRO_END
-#define vm_object_absent_release(object)				\
-	MACRO_BEGIN							\
-	(object)->absent_count--;					\
-	vm_object_wakeup((object),					\
-			 VM_OBJECT_EVENT_ABSENT_COUNT);			\
-	MACRO_END
+
+
+#define OBJECT_LOCK_SHARED	0
+#define OBJECT_LOCK_EXCLUSIVE	1
+
+extern lck_grp_t	vm_object_lck_grp;
+extern lck_grp_attr_t	vm_object_lck_grp_attr;
+extern lck_attr_t	vm_object_lck_attr;
+extern lck_attr_t	kernel_object_lck_attr;
+
+extern vm_object_t	vm_pageout_scan_wants_object;
+
+extern void		vm_object_lock(vm_object_t);
+extern boolean_t	vm_object_lock_try(vm_object_t);
+extern void		vm_object_lock_shared(vm_object_t);
+extern boolean_t	vm_object_lock_try_shared(vm_object_t);
 
 /*
  *	Object locking macros
  */
 
-#define vm_object_lock_init(object)	mutex_init(&(object)->Lock, 0)
-#define vm_object_lock(object)		mutex_lock(&(object)->Lock)
-#define vm_object_unlock(object)	mutex_unlock(&(object)->Lock)
-#define vm_object_lock_try(object)	mutex_try(&(object)->Lock)
+#define vm_object_lock_init(object)					\
+	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,		\
+		    (((object) == kernel_object ||			\
+		      (object) == vm_submap_object) ?			\
+		     &kernel_object_lck_attr :				\
+		     &vm_object_lck_attr))
+#define vm_object_lock_destroy(object)	lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
+
+#define vm_object_unlock(object)	lck_rw_done(&(object)->Lock)
+#define vm_object_lock_upgrade(object)	lck_rw_lock_shared_to_exclusive(&(object)->Lock)
+#define vm_object_lock_try_scan(object)	lck_rw_try_lock_exclusive(&(object)->Lock)
+
+/*
+ * CAUTION: the following vm_object_lock_assert_held*() macros merely
+ * check if anyone is holding the lock, but the holder may not necessarily
+ * be the caller...
+ */
+#if DEBUG
+#define vm_object_lock_assert_held(object)		\
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
+#define vm_object_lock_assert_shared(object)		\
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
+#define vm_object_lock_assert_exclusive(object)	\
+	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
+#else	/* DEBUG */
+#define vm_object_lock_assert_held(object)
+#define vm_object_lock_assert_shared(object)
+#define vm_object_lock_assert_exclusive(object)
+#endif	/* DEBUG */
 
 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
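
Usage sketch (illustrative only, not part of the commit): with the object lock converted from a mutex to an rw-lock, a read-mostly path can take the lock shared and upgrade only when it needs to modify the object. vm_object_lock_upgrade() expands to lck_rw_lock_shared_to_exclusive(), which releases the lock entirely when the upgrade fails, so the caller must re-acquire it exclusively and revalidate anything it observed under the shared lock. The function below and the field it updates are hypothetical.

static void
example_update_object(vm_object_t object)
{
	vm_object_lock_shared(object);
	if (!object->code_signed) {
		if (vm_object_lock_upgrade(object) == FALSE) {
			/* upgrade failed: the shared lock was dropped */
			vm_object_lock(object);		/* re-take exclusive */
		}
		/* revalidate under the exclusive lock, then update */
		object->code_signed = TRUE;		/* hypothetical update */
	}
	vm_object_unlock(object);
}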
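
A second sketch (again illustrative, under the same caveats): the discipline implied by the new asserts in vm_object_paging_begin() and vm_object_paging_end(). Both macros now require the object lock held exclusively; the paging-in-progress reference is what keeps the object alive while the lock is dropped around the actual I/O, and under VM_PIP_DEBUG the first few holders get their backtraces recorded via VM_PIP_DEBUG_BEGIN(). The function name is hypothetical.

static void
example_page_io(vm_object_t object)
{
	vm_object_lock(object);		/* exclusive, per the new asserts */
	vm_object_paging_begin(object);	/* take a paging-in-progress reference */
	vm_object_unlock(object);

	/* ... issue the pagein/pageout without the object lock held ... */

	vm_object_lock(object);
	vm_object_paging_end(object);	/* wakes PAGING_IN_PROGRESS waiters at zero */
	vm_object_unlock(object);
}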