X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..d190cdc3f5544636abb56dc1874be391d3e1b148:/osfmk/vm/vm_fault.c diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c index 963f6345e..21d449cde 100644 --- a/osfmk/vm/vm_fault.c +++ b/osfmk/vm/vm_fault.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -64,7 +64,7 @@ #include #include -#include +#include #include #include @@ -73,6 +73,7 @@ #include #include /* For memory_object_data_{request,unlock} */ +#include #include #include @@ -86,11 +87,11 @@ #include #include #include +#include -#include - +#include +#include #include -#include #include #include #include @@ -98,44 +99,84 @@ #include #include #include +#include +#include +#include /* Needed by some vm_page.h macros */ +#include + +#include +#include +#include -#include +#include /* for struct timespec */ #define VM_FAULT_CLASSIFY 0 -#define VM_FAULT_STATIC_CONFIG 1 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */ -unsigned int vm_object_absent_max = 50; +unsigned int vm_object_pagein_throttle = 16; + +/* + * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which + * kicks in when swap space runs out. 64-bit programs have massive address spaces and can leak enormous amounts + * of memory if they're buggy and can run the system completely out of swap space. If this happens, we + * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps + * keep the UI active so that the user has a chance to kill the offending task before the system + * completely hangs. + * + * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied + * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold + * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a + * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again. + */ + +extern void throttle_lowpri_io(int); -int vm_fault_debug = 0; +extern struct vnode *vnode_pager_lookup_vnode(memory_object_t); -#if !VM_FAULT_STATIC_CONFIG -boolean_t vm_fault_dirty_handling = FALSE; -boolean_t vm_fault_interruptible = FALSE; -boolean_t software_reference_bits = TRUE; -#endif +uint64_t vm_hard_throttle_threshold; -#if MACH_KDB -extern struct db_watchpoint *db_watchpoint_list; -#endif /* MACH_KDB */ +#define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \ + (vm_page_free_count < vm_page_throttle_limit && \ + proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED)) + + +#define HARD_THROTTLE_DELAY 5000 /* 5000 us == 5 ms */ +#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ + +#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 +#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 + + +boolean_t current_thread_aborted(void); + /* Forward declarations of internal routines. 
*/ -extern kern_return_t vm_fault_wire_fast( +static kern_return_t vm_fault_wire_fast( vm_map_t map, vm_map_offset_t va, + vm_prot_t prot, vm_map_entry_t entry, pmap_t pmap, - vm_map_offset_t pmap_addr); - -extern void vm_fault_continue(void); - -extern void vm_fault_copy_cleanup( + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); + +static kern_return_t vm_fault_internal( + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t caller_prot, + boolean_t change_wiring, + int interruptible, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); + +static void vm_fault_copy_cleanup( vm_page_t page, vm_page_t top_page); -extern void vm_fault_copy_dst_cleanup( +static void vm_fault_copy_dst_cleanup( vm_page_t page); #if VM_FAULT_CLASSIFY @@ -146,6 +187,21 @@ extern void vm_fault_classify(vm_object_t object, extern void vm_fault_classify_init(void); #endif +unsigned long vm_pmap_enter_blocked = 0; +unsigned long vm_pmap_enter_retried = 0; + +unsigned long vm_cs_validates = 0; +unsigned long vm_cs_revalidates = 0; +unsigned long vm_cs_query_modified = 0; +unsigned long vm_cs_validated_dirtied = 0; +unsigned long vm_cs_bitmap_validated = 0; + +void vm_pre_fault(vm_map_offset_t); + +extern char *kdp_compressor_decompressed_page; +extern addr64_t kdp_compressor_decompressed_page_paddr; +extern ppnum_t kdp_compressor_decompressed_page_ppnum; + /* * Routine: vm_fault_init * Purpose: @@ -154,6 +210,54 @@ extern void vm_fault_classify_init(void); void vm_fault_init(void) { + int i, vm_compressor_temp; + boolean_t need_default_val = TRUE; + /* + * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is + * computed as a percentage of available memory, and the percentage used is scaled inversely with + * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems + * and reduce the value down to 10% for very large memory configurations. This helps give us a + * definition of a memory hog that makes more sense relative to the amount of ram in the machine. + * The formula here simply uses the number of gigabytes of ram to adjust the percentage. + */ + + vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100; + + /* + * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry. + */ + + if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) { + for ( i = 0; i < VM_PAGER_MAX_MODES; i++) { + if (vm_compressor_temp > 0 && + ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) { + need_default_val = FALSE; + vm_compressor_mode = vm_compressor_temp; + break; + } + } + if (need_default_val) + printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp); + } + if (need_default_val) { + /* If no boot arg or incorrect boot arg, try device tree. 
*/ + PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode)); + } + PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count)); + + if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof (vm_compressor_temp))) + vm_compressor_immediate_preferred_override = TRUE; + else { + if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp))) + vm_compressor_immediate_preferred_override = TRUE; + } + if (vm_compressor_immediate_preferred_override == TRUE) { + if (vm_compressor_temp) + vm_compressor_immediate_preferred = TRUE; + else + vm_compressor_immediate_preferred = FALSE; + } + printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode); } /* @@ -172,18 +276,19 @@ vm_fault_init(void) */ void vm_fault_cleanup( - register vm_object_t object, - register vm_page_t top_page) + vm_object_t object, + vm_page_t top_page) { vm_object_paging_end(object); - vm_object_unlock(object); + vm_object_unlock(object); if (top_page != VM_PAGE_NULL) { - object = top_page->object; - vm_object_lock(object); - VM_PAGE_FREE(top_page); - vm_object_paging_end(object); - vm_object_unlock(object); + object = VM_PAGE_OBJECT(top_page); + + vm_object_lock(object); + VM_PAGE_FREE(top_page); + vm_object_paging_end(object); + vm_object_unlock(object); } } @@ -205,29 +310,147 @@ struct { #define CLUSTER_STAT(clause) #endif /* MACH_CLUSTER_STATS */ -/* XXX - temporary */ -boolean_t vm_allow_clustered_pagein = FALSE; -int vm_pagein_cluster_used = 0; - #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) boolean_t vm_page_deactivate_behind = TRUE; /* - * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior + * default sizes given VM_BEHAVIOR_DEFAULT reference behavior */ -int vm_default_ahead = 0; -int vm_default_behind = MAX_UPL_TRANSFER; +#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 +#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */ + /* we use it to size an array on the stack */ + +int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; + +#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) + +/* + * vm_page_is_sequential + * + * Determine if sequential access is in progress + * in accordance with the behavior specified. + * Update state to indicate current access pattern. 
+ * + * object must have at least the shared lock held + */ +static +void +vm_fault_is_sequential( + vm_object_t object, + vm_object_offset_t offset, + vm_behavior_t behavior) +{ + vm_object_offset_t last_alloc; + int sequential; + int orig_sequential; + + last_alloc = object->last_alloc; + sequential = object->sequential; + orig_sequential = sequential; + + switch (behavior) { + case VM_BEHAVIOR_RANDOM: + /* + * reset indicator of sequential behavior + */ + sequential = 0; + break; + + case VM_BEHAVIOR_SEQUENTIAL: + if (offset && last_alloc == offset - PAGE_SIZE_64) { + /* + * advance indicator of sequential behavior + */ + if (sequential < MAX_SEQUENTIAL_RUN) + sequential += PAGE_SIZE; + } else { + /* + * reset indicator of sequential behavior + */ + sequential = 0; + } + break; + + case VM_BEHAVIOR_RSEQNTL: + if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { + /* + * advance indicator of sequential behavior + */ + if (sequential > -MAX_SEQUENTIAL_RUN) + sequential -= PAGE_SIZE; + } else { + /* + * reset indicator of sequential behavior + */ + sequential = 0; + } + break; + + case VM_BEHAVIOR_DEFAULT: + default: + if (offset && last_alloc == (offset - PAGE_SIZE_64)) { + /* + * advance indicator of sequential behavior + */ + if (sequential < 0) + sequential = 0; + if (sequential < MAX_SEQUENTIAL_RUN) + sequential += PAGE_SIZE; + + } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) { + /* + * advance indicator of sequential behavior + */ + if (sequential > 0) + sequential = 0; + if (sequential > -MAX_SEQUENTIAL_RUN) + sequential -= PAGE_SIZE; + } else { + /* + * reset indicator of sequential behavior + */ + sequential = 0; + } + break; + } + if (sequential != orig_sequential) { + if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { + /* + * if someone else has already updated object->sequential + * don't bother trying to update it or object->last_alloc + */ + return; + } + } + /* + * I'd like to do this with a OSCompareAndSwap64, but that + * doesn't exist for PPC... however, it shouldn't matter + * that much... last_alloc is maintained so that we can determine + * if a sequential access pattern is taking place... if only + * one thread is banging on this object, no problem with the unprotected + * update... if 2 or more threads are banging away, we run the risk of + * someone seeing a mangled update... however, in the face of multiple + * accesses, no sequential access pattern can develop anyway, so we + * haven't lost any real info. + */ + object->last_alloc = offset; +} + + +int vm_page_deactivate_behind_count = 0; /* - * vm_page_deactivate_behind + * vm_page_deactivate_behind * - * Determine if sequential access is in progress - * in accordance with the behavior specified. If - * so, compute a potential page to deactive and - * deactivate it. + * Determine if sequential access is in progress + * in accordance with the behavior specified. If + * so, compute a potential page to deactivate and + * deactivate it. * - * The object must be locked. + * object must be locked. 
+ * + * return TRUE if we actually deactivate a page */ static boolean_t @@ -236,89 +459,369 @@ vm_fault_deactivate_behind( vm_object_offset_t offset, vm_behavior_t behavior) { - vm_page_t m; + int n; + int pages_in_run = 0; + int max_pages_in_run = 0; + int sequential_run; + int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + vm_object_offset_t run_offset = 0; + vm_object_offset_t pg_offset = 0; + vm_page_t m; + vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; + pages_in_run = 0; #if TRACEFAULTPAGE dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ #endif - if (object == kernel_object) { + if (object == kernel_object || vm_page_deactivate_behind == FALSE) { /* * Do not deactivate pages from the kernel object: they * are not intended to become pageable. + * or we've disabled the deactivate behind mechanism */ return FALSE; } - + if ((sequential_run = object->sequential)) { + if (sequential_run < 0) { + sequential_behavior = VM_BEHAVIOR_RSEQNTL; + sequential_run = 0 - sequential_run; + } else { + sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + } + } switch (behavior) { case VM_BEHAVIOR_RANDOM: - object->sequential = PAGE_SIZE_64; - m = VM_PAGE_NULL; break; case VM_BEHAVIOR_SEQUENTIAL: - if (offset && - object->last_alloc == offset - PAGE_SIZE_64) { - object->sequential += PAGE_SIZE_64; - m = vm_page_lookup(object, offset - PAGE_SIZE_64); - } else { - object->sequential = PAGE_SIZE_64; /* reset */ - m = VM_PAGE_NULL; + if (sequential_run >= (int)PAGE_SIZE) { + run_offset = 0 - PAGE_SIZE_64; + max_pages_in_run = 1; } break; case VM_BEHAVIOR_RSEQNTL: - if (object->last_alloc && - object->last_alloc == offset + PAGE_SIZE_64) { - object->sequential += PAGE_SIZE_64; - m = vm_page_lookup(object, offset + PAGE_SIZE_64); - } else { - object->sequential = PAGE_SIZE_64; /* reset */ - m = VM_PAGE_NULL; + if (sequential_run >= (int)PAGE_SIZE) { + run_offset = PAGE_SIZE_64; + max_pages_in_run = 1; } break; case VM_BEHAVIOR_DEFAULT: default: - if (offset && - object->last_alloc == offset - PAGE_SIZE_64) { - vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; - - object->sequential += PAGE_SIZE_64; - m = (offset >= behind && - object->sequential >= behind) ? - vm_page_lookup(object, offset - behind) : - VM_PAGE_NULL; - } else if (object->last_alloc && - object->last_alloc == offset + PAGE_SIZE_64) { - vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; - - object->sequential += PAGE_SIZE_64; - m = (offset < -behind && - object->sequential >= behind) ? 
- vm_page_lookup(object, offset + behind) : - VM_PAGE_NULL; - } else { - object->sequential = PAGE_SIZE_64; - m = VM_PAGE_NULL; + { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; + + /* + * determine if the run of sequential accesss has been + * long enough on an object with default access behavior + * to consider it for deactivation + */ + if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) { + /* + * the comparisons between offset and behind are done + * in this kind of odd fashion in order to prevent wrap around + * at the end points + */ + if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { + if (offset >= behind) { + run_offset = 0 - behind; + pg_offset = PAGE_SIZE_64; + max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; + } + } else { + if (offset < -behind) { + run_offset = behind; + pg_offset = 0 - PAGE_SIZE_64; + max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; + } + } } break; } + } + for (n = 0; n < max_pages_in_run; n++) { + m = vm_page_lookup(object, offset + run_offset + (n * pg_offset)); - object->last_alloc = offset; + if (m && !m->laundry && !m->busy && !m->no_cache && (m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->fictitious && !m->absent) { + page_run[pages_in_run++] = m; + + /* + * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise... + * + * a TLB flush isn't really needed here since at worst we'll miss the reference bit being + * updated in the PTE if a remote processor still has this mapping cached in its TLB when the + * new reference happens. If no futher references happen on the page after that remote TLB flushes + * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue + * by pageout_scan, which is just fine since the last reference would have happened quite far + * in the past (TLB caches don't hang around for very long), and of course could just as easily + * have happened before we did the deactivate_behind. 
+ */ + pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); + } + } + if (pages_in_run) { + vm_page_lockspin_queues(); + + for (n = 0; n < pages_in_run; n++) { + + m = page_run[n]; - if (m) { - if (!m->busy) { - vm_page_lock_queues(); - vm_page_deactivate(m); - vm_page_unlock_queues(); + vm_page_deactivate_internal(m, FALSE); + + vm_page_deactivate_behind_count++; #if TRACEFAULTPAGE dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif } + vm_page_unlock_queues(); + return TRUE; } return FALSE; } +#if (DEVELOPMENT || DEBUG) +uint32_t vm_page_creation_throttled_hard = 0; +uint32_t vm_page_creation_throttled_soft = 0; +uint64_t vm_page_creation_throttle_avoided = 0; +#endif /* DEVELOPMENT || DEBUG */ + +static int +vm_page_throttled(boolean_t page_kept) +{ + clock_sec_t elapsed_sec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + + thread_t thread = current_thread(); + + if (thread->options & TH_OPT_VMPRIV) + return (0); + + if (thread->t_page_creation_throttled) { + thread->t_page_creation_throttled = 0; + + if (page_kept == FALSE) + goto no_throttle; + } + if (NEED_TO_HARD_THROTTLE_THIS_TASK()) { +#if (DEVELOPMENT || DEBUG) + thread->t_page_creation_throttled_hard++; + OSAddAtomic(1, &vm_page_creation_throttled_hard); +#endif /* DEVELOPMENT || DEBUG */ + return (HARD_THROTTLE_DELAY); + } + + if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) && + thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) { + + if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) { +#if (DEVELOPMENT || DEBUG) + OSAddAtomic64(1, &vm_page_creation_throttle_avoided); +#endif + goto no_throttle; + } + clock_get_system_microtime(&tv_sec, &tv_usec); + + elapsed_sec = tv_sec - thread->t_page_creation_time; + + if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS || + (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) { + + if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) { + /* + * we'll reset our stats to give a well behaved app + * that was unlucky enough to accumulate a bunch of pages + * over a long period of time a chance to get out of + * the throttled state... we reset the counter and timestamp + * so that if it stays under the rate limit for the next second + * it will be back in our good graces... if it exceeds it, it + * will remain in the throttled state + */ + thread->t_page_creation_time = tv_sec; + thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1); + } + ++vm_page_throttle_count; + + thread->t_page_creation_throttled = 1; + + if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) { +#if (DEVELOPMENT || DEBUG) + thread->t_page_creation_throttled_hard++; + OSAddAtomic(1, &vm_page_creation_throttled_hard); +#endif /* DEVELOPMENT || DEBUG */ + return (HARD_THROTTLE_DELAY); + } else { +#if (DEVELOPMENT || DEBUG) + thread->t_page_creation_throttled_soft++; + OSAddAtomic(1, &vm_page_creation_throttled_soft); +#endif /* DEVELOPMENT || DEBUG */ + return (SOFT_THROTTLE_DELAY); + } + } + thread->t_page_creation_time = tv_sec; + thread->t_page_creation_count = 0; + } +no_throttle: + thread->t_page_creation_count++; + + return (0); +} + + +/* + * check for various conditions that would + * prevent us from creating a ZF page... 
+ * cleanup is based on being called from vm_fault_page + * + * object must be locked + * object == m->object + */ +static vm_fault_return_t +vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle) +{ + int throttle_delay; + + if (object->shadow_severed || + VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) { + /* + * Either: + * 1. the shadow chain was severed, + * 2. the purgeable object is volatile or empty and is marked + * to fault on access while volatile. + * Just have to return an error at this point + */ + if (m != VM_PAGE_NULL) + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_MEMORY_ERROR); + } + if (vm_backing_store_low) { + /* + * are we protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. + */ + if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { + + if (m != VM_PAGE_NULL) + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + + assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); + + thread_block(THREAD_CONTINUE_NULL); + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_RETRY); + } + } + if (page_throttle == TRUE) { + if ((throttle_delay = vm_page_throttled(FALSE))) { + /* + * we're throttling zero-fills... + * treat this as if we couldn't grab a page + */ + if (m != VM_PAGE_NULL) + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + + VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + + delay(throttle_delay); + + if (current_thread_aborted()) { + thread_interrupt_level(interruptible_state); + return VM_FAULT_INTERRUPTED; + } + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_MEMORY_SHORTAGE); + } + } + return (VM_FAULT_SUCCESS); +} + + +/* + * do the work to zero fill a page and + * inject it into the correct paging queue + * + * m->object must be locked + * page queue lock must NOT be held + */ +static int +vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) +{ + int my_fault = DBG_ZERO_FILL_FAULT; + vm_object_t object; + + object = VM_PAGE_OBJECT(m); + + /* + * This is is a zero-fill page fault... + * + * Checking the page lock is a waste of + * time; this page was absent, so + * it can't be page locked by a pager. + * + * we also consider it undefined + * with respect to instruction + * execution. i.e. it is the responsibility + * of higher layers to call for an instruction + * sync after changing the contents and before + * sending a program into this area. 
We + * choose this approach for performance + */ + m->pmapped = TRUE; + + m->cs_validated = FALSE; + m->cs_tainted = FALSE; + m->cs_nx = FALSE; + + if (no_zero_fill == TRUE) { + my_fault = DBG_NZF_PAGE_FAULT; + + if (m->absent && m->busy) + return (my_fault); + } else { + vm_page_zero_fill(m); + + VM_STAT_INCR(zero_fill_count); + DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL); + } + assert(!m->laundry); + assert(object != kernel_object); + //assert(m->pageq.next == 0 && m->pageq.prev == 0); + + if (!VM_DYNAMIC_PAGING_ENABLED() && + (object->purgable == VM_PURGABLE_DENY || + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE )) { + + vm_page_lockspin_queues(); + + if (!VM_DYNAMIC_PAGING_ENABLED()) { + assert(!VM_PAGE_WIRED(m)); + + /* + * can't be on the pageout queue since we don't + * have a pager to try and clean to + */ + vm_page_queues_remove(m, TRUE); + vm_page_check_pageable_safe(m); + vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq); + m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q; + vm_page_throttled_count++; + } + vm_page_unlock_queues(); + } + return (my_fault); +} + + /* * Routine: vm_fault_page * Purpose: @@ -328,12 +831,10 @@ vm_fault_deactivate_behind( * Additional arguments: * The required permissions for the page is given * in "fault_type". Desired permissions are included - * in "protection". The minimum and maximum valid offsets - * within the object for the relevant map entry are - * passed in "lo_offset" and "hi_offset" respectively and - * the expected page reference pattern is passed in "behavior". - * These three parameters are used to determine pagein cluster - * limits. + * in "protection". + * fault_info is passed along to determine pagein cluster + * limits... it contains the expected reference pattern, + * cluster size if available, etc... * * If the desired page is known to be resident (for * example, because it was previously wired down), asserting @@ -362,7 +863,15 @@ vm_fault_deactivate_behind( * be destroyed when this guarantee is no longer required. * The "result_page" is also left busy. It is not removed * from the pageout queues. + * Special Case: + * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the + * fault succeeded but there's no VM page (i.e. the VM object + * does not actually hold VM pages, but device memory or + * large pages). The object is still locked and we still hold a + * paging_in_progress reference. */ +unsigned int vm_fault_page_blocked_access = 0; +unsigned int vm_fault_page_forced_retry = 0; vm_fault_return_t vm_fault_page( @@ -371,14 +880,11 @@ vm_fault_page( vm_object_offset_t first_offset, /* Offset into object */ vm_prot_t fault_type, /* What access is requested */ boolean_t must_be_resident,/* Must page be resident? */ - int interruptible, /* how may fault be interrupted? */ - vm_map_offset_t lo_offset, /* Map entry start */ - vm_map_offset_t hi_offset, /* Map entry end */ - vm_behavior_t behavior, /* Page reference behavior */ + boolean_t caller_lookup, /* caller looked up page */ /* Modifies in place: */ vm_prot_t *protection, /* Protection for mapping */ - /* Returns: */ vm_page_t *result_page, /* Page found, if successful */ + /* Returns: */ vm_page_t *top_page, /* Page in top object, if * not result_page. 
*/ int *type_of_fault, /* if non-null, fill in with type of fault @@ -389,153 +895,75 @@ vm_fault_page( boolean_t data_supply, /* treat as data_supply if * it is a write fault and a full * page is provided */ - vm_map_t map, - __unused vm_map_offset_t vaddr) + vm_object_fault_info_t fault_info) { - register vm_page_t m; - register vm_object_t object; - register vm_object_offset_t offset; vm_page_t first_m; vm_object_t next_object; vm_object_t copy_object; boolean_t look_for_page; + boolean_t force_fault_retry = FALSE; vm_prot_t access_required = fault_type; vm_prot_t wants_copy_flag; - vm_object_size_t length; - vm_object_offset_t cluster_start, cluster_end; CLUSTER_STAT(int pages_at_higher_offsets;) CLUSTER_STAT(int pages_at_lower_offsets;) - kern_return_t wait_result; + kern_return_t wait_result; boolean_t interruptible_state; - boolean_t bumped_pagein = FALSE; - + boolean_t data_already_requested = FALSE; + vm_behavior_t orig_behavior; + vm_size_t orig_cluster_size; + vm_fault_return_t error; + int my_fault; + uint32_t try_failed_count; + int interruptible; /* how may fault be interrupted? */ + int external_state = VM_EXTERNAL_STATE_UNKNOWN; + memory_object_t pager; + vm_fault_return_t retval; + int grab_options; -#if MACH_PAGEMAP /* - * MACH page map - an optional optimization where a bit map is maintained - * by the VM subsystem for internal objects to indicate which pages of - * the object currently reside on backing store. This existence map - * duplicates information maintained by the vnode pager. It is - * created at the time of the first pageout against the object, i.e. - * at the same time pager for the object is created. The optimization - * is designed to eliminate pager interaction overhead, if it is - * 'known' that the page does not exist on backing store. - * - * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is - * either marked as paged out in the existence map for the object or no - * existence map exists for the object. LOOK_FOR() is one of the - * criteria in the decision to invoke the pager. It is also used as one - * of the criteria to terminate the scan for adjacent pages in a clustered - * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for - * permanent objects. Note also that if the pager for an internal object + * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is + * marked as paged out in the compressor pager or the pager doesn't exist. + * Note also that if the pager for an internal object * has not been created, the pager is not invoked regardless of the value - * of LOOK_FOR() and that clustered pagein scans are only done on an object - * for which a pager has been created. + * of MUST_ASK_PAGER(). * * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset - * is marked as paged out in the existence map for the object. PAGED_OUT() + * is marked as paged out in the compressor pager. * PAGED_OUT() is used to determine if a page has already been pushed * into a copy object in order to avoid a redundant page out operation. */ -#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \ - != VM_EXTERNAL_STATE_ABSENT) -#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \ - == VM_EXTERNAL_STATE_EXISTS) -#else /* MACH_PAGEMAP */ -/* - * If the MACH page map optimization is not enabled, - * LOOK_FOR() always evaluates to TRUE. 
The pager will always be - * invoked to resolve missing pages in an object, assuming the pager - * has been created for the object. In a clustered page operation, the - * absence of a page on backing backing store cannot be used to terminate - * a scan for adjacent pages since that information is available only in - * the pager. Hence pages that may not be paged out are potentially - * included in a clustered request. The vnode pager is coded to deal - * with any combination of absent/present pages in a clustered - * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager - * will always be invoked to push a dirty page into a copy object assuming - * a pager has been created. If the page has already been pushed, the - * pager will ingore the new request. - */ -#define LOOK_FOR(o, f) TRUE -#define PAGED_OUT(o, f) FALSE -#endif /* MACH_PAGEMAP */ +#define MUST_ASK_PAGER(o, f, s) \ + ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT) + +#define PAGED_OUT(o, f) \ + (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS) /* * Recovery actions */ -#define PREPARE_RELEASE_PAGE(m) \ - MACRO_BEGIN \ - vm_page_lock_queues(); \ - MACRO_END - -#define DO_RELEASE_PAGE(m) \ - MACRO_BEGIN \ - PAGE_WAKEUP_DONE(m); \ - if (!m->active && !m->inactive) \ - vm_page_activate(m); \ - vm_page_unlock_queues(); \ - MACRO_END - #define RELEASE_PAGE(m) \ MACRO_BEGIN \ - PREPARE_RELEASE_PAGE(m); \ - DO_RELEASE_PAGE(m); \ + PAGE_WAKEUP_DONE(m); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \ + vm_page_deactivate(m); \ + else \ + vm_page_activate(m); \ + } \ + vm_page_unlock_queues(); \ + } \ MACRO_END #if TRACEFAULTPAGE dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ #endif - - -#if !VM_FAULT_STATIC_CONFIG - if (vm_fault_dirty_handling -#if MACH_KDB - /* - * If there are watchpoints set, then - * we don't want to give away write permission - * on a read fault. Make the task write fault, - * so that the watchpoint code notices the access. - */ - || db_watchpoint_list -#endif /* MACH_KDB */ - ) { - /* - * If we aren't asking for write permission, - * then don't give it away. We're using write - * faults to set the dirty bit. - */ - if (!(fault_type & VM_PROT_WRITE)) - *protection &= ~VM_PROT_WRITE; - } - - if (!vm_fault_interruptible) - interruptible = THREAD_UNINT; -#else /* STATIC_CONFIG */ -#if MACH_KDB - /* - * If there are watchpoints set, then - * we don't want to give away write permission - * on a read fault. Make the task write fault, - * so that the watchpoint code notices the access. - */ - if (db_watchpoint_list) { - /* - * If we aren't asking for write permission, - * then don't give it away. We're using write - * faults to set the dirty bit. - */ - if (!(fault_type & VM_PROT_WRITE)) - *protection &= ~VM_PROT_WRITE; - } - -#endif /* MACH_KDB */ -#endif /* STATIC_CONFIG */ - + interruptible = fault_info->interruptible; interruptible_state = thread_interrupt_level(interruptible); /* @@ -550,172 +978,211 @@ vm_fault_page( * pager access or when waiting for memory, so * we use a busy page then. * - * Note also that we aren't as concerned about more than - * one thread attempting to memory_object_data_unlock - * the same page at once, so we don't hold the page - * as busy then, but do record the highest unlock - * value so far. [Unlock requests may also be delivered - * out of order.] 
- * * 2) To prevent another thread from racing us down the * shadow chain and entering a new page in the top * object before we do, we must keep a busy page in * the top object while following the shadow chain. * * 3) We must increment paging_in_progress on any object - * for which we have a busy page + * for which we have a busy page before dropping + * the object lock * * 4) We leave busy pages on the pageout queues. * If the pageout daemon comes across a busy page, * it will remove the page from the pageout queues. */ - /* - * Search for the page at object/offset. - */ - object = first_object; offset = first_offset; first_m = VM_PAGE_NULL; access_required = fault_type; + XPR(XPR_VM_FAULT, "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", - (integer_t)object, offset, fault_type, *protection, 0); + object, offset, fault_type, *protection, 0); /* - * See whether this page is resident + * default type of fault */ + my_fault = DBG_CACHE_HIT_FAULT; while (TRUE) { #if TRACEFAULTPAGE dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif + + grab_options = 0; +#if CONFIG_SECLUDED_MEMORY + if (object->can_grab_secluded) { + grab_options |= VM_PAGE_GRAB_SECLUDED; + } +#endif /* CONFIG_SECLUDED_MEMORY */ + if (!object->alive) { + /* + * object is no longer valid + * clean up and return error + */ vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_ERROR); + + return (VM_FAULT_MEMORY_ERROR); } - m = vm_page_lookup(object, offset); -#if TRACEFAULTPAGE - dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ -#endif - if (m != VM_PAGE_NULL) { + + if (!object->pager_created && object->phys_contiguous) { /* - * If the page was pre-paged as part of a - * cluster, record the fact. - * If we were passed a valid pointer for - * "type_of_fault", than we came from - * vm_fault... we'll let it deal with - * this condition, since it - * needs to see m->clustered to correctly - * account the pageins. + * A physically-contiguous object without a pager: + * must be a "large page" object. We do not deal + * with VM pages for this object. */ - if (type_of_fault == NULL && m->clustered) { - vm_pagein_cluster_used++; - m->clustered = FALSE; + caller_lookup = FALSE; + m = VM_PAGE_NULL; + goto phys_contig_object; + } + + if (object->blocked_access) { + /* + * Access to this VM object has been blocked. + * Replace our "paging_in_progress" reference with + * a "activity_in_progress" reference and wait for + * access to be unblocked. + */ + caller_lookup = FALSE; /* no longer valid after sleep */ + vm_object_activity_begin(object); + vm_object_paging_end(object); + while (object->blocked_access) { + vm_object_sleep(object, + VM_OBJECT_EVENT_UNBLOCKED, + THREAD_UNINT); } + vm_fault_page_blocked_access++; + vm_object_paging_begin(object); + vm_object_activity_end(object); + } + /* + * See whether the page at 'offset' is resident + */ + if (caller_lookup == TRUE) { /* - * If the page is being brought in, - * wait for it and then retry. - * - * A possible optimization: if the page - * is known to be resident, we can ignore - * pages that are absent (regardless of - * whether they're busy). + * The caller has already looked up the page + * and gave us the result in "result_page". + * We can use this for the first lookup but + * it loses its validity as soon as we unlock + * the object. 
*/ + m = *result_page; + caller_lookup = FALSE; /* no longer valid after that */ + } else { + m = vm_page_lookup(object, offset); + } +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ +#endif + if (m != VM_PAGE_NULL) { if (m->busy) { + /* + * The page is being brought in, + * wait for it and then retry. + */ #if TRACEFAULTPAGE dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif wait_result = PAGE_SLEEP(object, m, interruptible); + XPR(XPR_VM_FAULT, "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n", - (integer_t)object, offset, - (integer_t)m, 0, 0); + object, offset, + m, 0, 0); counter(c_vm_fault_page_block_busy_kernel++); if (wait_result != THREAD_AWAKENED) { vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); + if (wait_result == THREAD_RESTART) - { - return(VM_FAULT_RETRY); - } + return (VM_FAULT_RETRY); else - { - return(VM_FAULT_INTERRUPTED); - } + return (VM_FAULT_INTERRUPTED); } continue; } + if (m->laundry) { + m->free_when_done = FALSE; - if (m->encrypted) { - /* - * ENCRYPTED SWAP: - * the user needs access to a page that we - * encrypted before paging it out. - * Decrypt the page now. - * Keep it busy to prevent anyone from - * accessing it during the decryption. - */ - m->busy = TRUE; - vm_page_decrypt(m, 0); - assert(object == m->object); - assert(m->busy); - PAGE_WAKEUP_DONE(m); - + if (!m->cleaning) + vm_pageout_steal_laundry(m, FALSE); + } + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { /* - * Retry from the top, in case - * something changed while we were - * decrypting. + * Guard page: off limits ! */ - continue; + if (fault_type == VM_PROT_NONE) { + /* + * The fault is not requesting any + * access to the guard page, so it must + * be just to wire or unwire it. + * Let's pretend it succeeded... + */ + m->busy = TRUE; + *result_page = m; + assert(first_m == VM_PAGE_NULL); + *top_page = first_m; + if (type_of_fault) + *type_of_fault = DBG_GUARD_FAULT; + thread_interrupt_level(interruptible_state); + return VM_FAULT_SUCCESS; + } else { + /* + * The fault requests access to the + * guard page: let's deny that ! + */ + vm_fault_cleanup(object, first_m); + thread_interrupt_level(interruptible_state); + return VM_FAULT_MEMORY_ERROR; + } } - ASSERT_PAGE_DECRYPTED(m); - - /* - * If the page is in error, give up now. - */ if (m->error) { + /* + * The page is in error, give up now. + */ #if TRACEFAULTPAGE dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ #endif if (error_code) - *error_code = m->page_error; + *error_code = KERN_MEMORY_ERROR; VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_ERROR); - } - - /* - * If the pager wants us to restart - * at the top of the chain, - * typically because it has moved the - * page to another pager, then do so. - */ + return (VM_FAULT_MEMORY_ERROR); + } if (m->restart) { + /* + * The pager wants us to restart + * at the top of the chain, + * typically because it has moved the + * page to another pager, then do so. + */ #if TRACEFAULTPAGE dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_RETRY); - } - - /* - * If the page isn't busy, but is absent, - * then it was deemed "unavailable". 
- */ + return (VM_FAULT_RETRY); + } if (m->absent) { - /* + /* + * The page isn't busy, but is absent, + * therefore it's deemed "unavailable". + * * Remove the non-existent page (unless it's * in the top object) and move on down to the * next object (if there is one). @@ -723,175 +1190,118 @@ vm_fault_page( #if TRACEFAULTPAGE dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ #endif - next_object = object->shadow; - if (next_object == VM_OBJECT_NULL) { - vm_page_t real_m; - - assert(!must_be_resident); - - if (object->shadow_severed) { - vm_fault_cleanup( - object, first_m); - thread_interrupt_level(interruptible_state); - return VM_FAULT_MEMORY_ERROR; - } + if (next_object == VM_OBJECT_NULL) { /* * Absent page at bottom of shadow * chain; zero fill the page we left - * busy in the first object, and flush - * the absent page. But first we - * need to allocate a real page. + * busy in the first object, and free + * the absent page. */ - if (VM_PAGE_THROTTLED() || - (real_m = vm_page_grab()) - == VM_PAGE_NULL) { - vm_fault_cleanup( - object, first_m); - thread_interrupt_level( - interruptible_state); - return( - VM_FAULT_MEMORY_SHORTAGE); - } + assert(!must_be_resident); /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. + * check for any conditions that prevent + * us from creating a new zero-fill page + * vm_fault_check will do all of the + * fault cleanup in the case of an error condition + * including resetting the thread_interrupt_level */ + error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); - if(vm_backing_store_low) { - if(!(current_task()->priv_flags - & VM_BACKING_STORE_PRIV)) { - assert_wait((event_t) - &vm_backing_store_low, - THREAD_UNINT); - vm_fault_cleanup(object, - first_m); - thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level( - interruptible_state); - return(VM_FAULT_RETRY); - } - } - + if (error != VM_FAULT_SUCCESS) + return (error); XPR(XPR_VM_FAULT, - "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", - (integer_t)object, offset, - (integer_t)m, - (integer_t)first_object, 0); + "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", + object, offset, + m, + first_object, 0); + if (object != first_object) { + /* + * free the absent page we just found + */ VM_PAGE_FREE(m); + + /* + * drop reference and lock on current object + */ vm_object_paging_end(object); vm_object_unlock(object); - object = first_object; - offset = first_offset; + + /* + * grab the original page we + * 'soldered' in place and + * retake lock on 'first_object' + */ m = first_m; first_m = VM_PAGE_NULL; - vm_object_lock(object); - } - - VM_PAGE_FREE(m); - assert(real_m->busy); - vm_page_insert(real_m, object, offset); - m = real_m; - /* - * Drop the lock while zero filling - * page. Then break because this - * is the page we wanted. Checking - * the page lock is a waste of time; - * this page was either absent or - * newly allocated -- in both cases - * it can't be page locked by a pager. 
- */ - m->no_isync = FALSE; + object = first_object; + offset = first_offset; - if (!no_zero_fill) { - vm_object_unlock(object); - vm_page_zero_fill(m); vm_object_lock(object); - - if (type_of_fault) - *type_of_fault = DBG_ZERO_FILL_FAULT; - VM_STAT(zero_fill_count++); - } - if (bumped_pagein == TRUE) { - VM_STAT(pageins--); - current_task()->pageins--; - } - vm_page_lock_queues(); - VM_PAGE_QUEUES_REMOVE(m); - m->page_ticket = vm_page_ticket; - assert(!m->laundry); - assert(m->object != kernel_object); - assert(m->pageq.next == NULL && - m->pageq.prev == NULL); - if(m->object->size > 0x200000) { - m->zero_fill = TRUE; - /* depends on the queues lock */ - vm_zf_count += 1; - queue_enter(&vm_page_queue_zf, - m, vm_page_t, pageq); } else { - queue_enter( - &vm_page_queue_inactive, - m, vm_page_t, pageq); - } - vm_page_ticket_roll++; - if(vm_page_ticket_roll == - VM_PAGE_TICKETS_IN_ROLL) { - vm_page_ticket_roll = 0; - if(vm_page_ticket == - VM_PAGE_TICKET_ROLL_IDS) - vm_page_ticket= 0; - else - vm_page_ticket++; + /* + * we're going to use the absent page we just found + * so convert it to a 'busy' page + */ + m->absent = FALSE; + m->busy = TRUE; } - m->inactive = TRUE; - vm_page_inactive_count++; - vm_page_unlock_queues(); + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) + m->absent = TRUE; + /* + * zero-fill the page and put it on + * the correct paging queue + */ + my_fault = vm_fault_zero_page(m, no_zero_fill); + break; } else { - if (must_be_resident) { + if (must_be_resident) vm_object_paging_end(object); - } else if (object != first_object) { + else if (object != first_object) { vm_object_paging_end(object); VM_PAGE_FREE(m); } else { first_m = m; m->absent = FALSE; - m->unusual = FALSE; - vm_object_absent_release(object); m->busy = TRUE; - vm_page_lock_queues(); - VM_PAGE_QUEUES_REMOVE(m); + vm_page_lockspin_queues(); + vm_page_queues_remove(m, FALSE); vm_page_unlock_queues(); } XPR(XPR_VM_FAULT, "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n", - (integer_t)object, offset, - (integer_t)next_object, - offset+object->shadow_offset,0); - offset += object->shadow_offset; - hi_offset += object->shadow_offset; - lo_offset += object->shadow_offset; + object, offset, + next_object, + offset+object->vo_shadow_offset,0); + + offset += object->vo_shadow_offset; + fault_info->lo_offset += object->vo_shadow_offset; + fault_info->hi_offset += object->vo_shadow_offset; access_required = VM_PROT_READ; + vm_object_lock(next_object); vm_object_unlock(object); object = next_object; vm_object_paging_begin(object); + + /* + * reset to default type of fault + */ + my_fault = DBG_CACHE_HIT_FAULT; + continue; } } - if ((m->cleaning) - && ((object != first_object) || - (object->copy != VM_OBJECT_NULL)) - && (fault_type & VM_PROT_WRITE)) { + && ((object != first_object) || (object->copy != VM_OBJECT_NULL)) + && (fault_type & VM_PROT_WRITE)) { /* * This is a copy-on-write fault that will * cause us to revoke access to this page, but @@ -907,191 +1317,175 @@ vm_fault_page( #endif XPR(XPR_VM_FAULT, "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n", - (integer_t)object, offset, - (integer_t)m, 0, 0); - /* take an extra ref so that object won't die */ - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); + object, offset, + m, 0, 0); + /* + * take an extra ref so that object won't die + */ + vm_object_reference_locked(object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); vm_object_lock(object); 
assert(object->ref_count > 0); + m = vm_page_lookup(object, offset); + if (m != VM_PAGE_NULL && m->cleaning) { PAGE_ASSERT_WAIT(m, interruptible); + vm_object_unlock(object); wait_result = thread_block(THREAD_CONTINUE_NULL); vm_object_deallocate(object); + goto backoff; } else { vm_object_unlock(object); + vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; + + return (VM_FAULT_RETRY); } } + if (type_of_fault == NULL && (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) && + !(fault_info != NULL && fault_info->stealth)) { + /* + * If we were passed a non-NULL pointer for + * "type_of_fault", than we came from + * vm_fault... we'll let it deal with + * this condition, since it + * needs to see m->speculative to correctly + * account the pageins, otherwise... + * take it off the speculative queue, we'll + * let the caller of vm_fault_page deal + * with getting it onto the correct queue + * + * If the caller specified in fault_info that + * it wants a "stealth" fault, we also leave + * the page in the speculative queue. + */ + vm_page_lockspin_queues(); + if (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) + vm_page_queues_remove(m, FALSE); + vm_page_unlock_queues(); + } + assert(object == VM_PAGE_OBJECT(m)); - /* - * If the desired access to this page has - * been locked out, request that it be unlocked. - */ - - if (access_required & m->page_lock) { - if ((access_required & m->unlock_request) != access_required) { - vm_prot_t new_unlock_request; - kern_return_t rc; - -#if TRACEFAULTPAGE - dbgTrace(0xBEEF000A, (unsigned int) m, (unsigned int) object->pager_ready); /* (TEST/DEBUG) */ -#endif - if (!object->pager_ready) { - XPR(XPR_VM_FAULT, - "vm_f_page: ready wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n", - access_required, - (integer_t)object, offset, - (integer_t)m, 0); - /* take an extra ref */ - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); - vm_fault_cleanup(object, - first_m); - counter(c_vm_fault_page_block_backoff_kernel++); - vm_object_lock(object); - assert(object->ref_count > 0); - if (!object->pager_ready) { - wait_result = vm_object_assert_wait( - object, - VM_OBJECT_EVENT_PAGER_READY, - interruptible); - vm_object_unlock(object); - if (wait_result == THREAD_WAITING) - wait_result = thread_block(THREAD_CONTINUE_NULL); - vm_object_deallocate(object); - goto backoff; - } else { - vm_object_unlock(object); - vm_object_deallocate(object); - thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; - } - } + if (m->encrypted) { + /* + * ENCRYPTED SWAP: + * the user needs access to a page that we + * encrypted before paging it out. + * Decrypt the page now. + * Keep it busy to prevent anyone from + * accessing it during the decryption. + */ + m->busy = TRUE; + vm_page_decrypt(m, 0); + assert(m->busy); + PAGE_WAKEUP_DONE(m); - new_unlock_request = m->unlock_request = - (access_required | m->unlock_request); - vm_object_unlock(object); - XPR(XPR_VM_FAULT, - "vm_f_page: unlock obj 0x%X, offset 0x%X, page 0x%X, unl_req %d\n", - (integer_t)object, offset, - (integer_t)m, new_unlock_request, 0); - if ((rc = memory_object_data_unlock( - object->pager, - offset + object->paging_offset, - PAGE_SIZE, - new_unlock_request)) - != KERN_SUCCESS) { - if (vm_fault_debug) - printf("vm_fault: memory_object_data_unlock failed\n"); - vm_object_lock(object); - vm_fault_cleanup(object, first_m); - thread_interrupt_level(interruptible_state); - return((rc == MACH_SEND_INTERRUPTED) ? 
- VM_FAULT_INTERRUPTED : - VM_FAULT_MEMORY_ERROR); - } - vm_object_lock(object); - continue; - } + /* + * Retry from the top, in case + * something changed while we were + * decrypting. + */ + continue; + } + ASSERT_PAGE_DECRYPTED(m); - XPR(XPR_VM_FAULT, - "vm_f_page: access wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n", - access_required, (integer_t)object, - offset, (integer_t)m, 0); - /* take an extra ref so object won't die */ - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); - vm_fault_cleanup(object, first_m); - counter(c_vm_fault_page_block_backoff_kernel++); - vm_object_lock(object); - assert(object->ref_count > 0); - m = vm_page_lookup(object, offset); - if (m != VM_PAGE_NULL && - (access_required & m->page_lock) && - !((access_required & m->unlock_request) != access_required)) { - PAGE_ASSERT_WAIT(m, interruptible); - vm_object_unlock(object); - wait_result = thread_block(THREAD_CONTINUE_NULL); - vm_object_deallocate(object); - goto backoff; - } else { - vm_object_unlock(object); - vm_object_deallocate(object); - thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; - } + if (object->code_signed) { + /* + * CODE SIGNING: + * We just paged in a page from a signed + * memory object but we don't need to + * validate it now. We'll validate it if + * when it gets mapped into a user address + * space for the first time or when the page + * gets copied to another object as a result + * of a copy-on-write. + */ } + /* - * We mark the page busy and leave it on - * the pageout queues. If the pageout - * deamon comes across it, then it will - * remove the page. + * We mark the page busy and leave it on + * the pageout queues. If the pageout + * deamon comes across it, then it will + * remove the page from the queue, but not the object */ - #if TRACEFAULTPAGE dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - -#if !VM_FAULT_STATIC_CONFIG - if (!software_reference_bits) { - vm_page_lock_queues(); - if (m->inactive) - vm_stat.reactivations++; - - VM_PAGE_QUEUES_REMOVE(m); - vm_page_unlock_queues(); - } -#endif XPR(XPR_VM_FAULT, "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n", - (integer_t)object, offset, (integer_t)m, 0, 0); + object, offset, m, 0, 0); assert(!m->busy); - m->busy = TRUE; assert(!m->absent); + + m->busy = TRUE; break; } + + + /* + * we get here when there is no page present in the object at + * the offset we're interested in... we'll allocate a page + * at this point if the pager associated with + * this object can provide the data or we're the top object... + * object is locked; m == NULL + */ + if (must_be_resident) { + if (fault_type == VM_PROT_NONE && + object == kernel_object) { + /* + * We've been called from vm_fault_unwire() + * while removing a map entry that was allocated + * with KMA_KOBJECT and KMA_VAONLY. This page + * is not present and there's nothing more to + * do here (nothing to unwire). 
+ */ + vm_fault_cleanup(object, first_m); + thread_interrupt_level(interruptible_state); + + return VM_FAULT_MEMORY_ERROR; + } - look_for_page = - (object->pager_created) && - LOOK_FOR(object, offset) && - (!data_supply); + goto dont_look_for_page; + } + data_supply = FALSE; + look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); + #if TRACEFAULTPAGE dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ #endif - if ((look_for_page || (object == first_object)) - && !must_be_resident - && !(object->phys_contiguous)) { + if (!look_for_page && object == first_object && !object->phys_contiguous) { /* - * Allocate a new page for this object/offset - * pair. + * Allocate a new page for this object/offset pair as a placeholder */ - - m = vm_page_grab_fictitious(); + m = vm_page_grab_options(grab_options); #if TRACEFAULTPAGE dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m == VM_PAGE_NULL) { + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_FICTITIOUS_SHORTAGE); + + return (VM_FAULT_MEMORY_SHORTAGE); } - vm_page_insert(m, object, offset); - } - if ((look_for_page && !must_be_resident)) { + if (fault_info && fault_info->batch_pmap_op == TRUE) { + vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + } else { + vm_page_insert(m, object, offset); + } + } + if (look_for_page) { kern_return_t rc; + int my_fault_type; /* * If the memory manager is not ready, we @@ -1101,231 +1495,355 @@ vm_fault_page( #if TRACEFAULTPAGE dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if(m != VM_PAGE_NULL) - VM_PAGE_FREE(m); + if (m != VM_PAGE_NULL) + VM_PAGE_FREE(m); + XPR(XPR_VM_FAULT, "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", - (integer_t)object, offset, 0, 0, 0); - /* take an extra ref so object won't die */ - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); + object, offset, 0, 0, 0); + + /* + * take an extra ref so object won't die + */ + vm_object_reference_locked(object); vm_fault_cleanup(object, first_m); counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); assert(object->ref_count > 0); + if (!object->pager_ready) { - wait_result = vm_object_assert_wait(object, - VM_OBJECT_EVENT_PAGER_READY, - interruptible); + wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible); + vm_object_unlock(object); if (wait_result == THREAD_WAITING) wait_result = thread_block(THREAD_CONTINUE_NULL); vm_object_deallocate(object); + goto backoff; } else { vm_object_unlock(object); vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; - } - } - if(object->phys_contiguous) { - if(m != VM_PAGE_NULL) { - VM_PAGE_FREE(m); - m = VM_PAGE_NULL; + return (VM_FAULT_RETRY); } - goto no_clustering; } - if (object->internal) { + if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) { /* - * Requests to the default pager - * must reserve a real page in advance, - * because the pager's data-provided - * won't block for pages. IMPORTANT: - * this acts as a throttling mechanism - * for data_requests to the default - * pager. + * If there are too many outstanding page + * requests pending on this external object, we + * wait for them to be resolved now. 
*/ - #if TRACEFAULTPAGE - dbgTrace(0xBEEF000F, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if (m->fictitious && !vm_page_convert(m)) { + if (m != VM_PAGE_NULL) VM_PAGE_FREE(m); - vm_fault_cleanup(object, first_m); - thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); - } - } else if (object->absent_count > - vm_object_absent_max) { /* - * If there are too many outstanding page - * requests pending on this object, we - * wait for them to be resolved now. + * take an extra ref so object won't die */ + vm_object_reference_locked(object); -#if TRACEFAULTPAGE - dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ -#endif - if(m != VM_PAGE_NULL) - VM_PAGE_FREE(m); - /* take an extra ref so object won't die */ - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); assert(object->ref_count > 0); - if (object->absent_count > vm_object_absent_max) { - vm_object_absent_assert_wait(object, - interruptible); - vm_object_unlock(object); + + if (object->paging_in_progress >= vm_object_pagein_throttle) { + vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); + + vm_object_unlock(object); wait_result = thread_block(THREAD_CONTINUE_NULL); vm_object_deallocate(object); + goto backoff; } else { vm_object_unlock(object); vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; + + return (VM_FAULT_RETRY); } } + if (object->internal) { + int compressed_count_delta; - /* - * Indicate that the page is waiting for data - * from the memory manager. - */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + + if (m == VM_PAGE_NULL) { + /* + * Allocate a new page for this object/offset pair as a placeholder + */ + m = vm_page_grab_options(grab_options); +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ +#endif + if (m == VM_PAGE_NULL) { + + vm_fault_cleanup(object, first_m); + thread_interrupt_level(interruptible_state); - if(m != VM_PAGE_NULL) { + return (VM_FAULT_MEMORY_SHORTAGE); + } - m->list_req_pending = TRUE; + m->absent = TRUE; + if (fault_info && fault_info->batch_pmap_op == TRUE) { + vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + } else { + vm_page_insert(m, object, offset); + } + } + assert(m->busy); + m->absent = TRUE; - m->unusual = TRUE; - object->absent_count++; + pager = object->pager; - } + assert(object->paging_in_progress > 0); + vm_object_unlock(object); -no_clustering: - cluster_start = offset; - length = PAGE_SIZE; + rc = vm_compressor_pager_get( + pager, + offset + object->paging_offset, + VM_PAGE_GET_PHYS_PAGE(m), + &my_fault_type, + 0, + &compressed_count_delta); - /* - * lengthen the cluster by the pages in the working set - */ - if((map != NULL) && - (current_task()->dynamic_working_set != 0)) { - cluster_end = cluster_start + length; - /* tws values for start and end are just a - * suggestions. Therefore, as long as - * build_cluster does not use pointers or - * take action based on values that - * could be affected by re-entrance we - * do not need to take the map lock. 
- */ - cluster_end = offset + PAGE_SIZE_64; - tws_build_cluster( - current_task()->dynamic_working_set, - object, &cluster_start, - &cluster_end, 0x40000); - length = cluster_end - cluster_start; + if (type_of_fault == NULL) { + int throttle_delay; + + /* + * we weren't called from vm_fault, so we + * need to apply page creation throttling + * do it before we re-acquire any locks + */ + if (my_fault_type == DBG_COMPRESSOR_FAULT) { + if ((throttle_delay = vm_page_throttled(TRUE))) { + VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0); + delay(throttle_delay); + } + } + } + vm_object_lock(object); + assert(object->paging_in_progress > 0); + + vm_compressor_pager_count( + pager, + compressed_count_delta, + FALSE, /* shared_lock */ + object); + + switch (rc) { + case KERN_SUCCESS: + m->absent = FALSE; + m->dirty = TRUE; + if ((object->wimg_bits & + VM_WIMG_MASK) != + VM_WIMG_USE_DEFAULT) { + /* + * If the page is not cacheable, + * we can't let its contents + * linger in the data cache + * after the decompression. + */ + pmap_sync_page_attributes_phys( + VM_PAGE_GET_PHYS_PAGE(m)); + } else { + m->written_by_kernel = TRUE; + } + + /* + * If the object is purgeable, its + * owner's purgeable ledgers have been + * updated in vm_page_insert() but the + * page was also accounted for in a + * "compressed purgeable" ledger, so + * update that now. + */ + if ((object->purgable != + VM_PURGABLE_DENY) && + (object->vo_purgeable_owner != + NULL)) { + /* + * One less compressed + * purgeable page. + */ + vm_purgeable_compressed_update( + object, + -1); + } + + break; + case KERN_MEMORY_FAILURE: + m->unusual = TRUE; + m->error = TRUE; + m->absent = FALSE; + break; + case KERN_MEMORY_ERROR: + assert(m->absent); + break; + default: + panic("vm_fault_page(): unexpected " + "error %d from " + "vm_compressor_pager_get()\n", + rc); + } + PAGE_WAKEUP_DONE(m); + + rc = KERN_SUCCESS; + goto data_requested; + } + my_fault_type = DBG_PAGEIN_FAULT; + + if (m != VM_PAGE_NULL) { + VM_PAGE_FREE(m); + m = VM_PAGE_NULL; } + #if TRACEFAULTPAGE dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ #endif + /* - * We have a busy page, so we can - * release the object lock. + * It's possible someone called vm_object_destroy while we weren't + * holding the object lock. If that has happened, then bail out + * here. */ - vm_object_unlock(object); + + pager = object->pager; + + if (pager == MEMORY_OBJECT_NULL) { + vm_fault_cleanup(object, first_m); + thread_interrupt_level(interruptible_state); + return VM_FAULT_MEMORY_ERROR; + } /* - * Call the memory manager to retrieve the data. + * We have an absent page in place for the faulting offset, + * so we can release the object lock. */ - if (type_of_fault) - *type_of_fault = ((int)length << 8) | DBG_PAGEIN_FAULT; - VM_STAT(pageins++); - current_task()->pageins++; - bumped_pagein = TRUE; + vm_object_unlock(object); /* - * If this object uses a copy_call strategy, - * and we are interested in a copy of this object - * (having gotten here only by following a - * shadow chain), then tell the memory manager - * via a flag added to the desired_access - * parameter, so that it can detect a race - * between our walking down the shadow chain - * and its pushing pages up into a copy of - * the object that it manages. 
+ * If this object uses a copy_call strategy, + * and we are interested in a copy of this object + * (having gotten here only by following a + * shadow chain), then tell the memory manager + * via a flag added to the desired_access + * parameter, so that it can detect a race + * between our walking down the shadow chain + * and its pushing pages up into a copy of + * the object that it manages. */ - - if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && - object != first_object) { + if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) wants_copy_flag = VM_PROT_WANTS_COPY; - } else { + else wants_copy_flag = VM_PROT_NONE; - } XPR(XPR_VM_FAULT, "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n", - (integer_t)object, offset, (integer_t)m, + object, offset, m, access_required | wants_copy_flag, 0); - rc = memory_object_data_request(object->pager, - cluster_start + object->paging_offset, - length, - access_required | wants_copy_flag); + if (object->copy == first_object) { + /* + * if we issue the memory_object_data_request in + * this state, we are subject to a deadlock with + * the underlying filesystem if it is trying to + * shrink the file resulting in a push of pages + * into the copy object... that push will stall + * on the placeholder page, and if the pushing thread + * is holding a lock that is required on the pagein + * path (such as a truncate lock), we'll deadlock... + * to avoid this potential deadlock, we throw away + * our placeholder page before calling memory_object_data_request + * and force this thread to retry the vm_fault_page after + * we have issued the I/O. the second time through this path + * we will find the page already in the cache (presumably still + * busy waiting for the I/O to complete) and then complete + * the fault w/o having to go through memory_object_data_request again + */ + assert(first_m != VM_PAGE_NULL); + assert(VM_PAGE_OBJECT(first_m) == first_object); + + vm_object_lock(first_object); + VM_PAGE_FREE(first_m); + vm_object_paging_end(first_object); + vm_object_unlock(first_object); + + first_m = VM_PAGE_NULL; + force_fault_retry = TRUE; + + vm_fault_page_forced_retry++; + } + if (data_already_requested == TRUE) { + orig_behavior = fault_info->behavior; + orig_cluster_size = fault_info->cluster_size; + fault_info->behavior = VM_BEHAVIOR_RANDOM; + fault_info->cluster_size = PAGE_SIZE; + } + /* + * Call the memory manager to retrieve the data. + */ + rc = memory_object_data_request( + pager, + offset + object->paging_offset, + PAGE_SIZE, + access_required | wants_copy_flag, + (memory_object_fault_info_t)fault_info); + + if (data_already_requested == TRUE) { + fault_info->behavior = orig_behavior; + fault_info->cluster_size = orig_cluster_size; + } else + data_already_requested = TRUE; + + DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL); #if TRACEFAULTPAGE dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ #endif + vm_object_lock(object); + + data_requested: if (rc != KERN_SUCCESS) { - if (rc != MACH_SEND_INTERRUPTED - && vm_fault_debug) - printf("%s(0x%x, 0x%xll, 0x%xll, 0x%x) failed, rc=%d\n", - "memory_object_data_request", - object->pager, - cluster_start + object->paging_offset, - length, access_required, rc); - /* - * Don't want to leave a busy page around, - * but the data request may have blocked, - * so check if it's still there and busy. 
- */ - if(!object->phys_contiguous) { - vm_object_lock(object); - for (; length; length -= PAGE_SIZE, - cluster_start += PAGE_SIZE_64) { - vm_page_t p; - if ((p = vm_page_lookup(object, - cluster_start)) - && p->absent && p->busy - && p != first_m) { - VM_PAGE_FREE(p); - } - } - } + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return((rc == MACH_SEND_INTERRUPTED) ? + + return ((rc == MACH_SEND_INTERRUPTED) ? VM_FAULT_INTERRUPTED : VM_FAULT_MEMORY_ERROR); + } else { + clock_sec_t tv_sec; + clock_usec_t tv_usec; + + if (my_fault_type == DBG_PAGEIN_FAULT) { + clock_get_system_microtime(&tv_sec, &tv_usec); + current_thread()->t_page_creation_time = tv_sec; + current_thread()->t_page_creation_count = 0; + } } - - vm_object_lock(object); - if ((interruptible != THREAD_UNINT) && - (current_thread()->state & TH_ABORT)) { + if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) { + + vm_fault_cleanup(object, first_m); + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_INTERRUPTED); + } + if (force_fault_retry == TRUE) { + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_INTERRUPTED); + + return (VM_FAULT_RETRY); } - if (m == VM_PAGE_NULL && - object->phys_contiguous) { + if (m == VM_PAGE_NULL && object->phys_contiguous) { /* * No page here means that the object we * initially looked up was "physically @@ -1339,8 +1857,15 @@ no_clustering: * page fault against the object's new backing * store (different memory object). */ - break; + phys_contig_object: + goto done; } + /* + * potentially a pagein fault + * if we make it through the state checks + * above, than we'll count it as such + */ + my_fault = my_fault_type; /* * Retry with same object/offset, since new data may @@ -1349,12 +1874,13 @@ no_clustering: */ continue; } - +dont_look_for_page: /* - * The only case in which we get here is if - * object has no pager (or unwiring). If the pager doesn't - * have the page this is handled in the m->absent case above - * (and if you change things here you should look above). + * We get here if the object has no pager, or an existence map + * exists and indicates the page isn't present on the pager + * or we're unwiring a page. If a pager exists, but there + * is no existence map, then the m->absent case above handles + * the ZF case when the pager can't provide the page */ #if TRACEFAULTPAGE dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ @@ -1366,20 +1892,17 @@ no_clustering: XPR(XPR_VM_FAULT, "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n", - (integer_t)object, offset, (integer_t)m, - (integer_t)object->shadow, 0); - /* - * Move on to the next object. Lock the next - * object before unlocking the current one. - */ + object, offset, m, + object->shadow, 0); + next_object = object->shadow; + if (next_object == VM_OBJECT_NULL) { - assert(!must_be_resident); /* - * If there's no object left, fill the page - * in the top object with zeros. But first we - * need to allocate a real page. + * we've hit the bottom of the shadown chain, + * fill the page in the top object with zeros. 
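 *
 * [Editorial sketch, not part of this change: the shadow-chain search and
 *  zero-fill fallback reduced to plain C. calloc() stands in for grabbing and
 *  zeroing a page; the lookup callback and struct fields are hypothetical.
 *  In the kernel the zero-filled page is inserted into the *top* object so
 *  later faults find it without walking the chain again.]
 */

#include <stdlib.h>

#define PAGE_SZ 4096

struct sobj {
    struct sobj *shadow;                /* backing object; NULL at the bottom  */
    long long    shadow_offset;         /* offset of this object in its shadow */
    void *(*lookup)(struct sobj *, long long);  /* page lookup, hypothetical   */
};

/* Resolve a fault by walking the shadow chain; if no object supplies the
 * page, the top object gets a zero-filled page at the original offset. */
static void *resolve_or_zero_fill(struct sobj *top, long long offset)
{
    struct sobj *obj = top;
    long long    cur = offset;
    void        *page;

    for (;;) {
        page = obj->lookup(obj, cur);
        if (page != NULL)
            return page;                /* found it somewhere in the chain */
        if (obj->shadow == NULL)
            break;                      /* bottom of the chain: zero fill  */
        cur += obj->shadow_offset;
        obj  = obj->shadow;
    }
    return calloc(1, PAGE_SZ);          /* NULL maps to VM_FAULT_MEMORY_SHORTAGE */
}

/*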
*/ + assert(!must_be_resident); if (object != first_object) { vm_object_paging_end(object); @@ -1389,118 +1912,56 @@ no_clustering: offset = first_offset; vm_object_lock(object); } - m = first_m; - assert(m->object == object); + assert(VM_PAGE_OBJECT(m) == object); first_m = VM_PAGE_NULL; - if(m == VM_PAGE_NULL) { - m = vm_page_grab(); - if (m == VM_PAGE_NULL) { - vm_fault_cleanup( - object, VM_PAGE_NULL); - thread_interrupt_level( - interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); - } - vm_page_insert( - m, object, offset); - } - - if (object->shadow_severed) { - VM_PAGE_FREE(m); - vm_fault_cleanup(object, VM_PAGE_NULL); - thread_interrupt_level(interruptible_state); - return VM_FAULT_MEMORY_ERROR; - } - /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. + * check for any conditions that prevent + * us from creating a new zero-fill page + * vm_fault_check will do all of the + * fault cleanup in the case of an error condition + * including resetting the thread_interrupt_level */ + error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); - if(vm_backing_store_low) { - if(!(current_task()->priv_flags - & VM_BACKING_STORE_PRIV)) { - assert_wait((event_t) - &vm_backing_store_low, - THREAD_UNINT); - VM_PAGE_FREE(m); + if (error != VM_FAULT_SUCCESS) + return (error); + + if (m == VM_PAGE_NULL) { + m = vm_page_grab_options(grab_options); + + if (m == VM_PAGE_NULL) { vm_fault_cleanup(object, VM_PAGE_NULL); - thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level( - interruptible_state); - return(VM_FAULT_RETRY); - } - } + thread_interrupt_level(interruptible_state); - if (VM_PAGE_THROTTLED() || - (m->fictitious && !vm_page_convert(m))) { - VM_PAGE_FREE(m); - vm_fault_cleanup(object, VM_PAGE_NULL); - thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); + return (VM_FAULT_MEMORY_SHORTAGE); + } + vm_page_insert(m, object, offset); } - m->no_isync = FALSE; + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) + m->absent = TRUE; - if (!no_zero_fill) { - vm_object_unlock(object); - vm_page_zero_fill(m); - vm_object_lock(object); + my_fault = vm_fault_zero_page(m, no_zero_fill); - if (type_of_fault) - *type_of_fault = DBG_ZERO_FILL_FAULT; - VM_STAT(zero_fill_count++); - } - if (bumped_pagein == TRUE) { - VM_STAT(pageins--); - current_task()->pageins--; - } - vm_page_lock_queues(); - VM_PAGE_QUEUES_REMOVE(m); - assert(!m->laundry); - assert(m->object != kernel_object); - assert(m->pageq.next == NULL && - m->pageq.prev == NULL); - if(m->object->size > 0x200000) { - m->zero_fill = TRUE; - /* depends on the queues lock */ - vm_zf_count += 1; - queue_enter(&vm_page_queue_zf, - m, vm_page_t, pageq); - } else { - queue_enter( - &vm_page_queue_inactive, - m, vm_page_t, pageq); - } - m->page_ticket = vm_page_ticket; - vm_page_ticket_roll++; - if(vm_page_ticket_roll == VM_PAGE_TICKETS_IN_ROLL) { - vm_page_ticket_roll = 0; - if(vm_page_ticket == - VM_PAGE_TICKET_ROLL_IDS) - vm_page_ticket= 0; - else - vm_page_ticket++; - } - m->inactive = TRUE; - vm_page_inactive_count++; - vm_page_unlock_queues(); -#if 0 - pmap_clear_modify(m->phys_page); -#endif break; - } - else { + + } else { + /* + * Move on to the next object. Lock the next + * object before unlocking the current one. 
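 *
 * [Editorial sketch, not part of this change: the "lock the next object
 *  before unlocking the current one" rule shown as hand-over-hand locking
 *  with pthreads. The names are illustrative; the kernel uses
 *  vm_object_lock() and also moves its paging_in_progress reference as it
 *  descends the chain.]
 */

#include <pthread.h>

struct chained_obj {
    pthread_mutex_t     lock;
    struct chained_obj *shadow;
};

/* Descend one level while always holding at least one lock, so the chain
 * cannot be collapsed or torn down underneath us. Caller holds cur->lock;
 * on return the caller holds the returned object's lock instead. */
static struct chained_obj *descend_locked(struct chained_obj *cur)
{
    struct chained_obj *next = cur->shadow;

    if (next == NULL)
        return cur;                     /* already at the bottom; keep cur locked */
    pthread_mutex_lock(&next->lock);    /* take the next object's lock first ...  */
    pthread_mutex_unlock(&cur->lock);   /* ... only then drop the current one     */
    return next;
}

/*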
+ */ if ((object != first_object) || must_be_resident) vm_object_paging_end(object); - offset += object->shadow_offset; - hi_offset += object->shadow_offset; - lo_offset += object->shadow_offset; + + offset += object->vo_shadow_offset; + fault_info->lo_offset += object->vo_shadow_offset; + fault_info->hi_offset += object->vo_shadow_offset; access_required = VM_PROT_READ; + vm_object_lock(next_object); vm_object_unlock(object); + object = next_object; vm_object_paging_begin(object); } @@ -1528,12 +1989,10 @@ no_clustering: dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif #if EXTRA_ASSERTIONS - if(m != VM_PAGE_NULL) { - assert(m->busy && !m->absent); - assert((first_m == VM_PAGE_NULL) || - (first_m->busy && !first_m->absent && - !first_m->active && !first_m->inactive)); - } + assert(m->busy && !m->absent); + assert((first_m == VM_PAGE_NULL) || + (first_m->busy && !first_m->absent && + !first_m->active && !first_m->inactive && !first_m->secluded)); #endif /* EXTRA_ASSERTIONS */ /* @@ -1541,33 +2000,31 @@ no_clustering: * If we found a page, we must have decrypted it before we * get here... */ - if (m != VM_PAGE_NULL) { - ASSERT_PAGE_DECRYPTED(m); - } + ASSERT_PAGE_DECRYPTED(m); XPR(XPR_VM_FAULT, - "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", - (integer_t)object, offset, (integer_t)m, - (integer_t)first_object, (integer_t)first_m); + "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", + object, offset, m, + first_object, first_m); + /* - * If the page is being written, but isn't - * already owned by the top-level object, - * we have to copy it into a new page owned - * by the top-level object. + * If the page is being written, but isn't + * already owned by the top-level object, + * we have to copy it into a new page owned + * by the top-level object. */ - - if ((object != first_object) && (m != VM_PAGE_NULL)) { - /* - * We only really need to copy if we - * want to write it. - */ + if (object != first_object) { #if TRACEFAULTPAGE - dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif if (fault_type & VM_PROT_WRITE) { vm_page_t copy_m; + /* + * We only really need to copy if we + * want to write it. + */ assert(!must_be_resident); /* @@ -1575,131 +2032,143 @@ no_clustering: * backing store exhaustion. If so * sleep unless we are privileged. */ + if (vm_backing_store_low) { + if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { - if(vm_backing_store_low) { - if(!(current_task()->priv_flags - & VM_BACKING_STORE_PRIV)) { - assert_wait((event_t) - &vm_backing_store_low, - THREAD_UNINT); RELEASE_PAGE(m); vm_fault_cleanup(object, first_m); + + assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); + thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level( - interruptible_state); - return(VM_FAULT_RETRY); + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_RETRY); } } - /* - * If we try to collapse first_object at this - * point, we may deadlock when we try to get - * the lock on an intermediate object (since we - * have the bottom object locked). We can't - * unlock the bottom object, because the page - * we found may move (by collapse) if we do. + * If we try to collapse first_object at this + * point, we may deadlock when we try to get + * the lock on an intermediate object (since we + * have the bottom object locked). 
We can't + * unlock the bottom object, because the page + * we found may move (by collapse) if we do. * - * Instead, we first copy the page. Then, when - * we have no more use for the bottom object, - * we unlock it and try to collapse. + * Instead, we first copy the page. Then, when + * we have no more use for the bottom object, + * we unlock it and try to collapse. * - * Note that we copy the page even if we didn't - * need to... that's the breaks. + * Note that we copy the page even if we didn't + * need to... that's the breaks. */ /* - * Allocate a page for the copy + * Allocate a page for the copy */ - copy_m = vm_page_grab(); + copy_m = vm_page_grab_options(grab_options); + if (copy_m == VM_PAGE_NULL) { RELEASE_PAGE(m); + vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); - } - + return (VM_FAULT_MEMORY_SHORTAGE); + } XPR(XPR_VM_FAULT, "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n", - (integer_t)object, offset, - (integer_t)m, (integer_t)copy_m, 0); + object, offset, + m, copy_m, 0); + vm_page_copy(m, copy_m); /* - * If another map is truly sharing this - * page with us, we have to flush all - * uses of the original page, since we - * can't distinguish those which want the - * original from those which need the - * new copy. + * If another map is truly sharing this + * page with us, we have to flush all + * uses of the original page, since we + * can't distinguish those which want the + * original from those which need the + * new copy. * - * XXXO If we know that only one map has - * access to this page, then we could - * avoid the pmap_disconnect() call. + * XXXO If we know that only one map has + * access to this page, then we could + * avoid the pmap_disconnect() call. */ + if (m->pmapped) + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); - vm_page_lock_queues(); + if (m->clustered) { + VM_PAGE_COUNT_AS_PAGEIN(m); + VM_PAGE_CONSUME_CLUSTERED(m); + } assert(!m->cleaning); - pmap_disconnect(m->phys_page); - vm_page_deactivate(m); - copy_m->dirty = TRUE; + /* - * Setting reference here prevents this fault from - * being counted as a (per-thread) reactivate as well - * as a copy-on-write. + * We no longer need the old page or object. */ - first_m->reference = TRUE; - vm_page_unlock_queues(); + RELEASE_PAGE(m); /* - * We no longer need the old page or object. + * This check helps with marking the object as having a sequential pattern + * Normally we'll miss doing this below because this fault is about COW to + * the first_object i.e. bring page in from disk, push to object above but + * don't update the file object's sequential pattern. 
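 *
 * [Editorial sketch, not part of this change: the core of the copy-on-write
 *  step performed above, reduced to plain C. malloc()/memcpy() stand in for
 *  vm_page_grab()/vm_page_copy(); the real code also disconnects the original
 *  page from all pmaps and replaces the placeholder page in the top object.]
 */

#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

/* The faulting task wants to write a page that lives in a backing object:
 * give the top object its own private copy and leave the original alone. */
static void *cow_copy(const void *backing_page)
{
    void *copy = malloc(PAGE_SZ);

    if (copy == NULL)
        return NULL;                    /* maps to VM_FAULT_MEMORY_SHORTAGE */
    memcpy(copy, backing_page, PAGE_SZ);
    return copy;                        /* caller inserts this into the top object */
}

/*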
*/ + if (object->internal == FALSE) { + vm_fault_is_sequential(object, offset, fault_info->behavior); + } - PAGE_WAKEUP_DONE(m); vm_object_paging_end(object); vm_object_unlock(object); - if (type_of_fault) - *type_of_fault = DBG_COW_FAULT; - VM_STAT(cow_faults++); + my_fault = DBG_COW_FAULT; + VM_STAT_INCR(cow_faults); + DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL); current_task()->cow_faults++; + object = first_object; offset = first_offset; vm_object_lock(object); + /* + * get rid of the place holder + * page that we soldered in earlier + */ VM_PAGE_FREE(first_m); first_m = VM_PAGE_NULL; + + /* + * and replace it with the + * page we just copied into + */ assert(copy_m->busy); vm_page_insert(copy_m, object, offset); - m = copy_m; + SET_PAGE_DIRTY(copy_m, TRUE); + m = copy_m; /* - * Now that we've gotten the copy out of the - * way, let's try to collapse the top object. - * But we have to play ugly games with - * paging_in_progress to do that... + * Now that we've gotten the copy out of the + * way, let's try to collapse the top object. + * But we have to play ugly games with + * paging_in_progress to do that... */ - vm_object_paging_end(object); - vm_object_collapse(object, offset); + vm_object_collapse(object, offset, TRUE); vm_object_paging_begin(object); - } - else { + } else *protection &= (~VM_PROT_WRITE); - } } - /* - * Now check whether the page needs to be pushed into the - * copy object. The use of asymmetric copy on write for - * shared temporary objects means that we may do two copies to - * satisfy the fault; one above to get the page from a - * shadowed object, and one here to push it into the copy. + * Now check whether the page needs to be pushed into the + * copy object. The use of asymmetric copy on write for + * shared temporary objects means that we may do two copies to + * satisfy the fault; one above to get the page from a + * shadowed object, and one here to push it into the copy. */ + try_failed_count = 0; - while ((copy_object = first_object->copy) != VM_OBJECT_NULL && - (m!= VM_PAGE_NULL)) { + while ((copy_object = first_object->copy) != VM_OBJECT_NULL) { vm_object_offset_t copy_offset; vm_page_t copy_m; @@ -1707,72 +2176,76 @@ no_clustering: dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif /* - * If the page is being written, but hasn't been - * copied to the copy-object, we have to copy it there. + * If the page is being written, but hasn't been + * copied to the copy-object, we have to copy it there. */ - if ((fault_type & VM_PROT_WRITE) == 0) { *protection &= ~VM_PROT_WRITE; break; } /* - * If the page was guaranteed to be resident, - * we must have already performed the copy. + * If the page was guaranteed to be resident, + * we must have already performed the copy. */ - if (must_be_resident) break; /* - * Try to get the lock on the copy_object. + * Try to get the lock on the copy_object. */ if (!vm_object_lock_try(copy_object)) { - vm_object_unlock(object); - mutex_pause(); /* wait a bit */ + vm_object_unlock(object); + try_failed_count++; + mutex_pause(try_failed_count); /* wait a bit */ vm_object_lock(object); + continue; } + try_failed_count = 0; /* - * Make another reference to the copy-object, - * to keep it from disappearing during the - * copy. + * Make another reference to the copy-object, + * to keep it from disappearing during the + * copy. 
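 *
 * [Editorial sketch, not part of this change: the trylock-and-back-off used
 *  above when taking the copy object's lock while the faulted object is
 *  already locked. pthread calls and sched_yield() stand in for
 *  vm_object_lock_try() and mutex_pause(); the point is never to block on
 *  the second lock while holding the first.]
 */

#include <pthread.h>
#include <sched.h>

/* Acquire copy_lock while obj_lock is already held, without risking a
 * lock-order deadlock: on contention, drop obj_lock, pause, and retry.
 * On return the caller holds both locks. */
static void lock_copy_object(pthread_mutex_t *obj_lock, pthread_mutex_t *copy_lock)
{
    while (pthread_mutex_trylock(copy_lock) != 0) {
        pthread_mutex_unlock(obj_lock); /* let the other thread make progress */
        sched_yield();                  /* stand-in for mutex_pause()          */
        pthread_mutex_lock(obj_lock);   /* re-take our lock and try again      */
    }
}

/*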
*/ - assert(copy_object->ref_count > 0); - copy_object->ref_count++; - VM_OBJ_RES_INCR(copy_object); + vm_object_reference_locked(copy_object); /* - * Does the page exist in the copy? + * Does the page exist in the copy? */ - copy_offset = first_offset - copy_object->shadow_offset; - if (copy_object->size <= copy_offset) + copy_offset = first_offset - copy_object->vo_shadow_offset; + + if (copy_object->vo_size <= copy_offset) /* * Copy object doesn't cover this page -- do nothing. */ ; - else if ((copy_m = - vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { - /* Page currently exists in the copy object */ + else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { + /* + * Page currently exists in the copy object + */ if (copy_m->busy) { /* - * If the page is being brought - * in, wait for it and then retry. + * If the page is being brought + * in, wait for it and then retry. */ RELEASE_PAGE(m); - /* take an extra ref so object won't die */ - assert(copy_object->ref_count > 0); - copy_object->ref_count++; - vm_object_res_reference(copy_object); + + /* + * take an extra ref so object won't die + */ + vm_object_reference_locked(copy_object); vm_object_unlock(copy_object); vm_fault_cleanup(object, first_m); counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(copy_object); assert(copy_object->ref_count > 0); VM_OBJ_RES_DECR(copy_object); + vm_object_lock_assert_exclusive(copy_object); copy_object->ref_count--; assert(copy_object->ref_count > 0); copy_m = vm_page_lookup(copy_object, copy_offset); @@ -1784,15 +2257,18 @@ no_clustering: */ if (copy_m != VM_PAGE_NULL && copy_m->busy) { PAGE_ASSERT_WAIT(copy_m, interruptible); + vm_object_unlock(copy_object); wait_result = thread_block(THREAD_CONTINUE_NULL); vm_object_deallocate(copy_object); + goto backoff; } else { vm_object_unlock(copy_object); vm_object_deallocate(copy_object); thread_interrupt_level(interruptible_state); - return VM_FAULT_RETRY; + + return (VM_FAULT_RETRY); } } } @@ -1807,301 +2283,1082 @@ no_clustering: * We must copy the page to the copy object. */ - /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. - */ + if (vm_backing_store_low) { + /* + * we are protecting the system from + * backing store exhaustion. If so + * sleep unless we are privileged. 
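 *
 * [Editorial sketch, not part of this change: the backing-store-low gate
 *  restated. The flags and wait callback are hypothetical stand-ins for
 *  vm_backing_store_low, VM_BACKING_STORE_PRIV and assert_wait(); the real
 *  code releases the page and all fault state before sleeping, then returns
 *  VM_FAULT_RETRY so the fault is redriven from scratch.]
 */

#include <stdbool.h>

enum fault_disposition { FAULT_PROCEED, FAULT_RETRY_LATER };

/* Unprivileged tasks must not consume the last of the backing store:
 * make them wait until space is available, then redo the fault. */
static enum fault_disposition
backing_store_gate(bool backing_store_low, bool task_is_privileged,
                   void (*sleep_until_space)(void))
{
    if (backing_store_low && !task_is_privileged) {
        sleep_until_space();            /* blocks uninterruptibly in the kernel */
        return FAULT_RETRY_LATER;
    }
    return FAULT_PROCEED;
}

/*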
+ */ + if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { + assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); - if(vm_backing_store_low) { - if(!(current_task()->priv_flags - & VM_BACKING_STORE_PRIV)) { - assert_wait((event_t) - &vm_backing_store_low, - THREAD_UNINT); RELEASE_PAGE(m); VM_OBJ_RES_DECR(copy_object); + vm_object_lock_assert_exclusive(copy_object); copy_object->ref_count--; assert(copy_object->ref_count > 0); + vm_object_unlock(copy_object); vm_fault_cleanup(object, first_m); thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level( - interruptible_state); - return(VM_FAULT_RETRY); + thread_interrupt_level(interruptible_state); + + return (VM_FAULT_RETRY); } } - /* - * Allocate a page for the copy + * Allocate a page for the copy */ copy_m = vm_page_alloc(copy_object, copy_offset); + if (copy_m == VM_PAGE_NULL) { RELEASE_PAGE(m); + VM_OBJ_RES_DECR(copy_object); + vm_object_lock_assert_exclusive(copy_object); copy_object->ref_count--; assert(copy_object->ref_count > 0); + vm_object_unlock(copy_object); vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return(VM_FAULT_MEMORY_SHORTAGE); - } + return (VM_FAULT_MEMORY_SHORTAGE); + } /* - * Must copy page into copy-object. + * Must copy page into copy-object. */ - vm_page_copy(m, copy_m); /* - * If the old page was in use by any users - * of the copy-object, it must be removed - * from all pmaps. (We can't know which - * pmaps use it.) + * If the old page was in use by any users + * of the copy-object, it must be removed + * from all pmaps. (We can't know which + * pmaps use it.) */ + if (m->pmapped) + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); - vm_page_lock_queues(); - assert(!m->cleaning); - pmap_disconnect(m->phys_page); - copy_m->dirty = TRUE; - vm_page_unlock_queues(); - + if (m->clustered) { + VM_PAGE_COUNT_AS_PAGEIN(m); + VM_PAGE_CONSUME_CLUSTERED(m); + } /* - * If there's a pager, then immediately - * page out this page, using the "initialize" - * option. Else, we use the copy. + * If there's a pager, then immediately + * page out this page, using the "initialize" + * option. Else, we use the copy. */ + if ((!copy_object->pager_ready) + || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT + ) { - if -#if MACH_PAGEMAP - ((!copy_object->pager_created) || - vm_external_state_get( - copy_object->existence_map, copy_offset) - == VM_EXTERNAL_STATE_ABSENT) -#else - (!copy_object->pager_created) -#endif - { - vm_page_lock_queues(); + vm_page_lockspin_queues(); + assert(!m->cleaning); vm_page_activate(copy_m); vm_page_unlock_queues(); + + SET_PAGE_DIRTY(copy_m, TRUE); PAGE_WAKEUP_DONE(copy_m); - } - else { + + } else { + assert(copy_m->busy == TRUE); + assert(!m->cleaning); /* - * The page is already ready for pageout: - * not on pageout queues and busy. - * Unlock everything except the - * copy_object itself. + * dirty is protected by the object lock */ + SET_PAGE_DIRTY(copy_m, TRUE); + /* + * The page is already ready for pageout: + * not on pageout queues and busy. + * Unlock everything except the + * copy_object itself. + */ vm_object_unlock(object); /* - * Write the page to the copy-object, - * flushing it from the kernel. + * Write the page to the copy-object, + * flushing it from the kernel. */ - vm_pageout_initialize_page(copy_m); /* - * Since the pageout may have - * temporarily dropped the - * copy_object's lock, we - * check whether we'll have - * to deallocate the hard way. 
+ * Since the pageout may have + * temporarily dropped the + * copy_object's lock, we + * check whether we'll have + * to deallocate the hard way. */ - - if ((copy_object->shadow != object) || - (copy_object->ref_count == 1)) { + if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) { vm_object_unlock(copy_object); vm_object_deallocate(copy_object); vm_object_lock(object); + continue; } - /* - * Pick back up the old object's - * lock. [It is safe to do so, - * since it must be deeper in the - * object tree.] + * Pick back up the old object's + * lock. [It is safe to do so, + * since it must be deeper in the + * object tree.] */ - vm_object_lock(object); } /* - * Because we're pushing a page upward - * in the object tree, we must restart - * any faults that are waiting here. - * [Note that this is an expansion of - * PAGE_WAKEUP that uses the THREAD_RESTART - * wait result]. Can't turn off the page's - * busy bit because we're not done with it. + * Because we're pushing a page upward + * in the object tree, we must restart + * any faults that are waiting here. + * [Note that this is an expansion of + * PAGE_WAKEUP that uses the THREAD_RESTART + * wait result]. Can't turn off the page's + * busy bit because we're not done with it. */ - if (m->wanted) { m->wanted = FALSE; - thread_wakeup_with_result((event_t) m, - THREAD_RESTART); + thread_wakeup_with_result((event_t) m, THREAD_RESTART); } } - /* - * The reference count on copy_object must be - * at least 2: one for our extra reference, - * and at least one from the outside world - * (we checked that when we last locked - * copy_object). + * The reference count on copy_object must be + * at least 2: one for our extra reference, + * and at least one from the outside world + * (we checked that when we last locked + * copy_object). */ + vm_object_lock_assert_exclusive(copy_object); copy_object->ref_count--; assert(copy_object->ref_count > 0); + VM_OBJ_RES_DECR(copy_object); vm_object_unlock(copy_object); break; } +done: *result_page = m; *top_page = first_m; XPR(XPR_VM_FAULT, "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", - (integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0); - /* - * If the page can be written, assume that it will be. - * [Earlier, we restrict the permission to allow write - * access only if the fault so required, so we don't - * mark read-only data as dirty.] 
- */ + object, offset, m, first_m, 0); + if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == object); - if(m != VM_PAGE_NULL) { -#if !VM_FAULT_STATIC_CONFIG - if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) - m->dirty = TRUE; -#endif - if (vm_page_deactivate_behind) - vm_fault_deactivate_behind(object, offset, behavior); - } else { - vm_object_unlock(object); - } - thread_interrupt_level(interruptible_state); + retval = VM_FAULT_SUCCESS; -#if TRACEFAULTPAGE - dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ -#endif - return(VM_FAULT_SUCCESS); + if (my_fault == DBG_PAGEIN_FAULT) { -#if 0 - block_and_backoff: - vm_fault_cleanup(object, first_m); + VM_PAGE_COUNT_AS_PAGEIN(m); - counter(c_vm_fault_page_block_backoff_kernel++); - thread_block(THREAD_CONTINUE_NULL); + if (object->internal) + my_fault = DBG_PAGEIND_FAULT; + else + my_fault = DBG_PAGEINV_FAULT; + + /* + * evaluate access pattern and update state + * vm_fault_deactivate_behind depends on the + * state being up to date + */ + vm_fault_is_sequential(object, offset, fault_info->behavior); + + vm_fault_deactivate_behind(object, offset, fault_info->behavior); + } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) { + + VM_STAT_INCR(decompressions); + } + if (type_of_fault) + *type_of_fault = my_fault; + } else { + retval = VM_FAULT_SUCCESS_NO_VM_PAGE; + assert(first_m == VM_PAGE_NULL); + assert(object == first_object); + } + + thread_interrupt_level(interruptible_state); + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ #endif + return retval; - backoff: +backoff: thread_interrupt_level(interruptible_state); + if (wait_result == THREAD_INTERRUPTED) - return VM_FAULT_INTERRUPTED; - return VM_FAULT_RETRY; + return (VM_FAULT_INTERRUPTED); + return (VM_FAULT_RETRY); #undef RELEASE_PAGE } + + /* - * Routine: vm_fault_tws_insert - * Purpose: - * Add fault information to the task working set. - * Implementation: - * We always insert the base object/offset pair - * rather the actual object/offset. - * Assumptions: - * Map and real_map locked. - * Object locked and referenced. - * Returns: - * TRUE if startup file should be written. - * With object locked and still referenced. - * But we may drop the object lock temporarily. + * CODE SIGNING: + * When soft faulting a page, we have to validate the page if: + * 1. the page is being mapped in user space + * 2. the page hasn't already been found to be "tainted" + * 3. the page belongs to a code-signed object + * 4. the page has not been validated yet or has been mapped for write. */ -static boolean_t -vm_fault_tws_insert( - vm_map_t map, - vm_map_t real_map, - vm_map_offset_t vaddr, - vm_object_t object, - vm_object_offset_t offset) +#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \ + ((pmap) != kernel_pmap /*1*/ && \ + !(page)->cs_tainted /*2*/ && \ + (page_obj)->code_signed /*3*/ && \ + (!(page)->cs_validated || (page)->wpmapped /*4*/)) + + +/* + * page queue lock must NOT be held + * m->object must be locked + * + * NOTE: m->object could be locked "shared" only if we are called + * from vm_fault() as part of a soft fault. If so, we must be + * careful not to modify the VM object in any way that is not + * legal under a shared lock... 
+ */ +extern int panic_on_cs_killed; +extern int proc_selfpid(void); +extern char *proc_name_address(void *p); +unsigned long cs_enter_tainted_rejected = 0; +unsigned long cs_enter_tainted_accepted = 0; +kern_return_t +vm_fault_enter(vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t prot, + vm_prot_t caller_prot, + boolean_t wired, + boolean_t change_wiring, + boolean_t no_cache, + boolean_t cs_bypass, + __unused int user_tag, + int pmap_options, + boolean_t *need_retry, + int *type_of_fault) { - tws_hash_line_t line; - task_t task; - kern_return_t kr; - boolean_t result = FALSE; - - /* Avoid possible map lock deadlock issues */ - if (map == kernel_map || map == kalloc_map || - real_map == kernel_map || real_map == kalloc_map) - return result; - - task = current_task(); - if (task->dynamic_working_set != 0) { - vm_object_t base_object; - vm_object_t base_shadow; - vm_object_offset_t base_offset; - base_object = object; - base_offset = offset; - while ((base_shadow = base_object->shadow)) { - vm_object_lock(base_shadow); - vm_object_unlock(base_object); - base_offset += - base_object->shadow_offset; - base_object = base_shadow; + kern_return_t kr, pe_result; + boolean_t previously_pmapped = m->pmapped; + boolean_t must_disconnect = 0; + boolean_t map_is_switched, map_is_switch_protected; + int cs_enforcement_enabled; + vm_prot_t fault_type; + vm_object_t object; + + fault_type = change_wiring ? VM_PROT_NONE : caller_prot; + object = VM_PAGE_OBJECT(m); + + vm_object_lock_assert_held(object); + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); + + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + assert(m->fictitious); + return KERN_SUCCESS; + } + + if (*type_of_fault == DBG_ZERO_FILL_FAULT) { + + vm_object_lock_assert_exclusive(object); + + } else if ((fault_type & VM_PROT_WRITE) == 0 && !m->wpmapped) { + /* + * This is not a "write" fault, so we + * might not have taken the object lock + * exclusively and we might not be able + * to update the "wpmapped" bit in + * vm_fault_enter(). + * Let's just grant read access to + * the page for now and we'll + * soft-fault again if we need write + * access later... + */ + prot &= ~VM_PROT_WRITE; + } + if (m->pmapped == FALSE) { + + if (m->clustered) { + if (*type_of_fault == DBG_CACHE_HIT_FAULT) { + /* + * found it in the cache, but this + * is the first fault-in of the page (m->pmapped == FALSE) + * so it must have come in as part of + * a cluster... account 1 pagein against it + */ + if (object->internal) + *type_of_fault = DBG_PAGEIND_FAULT; + else + *type_of_fault = DBG_PAGEINV_FAULT; + + VM_PAGE_COUNT_AS_PAGEIN(m); + } + VM_PAGE_CONSUME_CLUSTERED(m); } - kr = tws_lookup( - task->dynamic_working_set, - base_offset, base_object, - &line); - if (kr == KERN_OPERATION_TIMED_OUT){ - result = TRUE; - if (base_object != object) { - vm_object_unlock(base_object); - vm_object_lock(object); + } + + if (*type_of_fault != DBG_COW_FAULT) { + DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); + + if (pmap == kernel_pmap) { + DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); + } + } + + /* Validate code signature if necessary. 
*/ + if (VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) { + vm_object_lock_assert_exclusive(object); + + if (m->cs_validated) { + vm_cs_revalidates++; + } + + /* VM map is locked, so 1 ref will remain on VM object - + * so no harm if vm_page_validate_cs drops the object lock */ + vm_page_validate_cs(m); + } + +#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/) +#define page_nx(m) ((m)->cs_nx) + + map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && + (pmap == vm_map_pmap(current_thread()->map))); + map_is_switch_protected = current_thread()->map->switch_protect; + + /* If the map is switched, and is switch-protected, we must protect + * some pages from being write-faulted: immutable pages because by + * definition they may not be written, and executable pages because that + * would provide a way to inject unsigned code. + * If the page is immutable, we can simply return. However, we can't + * immediately determine whether a page is executable anywhere. But, + * we can disconnect it everywhere and remove the executable protection + * from the current map. We do that below right before we do the + * PMAP_ENTER. + */ + cs_enforcement_enabled = cs_enforcement(NULL); + + if(cs_enforcement_enabled && map_is_switched && + map_is_switch_protected && page_immutable(m, prot) && + (prot & VM_PROT_WRITE)) + { + return KERN_CODESIGN_ERROR; + } + + if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) { + if (cs_debug) + printf("page marked to be NX, not letting it be mapped EXEC\n"); + return KERN_CODESIGN_ERROR; + } + + if (cs_enforcement_enabled && + !m->cs_validated && + (prot & VM_PROT_EXECUTE) && + !(caller_prot & VM_PROT_EXECUTE)) { + /* + * FOURK PAGER: + * This page has not been validated and will not be + * allowed to be mapped for "execute". + * But the caller did not request "execute" access for this + * fault, so we should not raise a code-signing violation + * (and possibly kill the process) below. + * Instead, let's just remove the "execute" access request. + * + * This can happen on devices with a 4K page size if a 16K + * page contains a mix of signed&executable and + * unsigned&non-executable 4K pages, making the whole 16K + * mapping "executable". + */ + prot &= ~VM_PROT_EXECUTE; + } + + /* A page could be tainted, or pose a risk of being tainted later. + * Check whether the receiving process wants it, and make it feel + * the consequences (that hapens in cs_invalid_page()). + * For CS Enforcement, two other conditions will + * cause that page to be tainted as well: + * - pmapping an unsigned page executable - this means unsigned code; + * - writeable mapping of a validated page - the content of that page + * can be changed without the kernel noticing, therefore unsigned + * code can be created + */ + if (!cs_bypass && + (m->cs_tainted || + (cs_enforcement_enabled && + (/* The page is unsigned and wants to be executable */ + (!m->cs_validated && (prot & VM_PROT_EXECUTE)) || + /* The page should be immutable, but is in danger of being modified + * This is the case where we want policy from the code directory - + * is the page immutable or not? For now we have to assume that + * code pages will be immutable, data pages not. + * We'll assume a page is a code page if it has a code directory + * and we fault for execution. 
+ * That is good enough since if we faulted the code page for + * writing in another map before, it is wpmapped; if we fault + * it for writing in this map later it will also be faulted for executing + * at the same time; and if we fault for writing in another map + * later, we will disconnect it from this pmap so we'll notice + * the change. + */ + (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped)) + )) + )) + { + /* We will have a tainted page. Have to handle the special case + * of a switched map now. If the map is not switched, standard + * procedure applies - call cs_invalid_page(). + * If the map is switched, the real owner is invalid already. + * There is no point in invalidating the switching process since + * it will not be executing from the map. So we don't call + * cs_invalid_page() in that case. */ + boolean_t reject_page, cs_killed; + if(map_is_switched) { + assert(pmap==vm_map_pmap(current_thread()->map)); + assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); + reject_page = FALSE; + } else { + if (cs_debug > 5) + printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n", + object->code_signed ? "yes" : "no", + m->cs_validated ? "yes" : "no", + m->cs_tainted ? "yes" : "no", + m->wpmapped ? "yes" : "no", + m->slid ? "yes" : "no", + (int)prot); + reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed); + } + + if (reject_page) { + /* reject the invalid page: abort the page fault */ + int pid; + const char *procname; + task_t task; + vm_object_t file_object, shadow; + vm_object_offset_t file_offset; + char *pathname, *filename; + vm_size_t pathname_len, filename_len; + boolean_t truncated_path; +#define __PATH_MAX 1024 + struct timespec mtime, cs_mtime; + int shadow_depth; + os_reason_t codesigning_exit_reason = OS_REASON_NULL; + + kr = KERN_CODESIGN_ERROR; + cs_enter_tainted_rejected++; + + /* get process name and pid */ + procname = "?"; + task = current_task(); + pid = proc_selfpid(); + if (task->bsd_info != NULL) + procname = proc_name_address(task->bsd_info); + + /* get file's VM object */ + file_object = object; + file_offset = m->offset; + for (shadow = file_object->shadow, + shadow_depth = 0; + shadow != VM_OBJECT_NULL; + shadow = file_object->shadow, + shadow_depth++) { + vm_object_lock_shared(shadow); + if (file_object != object) { + vm_object_unlock(file_object); + } + file_offset += file_object->vo_shadow_offset; + file_object = shadow; } - } else if (kr != KERN_SUCCESS) { - if(base_object != object) - vm_object_reference_locked(base_object); - kr = tws_insert( - task->dynamic_working_set, - base_offset, base_object, - vaddr, real_map); - if(base_object != object) { - vm_object_unlock(base_object); - vm_object_deallocate(base_object); + + mtime.tv_sec = 0; + mtime.tv_nsec = 0; + cs_mtime.tv_sec = 0; + cs_mtime.tv_nsec = 0; + + /* get file's pathname and/or filename */ + pathname = NULL; + filename = NULL; + pathname_len = 0; + filename_len = 0; + truncated_path = FALSE; + /* no pager -> no file -> no pathname, use "" in that case */ + if (file_object->pager != NULL) { + pathname = (char *)kalloc(__PATH_MAX * 2); + if (pathname) { + pathname[0] = '\0'; + pathname_len = __PATH_MAX; + filename = pathname + pathname_len; + filename_len = __PATH_MAX; + } + vnode_pager_get_object_name(file_object->pager, + pathname, + pathname_len, + filename, + filename_len, + &truncated_path); + if (pathname) { + /* safety first... 
*/ + pathname[__PATH_MAX-1] = '\0'; + filename[__PATH_MAX-1] = '\0'; + } + vnode_pager_get_object_mtime(file_object->pager, + &mtime, + &cs_mtime); } - if(kr == KERN_NO_SPACE) { - if (base_object == object) - vm_object_unlock(object); - tws_expand_working_set( - task->dynamic_working_set, - TWS_HASH_LINE_COUNT, - FALSE); - if (base_object == object) - vm_object_lock(object); - } else if(kr == KERN_OPERATION_TIMED_OUT) { - result = TRUE; + printf("CODE SIGNING: process %d[%s]: " + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d " + "wpmapped:%d slid:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + m->cs_validated, + m->cs_tainted, + m->cs_nx, + m->wpmapped, + m->slid, + m->dirty, + shadow_depth); + + /* + * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page + * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the + * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler + * will deal with the segmentation fault. + */ + if (cs_killed) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, + pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); + + codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE); + if (codesigning_exit_reason == NULL) { + printf("vm_fault_enter: failed to allocate codesigning exit reason\n"); + } else { + mach_vm_address_t data_addr = 0; + struct codesigning_exit_reason_info *ceri = NULL; + uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri)); + + if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) { + printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); + } else { + if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, + EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { + ceri = (struct codesigning_exit_reason_info *)data_addr; + static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname)); + + ceri->ceri_virt_addr = vaddr; + ceri->ceri_file_offset = file_offset; + if (pathname) + strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname)); + else + ceri->ceri_pathname[0] = '\0'; + if (filename) + strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename)); + else + ceri->ceri_filename[0] = '\0'; + ceri->ceri_path_truncated = (truncated_path); + ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec; + ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec; + ceri->ceri_page_modtime_secs = mtime.tv_sec; + ceri->ceri_page_modtime_nsecs = mtime.tv_nsec; + ceri->ceri_object_codesigned = (object->code_signed); + ceri->ceri_page_codesig_validated = (m->cs_validated); + ceri->ceri_page_codesig_tainted = (m->cs_tainted); + ceri->ceri_page_codesig_nx = (m->cs_nx); + ceri->ceri_page_wpmapped = (m->wpmapped); + ceri->ceri_page_slid = (m->slid); + ceri->ceri_page_dirty = (m->dirty); + 
ceri->ceri_page_shadow_depth = shadow_depth; + } else { +#if DEBUG || DEVELOPMENT + panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason"); +#else + printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n"); +#endif /* DEBUG || DEVELOPMENT */ + /* Free the buffer */ + os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0); + } + } + } + + set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE); } - if(base_object != object) - vm_object_lock(object); - } else if (base_object != object) { - vm_object_unlock(base_object); + if (panic_on_cs_killed && + object->object_slid) { + panic("CODE SIGNING: process %d[%s]: " + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d" + "wpmapped:%d slid:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + m->cs_validated, + m->cs_tainted, + m->cs_nx, + m->wpmapped, + m->slid, + m->dirty, + shadow_depth); + } + + if (file_object != object) { + vm_object_unlock(file_object); + } + if (pathname_len != 0) { + kfree(pathname, __PATH_MAX * 2); + pathname = NULL; + filename = NULL; + } + } else { + /* proceed with the invalid page */ + kr = KERN_SUCCESS; + if (!m->cs_validated && + !object->code_signed) { + /* + * This page has not been (fully) validated but + * does not belong to a code-signed object + * so it should not be forcefully considered + * as tainted. + * We're just concerned about it here because + * we've been asked to "execute" it but that + * does not mean that it should cause other + * accesses to fail. + * This happens when a debugger sets a + * breakpoint and we then execute code in + * that page. Marking the page as "tainted" + * would cause any inspection tool ("leaks", + * "vmmap", "CrashReporter", ...) to get killed + * due to code-signing violation on that page, + * even though they're just reading it and not + * executing from it. + */ + } else { + /* + * Page might have been tainted before or not; + * now it definitively is. If the page wasn't + * tainted, we must disconnect it from all + * pmaps later, to force existing mappings + * through that code path for re-consideration + * of the validity of that page. + */ + must_disconnect = !m->cs_tainted; + m->cs_tainted = TRUE; + } + cs_enter_tainted_accepted++; + } + if (kr != KERN_SUCCESS) { + if (cs_debug) { + printf("CODESIGNING: vm_fault_enter(0x%llx): " + "*** INVALID PAGE ***\n", + (long long)vaddr); + } +#if !SECURE_KERNEL + if (cs_enforcement_panic) { + panic("CODESIGNING: panicking on invalid page\n"); + } +#endif + } + + } else { + /* proceed with the valid page */ + kr = KERN_SUCCESS; + } + + boolean_t page_queues_locked = FALSE; +#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (! page_queues_locked) { \ + page_queues_locked = TRUE; \ + vm_page_lockspin_queues(); \ + } \ +MACRO_END +#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (page_queues_locked) { \ + page_queues_locked = FALSE; \ + vm_page_unlock_queues(); \ + } \ +MACRO_END + + /* + * Hold queues lock to manipulate + * the page queues. Change wiring + * case is obvious. 
+ */ + assert((m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object); + +#if CONFIG_BACKGROUND_QUEUE + vm_page_update_background_state(m); +#endif + if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) { + /* + * Compressor pages are neither wired + * nor pageable and should never change. + */ + assert(object == compressor_object); + } else if (change_wiring) { + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + + if (wired) { + if (kr == KERN_SUCCESS) { + vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); + } + } else { + vm_page_unwire(m, TRUE); + } + /* we keep the page queues lock, if we need it later */ + + } else { + if (object->internal == TRUE) { + /* + * don't allow anonymous pages on + * the speculative queues + */ + no_cache = FALSE; + } + if (kr != KERN_SUCCESS) { + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + vm_page_deactivate(m); + /* we keep the page queues lock, if we need it later */ + } else if (((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) || + (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) || + (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) || + ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) && + !VM_PAGE_WIRED(m)) { + + if (vm_page_local_q && + (*type_of_fault == DBG_COW_FAULT || + *type_of_fault == DBG_ZERO_FILL_FAULT) ) { + struct vpl *lq; + uint32_t lid; + + assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q); + + __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); + vm_object_lock_assert_exclusive(object); + + /* + * we got a local queue to stuff this + * new page on... + * its safe to manipulate local and + * local_id at this point since we're + * behind an exclusive object lock and + * the page is not on any global queue. + * + * we'll use the current cpu number to + * select the queue note that we don't + * need to disable preemption... we're + * going to be behind the local queue's + * lock to do the real work + */ + lid = cpu_number(); + + lq = &vm_page_local_q[lid].vpl_un.vpl; + + VPL_LOCK(&lq->vpl_lock); + + vm_page_check_pageable_safe(m); + vm_page_queue_enter(&lq->vpl_queue, m, + vm_page_t, pageq); + m->vm_page_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q; + m->local_id = lid; + lq->vpl_count++; + + if (object->internal) + lq->vpl_internal_count++; + else + lq->vpl_external_count++; + + VPL_UNLOCK(&lq->vpl_lock); + + if (lq->vpl_count > vm_page_local_q_soft_limit) + { + /* + * we're beyond the soft limit + * for the local queue + * vm_page_reactivate_local will + * 'try' to take the global page + * queue lock... if it can't + * that's ok... we'll let the + * queue continue to grow up + * to the hard limit... at that + * point we'll wait for the + * lock... once we've got the + * lock, we'll transfer all of + * the pages from the local + * queue to the global active + * queue + */ + vm_page_reactivate_local(lid, FALSE, FALSE); + } + } else { + + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + + /* + * test again now that we hold the + * page queue lock + */ + if (!VM_PAGE_WIRED(m)) { + if (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + vm_page_queues_remove(m, FALSE); + + vm_pageout_cleaned_reactivated++; + vm_pageout_cleaned_fault_reactivated++; + } + + if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m) || + no_cache) { + /* + * If this is a no_cache mapping + * and the page has never been + * mapped before or was + * previously a no_cache page, + * then we want to leave pages + * in the speculative state so + * that they can be readily + * recycled if free memory runs + * low. Otherwise the page is + * activated as normal. 
+ */ + + if (no_cache && + (!previously_pmapped || + m->no_cache)) { + m->no_cache = TRUE; + + if (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q) + vm_page_speculate(m, FALSE); + + } else if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m)) { + vm_page_activate(m); + } + } + } + /* we keep the page queues lock, if we need it later */ + } + } + } + /* we're done with the page queues lock, if we ever took it */ + __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); + + + /* If we have a KERN_SUCCESS from the previous checks, we either have + * a good page, or a tainted page that has been accepted by the process. + * In both cases the page will be entered into the pmap. + * If the page is writeable, we need to disconnect it from other pmaps + * now so those processes can take note. + */ + if (kr == KERN_SUCCESS) { + /* + * NOTE: we may only hold the vm_object lock SHARED + * at this point, so we need the phys_page lock to + * properly serialize updating the pmapped and + * xpmapped bits + */ + if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + /* + * go ahead and take the opportunity + * to set 'pmapped' here so that we don't + * need to grab this lock a 2nd time + * just below + */ + m->pmapped = TRUE; + + if (!m->xpmapped) { + + m->xpmapped = TRUE; + + pmap_unlock_phys_page(phys_page); + + if (!object->internal) + OSAddAtomic(1, &vm_page_xpmapped_external_count); + + if (object->internal && + object->pager != NULL) { + /* + * This page could have been + * uncompressed by the + * compressor pager and its + * contents might be only in + * the data cache. + * Since it's being mapped for + * "execute" for the fist time, + * make sure the icache is in + * sync. + */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + pmap_sync_page_data_phys(phys_page); + } + } else + pmap_unlock_phys_page(phys_page); + } else { + if (m->pmapped == FALSE) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + m->pmapped = TRUE; + pmap_unlock_phys_page(phys_page); + } + } + if (vm_page_is_slideable(m)) { + boolean_t was_busy = m->busy; + + vm_object_lock_assert_exclusive(object); + + m->busy = TRUE; + kr = vm_page_slide(m, 0); + assert(m->busy); + if(!was_busy) { + PAGE_WAKEUP_DONE(m); + } + if (kr != KERN_SUCCESS) { + /* + * This page has not been slid correctly, + * do not do the pmap_enter() ! + * Let vm_fault_enter() return the error + * so the caller can fail the fault. + */ + goto after_the_pmap_enter; + } + } + + if (fault_type & VM_PROT_WRITE) { + + if (m->wpmapped == FALSE) { + vm_object_lock_assert_exclusive(object); + if (!object->internal && object->pager) { + task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager)); + } + m->wpmapped = TRUE; + } + if (must_disconnect) { + /* + * We can only get here + * because of the CSE logic + */ + assert(cs_enforcement_enabled); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + /* + * If we are faulting for a write, we can clear + * the execute bit - that will ensure the page is + * checked again before being executable, which + * protects against a map switch. + * This only happens the first time the page + * gets tainted, so we won't get stuck here + * to make an already writeable page executable. 
+ */ + if (!cs_bypass){ + prot &= ~VM_PROT_EXECUTE; + } + } + } + assert(VM_PAGE_OBJECT(m) == object); + + /* Prevent a deadlock by not + * holding the object lock if we need to wait for a page in + * pmap_enter() - */ + PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0, + wired, + pmap_options | PMAP_OPTIONS_NOWAIT, + pe_result); + + if(pe_result == KERN_RESOURCE_SHORTAGE) { + + if (need_retry) { + /* + * this will be non-null in the case where we hold the lock + * on the top-object in this chain... we can't just drop + * the lock on the object we're inserting the page into + * and recall the PMAP_ENTER since we can still cause + * a deadlock if one of the critical paths tries to + * acquire the lock on the top-object and we're blocked + * in PMAP_ENTER waiting for memory... our only recourse + * is to deal with it at a higher level where we can + * drop both locks. + */ + *need_retry = TRUE; + vm_pmap_enter_retried++; + goto after_the_pmap_enter; + } + /* The nonblocking version of pmap_enter did not succeed. + * and we don't need to drop other locks and retry + * at the level above us, so + * use the blocking version instead. Requires marking + * the page busy and unlocking the object */ + boolean_t was_busy = m->busy; + + vm_object_lock_assert_exclusive(object); + + m->busy = TRUE; + vm_object_unlock(object); + + PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, + 0, wired, + pmap_options, pe_result); + + assert(VM_PAGE_OBJECT(m) == object); + + /* Take the object lock again. */ vm_object_lock(object); + + /* If the page was busy, someone else will wake it up. + * Otherwise, we have to do it now. */ + assert(m->busy); + if(!was_busy) { + PAGE_WAKEUP_DONE(m); + } + vm_pmap_enter_blocked++; } } - return result; + +after_the_pmap_enter: + return kr; +} + +void +vm_pre_fault(vm_map_offset_t vaddr) +{ + if (pmap_find_phys(current_map()->pmap, vaddr) == 0) { + + vm_fault(current_map(), /* map */ + vaddr, /* vaddr */ + VM_PROT_READ, /* fault_type */ + FALSE, /* change_wiring */ + THREAD_UNINT, /* interruptible */ + NULL, /* caller_pmap */ + 0 /* caller_pmap_addr */); + } } + /* * Routine: vm_fault * Purpose: @@ -2117,6 +3374,11 @@ vm_fault_tws_insert( */ extern int _map_enter_debug; +extern uint64_t get_current_unique_pid(void); + +unsigned long vm_fault_collapse_total = 0; +unsigned long vm_fault_collapse_skipped = 0; + kern_return_t vm_fault( @@ -2127,59 +3389,111 @@ vm_fault( int interruptible, pmap_t caller_pmap, vm_map_offset_t caller_pmap_addr) +{ + return vm_fault_internal(map, vaddr, fault_type, change_wiring, + interruptible, caller_pmap, caller_pmap_addr, + NULL); +} + + +kern_return_t +vm_fault_internal( + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t caller_prot, + boolean_t change_wiring, + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr, + ppnum_t *physpage_p) { vm_map_version_t version; /* Map version for verificiation */ boolean_t wired; /* Should mapping be wired down? 
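Above, the mapping is first attempted with PMAP_OPTIONS_NOWAIT while the object lock is held; on KERN_RESOURCE_SHORTAGE the code either punts to a caller that can drop more locks (need_retry) or marks the page busy, drops the object lock, and repeats the enter in blocking mode. A compact sketch of that fallback, with mock_pmap_enter and wakeup_page_waiters as invented placeholders:

#include <pthread.h>
#include <stdbool.h>

enum kr { KR_SUCCESS, KR_RESOURCE_SHORTAGE };

static enum kr mock_pmap_enter(bool may_block) { (void)may_block; return KR_SUCCESS; }
static void    wakeup_page_waiters(void) { }

enum kr
enter_mapping(pthread_mutex_t *object_lock, bool *busy, bool *need_retry)
{
        enum kr kr = mock_pmap_enter(false);       /* non-blocking attempt   */

        if (kr != KR_RESOURCE_SHORTAGE)
                return kr;

        if (need_retry != NULL) {
                /* a lock above us is still held: let the caller unwind both */
                *need_retry = true;
                return kr;
        }

        /* safe to block: pin the page, drop the object lock, enter, relock  */
        bool was_busy = *busy;
        *busy = true;
        pthread_mutex_unlock(object_lock);

        kr = mock_pmap_enter(true);                /* blocking attempt       */

        pthread_mutex_lock(object_lock);
        if (!was_busy) {
                *busy = false;
                wakeup_page_waiters();             /* PAGE_WAKEUP_DONE()-ish */
        }
        return kr;
}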
*/ vm_object_t object; /* Top-level object */ vm_object_offset_t offset; /* Top-level offset */ vm_prot_t prot; /* Protection for mapping */ - vm_behavior_t behavior; /* Expected paging behavior */ - vm_map_offset_t lo_offset, hi_offset; vm_object_t old_copy_object; /* Saved copy object */ vm_page_t result_page; /* Result of vm_fault_page */ vm_page_t top_page; /* Placeholder page */ kern_return_t kr; - register vm_page_t m; /* Fast access to result_page */ - kern_return_t error_code = 0; /* page error reasons */ - register + kern_return_t error_code; vm_object_t cur_object; - register + vm_object_t m_object = NULL; vm_object_offset_t cur_offset; vm_page_t cur_m; vm_object_t new_object; int type_of_fault; + pmap_t pmap; + boolean_t interruptible_state; vm_map_t real_map = map; vm_map_t original_map = map; - pmap_t pmap = NULL; - boolean_t interruptible_state; - unsigned int cache_attr; - int write_startup_file = 0; - boolean_t need_activation; - vm_prot_t full_fault_type; - - if (get_preemption_level() != 0) - return (KERN_FAILURE); + vm_prot_t fault_type; + vm_prot_t original_fault_type; + struct vm_object_fault_info fault_info; + boolean_t need_collapse = FALSE; + boolean_t need_retry = FALSE; + boolean_t *need_retry_ptr = NULL; + int object_lock_type = 0; + int cur_object_lock_type; + vm_object_t top_object = VM_OBJECT_NULL; + int throttle_delay; + int compressed_count_delta; + int grab_options; + vm_map_offset_t trace_vaddr; + vm_map_offset_t trace_real_vaddr; +#if DEVELOPMENT || DEBUG + vm_map_offset_t real_vaddr; + + real_vaddr = vaddr; +#endif /* DEVELOPMENT || DEBUG */ + trace_real_vaddr = vaddr; + vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + + if (map == kernel_map) { + trace_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(vaddr); + trace_real_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(trace_real_vaddr); + } else { + trace_vaddr = vaddr; + } - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START, - vaddr, - 0, - 0, + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + (map == kernel_map), 0, 0); - /* at present we do not fully check for execute permission */ - /* we generally treat it is read except in certain device */ - /* memory settings */ - full_fault_type = fault_type; - if(fault_type & VM_PROT_EXECUTE) { - fault_type &= ~VM_PROT_EXECUTE; - fault_type |= VM_PROT_READ; - } + if (get_preemption_level() != 0) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + KERN_FAILURE, + 0, + 0); + return (KERN_FAILURE); + } + interruptible_state = thread_interrupt_level(interruptible); + fault_type = (change_wiring ? VM_PROT_NONE : caller_prot); + + VM_STAT_INCR(faults); + current_task()->faults++; + original_fault_type = fault_type; + + if (fault_type & VM_PROT_WRITE) + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + else + object_lock_type = OBJECT_LOCK_SHARED; + + cur_object_lock_type = OBJECT_LOCK_SHARED; + +RetryFault: /* * assume we will hit a page in the cache * otherwise, explicitly override with @@ -2187,38 +3501,55 @@ vm_fault( */ type_of_fault = DBG_CACHE_HIT_FAULT; - VM_STAT(faults++); - current_task()->faults++; - - RetryFault: ; - /* * Find the backing store object and offset into * it to begin the search. 
*/ + fault_type = original_fault_type; map = original_map; vm_map_lock_read(map); - kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version, - &object, &offset, - &prot, &wired, - &behavior, &lo_offset, &hi_offset, &real_map); -//if (_map_enter_debug)printf("vm_map_lookup_locked(map=0x%x, addr=0x%llx, prot=%d wired=%d) = %d\n", map, vaddr, prot, wired, kr); + kr = vm_map_lookup_locked(&map, vaddr, fault_type, + object_lock_type, &version, + &object, &offset, &prot, &wired, + &fault_info, + &real_map); - pmap = real_map->pmap; if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); goto done; } + pmap = real_map->pmap; + fault_info.interruptible = interruptible; + fault_info.stealth = FALSE; + fault_info.io_sync = FALSE; + fault_info.mark_zf_absent = FALSE; + fault_info.batch_pmap_op = FALSE; /* - * If the page is wired, we must fault for the current protection - * value, to avoid further faults. + * If the page is wired, we must fault for the current protection + * value, to avoid further faults. */ - - if (wired) + if (wired) { fault_type = prot | VM_PROT_WRITE; + /* + * since we're treating this fault as a 'write' + * we must hold the top object lock exclusively + */ + if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly + * take the lock exclusively + */ + vm_object_lock(object); + } + } + } #if VM_FAULT_CLASSIFY /* @@ -2259,34 +3590,123 @@ vm_fault( */ - /* - * If this page is to be inserted in a copy delay object - * for writing, and if the object has a copy, then the - * copy delay strategy is implemented in the slow fault page. - */ - if (object->copy_strategy != MEMORY_OBJECT_COPY_DELAY || - object->copy == VM_OBJECT_NULL || - (fault_type & VM_PROT_WRITE) == 0) { - cur_object = object; - cur_offset = offset; + /* + * If this page is to be inserted in a copy delay object + * for writing, and if the object has a copy, then the + * copy delay strategy is implemented in the slow fault page. + */ + if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY && + object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) + goto handle_copy_delay; + + cur_object = object; + cur_offset = offset; + + grab_options = 0; +#if CONFIG_SECLUDED_MEMORY + if (object->can_grab_secluded) { + grab_options |= VM_PAGE_GRAB_SECLUDED; + } +#endif /* CONFIG_SECLUDED_MEMORY */ + + while (TRUE) { + if (!cur_object->pager_created && + cur_object->phys_contiguous) /* superpage */ + break; + + if (cur_object->blocked_access) { + /* + * Access to this VM object has been blocked. + * Let the slow path handle it. + */ + break; + } + + m = vm_page_lookup(cur_object, cur_offset); + m_object = NULL; + + if (m != VM_PAGE_NULL) { + m_object = cur_object; + + if (m->busy) { + wait_result_t result; + + /* + * in order to do the PAGE_ASSERT_WAIT, we must + * have object that 'm' belongs to locked exclusively + */ + if (object != cur_object) { + + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(cur_object) == FALSE) { + /* + * couldn't upgrade so go do a full retry + * immediately since we can no longer be + * certain about cur_object (since we + * don't hold a reference on it)... 
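The wired-fault case above needs the top object lock held exclusively, so a shared hold is upgraded with vm_object_lock_upgrade(), and a failed upgrade falls back to an ordinary exclusive lock plus a re-lookup. The sketch below imitates that contract with a pthread_rwlock_t; it is only an approximation, since pthreads has no atomic read-to-write upgrade, whereas the kernel primitive reports failure exactly when the lock had to be dropped.

#include <pthread.h>
#include <stdbool.h>

/*
 * Returns true if the write lock was obtained without blocking, in which
 * case state observed under the shared hold is still trustworthy.
 * Returns false if we had to wait: the caller must re-validate (re-lookup
 * the page) or restart the fault before relying on anything it cached.
 */
bool
upgrade_or_relock(pthread_rwlock_t *lk)
{
        pthread_rwlock_unlock(lk);                /* release the read hold   */
        if (pthread_rwlock_trywrlock(lk) == 0)
                return true;                      /* "upgrade" won the race  */
        pthread_rwlock_wrlock(lk);                /* had to block: stale     */
        return false;
}

Later in this function the same pattern is applied to lower objects in the shadow chain, where no reference is held; there a failed upgrade cannot be patched up locally and the whole fault is retried instead.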
+ * first drop the top object lock + */ + vm_object_unlock(object); + + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } + } + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly take the lock + * exclusively and go relookup the page since we + * will have dropped the object lock and + * a different thread could have inserted + * a page at this offset + * no need for a full retry since we're + * at the top level of the object chain + */ + vm_object_lock(object); + + continue; + } + } + if ((m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) { + /* + * m->busy == TRUE and the object is locked exclusively + * if m->pageout_queue == TRUE after we acquire the + * queues lock, we are guaranteed that it is stable on + * the pageout queue and therefore reclaimable + * + * NOTE: this is only true for the internal pageout queue + * in the compressor world + */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + + vm_page_lock_queues(); - while (TRUE) { - m = vm_page_lookup(cur_object, cur_offset); - if (m != VM_PAGE_NULL) { - if (m->busy) { - wait_result_t result; + if (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q) { + vm_pageout_throttle_up(m); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(m); + goto reclaimed_from_pageout; + } + vm_page_unlock_queues(); + } if (object != cur_object) - vm_object_unlock(object); + vm_object_unlock(object); vm_map_unlock_read(map); if (real_map != map) vm_map_unlock(real_map); -#if !VM_FAULT_STATIC_CONFIG - if (!vm_fault_interruptible) - interruptible = THREAD_UNINT; -#endif result = PAGE_ASSERT_WAIT(m, interruptible); vm_object_unlock(cur_object); @@ -2302,29 +3722,123 @@ vm_fault( kr = KERN_ABORTED; goto done; } - if (m->unusual && (m->error || m->restart || m->private - || m->absent || (fault_type & m->page_lock))) { +reclaimed_from_pageout: + if (m->laundry) { + if (object != cur_object) { + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + vm_object_unlock(object); + vm_object_unlock(cur_object); + + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } + + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly take the lock + * exclusively and go relookup the page since we + * will have dropped the object lock and + * a different thread could have inserted + * a page at this offset + * no need for a full retry since we're + * at the top level of the object chain + */ + vm_object_lock(object); + + continue; + } + } + vm_pageout_steal_laundry(m, FALSE); + } + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + /* + * Guard page: let the slow path deal with it + */ + break; + } + if (m->unusual && (m->error || m->restart || m->private || m->absent)) { /* - * Unusual case. Give up. + * Unusual case... let the slow path deal with it */ break; } + if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) { + if (object != cur_object) + vm_object_unlock(object); + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + vm_object_unlock(cur_object); + kr = KERN_MEMORY_ERROR; + goto done; + } if (m->encrypted) { /* * ENCRYPTED SWAP: * We've soft-faulted (because it's not in the page * table) on an encrypted page. 
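When the page is busy, the code above drops the map lock and both object locks, queues an assert-wait on the page, and blocks; an interruptible wait may instead return KERN_ABORTED. A condition-variable rendering of that handshake (page_wait, wait_for_page and release_page are illustrative names; unlike the kernel path, the mutex here is released atomically by pthread_cond_wait rather than explicitly before blocking):

#include <pthread.h>
#include <stdbool.h>

struct page_wait {
        pthread_mutex_t lock;      /* stands in for the object lock          */
        pthread_cond_t  wakeup;    /* stands in for PAGE_ASSERT_WAIT/WAKEUP  */
        bool            busy;
};

/* caller holds pw->lock and has found the page busy */
void
wait_for_page(struct page_wait *pw)
{
        while (pw->busy)
                pthread_cond_wait(&pw->wakeup, &pw->lock);
        pthread_mutex_unlock(&pw->lock);
        /* ... then redo the lookup from scratch, as with goto RetryFault    */
}

/* the thread that made the page busy calls this when it is done with it */
void
release_page(struct page_wait *pw)
{
        pthread_mutex_lock(&pw->lock);
        pw->busy = false;
        pthread_cond_broadcast(&pw->wakeup);
        pthread_mutex_unlock(&pw->lock);
}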
- * Keep the page "busy" so that noone messes with + * Keep the page "busy" so that no one messes with * it during the decryption. * Release the extra locks we're holding, keep only * the page's VM object lock. + * + * in order to set 'busy' on 'm', we must + * have object that 'm' belongs to locked exclusively */ - m->busy = TRUE; - if (object != cur_object) { + if (object != cur_object) { vm_object_unlock(object); + + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(cur_object) == FALSE) { + /* + * couldn't upgrade so go do a full retry + * immediately since we've already dropped + * the top object lock associated with this page + * and the current one got dropped due to the + * failed upgrade... the state is no longer valid + */ + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } + } + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly take the lock + * exclusively and go relookup the page since we + * will have dropped the object lock and + * a different thread could have inserted + * a page at this offset + * no need for a full retry since we're + * at the top level of the object chain + */ + vm_object_lock(object); + + continue; + } } + m->busy = TRUE; + vm_map_unlock_read(map); if (real_map != map) vm_map_unlock(real_map); @@ -2333,8 +3847,8 @@ vm_fault( assert(m->busy); PAGE_WAKEUP_DONE(m); - vm_object_unlock(m->object); + vm_object_unlock(cur_object); /* * Retry from the top, in case anything * changed while we were decrypting... @@ -2343,6 +3857,76 @@ vm_fault( } ASSERT_PAGE_DECRYPTED(m); + if(vm_page_is_slideable(m)) { + /* + * We might need to slide this page, and so, + * we want to hold the VM object exclusively. + */ + if (object != cur_object) { + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + vm_object_unlock(object); + vm_object_unlock(cur_object); + + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + vm_object_unlock(object); + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + vm_map_unlock_read(map); + goto RetryFault; + } + } + assert(m_object == VM_PAGE_OBJECT(m)); + + if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) || + (physpage_p != NULL && (prot & VM_PROT_WRITE))) { +upgrade_for_validation: + /* + * We might need to validate this page + * against its code signature, so we + * want to hold the VM object exclusively. 
+ */ + if (object != cur_object) { + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + vm_object_unlock(object); + vm_object_unlock(cur_object); + + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } + + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly take the lock + * exclusively and go relookup the page since we + * will have dropped the object lock and + * a different thread could have inserted + * a page at this offset + * no need for a full retry since we're + * at the top level of the object chain + */ + vm_object_lock(object); + + continue; + } + } + } /* * Two cases of map in faults: * - At top level w/o copy object. @@ -2350,665 +3934,980 @@ vm_fault( * --> must disallow write. */ - if (object == cur_object && - object->copy == VM_OBJECT_NULL) - goto FastMapInFault; + if (object == cur_object && object->copy == VM_OBJECT_NULL) { + + goto FastPmapEnter; + } if ((fault_type & VM_PROT_WRITE) == 0) { - boolean_t sequential; prot &= ~VM_PROT_WRITE; - /* - * Set up to map the page ... - * mark the page busy, drop - * locks and take a paging reference - * on the object with the page. - */ - if (object != cur_object) { - vm_object_unlock(object); + /* + * We still need to hold the top object + * lock here to prevent a race between + * a read fault (taking only "shared" + * locks) and a write fault (taking + * an "exclusive" lock on the top + * object. + * Otherwise, as soon as we release the + * top lock, the write fault could + * proceed and actually complete before + * the read fault, and the copied page's + * translation could then be overwritten + * by the read fault's translation for + * the original page. + * + * Let's just record what the top object + * is and we'll release it later. + */ + top_object = object; + + /* + * switch to the object that has the new page + */ object = cur_object; + object_lock_type = cur_object_lock_type; } -FastMapInFault: - m->busy = TRUE; - - vm_object_paging_begin(object); - FastPmapEnter: + assert(m_object == VM_PAGE_OBJECT(m)); + /* - * Check a couple of global reasons to - * be conservative about write access. - * Then do the pmap_enter. + * prepare for the pmap_enter... + * object and map are both locked + * m contains valid data + * object == m->object + * cur_object == NULL or it's been unlocked + * no paging references on either object or cur_object */ -#if !VM_FAULT_STATIC_CONFIG - if (vm_fault_dirty_handling -#if MACH_KDB - || db_watchpoint_list -#endif - && (fault_type & VM_PROT_WRITE) == 0) - prot &= ~VM_PROT_WRITE; -#else /* STATIC_CONFIG */ -#if MACH_KDB - if (db_watchpoint_list - && (fault_type & VM_PROT_WRITE) == 0) - prot &= ~VM_PROT_WRITE; -#endif /* MACH_KDB */ -#endif /* STATIC_CONFIG */ - cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; - - sequential = FALSE; - need_activation = FALSE; - - if (m->no_isync == TRUE) { - m->no_isync = FALSE; - pmap_sync_page_data_phys(m->phys_page); - - if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) { - /* - * found it in the cache, but this - * is the first fault-in of the page (no_isync == TRUE) - * so it must have come in as part of - * a cluster... 
account 1 pagein against it - */ - VM_STAT(pageins++); - current_task()->pageins++; - type_of_fault = DBG_PAGEIN_FAULT; - sequential = TRUE; - } - if (m->clustered) - need_activation = TRUE; - - } else if (cache_attr != VM_WIMG_DEFAULT) { - pmap_sync_page_attributes_phys(m->phys_page); - } - - if(caller_pmap) { - PMAP_ENTER(caller_pmap, - caller_pmap_addr, m, - prot, cache_attr, wired); + if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) + need_retry_ptr = &need_retry; + else + need_retry_ptr = NULL; + + if (caller_pmap) { + kr = vm_fault_enter(m, + caller_pmap, + caller_pmap_addr, + prot, + caller_prot, + wired, + change_wiring, + fault_info.no_cache, + fault_info.cs_bypass, + fault_info.user_tag, + fault_info.pmap_options, + need_retry_ptr, + &type_of_fault); } else { - PMAP_ENTER(pmap, vaddr, m, - prot, cache_attr, wired); + kr = vm_fault_enter(m, + pmap, + vaddr, + prot, + caller_prot, + wired, + change_wiring, + fault_info.no_cache, + fault_info.cs_bypass, + fault_info.user_tag, + fault_info.pmap_options, + need_retry_ptr, + &type_of_fault); } +#if DEVELOPMENT || DEBUG + { + int event_code = 0; - /* - * Hold queues lock to manipulate - * the page queues. Change wiring - * case is obvious. In soft ref bits - * case activate page only if it fell - * off paging queues, otherwise just - * activate it if it's inactive. - * - * NOTE: original vm_fault code will - * move active page to back of active - * queue. This code doesn't. - */ - vm_page_lock_queues(); + if (m_object->internal) + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + else if (m_object->object_slid) + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + else + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); - if (m->clustered) { - vm_pagein_cluster_used++; - m->clustered = FALSE; - } - m->reference = TRUE; + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); - if (change_wiring) { - if (wired) - vm_page_wire(m); - else - vm_page_unwire(m); - } -#if VM_FAULT_STATIC_CONFIG - else { - if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active)) - vm_page_activate(m); - } -#else - else if (software_reference_bits) { - if (!m->active && !m->inactive) - vm_page_activate(m); - } - else if (!m->active) { - vm_page_activate(m); + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); } #endif - vm_page_unlock_queues(); + if (kr == KERN_SUCCESS && + physpage_p != NULL) { + /* for vm_map_wire_and_extract() */ + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (prot & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive(m_object); + m->dirty = TRUE; + } + } - /* - * That's it, clean up and return. - */ - PAGE_WAKEUP_DONE(m); + if (top_object != VM_OBJECT_NULL) { + /* + * It's safe to drop the top object + * now that we've done our + * vm_fault_enter(). Any other fault + * in progress for that virtual + * address will either find our page + * and translation or put in a new page + * and translation. + */ + vm_object_unlock(top_object); + top_object = VM_OBJECT_NULL; + } - sequential = (sequential && vm_page_deactivate_behind) ? 
- vm_fault_deactivate_behind(object, cur_offset, behavior) : - FALSE; + if (need_collapse == TRUE) + vm_object_collapse(object, offset, TRUE); + + if (need_retry == FALSE && + (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { + /* + * evaluate access pattern and update state + * vm_fault_deactivate_behind depends on the + * state being up to date + */ + vm_fault_is_sequential(object, cur_offset, fault_info.behavior); - /* - * Add non-sequential pages to the working set. - * The sequential pages will be brought in through - * normal clustering behavior. - */ - if (!sequential && !object->private) { - write_startup_file = - vm_fault_tws_insert(map, real_map, vaddr, - object, cur_offset); + vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior); } + /* + * That's it, clean up and return. + */ + if (m->busy) + PAGE_WAKEUP_DONE(m); - vm_object_paging_end(object); vm_object_unlock(object); vm_map_unlock_read(map); - if(real_map != map) + if (real_map != map) vm_map_unlock(real_map); - if(write_startup_file) - tws_send_startup_info(current_task()); - - thread_interrupt_level(interruptible_state); - - - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END, - vaddr, - type_of_fault & 0xff, - KERN_SUCCESS, - type_of_fault >> 8, - 0); - - return KERN_SUCCESS; + if (need_retry == TRUE) { + /* + * vm_fault_enter couldn't complete the PMAP_ENTER... + * at this point we don't hold any locks so it's safe + * to ask the pmap layer to expand the page table to + * accommodate this mapping... once expanded, we'll + * re-drive the fault which should result in vm_fault_enter + * being able to successfully enter the mapping this time around + */ + (void)pmap_enter_options( + pmap, vaddr, 0, 0, 0, 0, 0, + PMAP_OPTIONS_NOENTER, NULL); + + need_retry = FALSE; + goto RetryFault; + } + goto done; } - /* - * Copy on write fault. If objects match, then - * object->copy must not be NULL (else control - * would be in previous code block), and we - * have a potential push into the copy object - * with which we won't cope here. + * COPY ON WRITE FAULT */ - - if (cur_object == object) + assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE); + + /* + * If objects match, then + * object->copy must not be NULL (else control + * would be in previous code block), and we + * have a potential push into the copy object + * with which we can't cope with here. + */ + if (cur_object == object) { + /* + * must take the slow path to + * deal with the copy push + */ break; + } + + /* + * This is now a shadow based copy on write + * fault -- it requires a copy up the shadow + * chain. + */ + assert(m_object == VM_PAGE_OBJECT(m)); + + if ((cur_object_lock_type == OBJECT_LOCK_SHARED) && + VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) { + goto upgrade_for_validation; + } + /* - * This is now a shadow based copy on write - * fault -- it requires a copy up the shadow - * chain. + * Allocate a page in the original top level + * object. Give up if allocate fails. Also + * need to remember current page, as it's the + * source of the copy. * - * Allocate a page in the original top level - * object. Give up if allocate fails. Also - * need to remember current page, as it's the - * source of the copy. + * at this point we hold locks on both + * object and cur_object... 
no need to take + * paging refs or mark pages BUSY since + * we don't drop either object lock until + * the page has been copied and inserted */ cur_m = m; - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); + m_object = NULL; + if (m == VM_PAGE_NULL) { + /* + * no free page currently available... + * must take the slow path + */ break; } /* - * Now do the copy. Mark the source busy - * and take out paging references on both - * objects. + * Now do the copy. Mark the source page busy... * * NOTE: This code holds the map lock across * the page copy. */ - - cur_m->busy = TRUE; vm_page_copy(cur_m, m); vm_page_insert(m, object, offset); + m_object = object; + SET_PAGE_DIRTY(m, FALSE); - vm_object_paging_begin(cur_object); - vm_object_paging_begin(object); + /* + * Now cope with the source page and object + */ + if (object->ref_count > 1 && cur_m->pmapped) + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); + + if (cur_m->clustered) { + VM_PAGE_COUNT_AS_PAGEIN(cur_m); + VM_PAGE_CONSUME_CLUSTERED(cur_m); + vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior); + } + need_collapse = TRUE; + + if (!cur_object->internal && + cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) { + /* + * The object from which we've just + * copied a page is most probably backed + * by a vnode. We don't want to waste too + * much time trying to collapse the VM objects + * and create a bottleneck when several tasks + * map the same file. + */ + if (cur_object->copy == object) { + /* + * Shared mapping or no COW yet. + * We can never collapse a copy + * object into its backing object. + */ + need_collapse = FALSE; + } else if (cur_object->copy == object->shadow && + object->shadow->resident_page_count == 0) { + /* + * Shared mapping after a COW occurred. + */ + need_collapse = FALSE; + } + } + vm_object_unlock(cur_object); + + if (need_collapse == FALSE) + vm_fault_collapse_skipped++; + vm_fault_collapse_total++; type_of_fault = DBG_COW_FAULT; - VM_STAT(cow_faults++); + VM_STAT_INCR(cow_faults); + DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL); current_task()->cow_faults++; + goto FastPmapEnter; + + } else { /* - * Now cope with the source page and object - * If the top object has a ref count of 1 - * then no other map can access it, and hence - * it's not necessary to do the pmap_disconnect. + * No page at cur_object, cur_offset... m == NULL */ + if (cur_object->pager_created) { + int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; + + if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) { + int my_fault_type; + int c_flags = C_DONT_BLOCK; + boolean_t insert_cur_object = FALSE; + + /* + * May have to talk to a pager... 
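The copy-on-write fast path above grabs a free page, copies the contents of the page found down the shadow chain into it, inserts the copy at the top object, marks it dirty, and, when the source object has other references and the source page is in a pmap, disconnects the source page. A stripped-down model of that push (vpage, alloc_page and cow_push are invented helpers; the real path also handles the pmap disconnect, cluster accounting and the collapse decision):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

struct vpage {
        uint8_t data[PAGE_SZ];
        bool    dirty;
};

static struct vpage *
alloc_page(void)                       /* roughly: vm_page_grab_options()    */
{
        return calloc(1, sizeof(struct vpage));
}

/*
 * Give the faulting task a private copy of "src"; the source page stays
 * in the backing object and keeps serving every other mapping.
 */
struct vpage *
cow_push(const struct vpage *src)
{
        struct vpage *dst = alloc_page();

        if (dst == NULL)
                return NULL;           /* no free page: take the slow path   */
        memcpy(dst->data, src->data, PAGE_SZ);
        dst->dirty = true;             /* SET_PAGE_DIRTY(): this copy never
                                        * goes back to the original pager    */
        return dst;
}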
+ * if so, take the slow path by + * doing a 'break' from the while (TRUE) loop + * + * external_state will only be set to VM_EXTERNAL_STATE_EXISTS + * if the compressor is active and the page exists there + */ + if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) + break; - vm_page_lock_queues(); - vm_page_deactivate(cur_m); - m->dirty = TRUE; - pmap_disconnect(cur_m->phys_page); - vm_page_unlock_queues(); + if (map == kernel_map || real_map == kernel_map) { + /* + * can't call into the compressor with the kernel_map + * lock held, since the compressor may try to operate + * on the kernel map in order to return an empty c_segment + */ + break; + } + if (object != cur_object) { + if (fault_type & VM_PROT_WRITE) + c_flags |= C_KEEP; + else + insert_cur_object = TRUE; + } + if (insert_cur_object == TRUE) { - PAGE_WAKEUP_DONE(cur_m); - vm_object_paging_end(cur_object); - vm_object_unlock(cur_object); + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - /* - * Slight hack to call vm_object collapse - * and then reuse common map in code. - * note that the object lock was taken above. - */ - - vm_object_paging_end(object); - vm_object_collapse(object, offset); - vm_object_paging_begin(object); + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; - goto FastPmapEnter; - } - else { + if (vm_object_lock_upgrade(cur_object) == FALSE) { + /* + * couldn't upgrade so go do a full retry + * immediately since we can no longer be + * certain about cur_object (since we + * don't hold a reference on it)... + * first drop the top object lock + */ + vm_object_unlock(object); - /* - * No page at cur_object, cur_offset - */ + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); - if (cur_object->pager_created) { + goto RetryFault; + } + } + } else if (object_lock_type == OBJECT_LOCK_SHARED) { + + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (object != cur_object) { + /* + * we can't go for the upgrade on the top + * lock since the upgrade may block waiting + * for readers to drain... since we hold + * cur_object locked at this point, waiting + * for the readers to drain would represent + * a lock order inversion since the lock order + * for objects is the reference order in the + * shadown chain + */ + vm_object_unlock(object); + vm_object_unlock(cur_object); - /* - * Have to talk to the pager. Give up. - */ - break; - } + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + goto RetryFault; + } + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly take the lock + * exclusively and go relookup the page since we + * will have dropped the object lock and + * a different thread could have inserted + * a page at this offset + * no need for a full retry since we're + * at the top level of the object chain + */ + vm_object_lock(object); + + continue; + } + } + m = vm_page_grab_options(grab_options); + m_object = NULL; - if (cur_object->shadow == VM_OBJECT_NULL) { + if (m == VM_PAGE_NULL) { + /* + * no free page currently available... + * must take the slow path + */ + break; + } - if (cur_object->shadow_severed) { - vm_object_paging_end(object); - vm_object_unlock(object); - vm_map_unlock_read(map); - if(real_map != map) - vm_map_unlock(real_map); + /* + * The object is and remains locked + * so no need to take a + * "paging_in_progress" reference. 
+ */ + boolean_t shared_lock; + if ((object == cur_object && + object_lock_type == OBJECT_LOCK_EXCLUSIVE) || + (object != cur_object && + cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { + shared_lock = FALSE; + } else { + shared_lock = TRUE; + } - if(write_startup_file) - tws_send_startup_info( - current_task()); + kr = vm_compressor_pager_get( + cur_object->pager, + (cur_offset + + cur_object->paging_offset), + VM_PAGE_GET_PHYS_PAGE(m), + &my_fault_type, + c_flags, + &compressed_count_delta); + + vm_compressor_pager_count( + cur_object->pager, + compressed_count_delta, + shared_lock, + cur_object); + + if (kr != KERN_SUCCESS) { + vm_page_release(m, FALSE); + m = VM_PAGE_NULL; + break; + } + m->dirty = TRUE; - thread_interrupt_level(interruptible_state); + /* + * If the object is purgeable, its + * owner's purgeable ledgers will be + * updated in vm_page_insert() but the + * page was also accounted for in a + * "compressed purgeable" ledger, so + * update that now. + */ + if (object != cur_object && + !insert_cur_object) { + /* + * We're not going to insert + * the decompressed page into + * the object it came from. + * + * We're dealing with a + * copy-on-write fault on + * "object". + * We're going to decompress + * the page directly into the + * target "object" while + * keepin the compressed + * page for "cur_object", so + * no ledger update in that + * case. + */ + } else if ((cur_object->purgable == + VM_PURGABLE_DENY) || + (cur_object->vo_purgeable_owner == + NULL)) { + /* + * "cur_object" is not purgeable + * or is not owned, so no + * purgeable ledgers to update. + */ + } else { + /* + * One less compressed + * purgeable page for + * cur_object's owner. + */ + vm_purgeable_compressed_update( + cur_object, + -1); + } - return KERN_MEMORY_ERROR; - } + if (insert_cur_object) { + vm_page_insert(m, cur_object, cur_offset); + m_object = cur_object; + } else { + vm_page_insert(m, object, offset); + m_object = object; + } + + if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { + /* + * If the page is not cacheable, + * we can't let its contents + * linger in the data cache + * after the decompression. + */ + pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m)); + } + + type_of_fault = my_fault_type; + VM_STAT_INCR(decompressions); + + if (cur_object != object) { + if (insert_cur_object) { + top_object = object; + /* + * switch to the object that has the new page + */ + object = cur_object; + object_lock_type = cur_object_lock_type; + } else { + vm_object_unlock(cur_object); + cur_object = object; + } + } + goto FastPmapEnter; + } /* - * Zero fill fault. Page gets - * filled in top object. Insert - * page, then drop any lower lock. - * Give up if no page. + * existence map present and indicates + * that the pager doesn't have this page */ - if (VM_PAGE_THROTTLED()) { - break; + } + if (cur_object->shadow == VM_OBJECT_NULL) { + /* + * Zero fill fault. Page gets + * inserted into the original object. + */ + if (cur_object->shadow_severed || + VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) || + cur_object == compressor_object || + cur_object == kernel_object || + cur_object == vm_submap_object) { + if (object != cur_object) + vm_object_unlock(cur_object); + vm_object_unlock(object); + + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + kr = KERN_MEMORY_ERROR; + goto done; + } + if (vm_backing_store_low) { + /* + * we are protecting the system from + * backing store exhaustion... 
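When the page is not resident but the compressor pager holds it, the code above grabs a free page, decompresses directly into it with vm_compressor_pager_get(), adjusts the compressed-page and purgeable accounting, and inserts the result either into the faulting object or, for a COW write, into the top object while keeping the compressed copy (C_KEEP) for the backing object. The toy sketch below only captures that last decision; cpage, vpage and fault_in_compressed are made-up types and the "decompression" is a trivial fill:

#include <stdbool.h>
#include <string.h>

#define PAGE_SZ 4096

struct cpage { unsigned char fill; };            /* pretend compressed page  */
struct vpage { unsigned char data[PAGE_SZ]; };   /* resident page            */

static void
decompress(const struct cpage *c, struct vpage *p)
{
        memset(p->data, c->fill, PAGE_SZ);       /* stand-in for the real
                                                  * decompressor             */
}

/*
 * cow_write == true models the object != cur_object, VM_PROT_WRITE case:
 * the decompressed data goes to the top object and the compressed copy is
 * kept, because the backing object still logically owns that data.
 */
struct vpage *
fault_in_compressed(const struct cpage *c, struct vpage *freepage,
    bool cow_write, bool *keep_compressed)
{
        decompress(c, freepage);
        *keep_compressed = cow_write;
        return freepage;
}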
+ * must take the slow path if we're + * not privileged + */ + if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) + break; + } + if (cur_object != object) { + vm_object_unlock(cur_object); + + cur_object = object; } + if (object_lock_type == OBJECT_LOCK_SHARED) { - /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. - */ - if(vm_backing_store_low) { - if(!(current_task()->priv_flags - & VM_BACKING_STORE_PRIV)) - break; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade so do a full retry on the fault + * since we dropped the object lock which + * could allow another thread to insert + * a page at this offset + */ + vm_map_unlock_read(map); + if (real_map != map) + vm_map_unlock(real_map); + + goto RetryFault; + } } m = vm_page_alloc(object, offset); + m_object = NULL; + if (m == VM_PAGE_NULL) { + /* + * no free page currently available... + * must take the slow path + */ break; } - /* - * This is a zero-fill or initial fill - * page fault. As such, we consider it - * undefined with respect to instruction - * execution. i.e. it is the responsibility - * of higher layers to call for an instruction - * sync after changing the contents and before - * sending a program into this area. We - * choose this approach for performance - */ - - m->no_isync = FALSE; - - if (cur_object != object) - vm_object_unlock(cur_object); - - vm_object_paging_begin(object); - vm_object_unlock(object); + m_object = object; /* - * Now zero fill page and map it. - * the page is probably going to - * be written soon, so don't bother - * to clear the modified bit + * Now zero fill page... + * the page is probably going to + * be written soon, so don't bother + * to clear the modified bit * - * NOTE: This code holds the map - * lock across the zero fill. + * NOTE: This code holds the map + * lock across the zero fill. */ - - if (!map->no_zero_fill) { - vm_page_zero_fill(m); - type_of_fault = DBG_ZERO_FILL_FAULT; - VM_STAT(zero_fill_count++); - } - vm_page_lock_queues(); - VM_PAGE_QUEUES_REMOVE(m); - - m->page_ticket = vm_page_ticket; - assert(!m->laundry); - assert(m->object != kernel_object); - assert(m->pageq.next == NULL && - m->pageq.prev == NULL); - if(m->object->size > 0x200000) { - m->zero_fill = TRUE; - /* depends on the queues lock */ - vm_zf_count += 1; - queue_enter(&vm_page_queue_zf, - m, vm_page_t, pageq); - } else { - queue_enter( - &vm_page_queue_inactive, - m, vm_page_t, pageq); - } - vm_page_ticket_roll++; - if(vm_page_ticket_roll == - VM_PAGE_TICKETS_IN_ROLL) { - vm_page_ticket_roll = 0; - if(vm_page_ticket == - VM_PAGE_TICKET_ROLL_IDS) - vm_page_ticket= 0; - else - vm_page_ticket++; - } - - m->inactive = TRUE; - vm_page_inactive_count++; - vm_page_unlock_queues(); - vm_object_lock(object); + type_of_fault = vm_fault_zero_page(m, map->no_zero_fill); goto FastPmapEnter; } + /* + * On to the next level in the shadow chain + */ + cur_offset += cur_object->vo_shadow_offset; + new_object = cur_object->shadow; /* - * On to the next level + * take the new_object's lock with the indicated state */ + if (cur_object_lock_type == OBJECT_LOCK_SHARED) + vm_object_lock_shared(new_object); + else + vm_object_lock(new_object); - cur_offset += cur_object->shadow_offset; - new_object = cur_object->shadow; - vm_object_lock(new_object); if (cur_object != object) vm_object_unlock(cur_object); + cur_object = new_object; continue; } } - /* - * Cleanup from fast fault failure. 
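At the end of a shadow chain with no pager, the fault above is a zero-fill: allocate a page in the object, zero it unless the map asked for no_zero_fill, and fall through to FastPmapEnter. A user-space outline of that case (zero_fill_fault and the fault_kind values are illustrative):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

enum fault_kind { FAULT_ZERO_FILL, FAULT_RETRY_SLOW };

enum fault_kind
zero_fill_fault(void **out_page, bool no_zero_fill)
{
        void *p = malloc(PAGE_SZ);

        if (p == NULL)
                return FAULT_RETRY_SLOW;   /* no free page: use the slow path */
        if (!no_zero_fill)
                memset(p, 0, PAGE_SZ);     /* contents are defined to be zero */
        *out_page = p;
        return FAULT_ZERO_FILL;            /* counted as DBG_ZERO_FILL_FAULT  */
}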
Drop any object - * lock other than original and drop map lock. + * Cleanup from fast fault failure. Drop any object + * lock other than original and drop map lock. */ - if (object != cur_object) vm_object_unlock(cur_object); + + /* + * must own the object lock exclusively at this point + */ + if (object_lock_type == OBJECT_LOCK_SHARED) { + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + + if (vm_object_lock_upgrade(object) == FALSE) { + /* + * couldn't upgrade, so explictly + * take the lock exclusively + * no need to retry the fault at this + * point since "vm_fault_page" will + * completely re-evaluate the state + */ + vm_object_lock(object); + } } - vm_map_unlock_read(map); - if(real_map != map) +handle_copy_delay: + vm_map_unlock_read(map); + if (real_map != map) vm_map_unlock(real_map); + assert(object != compressor_object); + assert(object != kernel_object); + assert(object != vm_submap_object); + /* - * Make a reference to this object to - * prevent its disposal while we are messing with - * it. Once we have the reference, the map is free - * to be diddled. Since objects reference their - * shadows (and copies), they will stay around as well. + * Make a reference to this object to + * prevent its disposal while we are messing with + * it. Once we have the reference, the map is free + * to be diddled. Since objects reference their + * shadows (and copies), they will stay around as well. */ - - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); + vm_object_reference_locked(object); vm_object_paging_begin(object); XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); - if (!object->private) { - write_startup_file = - vm_fault_tws_insert(map, real_map, vaddr, object, offset); - } + error_code = 0; + result_page = VM_PAGE_NULL; kr = vm_fault_page(object, offset, fault_type, (change_wiring && !wired), - interruptible, - lo_offset, hi_offset, behavior, + FALSE, /* page not looked up */ &prot, &result_page, &top_page, &type_of_fault, - &error_code, map->no_zero_fill, FALSE, map, vaddr); + &error_code, map->no_zero_fill, + FALSE, &fault_info); /* - * If we didn't succeed, lose the object reference immediately. + * if kr != VM_FAULT_SUCCESS, then the paging reference + * has been dropped and the object unlocked... the ref_count + * is still held + * + * if kr == VM_FAULT_SUCCESS, then the paging reference + * is still held along with the ref_count on the original object + * + * the object is returned locked with a paging reference + * + * if top_page != NULL, then it's BUSY and the + * object it belongs to has a paging reference + * but is returned unlocked */ - - if (kr != VM_FAULT_SUCCESS) + if (kr != VM_FAULT_SUCCESS && + kr != VM_FAULT_SUCCESS_NO_VM_PAGE) { + /* + * we didn't succeed, lose the object reference immediately. + */ vm_object_deallocate(object); - /* - * See why we failed, and take corrective action. - */ - - switch (kr) { - case VM_FAULT_SUCCESS: - break; + /* + * See why we failed, and take corrective action. + */ + switch (kr) { case VM_FAULT_MEMORY_SHORTAGE: if (vm_page_wait((change_wiring) ? 
THREAD_UNINT : THREAD_ABORTSAFE)) goto RetryFault; - /* fall thru */ + /* + * fall thru + */ case VM_FAULT_INTERRUPTED: kr = KERN_ABORTED; goto done; case VM_FAULT_RETRY: goto RetryFault; - case VM_FAULT_FICTITIOUS_SHORTAGE: - vm_page_more_fictitious(); - goto RetryFault; case VM_FAULT_MEMORY_ERROR: if (error_code) kr = error_code; else kr = KERN_MEMORY_ERROR; goto done; + default: + panic("vm_fault: unexpected error 0x%x from " + "vm_fault_page()\n", kr); + } } - m = result_page; + m_object = NULL; - if(m != VM_PAGE_NULL) { + if (m != VM_PAGE_NULL) { + m_object = VM_PAGE_OBJECT(m); assert((change_wiring && !wired) ? - (top_page == VM_PAGE_NULL) : - ((top_page == VM_PAGE_NULL) == (m->object == object))); + (top_page == VM_PAGE_NULL) : + ((top_page == VM_PAGE_NULL) == (m_object == object))); } /* - * How to clean up the result of vm_fault_page. This - * happens whether the mapping is entered or not. + * What to do with the resulting page from vm_fault_page + * if it doesn't get entered into the physical map: */ - -#define UNLOCK_AND_DEALLOCATE \ - MACRO_BEGIN \ - vm_fault_cleanup(m->object, top_page); \ - vm_object_deallocate(object); \ - MACRO_END - - /* - * What to do with the resulting page from vm_fault_page - * if it doesn't get entered into the physical map: - */ - #define RELEASE_PAGE(m) \ MACRO_BEGIN \ PAGE_WAKEUP_DONE(m); \ - vm_page_lock_queues(); \ - if (!m->active && !m->inactive) \ - vm_page_activate(m); \ - vm_page_unlock_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ + } \ MACRO_END /* - * We must verify that the maps have not changed - * since our last lookup. + * We must verify that the maps have not changed + * since our last lookup. */ - - if(m != VM_PAGE_NULL) { - old_copy_object = m->object->copy; - vm_object_unlock(m->object); + if (m != VM_PAGE_NULL) { + old_copy_object = m_object->copy; + vm_object_unlock(m_object); } else { old_copy_object = VM_OBJECT_NULL; + vm_object_unlock(object); } + + /* + * no object locks are held at this point + */ if ((map != original_map) || !vm_map_verify(map, &version)) { vm_object_t retry_object; vm_object_offset_t retry_offset; vm_prot_t retry_prot; /* - * To avoid trying to write_lock the map while another - * thread has it read_locked (in vm_map_pageable), we - * do not try for write permission. If the page is - * still writable, we will get write permission. If it - * is not, or has been marked needs_copy, we enter the - * mapping without write permission, and will merely - * take another fault. + * To avoid trying to write_lock the map while another + * thread has it read_locked (in vm_map_pageable), we + * do not try for write permission. If the page is + * still writable, we will get write permission. If it + * is not, or has been marked needs_copy, we enter the + * mapping without write permission, and will merely + * take another fault. 
*/ map = original_map; vm_map_lock_read(map); + kr = vm_map_lookup_locked(&map, vaddr, - fault_type & ~VM_PROT_WRITE, &version, - &retry_object, &retry_offset, &retry_prot, - &wired, &behavior, &lo_offset, &hi_offset, - &real_map); + fault_type & ~VM_PROT_WRITE, + OBJECT_LOCK_EXCLUSIVE, &version, + &retry_object, &retry_offset, &retry_prot, + &wired, + &fault_info, + &real_map); pmap = real_map->pmap; if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); - if(m != VM_PAGE_NULL) { - vm_object_lock(m->object); + + if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); + + /* + * retake the lock so that + * we can drop the paging reference + * in vm_fault_cleanup and do the + * PAGE_WAKEUP_DONE in RELEASE_PAGE + */ + vm_object_lock(m_object); + RELEASE_PAGE(m); - UNLOCK_AND_DEALLOCATE; + + vm_fault_cleanup(m_object, top_page); } else { - vm_object_deallocate(object); + /* + * retake the lock so that + * we can drop the paging reference + * in vm_fault_cleanup + */ + vm_object_lock(object); + + vm_fault_cleanup(object, top_page); } + vm_object_deallocate(object); + goto done; } - vm_object_unlock(retry_object); - if(m != VM_PAGE_NULL) { - vm_object_lock(m->object); - } else { - vm_object_lock(object); - } - if ((retry_object != object) || - (retry_offset != offset)) { + if ((retry_object != object) || (retry_offset != offset)) { + vm_map_unlock_read(map); - if(real_map != map) + if (real_map != map) vm_map_unlock(real_map); - if(m != VM_PAGE_NULL) { + + if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); + + /* + * retake the lock so that + * we can drop the paging reference + * in vm_fault_cleanup and do the + * PAGE_WAKEUP_DONE in RELEASE_PAGE + */ + vm_object_lock(m_object); + RELEASE_PAGE(m); - UNLOCK_AND_DEALLOCATE; + + vm_fault_cleanup(m_object, top_page); } else { - vm_object_deallocate(object); + /* + * retake the lock so that + * we can drop the paging reference + * in vm_fault_cleanup + */ + vm_object_lock(object); + + vm_fault_cleanup(object, top_page); } + vm_object_deallocate(object); + goto RetryFault; } - /* - * Check whether the protection has changed or the object - * has been copied while we left the map unlocked. + * Check whether the protection has changed or the object + * has been copied while we left the map unlocked. */ prot &= retry_prot; - if(m != VM_PAGE_NULL) { - vm_object_unlock(m->object); - } else { - vm_object_unlock(object); - } - } - if(m != VM_PAGE_NULL) { - vm_object_lock(m->object); - } else { - vm_object_lock(object); } + if (m != VM_PAGE_NULL) { + vm_object_lock(m_object); - /* - * If the copy object changed while the top-level object - * was unlocked, then we must take away write permission. - */ - - if(m != VM_PAGE_NULL) { - if (m->object->copy != old_copy_object) + if (m_object->copy != old_copy_object) { + /* + * The copy object changed while the top-level object + * was unlocked, so take away write permission. + */ prot &= ~VM_PROT_WRITE; - } + } + } else + vm_object_lock(object); /* - * If we want to wire down this page, but no longer have - * adequate permissions, we must start all over. + * If we want to wire down this page, but no longer have + * adequate permissions, we must start all over. 
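Because the map was unlocked while vm_fault_page() ran, everything learned from the first lookup may be stale: the code above re-verifies the map version, retries the whole fault if the object or offset changed, and intersects the protections (also dropping write permission if the copy object changed). A condensed model of that decision, with invented lookup/refault types:

#include <stdbool.h>
#include <stdint.h>

struct lookup {
        const void *object;
        uint64_t    offset;
        int         prot;
};

enum refault { KEEP_PAGE, RELEASE_AND_RETRY };

enum refault
revalidate(bool version_matches, const struct lookup *first,
    const struct lookup *retry, int *prot)
{
        if (version_matches)
                return KEEP_PAGE;              /* nothing moved under us      */
        if (first->object != retry->object || first->offset != retry->offset)
                return RELEASE_AND_RETRY;      /* translation changed: redo   */
        *prot &= retry->prot;                  /* permissions can only shrink */
        return KEEP_PAGE;
}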
*/ + if (wired && (fault_type != (prot | VM_PROT_WRITE))) { - if (wired && (fault_type != (prot|VM_PROT_WRITE))) { vm_map_verify_done(map, &version); - if(real_map != map) + if (real_map != map) vm_map_unlock(real_map); - if(m != VM_PAGE_NULL) { + + if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); + RELEASE_PAGE(m); - UNLOCK_AND_DEALLOCATE; - } else { - vm_object_deallocate(object); - } - goto RetryFault; - } - /* - * Put this page into the physical map. - * We had to do the unlock above because pmap_enter - * may cause other faults. The page may be on - * the pageout queues. If the pageout daemon comes - * across the page, it will remove it from the queues. - */ - need_activation = FALSE; + vm_fault_cleanup(m_object, top_page); + } else + vm_fault_cleanup(object, top_page); - if (m != VM_PAGE_NULL) { - if (m->no_isync == TRUE) { - pmap_sync_page_data_phys(m->phys_page); - - if ((type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) { - /* - * found it in the cache, but this - * is the first fault-in of the page (no_isync == TRUE) - * so it must have come in as part of - * a cluster... account 1 pagein against it - */ - VM_STAT(pageins++); - current_task()->pageins++; - - type_of_fault = DBG_PAGEIN_FAULT; - } - if (m->clustered) { - need_activation = TRUE; - } - m->no_isync = FALSE; - } - cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; + vm_object_deallocate(object); - if(caller_pmap) { - PMAP_ENTER(caller_pmap, - caller_pmap_addr, m, - prot, cache_attr, wired); + goto RetryFault; + } + if (m != VM_PAGE_NULL) { + /* + * Put this page into the physical map. + * We had to do the unlock above because pmap_enter + * may cause other faults. The page may be on + * the pageout queues. If the pageout daemon comes + * across the page, it will remove it from the queues. + */ + if (caller_pmap) { + kr = vm_fault_enter(m, + caller_pmap, + caller_pmap_addr, + prot, + caller_prot, + wired, + change_wiring, + fault_info.no_cache, + fault_info.cs_bypass, + fault_info.user_tag, + fault_info.pmap_options, + NULL, + &type_of_fault); } else { - PMAP_ENTER(pmap, vaddr, m, - prot, cache_attr, wired); + kr = vm_fault_enter(m, + pmap, + vaddr, + prot, + caller_prot, + wired, + change_wiring, + fault_info.no_cache, + fault_info.cs_bypass, + fault_info.user_tag, + fault_info.pmap_options, + NULL, + &type_of_fault); } + assert(VM_PAGE_OBJECT(m) == m_object); - /* - * Add working set information for private objects here. 
- */ - if (m->object->private) { - write_startup_file = - vm_fault_tws_insert(map, real_map, vaddr, - m->object, m->offset); +#if DEVELOPMENT || DEBUG + { + int event_code = 0; + + if (m_object->internal) + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + else if (m_object->object_slid) + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + else + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); + + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); + } +#endif + if (kr != KERN_SUCCESS) { + /* abort this page fault */ + vm_map_verify_done(map, &version); + if (real_map != map) + vm_map_unlock(real_map); + PAGE_WAKEUP_DONE(m); + vm_fault_cleanup(m_object, top_page); + vm_object_deallocate(object); + goto done; + } + if (physpage_p != NULL) { + /* for vm_map_wire_and_extract() */ + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (prot & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive(m_object); + m->dirty = TRUE; + } } } else { -#ifndef i386 vm_map_entry_t entry; vm_map_offset_t laddr; vm_map_offset_t ldelta, hdelta; @@ -3018,27 +4917,31 @@ FastPmapEnter: * in the object */ +#ifdef ppc /* While we do not worry about execution protection in */ /* general, certian pages may have instruction execution */ /* disallowed. We will check here, and if not allowed */ /* to execute, we return with a protection failure. */ - if((full_fault_type & VM_PROT_EXECUTE) && - (!pmap_eligible_for_execute((ppnum_t) - (object->shadow_offset >> 12)))) { + if ((fault_type & VM_PROT_EXECUTE) && + (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) { vm_map_verify_done(map, &version); - if(real_map != map) + + if (real_map != map) vm_map_unlock(real_map); + vm_fault_cleanup(object, top_page); vm_object_deallocate(object); + kr = KERN_PROTECTION_FAILURE; goto done; } +#endif /* ppc */ - if(real_map != map) { + if (real_map != map) vm_map_unlock(real_map); - } + if (original_map != map) { vm_map_unlock_read(map); vm_map_lock_read(original_map); @@ -3050,143 +4953,136 @@ FastPmapEnter: hdelta = 0xFFFFF000; ldelta = 0xFFFFF000; - - while(vm_map_lookup_entry(map, laddr, &entry)) { - if(ldelta > (laddr - entry->vme_start)) + while (vm_map_lookup_entry(map, laddr, &entry)) { + if (ldelta > (laddr - entry->vme_start)) ldelta = laddr - entry->vme_start; - if(hdelta > (entry->vme_end - laddr)) + if (hdelta > (entry->vme_end - laddr)) hdelta = entry->vme_end - laddr; - if(entry->is_sub_map) { + if (entry->is_sub_map) { - laddr = (laddr - entry->vme_start) - + entry->offset; - vm_map_lock_read(entry->object.sub_map); - if(map != real_map) + laddr = ((laddr - entry->vme_start) + + VME_OFFSET(entry)); + vm_map_lock_read(VME_SUBMAP(entry)); + + if (map != real_map) vm_map_unlock_read(map); - if(entry->use_pmap) { + if (entry->use_pmap) { vm_map_unlock_read(real_map); - real_map = entry->object.sub_map; + real_map = VME_SUBMAP(entry); } - map = entry->object.sub_map; + map = VME_SUBMAP(entry); } else { break; } } - if(vm_map_lookup_entry(map, laddr, &entry) && - (entry->object.vm_object != NULL) && - (entry->object.vm_object == object)) { + if (vm_map_lookup_entry(map, laddr, &entry) && + (VME_OBJECT(entry) != NULL) && + 
(VME_OBJECT(entry) == object)) { + int superpage; + + if (!object->pager_created && + object->phys_contiguous && + VME_OFFSET(entry) == 0 && + (entry->vme_end - entry->vme_start == object->vo_size) && + VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size-1))) { + superpage = VM_MEM_SUPERPAGE; + } else { + superpage = 0; + } + if (superpage && physpage_p) { + /* for vm_map_wire_and_extract() */ + *physpage_p = (ppnum_t) + ((((vm_map_offset_t) + object->vo_shadow_offset) + + VME_OFFSET(entry) + + (laddr - entry->vme_start)) + >> PAGE_SHIFT); + } - if(caller_pmap) { - /* Set up a block mapped area */ + if (caller_pmap) { + /* + * Set up a block mapped area + */ + assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); pmap_map_block(caller_pmap, - (addr64_t)(caller_pmap_addr - ldelta), - (((vm_map_offset_t) - (entry->object.vm_object->shadow_offset)) - + entry->offset + - (laddr - entry->vme_start) - - ldelta) >> 12, - ((ldelta + hdelta) >> 12), prot, - (VM_WIMG_MASK & (int)object->wimg_bits), 0); + (addr64_t)(caller_pmap_addr - ldelta), + (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); } else { - /* Set up a block mapped area */ + /* + * Set up a block mapped area + */ + assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); pmap_map_block(real_map->pmap, - (addr64_t)(vaddr - ldelta), - (((vm_map_offset_t) - (entry->object.vm_object->shadow_offset)) - + entry->offset + - (laddr - entry->vme_start) - ldelta) >> 12, - ((ldelta + hdelta) >> 12), prot, - (VM_WIMG_MASK & (int)object->wimg_bits), 0); + (addr64_t)(vaddr - ldelta), + (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); } } -#else -#ifdef notyet - if(caller_pmap) { - pmap_enter(caller_pmap, caller_pmap_addr, - object->shadow_offset>>12, prot, 0, TRUE); - } else { - pmap_enter(pmap, vaddr, - object->shadow_offset>>12, prot, 0, TRUE); - } - /* Map it in */ -#endif -#endif - } /* - * If the page is not wired down and isn't already - * on a pageout queue, then put it where the - * pageout daemon can find it. 
+ * Unlock everything, and return */ - if(m != VM_PAGE_NULL) { - vm_page_lock_queues(); + vm_map_verify_done(map, &version); + if (real_map != map) + vm_map_unlock(real_map); - if (m->clustered) { - vm_pagein_cluster_used++; - m->clustered = FALSE; - } - m->reference = TRUE; + if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); - if (change_wiring) { - if (wired) - vm_page_wire(m); - else - vm_page_unwire(m); - } -#if VM_FAULT_STATIC_CONFIG - else { - if ((!m->active && !m->inactive) || ((need_activation == TRUE) && !m->active)) - vm_page_activate(m); - } -#else - else if (software_reference_bits) { - if (!m->active && !m->inactive) - vm_page_activate(m); - m->reference = TRUE; - } else { - vm_page_activate(m); - } -#endif - vm_page_unlock_queues(); - } + PAGE_WAKEUP_DONE(m); - /* - * Unlock everything, and return - */ + vm_fault_cleanup(m_object, top_page); + } else + vm_fault_cleanup(object, top_page); - vm_map_verify_done(map, &version); - if(real_map != map) - vm_map_unlock(real_map); - if(m != VM_PAGE_NULL) { - PAGE_WAKEUP_DONE(m); - UNLOCK_AND_DEALLOCATE; - } else { - vm_fault_cleanup(object, top_page); - vm_object_deallocate(object); - } - kr = KERN_SUCCESS; + vm_object_deallocate(object); -#undef UNLOCK_AND_DEALLOCATE #undef RELEASE_PAGE - done: - if(write_startup_file) - tws_send_startup_info(current_task()); - + kr = KERN_SUCCESS; +done: thread_interrupt_level(interruptible_state); - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END, - vaddr, - type_of_fault & 0xff, + /* + * Only I/O throttle on faults which cause a pagein/swapin. + */ + if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) { + throttle_lowpri_io(1); + } else { + if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) { + + if ((throttle_delay = vm_page_throttled(TRUE))) { + + if (vm_debug_events) { + if (type_of_fault == DBG_COMPRESSOR_FAULT) + VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + else if (type_of_fault == DBG_COW_FAULT) + VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + else + VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + } + delay(throttle_delay); + } + } + } + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, kr, - type_of_fault >> 8, + type_of_fault, 0); - return(kr); + return (kr); } /* @@ -3198,19 +5094,20 @@ kern_return_t vm_fault_wire( vm_map_t map, vm_map_entry_t entry, + vm_prot_t prot, pmap_t pmap, - vm_map_offset_t pmap_addr) + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - - register vm_map_offset_t va; - register vm_map_offset_t end_addr = entry->vme_end; - register kern_return_t rc; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; + kern_return_t rc; assert(entry->in_transition); - if ((entry->object.vm_object != NULL) && - !entry->is_sub_map && - entry->object.vm_object->phys_contiguous) { + if ((VME_OBJECT(entry) != NULL) && + !entry->is_sub_map && + VME_OBJECT(entry)->phys_contiguous) { return KERN_SUCCESS; } @@ -3229,14 +5126,19 @@ vm_fault_wire( */ for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { - if ((rc = vm_fault_wire_fast( - map, va, entry, pmap, - pmap_addr + (va - entry->vme_start) - )) != KERN_SUCCESS) { - rc = vm_fault(map, va, VM_PROT_NONE, TRUE, - (pmap == kernel_pmap) ? 
- THREAD_UNINT : THREAD_ABORTSAFE, - pmap, pmap_addr + (va - entry->vme_start)); + rc = vm_fault_wire_fast(map, va, prot, entry, pmap, + pmap_addr + (va - entry->vme_start), + physpage_p); + if (rc != KERN_SUCCESS) { + rc = vm_fault_internal(map, va, prot, TRUE, + ((pmap == kernel_pmap) + ? THREAD_UNINT + : THREAD_ABORTSAFE), + pmap, + (pmap_addr + + (va - entry->vme_start)), + physpage_p); + DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL); } if (rc != KERN_SUCCESS) { @@ -3266,12 +5168,38 @@ vm_fault_unwire( pmap_t pmap, vm_map_offset_t pmap_addr) { - register vm_map_offset_t va; - register vm_map_offset_t end_addr = entry->vme_end; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; vm_object_t object; + struct vm_object_fault_info fault_info; + + object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry); + + /* + * If it's marked phys_contiguous, then vm_fault_wire() didn't actually + * do anything since such memory is wired by default. So we don't have + * anything to undo here. + */ + + if (object != VM_OBJECT_NULL && object->phys_contiguous) + return; - object = (entry->is_sub_map) - ? VM_OBJECT_NULL : entry->object.vm_object; + fault_info.interruptible = THREAD_UNINT; + fault_info.behavior = entry->behavior; + fault_info.user_tag = VME_ALIAS(entry); + fault_info.pmap_options = 0; + if (entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) { + fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; + } + fault_info.lo_offset = VME_OFFSET(entry); + fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); + fault_info.no_cache = entry->no_cache; + fault_info.stealth = TRUE; + fault_info.io_sync = FALSE; + fault_info.cs_bypass = FALSE; + fault_info.mark_zf_absent = FALSE; + fault_info.batch_pmap_op = FALSE; /* * Since the pages are wired down, we must be able to @@ -3279,14 +5207,14 @@ vm_fault_unwire( */ for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { - pmap_change_wiring(pmap, - pmap_addr + (va - entry->vme_start), FALSE); if (object == VM_OBJECT_NULL) { + if (pmap) { + pmap_change_wiring(pmap, + pmap_addr + (va - entry->vme_start), FALSE); + } (void) vm_fault(map, va, VM_PROT_NONE, TRUE, THREAD_UNINT, pmap, pmap_addr); - } else if (object->phys_contiguous) { - continue; } else { vm_prot_t prot; vm_page_t result_page; @@ -3294,6 +5222,14 @@ vm_fault_unwire( vm_object_t result_object; vm_fault_return_t result; + if (end_addr - va > (vm_size_t) -1) { + /* 32-bit overflow */ + fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE); + } else { + fault_info.cluster_size = (vm_size_t) (end_addr - va); + assert(fault_info.cluster_size == end_addr - va); + } + do { prot = VM_PROT_NONE; @@ -3302,39 +5238,72 @@ vm_fault_unwire( XPR(XPR_VM_FAULT, "vm_fault_unwire -> vm_fault_page\n", 0,0,0,0,0); - result = vm_fault_page(object, - entry->offset + - (va - entry->vme_start), - VM_PROT_NONE, TRUE, - THREAD_UNINT, - entry->offset, - entry->offset + - (entry->vme_end - - entry->vme_start), - entry->behavior, - &prot, - &result_page, - &top_page, - (int *)0, - 0, map->no_zero_fill, - FALSE, NULL, 0); + result_page = VM_PAGE_NULL; + result = vm_fault_page( + object, + (VME_OFFSET(entry) + + (va - entry->vme_start)), + VM_PROT_NONE, TRUE, + FALSE, /* page not looked up */ + &prot, &result_page, &top_page, + (int *)0, + NULL, map->no_zero_fill, + FALSE, &fault_info); } while (result == VM_FAULT_RETRY); + /* + * If this was a mapping to a file on a device that has been forcibly + * unmounted, then we won't get a page back from vm_fault_page(). 
Just + * move on to the next one in case the remaining pages are mapped from + * different objects. During a forced unmount, the object is terminated + * so the alive flag will be false if this happens. A forced unmount will + * occur when an external disk is unplugged before the user does an + * eject, so we don't want to panic in that situation. + */ + + if (result == VM_FAULT_MEMORY_ERROR && !object->alive) + continue; + + if (result == VM_FAULT_MEMORY_ERROR && + object == kernel_object) { + /* + * This must have been allocated with + * KMA_KOBJECT and KMA_VAONLY and there's + * no physical page at this offset. + * We're done (no page to free). + */ + assert(deallocate); + continue; + } + if (result != VM_FAULT_SUCCESS) panic("vm_fault_unwire: failure"); - result_object = result_page->object; + result_object = VM_PAGE_OBJECT(result_page); + if (deallocate) { - assert(!result_page->fictitious); - pmap_disconnect(result_page->phys_page); + assert(VM_PAGE_GET_PHYS_PAGE(result_page) != + vm_page_fictitious_addr); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page)); VM_PAGE_FREE(result_page); } else { - vm_page_lock_queues(); - vm_page_unwire(result_page); - vm_page_unlock_queues(); + if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) + pmap_change_wiring(pmap, + pmap_addr + (va - entry->vme_start), FALSE); + + + if (VM_PAGE_WIRED(result_page)) { + vm_page_lockspin_queues(); + vm_page_unwire(result_page, TRUE); + vm_page_unlock_queues(); + } + if(entry->zero_wired_pages) { + pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page)); + entry->zero_wired_pages = FALSE; + } + PAGE_WAKEUP_DONE(result_page); } - vm_fault_cleanup(result_object, top_page); } } @@ -3370,22 +5339,25 @@ vm_fault_unwire( * other than the common case will return KERN_FAILURE, and the caller * is expected to call vm_fault(). */ -kern_return_t +static kern_return_t vm_fault_wire_fast( __unused vm_map_t map, vm_map_offset_t va, + vm_prot_t caller_prot, vm_map_entry_t entry, - pmap_t pmap, - vm_map_offset_t pmap_addr) + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { vm_object_t object; vm_object_offset_t offset; - register vm_page_t m; + vm_page_t m; vm_prot_t prot; thread_t thread = current_thread(); - unsigned int cache_attr; + int type_of_fault; + kern_return_t kr; - VM_STAT(faults++); + VM_STAT_INCR(faults); if (thread != THREAD_NULL && thread->task != TASK_NULL) thread->task->faults++; @@ -3397,8 +5369,8 @@ vm_fault_wire_fast( #undef RELEASE_PAGE #define RELEASE_PAGE(m) { \ PAGE_WAKEUP_DONE(m); \ - vm_page_lock_queues(); \ - vm_page_unwire(m); \ + vm_page_lockspin_queues(); \ + vm_page_unwire(m, TRUE); \ vm_page_unlock_queues(); \ } @@ -3427,15 +5399,17 @@ vm_fault_wire_fast( /* * If this entry is not directly to a vm_object, bail out. */ - if (entry->is_sub_map) + if (entry->is_sub_map) { + assert(physpage_p == NULL); return(KERN_FAILURE); + } /* * Find the backing store object and offset into it.
*/ - object = entry->object.vm_object; - offset = (va - entry->vme_start) + entry->offset; + object = VME_OBJECT(entry); + offset = (va - entry->vme_start) + VME_OFFSET(entry); prot = entry->protection; /* @@ -3444,9 +5418,7 @@ vm_fault_wire_fast( */ vm_object_lock(object); - assert(object->ref_count > 0); - object->ref_count++; - vm_object_res_reference(object); + vm_object_reference_locked(object); vm_object_paging_begin(object); /* @@ -3471,20 +5443,29 @@ vm_fault_wire_fast( */ m = vm_page_lookup(object, offset); if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) || - (m->unusual && ( m->error || m->restart || m->absent || - prot & m->page_lock))) { + (m->unusual && ( m->error || m->restart || m->absent))) { GIVE_UP; } ASSERT_PAGE_DECRYPTED(m); + if (m->fictitious && + VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + /* + * Guard pages are fictitious pages and are never + * entered into a pmap, so let's say it's been wired... + */ + kr = KERN_SUCCESS; + goto done; + } + /* * Wire the page down now. All bail outs beyond this * point must unwire the page. */ - vm_page_lock_queues(); - vm_page_wire(m); + vm_page_lockspin_queues(); + vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); vm_page_unlock_queues(); /* @@ -3504,27 +5485,52 @@ vm_fault_wire_fast( /* * Put this page into the physical map. - * We have to unlock the object because pmap_enter - * may cause other faults. */ - if (m->no_isync == TRUE) { - pmap_sync_page_data_phys(m->phys_page); - - m->no_isync = FALSE; + type_of_fault = DBG_CACHE_HIT_FAULT; + kr = vm_fault_enter(m, + pmap, + pmap_addr, + prot, + prot, + TRUE, + FALSE, + FALSE, + FALSE, + VME_ALIAS(entry), + ((entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) + ? PMAP_OPTIONS_ALT_ACCT + : 0), + NULL, + &type_of_fault); + if (kr != KERN_SUCCESS) { + RELEASE_PAGE(m); + GIVE_UP; } - cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK; - - PMAP_ENTER(pmap, pmap_addr, m, prot, cache_attr, TRUE); - +done: /* * Unlock everything, and return */ + if (physpage_p) { + /* for vm_map_wire_and_extract() */ + if (kr == KERN_SUCCESS) { + assert(object == VM_PAGE_OBJECT(m)); + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (prot & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive(object); + m->dirty = TRUE; + } + } else { + *physpage_p = 0; + } + } + PAGE_WAKEUP_DONE(m); UNLOCK_AND_DEALLOCATE; - return(KERN_SUCCESS); + return kr; } @@ -3534,33 +5540,36 @@ vm_fault_wire_fast( * Release a page used by vm_fault_copy. 
*/ -void +static void vm_fault_copy_cleanup( vm_page_t page, vm_page_t top_page) { - vm_object_t object = page->object; + vm_object_t object = VM_PAGE_OBJECT(page); vm_object_lock(object); PAGE_WAKEUP_DONE(page); - vm_page_lock_queues(); - if (!page->active && !page->inactive) - vm_page_activate(page); - vm_page_unlock_queues(); + if ( !VM_PAGE_PAGEABLE(page)) { + vm_page_lockspin_queues(); + if ( !VM_PAGE_PAGEABLE(page)) { + vm_page_activate(page); + } + vm_page_unlock_queues(); + } vm_fault_cleanup(object, top_page); } -void +static void vm_fault_copy_dst_cleanup( vm_page_t page) { vm_object_t object; if (page != VM_PAGE_NULL) { - object = page->object; + object = VM_PAGE_OBJECT(page); vm_object_lock(object); - vm_page_lock_queues(); - vm_page_unwire(page); + vm_page_lockspin_queues(); + vm_page_unwire(page, TRUE); vm_page_unlock_queues(); vm_object_paging_end(object); vm_object_unlock(object); @@ -3617,18 +5626,18 @@ vm_fault_copy( vm_map_size_t amount_left; vm_object_t old_copy_object; + vm_object_t result_page_object = NULL; kern_return_t error = 0; + vm_fault_return_t result; vm_map_size_t part_size; + struct vm_object_fault_info fault_info_src; + struct vm_object_fault_info fault_info_dst; /* * In order not to confuse the clustered pageins, align * the different offsets on a page boundary. */ - vm_object_offset_t src_lo_offset = vm_object_trunc_page(src_offset); - vm_object_offset_t dst_lo_offset = vm_object_trunc_page(dst_offset); - vm_object_offset_t src_hi_offset = vm_object_round_page(src_offset + *copy_size); - vm_object_offset_t dst_hi_offset = vm_object_round_page(dst_offset + *copy_size); #define RETURN(x) \ MACRO_BEGIN \ @@ -3637,6 +5646,33 @@ vm_fault_copy( MACRO_END amount_left = *copy_size; + + fault_info_src.interruptible = interruptible; + fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL; + fault_info_src.user_tag = 0; + fault_info_src.pmap_options = 0; + fault_info_src.lo_offset = vm_object_trunc_page(src_offset); + fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left; + fault_info_src.no_cache = FALSE; + fault_info_src.stealth = TRUE; + fault_info_src.io_sync = FALSE; + fault_info_src.cs_bypass = FALSE; + fault_info_src.mark_zf_absent = FALSE; + fault_info_src.batch_pmap_op = FALSE; + + fault_info_dst.interruptible = interruptible; + fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL; + fault_info_dst.user_tag = 0; + fault_info_dst.pmap_options = 0; + fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset); + fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left; + fault_info_dst.no_cache = FALSE; + fault_info_dst.stealth = TRUE; + fault_info_dst.io_sync = FALSE; + fault_info_dst.cs_bypass = FALSE; + fault_info_dst.mark_zf_absent = FALSE; + fault_info_dst.batch_pmap_op = FALSE; + do { /* while (amount_left > 0) */ /* * There may be a deadlock if both source and destination @@ -3652,22 +5688,27 @@ vm_fault_copy( vm_object_lock(dst_object); vm_object_paging_begin(dst_object); + if (amount_left > (vm_size_t) -1) { + /* 32-bit overflow */ + fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE); + } else { + fault_info_dst.cluster_size = (vm_size_t) amount_left; + assert(fault_info_dst.cluster_size == amount_left); + } + XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0); - switch (vm_fault_page(dst_object, - vm_object_trunc_page(dst_offset), - VM_PROT_WRITE|VM_PROT_READ, - FALSE, - interruptible, - dst_lo_offset, - dst_hi_offset, - VM_BEHAVIOR_SEQUENTIAL, - &dst_prot, - &dst_page, - &dst_top_page, - (int *)0, - &error, - 
dst_map->no_zero_fill, - FALSE, NULL, 0)) { + dst_page = VM_PAGE_NULL; + result = vm_fault_page(dst_object, + vm_object_trunc_page(dst_offset), + VM_PROT_WRITE|VM_PROT_READ, + FALSE, + FALSE, /* page not looked up */ + &dst_prot, &dst_page, &dst_top_page, + (int *)0, + &error, + dst_map->no_zero_fill, + FALSE, &fault_info_dst); + switch (result) { case VM_FAULT_SUCCESS: break; case VM_FAULT_RETRY: @@ -3678,18 +5719,24 @@ vm_fault_copy( /* fall thru */ case VM_FAULT_INTERRUPTED: RETURN(MACH_SEND_INTERRUPTED); - case VM_FAULT_FICTITIOUS_SHORTAGE: - vm_page_more_fictitious(); - goto RetryDestinationFault; + case VM_FAULT_SUCCESS_NO_VM_PAGE: + /* success but no VM page: fail the copy */ + vm_object_paging_end(dst_object); + vm_object_unlock(dst_object); + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: if (error) return (error); else return(KERN_MEMORY_ERROR); + default: + panic("vm_fault_copy: unexpected error 0x%x from " + "vm_fault_page()\n", result); } assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); - old_copy_object = dst_page->object->copy; + assert(dst_object == VM_PAGE_OBJECT(dst_page)); + old_copy_object = dst_object->copy; /* * There exists the possiblity that the source and @@ -3703,11 +5750,11 @@ vm_fault_copy( * holding the dest page so it doesn't go away. */ - vm_page_lock_queues(); - vm_page_wire(dst_page); + vm_page_lockspin_queues(); + vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE); vm_page_unlock_queues(); PAGE_WAKEUP_DONE(dst_page); - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); if (dst_top_page != VM_PAGE_NULL) { vm_object_lock(dst_object); @@ -3736,25 +5783,29 @@ vm_fault_copy( src_prot = VM_PROT_READ; vm_object_paging_begin(src_object); + if (amount_left > (vm_size_t) -1) { + /* 32-bit overflow */ + fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE); + } else { + fault_info_src.cluster_size = (vm_size_t) amount_left; + assert(fault_info_src.cluster_size == amount_left); + } + XPR(XPR_VM_FAULT, "vm_fault_copy(2) -> vm_fault_page\n", 0,0,0,0,0); - switch (vm_fault_page(src_object, - vm_object_trunc_page(src_offset), - VM_PROT_READ, - FALSE, - interruptible, - src_lo_offset, - src_hi_offset, - VM_BEHAVIOR_SEQUENTIAL, - &src_prot, - &result_page, - &src_top_page, - (int *)0, - &error, - FALSE, - FALSE, NULL, 0)) { - + result_page = VM_PAGE_NULL; + result = vm_fault_page( + src_object, + vm_object_trunc_page(src_offset), + VM_PROT_READ, FALSE, + FALSE, /* page not looked up */ + &src_prot, + &result_page, &src_top_page, + (int *)0, &error, FALSE, + FALSE, &fault_info_src); + + switch (result) { case VM_FAULT_SUCCESS: break; case VM_FAULT_RETRY: @@ -3766,23 +5817,29 @@ vm_fault_copy( case VM_FAULT_INTERRUPTED: vm_fault_copy_dst_cleanup(dst_page); RETURN(MACH_SEND_INTERRUPTED); - case VM_FAULT_FICTITIOUS_SHORTAGE: - vm_page_more_fictitious(); - goto RetrySourceFault; + case VM_FAULT_SUCCESS_NO_VM_PAGE: + /* success but no VM page: fail */ + vm_object_paging_end(src_object); + vm_object_unlock(src_object); + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: vm_fault_copy_dst_cleanup(dst_page); if (error) return (error); else return(KERN_MEMORY_ERROR); + default: + panic("vm_fault_copy(2): unexpected " + "error 0x%x from " + "vm_fault_page()\n", result); } - + result_page_object = VM_PAGE_OBJECT(result_page); assert((src_top_page == VM_PAGE_NULL) == - (result_page->object == src_object)); + (result_page_object == src_object)); } assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE); - vm_object_unlock(result_page->object); + 
vm_object_unlock(result_page_object); } if (!vm_map_verify(dst_map, dst_version)) { @@ -3791,18 +5848,19 @@ vm_fault_copy( vm_fault_copy_dst_cleanup(dst_page); break; } + assert(dst_object == VM_PAGE_OBJECT(dst_page)); - vm_object_lock(dst_page->object); + vm_object_lock(dst_object); - if (dst_page->object->copy != old_copy_object) { - vm_object_unlock(dst_page->object); + if (dst_object->copy != old_copy_object) { + vm_object_unlock(dst_object); vm_map_verify_done(dst_map, dst_version); if (result_page != VM_PAGE_NULL && src_page != dst_page) vm_fault_copy_cleanup(result_page, src_top_page); vm_fault_copy_dst_cleanup(dst_page); break; } - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); /* * Copy the page, and note that it is dirty @@ -3829,15 +5887,24 @@ vm_fault_copy( } if (result_page == VM_PAGE_NULL) { + assert((vm_offset_t) dst_po == dst_po); + assert((vm_size_t) part_size == part_size); vm_page_part_zero_fill(dst_page, - dst_po, part_size); + (vm_offset_t) dst_po, + (vm_size_t) part_size); } else { - vm_page_part_copy(result_page, src_po, - dst_page, dst_po, part_size); + assert((vm_offset_t) src_po == src_po); + assert((vm_offset_t) dst_po == dst_po); + assert((vm_size_t) part_size == part_size); + vm_page_part_copy(result_page, + (vm_offset_t) src_po, + dst_page, + (vm_offset_t) dst_po, + (vm_size_t)part_size); if(!dst_page->dirty){ vm_object_lock(dst_object); - dst_page->dirty = TRUE; - vm_object_unlock(dst_page->object); + SET_PAGE_DIRTY(dst_page, TRUE); + vm_object_unlock(dst_object); } } @@ -3847,11 +5914,14 @@ vm_fault_copy( if (result_page == VM_PAGE_NULL) vm_page_zero_fill(dst_page); else{ + vm_object_lock(result_page_object); vm_page_copy(result_page, dst_page); + vm_object_unlock(result_page_object); + if(!dst_page->dirty){ vm_object_lock(dst_object); - dst_page->dirty = TRUE; - vm_object_unlock(dst_page->object); + SET_PAGE_DIRTY(dst_page, TRUE); + vm_object_unlock(dst_object); } } @@ -3878,165 +5948,6 @@ vm_fault_copy( /*NOTREACHED*/ } -#ifdef notdef - -/* - * Routine: vm_fault_page_overwrite - * - * Description: - * A form of vm_fault_page that assumes that the - * resulting page will be overwritten in its entirety, - * making it unnecessary to obtain the correct *contents* - * of the page. - * - * Implementation: - * XXX Untested. Also unused. Eventually, this technology - * could be used in vm_fault_copy() to advantage. - */ -vm_fault_return_t -vm_fault_page_overwrite( - register - vm_object_t dst_object, - vm_object_offset_t dst_offset, - vm_page_t *result_page) /* OUT */ -{ - register - vm_page_t dst_page; - kern_return_t wait_result; - -#define interruptible THREAD_UNINT /* XXX */ - - while (TRUE) { - /* - * Look for a page at this offset - */ - - while ((dst_page = vm_page_lookup(dst_object, dst_offset)) - == VM_PAGE_NULL) { - /* - * No page, no problem... just allocate one. - */ - - dst_page = vm_page_alloc(dst_object, dst_offset); - if (dst_page == VM_PAGE_NULL) { - vm_object_unlock(dst_object); - VM_PAGE_WAIT(); - vm_object_lock(dst_object); - continue; - } - - /* - * Pretend that the memory manager - * write-protected the page. - * - * Note that we will be asking for write - * permission without asking for the data - * first. - */ - - dst_page->overwriting = TRUE; - dst_page->page_lock = VM_PROT_WRITE; - dst_page->absent = TRUE; - dst_page->unusual = TRUE; - dst_object->absent_count++; - - break; - - /* - * When we bail out, we might have to throw - * away the page created here. 
- */ - -#define DISCARD_PAGE \ - MACRO_BEGIN \ - vm_object_lock(dst_object); \ - dst_page = vm_page_lookup(dst_object, dst_offset); \ - if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting) \ - VM_PAGE_FREE(dst_page); \ - vm_object_unlock(dst_object); \ - MACRO_END - } - - /* - * If the page is write-protected... - */ - - if (dst_page->page_lock & VM_PROT_WRITE) { - /* - * ... and an unlock request hasn't been sent - */ - - if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) { - vm_prot_t u; - kern_return_t rc; - - /* - * ... then send one now. - */ - - if (!dst_object->pager_ready) { - wait_result = vm_object_assert_wait(dst_object, - VM_OBJECT_EVENT_PAGER_READY, - interruptible); - vm_object_unlock(dst_object); - if (wait_result == THREAD_WAITING) - wait_result = thread_block(THREAD_CONTINUE_NULL); - if (wait_result != THREAD_AWAKENED) { - DISCARD_PAGE; - return(VM_FAULT_INTERRUPTED); - } - continue; - } - - u = dst_page->unlock_request |= VM_PROT_WRITE; - vm_object_unlock(dst_object); - - if ((rc = memory_object_data_unlock( - dst_object->pager, - dst_offset + dst_object->paging_offset, - PAGE_SIZE, - u)) != KERN_SUCCESS) { - if (vm_fault_debug) - printf("vm_object_overwrite: memory_object_data_unlock failed\n"); - DISCARD_PAGE; - return((rc == MACH_SEND_INTERRUPTED) ? - VM_FAULT_INTERRUPTED : - VM_FAULT_MEMORY_ERROR); - } - vm_object_lock(dst_object); - continue; - } - - /* ... fall through to wait below */ - } else { - /* - * If the page isn't being used for other - * purposes, then we're done. - */ - if ( ! (dst_page->busy || dst_page->absent || - dst_page->error || dst_page->restart) ) - break; - } - - wait_result = PAGE_ASSERT_WAIT(dst_page, interruptible); - vm_object_unlock(dst_object); - if (wait_result == THREAD_WAITING) - wait_result = thread_block(THREAD_CONTINUE_NULL); - if (wait_result != THREAD_AWAKENED) { - DISCARD_PAGE; - return(VM_FAULT_INTERRUPTED); - } - } - - *result_page = dst_page; - return(VM_FAULT_SUCCESS); - -#undef interruptible -#undef DISCARD_PAGE -} - -#endif /* notdef */ - #if VM_FAULT_CLASSIFY /* * Temporary statistics gathering support. 
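The vm_fault_unwire() and vm_fault_copy() hunks above repeatedly clamp a 64-bit "bytes remaining" quantity into the 32-bit cluster_size field of the fault-info structure, falling back to (vm_size_t)(0 - PAGE_SIZE) when the remainder does not fit. Below is a minimal user-space sketch of that clamp; MODEL_PAGE_SIZE, model_size_t and clamp_cluster_size() are illustrative stand-ins for this sketch only, not kernel types or APIs.

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

/* Model vm_size_t as 32 bits so the overflow branch is reachable. */
typedef uint32_t model_size_t;

/*
 * Clamp a 64-bit "bytes left to process" value into a 32-bit cluster
 * size: if it does not fit, request the largest page-aligned amount
 * (0 - PAGE_SIZE) instead of silently truncating.
 */
static model_size_t
clamp_cluster_size(uint64_t amount_left)
{
	if (amount_left > (model_size_t) -1) {
		/* 32-bit overflow */
		return (model_size_t)(0 - MODEL_PAGE_SIZE);
	}
	return (model_size_t) amount_left;
}

int
main(void)
{
	printf("%u\n", clamp_cluster_size(2 * MODEL_PAGE_SIZE)); /* 8192 */
	printf("%u\n", clamp_cluster_size(1ULL << 40));          /* 4294963200 */
	return 0;
}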
@@ -4068,8 +5979,7 @@ vm_fault_classify(vm_object_t object, while (TRUE) { m = vm_page_lookup(object, offset); if (m != VM_PAGE_NULL) { - if (m->busy || m->error || m->restart || m->absent || - fault_type & m->page_lock) { + if (m->busy || m->error || m->restart || m->absent) { type = VM_FAULT_TYPE_OTHER; break; } @@ -4091,7 +6001,7 @@ vm_fault_classify(vm_object_t object, break; } - offset += object->shadow_offset; + offset += object->vo_shadow_offset; object = object->shadow; level++; continue; @@ -4122,3 +6032,389 @@ vm_fault_classify_init(void) return; } #endif /* VM_FAULT_CLASSIFY */ + +vm_offset_t +kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) +{ + vm_map_entry_t entry; + vm_object_t object; + vm_offset_t object_offset; + vm_page_t m; + int compressor_external_state, compressed_count_delta; + int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); + int my_fault_type = VM_PROT_READ; + kern_return_t kr; + + if (not_in_kdp) { + panic("kdp_lightweight_fault called from outside of debugger context"); + } + + assert(map != VM_MAP_NULL); + + assert((cur_target_addr & PAGE_MASK) == 0); + if ((cur_target_addr & PAGE_MASK) != 0) { + return 0; + } + + if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) { + return 0; + } + + if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) { + return 0; + } + + if (entry->is_sub_map) { + return 0; + } + + object = VME_OBJECT(entry); + if (object == VM_OBJECT_NULL) { + return 0; + } + + object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry); + + while (TRUE) { + if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) { + return 0; + } + + if (object->pager_created && (object->paging_in_progress || + object->activity_in_progress)) { + return 0; + } + + m = kdp_vm_page_lookup(object, object_offset); + + if (m != VM_PAGE_NULL) { + + if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { + return 0; + } + + if (m->laundry || m->busy || m->free_when_done || m->absent || m->error || m->cleaning || + m->overwriting || m->restart || m->unusual) { + return 0; + } + + assert(!m->private); + if (m->private) { + return 0; + } + + assert(!m->fictitious); + if (m->fictitious) { + return 0; + } + + assert(!m->encrypted); + if (m->encrypted) { + return 0; + } + + assert(!m->encrypted_cleaning); + if (m->encrypted_cleaning) { + return 0; + } + + assert(m->vm_page_q_state != VM_PAGE_USED_BY_COMPRESSOR); + if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) { + return 0; + } + + return ptoa(VM_PAGE_GET_PHYS_PAGE(m)); + } + + compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; + + if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) { + if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) { + kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset), + kdp_compressor_decompressed_page_ppnum, &my_fault_type, + compressor_flags, &compressed_count_delta); + if (kr == KERN_SUCCESS) { + return kdp_compressor_decompressed_page_paddr; + } else { + return 0; + } + } + } + + if (object->shadow == VM_OBJECT_NULL) { + return 0; + } + + object_offset += object->vo_shadow_offset; + object = object->shadow; + } + +} + +void +vm_page_validate_cs_mapped( + vm_page_t page, + const void *kaddr) +{ + vm_object_t object; + vm_object_offset_t offset; + memory_object_t pager; + struct vnode *vnode; + boolean_t validated; + unsigned tainted; + + assert(page->busy); + object = VM_PAGE_OBJECT(page); + vm_object_lock_assert_exclusive(object); + + if (page->wpmapped && 
!page->cs_tainted) { + /* + * This page was mapped for "write" access sometime in the + * past and could still be modifiable in the future. + * Consider it tainted. + * [ If the page was already found to be "tainted", no + * need to re-validate. ] + */ + page->cs_validated = TRUE; + page->cs_tainted = TRUE; + if (cs_debug) { + printf("CODESIGNING: vm_page_validate_cs: " + "page %p obj %p off 0x%llx " + "was modified\n", + page, object, page->offset); + } + vm_cs_validated_dirtied++; + } + + if (page->cs_validated || page->cs_tainted) { + return; + } + + vm_cs_validates++; + + assert(object->code_signed); + offset = page->offset; + + if (!object->alive || object->terminating || object->pager == NULL) { + /* + * The object is terminating and we don't have its pager + * so we can't validate the data... + */ + return; + } + /* + * Since we get here to validate a page that was brought in by + * the pager, we know that this pager is all setup and ready + * by now. + */ + assert(!object->internal); + assert(object->pager != NULL); + assert(object->pager_ready); + + pager = object->pager; + assert(object->paging_in_progress); + vnode = vnode_pager_lookup_vnode(pager); + + /* verify the SHA1 hash for this page */ + tainted = 0; + validated = cs_validate_range(vnode, + pager, + (object->paging_offset + + offset), + (const void *)((const char *)kaddr), + PAGE_SIZE_64, + &tainted); + + if (tainted & CS_VALIDATE_TAINTED) { + page->cs_tainted = TRUE; + } + if (tainted & CS_VALIDATE_NX) { + page->cs_nx = TRUE; + } + + if (validated) { + page->cs_validated = TRUE; + } +} + +void +vm_page_validate_cs( + vm_page_t page) +{ + vm_object_t object; + vm_object_offset_t offset; + vm_map_offset_t koffset; + vm_map_size_t ksize; + vm_offset_t kaddr; + kern_return_t kr; + boolean_t busy_page; + boolean_t need_unmap; + + object = VM_PAGE_OBJECT(page); + vm_object_lock_assert_held(object); + + if (page->wpmapped && !page->cs_tainted) { + vm_object_lock_assert_exclusive(object); + + /* + * This page was mapped for "write" access sometime in the + * past and could still be modifiable in the future. + * Consider it tainted. + * [ If the page was already found to be "tainted", no + * need to re-validate. ] + */ + page->cs_validated = TRUE; + page->cs_tainted = TRUE; + if (cs_debug) { + printf("CODESIGNING: vm_page_validate_cs: " + "page %p obj %p off 0x%llx " + "was modified\n", + page, object, page->offset); + } + vm_cs_validated_dirtied++; + } + + if (page->cs_validated || page->cs_tainted) { + return; + } + + if (page->slid) { + panic("vm_page_validate_cs(%p): page is slid\n", page); + } + assert(!page->slid); + +#if CHECK_CS_VALIDATION_BITMAP + if ( vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page(page->offset + object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) { + page->cs_validated = TRUE; + page->cs_tainted = FALSE; + vm_cs_bitmap_validated++; + return; + } +#endif + vm_object_lock_assert_exclusive(object); + + assert(object->code_signed); + offset = page->offset; + + busy_page = page->busy; + if (!busy_page) { + /* keep page busy while we map (and unlock) the VM object */ + page->busy = TRUE; + } + + /* + * Take a paging reference on the VM object + * to protect it from collapse or bypass, + * and keep it from disappearing too. + */ + vm_object_paging_begin(object); + + /* map the page in the kernel address space */ + ksize = PAGE_SIZE_64; + koffset = 0; + need_unmap = FALSE; + kr = vm_paging_map_object(page, + object, + offset, + VM_PROT_READ, + FALSE, /* can't unlock object ! 
*/ + &ksize, + &koffset, + &need_unmap); + if (kr != KERN_SUCCESS) { + panic("vm_page_validate_cs: could not map page: 0x%x\n", kr); + } + kaddr = CAST_DOWN(vm_offset_t, koffset); + + /* validate the mapped page */ + vm_page_validate_cs_mapped(page, (const void *) kaddr); + +#if CHECK_CS_VALIDATION_BITMAP + if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) { + vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET ); + } +#endif + assert(page->busy); + assert(object == VM_PAGE_OBJECT(page)); + vm_object_lock_assert_exclusive(object); + + if (!busy_page) { + PAGE_WAKEUP_DONE(page); + } + if (need_unmap) { + /* unmap the map from the kernel address space */ + vm_paging_unmap_object(object, koffset, koffset + ksize); + koffset = 0; + ksize = 0; + kaddr = 0; + } + vm_object_paging_end(object); +} + +void +vm_page_validate_cs_mapped_chunk( + vm_page_t page, + const void *kaddr, + vm_offset_t chunk_offset, + vm_size_t chunk_size, + boolean_t *validated_p, + unsigned *tainted_p) +{ + vm_object_t object; + vm_object_offset_t offset, offset_in_page; + memory_object_t pager; + struct vnode *vnode; + boolean_t validated; + unsigned tainted; + + *validated_p = FALSE; + *tainted_p = 0; + + assert(page->busy); + object = VM_PAGE_OBJECT(page); + vm_object_lock_assert_exclusive(object); + + assert(object->code_signed); + offset = page->offset; + + if (!object->alive || object->terminating || object->pager == NULL) { + /* + * The object is terminating and we don't have its pager + * so we can't validate the data... + */ + return; + } + /* + * Since we get here to validate a page that was brought in by + * the pager, we know that this pager is all setup and ready + * by now. + */ + assert(!object->internal); + assert(object->pager != NULL); + assert(object->pager_ready); + + pager = object->pager; + assert(object->paging_in_progress); + vnode = vnode_pager_lookup_vnode(pager); + + /* verify the signature for this chunk */ + offset_in_page = chunk_offset; + assert(offset_in_page < PAGE_SIZE); + + tainted = 0; + validated = cs_validate_range(vnode, + pager, + (object->paging_offset + + offset + + offset_in_page), + (const void *)((const char *)kaddr + + offset_in_page), + chunk_size, + &tainted); + if (validated) { + *validated_p = TRUE; + } + if (tainted) { + *tainted_p = tainted; + } +}
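vm_page_validate_cs_mapped_chunk() above reports, per chunk, whether the range validated and whether it was tainted, and leaves it to its callers to combine those results across a page. The sketch below is a simplified user-space model of one plausible way a caller could fold the per-chunk results together; validate_chunk() is a made-up stand-in for cs_validate_range(), the macro values are chosen for this model only, and the "non-zero means valid, 0xFF byte means tainted" rule is purely illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MODEL_PAGE_SIZE     4096u
#define MODEL_CHUNK_SIZE    1024u
#define MODEL_CS_TAINTED    0x1u

/*
 * Stand-in for the real signature check: a chunk "validates" if it is
 * not all zeroes and is "tainted" if it contains a 0xFF byte. The real
 * code verifies a hash of the chunk against the code signature instead.
 */
static bool
validate_chunk(const uint8_t *chunk, size_t size, unsigned *tainted)
{
	bool nonzero = false;
	size_t i;

	*tainted = 0;
	for (i = 0; i < size; i++) {
		if (chunk[i] != 0)
			nonzero = true;
		if (chunk[i] == 0xFF)
			*tainted |= MODEL_CS_TAINTED;
	}
	return nonzero;
}

/*
 * Walk a mapped page chunk by chunk, the way a caller of
 * vm_page_validate_cs_mapped_chunk() might: the page counts as
 * validated only if every chunk validated, and as tainted if any
 * chunk reported taint.
 */
static void
validate_page(const uint8_t *kaddr, bool *validated_p, bool *tainted_p)
{
	size_t off;

	*validated_p = true;
	*tainted_p = false;

	for (off = 0; off < MODEL_PAGE_SIZE; off += MODEL_CHUNK_SIZE) {
		unsigned tainted = 0;

		if (!validate_chunk(kaddr + off, MODEL_CHUNK_SIZE, &tainted))
			*validated_p = false;
		if (tainted & MODEL_CS_TAINTED)
			*tainted_p = true;
	}
}

int
main(void)
{
	static uint8_t page[MODEL_PAGE_SIZE];
	bool validated, tainted;

	memset(page, 0xAB, sizeof(page));
	validate_page(page, &validated, &tainted);
	printf("validated=%d tainted=%d\n", validated, tainted);
	return 0;
}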