X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..2a1bd2d3eef5c7a7bb14f4bb9fdbca9a96ee4752:/osfmk/vm/vm_fault.c

diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index d20d7a365..43814d16e 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,34 +22,34 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
*/ @@ -68,11 +68,11 @@ #include #include -#include /* for error codes */ +#include /* for error codes */ #include #include #include - /* For memory_object_data_{request,unlock} */ +/* For memory_object_data_{request,unlock} */ #include #include @@ -82,11 +82,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include @@ -100,25 +100,29 @@ #include #include #include -#include /* Needed by some vm_page.h macros */ +#include /* Needed by some vm_page.h macros */ #include #include +#include +#include -#include /* for struct timespec */ +#include -#define VM_FAULT_CLASSIFY 0 +#define VM_FAULT_CLASSIFY 0 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */ -unsigned int vm_object_pagein_throttle = 16; +int vm_protect_privileged_from_untrusted = 1; + +unsigned int vm_object_pagein_throttle = 16; /* - * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which + * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which * kicks in when swap space runs out. 64-bit programs have massive address spaces and can leak enormous amounts * of memory if they're buggy and can run the system completely out of swap space. If this happens, we * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps - * keep the UI active so that the user has a chance to kill the offending task before the system + * keep the UI active so that the user has a chance to kill the offending task before the system * completely hangs. * * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied @@ -129,55 +133,69 @@ unsigned int vm_object_pagein_throttle = 16; extern void throttle_lowpri_io(int); -uint64_t vm_hard_throttle_threshold; +extern struct vnode *vnode_pager_lookup_vnode(memory_object_t); +uint64_t vm_hard_throttle_threshold; -#define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \ - (vm_page_free_count < vm_page_throttle_limit && \ - proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) > THROTTLE_LEVEL_THROTTLED)) +OS_ALWAYS_INLINE +boolean_t +NEED_TO_HARD_THROTTLE_THIS_TASK(void) +{ + return vm_wants_task_throttled(current_task()) || + ((vm_page_free_count < vm_page_throttle_limit || + HARD_THROTTLE_LIMIT_REACHED()) && + proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED); +} +#define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */ +#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ -#define HARD_THROTTLE_DELAY 5000 /* 5000 us == 5 ms */ -#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ +#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 +#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 -#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 -#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 +#define VM_STAT_DECOMPRESSIONS() \ +MACRO_BEGIN \ + VM_STAT_INCR(decompressions); \ + current_thread()->decompressions++; \ +MACRO_END boolean_t current_thread_aborted(void); /* Forward declarations of internal routines. 
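/*
 * [Illustrative sketch, not part of the diff] How a fault path consumes the
 * throttle predicate and the delays defined above. All names here are
 * user-space stand-ins: the real NEED_TO_HARD_THROTTLE_THIS_TASK() reads
 * vm_wants_task_throttled(), vm_page_free_count and the thread's IO policy,
 * which are kernel-private.
 */
#include <stdbool.h>
#include <unistd.h>

#define MODEL_HARD_THROTTLE_DELAY 10000	/* us; this revision doubles it from 5 ms */
#define MODEL_SOFT_THROTTLE_DELAY 200	/* us */

static bool     model_task_flagged;		/* vm_wants_task_throttled() stand-in */
static unsigned model_free_pages = 100000;	/* vm_page_free_count stand-in */
static unsigned model_throttle_limit = 2000;	/* vm_page_throttle_limit stand-in */

static bool
model_need_hard_throttle(void)
{
	return model_task_flagged || model_free_pages < model_throttle_limit;
}

/* Called on the zero-fill path when page creation must back off. */
static void
model_throttle_pause(void)
{
	useconds_t us = model_need_hard_throttle()
	    ? MODEL_HARD_THROTTLE_DELAY : MODEL_SOFT_THROTTLE_DELAY;

	usleep(us);	/* the kernel uses delay()-style waits, not usleep() */
}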
*/ static kern_return_t vm_fault_wire_fast( - vm_map_t map, - vm_map_offset_t va, - vm_prot_t prot, - vm_map_entry_t entry, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_offset_t va, + vm_prot_t prot, + vm_tag_t wire_tag, + vm_map_entry_t entry, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); static kern_return_t vm_fault_internal( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t caller_prot, - boolean_t change_wiring, - int interruptible, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t caller_prot, + boolean_t change_wiring, + vm_tag_t wire_tag, + int interruptible, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); static void vm_fault_copy_cleanup( - vm_page_t page, - vm_page_t top_page); + vm_page_t page, + vm_page_t top_page); static void vm_fault_copy_dst_cleanup( - vm_page_t page); + vm_page_t page); -#if VM_FAULT_CLASSIFY -extern void vm_fault_classify(vm_object_t object, - vm_object_offset_t offset, - vm_prot_t fault_type); +#if VM_FAULT_CLASSIFY +extern void vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type); extern void vm_fault_classify_init(void); #endif @@ -191,18 +209,36 @@ unsigned long vm_cs_query_modified = 0; unsigned long vm_cs_validated_dirtied = 0; unsigned long vm_cs_bitmap_validated = 0; -void vm_pre_fault(vm_map_offset_t); +void vm_pre_fault(vm_map_offset_t, vm_prot_t); -extern int not_in_kdp; extern char *kdp_compressor_decompressed_page; -extern addr64_t kdp_compressor_decompressed_page_paddr; -extern ppnum_t kdp_compressor_decompressed_page_ppnum; +extern addr64_t kdp_compressor_decompressed_page_paddr; +extern ppnum_t kdp_compressor_decompressed_page_ppnum; + +struct vmrtfr { + int vmrtfr_maxi; + int vmrtfr_curi; + int64_t vmrtf_total; + vm_rtfault_record_t *vm_rtf_records; +} vmrtfrs; +#define VMRTF_DEFAULT_BUFSIZE (4096) +#define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t)) +TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT); + +static void vm_rtfrecord_lock(void); +static void vm_rtfrecord_unlock(void); +static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int); + +extern lck_grp_t vm_page_lck_grp_bucket; +extern lck_attr_t vm_page_lck_attr; +LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); /* * Routine: vm_fault_init * Purpose: * Initialize our private data structures. */ +__startup_func void vm_fault_init(void) { @@ -217,44 +253,48 @@ vm_fault_init(void) * The formula here simply uses the number of gigabytes of ram to adjust the percentage. */ - vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100; + vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100; /* * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry. 
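/*
 * [Illustrative sketch, not part of the diff] A user-space model of the
 * vmrtfrs ring buffer declared above. vm_rtfault_record_t's real layout is
 * kernel-private, so the record here is a guess at the essentials, and a
 * pthread mutex stands in for the vm_rtfr_slock spin lock.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
	uint64_t	fault_start;	/* timestamp */
	uintptr_t	vaddr;		/* faulting address */
	int		fault_type;
} model_rtfault_record_t;

static struct {
	int	maxi;	/* highest valid index: num_records - 1 */
	int	curi;	/* next slot to overwrite */
	int64_t	total;
	model_rtfault_record_t *records;
} model_rtf;
static pthread_mutex_t model_rtf_lock = PTHREAD_MUTEX_INITIALIZER;

static void
model_rtf_init(int num_records)
{
	if (num_records < 1)	/* mirrors MAX(vmrtf_num_records, 1) above */
		num_records = 1;
	model_rtf.records = calloc((size_t)num_records, sizeof(*model_rtf.records));
	model_rtf.maxi = num_records - 1;
}

static void
model_rtf_record(uint64_t start, uintptr_t vaddr, int type)
{
	pthread_mutex_lock(&model_rtf_lock);
	model_rtf.records[model_rtf.curi] =
	    (model_rtfault_record_t){ start, vaddr, type };
	model_rtf.curi = (model_rtf.curi == model_rtf.maxi) ? 0 : model_rtf.curi + 1;
	model_rtf.total++;	/* the oldest record is overwritten on wrap */
	pthread_mutex_unlock(&model_rtf_lock);
}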
*/ - if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) { - for ( i = 0; i < VM_PAGER_MAX_MODES; i++) { - if (vm_compressor_temp > 0 && - ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) { + if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) { + for (i = 0; i < VM_PAGER_MAX_MODES; i++) { + if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) { need_default_val = FALSE; vm_compressor_mode = vm_compressor_temp; break; } } - if (need_default_val) + if (need_default_val) { printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp); - } + } + } if (need_default_val) { /* If no boot arg or incorrect boot arg, try device tree. */ PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode)); } - PE_parse_boot_argn("vm_compressor_threads", &vm_compressor_thread_count, sizeof (vm_compressor_thread_count)); - - if (PE_parse_boot_argn("vm_compressor_immediate", &vm_compressor_temp, sizeof (vm_compressor_temp))) - vm_compressor_immediate_preferred_override = TRUE; - else { - if (PE_get_default("kern.vm_compressor_immediate", &vm_compressor_temp, sizeof(vm_compressor_temp))) - vm_compressor_immediate_preferred_override = TRUE; - } - if (vm_compressor_immediate_preferred_override == TRUE) { - if (vm_compressor_temp) - vm_compressor_immediate_preferred = TRUE; - else - vm_compressor_immediate_preferred = FALSE; - } printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode); + + PE_parse_boot_argn("vm_protect_privileged_from_untrusted", + &vm_protect_privileged_from_untrusted, + sizeof(vm_protect_privileged_from_untrusted)); +} + +__startup_func +static void +vm_rtfault_record_init(void) +{ + size_t size; + + vmrtf_num_records = MAX(vmrtf_num_records, 1); + size = vmrtf_num_records * sizeof(vm_rtfault_record_t); + vmrtfrs.vm_rtf_records = zalloc_permanent(size, + ZALIGN(vm_rtfault_record_t)); + vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1; } +STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init); /* * Routine: vm_fault_cleanup @@ -272,14 +312,14 @@ vm_fault_init(void) */ void vm_fault_cleanup( - register vm_object_t object, - register vm_page_t top_page) + vm_object_t object, + vm_page_t top_page) { vm_object_paging_end(object); - vm_object_unlock(object); + vm_object_unlock(object); if (top_page != VM_PAGE_NULL) { - object = top_page->object; + object = VM_PAGE_OBJECT(top_page); vm_object_lock(object); VM_PAGE_FREE(top_page); @@ -288,38 +328,20 @@ vm_fault_cleanup( } } -#if MACH_CLUSTER_STATS -#define MAXCLUSTERPAGES 16 -struct { - unsigned long pages_in_cluster; - unsigned long pages_at_higher_offsets; - unsigned long pages_at_lower_offsets; -} cluster_stats_in[MAXCLUSTERPAGES]; -#define CLUSTER_STAT(clause) clause -#define CLUSTER_STAT_HIGHER(x) \ - ((cluster_stats_in[(x)].pages_at_higher_offsets)++) -#define CLUSTER_STAT_LOWER(x) \ - ((cluster_stats_in[(x)].pages_at_lower_offsets)++) -#define CLUSTER_STAT_CLUSTER(x) \ - ((cluster_stats_in[(x)].pages_in_cluster)++) -#else /* MACH_CLUSTER_STATS */ -#define CLUSTER_STAT(clause) -#endif /* MACH_CLUSTER_STATS */ - #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) -boolean_t vm_page_deactivate_behind = TRUE; -/* - * default sizes given VM_BEHAVIOR_DEFAULT reference behavior +boolean_t vm_page_deactivate_behind = TRUE; +/* + * default sizes given VM_BEHAVIOR_DEFAULT reference behavior */ -#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 -#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... 
*/ +#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 +#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */ /* we use it to size an array on the stack */ int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; -#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) +#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) /* * vm_page_is_sequential @@ -333,90 +355,101 @@ int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; static void vm_fault_is_sequential( - vm_object_t object, - vm_object_offset_t offset, - vm_behavior_t behavior) + vm_object_t object, + vm_object_offset_t offset, + vm_behavior_t behavior) { - vm_object_offset_t last_alloc; - int sequential; - int orig_sequential; + vm_object_offset_t last_alloc; + int sequential; + int orig_sequential; - last_alloc = object->last_alloc; + last_alloc = object->last_alloc; sequential = object->sequential; orig_sequential = sequential; + offset = vm_object_trunc_page(offset); + if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) { + /* re-faulting in the same page: no change in behavior */ + return; + } + switch (behavior) { case VM_BEHAVIOR_RANDOM: - /* + /* * reset indicator of sequential behavior */ - sequential = 0; - break; + sequential = 0; + break; case VM_BEHAVIOR_SEQUENTIAL: - if (offset && last_alloc == offset - PAGE_SIZE_64) { - /* + if (offset && last_alloc == offset - PAGE_SIZE_64) { + /* * advance indicator of sequential behavior */ - if (sequential < MAX_SEQUENTIAL_RUN) - sequential += PAGE_SIZE; + if (sequential < MAX_SEQUENTIAL_RUN) { + sequential += PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; case VM_BEHAVIOR_RSEQNTL: - if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { - /* + if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { + /* * advance indicator of sequential behavior */ - if (sequential > -MAX_SEQUENTIAL_RUN) - sequential -= PAGE_SIZE; + if (sequential > -MAX_SEQUENTIAL_RUN) { + sequential -= PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; case VM_BEHAVIOR_DEFAULT: default: - if (offset && last_alloc == (offset - PAGE_SIZE_64)) { - /* + if (offset && last_alloc == (offset - PAGE_SIZE_64)) { + /* * advance indicator of sequential behavior */ - if (sequential < 0) - sequential = 0; - if (sequential < MAX_SEQUENTIAL_RUN) - sequential += PAGE_SIZE; - + if (sequential < 0) { + sequential = 0; + } + if (sequential < MAX_SEQUENTIAL_RUN) { + sequential += PAGE_SIZE; + } } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) { - /* + /* * advance indicator of sequential behavior */ - if (sequential > 0) - sequential = 0; - if (sequential > -MAX_SEQUENTIAL_RUN) - sequential -= PAGE_SIZE; + if (sequential > 0) { + sequential = 0; + } + if (sequential > -MAX_SEQUENTIAL_RUN) { + sequential -= PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; } if (sequential != orig_sequential) { - if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { - /* + if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { + /* * if someone else has already updated object->sequential * don't bother trying to update it or object->last_alloc */ - return; + return; } } /* @@ -451,61 +484,65 @@ int vm_page_deactivate_behind_count = 0; static boolean_t vm_fault_deactivate_behind( - vm_object_t 
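/*
 * [Illustrative sketch, not part of the diff] vm_fault_is_sequential keeps a
 * signed byte count of the current run in object->sequential (positive =
 * forward, negative = backward), clamped to +/- MAX_SEQUENTIAL_RUN, and
 * publishes it with a compare-and-swap so a racing fault simply wins. A
 * user-space model of the VM_BEHAVIOR_DEFAULT arm, with C11 atomics standing
 * in for OSCompareAndSwap:
 */
#include <stdatomic.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE		4096
#define MODEL_MAX_SEQ_RUN	(1024 * 1024 * 1024)

struct model_object {
	_Atomic int	sequential;	/* run length in bytes, signed */
	uint64_t	last_alloc;	/* offset of the previous fault */
};

static void
model_note_default_fault(struct model_object *o, uint64_t offset)
{
	int orig = atomic_load(&o->sequential);
	int seq = orig;

	if (offset && o->last_alloc == offset - MODEL_PAGE_SIZE) {
		if (seq < 0)
			seq = 0;		/* direction flipped */
		if (seq < MODEL_MAX_SEQ_RUN)
			seq += MODEL_PAGE_SIZE;
	} else if (o->last_alloc && o->last_alloc == offset + MODEL_PAGE_SIZE) {
		if (seq > 0)
			seq = 0;
		if (seq > -MODEL_MAX_SEQ_RUN)
			seq -= MODEL_PAGE_SIZE;
	} else {
		seq = 0;			/* non-adjacent access: reset */
	}
	if (seq != orig &&
	    !atomic_compare_exchange_strong(&o->sequential, &orig, seq)) {
		/* someone else updated it; leave last_alloc alone too */
		return;
	}
	o->last_alloc = offset;
}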
object, - vm_object_offset_t offset, - vm_behavior_t behavior) + vm_object_t object, + vm_object_offset_t offset, + vm_behavior_t behavior) { - int n; - int pages_in_run = 0; - int max_pages_in_run = 0; - int sequential_run; - int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - vm_object_offset_t run_offset = 0; - vm_object_offset_t pg_offset = 0; - vm_page_t m; - vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; + int n; + int pages_in_run = 0; + int max_pages_in_run = 0; + int sequential_run; + int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + vm_object_offset_t run_offset = 0; + vm_object_offset_t pg_offset = 0; + vm_page_t m; + vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; pages_in_run = 0; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ #endif - - if (object == kernel_object || vm_page_deactivate_behind == FALSE) { + if (object == kernel_object || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) { /* * Do not deactivate pages from the kernel object: they * are not intended to become pageable. * or we've disabled the deactivate behind mechanism + * or we are dealing with an offset that is not aligned to + * the system's PAGE_SIZE because in that case we will + * handle the deactivation on the aligned offset and, thus, + * the full PAGE_SIZE page once. This helps us avoid the redundant + * deactivates and the extra faults. */ return FALSE; } if ((sequential_run = object->sequential)) { - if (sequential_run < 0) { - sequential_behavior = VM_BEHAVIOR_RSEQNTL; - sequential_run = 0 - sequential_run; - } else { - sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - } + if (sequential_run < 0) { + sequential_behavior = VM_BEHAVIOR_RSEQNTL; + sequential_run = 0 - sequential_run; + } else { + sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + } } switch (behavior) { case VM_BEHAVIOR_RANDOM: break; case VM_BEHAVIOR_SEQUENTIAL: - if (sequential_run >= (int)PAGE_SIZE) { + if (sequential_run >= (int)PAGE_SIZE) { run_offset = 0 - PAGE_SIZE_64; max_pages_in_run = 1; } break; case VM_BEHAVIOR_RSEQNTL: - if (sequential_run >= (int)PAGE_SIZE) { + if (sequential_run >= (int)PAGE_SIZE) { run_offset = PAGE_SIZE_64; max_pages_in_run = 1; } break; case VM_BEHAVIOR_DEFAULT: default: - { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; + { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; - /* + /* * determine if the run of sequential accesss has been * long enough on an object with default access behavior * to consider it for deactivation @@ -516,27 +553,26 @@ vm_fault_deactivate_behind( * in this kind of odd fashion in order to prevent wrap around * at the end points */ - if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { - if (offset >= behind) { + if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { + if (offset >= behind) { run_offset = 0 - behind; pg_offset = PAGE_SIZE_64; max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; } } else { - if (offset < -behind) { + if (offset < -behind) { run_offset = behind; pg_offset = 0 - PAGE_SIZE_64; max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; } } } - break; + break;} } - } - for (n = 0; n < max_pages_in_run; n++) { + for (n = 0; n < max_pages_in_run; n++) { m = vm_page_lookup(object, offset + run_offset + (n * pg_offset)); - if (m && !m->laundry && !m->busy && !m->no_cache && !m->throttled && 
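/*
 * [Illustrative sketch, not part of the diff] The switch above boils down to
 * computing a deactivation window: where to start relative to the faulting
 * offset, which direction to step, and how many pages to visit. A standalone
 * model (wrap-around guards at the address-space end points are simplified):
 */
#include <stdbool.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE		4096
#define MODEL_BEHIND_WINDOW	128	/* vm_default_behind */
#define MODEL_BEHIND_CLUSTER	16	/* VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER */

struct model_window {
	int64_t	run_offset;	/* first page to examine, relative to fault */
	int64_t	pg_offset;	/* step between examined pages */
	int	max_pages;
};

static struct model_window
model_deactivate_window(bool default_behavior, bool forward, int64_t seq_run,
    uint64_t offset)
{
	struct model_window w = { 0, 0, 0 };
	int64_t behind = (int64_t)MODEL_BEHIND_WINDOW * MODEL_PAGE_SIZE;

	if (!default_behavior) {
		/* explicit sequential behaviors deactivate one trailing page */
		if (seq_run >= MODEL_PAGE_SIZE) {
			w.run_offset = forward ? -MODEL_PAGE_SIZE : MODEL_PAGE_SIZE;
			w.max_pages = 1;
		}
	} else if (seq_run >= behind) {
		/* default behavior: a full trailing cluster, but only once the
		 * run is long enough and the window can't wrap past zero */
		if (forward && offset >= (uint64_t)behind) {
			w.run_offset = -behind;
			w.pg_offset = MODEL_PAGE_SIZE;
			w.max_pages = MODEL_BEHIND_CLUSTER;
		} else if (!forward) {
			w.run_offset = behind;
			w.pg_offset = -MODEL_PAGE_SIZE;
			w.max_pages = MODEL_BEHIND_CLUSTER;
		}
	}
	return w;
}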
!m->fictitious && !m->absent) { + if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) { page_run[pages_in_run++] = m; /* @@ -550,21 +586,20 @@ vm_fault_deactivate_behind( * in the past (TLB caches don't hang around for very long), and of course could just as easily * have happened before we did the deactivate_behind. */ - pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); + pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); } } if (pages_in_run) { vm_page_lockspin_queues(); for (n = 0; n < pages_in_run; n++) { - m = page_run[n]; vm_page_deactivate_internal(m, FALSE); vm_page_deactivate_behind_count++; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif } vm_page_unlock_queues(); @@ -576,40 +611,41 @@ vm_fault_deactivate_behind( #if (DEVELOPMENT || DEBUG) -uint32_t vm_page_creation_throttled_hard = 0; -uint32_t vm_page_creation_throttled_soft = 0; -uint64_t vm_page_creation_throttle_avoided = 0; +uint32_t vm_page_creation_throttled_hard = 0; +uint32_t vm_page_creation_throttled_soft = 0; +uint64_t vm_page_creation_throttle_avoided = 0; #endif /* DEVELOPMENT || DEBUG */ static int vm_page_throttled(boolean_t page_kept) { - clock_sec_t elapsed_sec; - clock_sec_t tv_sec; - clock_usec_t tv_usec; - + clock_sec_t elapsed_sec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + thread_t thread = current_thread(); - - if (thread->options & TH_OPT_VMPRIV) - return (0); + + if (thread->options & TH_OPT_VMPRIV) { + return 0; + } if (thread->t_page_creation_throttled) { thread->t_page_creation_throttled = 0; - - if (page_kept == FALSE) + + if (page_kept == FALSE) { goto no_throttle; + } } if (NEED_TO_HARD_THROTTLE_THIS_TASK()) { #if (DEVELOPMENT || DEBUG) thread->t_page_creation_throttled_hard++; OSAddAtomic(1, &vm_page_creation_throttled_hard); #endif /* DEVELOPMENT || DEBUG */ - return (HARD_THROTTLE_DELAY); + return HARD_THROTTLE_DELAY; } - if ((vm_page_free_count < vm_page_throttle_limit || ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && SWAPPER_NEEDS_TO_UNTHROTTLE())) && + if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) && thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) { - if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) { #if (DEVELOPMENT || DEBUG) OSAddAtomic64(1, &vm_page_creation_throttle_avoided); @@ -622,7 +658,6 @@ vm_page_throttled(boolean_t page_kept) if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS || (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) { - if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) { /* * we'll reset our stats to give a well behaved app @@ -630,28 +665,28 @@ vm_page_throttled(boolean_t page_kept) * over a long period of time a chance to get out of * the throttled state... we reset the counter and timestamp * so that if it stays under the rate limit for the next second - * it will be back in our good graces... if it exceeds it, it + * it will be back in our good graces... 
if it exceeds it, it * will remain in the throttled state */ thread->t_page_creation_time = tv_sec; thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1); } - ++vm_page_throttle_count; + VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1); thread->t_page_creation_throttled = 1; - if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) && HARD_THROTTLE_LIMIT_REACHED()) { + if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) { #if (DEVELOPMENT || DEBUG) thread->t_page_creation_throttled_hard++; OSAddAtomic(1, &vm_page_creation_throttled_hard); #endif /* DEVELOPMENT || DEBUG */ - return (HARD_THROTTLE_DELAY); + return HARD_THROTTLE_DELAY; } else { #if (DEVELOPMENT || DEBUG) thread->t_page_creation_throttled_soft++; OSAddAtomic(1, &vm_page_creation_throttled_soft); #endif /* DEVELOPMENT || DEBUG */ - return (SOFT_THROTTLE_DELAY); + return SOFT_THROTTLE_DELAY; } } thread->t_page_creation_time = tv_sec; @@ -660,7 +695,7 @@ vm_page_throttled(boolean_t page_kept) no_throttle: thread->t_page_creation_count++; - return (0); + return 0; } @@ -670,49 +705,30 @@ no_throttle: * cleanup is based on being called from vm_fault_page * * object must be locked - * object == m->object + * object == m->vmp_object */ static vm_fault_return_t -vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t interruptible_state, boolean_t page_throttle) +vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle) { int throttle_delay; - if (object->shadow_severed || + if (object->shadow_severed || VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) { - /* + /* * Either: * 1. the shadow chain was severed, * 2. the purgeable object is volatile or empty and is marked * to fault on access while volatile. * Just have to return an error at this point */ - if (m != VM_PAGE_NULL) - VM_PAGE_FREE(m); + if (m != VM_PAGE_NULL) { + VM_PAGE_FREE(m); + } vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); - } - if (vm_backing_store_low) { - /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. - */ - if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { - - if (m != VM_PAGE_NULL) - VM_PAGE_FREE(m); - vm_fault_cleanup(object, first_m); - - assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); - - thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level(interruptible_state); - - return (VM_FAULT_RETRY); - } + return VM_FAULT_MEMORY_ERROR; } if (page_throttle == TRUE) { if ((throttle_delay = vm_page_throttled(FALSE))) { @@ -720,8 +736,9 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int * we're throttling zero-fills... 
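/*
 * [Illustrative sketch, not part of the diff] The per-thread rate limiter
 * above, in user space. A thread is throttled once it sustains more than
 * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC page creations per second beyond
 * one full period; after three quiet periods the counters are re-seeded so
 * one well-behaved second clears the throttle, exactly as the comment says.
 */
#include <stdint.h>
#include <time.h>

#define MODEL_PERIOD_SECS	6
#define MODEL_RATE_PER_SEC	20000

struct model_thread {
	time_t		creation_time;
	uint32_t	creation_count;
};

/* Returns nonzero when this creation should be delayed (hard or soft). */
static int
model_page_creation_throttled(struct model_thread *t)
{
	if (t->creation_count > MODEL_PERIOD_SECS * MODEL_RATE_PER_SEC) {
		time_t now = time(NULL);
		time_t elapsed = now - t->creation_time;

		if (elapsed <= MODEL_PERIOD_SECS ||
		    t->creation_count / (uint32_t)elapsed >= MODEL_RATE_PER_SEC) {
			if (elapsed >= 3 * MODEL_PERIOD_SECS) {
				/* re-seed: one more second under the rate
				 * limit restores good standing */
				t->creation_time = now;
				t->creation_count =
				    MODEL_RATE_PER_SEC * (MODEL_PERIOD_SECS - 1);
			}
			return 1;	/* caller sleeps HARD/SOFT delay */
		}
		/* rate dropped below the limit: restart accounting */
		t->creation_time = now;
		t->creation_count = 0;
	}
	t->creation_count++;
	return 0;
}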
* treat this as if we couldn't grab a page */ - if (m != VM_PAGE_NULL) + if (m != VM_PAGE_NULL) { VM_PAGE_FREE(m); + } vm_fault_cleanup(object, first_m); VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); @@ -734,24 +751,58 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, boolean_t int } thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } } - return (VM_FAULT_SUCCESS); + return VM_FAULT_SUCCESS; +} + +/* + * Clear the code signing bits on the given page_t + */ +static void +vm_fault_cs_clear(vm_page_t m) +{ + m->vmp_cs_validated = VMP_CS_ALL_FALSE; + m->vmp_cs_tainted = VMP_CS_ALL_FALSE; + m->vmp_cs_nx = VMP_CS_ALL_FALSE; } +/* + * Enqueues the given page on the throttled queue. + * The caller must hold the vm_page_queue_lock and it will be held on return. + */ +static void +vm_fault_enqueue_throttled_locked(vm_page_t m) +{ + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + assert(!VM_PAGE_WIRED(m)); + + /* + * can't be on the pageout queue since we don't + * have a pager to try and clean to + */ + vm_page_queues_remove(m, TRUE); + vm_page_check_pageable_safe(m); + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); + m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; + vm_page_throttled_count++; +} /* * do the work to zero fill a page and * inject it into the correct paging queue * - * m->object must be locked + * m->vmp_object must be locked * page queue lock must NOT be held */ static int vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) { - int my_fault = DBG_ZERO_FILL_FAULT; + int my_fault = DBG_ZERO_FILL_FAULT; + vm_object_t object; + + object = VM_PAGE_OBJECT(m); /* * This is is a zero-fill page fault... @@ -765,55 +816,38 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) * execution. i.e. it is the responsibility * of higher layers to call for an instruction * sync after changing the contents and before - * sending a program into this area. We + * sending a program into this area. 
We * choose this approach for performance */ - m->pmapped = TRUE; - - m->cs_validated = FALSE; - m->cs_tainted = FALSE; - m->cs_nx = FALSE; + vm_fault_cs_clear(m); + m->vmp_pmapped = TRUE; if (no_zero_fill == TRUE) { my_fault = DBG_NZF_PAGE_FAULT; - if (m->absent && m->busy) - return (my_fault); + if (m->vmp_absent && m->vmp_busy) { + return my_fault; + } } else { vm_page_zero_fill(m); VM_STAT_INCR(zero_fill_count); DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL); } - assert(!m->laundry); - assert(m->object != kernel_object); - //assert(m->pageq.next == NULL && m->pageq.prev == NULL); - - if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && - (m->object->purgable == VM_PURGABLE_DENY || - m->object->purgable == VM_PURGABLE_NONVOLATILE || - m->object->purgable == VM_PURGABLE_VOLATILE )) { - + assert(!m->vmp_laundry); + assert(object != kernel_object); + //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); + if (!VM_DYNAMIC_PAGING_ENABLED() && + (object->purgable == VM_PURGABLE_DENY || + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE)) { vm_page_lockspin_queues(); - - if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) { - assert(!VM_PAGE_WIRED(m)); - - /* - * can't be on the pageout queue since we don't - * have a pager to try and clean to - */ - assert(!m->pageout_queue); - - vm_page_queues_remove(m); - vm_page_check_pageable_safe(m); - queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq); - m->throttled = TRUE; - vm_page_throttled_count++; + if (!VM_DYNAMIC_PAGING_ENABLED()) { + vm_fault_enqueue_throttled_locked(m); } vm_page_unlock_queues(); } - return (my_fault); + return my_fault; } @@ -827,7 +861,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) * The required permissions for the page is given * in "fault_type". Desired permissions are included * in "protection". - * fault_info is passed along to determine pagein cluster + * fault_info is passed along to determine pagein cluster * limits... it contains the expected reference pattern, * cluster size if available, etc... * @@ -859,9 +893,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) * The "result_page" is also left busy. It is not removed * from the pageout queues. * Special Case: - * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the + * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the * fault succeeded but there's no VM page (i.e. the VM object - * does not actually hold VM pages, but device memory or + * does not actually hold VM pages, but device memory or * large pages). The object is still locked and we still hold a * paging_in_progress reference. */ @@ -871,121 +905,94 @@ unsigned int vm_fault_page_forced_retry = 0; vm_fault_return_t vm_fault_page( /* Arguments: */ - vm_object_t first_object, /* Object to begin search */ - vm_object_offset_t first_offset, /* Offset into object */ - vm_prot_t fault_type, /* What access is requested */ - boolean_t must_be_resident,/* Must page be resident? */ - boolean_t caller_lookup, /* caller looked up page */ + vm_object_t first_object, /* Object to begin search */ + vm_object_offset_t first_offset, /* Offset into object */ + vm_prot_t fault_type, /* What access is requested */ + boolean_t must_be_resident,/* Must page be resident? 
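/*
 * [Illustrative sketch, not part of the diff] The vm_fault_page() contract
 * documented above, restated as a caller-side switch. The enum mirrors the
 * vm_fault_return_t values named in the comment; the real caller is
 * vm_fault_internal().
 */
enum model_fault_return {
	MODEL_FAULT_SUCCESS,
	MODEL_FAULT_SUCCESS_NO_PAGE,
	MODEL_FAULT_RETRY,
	MODEL_FAULT_INTERRUPTED,
	MODEL_FAULT_MEMORY_ERROR,
	MODEL_FAULT_MEMORY_SHORTAGE,
};

static int
model_handle_fault_return(enum model_fault_return kr)
{
	switch (kr) {
	case MODEL_FAULT_SUCCESS:
		/* result_page is busy and off the pageout queues; the object
		 * is still locked with a paging_in_progress reference. The
		 * caller must PAGE_WAKEUP_DONE() the page and then
		 * vm_fault_cleanup() the object and top_page. */
		return 0;
	case MODEL_FAULT_SUCCESS_NO_PAGE:
		/* fault succeeded but the object holds no VM pages (device
		 * memory or large pages); the object lock and paging
		 * reference are likewise still held. */
		return 0;
	case MODEL_FAULT_RETRY:
	case MODEL_FAULT_INTERRUPTED:
		/* all locks and references were dropped: re-drive or abort */
		return 1;
	default:
		/* MEMORY_ERROR / MEMORY_SHORTAGE: locks dropped, fail the
		 * fault (error_code holds the specific reason, if asked) */
		return -1;
	}
}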
*/ + boolean_t caller_lookup, /* caller looked up page */ /* Modifies in place: */ - vm_prot_t *protection, /* Protection for mapping */ - vm_page_t *result_page, /* Page found, if successful */ + vm_prot_t *protection, /* Protection for mapping */ + vm_page_t *result_page, /* Page found, if successful */ /* Returns: */ - vm_page_t *top_page, /* Page in top object, if - * not result_page. */ + vm_page_t *top_page, /* Page in top object, if + * not result_page. */ int *type_of_fault, /* if non-null, fill in with type of fault - * COW, zero-fill, etc... returned in trace point */ + * COW, zero-fill, etc... returned in trace point */ /* More arguments: */ - kern_return_t *error_code, /* code if page is in error */ - boolean_t no_zero_fill, /* don't zero fill absent pages */ - boolean_t data_supply, /* treat as data_supply if - * it is a write fault and a full - * page is provided */ + kern_return_t *error_code, /* code if page is in error */ + boolean_t no_zero_fill, /* don't zero fill absent pages */ + boolean_t data_supply, /* treat as data_supply if + * it is a write fault and a full + * page is provided */ vm_object_fault_info_t fault_info) { - vm_page_t m; - vm_object_t object; - vm_object_offset_t offset; - vm_page_t first_m; - vm_object_t next_object; - vm_object_t copy_object; - boolean_t look_for_page; - boolean_t force_fault_retry = FALSE; - vm_prot_t access_required = fault_type; - vm_prot_t wants_copy_flag; - CLUSTER_STAT(int pages_at_higher_offsets;) - CLUSTER_STAT(int pages_at_lower_offsets;) - kern_return_t wait_result; - boolean_t interruptible_state; - boolean_t data_already_requested = FALSE; - vm_behavior_t orig_behavior; - vm_size_t orig_cluster_size; - vm_fault_return_t error; - int my_fault; - uint32_t try_failed_count; - int interruptible; /* how may fault be interrupted? */ - int external_state = VM_EXTERNAL_STATE_UNKNOWN; - memory_object_t pager; - vm_fault_return_t retval; + vm_page_t m; + vm_object_t object; + vm_object_offset_t offset; + vm_page_t first_m; + vm_object_t next_object; + vm_object_t copy_object; + boolean_t look_for_page; + boolean_t force_fault_retry = FALSE; + vm_prot_t access_required = fault_type; + vm_prot_t wants_copy_flag; + kern_return_t wait_result; + wait_interrupt_t interruptible_state; + boolean_t data_already_requested = FALSE; + vm_behavior_t orig_behavior; + vm_size_t orig_cluster_size; + vm_fault_return_t error; + int my_fault; + uint32_t try_failed_count; + int interruptible; /* how may fault be interrupted? */ + int external_state = VM_EXTERNAL_STATE_UNKNOWN; + memory_object_t pager; + vm_fault_return_t retval; + int grab_options; /* - * MACH page map - an optional optimization where a bit map is maintained - * by the VM subsystem for internal objects to indicate which pages of - * the object currently reside on backing store. This existence map - * duplicates information maintained by the vnode pager. It is - * created at the time of the first pageout against the object, i.e. - * at the same time pager for the object is created. The optimization - * is designed to eliminate pager interaction overhead, if it is - * 'known' that the page does not exist on backing store. - * - * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is - * either marked as paged out in the existence map for the object or no - * existence map exists for the object. MUST_ASK_PAGER() is one of the - * criteria in the decision to invoke the pager. 
It is also used as one - * of the criteria to terminate the scan for adjacent pages in a clustered - * pagein operation. Note that MUST_ASK_PAGER() always evaluates to TRUE for - * permanent objects. Note also that if the pager for an internal object - * has not been created, the pager is not invoked regardless of the value - * of MUST_ASK_PAGER() and that clustered pagein scans are only done on an object - * for which a pager has been created. + * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is + * marked as paged out in the compressor pager or the pager doesn't exist. + * Note also that if the pager for an internal object + * has not been created, the pager is not invoked regardless of the value + * of MUST_ASK_PAGER(). * * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset - * is marked as paged out in the existence map for the object. PAGED_OUT() + * is marked as paged out in the compressor pager. * PAGED_OUT() is used to determine if a page has already been pushed * into a copy object in order to avoid a redundant page out operation. */ -#if MACH_PAGEMAP -#define MUST_ASK_PAGER(o, f, s) \ - ((vm_external_state_get((o)->existence_map, (f)) \ - != VM_EXTERNAL_STATE_ABSENT) && \ - (s = (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)))) \ - != VM_EXTERNAL_STATE_ABSENT) -#define PAGED_OUT(o, f) \ - ((vm_external_state_get((o)->existence_map, (f)) \ - == VM_EXTERNAL_STATE_EXISTS) || \ - (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) \ - == VM_EXTERNAL_STATE_EXISTS)) -#else /* MACH_PAGEMAP */ -#define MUST_ASK_PAGER(o, f, s) \ +#define MUST_ASK_PAGER(o, f, s) \ ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT) + #define PAGED_OUT(o, f) \ (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS) -#endif /* MACH_PAGEMAP */ /* * Recovery actions */ -#define RELEASE_PAGE(m) \ - MACRO_BEGIN \ - PAGE_WAKEUP_DONE(m); \ - if (!m->active && !m->inactive && !m->throttled) { \ - vm_page_lockspin_queues(); \ - if (!m->active && !m->inactive && !m->throttled) { \ - if (COMPRESSED_PAGER_IS_ACTIVE) \ - vm_page_deactivate(m); \ - else \ - vm_page_activate(m); \ - } \ - vm_page_unlock_queues(); \ - } \ +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \ + vm_page_deactivate(m); \ + else \ + vm_page_activate(m); \ + } \ + vm_page_unlock_queues(); \ + } \ MACRO_END #if TRACEFAULTPAGE - dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ #endif interruptible = fault_info->interruptible; interruptible_state = thread_interrupt_level(interruptible); - + /* * INVARIANTS (through entire routine): * @@ -1017,11 +1024,6 @@ vm_fault_page( first_m = VM_PAGE_NULL; access_required = fault_type; - - XPR(XPR_VM_FAULT, - "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", - object, offset, fault_type, *protection, 0); - /* * default type of fault */ @@ -1029,17 +1031,25 @@ vm_fault_page( while (TRUE) { #if TRACEFAULTPAGE - dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif + + grab_options = 0; +#if CONFIG_SECLUDED_MEMORY + if (object->can_grab_secluded) { + grab_options |= VM_PAGE_GRAB_SECLUDED; + } +#endif /* CONFIG_SECLUDED_MEMORY */ + if 
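/*
 * [Illustrative sketch, not part of the diff] With the MACH_PAGEMAP
 * existence map gone, both macros above reduce to a query of the compressor
 * pager's per-slot state. A model over the three external states:
 */
#include <stdbool.h>

enum model_ext_state {
	MODEL_EXT_UNKNOWN,	/* no information: must ask the pager */
	MODEL_EXT_ABSENT,	/* known not to be paged out */
	MODEL_EXT_EXISTS,	/* a compressed copy exists */
};

/* MUST_ASK_PAGER: invoke the pager unless the slot is known-absent. */
static bool
model_must_ask_pager(enum model_ext_state s)
{
	return s != MODEL_EXT_ABSENT;
}

/* PAGED_OUT: only a known-present slot counts; used to skip a redundant
 * push of the page into a copy object. */
static bool
model_paged_out(enum model_ext_state s)
{
	return s == MODEL_EXT_EXISTS;
}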
(!object->alive) { - /* + /* * object is no longer valid * clean up and return error */ vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); + return VM_FAULT_MEMORY_ERROR; } if (!object->pager_created && object->phys_contiguous) { @@ -1065,8 +1075,8 @@ vm_fault_page( vm_object_paging_end(object); while (object->blocked_access) { vm_object_sleep(object, - VM_OBJECT_EVENT_UNBLOCKED, - THREAD_UNINT); + VM_OBJECT_EVENT_UNBLOCKED, + THREAD_UNINT); } vm_fault_page_blocked_access++; vm_object_paging_begin(object); @@ -1087,47 +1097,44 @@ vm_fault_page( m = *result_page; caller_lookup = FALSE; /* no longer valid after that */ } else { - m = vm_page_lookup(object, offset); + m = vm_page_lookup(object, vm_object_trunc_page(offset)); } #if TRACEFAULTPAGE - dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m != VM_PAGE_NULL) { - - if (m->busy) { - /* + if (m->vmp_busy) { + /* * The page is being brought in, * wait for it and then retry. */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif wait_result = PAGE_SLEEP(object, m, interruptible); - XPR(XPR_VM_FAULT, - "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n", - object, offset, - m, 0, 0); counter(c_vm_fault_page_block_busy_kernel++); if (wait_result != THREAD_AWAKENED) { vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - if (wait_result == THREAD_RESTART) - return (VM_FAULT_RETRY); - else - return (VM_FAULT_INTERRUPTED); + if (wait_result == THREAD_RESTART) { + return VM_FAULT_RETRY; + } else { + return VM_FAULT_INTERRUPTED; + } } continue; } - if (m->laundry) { - m->pageout = FALSE; + if (m->vmp_laundry) { + m->vmp_free_when_done = FALSE; - if (!m->cleaning) + if (!m->vmp_cleaning) { vm_pageout_steal_laundry(m, FALSE); + } } - if (m->phys_page == vm_page_guard_addr) { + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { /* * Guard page: off limits ! */ @@ -1138,12 +1145,13 @@ vm_fault_page( * be just to wire or unwire it. * Let's pretend it succeeded... */ - m->busy = TRUE; + m->vmp_busy = TRUE; *result_page = m; assert(first_m == VM_PAGE_NULL); *top_page = first_m; - if (type_of_fault) + if (type_of_fault) { *type_of_fault = DBG_GUARD_FAULT; + } thread_interrupt_level(interruptible_state); return VM_FAULT_SUCCESS; } else { @@ -1157,41 +1165,42 @@ vm_fault_page( } } - if (m->error) { - /* + if (m->vmp_error) { + /* * The page is in error, give up now. */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ #endif - if (error_code) - *error_code = KERN_MEMORY_ERROR; + if (error_code) { + *error_code = KERN_MEMORY_ERROR; + } VM_PAGE_FREE(m); vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); + return VM_FAULT_MEMORY_ERROR; } - if (m->restart) { - /* + if (m->vmp_restart) { + /* * The pager wants us to restart * at the top of the chain, * typically because it has moved the * page to another pager, then do so. 
*/ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif VM_PAGE_FREE(m); vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } - if (m->absent) { - /* + if (m->vmp_absent) { + /* * The page isn't busy, but is absent, * therefore it's deemed "unavailable". * @@ -1200,7 +1209,7 @@ vm_fault_page( * next object (if there is one). */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ #endif next_object = object->shadow; @@ -1216,23 +1225,18 @@ vm_fault_page( /* * check for any conditions that prevent * us from creating a new zero-fill page - * vm_fault_check will do all of the + * vm_fault_check will do all of the * fault cleanup in the case of an error condition * including resetting the thread_interrupt_level */ error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); - if (error != VM_FAULT_SUCCESS) - return (error); - - XPR(XPR_VM_FAULT, - "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", - object, offset, - m, - first_object, 0); + if (error != VM_FAULT_SUCCESS) { + return error; + } if (object != first_object) { - /* + /* * free the absent page we just found */ VM_PAGE_FREE(m); @@ -1244,7 +1248,7 @@ vm_fault_page( vm_object_unlock(object); /* - * grab the original page we + * grab the original page we * 'soldered' in place and * retake lock on 'first_object' */ @@ -1256,15 +1260,16 @@ vm_fault_page( vm_object_lock(object); } else { - /* + /* * we're going to use the absent page we just found * so convert it to a 'busy' page */ - m->absent = FALSE; - m->busy = TRUE; + m->vmp_absent = FALSE; + m->vmp_busy = TRUE; + } + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) { + m->vmp_absent = TRUE; } - if (fault_info->mark_zf_absent && no_zero_fill == TRUE) - m->absent = TRUE; /* * zero-fill the page and put it on * the correct paging queue @@ -1273,28 +1278,20 @@ vm_fault_page( break; } else { - if (must_be_resident) + if (must_be_resident) { vm_object_paging_end(object); - else if (object != first_object) { + } else if (object != first_object) { vm_object_paging_end(object); VM_PAGE_FREE(m); } else { first_m = m; - m->absent = FALSE; - m->busy = TRUE; + m->vmp_absent = FALSE; + m->vmp_busy = TRUE; vm_page_lockspin_queues(); - - assert(!m->pageout_queue); - vm_page_queues_remove(m); - + vm_page_queues_remove(m, FALSE); vm_page_unlock_queues(); } - XPR(XPR_VM_FAULT, - "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n", - object, offset, - next_object, - offset+object->vo_shadow_offset,0); offset += object->vo_shadow_offset; fault_info->lo_offset += object->vo_shadow_offset; @@ -1305,7 +1302,7 @@ vm_fault_page( vm_object_unlock(object); object = next_object; vm_object_paging_begin(object); - + /* * reset to default type of fault */ @@ -1314,7 +1311,7 @@ vm_fault_page( continue; } } - if ((m->cleaning) + if ((m->vmp_cleaning) && ((object != first_object) || (object->copy != VM_OBJECT_NULL)) && (fault_type & VM_PROT_WRITE)) { /* @@ -1328,26 +1325,22 @@ vm_fault_page( * wired mapping. 
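/*
 * [Illustrative sketch, not part of the diff] The shadow-chain walk taken
 * when a page is absent: the offset is rebased by vo_shadow_offset at every
 * hop, and the kernel locks each next object before unlocking its parent.
 * A model of just the offset arithmetic:
 */
#include <stddef.h>
#include <stdint.h>

struct model_object {
	struct model_object	*shadow;
	uint64_t		shadow_offset;	/* vo_shadow_offset */
};

/* Translate an offset in 'top' into the bottom-most backing object. */
static struct model_object *
model_resolve_shadow(struct model_object *top, uint64_t *offset)
{
	struct model_object *o = top;

	while (o->shadow != NULL) {
		*offset += o->shadow_offset;	/* rebase into the shadow */
		o = o->shadow;
		/* the real walk stops earlier if a resident page is found
		 * or a pager can supply the data at this level */
	}
	return o;	/* no shadow left: zero-fill territory */
}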
*/ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ #endif - XPR(XPR_VM_FAULT, - "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n", - object, offset, - m, 0, 0); /* * take an extra ref so that object won't die */ vm_object_reference_locked(object); vm_fault_cleanup(object, first_m); - + counter(c_vm_fault_page_block_backoff_kernel++); vm_object_lock(object); assert(object->ref_count > 0); - m = vm_page_lookup(object, offset); + m = vm_page_lookup(object, vm_object_trunc_page(offset)); - if (m != VM_PAGE_NULL && m->cleaning) { + if (m != VM_PAGE_NULL && m->vmp_cleaning) { PAGE_ASSERT_WAIT(m, interruptible); vm_object_unlock(object); @@ -1361,17 +1354,17 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } - if (type_of_fault == NULL && m->speculative && + if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) && !(fault_info != NULL && fault_info->stealth)) { - /* + /* * If we were passed a non-NULL pointer for * "type_of_fault", than we came from * vm_fault... we'll let it deal with * this condition, since it - * needs to see m->speculative to correctly + * needs to see m->vmp_speculative to correctly * account the pageins, otherwise... * take it off the speculative queue, we'll * let the caller of vm_fault_page deal @@ -1381,37 +1374,15 @@ vm_fault_page( * it wants a "stealth" fault, we also leave * the page in the speculative queue. */ - vm_page_lockspin_queues(); - if (m->speculative) - vm_page_queues_remove(m); - vm_page_unlock_queues(); - } - - if (m->encrypted) { - /* - * ENCRYPTED SWAP: - * the user needs access to a page that we - * encrypted before paging it out. - * Decrypt the page now. - * Keep it busy to prevent anyone from - * accessing it during the decryption. - */ - m->busy = TRUE; - vm_page_decrypt(m, 0); - assert(object == m->object); - assert(m->busy); - PAGE_WAKEUP_DONE(m); - - /* - * Retry from the top, in case - * something changed while we were - * decrypting. - */ - continue; + vm_page_lockspin_queues(); + if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) { + vm_page_queues_remove(m, FALSE); + } + vm_page_unlock_queues(); } - ASSERT_PAGE_DECRYPTED(m); + assert(object == VM_PAGE_OBJECT(m)); - if (m->object->code_signed) { + if (object->code_signed) { /* * CODE SIGNING: * We just paged in a page from a signed @@ -1431,18 +1402,15 @@ vm_fault_page( * remove the page from the queue, but not the object */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - XPR(XPR_VM_FAULT, - "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n", - object, offset, m, 0, 0); - assert(!m->busy); - assert(!m->absent); + assert(!m->vmp_busy); + assert(!m->vmp_absent); - m->busy = TRUE; + m->vmp_busy = TRUE; break; } - + /* * we get here when there is no page present in the object at @@ -1451,6 +1419,7 @@ vm_fault_page( * this object can provide the data or we're the top object... * object is locked; m == NULL */ + if (must_be_resident) { if (fault_type == VM_PROT_NONE && object == kernel_object) { @@ -1470,40 +1439,42 @@ vm_fault_page( goto dont_look_for_page; } -#if !MACH_PAGEMAP + /* Don't expect to fault pages into the kernel object. 
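/*
 * [Illustrative sketch, not part of the diff] The block-and-backoff pattern
 * used above for a page being cleaned. Every function here is a local stub
 * for the kernel primitive named in its comment; the point is the ordering:
 * reference the object so it can't die, drop the paging state, re-check,
 * sleep, then return RETRY so the entire lookup is re-driven.
 */
#include <stdbool.h>

struct model_object;
struct model_page;

static void model_obj_reference(struct model_object *o) { (void)o; }	/* vm_object_reference_locked */
static void model_fault_cleanup(struct model_object *o,
    struct model_page *first_m) { (void)o; (void)first_m; }		/* vm_fault_cleanup */
static void model_obj_lock(struct model_object *o)       { (void)o; }
static void model_obj_unlock(struct model_object *o)     { (void)o; }
static bool model_page_still_cleaning(struct model_object *o)
{ (void)o; return false; }	/* re-lookup + vmp_cleaning check */
static void model_sleep_on_page(struct model_object *o)  { (void)o; }	/* PAGE_ASSERT_WAIT + thread_block */
static void model_obj_deallocate(struct model_object *o) { (void)o; }

enum { MODEL_FAULT_RETRY_RC = 1 };

static int
model_backoff_on_cleaning(struct model_object *object, struct model_page *first_m)
{
	model_obj_reference(object);		/* keep the object alive... */
	model_fault_cleanup(object, first_m);	/* ...while dropping paging refs */

	model_obj_lock(object);
	if (model_page_still_cleaning(object)) {
		model_obj_unlock(object);
		model_sleep_on_page(object);
	} else {
		model_obj_unlock(object);
	}
	model_obj_deallocate(object);
	return MODEL_FAULT_RETRY_RC;	/* restart the fault from the top */
}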
*/ + assert(object != kernel_object); + data_supply = FALSE; -#endif /* !MACH_PAGEMAP */ - look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); - + look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); + #if TRACEFAULTPAGE - dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (!look_for_page && object == first_object && !object->phys_contiguous) { /* * Allocate a new page for this object/offset pair as a placeholder */ - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); #if TRACEFAULTPAGE - dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m == VM_PAGE_NULL) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } if (fault_info && fault_info->batch_pmap_op == TRUE) { - vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + vm_page_insert_internal(m, object, + vm_object_trunc_page(offset), + VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); } } if (look_for_page) { - kern_return_t rc; - int my_fault_type; + kern_return_t rc; + int my_fault_type; /* * If the memory manager is not ready, we @@ -1511,14 +1482,11 @@ vm_fault_page( */ if (!object->pager_ready) { #if TRACEFAULTPAGE - dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if (m != VM_PAGE_NULL) - VM_PAGE_FREE(m); - - XPR(XPR_VM_FAULT, - "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", - object, offset, 0, 0, 0); + if (m != VM_PAGE_NULL) { + VM_PAGE_FREE(m); + } /* * take an extra ref so object won't die @@ -1534,8 +1502,9 @@ vm_fault_page( wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible); vm_object_unlock(object); - if (wait_result == THREAD_WAITING) + if (wait_result == THREAD_WAITING) { wait_result = thread_block(THREAD_CONTINUE_NULL); + } vm_object_deallocate(object); goto backoff; @@ -1544,7 +1513,7 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) { @@ -1554,10 +1523,11 @@ vm_fault_page( * wait for them to be resolved now. 
*/ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if (m != VM_PAGE_NULL) + if (m != VM_PAGE_NULL) { VM_PAGE_FREE(m); + } /* * take an extra ref so object won't die */ @@ -1571,7 +1541,7 @@ vm_fault_page( assert(object->ref_count > 0); if (object->paging_in_progress >= vm_object_pagein_throttle) { - vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); + vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); vm_object_unlock(object); wait_result = thread_block(THREAD_CONTINUE_NULL); @@ -1583,40 +1553,39 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } - if (object->internal && - (COMPRESSED_PAGER_IS_ACTIVE - || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)) { + if (object->internal) { int compressed_count_delta; + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + if (m == VM_PAGE_NULL) { /* * Allocate a new page for this object/offset pair as a placeholder */ - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); #if TRACEFAULTPAGE - dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m == VM_PAGE_NULL) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } - m->absent = TRUE; + m->vmp_absent = TRUE; if (fault_info && fault_info->batch_pmap_op == TRUE) { - vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); + vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL); } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); } } - assert(m->busy); - - m->absent = TRUE; + assert(m->vmp_busy); + + m->vmp_absent = TRUE; pager = object->pager; assert(object->paging_in_progress > 0); @@ -1625,13 +1594,13 @@ vm_fault_page( rc = vm_compressor_pager_get( pager, offset + object->paging_offset, - m->phys_page, + VM_PAGE_GET_PHYS_PAGE(m), &my_fault_type, 0, &compressed_count_delta); if (type_of_fault == NULL) { - int throttle_delay; + int throttle_delay; /* * we weren't called from vm_fault, so we @@ -1656,10 +1625,10 @@ vm_fault_page( switch (rc) { case KERN_SUCCESS: - m->absent = FALSE; - m->dirty = TRUE; - if ((m->object->wimg_bits & - VM_WIMG_MASK) != + m->vmp_absent = FALSE; + m->vmp_dirty = TRUE; + if ((object->wimg_bits & + VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { /* * If the page is not cacheable, @@ -1668,9 +1637,9 @@ vm_fault_page( * after the decompression. */ pmap_sync_page_attributes_phys( - m->phys_page); + VM_PAGE_GET_PHYS_PAGE(m)); } else { - m->written_by_kernel = TRUE; + m->vmp_written_by_kernel = TRUE; } /* @@ -1681,33 +1650,34 @@ vm_fault_page( * "compressed purgeable" ledger, so * update that now. */ - if ((object->purgable != - VM_PURGABLE_DENY) && - (object->vo_purgeable_owner != - NULL)) { + if (((object->purgable != + VM_PURGABLE_DENY) || + object->vo_ledger_tag) && + (object->vo_owner != + NULL)) { /* * One less compressed - * purgeable page. + * purgeable/tagged page. 
*/ - vm_purgeable_compressed_update( + vm_object_owner_compressed_update( object, -1); } break; case KERN_MEMORY_FAILURE: - m->unusual = TRUE; - m->error = TRUE; - m->absent = FALSE; + m->vmp_unusual = TRUE; + m->vmp_error = TRUE; + m->vmp_absent = FALSE; break; case KERN_MEMORY_ERROR: - assert(m->absent); + assert(m->vmp_absent); break; default: panic("vm_fault_page(): unexpected " - "error %d from " - "vm_compressor_pager_get()\n", - rc); + "error %d from " + "vm_compressor_pager_get()\n", + rc); } PAGE_WAKEUP_DONE(m); @@ -1715,19 +1685,19 @@ vm_fault_page( goto data_requested; } my_fault_type = DBG_PAGEIN_FAULT; - + if (m != VM_PAGE_NULL) { VM_PAGE_FREE(m); m = VM_PAGE_NULL; } #if TRACEFAULTPAGE - dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ #endif /* * It's possible someone called vm_object_destroy while we weren't - * holding the object lock. If that has happened, then bail out + * holding the object lock. If that has happened, then bail out * here. */ @@ -1744,6 +1714,10 @@ vm_fault_page( * so we can release the object lock. */ + if (object->object_is_shared_cache) { + set_thread_rwlock_boost(); + } + vm_object_unlock(object); /* @@ -1757,15 +1731,11 @@ vm_fault_page( * and its pushing pages up into a copy of * the object that it manages. */ - if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) + if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) { wants_copy_flag = VM_PROT_WANTS_COPY; - else + } else { wants_copy_flag = VM_PROT_NONE; - - XPR(XPR_VM_FAULT, - "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n", - object, offset, m, - access_required | wants_copy_flag, 0); + } if (object->copy == first_object) { /* @@ -1786,8 +1756,8 @@ vm_fault_page( * the fault w/o having to go through memory_object_data_request again */ assert(first_m != VM_PAGE_NULL); - assert(first_m->object == first_object); - + assert(VM_PAGE_OBJECT(first_m) == first_object); + vm_object_lock(first_object); VM_PAGE_FREE(first_m); vm_object_paging_end(first_object); @@ -1811,7 +1781,7 @@ vm_fault_page( */ rc = memory_object_data_request( pager, - offset + object->paging_offset, + vm_object_trunc_page(offset) + object->paging_offset, PAGE_SIZE, access_required | wants_copy_flag, (memory_object_fault_info_t)fault_info); @@ -1819,24 +1789,28 @@ vm_fault_page( if (data_already_requested == TRUE) { fault_info->behavior = orig_behavior; fault_info->cluster_size = orig_cluster_size; - } else + } else { data_already_requested = TRUE; + } DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL); #if TRACEFAULTPAGE - dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ #endif vm_object_lock(object); - data_requested: - if (rc != KERN_SUCCESS) { + if (object->object_is_shared_cache) { + clear_thread_rwlock_boost(); + } +data_requested: + if (rc != KERN_SUCCESS) { vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return ((rc == MACH_SEND_INTERRUPTED) ? - VM_FAULT_INTERRUPTED : - VM_FAULT_MEMORY_ERROR); + return (rc == MACH_SEND_INTERRUPTED) ? 
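/*
 * [Illustrative sketch, not part of the diff] How the three
 * vm_compressor_pager_get() results above map onto page state, as a
 * standalone model (field names shortened from the vmp_* originals):
 */
#include <stdbool.h>

struct model_page { bool absent, busy, dirty, error, unusual; };
enum model_kr { MODEL_KR_SUCCESS, MODEL_KR_MEMORY_FAILURE, MODEL_KR_MEMORY_ERROR };

static void
model_apply_decompress_result(struct model_page *m, enum model_kr rc)
{
	switch (rc) {
	case MODEL_KR_SUCCESS:
		m->absent = false;	/* page is now resident... */
		m->dirty = true;	/* ...and exists only here, so it must
					 * not be silently reclaimed */
		break;
	case MODEL_KR_MEMORY_FAILURE:
		m->unusual = true;	/* compressed data was unusable */
		m->error = true;
		m->absent = false;
		break;
	case MODEL_KR_MEMORY_ERROR:
		/* nothing compressed at this offset: the page stays absent
		 * and the zero-fill path will claim it */
		break;
	}
	m->busy = false;	/* PAGE_WAKEUP_DONE() in the real code */
}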
+ VM_FAULT_INTERRUPTED : + VM_FAULT_MEMORY_ERROR; } else { clock_sec_t tv_sec; clock_usec_t tv_usec; @@ -1848,23 +1822,21 @@ vm_fault_page( } } if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_INTERRUPTED); + return VM_FAULT_INTERRUPTED; } if (force_fault_retry == TRUE) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } if (m == VM_PAGE_NULL && object->phys_contiguous) { /* * No page here means that the object we - * initially looked up was "physically + * initially looked up was "physically * contiguous" (i.e. device memory). However, * with Virtual VRAM, the object might not * be backed by that device memory anymore, @@ -1875,7 +1847,7 @@ vm_fault_page( * page fault against the object's new backing * store (different memory object). */ - phys_contig_object: +phys_contig_object: goto done; } /* @@ -1894,24 +1866,20 @@ vm_fault_page( } dont_look_for_page: /* - * We get here if the object has no pager, or an existence map + * We get here if the object has no pager, or an existence map * exists and indicates the page isn't present on the pager * or we're unwiring a page. If a pager exists, but there - * is no existence map, then the m->absent case above handles + * is no existence map, then the m->vmp_absent case above handles * the ZF case when the pager can't provide the page */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif - if (object == first_object) + if (object == first_object) { first_m = m; - else + } else { assert(m == VM_PAGE_NULL); - - XPR(XPR_VM_FAULT, - "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n", - object, offset, m, - object->shadow, 0); + } next_object = object->shadow; @@ -1931,46 +1899,48 @@ dont_look_for_page: vm_object_lock(object); } m = first_m; - assert(m->object == object); + assert(VM_PAGE_OBJECT(m) == object); first_m = VM_PAGE_NULL; /* * check for any conditions that prevent * us from creating a new zero-fill page - * vm_fault_check will do all of the + * vm_fault_check will do all of the * fault cleanup in the case of an error condition * including resetting the thread_interrupt_level */ error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE); - if (error != VM_FAULT_SUCCESS) - return (error); + if (error != VM_FAULT_SUCCESS) { + return error; + } if (m == VM_PAGE_NULL) { - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); if (m == VM_PAGE_NULL) { vm_fault_cleanup(object, VM_PAGE_NULL); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); + } + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) { + m->vmp_absent = TRUE; } - if (fault_info->mark_zf_absent && no_zero_fill == TRUE) - m->absent = TRUE; my_fault = vm_fault_zero_page(m, no_zero_fill); break; - } else { - /* + /* * Move on to the next object. Lock the next * object before unlocking the current one. 
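The lock ordering spelled out in this comment is the classic hand-over-hand descent: take the shadow's lock before releasing the current object's, so no other thread can collapse or free the next object in the window between the two operations. A minimal pthread sketch of the same discipline (illustrative types only, not the VM object locking code):

#include <pthread.h>
#include <stddef.h>

struct object {
	pthread_mutex_t lock;
	struct object  *shadow;	/* next object in the shadow chain */
};

/* Descend to the bottom of a shadow chain, always holding at least
 * one lock: lock the next object first, only then drop the current
 * one.  Returns the terminal object, locked. */
static struct object *
shadow_chain_descend(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);
	while (obj->shadow != NULL) {
		struct object *next = obj->shadow;

		pthread_mutex_lock(&next->lock);	/* lock next... */
		pthread_mutex_unlock(&obj->lock);	/* ...then unlock current */
		obj = next;
	}
	return obj;
}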
*/ - if ((object != first_object) || must_be_resident) + if ((object != first_object) || must_be_resident) { vm_object_paging_end(object); + } offset += object->vo_shadow_offset; fault_info->lo_offset += object->vo_shadow_offset; @@ -2004,26 +1974,14 @@ dont_look_for_page: */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif -#if EXTRA_ASSERTIONS - assert(m->busy && !m->absent); +#if EXTRA_ASSERTIONS + assert(m->vmp_busy && !m->vmp_absent); assert((first_m == VM_PAGE_NULL) || - (first_m->busy && !first_m->absent && - !first_m->active && !first_m->inactive)); -#endif /* EXTRA_ASSERTIONS */ - - /* - * ENCRYPTED SWAP: - * If we found a page, we must have decrypted it before we - * get here... - */ - ASSERT_PAGE_DECRYPTED(m); - - XPR(XPR_VM_FAULT, - "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", - object, offset, m, - first_object, first_m); + (first_m->vmp_busy && !first_m->vmp_absent && + !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded)); +#endif /* EXTRA_ASSERTIONS */ /* * If the page is being written, but isn't @@ -2032,11 +1990,10 @@ dont_look_for_page: * by the top-level object. */ if (object != first_object) { - #if TRACEFAULTPAGE - dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif - if (fault_type & VM_PROT_WRITE) { + if (fault_type & VM_PROT_WRITE) { vm_page_t copy_m; /* @@ -2045,25 +2002,6 @@ dont_look_for_page: */ assert(!must_be_resident); - /* - * are we protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. - */ - if (vm_backing_store_low) { - if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { - - RELEASE_PAGE(m); - vm_fault_cleanup(object, first_m); - - assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); - - thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level(interruptible_state); - - return (VM_FAULT_RETRY); - } - } /* * If we try to collapse first_object at this * point, we may deadlock when we try to get @@ -2083,7 +2021,7 @@ dont_look_for_page: /* * Allocate a page for the copy */ - copy_m = vm_page_grab(); + copy_m = vm_page_grab_options(grab_options); if (copy_m == VM_PAGE_NULL) { RELEASE_PAGE(m); @@ -2091,12 +2029,8 @@ dont_look_for_page: vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } - XPR(XPR_VM_FAULT, - "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n", - object, offset, - m, copy_m, 0); vm_page_copy(m, copy_m); @@ -2112,20 +2046,31 @@ dont_look_for_page: * access to this page, then we could * avoid the pmap_disconnect() call. */ - if (m->pmapped) - pmap_disconnect(m->phys_page); + if (m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + } - if (m->clustered) { + if (m->vmp_clustered) { VM_PAGE_COUNT_AS_PAGEIN(m); VM_PAGE_CONSUME_CLUSTERED(m); } - assert(!m->cleaning); + assert(!m->vmp_cleaning); /* * We no longer need the old page or object. */ RELEASE_PAGE(m); + /* + * This check helps with marking the object as having a sequential pattern + * Normally we'll miss doing this below because this fault is about COW to + * the first_object i.e. 
bring page in from disk, push to object above but + * don't update the file object's sequential pattern. + */ + if (object->internal == FALSE) { + vm_fault_is_sequential(object, offset, fault_info->behavior); + } + vm_object_paging_end(object); vm_object_unlock(object); @@ -2144,13 +2089,13 @@ dont_look_for_page: */ VM_PAGE_FREE(first_m); first_m = VM_PAGE_NULL; - + /* * and replace it with the * page we just copied into */ - assert(copy_m->busy); - vm_page_insert(copy_m, object, offset); + assert(copy_m->vmp_busy); + vm_page_insert(copy_m, object, vm_object_trunc_page(offset)); SET_PAGE_DIRTY(copy_m, TRUE); m = copy_m; @@ -2159,13 +2104,13 @@ dont_look_for_page: * way, let's try to collapse the top object. * But we have to play ugly games with * paging_in_progress to do that... - */ - vm_object_paging_end(object); - vm_object_collapse(object, offset, TRUE); + */ + vm_object_paging_end(object); + vm_object_collapse(object, vm_object_trunc_page(offset), TRUE); vm_object_paging_begin(object); - - } else - *protection &= (~VM_PROT_WRITE); + } else { + *protection &= (~VM_PROT_WRITE); + } } /* * Now check whether the page needs to be pushed into the @@ -2177,11 +2122,11 @@ dont_look_for_page: try_failed_count = 0; while ((copy_object = first_object->copy) != VM_OBJECT_NULL) { - vm_object_offset_t copy_offset; - vm_page_t copy_m; + vm_object_offset_t copy_offset; + vm_page_t copy_m; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif /* * If the page is being written, but hasn't been @@ -2196,18 +2141,18 @@ dont_look_for_page: * If the page was guaranteed to be resident, * we must have already performed the copy. */ - if (must_be_resident) + if (must_be_resident) { break; + } /* * Try to get the lock on the copy_object. */ if (!vm_object_lock_try(copy_object)) { - vm_object_unlock(object); try_failed_count++; - mutex_pause(try_failed_count); /* wait a bit */ + mutex_pause(try_failed_count); /* wait a bit */ vm_object_lock(object); continue; @@ -2225,17 +2170,18 @@ dont_look_for_page: * Does the page exist in the copy? */ copy_offset = first_offset - copy_object->vo_shadow_offset; + copy_offset = vm_object_trunc_page(copy_offset); - if (copy_object->vo_size <= copy_offset) + if (copy_object->vo_size <= copy_offset) { /* * Copy object doesn't cover this page -- do nothing. */ ; - else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { + } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { /* * Page currently exists in the copy object */ - if (copy_m->busy) { + if (copy_m->vmp_busy) { /* * If the page is being brought * in, wait for it and then retry. @@ -2257,13 +2203,8 @@ dont_look_for_page: copy_object->ref_count--; assert(copy_object->ref_count > 0); copy_m = vm_page_lookup(copy_object, copy_offset); - /* - * ENCRYPTED SWAP: - * it's OK if the "copy_m" page is encrypted, - * because we're not moving it nor handling its - * contents. 
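The copy path above follows a fixed sequence: allocate a fresh page, copy the contents, then disconnect the original from every pmap so no stale mapping can keep writing to the page being replaced. A compressed sketch of that ordering, with toy types in place of vm_page_t:

#include <stdbool.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page {
	char data[PAGE_SIZE];
	bool pmapped;	/* may be entered in some pmap */
	bool dirty;
};

/* Toy model of the COW step: the copy only becomes authoritative
 * once every mapping of the source has been torn down. */
static void
cow_replace(struct page *src, struct page *dst)
{
	memcpy(dst->data, src->data, PAGE_SIZE);

	if (src->pmapped) {
		src->pmapped = false;	/* stands in for pmap_disconnect() */
	}
	/* the copy has no image in the pager yet: keep it dirty */
	dst->dirty = true;
}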
- */ - if (copy_m != VM_PAGE_NULL && copy_m->busy) { + + if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) { PAGE_ASSERT_WAIT(copy_m, interruptible); vm_object_unlock(copy_object); @@ -2276,11 +2217,10 @@ dont_look_for_page: vm_object_deallocate(copy_object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } - } - else if (!PAGED_OUT(copy_object, copy_offset)) { + } else if (!PAGED_OUT(copy_object, copy_offset)) { /* * If PAGED_OUT is TRUE, then the page used to exist * in the copy-object, and has already been paged out. @@ -2289,32 +2229,7 @@ dont_look_for_page: * for example) or it hasn't been paged out. * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT) * We must copy the page to the copy object. - */ - - if (vm_backing_store_low) { - /* - * we are protecting the system from - * backing store exhaustion. If so - * sleep unless we are privileged. - */ - if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) { - assert_wait((event_t)&vm_backing_store_low, THREAD_UNINT); - - RELEASE_PAGE(m); - VM_OBJ_RES_DECR(copy_object); - vm_object_lock_assert_exclusive(copy_object); - copy_object->ref_count--; - assert(copy_object->ref_count > 0); - - vm_object_unlock(copy_object); - vm_fault_cleanup(object, first_m); - thread_block(THREAD_CONTINUE_NULL); - thread_interrupt_level(interruptible_state); - - return (VM_FAULT_RETRY); - } - } - /* + * * Allocate a page for the copy */ copy_m = vm_page_alloc(copy_object, copy_offset); @@ -2331,23 +2246,24 @@ dont_look_for_page: vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } /* * Must copy page into copy-object. */ vm_page_copy(m, copy_m); - + /* * If the old page was in use by any users * of the copy-object, it must be removed * from all pmaps. (We can't know which * pmaps use it.) */ - if (m->pmapped) - pmap_disconnect(m->phys_page); + if (m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + } - if (m->clustered) { + if (m->vmp_clustered) { VM_PAGE_COUNT_AS_PAGEIN(m); VM_PAGE_CONSUME_CLUSTERED(m); } @@ -2356,101 +2272,32 @@ dont_look_for_page: * page out this page, using the "initialize" * option. Else, we use the copy. */ - if ((!copy_object->pager_ready) -#if MACH_PAGEMAP - || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT -#endif + if ((!copy_object->pager_ready) || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT ) { - vm_page_lockspin_queues(); - assert(!m->cleaning); + assert(!m->vmp_cleaning); vm_page_activate(copy_m); vm_page_unlock_queues(); SET_PAGE_DIRTY(copy_m, TRUE); PAGE_WAKEUP_DONE(copy_m); + } else { + assert(copy_m->vmp_busy == TRUE); + assert(!m->vmp_cleaning); - } else if (copy_object->internal && - (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE)) { /* - * For internal objects check with the pager to see - * if the page already exists in the backing store. - * If yes, then we can drop the copy page. If not, - * then we'll activate it, mark it dirty and keep it - * around. + * dirty is protected by the object lock */ - - kern_return_t kr = KERN_SUCCESS; - - memory_object_t copy_pager = copy_object->pager; - assert(copy_pager != MEMORY_OBJECT_NULL); - vm_object_paging_begin(copy_object); - - vm_object_unlock(copy_object); - - kr = memory_object_data_request( - copy_pager, - copy_offset + copy_object->paging_offset, - 0, /* Only query the pager. 
*/ - VM_PROT_READ, - NULL); - - vm_object_lock(copy_object); - - vm_object_paging_end(copy_object); + SET_PAGE_DIRTY(copy_m, TRUE); /* - * Since we dropped the copy_object's lock, - * check whether we'll have to deallocate - * the hard way. + * The page is already ready for pageout: + * not on pageout queues and busy. + * Unlock everything except the + * copy_object itself. */ - if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) { - vm_object_unlock(copy_object); - vm_object_deallocate(copy_object); - vm_object_lock(object); - - continue; - } - if (kr == KERN_SUCCESS) { - /* - * The pager has the page. We don't want to overwrite - * that page by sending this one out to the backing store. - * So we drop the copy page. - */ - VM_PAGE_FREE(copy_m); - - } else { - /* - * The pager doesn't have the page. We'll keep this one - * around in the copy object. It might get sent out to - * the backing store under memory pressure. - */ - vm_page_lockspin_queues(); - assert(!m->cleaning); - vm_page_activate(copy_m); - vm_page_unlock_queues(); - - SET_PAGE_DIRTY(copy_m, TRUE); - PAGE_WAKEUP_DONE(copy_m); - } - } else { - - assert(copy_m->busy == TRUE); - assert(!m->cleaning); - - /* - * dirty is protected by the object lock - */ - SET_PAGE_DIRTY(copy_m, TRUE); - - /* - * The page is already ready for pageout: - * not on pageout queues and busy. - * Unlock everything except the - * copy_object itself. - */ - vm_object_unlock(object); + vm_object_unlock(object); /* * Write the page to the copy-object, @@ -2490,8 +2337,8 @@ dont_look_for_page: * wait result]. Can't turn off the page's * busy bit because we're not done with it. */ - if (m->wanted) { - m->wanted = FALSE; + if (m->vmp_wanted) { + m->vmp_wanted = FALSE; thread_wakeup_with_result((event_t) m, THREAD_RESTART); } } @@ -2506,7 +2353,7 @@ dont_look_for_page: copy_object->ref_count--; assert(copy_object->ref_count > 0); - VM_OBJ_RES_DECR(copy_object); + VM_OBJ_RES_DECR(copy_object); vm_object_unlock(copy_object); break; @@ -2516,36 +2363,44 @@ done: *result_page = m; *top_page = first_m; - XPR(XPR_VM_FAULT, - "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", - object, offset, m, first_m, 0); - if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == object); + retval = VM_FAULT_SUCCESS; if (my_fault == DBG_PAGEIN_FAULT) { - VM_PAGE_COUNT_AS_PAGEIN(m); - if (m->object->internal) + if (object->internal) { my_fault = DBG_PAGEIND_FAULT; - else + } else { my_fault = DBG_PAGEINV_FAULT; + } - /* + /* * evaluate access pattern and update state * vm_fault_deactivate_behind depends on the * state being up to date */ - vm_fault_is_sequential(object, offset, fault_info->behavior); - + vm_fault_is_sequential(object, offset, fault_info->behavior); + vm_fault_deactivate_behind(object, offset, fault_info->behavior); + } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) { + /* + * we weren't called from vm_fault, so handle the + * accounting here for hits in the cache + */ + if (m->vmp_clustered) { + VM_PAGE_COUNT_AS_PAGEIN(m); + VM_PAGE_CONSUME_CLUSTERED(m); + } + vm_fault_is_sequential(object, offset, fault_info->behavior); vm_fault_deactivate_behind(object, offset, fault_info->behavior); } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) { - - VM_STAT_INCR(decompressions); + VM_STAT_DECOMPRESSIONS(); + } + if (type_of_fault) { + *type_of_fault = my_fault; } - if (type_of_fault) - *type_of_fault = my_fault; } else { retval = VM_FAULT_SUCCESS_NO_VM_PAGE; assert(first_m == 
VM_PAGE_NULL); @@ -2555,21 +2410,27 @@ done: thread_interrupt_level(interruptible_state); #if TRACEFAULTPAGE - dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ #endif return retval; backoff: thread_interrupt_level(interruptible_state); - if (wait_result == THREAD_INTERRUPTED) - return (VM_FAULT_INTERRUPTED); - return (VM_FAULT_RETRY); + if (wait_result == THREAD_INTERRUPTED) { + return VM_FAULT_INTERRUPTED; + } + return VM_FAULT_RETRY; -#undef RELEASE_PAGE +#undef RELEASE_PAGE } +extern int panic_on_cs_killed; +extern int proc_selfpid(void); +extern char *proc_name_address(void *p); +unsigned long cs_enter_tainted_rejected = 0; +unsigned long cs_enter_tainted_accepted = 0; /* * CODE SIGNING: @@ -2579,424 +2440,640 @@ backoff: * 3. the page belongs to a code-signed object * 4. the page has not been validated yet or has been mapped for write. */ -#define VM_FAULT_NEED_CS_VALIDATION(pmap, page) \ - ((pmap) != kernel_pmap /*1*/ && \ - !(page)->cs_tainted /*2*/ && \ - (page)->object->code_signed /*3*/ && \ - (!(page)->cs_validated || (page)->wpmapped /*4*/)) - - -/* - * page queue lock must NOT be held - * m->object must be locked - * - * NOTE: m->object could be locked "shared" only if we are called - * from vm_fault() as part of a soft fault. If so, we must be - * careful not to modify the VM object in any way that is not - * legal under a shared lock... - */ -extern int proc_selfpid(void); -extern char *proc_name_address(void *p); -unsigned long cs_enter_tainted_rejected = 0; -unsigned long cs_enter_tainted_accepted = 0; -kern_return_t -vm_fault_enter(vm_page_t m, - pmap_t pmap, - vm_map_offset_t vaddr, - vm_prot_t prot, - vm_prot_t caller_prot, - boolean_t wired, - boolean_t change_wiring, - boolean_t no_cache, - boolean_t cs_bypass, - __unused int user_tag, - int pmap_options, - boolean_t *need_retry, - int *type_of_fault) +static bool +vm_fault_cs_need_validation( + pmap_t pmap, + vm_page_t page, + vm_object_t page_obj, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) { - kern_return_t kr, pe_result; - boolean_t previously_pmapped = m->pmapped; - boolean_t must_disconnect = 0; - boolean_t map_is_switched, map_is_switch_protected; - int cs_enforcement_enabled; - vm_prot_t fault_type; - - fault_type = change_wiring ? VM_PROT_NONE : caller_prot; - - vm_object_lock_assert_held(m->object); -#if DEBUG - lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); -#endif /* DEBUG */ - - if (m->phys_page == vm_page_guard_addr) { - assert(m->fictitious); - return KERN_SUCCESS; + if (pmap == kernel_pmap) { + /* 1 - not user space */ + return false; } - - if (*type_of_fault == DBG_ZERO_FILL_FAULT) { - - vm_object_lock_assert_exclusive(m->object); - - } else if ((fault_type & VM_PROT_WRITE) == 0) { - /* - * This is not a "write" fault, so we - * might not have taken the object lock - * exclusively and we might not be able - * to update the "wpmapped" bit in - * vm_fault_enter(). - * Let's just grant read access to - * the page for now and we'll - * soft-fault again if we need write - * access later... 
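The downgrade described in that comment is cheap insurance: on a read fault the object lock may only be held shared, so the bookkeeping a writable mapping requires (the wpmapped bit) cannot be updated safely; mapping read-only and taking a later soft fault for the write is the safe default. As a sketch:

typedef unsigned int vm_prot_t;

#define VM_PROT_WRITE	((vm_prot_t) 0x02)

/* On a non-write fault, grant read access only; a later write will
 * soft-fault and upgrade the mapping with the object lock held
 * exclusively. */
static vm_prot_t
downgrade_for_read_fault(vm_prot_t prot, vm_prot_t fault_type)
{
	if ((fault_type & VM_PROT_WRITE) == 0) {
		prot &= ~VM_PROT_WRITE;
	}
	return prot;
}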
- */ - prot &= ~VM_PROT_WRITE; + if (!page_obj->code_signed) { + /* 3 - page does not belong to a code-signed object */ + return false; } - if (m->pmapped == FALSE) { - - if (m->clustered) { - if (*type_of_fault == DBG_CACHE_HIT_FAULT) { - /* - * found it in the cache, but this - * is the first fault-in of the page (m->pmapped == FALSE) - * so it must have come in as part of - * a cluster... account 1 pagein against it - */ - if (m->object->internal) - *type_of_fault = DBG_PAGEIND_FAULT; - else - *type_of_fault = DBG_PAGEINV_FAULT; - - VM_PAGE_COUNT_AS_PAGEIN(m); - } - VM_PAGE_CONSUME_CLUSTERED(m); + if (fault_page_size == PAGE_SIZE) { + /* looking at the whole page */ + assertf(fault_phys_offset == 0, + "fault_page_size 0x%llx fault_phys_offset 0x%llx\n", + (uint64_t)fault_page_size, + (uint64_t)fault_phys_offset); + if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) { + /* 2 - page is all tainted */ + return false; + } + if (page->vmp_cs_validated == VMP_CS_ALL_TRUE && + !page->vmp_wpmapped) { + /* 4 - already fully validated and never mapped writable */ + return false; + } + } else { + /* looking at a specific sub-page */ + if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) { + /* 2 - sub-page was already marked as tainted */ + return false; + } + if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) && + !page->vmp_wpmapped) { + /* 4 - already validated and never mapped writable */ + return false; } } + /* page needs to be validated */ + return true; +} - if (*type_of_fault != DBG_COW_FAULT) { - DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); - if (pmap == kernel_pmap) { - DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); - } +static bool +vm_fault_cs_page_immutable( + vm_page_t m, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_prot_t prot __unused) +{ + if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) + /*&& ((prot) & VM_PROT_EXECUTE)*/) { + return true; } + return false; +} - /* Validate code signature if necessary. */ - if (VM_FAULT_NEED_CS_VALIDATION(pmap, m)) { - vm_object_lock_assert_exclusive(m->object); +static bool +vm_fault_cs_page_nx( + vm_page_t m, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset) +{ + return VMP_CS_NX(m, fault_page_size, fault_phys_offset); +} - if (m->cs_validated) { +/* + * Check if the page being entered into the pmap violates code signing. 
+ */ +static kern_return_t +vm_fault_cs_check_violation( + bool cs_bypass, + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_prot_t prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_object_fault_info_t fault_info, + bool map_is_switched, + bool map_is_switch_protected, + bool *cs_violation) +{ +#if !PMAP_CS +#pragma unused(caller_prot) +#pragma unused(fault_info) +#endif /* !PMAP_CS */ + int cs_enforcement_enabled; + if (!cs_bypass && + vm_fault_cs_need_validation(pmap, m, object, + fault_page_size, fault_phys_offset)) { + vm_object_lock_assert_exclusive(object); + + if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) { vm_cs_revalidates++; } - /* VM map is locked, so 1 ref will remain on VM object - + /* VM map is locked, so 1 ref will remain on VM object - * so no harm if vm_page_validate_cs drops the object lock */ - vm_page_validate_cs(m); - } -#define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/) -#define page_nx(m) ((m)->cs_nx) + vm_page_validate_cs(m, fault_page_size, fault_phys_offset); + } - map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && - (pmap == vm_map_pmap(current_thread()->map))); - map_is_switch_protected = current_thread()->map->switch_protect; - /* If the map is switched, and is switch-protected, we must protect - * some pages from being write-faulted: immutable pages because by + * some pages from being write-faulted: immutable pages because by * definition they may not be written, and executable pages because that * would provide a way to inject unsigned code. * If the page is immutable, we can simply return. However, we can't * immediately determine whether a page is executable anywhere. But, * we can disconnect it everywhere and remove the executable protection - * from the current map. We do that below right before we do the + * from the current map. We do that below right before we do the * PMAP_ENTER. */ - cs_enforcement_enabled = cs_enforcement(NULL); + if (pmap == kernel_pmap) { + /* kernel fault: cs_enforcement does not apply */ + cs_enforcement_enabled = 0; + } else { + cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap); + } - if(cs_enforcement_enabled && map_is_switched && - map_is_switch_protected && page_immutable(m, prot) && - (prot & VM_PROT_WRITE)) - { + if (cs_enforcement_enabled && map_is_switched && + map_is_switch_protected && + vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) && + (prot & VM_PROT_WRITE)) { return KERN_CODESIGN_ERROR; } - if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) { - if (cs_debug) + if (cs_enforcement_enabled && + vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) && + (prot & VM_PROT_EXECUTE)) { + if (cs_debug) { printf("page marked to be NX, not letting it be mapped EXEC\n"); + } return KERN_CODESIGN_ERROR; } /* A page could be tainted, or pose a risk of being tainted later. * Check whether the receiving process wants it, and make it feel * the consequences (that hapens in cs_invalid_page()). 
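Condensing the two unconditional rejections above: with code-signing enforcement on, a write fault on an immutable page in a switched, switch-protected map fails outright, as does any attempt to map a page marked NX with execute permission. A boolean sketch of just that gate, with flag parameters standing in for the page and map state:

#include <stdbool.h>

typedef unsigned int vm_prot_t;

#define VM_PROT_WRITE	((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE	((vm_prot_t) 0x04)

/* true => fail the fault with KERN_CODESIGN_ERROR */
static bool
cs_early_reject(bool cs_enforced, bool map_is_switched,
    bool map_is_switch_protected, bool page_immutable, bool page_nx,
    vm_prot_t prot)
{
	if (cs_enforced && map_is_switched && map_is_switch_protected &&
	    page_immutable && (prot & VM_PROT_WRITE)) {
		return true;	/* immutable page, write in a switched map */
	}
	if (cs_enforced && page_nx && (prot & VM_PROT_EXECUTE)) {
		return true;	/* NX page must never be mapped executable */
	}
	return false;
}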
- * For CS Enforcement, two other conditions will - * cause that page to be tainted as well: + * For CS Enforcement, two other conditions will + * cause that page to be tainted as well: * - pmapping an unsigned page executable - this means unsigned code; * - writeable mapping of a validated page - the content of that page * can be changed without the kernel noticing, therefore unsigned * code can be created */ - if (!cs_bypass && - (m->cs_tainted || - (cs_enforcement_enabled && - (/* The page is unsigned and wants to be executable */ - (!m->cs_validated && (prot & VM_PROT_EXECUTE)) || - /* The page should be immutable, but is in danger of being modified - * This is the case where we want policy from the code directory - - * is the page immutable or not? For now we have to assume that - * code pages will be immutable, data pages not. - * We'll assume a page is a code page if it has a code directory - * and we fault for execution. - * That is good enough since if we faulted the code page for - * writing in another map before, it is wpmapped; if we fault - * it for writing in this map later it will also be faulted for executing - * at the same time; and if we fault for writing in another map - * later, we will disconnect it from this pmap so we'll notice - * the change. - */ - (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped)) - )) - )) - { - /* We will have a tainted page. Have to handle the special case - * of a switched map now. If the map is not switched, standard - * procedure applies - call cs_invalid_page(). - * If the map is switched, the real owner is invalid already. - * There is no point in invalidating the switching process since - * it will not be executing from the map. So we don't call - * cs_invalid_page() in that case. */ - boolean_t reject_page; - if(map_is_switched) { - assert(pmap==vm_map_pmap(current_thread()->map)); - assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); - reject_page = FALSE; - } else { - if (cs_debug > 5) - printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s slid: %s prot: 0x%x\n", - m->object->code_signed ? "yes" : "no", - m->cs_validated ? "yes" : "no", - m->cs_tainted ? "yes" : "no", - m->wpmapped ? "yes" : "no", - m->slid ? "yes" : "no", - (int)prot); - reject_page = cs_invalid_page((addr64_t) vaddr); + if (cs_bypass) { + /* code-signing is bypassed */ + *cs_violation = FALSE; + } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) { + /* tainted page */ + *cs_violation = TRUE; + } else if (!cs_enforcement_enabled) { + /* no further code-signing enforcement */ + *cs_violation = FALSE; + } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) && + ((prot & VM_PROT_WRITE) || + m->vmp_wpmapped)) { + /* + * The page should be immutable, but is in danger of being + * modified. + * This is the case where we want policy from the code + * directory - is the page immutable or not? For now we have + * to assume that code pages will be immutable, data pages not. + * We'll assume a page is a code page if it has a code directory + * and we fault for execution. + * That is good enough since if we faulted the code page for + * writing in another map before, it is wpmapped; if we fault + * it for writing in this map later it will also be faulted for + * executing at the same time; and if we fault for writing in + * another map later, we will disconnect it from this pmap so + * we'll notice the change. 
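Laid end to end, the *cs_violation classification below reads as a short decision chain, evaluated in this order: bypass wins, a tainted page is always a violation, no enforcement means no further checks, and only then do the immutable-but-writable and unsigned-but-executable cases apply. As a sketch:

#include <stdbool.h>

typedef unsigned int vm_prot_t;

#define VM_PROT_WRITE	((vm_prot_t) 0x02)
#define VM_PROT_EXECUTE	((vm_prot_t) 0x04)

static bool
cs_violation(bool cs_bypass, bool tainted, bool cs_enforced,
    bool immutable, bool wpmapped, bool validated, vm_prot_t prot)
{
	if (cs_bypass) {
		return false;	/* code signing bypassed for this mapping */
	}
	if (tainted) {
		return true;	/* tainted page */
	}
	if (!cs_enforced) {
		return false;	/* no further enforcement */
	}
	if (immutable && ((prot & VM_PROT_WRITE) || wpmapped)) {
		return true;	/* signed page in danger of modification */
	}
	if (!validated && (prot & VM_PROT_EXECUTE)) {
		return true;	/* unsigned page asking to execute */
	}
	return false;
}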
+ */ + *cs_violation = TRUE; + } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) && + (prot & VM_PROT_EXECUTE) + ) { + *cs_violation = TRUE; + } else { + *cs_violation = FALSE; + } + return KERN_SUCCESS; +} + +/* + * Handles a code signing violation by either rejecting the page or forcing a disconnect. + * @param must_disconnect This value will be set to true if the caller must disconnect + * this page. + * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault. + */ +static kern_return_t +vm_fault_cs_handle_violation( + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_prot_t prot, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + bool map_is_switched, + bool map_is_switch_protected, + bool *must_disconnect) +{ +#if !MACH_ASSERT +#pragma unused(pmap) +#pragma unused(map_is_switch_protected) +#endif /* !MACH_ASSERT */ + /* + * We will have a tainted page. Have to handle the special case + * of a switched map now. If the map is not switched, standard + * procedure applies - call cs_invalid_page(). + * If the map is switched, the real owner is invalid already. + * There is no point in invalidating the switching process since + * it will not be executing from the map. So we don't call + * cs_invalid_page() in that case. + */ + boolean_t reject_page, cs_killed; + kern_return_t kr; + if (map_is_switched) { + assert(pmap == vm_map_pmap(current_thread()->map)); + assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); + reject_page = FALSE; + } else { + if (cs_debug > 5) { + printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n", + object->code_signed ? "yes" : "no", + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no", + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no", + m->vmp_wpmapped ? 
"yes" : "no", + (int)prot); } - - if (reject_page) { - /* reject the invalid page: abort the page fault */ - int pid; - const char *procname; - task_t task; - vm_object_t file_object, shadow; - vm_object_offset_t file_offset; - char *pathname, *filename; - vm_size_t pathname_len, filename_len; - boolean_t truncated_path; + reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed); + } + + if (reject_page) { + /* reject the invalid page: abort the page fault */ + int pid; + const char *procname; + task_t task; + vm_object_t file_object, shadow; + vm_object_offset_t file_offset; + char *pathname, *filename; + vm_size_t pathname_len, filename_len; + boolean_t truncated_path; #define __PATH_MAX 1024 - struct timespec mtime, cs_mtime; - - kr = KERN_CODESIGN_ERROR; - cs_enter_tainted_rejected++; - - /* get process name and pid */ - procname = "?"; - task = current_task(); - pid = proc_selfpid(); - if (task->bsd_info != NULL) - procname = proc_name_address(task->bsd_info); - - /* get file's VM object */ - file_object = m->object; - file_offset = m->offset; - for (shadow = file_object->shadow; - shadow != VM_OBJECT_NULL; - shadow = file_object->shadow) { - vm_object_lock_shared(shadow); - if (file_object != m->object) { - vm_object_unlock(file_object); - } - file_offset += file_object->vo_shadow_offset; - file_object = shadow; - } + struct timespec mtime, cs_mtime; + int shadow_depth; + os_reason_t codesigning_exit_reason = OS_REASON_NULL; + + kr = KERN_CODESIGN_ERROR; + cs_enter_tainted_rejected++; + + /* get process name and pid */ + procname = "?"; + task = current_task(); + pid = proc_selfpid(); + if (task->bsd_info != NULL) { + procname = proc_name_address(task->bsd_info); + } - mtime.tv_sec = 0; - mtime.tv_nsec = 0; - cs_mtime.tv_sec = 0; - cs_mtime.tv_nsec = 0; + /* get file's VM object */ + file_object = object; + file_offset = m->vmp_offset; + for (shadow = file_object->shadow, + shadow_depth = 0; + shadow != VM_OBJECT_NULL; + shadow = file_object->shadow, + shadow_depth++) { + vm_object_lock_shared(shadow); + if (file_object != object) { + vm_object_unlock(file_object); + } + file_offset += file_object->vo_shadow_offset; + file_object = shadow; + } - /* get file's pathname and/or filename */ - pathname = NULL; - filename = NULL; - pathname_len = 0; - filename_len = 0; - truncated_path = FALSE; - /* no pager -> no file -> no pathname, use "" in that case */ - if (file_object->pager != NULL) { - pathname = (char *)kalloc(__PATH_MAX * 2); - if (pathname) { - pathname[0] = '\0'; - pathname_len = __PATH_MAX; - filename = pathname + pathname_len; - filename_len = __PATH_MAX; - } - vnode_pager_get_object_name(file_object->pager, - pathname, - pathname_len, - filename, - filename_len, - &truncated_path); - if (pathname) { + mtime.tv_sec = 0; + mtime.tv_nsec = 0; + cs_mtime.tv_sec = 0; + cs_mtime.tv_nsec = 0; + + /* get file's pathname and/or filename */ + pathname = NULL; + filename = NULL; + pathname_len = 0; + filename_len = 0; + truncated_path = FALSE; + /* no pager -> no file -> no pathname, use "" in that case */ + if (file_object->pager != NULL) { + pathname = kheap_alloc(KHEAP_TEMP, __PATH_MAX * 2, Z_WAITOK); + if (pathname) { + pathname[0] = '\0'; + pathname_len = __PATH_MAX; + filename = pathname + pathname_len; + filename_len = __PATH_MAX; + + if (vnode_pager_get_object_name(file_object->pager, + pathname, + pathname_len, + filename, + filename_len, + &truncated_path) == KERN_SUCCESS) { /* safety first... 
*/ - pathname[__PATH_MAX-1] = '\0'; - filename[__PATH_MAX-1] = '\0'; + pathname[__PATH_MAX - 1] = '\0'; + filename[__PATH_MAX - 1] = '\0'; + + vnode_pager_get_object_mtime(file_object->pager, + &mtime, + &cs_mtime); + } else { + kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2); + pathname = NULL; + filename = NULL; + pathname_len = 0; + filename_len = 0; + truncated_path = FALSE; } - vnode_pager_get_object_mtime(file_object->pager, - &mtime, - &cs_mtime); - } - printf("CODE SIGNING: process %d[%s]: " - "rejecting invalid page at address 0x%llx " - "from offset 0x%llx in file \"%s%s%s\" " - "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " - "(signed:%d validated:%d tainted:%d " - "wpmapped:%d slid:%d)\n", - pid, procname, (addr64_t) vaddr, - file_offset, - (pathname ? pathname : ""), - (truncated_path ? "/.../" : ""), - (truncated_path ? filename : ""), - cs_mtime.tv_sec, cs_mtime.tv_nsec, - ((cs_mtime.tv_sec == mtime.tv_sec && - cs_mtime.tv_nsec == mtime.tv_nsec) - ? "==" - : "!="), - mtime.tv_sec, mtime.tv_nsec, - m->object->code_signed, - m->cs_validated, - m->cs_tainted, - m->wpmapped, - m->slid); - if (file_object != m->object) { - vm_object_unlock(file_object); - } - if (pathname_len != 0) { - kfree(pathname, __PATH_MAX * 2); - pathname = NULL; - filename = NULL; } - } else { - /* proceed with the invalid page */ - kr = KERN_SUCCESS; - if (!m->cs_validated) { - /* - * This page has not been validated, so it - * must not belong to a code-signed object - * and should not be forcefully considered - * as tainted. - * We're just concerned about it here because - * we've been asked to "execute" it but that - * does not mean that it should cause other - * accesses to fail. - * This happens when a debugger sets a - * breakpoint and we then execute code in - * that page. Marking the page as "tainted" - * would cause any inspection tool ("leaks", - * "vmmap", "CrashReporter", ...) to get killed - * due to code-signing violation on that page, - * even though they're just reading it and not - * executing from it. - */ - assert(!m->object->code_signed); + } + printf("CODE SIGNING: process %d[%s]: " + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d " + "wpmapped:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset), + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset), + VMP_CS_NX(m, fault_page_size, fault_phys_offset), + m->vmp_wpmapped, + m->vmp_dirty, + shadow_depth); + + /* + * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page + * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the + * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler + * will deal with the segmentation fault. 
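One detail worth pulling out of the log line above: the "cs_mtime ... mtime" field prints "==" when the file's current modification time still matches the one captured at signature validation, and "!=" when the file has changed underneath its signature. The comparison is just:

#include <stdio.h>
#include <time.h>

static const char *
mtime_matches_cs_mtime(const struct timespec *mtime,
    const struct timespec *cs_mtime)
{
	return (cs_mtime->tv_sec == mtime->tv_sec &&
	    cs_mtime->tv_nsec == mtime->tv_nsec) ? "==" : "!=";
}

int
main(void)
{
	struct timespec cs_mtime = { .tv_sec = 100, .tv_nsec = 0 };
	struct timespec mtime    = { .tv_sec = 250, .tv_nsec = 0 };

	/* a "!=" here means the file was modified after it was signed */
	printf("(cs_mtime:%ld.%ld %s mtime:%ld.%ld)\n",
	    (long)cs_mtime.tv_sec, cs_mtime.tv_nsec,
	    mtime_matches_cs_mtime(&mtime, &cs_mtime),
	    (long)mtime.tv_sec, mtime.tv_nsec);
	return 0;
}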
+ */ + if (cs_killed) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, + pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); + + codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE); + if (codesigning_exit_reason == NULL) { + printf("vm_fault_enter: failed to allocate codesigning exit reason\n"); } else { - /* - * Page might have been tainted before or not; - * now it definitively is. If the page wasn't - * tainted, we must disconnect it from all - * pmaps later, to force existing mappings - * through that code path for re-consideration - * of the validity of that page. - */ - must_disconnect = !m->cs_tainted; - m->cs_tainted = TRUE; + mach_vm_address_t data_addr = 0; + struct codesigning_exit_reason_info *ceri = NULL; + uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri)); + + if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) { + printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); + } else { + if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, + EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { + ceri = (struct codesigning_exit_reason_info *)data_addr; + static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname)); + + ceri->ceri_virt_addr = vaddr; + ceri->ceri_file_offset = file_offset; + if (pathname) { + strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname)); + } else { + ceri->ceri_pathname[0] = '\0'; + } + if (filename) { + strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename)); + } else { + ceri->ceri_filename[0] = '\0'; + } + ceri->ceri_path_truncated = (truncated_path ? 
1 : 0); + ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec; + ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec; + ceri->ceri_page_modtime_secs = mtime.tv_sec; + ceri->ceri_page_modtime_nsecs = mtime.tv_nsec; + ceri->ceri_object_codesigned = (object->code_signed); + ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset); + ceri->ceri_page_wpmapped = (m->vmp_wpmapped); + ceri->ceri_page_slid = 0; + ceri->ceri_page_dirty = (m->vmp_dirty); + ceri->ceri_page_shadow_depth = shadow_depth; + } else { +#if DEBUG || DEVELOPMENT + panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason"); +#else + printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n"); +#endif /* DEBUG || DEVELOPMENT */ + /* Free the buffer */ + os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0); + } + } } - cs_enter_tainted_accepted++; + + set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE); } - if (kr != KERN_SUCCESS) { - if (cs_debug) { - printf("CODESIGNING: vm_fault_enter(0x%llx): " - "*** INVALID PAGE ***\n", - (long long)vaddr); - } -#if !SECURE_KERNEL - if (cs_enforcement_panic) { - panic("CODESIGNING: panicking on invalid page\n"); - } -#endif + if (panic_on_cs_killed && + object->object_is_shared_cache) { + char *tainted_contents; + vm_map_offset_t src_vaddr; + src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT); + tainted_contents = kalloc(PAGE_SIZE); + bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE); + printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents); + panic("CODE SIGNING: process %d[%s]: " + "rejecting invalid page (phys#0x%x) at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d" + "wpmapped:%d dirty:%d depth:%d)\n", + pid, procname, + VM_PAGE_GET_PHYS_PAGE(m), + (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset), + VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset), + VMP_CS_NX(m, fault_page_size, fault_phys_offset), + m->vmp_wpmapped, + m->vmp_dirty, + shadow_depth); + } + + if (file_object != object) { + vm_object_unlock(file_object); + } + if (pathname_len != 0) { + kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2); + pathname = NULL; + filename = NULL; } - } else { - /* proceed with the valid page */ + /* proceed with the invalid page */ kr = KERN_SUCCESS; + if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) && + !object->code_signed) { + /* + * This page has not been (fully) validated but + * does not belong to a code-signed object + * so it should not be forcefully considered + * as tainted. + * We're just concerned about it here because + * we've been asked to "execute" it but that + * does not mean that it should cause other + * accesses to fail. + * This happens when a debugger sets a + * breakpoint and we then execute code in + * that page. 
Marking the page as "tainted" + * would cause any inspection tool ("leaks", + * "vmmap", "CrashReporter", ...) to get killed + * due to code-signing violation on that page, + * even though they're just reading it and not + * executing from it. + */ + } else { + /* + * Page might have been tainted before or not; + * now it definitively is. If the page wasn't + * tainted, we must disconnect it from all + * pmaps later, to force existing mappings + * through that code path for re-consideration + * of the validity of that page. + */ + if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) { + *must_disconnect = TRUE; + VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE); + } + } + cs_enter_tainted_accepted++; + } + if (kr != KERN_SUCCESS) { + if (cs_debug) { + printf("CODESIGNING: vm_fault_enter(0x%llx): " + "*** INVALID PAGE ***\n", + (long long)vaddr); + } +#if !SECURE_KERNEL + if (cs_enforcement_panic) { + panic("CODESIGNING: panicking on invalid page\n"); + } +#endif } + return kr; +} - boolean_t page_queues_locked = FALSE; -#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ -MACRO_BEGIN \ - if (! page_queues_locked) { \ - page_queues_locked = TRUE; \ - vm_page_lockspin_queues(); \ - } \ +/* + * Check that the code signature is valid for the given page being inserted into + * the pmap. + * + * @param must_disconnect This value will be set to true if the caller must disconnect + * this page. + * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault. + */ +static kern_return_t +vm_fault_validate_cs( + bool cs_bypass, + vm_object_t object, + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_object_fault_info_t fault_info, + bool *must_disconnect) +{ + bool map_is_switched, map_is_switch_protected, cs_violation; + kern_return_t kr; + /* Validate code signature if necessary. */ + map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && + (pmap == vm_map_pmap(current_thread()->map))); + map_is_switch_protected = current_thread()->map->switch_protect; + kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap, + prot, caller_prot, fault_page_size, fault_phys_offset, fault_info, + map_is_switched, map_is_switch_protected, &cs_violation); + if (kr != KERN_SUCCESS) { + return kr; + } + if (cs_violation) { + kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr, + fault_page_size, fault_phys_offset, + map_is_switched, map_is_switch_protected, must_disconnect); + } + return kr; +} + +/* + * Enqueue the page on the appropriate paging queue. + */ +static void +vm_fault_enqueue_page( + vm_object_t object, + vm_page_t m, + bool wired, + bool change_wiring, + vm_tag_t wire_tag, + bool no_cache, + int *type_of_fault, + kern_return_t kr) +{ + assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object); + boolean_t page_queues_locked = FALSE; + boolean_t previously_pmapped = m->vmp_pmapped; +#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (! 
page_queues_locked) { \ + page_queues_locked = TRUE; \ + vm_page_lockspin_queues(); \ + } \ MACRO_END -#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ -MACRO_BEGIN \ - if (page_queues_locked) { \ - page_queues_locked = FALSE; \ - vm_page_unlock_queues(); \ - } \ +#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (page_queues_locked) { \ + page_queues_locked = FALSE; \ + vm_page_unlock_queues(); \ + } \ MACRO_END - /* - * Hold queues lock to manipulate - * the page queues. Change wiring - * case is obvious. - */ - assert(m->compressor || m->object != compressor_object); - if (m->compressor) { +#if CONFIG_BACKGROUND_QUEUE + vm_page_update_background_state(m); +#endif + if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { /* * Compressor pages are neither wired * nor pageable and should never change. */ - assert(m->object == compressor_object); + assert(object == compressor_object); } else if (change_wiring) { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); if (wired) { if (kr == KERN_SUCCESS) { - vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); + vm_page_wire(m, wire_tag, TRUE); } } else { - vm_page_unwire(m, TRUE); + vm_page_unwire(m, TRUE); } /* we keep the page queues lock, if we need it later */ - } else { - if (kr != KERN_SUCCESS) { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); - vm_page_deactivate(m); + if (object->internal == TRUE) { + /* + * don't allow anonymous pages on + * the speculative queues + */ + no_cache = FALSE; + } + if (kr != KERN_SUCCESS) { + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + vm_page_deactivate(m); /* we keep the page queues lock, if we need it later */ - } else if (((!m->active && !m->inactive) || - m->clean_queue || - no_cache) && - !VM_PAGE_WIRED(m) && !m->throttled) { - + } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || + (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || + (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) || + ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) && + !VM_PAGE_WIRED(m)) { if (vm_page_local_q && - !no_cache && (*type_of_fault == DBG_COW_FAULT || - *type_of_fault == DBG_ZERO_FILL_FAULT) ) { - struct vpl *lq; - uint32_t lid; + *type_of_fault == DBG_ZERO_FILL_FAULT)) { + struct vpl *lq; + uint32_t lid; + + assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); - vm_object_lock_assert_exclusive(m->object); + vm_object_lock_assert_exclusive(object); /* * we got a local queue to stuff this @@ -3009,31 +3086,30 @@ MACRO_END * we'll use the current cpu number to * select the queue note that we don't * need to disable preemption... 
we're - * going to behind the local queue's + * going to be behind the local queue's * lock to do the real work */ lid = cpu_number(); - lq = &vm_page_local_q[lid].vpl_un.vpl; + lq = zpercpu_get_cpu(vm_page_local_q, lid); VPL_LOCK(&lq->vpl_lock); vm_page_check_pageable_safe(m); - queue_enter(&lq->vpl_queue, m, - vm_page_t, pageq); - m->local = TRUE; - m->local_id = lid; + vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq); + m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q; + m->vmp_local_id = lid; lq->vpl_count++; - - if (m->object->internal) + + if (object->internal) { lq->vpl_internal_count++; - else + } else { lq->vpl_external_count++; + } VPL_UNLOCK(&lq->vpl_lock); - if (lq->vpl_count > vm_page_local_q_soft_limit) - { + if (lq->vpl_count > vm_page_local_q_soft_limit) { /* * we're beyond the soft limit * for the local queue @@ -3053,7 +3129,6 @@ MACRO_END vm_page_reactivate_local(lid, FALSE, FALSE); } } else { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); /* @@ -3061,15 +3136,14 @@ MACRO_END * page queue lock */ if (!VM_PAGE_WIRED(m)) { - if (m->clean_queue) { - vm_page_queues_remove(m); + if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + vm_page_queues_remove(m, FALSE); - vm_pageout_cleaned_reactivated++; - vm_pageout_cleaned_fault_reactivated++; + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1); } - if ((!m->active && - !m->inactive) || + if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) || no_cache) { /* * If this is a no_cache mapping @@ -3081,20 +3155,18 @@ MACRO_END * that they can be readily * recycled if free memory runs * low. Otherwise the page is - * activated as normal. + * activated as normal. */ if (no_cache && (!previously_pmapped || - m->no_cache)) { - m->no_cache = TRUE; + m->vmp_no_cache)) { + m->vmp_no_cache = TRUE; - if (!m->speculative) + if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { vm_page_speculate(m, FALSE); - - } else if (!m->active && - !m->inactive) { - + } + } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) { vm_page_activate(m); } } @@ -3105,189 +3177,503 @@ MACRO_END } /* we're done with the page queues lock, if we ever took it */ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED(); +} - - /* If we have a KERN_SUCCESS from the previous checks, we either have - * a good page, or a tainted page that has been accepted by the process. - * In both cases the page will be entered into the pmap. - * If the page is writeable, we need to disconnect it from other pmaps - * now so those processes can take note. +/* + * Sets the pmmpped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting. + * @return true if the page needs to be sync'ed via pmap_sync-page_data_physo + * before being inserted into the pmap. 
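Backing up to the local-queue path just above: freshly faulted zero-fill and COW pages go onto the faulting CPU's private queue under a cheap per-CPU lock, and are only migrated to the global active queue once the queue passes a soft limit. A toy model of that enqueue (counts only, no real page list, and an arbitrary limit for the sketch):

#include <pthread.h>
#include <stdbool.h>

#define VPL_SOFT_LIMIT 100	/* arbitrary value for this sketch */

struct vpl {
	pthread_mutex_t vpl_lock;
	unsigned int    vpl_count;
	/* ... per-CPU queue of locally activated pages ... */
};

/* Enqueue one page on this CPU's local queue.  Returns true when the
 * caller should drain the queue to the global active queue (the
 * kernel does this via vm_page_reactivate_local()). */
static bool
local_queue_enter(struct vpl *lq)
{
	bool over_limit;

	pthread_mutex_lock(&lq->vpl_lock);
	lq->vpl_count++;
	over_limit = lq->vpl_count > VPL_SOFT_LIMIT;
	pthread_mutex_unlock(&lq->vpl_lock);

	return over_limit;
}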
+ */ +static bool +vm_fault_enter_set_mapped( + vm_object_t object, + vm_page_t m, + vm_prot_t prot, + vm_prot_t fault_type) +{ + bool page_needs_sync = false; + /* + * NOTE: we may only hold the vm_object lock SHARED + * at this point, so we need the phys_page lock to + * properly serialize updating the pmapped and + * xpmapped bits */ - if (kr == KERN_SUCCESS) { + if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + m->vmp_pmapped = TRUE; + + if (!m->vmp_xpmapped) { + m->vmp_xpmapped = TRUE; + + pmap_unlock_phys_page(phys_page); - /* - * NOTE: we may only hold the vm_object lock SHARED - * at this point, so we need the phys_page lock to - * properly serialize updating the pmapped and - * xpmapped bits + if (!object->internal) { + OSAddAtomic(1, &vm_page_xpmapped_external_count); + } + +#if defined(__arm__) || defined(__arm64__) + page_needs_sync = true; +#else + if (object->internal && + object->pager != NULL) { + /* + * This page could have been + * uncompressed by the + * compressor pager and its + * contents might be only in + * the data cache. + * Since it's being mapped for + * "execute" for the fist time, + * make sure the icache is in + * sync. + */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + page_needs_sync = true; + } +#endif + } else { + pmap_unlock_phys_page(phys_page); + } + } else { + if (m->vmp_pmapped == FALSE) { + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + + pmap_lock_phys_page(phys_page); + m->vmp_pmapped = TRUE; + pmap_unlock_phys_page(phys_page); + } + } + + if (fault_type & VM_PROT_WRITE) { + if (m->vmp_wpmapped == FALSE) { + vm_object_lock_assert_exclusive(object); + if (!object->internal && object->pager) { + task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager)); + } + m->vmp_wpmapped = TRUE; + } + } + return page_needs_sync; +} + +/* + * Try to enter the given page into the pmap. + * Will retry without execute permission iff PMAP_CS is enabled and we encounter + * a codesigning failure on a non-execute fault. + */ +static kern_return_t +vm_fault_attempt_pmap_enter( + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options) +{ +#if !PMAP_CS +#pragma unused(caller_prot) +#endif /* !PMAP_CS */ + kern_return_t kr; + if (fault_page_size != PAGE_SIZE) { + DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); + } + + PMAP_ENTER_OPTIONS(pmap, vaddr, + fault_phys_offset, + m, *prot, fault_type, 0, + wired, + pmap_options, + kr); + return kr; +} + +/* + * Enter the given page into the pmap. + * The map must be locked shared. + * The vm object must NOT be locked. + * + * @param need_retry if not null, avoid making a (potentially) blocking call into + * the pmap layer. When such a call would be necessary, return true in this boolean instead. 
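The page_needs_sync logic in vm_fault_enter_set_mapped above boils down to: the first time a page is mapped executable, its contents may still live only in the data cache (for instance, just after decompression), so the instruction cache must be brought in sync before the mapping goes live. Distilled into a sketch, with flag parameters standing in for the page and object state:

#include <stdbool.h>

/* Decide whether an icache sync is needed before the first executable
 * mapping of a page.  Mirrors the vmp_xpmapped logic above. */
static bool
first_exec_needs_icache_sync(bool prot_execute, bool *xpmapped,
    bool object_internal, bool object_has_pager)
{
	if (!prot_execute || *xpmapped) {
		return false;	/* not executable, or already handled once */
	}
	*xpmapped = true;
#if defined(__arm__) || defined(__arm64__)
	return true;		/* arm: always sync on first exec mapping */
#else
	/* x86: only pages that may have been decompressed need it */
	return object_internal && object_has_pager;
#endif
}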
+ */ +static kern_return_t +vm_fault_pmap_enter( + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options, + boolean_t *need_retry) +{ + kern_return_t kr; + if (need_retry != NULL) { + /* + * Although we don't hold a lock on this object, we hold a lock + * on the top object in the chain. To prevent a deadlock, we + * can't allow the pmap layer to block. + */ + pmap_options |= PMAP_OPTIONS_NOWAIT; + } + kr = vm_fault_attempt_pmap_enter(pmap, vaddr, + fault_page_size, fault_phys_offset, + m, prot, caller_prot, fault_type, wired, pmap_options); + if (kr == KERN_RESOURCE_SHORTAGE) { + if (need_retry) { + /* + * There's nothing we can do here since we hold the + * lock on the top object in the chain. The caller + * will need to deal with this by dropping that lock and retrying. + */ + *need_retry = TRUE; + vm_pmap_enter_retried++; + } + } + return kr; +} + +/* + * Enter the given page into the pmap. + * The vm map must be locked shared. + * The vm object must be locked exclusive, unless this is a soft fault. + * For a soft fault, the object must be locked shared or exclusive. + * + * @param need_retry if not null, avoid making a (potentially) blocking call into + * the pmap layer. When such a call would be necessary, return true in this boolean instead. + */ +static kern_return_t +vm_fault_pmap_enter_with_object_lock( + vm_object_t object, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_page_t m, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_prot_t fault_type, + bool wired, + int pmap_options, + boolean_t *need_retry) +{ + kern_return_t kr; + /* + * Prevent a deadlock by not + * holding the object lock if we need to wait for a page in + * pmap_enter() - + */ + kr = vm_fault_attempt_pmap_enter(pmap, vaddr, + fault_page_size, fault_phys_offset, + m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT); +#if __x86_64__ + if (kr == KERN_INVALID_ARGUMENT && + pmap == PMAP_NULL && + wired) { + /* + * Wiring a page in a pmap-less VM map: + * VMware's "vmmon" kernel extension does this + * to grab pages. + * Let it proceed even though the PMAP_ENTER() failed. */ - if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) { + kr = KERN_SUCCESS; + } +#endif /* __x86_64__ */ - pmap_lock_phys_page(m->phys_page); + if (kr == KERN_RESOURCE_SHORTAGE) { + if (need_retry) { /* - * go ahead and take the opportunity - * to set 'pmapped' here so that we don't - * need to grab this lock a 2nd time - * just below + * this will be non-null in the case where we hold the lock + * on the top-object in this chain... we can't just drop + * the lock on the object we're inserting the page into + * and recall the PMAP_ENTER since we can still cause + * a deadlock if one of the critical paths tries to + * acquire the lock on the top-object and we're blocked + * in PMAP_ENTER waiting for memory... our only recourse + * is to deal with it at a higher level where we can + * drop both locks. */ - m->pmapped = TRUE; - - if (!m->xpmapped) { + *need_retry = TRUE; + vm_pmap_enter_retried++; + goto done; + } + /* + * The nonblocking version of pmap_enter did not succeed. + * and we don't need to drop other locks and retry + * at the level above us, so + * use the blocking version instead. 
Requires marking + * the page busy and unlocking the object + */ + boolean_t was_busy = m->vmp_busy; - m->xpmapped = TRUE; + vm_object_lock_assert_exclusive(object); - pmap_unlock_phys_page(m->phys_page); + m->vmp_busy = TRUE; + vm_object_unlock(object); - if (!m->object->internal) - OSAddAtomic(1, &vm_page_xpmapped_external_count); + PMAP_ENTER_OPTIONS(pmap, vaddr, + fault_phys_offset, + m, *prot, fault_type, + 0, wired, + pmap_options, kr); - if ((COMPRESSED_PAGER_IS_ACTIVE) && - m->object->internal && - m->object->pager != NULL) { - /* - * This page could have been - * uncompressed by the - * compressor pager and its - * contents might be only in - * the data cache. - * Since it's being mapped for - * "execute" for the fist time, - * make sure the icache is in - * sync. - */ - pmap_sync_page_data_phys(m->phys_page); - } - } else - pmap_unlock_phys_page(m->phys_page); + assert(VM_PAGE_OBJECT(m) == object); + + /* Take the object lock again. */ + vm_object_lock(object); + + /* If the page was busy, someone else will wake it up. + * Otherwise, we have to do it now. */ + assert(m->vmp_busy); + if (!was_busy) { + PAGE_WAKEUP_DONE(m); + } + vm_pmap_enter_blocked++; + } + +done: + return kr; +} + +/* + * Prepare to enter a page into the pmap by checking CS, protection bits, + * and setting mapped bits on the page_t. + * Does not modify the page's paging queue. + * + * page queue lock must NOT be held + * m->vmp_object must be locked + * + * NOTE: m->vmp_object could be locked "shared" only if we are called + * from vm_fault() as part of a soft fault. + */ +static kern_return_t +vm_fault_enter_prepare( + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t *prot, + vm_prot_t caller_prot, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + boolean_t change_wiring, + vm_prot_t fault_type, + vm_object_fault_info_t fault_info, + int *type_of_fault, + bool *page_needs_data_sync) +{ + kern_return_t kr; + bool is_tainted = false; + vm_object_t object; + boolean_t cs_bypass = fault_info->cs_bypass; + + object = VM_PAGE_OBJECT(m); + + vm_object_lock_assert_held(object); + +#if KASAN + if (pmap == kernel_pmap) { + kasan_notify_address(vaddr, PAGE_SIZE); + } +#endif + + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); + + if (*type_of_fault == DBG_ZERO_FILL_FAULT) { + vm_object_lock_assert_exclusive(object); + } else if ((fault_type & VM_PROT_WRITE) == 0 && + !change_wiring && + (!m->vmp_wpmapped +#if VM_OBJECT_ACCESS_TRACKING + || object->access_tracking +#endif /* VM_OBJECT_ACCESS_TRACKING */ + )) { + /* + * This is not a "write" fault, so we + * might not have taken the object lock + * exclusively and we might not be able + * to update the "wpmapped" bit in + * vm_fault_enter(). + * Let's just grant read access to + * the page for now and we'll + * soft-fault again if we need write + * access later... + */ + + /* This had better not be a JIT page. */ + if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) { + *prot &= ~VM_PROT_WRITE; } else { - if (m->pmapped == FALSE) { - pmap_lock_phys_page(m->phys_page); - m->pmapped = TRUE; - pmap_unlock_phys_page(m->phys_page); + assert(cs_bypass); + } + } + if (m->vmp_pmapped == FALSE) { + if (m->vmp_clustered) { + if (*type_of_fault == DBG_CACHE_HIT_FAULT) { + /* + * found it in the cache, but this + * is the first fault-in of the page (m->vmp_pmapped == FALSE) + * so it must have come in as part of + * a cluster... 
account 1 pagein against it + */ + if (object->internal) { + *type_of_fault = DBG_PAGEIND_FAULT; + } else { + *type_of_fault = DBG_PAGEINV_FAULT; + } + + VM_PAGE_COUNT_AS_PAGEIN(m); } + VM_PAGE_CONSUME_CLUSTERED(m); } - if (vm_page_is_slideable(m)) { - boolean_t was_busy = m->busy; + } - vm_object_lock_assert_exclusive(m->object); + if (*type_of_fault != DBG_COW_FAULT) { + DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL); - m->busy = TRUE; - kr = vm_page_slide(m, 0); - assert(m->busy); - if(!was_busy) { - PAGE_WAKEUP_DONE(m); + if (pmap == kernel_pmap) { + DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL); + } + } + + kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr, + *prot, caller_prot, fault_page_size, fault_phys_offset, + fault_info, &is_tainted); + if (kr == KERN_SUCCESS) { + /* + * We either have a good page, or a tainted page that has been accepted by the process. + * In both cases the page will be entered into the pmap. + */ + *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type); + if ((fault_type & VM_PROT_WRITE) && is_tainted) { + /* + * This page is tainted but we're inserting it anyways. + * Since it's writeable, we need to disconnect it from other pmaps + * now so those processes can take note. + */ + + /* + * We can only get here + * because of the CSE logic + */ + assert(pmap_get_vm_map_cs_enforced(pmap)); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + /* + * If we are faulting for a write, we can clear + * the execute bit - that will ensure the page is + * checked again before being executable, which + * protects against a map switch. + * This only happens the first time the page + * gets tainted, so we won't get stuck here + * to make an already writeable page executable. + */ + if (!cs_bypass) { + assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)); + *prot &= ~VM_PROT_EXECUTE; } - if (kr != KERN_SUCCESS) { - /* - * This page has not been slid correctly, - * do not do the pmap_enter() ! - * Let vm_fault_enter() return the error - * so the caller can fail the fault. - */ - goto after_the_pmap_enter; + } + assert(VM_PAGE_OBJECT(m) == object); + +#if VM_OBJECT_ACCESS_TRACKING + if (object->access_tracking) { + DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type); + if (fault_type & VM_PROT_WRITE) { + object->access_tracking_writes++; + vm_object_access_tracking_writes++; + } else { + object->access_tracking_reads++; + vm_object_access_tracking_reads++; } } +#endif /* VM_OBJECT_ACCESS_TRACKING */ + } - if (fault_type & VM_PROT_WRITE) { + return kr; +} + +/* + * page queue lock must NOT be held + * m->vmp_object must be locked + * + * NOTE: m->vmp_object could be locked "shared" only if we are called + * from vm_fault() as part of a soft fault. If so, we must be + * careful not to modify the VM object in any way that is not + * legal under a shared lock... 
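+ *
+ * A minimal sketch of that rule (illustrative, mirroring
+ * vm_fault_enter_set_mapped() above): per-page bits that are
+ * serialized by the phys_page lock may be set under a shared
+ * object lock; anything else requires the exclusive lock:
+ *
+ *	pmap_lock_phys_page(phys_page);     (shared lock suffices)
+ *	m->vmp_pmapped = TRUE;
+ *	pmap_unlock_phys_page(phys_page);
+ *
+ *	vm_object_lock_assert_exclusive(object);   (exclusive only)
+ *	m->vmp_wpmapped = TRUE;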
+ */ +kern_return_t +vm_fault_enter( + vm_page_t m, + pmap_t pmap, + vm_map_offset_t vaddr, + vm_map_size_t fault_page_size, + vm_map_offset_t fault_phys_offset, + vm_prot_t prot, + vm_prot_t caller_prot, + boolean_t wired, + boolean_t change_wiring, + vm_tag_t wire_tag, + vm_object_fault_info_t fault_info, + boolean_t *need_retry, + int *type_of_fault) +{ + kern_return_t kr; + vm_object_t object; + bool page_needs_data_sync; + vm_prot_t fault_type; + int pmap_options = fault_info->pmap_options; - if (m->wpmapped == FALSE) { - vm_object_lock_assert_exclusive(m->object); + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + assert(m->vmp_fictitious); + return KERN_SUCCESS; + } - m->wpmapped = TRUE; - } - if (must_disconnect) { - /* - * We can only get here - * because of the CSE logic - */ - assert(cs_enforcement_enabled); - pmap_disconnect(m->phys_page); - /* - * If we are faulting for a write, we can clear - * the execute bit - that will ensure the page is - * checked again before being executable, which - * protects against a map switch. - * This only happens the first time the page - * gets tainted, so we won't get stuck here - * to make an already writeable page executable. - */ - if (!cs_bypass){ - prot &= ~VM_PROT_EXECUTE; - } - } - } + fault_type = change_wiring ? VM_PROT_NONE : caller_prot; - /* Prevent a deadlock by not - * holding the object lock if we need to wait for a page in - * pmap_enter() - */ - PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0, - wired, - pmap_options | PMAP_OPTIONS_NOWAIT, - pe_result); + kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot, + fault_page_size, fault_phys_offset, change_wiring, fault_type, + fault_info, type_of_fault, &page_needs_data_sync); + object = VM_PAGE_OBJECT(m); - if(pe_result == KERN_RESOURCE_SHORTAGE) { + vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr); - if (need_retry) { - /* - * this will be non-null in the case where we hold the lock - * on the top-object in this chain... we can't just drop - * the lock on the object we're inserting the page into - * and recall the PMAP_ENTER since we can still cause - * a deadlock if one of the critical paths tries to - * acquire the lock on the top-object and we're blocked - * in PMAP_ENTER waiting for memory... our only recourse - * is to deal with it at a higher level where we can - * drop both locks. - */ - *need_retry = TRUE; - vm_pmap_enter_retried++; - goto after_the_pmap_enter; - } - /* The nonblocking version of pmap_enter did not succeed. - * and we don't need to drop other locks and retry - * at the level above us, so - * use the blocking version instead. Requires marking - * the page busy and unlocking the object */ - boolean_t was_busy = m->busy; - - vm_object_lock_assert_exclusive(m->object); - - m->busy = TRUE; - vm_object_unlock(m->object); - - PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, - 0, wired, - pmap_options, pe_result); - - /* Take the object lock again. */ - vm_object_lock(m->object); - - /* If the page was busy, someone else will wake it up. - * Otherwise, we have to do it now. 
*/ - assert(m->busy); - if(!was_busy) { - PAGE_WAKEUP_DONE(m); - } - vm_pmap_enter_blocked++; + if (kr == KERN_SUCCESS) { + if (page_needs_data_sync) { + pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m)); } + + kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr, + fault_page_size, fault_phys_offset, m, + &prot, caller_prot, fault_type, wired, pmap_options, need_retry); } -after_the_pmap_enter: return kr; } void -vm_pre_fault(vm_map_offset_t vaddr) +vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot) { if (pmap_find_phys(current_map()->pmap, vaddr) == 0) { - - vm_fault(current_map(), /* map */ - vaddr, /* vaddr */ - VM_PROT_READ, /* fault_type */ - FALSE, /* change_wiring */ - THREAD_UNINT, /* interruptible */ - NULL, /* caller_pmap */ - 0 /* caller_pmap_addr */); + vm_fault(current_map(), /* map */ + vaddr, /* vaddr */ + prot, /* fault_type */ + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + THREAD_UNINT, /* interruptible */ + NULL, /* caller_pmap */ + 0 /* caller_pmap_addr */); } } @@ -3306,93 +3692,314 @@ vm_pre_fault(vm_map_offset_t vaddr) * and deallocated when leaving vm_fault. */ -extern int _map_enter_debug; +extern uint64_t get_current_unique_pid(void); unsigned long vm_fault_collapse_total = 0; unsigned long vm_fault_collapse_skipped = 0; kern_return_t -vm_fault( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, - int interruptible, - pmap_t caller_pmap, - vm_map_offset_t caller_pmap_addr) +vm_fault_external( + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr) { return vm_fault_internal(map, vaddr, fault_type, change_wiring, - interruptible, caller_pmap, caller_pmap_addr, - NULL); + change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE, + interruptible, caller_pmap, caller_pmap_addr, + NULL); +} + +kern_return_t +vm_fault( + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr) +{ + return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag, + interruptible, caller_pmap, caller_pmap_addr, + NULL); +} + +static boolean_t +current_proc_is_privileged(void) +{ + return csproc_get_platform_binary(current_proc()); +} + +uint64_t vm_copied_on_read = 0; + +/* + * Cleanup after a vm_fault_enter. + * At this point, the fault should either have failed (kr != KERN_SUCCESS) + * or the page should be in the pmap and on the correct paging queue. + * + * Precondition: + * map must be locked shared. + * m_object must be locked. + * If top_object != VM_OBJECT_NULL, it must be locked. + * real_map must be locked. 
+ * + * Postcondition: + * map will be unlocked + * m_object will be unlocked + * top_object will be unlocked + * If real_map != map, it will be unlocked + */ +static void +vm_fault_complete( + vm_map_t map, + vm_map_t real_map, + vm_object_t object, + vm_object_t m_object, + vm_page_t m, + vm_map_offset_t offset, + vm_map_offset_t trace_real_vaddr, + vm_object_fault_info_t fault_info, + vm_prot_t caller_prot, +#if CONFIG_DTRACE + vm_map_offset_t real_vaddr, +#else + __unused vm_map_offset_t real_vaddr, +#endif /* CONFIG_DTRACE */ + int type_of_fault, + boolean_t need_retry, + kern_return_t kr, + ppnum_t *physpage_p, + vm_prot_t prot, + vm_object_t top_object, + boolean_t need_collapse, + vm_map_offset_t cur_offset, + vm_prot_t fault_type, + vm_object_t *written_on_object, + memory_object_t *written_on_pager, + vm_object_offset_t *written_on_offset) +{ + int event_code = 0; + vm_map_lock_assert_shared(map); + vm_object_lock_assert_held(m_object); + if (top_object != VM_OBJECT_NULL) { + vm_object_lock_assert_held(top_object); + } + vm_map_lock_assert_held(real_map); + + if (m_object->internal) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + } else if (m_object->object_is_shared_cache) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + } else { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); + if (need_retry == FALSE) { + KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0); + } + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag); + if (kr == KERN_SUCCESS && + physpage_p != NULL) { + /* for vm_map_wire_and_extract() */ + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); + if (prot & VM_PROT_WRITE) { + vm_object_lock_assert_exclusive(m_object); + m->vmp_dirty = TRUE; + } + } + + if (top_object != VM_OBJECT_NULL) { + /* + * It's safe to drop the top object + * now that we've done our + * vm_fault_enter(). Any other fault + * in progress for that virtual + * address will either find our page + * and translation or put in a new page + * and translation. + */ + vm_object_unlock(top_object); + top_object = VM_OBJECT_NULL; + } + + if (need_collapse == TRUE) { + vm_object_collapse(object, vm_object_trunc_page(offset), TRUE); + } + + if (need_retry == FALSE && + (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { + /* + * evaluate access pattern and update state + * vm_fault_deactivate_behind depends on the + * state being up to date + */ + vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior); + + vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior); + } + /* + * That's it, clean up and return. 
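+ *
+ * The unwind below is ordered: wake any waiter on the busy page and
+ * record the deferred-write bookkeeping while the page's object is
+ * still locked, then drop the object lock, then the map locks.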
+ */
+	if (m->vmp_busy) {
+		vm_object_lock_assert_exclusive(m_object);
+		PAGE_WAKEUP_DONE(m);
+	}
+
+	if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
+		vm_object_paging_begin(m_object);
+
+		assert(*written_on_object == VM_OBJECT_NULL);
+		*written_on_object = m_object;
+		*written_on_pager = m_object->pager;
+		*written_on_offset = m_object->paging_offset + m->vmp_offset;
+	}
+	vm_object_unlock(object);
+
+	vm_map_unlock_read(map);
+	if (real_map != map) {
+		vm_map_unlock(real_map);
+	}
 }

+static inline int
+vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
+{
+	if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
+		return DBG_COR_FAULT;
+	}
+	return type_of_fault;
+}

 kern_return_t
 vm_fault_internal(
-	vm_map_t map,
-	vm_map_offset_t vaddr,
-	vm_prot_t caller_prot,
-	boolean_t change_wiring,
-	int interruptible,
-	pmap_t caller_pmap,
-	vm_map_offset_t caller_pmap_addr,
-	ppnum_t *physpage_p)
+	vm_map_t map,
+	vm_map_offset_t vaddr,
+	vm_prot_t caller_prot,
+	boolean_t change_wiring,
+	vm_tag_t wire_tag,              /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
+	int interruptible,
+	pmap_t caller_pmap,
+	vm_map_offset_t caller_pmap_addr,
+	ppnum_t *physpage_p)
 {
-	vm_map_version_t version;       /* Map version for verification */
-	boolean_t wired;                /* Should mapping be wired down? */
-	vm_object_t object;             /* Top-level object */
-	vm_object_offset_t offset;      /* Top-level offset */
-	vm_prot_t prot;                 /* Protection for mapping */
-	vm_object_t old_copy_object;    /* Saved copy object */
-	vm_page_t result_page;          /* Result of vm_fault_page */
-	vm_page_t top_page;             /* Placeholder page */
-	kern_return_t kr;
-
-	vm_page_t m;                    /* Fast access to result_page */
-	kern_return_t error_code;
-	vm_object_t cur_object;
-	vm_object_offset_t cur_offset;
-	vm_page_t cur_m;
-	vm_object_t new_object;
+	vm_map_version_t version;       /* Map version for verification */
+	boolean_t wired;                /* Should mapping be wired down?
*/ + vm_object_t object; /* Top-level object */ + vm_object_offset_t offset; /* Top-level offset */ + vm_prot_t prot; /* Protection for mapping */ + vm_object_t old_copy_object; /* Saved copy object */ + vm_page_t result_page; /* Result of vm_fault_page */ + vm_page_t top_page; /* Placeholder page */ + kern_return_t kr; + + vm_page_t m; /* Fast access to result_page */ + kern_return_t error_code; + vm_object_t cur_object; + vm_object_t m_object = NULL; + vm_object_offset_t cur_offset; + vm_page_t cur_m; + vm_object_t new_object; int type_of_fault; - pmap_t pmap; - boolean_t interruptible_state; - vm_map_t real_map = map; - vm_map_t original_map = map; - vm_prot_t fault_type; - vm_prot_t original_fault_type; - struct vm_object_fault_info fault_info; - boolean_t need_collapse = FALSE; - boolean_t need_retry = FALSE; - boolean_t *need_retry_ptr = NULL; - int object_lock_type = 0; - int cur_object_lock_type; - vm_object_t top_object = VM_OBJECT_NULL; - int throttle_delay; - int compressed_count_delta; - - - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, - ((uint64_t)vaddr >> 32), - vaddr, - (map == kernel_map), - 0, - 0); + pmap_t pmap; + wait_interrupt_t interruptible_state; + vm_map_t real_map = map; + vm_map_t original_map = map; + bool object_locks_dropped = FALSE; + vm_prot_t fault_type; + vm_prot_t original_fault_type; + struct vm_object_fault_info fault_info = {}; + bool need_collapse = FALSE; + boolean_t need_retry = FALSE; + boolean_t *need_retry_ptr = NULL; + uint8_t object_lock_type = 0; + uint8_t cur_object_lock_type; + vm_object_t top_object = VM_OBJECT_NULL; + vm_object_t written_on_object = VM_OBJECT_NULL; + memory_object_t written_on_pager = NULL; + vm_object_offset_t written_on_offset = 0; + int throttle_delay; + int compressed_count_delta; + uint8_t grab_options; + bool need_copy; + bool need_copy_on_read; + vm_map_offset_t trace_vaddr; + vm_map_offset_t trace_real_vaddr; + vm_map_size_t fault_page_size; + vm_map_size_t fault_page_mask; + vm_map_offset_t fault_phys_offset; + vm_map_offset_t real_vaddr; + bool resilient_media_retry = FALSE; + vm_object_t resilient_media_object = VM_OBJECT_NULL; + vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1; + bool page_needs_data_sync = false; + /* + * Was the VM object contended when vm_map_lookup_locked locked it? + * If so, the zero fill path will drop the lock + * NB: Ideally we would always drop the lock rather than rely on + * this heuristic, but vm_object_unlock currently takes > 30 cycles. 
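+ *
+ * This flag is consumed only by the zero-fill fast path below:
+ * when set, the freshly inserted page is marked vmp_busy, the
+ * object lock is dropped across vm_page_zero_fill() and the pmap
+ * enter, and the lock is retaken afterwards to finish the cleanup
+ * (clearing the busy bit and waking any waiters).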
+ */ + bool object_is_contended = false; + + real_vaddr = vaddr; + trace_real_vaddr = vaddr; + + if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) { + fault_phys_offset = (vm_map_offset_t)-1; + fault_page_size = VM_MAP_PAGE_SIZE(original_map); + fault_page_mask = VM_MAP_PAGE_MASK(original_map); + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot); + vaddr = vm_map_trunc_page(vaddr, fault_page_mask); + } + } else { + fault_phys_offset = 0; + fault_page_size = PAGE_SIZE; + fault_page_mask = PAGE_MASK; + vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + } + + if (map == kernel_map) { + trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr); + trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr); + } else { + trace_vaddr = vaddr; + } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + (map == kernel_map), + 0, + 0); if (get_preemption_level() != 0) { - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)vaddr >> 32), - vaddr, - KERN_FAILURE, - 0, - 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + KERN_FAILURE, + 0, + 0); + + return KERN_FAILURE; + } - return (KERN_FAILURE); + thread_t cthread = current_thread(); + bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME); + uint64_t fstart = 0; + + if (rtfault) { + fstart = mach_continuous_time(); } - + interruptible_state = thread_interrupt_level(interruptible); fault_type = (change_wiring ? VM_PROT_NONE : caller_prot); @@ -3401,14 +4008,29 @@ vm_fault_internal( current_task()->faults++; original_fault_type = fault_type; - if (fault_type & VM_PROT_WRITE) - object_lock_type = OBJECT_LOCK_EXCLUSIVE; - else - object_lock_type = OBJECT_LOCK_SHARED; + need_copy = FALSE; + if (fault_type & VM_PROT_WRITE) { + need_copy = TRUE; + } + + if (need_copy || change_wiring) { + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + } else { + object_lock_type = OBJECT_LOCK_SHARED; + } cur_object_lock_type = OBJECT_LOCK_SHARED; + if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) { + if (compressor_map) { + if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) { + panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map)); + } + } + } RetryFault: + assert(written_on_object == VM_OBJECT_NULL); + /* * assume we will hit a page in the cache * otherwise, explicitly override with @@ -3424,17 +4046,32 @@ RetryFault: map = original_map; vm_map_lock_read(map); - kr = vm_map_lookup_locked(&map, vaddr, fault_type, - object_lock_type, &version, - &object, &offset, &prot, &wired, - &fault_info, - &real_map); + if (resilient_media_retry) { + /* + * If we have to insert a fake zero-filled page to hide + * a media failure to provide the real page, we need to + * resolve any pending copy-on-write on this mapping. + * VM_PROT_COPY tells vm_map_lookup_locked() to deal + * with that even if this is not a "write" fault. + */ + need_copy = TRUE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + } + kr = vm_map_lookup_locked(&map, vaddr, + (fault_type | (need_copy ? 
VM_PROT_COPY : 0)), + object_lock_type, &version, + &object, &offset, &prot, &wired, + &fault_info, + &real_map, + &object_is_contended); if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); goto done; } + + pmap = real_map->pmap; fault_info.interruptible = interruptible; fault_info.stealth = FALSE; @@ -3442,31 +4079,67 @@ RetryFault: fault_info.mark_zf_absent = FALSE; fault_info.batch_pmap_op = FALSE; + if (resilient_media_retry) { + /* + * We're retrying this fault after having detected a media + * failure from a "resilient_media" mapping. + * Check that the mapping is still pointing at the object + * that just failed to provide a page. + */ + assert(resilient_media_object != VM_OBJECT_NULL); + assert(resilient_media_offset != (vm_object_offset_t)-1); + if (object != VM_OBJECT_NULL && + object == resilient_media_object && + offset == resilient_media_offset && + fault_info.resilient_media) { + /* + * This mapping still points at the same object + * and is still "resilient_media": proceed in + * "recovery-from-media-failure" mode, where we'll + * insert a zero-filled page in the top object. + */ +// printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset); + } else { + /* not recovering: reset state */ +// printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset); + resilient_media_retry = FALSE; + /* release our extra reference on failed object */ +// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object); + vm_object_deallocate(resilient_media_object); + resilient_media_object = VM_OBJECT_NULL; + resilient_media_offset = (vm_object_offset_t)-1; + } + } else { + assert(resilient_media_object == VM_OBJECT_NULL); + resilient_media_offset = (vm_object_offset_t)-1; + } + /* * If the page is wired, we must fault for the current protection * value, to avoid further faults. */ if (wired) { fault_type = prot | VM_PROT_WRITE; + } + if (wired || need_copy) { /* * since we're treating this fault as a 'write' * we must hold the top object lock exclusively */ if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly * take the lock exclusively */ - vm_object_lock(object); + vm_object_lock(object); } } } -#if VM_FAULT_CLASSIFY +#if VM_FAULT_CLASSIFY /* * Temporary data gathering code */ @@ -3486,7 +4159,7 @@ RetryFault: * and use the original fault path (which doesn't hold * the map lock, and relies on busy pages). * The give up cases include: - * - Have to talk to pager. + * - Have to talk to pager. * - Page is busy, absent or in error. * - Pager has locked out desired access. * - Fault needs to be restarted. @@ -3494,7 +4167,7 @@ RetryFault: * * The code is an infinite loop that moves one level down * the shadow chain each time. cur_object and cur_offset - * refer to the current object being examined. object and offset + * refer to the current object being examined. object and offset * are the original object from the map. The loop is at the * top level if and only if object and cur_object are the same. * @@ -3504,6 +4177,26 @@ RetryFault: * */ +#if defined(__arm64__) + /* + * Fail if reading an execute-only page in a + * pmap that enforces execute-only protection. 
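+ *
+ * Concretely (an illustrative reading of the predicate below):
+ * this catches a data load from a mapping created with
+ * prot == VM_PROT_EXECUTE only, i.e.
+ *
+ *	fault_type == VM_PROT_READ	(a load touched the page)
+ *	prot & VM_PROT_EXECUTE		(mapping allows execute)
+ *	!(prot & VM_PROT_READ)		(but not read)
+ *
+ * on hardware whose pmap can express execute-only mappings; such
+ * a fault must fail with KERN_PROTECTION_FAILURE rather than
+ * fault the page in.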
+ */ + if (fault_type == VM_PROT_READ && + (prot & VM_PROT_EXECUTE) && + !(prot & VM_PROT_READ) && + pmap_enforces_execute_only(pmap)) { + vm_object_unlock(object); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } + kr = KERN_PROTECTION_FAILURE; + goto done; + } +#endif + + fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK); /* * If this page is to be inserted in a copy delay object @@ -3511,16 +4204,25 @@ RetryFault: * copy delay strategy is implemented in the slow fault page. */ if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY && - object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) - goto handle_copy_delay; + object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) { + goto handle_copy_delay; + } cur_object = object; cur_offset = offset; + grab_options = 0; +#if CONFIG_SECLUDED_MEMORY + if (object->can_grab_secluded) { + grab_options |= VM_PAGE_GRAB_SECLUDED; + } +#endif /* CONFIG_SECLUDED_MEMORY */ + while (TRUE) { if (!cur_object->pager_created && - cur_object->phys_contiguous) /* superpage */ + cur_object->phys_contiguous) { /* superpage */ break; + } if (cur_object->blocked_access) { /* @@ -3530,24 +4232,25 @@ RetryFault: break; } - m = vm_page_lookup(cur_object, cur_offset); + m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset)); + m_object = NULL; if (m != VM_PAGE_NULL) { - if (m->busy) { - wait_result_t result; + m_object = cur_object; + + if (m->vmp_busy) { + wait_result_t result; /* * in order to do the PAGE_ASSERT_WAIT, we must * have object that 'm' belongs to locked exclusively */ if (object != cur_object) { - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(cur_object) == FALSE) { - /* + /* * couldn't upgrade so go do a full retry * immediately since we can no longer be * certain about cur_object (since we @@ -3556,19 +4259,19 @@ RetryFault: */ vm_object_unlock(object); - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } goto RetryFault; } } } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly take the lock * exclusively and go relookup the page since we * will have dropped the object lock and @@ -3577,14 +4280,14 @@ RetryFault: * no need for a full retry since we're * at the top level of the object chain */ - vm_object_lock(object); + vm_object_lock(object); continue; } } - if (m->pageout_queue && m->object->internal && COMPRESSED_PAGER_IS_ACTIVE) { + if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) { /* - * m->busy == TRUE and the object is locked exclusively + * m->vmp_busy == TRUE and the object is locked exclusively * if m->pageout_queue == TRUE after we acquire the * queues lock, we are guaranteed that it is stable on * the pageout queue and therefore reclaimable @@ -3592,9 +4295,11 @@ RetryFault: * NOTE: this is only true for the internal pageout queue * in the compressor world */ + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); + vm_page_lock_queues(); - if (m->pageout_queue) { + if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { vm_pageout_throttle_up(m); vm_page_unlock_queues(); @@ -3603,30 +4308,33 @@ RetryFault: } 
vm_page_unlock_queues(); } - if (object != cur_object) + if (object != cur_object) { vm_object_unlock(object); + } vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + if (real_map != map) { + vm_map_unlock(real_map); + } result = PAGE_ASSERT_WAIT(m, interruptible); vm_object_unlock(cur_object); if (result == THREAD_WAITING) { - result = thread_block(THREAD_CONTINUE_NULL); + result = thread_block(THREAD_CONTINUE_NULL); counter(c_vm_fault_page_block_busy_kernel++); } - if (result == THREAD_AWAKENED || result == THREAD_RESTART) - goto RetryFault; + if (result == THREAD_AWAKENED || result == THREAD_RESTART) { + goto RetryFault; + } kr = KERN_ABORTED; goto done; } reclaimed_from_pageout: - if (m->laundry) { + if (m->vmp_laundry) { if (object != cur_object) { if (cur_object_lock_type == OBJECT_LOCK_SHARED) { cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; @@ -3635,14 +4343,13 @@ reclaimed_from_pageout: vm_object_unlock(cur_object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { @@ -3660,163 +4367,63 @@ reclaimed_from_pageout: continue; } } - m->pageout = FALSE; - vm_pageout_steal_laundry(m, FALSE); } - if (m->phys_page == vm_page_guard_addr) { + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { /* * Guard page: let the slow path deal with it */ break; } - if (m->unusual && (m->error || m->restart || m->private || m->absent)) { - /* + if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) { + /* * Unusual case... let the slow path deal with it */ break; } - if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m->object)) { - if (object != cur_object) - vm_object_unlock(object); - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); - vm_object_unlock(cur_object); - kr = KERN_MEMORY_ERROR; - goto done; - } - - if (m->encrypted) { - /* - * ENCRYPTED SWAP: - * We've soft-faulted (because it's not in the page - * table) on an encrypted page. - * Keep the page "busy" so that no one messes with - * it during the decryption. - * Release the extra locks we're holding, keep only - * the page's VM object lock. - * - * in order to set 'busy' on 'm', we must - * have object that 'm' belongs to locked exclusively - */ - if (object != cur_object) { + if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) { + if (object != cur_object) { vm_object_unlock(object); - - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; - - if (vm_object_lock_upgrade(cur_object) == FALSE) { - /* - * couldn't upgrade so go do a full retry - * immediately since we've already dropped - * the top object lock associated with this page - * and the current one got dropped due to the - * failed upgrade... 
the state is no longer valid - */ - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); - - goto RetryFault; - } - } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; - - if (vm_object_lock_upgrade(object) == FALSE) { - /* - * couldn't upgrade, so explictly take the lock - * exclusively and go relookup the page since we - * will have dropped the object lock and - * a different thread could have inserted - * a page at this offset - * no need for a full retry since we're - * at the top level of the object chain - */ - vm_object_lock(object); - - continue; - } } - m->busy = TRUE; - vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); - - vm_page_decrypt(m, 0); - - assert(m->busy); - PAGE_WAKEUP_DONE(m); - - vm_object_unlock(cur_object); - /* - * Retry from the top, in case anything - * changed while we were decrypting... - */ - goto RetryFault; - } - ASSERT_PAGE_DECRYPTED(m); - - if(vm_page_is_slideable(m)) { - /* - * We might need to slide this page, and so, - * we want to hold the VM object exclusively. - */ - if (object != cur_object) { - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - vm_object_unlock(object); - vm_object_unlock(cur_object); - - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; - - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); - - goto RetryFault; - } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - vm_object_unlock(object); - object_lock_type = OBJECT_LOCK_EXCLUSIVE; - vm_map_unlock_read(map); - goto RetryFault; } + vm_object_unlock(cur_object); + kr = KERN_MEMORY_ERROR; + goto done; } + assert(m_object == VM_PAGE_OBJECT(m)); - if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m) || + if (vm_fault_cs_need_validation(map->pmap, m, m_object, + PAGE_SIZE, 0) || (physpage_p != NULL && (prot & VM_PROT_WRITE))) { -upgrade_for_validation: +upgrade_lock_and_retry: /* * We might need to validate this page * against its code signature, so we * want to hold the VM object exclusively. */ - if (object != cur_object) { + if (object != cur_object) { if (cur_object_lock_type == OBJECT_LOCK_SHARED) { vm_object_unlock(object); vm_object_unlock(cur_object); - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly take the lock * exclusively and go relookup the page since we * will have dropped the object lock and @@ -3825,7 +4432,7 @@ upgrade_for_validation: * no need for a full retry since we're * at the top level of the object chain */ - vm_object_lock(object); + vm_object_lock(object); continue; } @@ -3839,14 +4446,78 @@ upgrade_for_validation: */ if (object == cur_object && object->copy == VM_OBJECT_NULL) { - goto FastPmapEnter; } - if ((fault_type & VM_PROT_WRITE) == 0) { + if (!need_copy && + !fault_info.no_copy_on_read && + cur_object != object && + !cur_object->internal && + !cur_object->pager_trusted && + vm_protect_privileged_from_untrusted && + !((prot & VM_PROT_EXECUTE) && + cur_object->code_signed && + pmap_get_vm_map_cs_enforced(caller_pmap ? 
caller_pmap : pmap)) && + current_proc_is_privileged()) { + /* + * We're faulting on a page in "object" and + * went down the shadow chain to "cur_object" + * to find out that "cur_object"'s pager + * is not "trusted", i.e. we can not trust it + * to always return the same contents. + * Since the target is a "privileged" process, + * let's treat this as a copy-on-read fault, as + * if it was a copy-on-write fault. + * Once "object" gets a copy of this page, it + * won't have to rely on "cur_object" to + * provide the contents again. + * + * This is done by setting "need_copy" and + * retrying the fault from the top with the + * appropriate locking. + * + * Special case: if the mapping is executable + * and the untrusted object is code-signed and + * the process is "cs_enforced", we do not + * copy-on-read because that would break + * code-signing enforcement expectations (an + * executable page must belong to a code-signed + * object) and we can rely on code-signing + * to re-validate the page if it gets evicted + * and paged back in. + */ +// printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset); + vm_copied_on_read++; + need_copy = TRUE; + + vm_object_unlock(object); + vm_object_unlock(cur_object); + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } + goto RetryFault; + } + + if (!(fault_type & VM_PROT_WRITE) && !need_copy) { + if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) { + prot &= ~VM_PROT_WRITE; + } else { + /* + * For a protection that the pmap cares + * about, we must hand over the full + * set of protections (so that the pmap + * layer can apply any desired policy). + * This means that cs_bypass must be + * set, as this can force us to pass + * RWX. + */ + assert(fault_info.cs_bypass); + } - if (object != cur_object) { - /* + if (object != cur_object) { + /* * We still need to hold the top object * lock here to prevent a race between * a read fault (taking only "shared" @@ -3873,100 +4544,86 @@ upgrade_for_validation: object_lock_type = cur_object_lock_type; } FastPmapEnter: + assert(m_object == VM_PAGE_OBJECT(m)); + /* * prepare for the pmap_enter... 
* object and map are both locked * m contains valid data - * object == m->object + * object == m->vmp_object * cur_object == NULL or it's been unlocked * no paging references on either object or cur_object */ - if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) + if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) { need_retry_ptr = &need_retry; - else - need_retry_ptr = NULL; - - if (caller_pmap) { - kr = vm_fault_enter(m, - caller_pmap, - caller_pmap_addr, - prot, - caller_prot, - wired, - change_wiring, - fault_info.no_cache, - fault_info.cs_bypass, - fault_info.user_tag, - fault_info.pmap_options, - need_retry_ptr, - &type_of_fault); } else { - kr = vm_fault_enter(m, - pmap, - vaddr, - prot, - caller_prot, - wired, - change_wiring, - fault_info.no_cache, - fault_info.cs_bypass, - fault_info.user_tag, - fault_info.pmap_options, - need_retry_ptr, - &type_of_fault); - } - - if (kr == KERN_SUCCESS && - physpage_p != NULL) { - /* for vm_map_wire_and_extract() */ - *physpage_p = m->phys_page; - if (prot & VM_PROT_WRITE) { - vm_object_lock_assert_exclusive( - m->object); - m->dirty = TRUE; - } + need_retry_ptr = NULL; } - if (top_object != VM_OBJECT_NULL) { - /* - * It's safe to drop the top object - * now that we've done our - * vm_fault_enter(). Any other fault - * in progress for that virtual - * address will either find our page - * and translation or put in a new page - * and translation. - */ - vm_object_unlock(top_object); - top_object = VM_OBJECT_NULL; + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); } - if (need_collapse == TRUE) - vm_object_collapse(object, offset, TRUE); - - if (need_retry == FALSE && - (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { - /* - * evaluate access pattern and update state - * vm_fault_deactivate_behind depends on the - * state being up to date - */ - vm_fault_is_sequential(object, cur_offset, fault_info.behavior); - - vm_fault_deactivate_behind(object, cur_offset, fault_info.behavior); + if (caller_pmap) { + kr = vm_fault_enter(m, + caller_pmap, + caller_pmap_addr, + fault_page_size, + fault_phys_offset, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + need_retry_ptr, + &type_of_fault); + } else { + kr = vm_fault_enter(m, + pmap, + vaddr, + fault_page_size, + fault_phys_offset, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + need_retry_ptr, + &type_of_fault); } - /* - * That's it, clean up and return. 
- */ - if (m->busy) - PAGE_WAKEUP_DONE(m); - - vm_object_unlock(object); - - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + vm_fault_complete( + map, + real_map, + object, + m_object, + m, + offset, + trace_real_vaddr, + &fault_info, + caller_prot, + real_vaddr, + vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), + need_retry, + kr, + physpage_p, + prot, + top_object, + need_collapse, + cur_offset, + fault_type, + &written_on_object, + &written_on_pager, + &written_on_offset); + top_object = VM_OBJECT_NULL; if (need_retry == TRUE) { /* * vm_fault_enter couldn't complete the PMAP_ENTER... @@ -3979,7 +4636,7 @@ FastPmapEnter: (void)pmap_enter_options( pmap, vaddr, 0, 0, 0, 0, 0, PMAP_OPTIONS_NOENTER, NULL); - + need_retry = FALSE; goto RetryFault; } @@ -3990,7 +4647,7 @@ FastPmapEnter: */ assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE); - /* + /* * If objects match, then * object->copy must not be NULL (else control * would be in previous code block), and we @@ -3998,22 +4655,24 @@ FastPmapEnter: * with which we can't cope with here. */ if (cur_object == object) { - /* + /* * must take the slow path to * deal with the copy push */ break; } - + /* * This is now a shadow based copy on write * fault -- it requires a copy up the shadow * chain. */ - + assert(m_object == VM_PAGE_OBJECT(m)); + if ((cur_object_lock_type == OBJECT_LOCK_SHARED) && - VM_FAULT_NEED_CS_VALIDATION(NULL, m)) { - goto upgrade_for_validation; + vm_fault_cs_need_validation(NULL, m, m_object, + PAGE_SIZE, 0)) { + goto upgrade_lock_and_retry; } /* @@ -4022,17 +4681,18 @@ FastPmapEnter: * need to remember current page, as it's the * source of the copy. * - * at this point we hold locks on both + * at this point we hold locks on both * object and cur_object... no need to take * paging refs or mark pages BUSY since * we don't drop either object lock until * the page has been copied and inserted */ cur_m = m; - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); + m_object = NULL; if (m == VM_PAGE_NULL) { - /* + /* * no free page currently available... * must take the slow path */ @@ -4045,24 +4705,64 @@ FastPmapEnter: * the page copy. */ vm_page_copy(cur_m, m); - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); + if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) { + DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset); + } + m_object = object; SET_PAGE_DIRTY(m, FALSE); /* * Now cope with the source page and object */ - if (object->ref_count > 1 && cur_m->pmapped) - pmap_disconnect(cur_m->phys_page); - - if (cur_m->clustered) { + if (object->ref_count > 1 && cur_m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); + } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) { + /* + * We've copied the full 16K page but we're + * about to call vm_fault_enter() only for + * the 4K chunk we're faulting on. The other + * three 4K chunks in that page could still + * be pmapped in this pmap. + * Since the VM object layer thinks that the + * entire page has been dealt with and the + * original page might no longer be needed, + * it might collapse/bypass the original VM + * object and free its pages, which would be + * bad (and would trigger pmap_verify_free() + * assertions) if the other 4K chunks are still + * pmapped. 
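+ *
+ * Worked example (assuming 16K VM pages over a 4K map):
+ *
+ *	PAGE_SIZE         = 0x4000	(VM page granule)
+ *	fault_page_size   = 0x1000	(map granule)
+ *	fault_phys_offset = cur_offset -
+ *	    vm_object_trunc_page(cur_offset),
+ *	    i.e. one of 0x0000, 0x1000, 0x2000 or 0x3000
+ *
+ * vm_fault_enter() maps only the 4K chunk at fault_phys_offset,
+ * so the other three chunks of the copied page may still be
+ * pmapped elsewhere.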
+				 */
+				/*
+				 * XXX FBDP TODO4K: to be revisited
+				 * Technically, we need to pmap_disconnect()
+				 * only the target pmap's mappings for the 4K
+				 * chunks of this 16K VM page. If other pmaps
+				 * have PTEs on these chunks, that means that
+				 * the associated VM map must have a reference
+				 * on the VM object, so no need to worry about
+				 * those.
+				 * pmap_protect() for each 4K chunk would be
+				 * better but we'd have to check which chunks
+				 * are actually mapped before and after this
+				 * one.
+				 * A full-blown pmap_disconnect() is easier
+				 * for now but not efficient.
+				 */
+				DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
+				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
+			}
+
+			if (cur_m->vmp_clustered) {
 				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
 				VM_PAGE_CONSUME_CLUSTERED(cur_m);
+				vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
 			}
 			need_collapse = TRUE;

 			if (!cur_object->internal &&
 			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
-				/*
+				/*
 				 * The object from which we've just
 				 * copied a page is most probably backed
 				 * by a vnode. We don't want to waste too
@@ -4070,25 +4770,26 @@ FastPmapEnter:
 				 * and create a bottleneck when several tasks
 				 * map the same file.
 				 */
-				if (cur_object->copy == object) {
-					/*
+				if (cur_object->copy == object) {
+					/*
 					 * Shared mapping or no COW yet.
 					 * We can never collapse a copy
 					 * object into its backing object.
 					 */
-					need_collapse = FALSE;
+					need_collapse = FALSE;
 				} else if (cur_object->copy == object->shadow &&
-				    object->shadow->resident_page_count == 0) {
-					/*
+				    object->shadow->resident_page_count == 0) {
+					/*
 					 * Shared mapping after a COW occurred.
 					 */
-					need_collapse = FALSE;
+					need_collapse = FALSE;
 				}
 			}
 			vm_object_unlock(cur_object);

-			if (need_collapse == FALSE)
-				vm_fault_collapse_skipped++;
+			if (need_collapse == FALSE) {
+				vm_fault_collapse_skipped++;
+			}
 			vm_fault_collapse_total++;

 			type_of_fault = DBG_COW_FAULT;
@@ -4097,20 +4798,19 @@ FastPmapEnter:
 			current_task()->cow_faults++;

 			goto FastPmapEnter;
-
 		} else {
 			/*
 			 * No page at cur_object, cur_offset... m == NULL
 			 */
 			if (cur_object->pager_created) {
-				int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+				vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

-				if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
-					int my_fault_type;
-					int c_flags = C_DONT_BLOCK;
-					boolean_t insert_cur_object = FALSE;
+				if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
+					int my_fault_type;
+					uint8_t c_flags = C_DONT_BLOCK;
+					bool insert_cur_object = FALSE;

-					/*
+					/*
 					 * May have to talk to a pager...
* if so, take the slow path by * doing a 'break' from the while (TRUE) loop @@ -4118,8 +4818,9 @@ FastPmapEnter: * external_state will only be set to VM_EXTERNAL_STATE_EXISTS * if the compressor is active and the page exists there */ - if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) + if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) { break; + } if (map == kernel_map || real_map == kernel_map) { /* @@ -4130,15 +4831,14 @@ FastPmapEnter: break; } if (object != cur_object) { - if (fault_type & VM_PROT_WRITE) + if (fault_type & VM_PROT_WRITE) { c_flags |= C_KEEP; - else + } else { insert_cur_object = TRUE; + } } if (insert_cur_object == TRUE) { - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(cur_object) == FALSE) { @@ -4152,14 +4852,14 @@ FastPmapEnter: vm_object_unlock(object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } } } else if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (object != cur_object) { @@ -4177,8 +4877,9 @@ FastPmapEnter: vm_object_unlock(cur_object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } @@ -4193,11 +4894,12 @@ FastPmapEnter: * at the top level of the object chain */ vm_object_lock(object); - + continue; } } - m = vm_page_grab(); + m = vm_page_grab_options(grab_options); + m_object = NULL; if (m == VM_PAGE_NULL) { /* @@ -4212,11 +4914,11 @@ FastPmapEnter: * so no need to take a * "paging_in_progress" reference. */ - boolean_t shared_lock; + bool shared_lock; if ((object == cur_object && - object_lock_type == OBJECT_LOCK_EXCLUSIVE) || + object_lock_type == OBJECT_LOCK_EXCLUSIVE) || (object != cur_object && - cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { + cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { shared_lock = FALSE; } else { shared_lock = TRUE; @@ -4224,9 +4926,9 @@ FastPmapEnter: kr = vm_compressor_pager_get( cur_object->pager, - (cur_offset + - cur_object->paging_offset), - m->phys_page, + (vm_object_trunc_page(cur_offset) + + cur_object->paging_offset), + VM_PAGE_GET_PHYS_PAGE(m), &my_fault_type, c_flags, &compressed_count_delta); @@ -4238,10 +4940,29 @@ FastPmapEnter: cur_object); if (kr != KERN_SUCCESS) { - vm_page_release(m); + vm_page_release(m, FALSE); + m = VM_PAGE_NULL; + } + /* + * If vm_compressor_pager_get() returns + * KERN_MEMORY_FAILURE, then the + * compressed data is permanently lost, + * so return this error immediately. + */ + if (kr == KERN_MEMORY_FAILURE) { + if (object != cur_object) { + vm_object_unlock(cur_object); + } + vm_object_unlock(object); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } + goto done; + } else if (kr != KERN_SUCCESS) { break; } - m->dirty = TRUE; + m->vmp_dirty = TRUE; /* * If the object is purgeable, its @@ -4269,45 +4990,50 @@ FastPmapEnter: * no ledger update in that * case. */ - } else if ((cur_object->purgable == - VM_PURGABLE_DENY) || - (cur_object->vo_purgeable_owner == - NULL)) { + } else if (((cur_object->purgable == + VM_PURGABLE_DENY) && + (!cur_object->vo_ledger_tag)) || + (cur_object->vo_owner == + NULL)) { /* * "cur_object" is not purgeable - * or is not owned, so no - * purgeable ledgers to update. + * and is not ledger-taged, or + * there's no owner for it, + * so no owner's ledgers to + * update. 
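+ *
+ * In other words (an illustrative summary of this chain of
+ * checks): the owner's ledger is updated for this decompression
+ * only when all three conditions hold:
+ *	the object is purgeable or carries a ledger tag,
+ *	the object has an owner (vo_owner != NULL), and
+ *	the compressed copy is going away (the C_KEEP case above
+ *	does not apply);
+ * the vm_object_owner_compressed_update(cur_object, -1) call
+ * below is that update.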
*/ } else { /* * One less compressed - * purgeable page for + * purgeable/tagged page for * cur_object's owner. */ - vm_purgeable_compressed_update( + vm_object_owner_compressed_update( cur_object, -1); } if (insert_cur_object) { - vm_page_insert(m, cur_object, cur_offset); + vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset)); + m_object = cur_object; } else { - vm_page_insert(m, object, offset); + vm_page_insert(m, object, vm_object_trunc_page(offset)); + m_object = object; } - if ((m->object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { - /* + if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { + /* * If the page is not cacheable, * we can't let its contents * linger in the data cache * after the decompression. */ - pmap_sync_page_attributes_phys(m->phys_page); + pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m)); } type_of_fault = my_fault_type; - VM_STAT_INCR(decompressions); + VM_STAT_DECOMPRESSIONS(); if (cur_object != object) { if (insert_cur_object) { @@ -4329,97 +5055,237 @@ FastPmapEnter: * that the pager doesn't have this page */ } - if (cur_object->shadow == VM_OBJECT_NULL) { + if (cur_object->shadow == VM_OBJECT_NULL || + resilient_media_retry) { /* * Zero fill fault. Page gets * inserted into the original object. */ if (cur_object->shadow_severed || - VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) - { - if (object != cur_object) - vm_object_unlock(cur_object); + VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) || + cur_object == compressor_object || + cur_object == kernel_object || + cur_object == vm_submap_object) { + if (object != cur_object) { + vm_object_unlock(cur_object); + } vm_object_unlock(object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } kr = KERN_MEMORY_ERROR; goto done; } - if (vm_backing_store_low) { - /* - * we are protecting the system from - * backing store exhaustion... - * must take the slow path if we're - * not privileged - */ - if (!(current_task()->priv_flags & VM_BACKING_STORE_PRIV)) - break; - } - if (cur_object != object) { + if (cur_object != object) { vm_object_unlock(cur_object); cur_object = object; } if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade so do a full retry on the fault * since we dropped the object lock which * could allow another thread to insert * a page at this offset */ - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } goto RetryFault; } } - m = vm_page_alloc(object, offset); + if (!object->internal) { + panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object); + } + m = vm_page_alloc(object, vm_object_trunc_page(offset)); + m_object = NULL; if (m == VM_PAGE_NULL) { - /* + /* * no free page currently available... * must take the slow path */ break; } + m_object = object; /* - * Now zero fill page... - * the page is probably going to - * be written soon, so don't bother - * to clear the modified bit + * Zeroing the page and entering into it into the pmap + * represents a significant amount of the zero fill fault handler's work. * - * NOTE: This code holds the map - * lock across the zero fill. 
+ * To improve fault scalability, we'll drop the object lock, if it appears contended, + * now that we've inserted the page into the vm object. + * Before dropping the lock, we need to check protection bits and set the + * mapped bits on the page. Then we can mark the page busy, drop the lock, + * zero it, and do the pmap enter. We'll need to reacquire the lock + * to clear the busy bit and wake up any waiters. */ - type_of_fault = vm_fault_zero_page(m, map->no_zero_fill); + vm_fault_cs_clear(m); + m->vmp_pmapped = TRUE; + if (map->no_zero_fill) { + type_of_fault = DBG_NZF_PAGE_FAULT; + } else { + type_of_fault = DBG_ZERO_FILL_FAULT; + } + { + pmap_t destination_pmap; + vm_map_offset_t destination_pmap_vaddr; + vm_prot_t enter_fault_type; + if (caller_pmap) { + destination_pmap = caller_pmap; + destination_pmap_vaddr = caller_pmap_addr; + } else { + destination_pmap = pmap; + destination_pmap_vaddr = vaddr; + } + if (change_wiring) { + enter_fault_type = VM_PROT_NONE; + } else { + enter_fault_type = caller_prot; + } + kr = vm_fault_enter_prepare(m, + destination_pmap, + destination_pmap_vaddr, + &prot, + caller_prot, + fault_page_size, + fault_phys_offset, + change_wiring, + enter_fault_type, + &fault_info, + &type_of_fault, + &page_needs_data_sync); + if (kr != KERN_SUCCESS) { + goto zero_fill_cleanup; + } - goto FastPmapEnter; - } + if (object_is_contended) { + /* + * At this point the page is in the vm object, but not on a paging queue. + * Since it's accessible to another thread but its contents are invalid + * (it hasn't been zeroed) mark it busy before dropping the object lock. + */ + m->vmp_busy = TRUE; + vm_object_unlock(object); + } + if (type_of_fault == DBG_ZERO_FILL_FAULT) { + /* + * Now zero fill page... + * the page is probably going to + * be written soon, so don't bother + * to clear the modified bit + * + * NOTE: This code holds the map + * lock across the zero fill. 
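+ *
+ * (In the contended variant the same zero fill runs with the
+ * object lock dropped, relying on vmp_busy for exclusion; a
+ * hedged sketch of that ordering:
+ *
+ *	m->vmp_busy = TRUE;		set above, before unlocking
+ *	vm_object_unlock(object);
+ *	vm_page_zero_fill(m);		no object lock held here
+ *	vm_fault_pmap_enter(...);	pmap enter without the lock
+ *	vm_object_lock(object);		retaken for the cleanup)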
+ */ + vm_page_zero_fill(m); + VM_STAT_INCR(zero_fill_count); + DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL); + } + if (page_needs_data_sync) { + pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m)); + } + + if (top_object != VM_OBJECT_NULL) { + need_retry_ptr = &need_retry; + } else { + need_retry_ptr = NULL; + } + if (object_is_contended) { + kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr, + fault_page_size, fault_phys_offset, + m, &prot, caller_prot, enter_fault_type, wired, + fault_info.pmap_options, need_retry_ptr); + vm_object_lock(object); + } else { + kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr, + fault_page_size, fault_phys_offset, + m, &prot, caller_prot, enter_fault_type, wired, + fault_info.pmap_options, need_retry_ptr); + } + } +zero_fill_cleanup: + if (!VM_DYNAMIC_PAGING_ENABLED() && + (object->purgable == VM_PURGABLE_DENY || + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE)) { + vm_page_lockspin_queues(); + if (!VM_DYNAMIC_PAGING_ENABLED()) { + vm_fault_enqueue_throttled_locked(m); + } + vm_page_unlock_queues(); + } + vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr); + + vm_fault_complete( + map, + real_map, + object, + m_object, + m, + offset, + trace_real_vaddr, + &fault_info, + caller_prot, + real_vaddr, + type_of_fault, + need_retry, + kr, + physpage_p, + prot, + top_object, + need_collapse, + cur_offset, + fault_type, + &written_on_object, + &written_on_pager, + &written_on_offset); + top_object = VM_OBJECT_NULL; + if (need_retry == TRUE) { + /* + * vm_fault_enter couldn't complete the PMAP_ENTER... + * at this point we don't hold any locks so it's safe + * to ask the pmap layer to expand the page table to + * accommodate this mapping... once expanded, we'll + * re-drive the fault which should result in vm_fault_enter + * being able to successfully enter the mapping this time around + */ + (void)pmap_enter_options( + pmap, vaddr, 0, 0, 0, 0, 0, + PMAP_OPTIONS_NOENTER, NULL); + + need_retry = FALSE; + goto RetryFault; + } + goto done; + } /* * On to the next level in the shadow chain */ cur_offset += cur_object->vo_shadow_offset; new_object = cur_object->shadow; + fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset); /* * take the new_object's lock with the indicated state */ - if (cur_object_lock_type == OBJECT_LOCK_SHARED) - vm_object_lock_shared(new_object); - else - vm_object_lock(new_object); + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + vm_object_lock_shared(new_object); + } else { + vm_object_lock(new_object); + } - if (cur_object != object) + if (cur_object != object) { vm_object_unlock(cur_object); + } cur_object = new_object; @@ -4430,33 +5296,73 @@ FastPmapEnter: * Cleanup from fast fault failure. Drop any object * lock other than original and drop map lock. 
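The need_retry path above asks pmap_enter_options() with PMAP_OPTIONS_NOENTER to expand the page tables without entering a mapping, then re-drives the fault. A toy two-level table with the same shape, assuming simplified structures; a zero PTE models the no-enter call:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define TOY_ENTRIES 512

struct toy_pmap {
	uint64_t *l2[TOY_ENTRIES]; /* second level allocated on demand */
};

static bool
toy_enter(struct toy_pmap *p, unsigned l1, unsigned l2, uint64_t pte,
    bool may_allocate)
{
	if (p->l2[l1] == NULL) {
		if (!may_allocate) {
			return false; /* caller must expand and retry */
		}
		p->l2[l1] = calloc(TOY_ENTRIES, sizeof(uint64_t));
		if (p->l2[l1] == NULL) {
			return false;
		}
	}
	if (pte != 0) { /* pte == 0 models PMAP_OPTIONS_NOENTER */
		p->l2[l1][l2] = pte;
	}
	return true;
}

int
main(void)
{
	struct toy_pmap pmap = {0};

	if (!toy_enter(&pmap, 3, 7, 0x1234, false)) { /* fast path fails */
		toy_enter(&pmap, 3, 7, 0, true);       /* expand only */
		toy_enter(&pmap, 3, 7, 0x1234, false); /* re-drive succeeds */
	}
	free(pmap.l2[3]);
	return 0;
}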
*/ - if (object != cur_object) + if (object != cur_object) { vm_object_unlock(cur_object); + } /* * must own the object lock exclusively at this point */ if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly * take the lock exclusively * no need to retry the fault at this * point since "vm_fault_page" will * completely re-evaluate the state */ - vm_object_lock(object); + vm_object_lock(object); } } handle_copy_delay: vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } + + if (__improbable(object == compressor_object || + object == kernel_object || + object == vm_submap_object)) { + /* + * These objects are explicitly managed and populated by the + * kernel. The virtual ranges backed by these objects should + * either have wired pages or "holes" that are not supposed to + * be accessed at all until they get explicitly populated. + * We should never have to resolve a fault on a mapping backed + * by one of these VM objects and providing a zero-filled page + * would be wrong here, so let's fail the fault and let the + * caller crash or recover. + */ + vm_object_unlock(object); + kr = KERN_MEMORY_ERROR; + goto done; + } + + assert(object != compressor_object); + assert(object != kernel_object); + assert(object != vm_submap_object); + + if (resilient_media_retry) { + /* + * We could get here if we failed to get a free page + * to zero-fill and had to take the slow path again. + * Reset our "recovery-from-failed-media" state. + */ + assert(resilient_media_object != VM_OBJECT_NULL); + assert(resilient_media_offset != (vm_object_offset_t)-1); + /* release our extra reference on failed object */ +// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object); + vm_object_deallocate(resilient_media_object); + resilient_media_object = VM_OBJECT_NULL; + resilient_media_offset = (vm_object_offset_t)-1; + resilient_media_retry = FALSE; + } - /* + /* * Make a reference to this object to * prevent its disposal while we are messing with * it. Once we have the reference, the map is free @@ -4466,18 +5372,17 @@ handle_copy_delay: vm_object_reference_locked(object); vm_object_paging_begin(object); - XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); - + set_thread_pagein_error(cthread, 0); error_code = 0; result_page = VM_PAGE_NULL; kr = vm_fault_page(object, offset, fault_type, - (change_wiring && !wired), - FALSE, /* page not looked up */ - &prot, &result_page, &top_page, - &type_of_fault, - &error_code, map->no_zero_fill, - FALSE, &fault_info); + (change_wiring && !wired), + FALSE, /* page not looked up */ + &prot, &result_page, &top_page, + &type_of_fault, + &error_code, map->no_zero_fill, + FALSE, &fault_info); /* * if kr != VM_FAULT_SUCCESS, then the paging reference @@ -4489,87 +5394,137 @@ handle_copy_delay: * * the object is returned locked with a paging reference * - * if top_page != NULL, then it's BUSY and the + * if top_page != NULL, then it's BUSY and the * object it belongs to has a paging reference * but is returned unlocked */ if (kr != VM_FAULT_SUCCESS && kr != VM_FAULT_SUCCESS_NO_VM_PAGE) { - /* - * we didn't succeed, lose the object reference immediately. 
- */ - vm_object_deallocate(object); + if (kr == VM_FAULT_MEMORY_ERROR && + fault_info.resilient_media) { + assertf(object->internal, "object %p", object); + /* + * This fault failed but the mapping was + * "media resilient", so we'll retry the fault in + * recovery mode to get a zero-filled page in the + * top object. + * Keep the reference on the failing object so + * that we can check that the mapping is still + * pointing to it when we retry the fault. + */ +// printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page); + assert(!resilient_media_retry); /* no double retry */ + assert(resilient_media_object == VM_OBJECT_NULL); + assert(resilient_media_offset == (vm_object_offset_t)-1); + resilient_media_retry = TRUE; + resilient_media_object = object; + resilient_media_offset = offset; +// printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset); + goto RetryFault; + } else { + /* + * we didn't succeed, lose the object reference + * immediately. + */ + vm_object_deallocate(object); + object = VM_OBJECT_NULL; /* no longer valid */ + } /* * See why we failed, and take corrective action. */ switch (kr) { case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait((change_wiring) ? - THREAD_UNINT : - THREAD_ABORTSAFE)) + if (vm_page_wait((change_wiring) ? + THREAD_UNINT : + THREAD_ABORTSAFE)) { goto RetryFault; - /* - * fall thru - */ + } + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: kr = KERN_ABORTED; goto done; case VM_FAULT_RETRY: goto RetryFault; case VM_FAULT_MEMORY_ERROR: - if (error_code) + if (error_code) { kr = error_code; - else + } else { kr = KERN_MEMORY_ERROR; + } goto done; default: panic("vm_fault: unexpected error 0x%x from " - "vm_fault_page()\n", kr); + "vm_fault_page()\n", kr); } } m = result_page; + m_object = NULL; if (m != VM_PAGE_NULL) { + m_object = VM_PAGE_OBJECT(m); assert((change_wiring && !wired) ? - (top_page == VM_PAGE_NULL) : - ((top_page == VM_PAGE_NULL) == (m->object == object))); + (top_page == VM_PAGE_NULL) : + ((top_page == VM_PAGE_NULL) == (m_object == object))); } /* * What to do with the resulting page from vm_fault_page * if it doesn't get entered into the physical map: */ -#define RELEASE_PAGE(m) \ - MACRO_BEGIN \ - PAGE_WAKEUP_DONE(m); \ - if (!m->active && !m->inactive && !m->throttled) { \ - vm_page_lockspin_queues(); \ - if (!m->active && !m->inactive && !m->throttled) \ - vm_page_activate(m); \ - vm_page_unlock_queues(); \ - } \ +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ + } \ MACRO_END + + object_locks_dropped = FALSE; /* * We must verify that the maps have not changed - * since our last lookup. + * since our last lookup. vm_map_verify() needs the + * map lock (shared) but we are holding object locks. + * So we do a try_lock() first and, if that fails, we + * drop the object locks and go in for the map lock again.
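A sketch of the try-then-drop ordering described just above, modeled with pthread locks: try the map lock while still holding the object lock, and only block on it after the object lock has been dropped. The caller is assumed to hold object_lock on entry and must re-take it if the function reports a drop:

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true if the object lock was dropped and must be re-taken. */
static bool
lock_map_for_verify(void)
{
	bool object_locks_dropped = false;

	if (pthread_rwlock_tryrdlock(&map_lock) != 0) {
		/* never sleep on the map lock while holding the object lock */
		pthread_mutex_unlock(&object_lock);
		object_locks_dropped = true;
		pthread_rwlock_rdlock(&map_lock); /* now safe to block */
	}
	return object_locks_dropped;
}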
*/ - if (m != VM_PAGE_NULL) { - old_copy_object = m->object->copy; - vm_object_unlock(m->object); - } else { - old_copy_object = VM_OBJECT_NULL; - vm_object_unlock(object); + if (!vm_map_try_lock_read(original_map)) { + if (m != VM_PAGE_NULL) { + old_copy_object = m_object->copy; + vm_object_unlock(m_object); + } else { + old_copy_object = VM_OBJECT_NULL; + vm_object_unlock(object); + } + + object_locks_dropped = TRUE; + + vm_map_lock_read(original_map); } - /* - * no object locks are held at this point - */ if ((map != original_map) || !vm_map_verify(map, &version)) { - vm_object_t retry_object; - vm_object_offset_t retry_offset; - vm_prot_t retry_prot; + if (object_locks_dropped == FALSE) { + if (m != VM_PAGE_NULL) { + old_copy_object = m_object->copy; + vm_object_unlock(m_object); + } else { + old_copy_object = VM_OBJECT_NULL; + vm_object_unlock(object); + } + + object_locks_dropped = TRUE; + } + + /* + * no object locks are held at this point + */ + vm_object_t retry_object; + vm_object_offset_t retry_offset; + vm_prot_t retry_prot; /* * To avoid trying to write_lock the map while another @@ -4581,41 +5536,43 @@ handle_copy_delay: * take another fault. */ map = original_map; - vm_map_lock_read(map); kr = vm_map_lookup_locked(&map, vaddr, - fault_type & ~VM_PROT_WRITE, - OBJECT_LOCK_EXCLUSIVE, &version, - &retry_object, &retry_offset, &retry_prot, - &wired, - &fault_info, - &real_map); + fault_type & ~VM_PROT_WRITE, + OBJECT_LOCK_EXCLUSIVE, &version, + &retry_object, &retry_offset, &retry_prot, + &wired, + &fault_info, + &real_map, + NULL); pmap = real_map->pmap; if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); if (m != VM_PAGE_NULL) { - /* + assert(VM_PAGE_OBJECT(m) == m_object); + + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup and do the * PAGE_WAKEUP_DONE in RELEASE_PAGE */ - vm_object_lock(m->object); + vm_object_lock(m_object); RELEASE_PAGE(m); - vm_fault_cleanup(m->object, top_page); + vm_fault_cleanup(m_object, top_page); } else { - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup */ - vm_object_lock(object); + vm_object_lock(object); - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(object, top_page); } vm_object_deallocate(object); @@ -4624,32 +5581,34 @@ handle_copy_delay: vm_object_unlock(retry_object); if ((retry_object != object) || (retry_offset != offset)) { - vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { - /* + assert(VM_PAGE_OBJECT(m) == m_object); + + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup and do the * PAGE_WAKEUP_DONE in RELEASE_PAGE */ - vm_object_lock(m->object); + vm_object_lock(m_object); RELEASE_PAGE(m); - vm_fault_cleanup(m->object, top_page); + vm_fault_cleanup(m_object, top_page); } else { - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup */ - vm_object_lock(object); + vm_object_lock(object); - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(object, top_page); } vm_object_deallocate(object); @@ -4659,37 +5618,88 @@ handle_copy_delay: * Check whether the protection has changed or the object * has been copied while we left the map unlocked. */ - prot &= retry_prot; + if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) { + /* If the pmap layer cares, pass the full set. 
*/ + prot = retry_prot; + } else { + prot &= retry_prot; + } } - if (m != VM_PAGE_NULL) { - vm_object_lock(m->object); - if (m->object->copy != old_copy_object) { - /* - * The copy object changed while the top-level object - * was unlocked, so take away write permission. - */ - prot &= ~VM_PROT_WRITE; + if (object_locks_dropped == TRUE) { + if (m != VM_PAGE_NULL) { + vm_object_lock(m_object); + + if (m_object->copy != old_copy_object) { + /* + * The copy object changed while the top-level object + * was unlocked, so take away write permission. + */ + assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)); + prot &= ~VM_PROT_WRITE; + } + } else { + vm_object_lock(object); } - } else - vm_object_lock(object); + + object_locks_dropped = FALSE; + } + + if (!need_copy && + !fault_info.no_copy_on_read && + m != VM_PAGE_NULL && + VM_PAGE_OBJECT(m) != object && + !VM_PAGE_OBJECT(m)->pager_trusted && + vm_protect_privileged_from_untrusted && + !((prot & VM_PROT_EXECUTE) && + VM_PAGE_OBJECT(m)->code_signed && + pmap_get_vm_map_cs_enforced(caller_pmap ? caller_pmap : pmap)) && + current_proc_is_privileged()) { + /* + * We found the page we want in an "untrusted" VM object + * down the shadow chain. Since the target is "privileged" + * we want to perform a copy-on-read of that page, so that the + * mapped object gets a stable copy and does not have to + * rely on the "untrusted" object to provide the same + * contents if the page gets reclaimed and has to be paged + * in again later on. + * + * Special case: if the mapping is executable and the untrusted + * object is code-signed and the process is "cs_enforced", we + * do not copy-on-read because that would break code-signing + * enforcement expectations (an executable page must belong + * to a code-signed object) and we can rely on code-signing + * to re-validate the page if it gets evicted and paged back in. + */ +// printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset); + vm_copied_on_read++; + need_copy_on_read = TRUE; + need_copy = TRUE; + } else { + need_copy_on_read = FALSE; + } /* * If we want to wire down this page, but no longer have * adequate permissions, we must start all over. + * If we decided to copy-on-read, we must also start all over. */ - if (wired && (fault_type != (prot | VM_PROT_WRITE))) { - - vm_map_verify_done(map, &version); - if (real_map != map) + if ((wired && (fault_type != (prot | VM_PROT_WRITE))) || + need_copy_on_read) { + vm_map_unlock_read(map); + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); + RELEASE_PAGE(m); - vm_fault_cleanup(m->object, top_page); - } else - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(m_object, top_page); + } else { + vm_fault_cleanup(object, top_page); + } vm_object_deallocate(object); @@ -4703,88 +5713,94 @@ handle_copy_delay: * the pageout queues. If the pageout daemon comes * across the page, it will remove it from the queues. 
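The copy-on-read decision above, restated as a standalone predicate; every field below is a simplified stand-in for the corresponding object, page, or fault-info state named in the code:

#include <stdbool.h>

struct cor_state {
	bool need_copy;
	bool no_copy_on_read;    /* fault_info.no_copy_on_read */
	bool page_in_shadow;     /* VM_PAGE_OBJECT(m) != object */
	bool pager_trusted;      /* page object's pager_trusted */
	bool executable_mapping; /* prot & VM_PROT_EXECUTE */
	bool code_signed;        /* page object is code-signed */
	bool cs_enforced;        /* target pmap enforces code signing */
	bool privileged_process; /* current_proc_is_privileged() */
	bool protect_privileged; /* vm_protect_privileged_from_untrusted */
};

static bool
want_copy_on_read(const struct cor_state *s)
{
	if (s->need_copy || s->no_copy_on_read || !s->page_in_shadow) {
		return false;
	}
	if (s->pager_trusted || !s->protect_privileged ||
	    !s->privileged_process) {
		return false;
	}
	/*
	 * Executable pages of cs-enforced, code-signed objects are left
	 * alone so code-signing can re-validate them on page-in.
	 */
	if (s->executable_mapping && s->code_signed && s->cs_enforced) {
		return false;
	}
	return true;
}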
*/ + if (fault_page_size < PAGE_SIZE) { + DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot); + assertf((!(fault_phys_offset & FOURK_PAGE_MASK) && + fault_phys_offset < PAGE_SIZE), + "0x%llx\n", (uint64_t)fault_phys_offset); + } else { + assertf(fault_phys_offset == 0, + "0x%llx\n", (uint64_t)fault_phys_offset); + } if (caller_pmap) { kr = vm_fault_enter(m, - caller_pmap, - caller_pmap_addr, - prot, - caller_prot, - wired, - change_wiring, - fault_info.no_cache, - fault_info.cs_bypass, - fault_info.user_tag, - fault_info.pmap_options, - NULL, - &type_of_fault); + caller_pmap, + caller_pmap_addr, + fault_page_size, + fault_phys_offset, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + NULL, + &type_of_fault); } else { kr = vm_fault_enter(m, - pmap, - vaddr, - prot, - caller_prot, - wired, - change_wiring, - fault_info.no_cache, - fault_info.cs_bypass, - fault_info.user_tag, - fault_info.pmap_options, - NULL, - &type_of_fault); + pmap, + vaddr, + fault_page_size, + fault_phys_offset, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + NULL, + &type_of_fault); + } + assert(VM_PAGE_OBJECT(m) == m_object); + + { + int event_code = 0; + + if (m_object->internal) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + } else if (m_object->object_is_shared_cache) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + } else { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid(), 0); + KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0); + + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); } if (kr != KERN_SUCCESS) { /* abort this page fault */ - vm_map_verify_done(map, &version); - if (real_map != map) + vm_map_unlock_read(map); + if (real_map != map) { vm_map_unlock(real_map); + } PAGE_WAKEUP_DONE(m); - vm_fault_cleanup(m->object, top_page); + vm_fault_cleanup(m_object, top_page); vm_object_deallocate(object); goto done; } if (physpage_p != NULL) { /* for vm_map_wire_and_extract() */ - *physpage_p = m->phys_page; + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); if (prot & VM_PROT_WRITE) { - vm_object_lock_assert_exclusive(m->object); - m->dirty = TRUE; + vm_object_lock_assert_exclusive(m_object); + m->vmp_dirty = TRUE; } } } else { + vm_map_entry_t entry; + vm_map_offset_t laddr; + vm_map_offset_t ldelta, hdelta; - vm_map_entry_t entry; - vm_map_offset_t laddr; - vm_map_offset_t ldelta, hdelta; - - /* + /* * do a pmap block mapping from the physical address - * in the object + * in the object */ -#ifdef ppc - /* While we do not worry about execution protection in */ - /* general, certian pages may have instruction execution */ - /* disallowed. We will check here, and if not allowed */ - /* to execute, we return with a protection failure. 
*/ - - if ((fault_type & VM_PROT_EXECUTE) && - (!pmap_eligible_for_execute((ppnum_t)(object->vo_shadow_offset >> 12)))) { - - vm_map_verify_done(map, &version); - - if (real_map != map) - vm_map_unlock(real_map); - - vm_fault_cleanup(object, top_page); - vm_object_deallocate(object); - - kr = KERN_PROTECTION_FAILURE; - goto done; - } -#endif /* ppc */ - - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (original_map != map) { vm_map_unlock_read(map); @@ -4798,36 +5814,40 @@ handle_copy_delay: ldelta = 0xFFFFF000; while (vm_map_lookup_entry(map, laddr, &entry)) { - if (ldelta > (laddr - entry->vme_start)) + if (ldelta > (laddr - entry->vme_start)) { ldelta = laddr - entry->vme_start; - if (hdelta > (entry->vme_end - laddr)) + } + if (hdelta > (entry->vme_end - laddr)) { hdelta = entry->vme_end - laddr; + } if (entry->is_sub_map) { - - laddr = ((laddr - entry->vme_start) - + VME_OFFSET(entry)); + laddr = ((laddr - entry->vme_start) + + VME_OFFSET(entry)); vm_map_lock_read(VME_SUBMAP(entry)); - if (map != real_map) + if (map != real_map) { vm_map_unlock_read(map); + } if (entry->use_pmap) { vm_map_unlock_read(real_map); real_map = VME_SUBMAP(entry); } map = VME_SUBMAP(entry); - } else { break; } } - if (vm_map_lookup_entry(map, laddr, &entry) && + if (vm_map_lookup_entry(map, laddr, &entry) && (VME_OBJECT(entry) != NULL) && (VME_OBJECT(entry) == object)) { - int superpage; + uint16_t superpage; if (!object->pager_created && - object->phys_contiguous) { + object->phys_contiguous && + VME_OFFSET(entry) == 0 && + (entry->vme_end - entry->vme_start == object->vo_size) && + VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) { superpage = VM_MEM_SUPERPAGE; } else { superpage = 0; @@ -4836,11 +5856,11 @@ handle_copy_delay: if (superpage && physpage_p) { /* for vm_map_wire_and_extract() */ *physpage_p = (ppnum_t) - ((((vm_map_offset_t) - object->vo_shadow_offset) - + VME_OFFSET(entry) - + (laddr - entry->vme_start)) - >> PAGE_SHIFT); + ((((vm_map_offset_t) + object->vo_shadow_offset) + + VME_OFFSET(entry) + + (laddr - entry->vme_start)) + >> PAGE_SHIFT); } if (caller_pmap) { @@ -4848,49 +5868,89 @@ handle_copy_delay: * Set up a block mapped area */ assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); - pmap_map_block(caller_pmap, - (addr64_t)(caller_pmap_addr - ldelta), - (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + - VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), - (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, - (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); - } else { + kr = pmap_map_block(caller_pmap, + (addr64_t)(caller_pmap_addr - ldelta), + (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); + + if (kr != KERN_SUCCESS) { + goto cleanup; + } + } else { /* * Set up a block mapped area */ assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); - pmap_map_block(real_map->pmap, - (addr64_t)(vaddr - ldelta), - (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + - VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), - (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, - (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); + kr = pmap_map_block(real_map->pmap, + (addr64_t)(vaddr - 
ldelta), + (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); + + if (kr != KERN_SUCCESS) { + goto cleanup; + } } } } + /* + * Success + */ + kr = KERN_SUCCESS; + + /* + * TODO: could most of the done cases just use cleanup? + */ +cleanup: /* * Unlock everything, and return */ - vm_map_verify_done(map, &version); - if (real_map != map) + vm_map_unlock_read(map); + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { + assert(VM_PAGE_OBJECT(m) == m_object); + + if (!m_object->internal && (fault_type & VM_PROT_WRITE)) { + vm_object_paging_begin(m_object); + + assert(written_on_object == VM_OBJECT_NULL); + written_on_object = m_object; + written_on_pager = m_object->pager; + written_on_offset = m_object->paging_offset + m->vmp_offset; + } PAGE_WAKEUP_DONE(m); - vm_fault_cleanup(m->object, top_page); - } else - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(m_object, top_page); + } else { + vm_fault_cleanup(object, top_page); + } vm_object_deallocate(object); -#undef RELEASE_PAGE +#undef RELEASE_PAGE - kr = KERN_SUCCESS; done: thread_interrupt_level(interruptible_state); + if (resilient_media_object != VM_OBJECT_NULL) { + assert(resilient_media_retry); + assert(resilient_media_offset != (vm_object_offset_t)-1); + /* release extra reference on failed object */ +// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object); + vm_object_deallocate(resilient_media_object); + resilient_media_object = VM_OBJECT_NULL; + resilient_media_offset = (vm_object_offset_t)-1; + resilient_media_retry = FALSE; + } + assert(!resilient_media_retry); + /* * Only I/O throttle on faults which cause a pagein/swapin. 
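The ldelta/hdelta computation in the block-mapping path above clamps the span handed to pmap_map_block() so it never extends outside the map entry around the faulting address. A standalone sketch, with plain integers standing in for vm_map_offset_t and the same 0xFFFFF000 initial bound:

#include <stdint.h>

struct toy_span {
	uint64_t lo;
	uint64_t hi;
};

static struct toy_span
clamp_block_span(uint64_t laddr, uint64_t vme_start, uint64_t vme_end)
{
	uint64_t ldelta = 0xFFFFF000;
	uint64_t hdelta = 0xFFFFF000;

	if (ldelta > laddr - vme_start) {
		ldelta = laddr - vme_start; /* don't map below entry start */
	}
	if (hdelta > vme_end - laddr) {
		hdelta = vme_end - laddr;   /* don't map past entry end */
	}
	return (struct toy_span){ laddr - ldelta, laddr + hdelta };
}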
*/ @@ -4898,30 +5958,48 @@ done: throttle_lowpri_io(1); } else { if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) { - if ((throttle_delay = vm_page_throttled(TRUE))) { - if (vm_debug_events) { - if (type_of_fault == DBG_COMPRESSOR_FAULT) + if (type_of_fault == DBG_COMPRESSOR_FAULT) { VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); - else if (type_of_fault == DBG_COW_FAULT) + } else if (type_of_fault == DBG_COW_FAULT) { VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); - else + } else { VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + } } delay(throttle_delay); } } } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)vaddr >> 32), - vaddr, - kr, - type_of_fault, - 0); - return (kr); + if (written_on_object) { + vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64); + + vm_object_lock(written_on_object); + vm_object_paging_end(written_on_object); + vm_object_unlock(written_on_object); + + written_on_object = VM_OBJECT_NULL; + } + + if (rtfault) { + vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault); + } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + kr, + vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), + 0); + + if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) { + DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr); + } + + return kr; } /* @@ -4931,22 +6009,23 @@ done: */ kern_return_t vm_fault_wire( - vm_map_t map, - vm_map_entry_t entry, + vm_map_t map, + vm_map_entry_t entry, vm_prot_t prot, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p) + vm_tag_t wire_tag, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - - register vm_map_offset_t va; - register vm_map_offset_t end_addr = entry->vme_end; - register kern_return_t rc; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; + kern_return_t rc; + vm_map_size_t effective_page_size; assert(entry->in_transition); - if ((VME_OBJECT(entry) != NULL) && - !entry->is_sub_map && + if ((VME_OBJECT(entry) != NULL) && + !entry->is_sub_map && VME_OBJECT(entry)->phys_contiguous) { return KERN_SUCCESS; } @@ -4957,37 +6036,40 @@ vm_fault_wire( * page tables and such can be locked down as well. */ - pmap_pageable(pmap, pmap_addr, - pmap_addr + (end_addr - entry->vme_start), FALSE); + pmap_pageable(pmap, pmap_addr, + pmap_addr + (end_addr - entry->vme_start), FALSE); /* * We simulate a fault to get the page and enter it * in the physical map. */ - for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { - rc = vm_fault_wire_fast(map, va, prot, entry, pmap, - pmap_addr + (va - entry->vme_start), - physpage_p); + effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + for (va = entry->vme_start; + va < end_addr; + va += effective_page_size) { + rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap, + pmap_addr + (va - entry->vme_start), + physpage_p); if (rc != KERN_SUCCESS) { - rc = vm_fault_internal(map, va, prot, TRUE, - ((pmap == kernel_pmap) - ? THREAD_UNINT - : THREAD_ABORTSAFE), - pmap, - (pmap_addr + - (va - entry->vme_start)), - physpage_p); + rc = vm_fault_internal(map, va, prot, TRUE, wire_tag, + ((pmap == kernel_pmap) + ? 
THREAD_UNINT + : THREAD_ABORTSAFE), + pmap, + (pmap_addr + + (va - entry->vme_start)), + physpage_p); DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL); } if (rc != KERN_SUCCESS) { - struct vm_map_entry tmp_entry = *entry; + struct vm_map_entry tmp_entry = *entry; /* unwire wired pages */ tmp_entry.vme_end = va; - vm_fault_unwire(map, - &tmp_entry, FALSE, pmap, pmap_addr); + vm_fault_unwire(map, + &tmp_entry, FALSE, pmap, pmap_addr); return rc; } @@ -5002,16 +6084,18 @@ vm_fault_wire( */ void vm_fault_unwire( - vm_map_t map, - vm_map_entry_t entry, - boolean_t deallocate, - pmap_t pmap, - vm_map_offset_t pmap_addr) + vm_map_t map, + vm_map_entry_t entry, + boolean_t deallocate, + pmap_t pmap, + vm_map_offset_t pmap_addr) { - register vm_map_offset_t va; - register vm_map_offset_t end_addr = entry->vme_end; - vm_object_t object; - struct vm_object_fault_info fault_info; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; + vm_object_t object; + struct vm_object_fault_info fault_info = {}; + unsigned int unwired_pages; + vm_map_size_t effective_page_size; object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry); @@ -5021,13 +6105,13 @@ vm_fault_unwire( * anything to undo here. */ - if (object != VM_OBJECT_NULL && object->phys_contiguous) + if (object != VM_OBJECT_NULL && object->phys_contiguous) { return; + } fault_info.interruptible = THREAD_UNINT; fault_info.behavior = entry->behavior; fault_info.user_tag = VME_ALIAS(entry); - fault_info.pmap_options = 0; if (entry->iokit_acct || (!entry->is_sub_map && !entry->use_pmap)) { fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; @@ -5036,58 +6120,54 @@ vm_fault_unwire( fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); fault_info.no_cache = entry->no_cache; fault_info.stealth = TRUE; - fault_info.io_sync = FALSE; - fault_info.cs_bypass = FALSE; - fault_info.mark_zf_absent = FALSE; - fault_info.batch_pmap_op = FALSE; + + unwired_pages = 0; /* * Since the pages are wired down, we must be able to * get their mappings from the physical map system. 
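Both the wiring loop above and the unwiring loop below step through the entry at the map's effective page size, the smaller of the map's page size and the kernel's. A sketch of that traversal; wire_one stands in for the fast-then-slow per-page fault calls:

#include <stdint.h>

static void
wire_range(uint64_t vme_start, uint64_t end_addr,
    uint64_t map_page_size, uint64_t kernel_page_size,
    void (*wire_one)(uint64_t va))
{
	uint64_t effective_page_size =
	    map_page_size < kernel_page_size ? map_page_size
	    : kernel_page_size;

	for (uint64_t va = vme_start; va < end_addr;
	    va += effective_page_size) {
		wire_one(va); /* fast attempt, then slow vm_fault_internal */
	}
}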
*/ - for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { - + effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + for (va = entry->vme_start; + va < end_addr; + va += effective_page_size) { if (object == VM_OBJECT_NULL) { if (pmap) { - pmap_change_wiring(pmap, - pmap_addr + (va - entry->vme_start), FALSE); + pmap_change_wiring(pmap, + pmap_addr + (va - entry->vme_start), FALSE); } - (void) vm_fault(map, va, VM_PROT_NONE, - TRUE, THREAD_UNINT, pmap, pmap_addr); + (void) vm_fault(map, va, VM_PROT_NONE, + TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr); } else { - vm_prot_t prot; - vm_page_t result_page; - vm_page_t top_page; - vm_object_t result_object; + vm_prot_t prot; + vm_page_t result_page; + vm_page_t top_page; + vm_object_t result_object; vm_fault_return_t result; - if (end_addr - va > (vm_size_t) -1) { - /* 32-bit overflow */ - fault_info.cluster_size = (vm_size_t) (0 - PAGE_SIZE); - } else { - fault_info.cluster_size = (vm_size_t) (end_addr - va); - assert(fault_info.cluster_size == end_addr - va); + /* cap cluster size at maximum UPL size */ + upl_size_t cluster_size; + if (os_sub_overflow(end_addr, va, &cluster_size)) { + cluster_size = 0 - (upl_size_t)PAGE_SIZE; } + fault_info.cluster_size = cluster_size; do { prot = VM_PROT_NONE; vm_object_lock(object); vm_object_paging_begin(object); - XPR(XPR_VM_FAULT, - "vm_fault_unwire -> vm_fault_page\n", - 0,0,0,0,0); result_page = VM_PAGE_NULL; - result = vm_fault_page( + result = vm_fault_page( object, (VME_OFFSET(entry) + - (va - entry->vme_start)), + (va - entry->vme_start)), VM_PROT_NONE, TRUE, FALSE, /* page not looked up */ &prot, &result_page, &top_page, (int *)0, - NULL, map->no_zero_fill, + NULL, map->no_zero_fill, FALSE, &fault_info); } while (result == VM_FAULT_RETRY); @@ -5097,12 +6177,13 @@ vm_fault_unwire( * move on to the next one in case the remaining pages are mapped from * different objects. During a forced unmount, the object is terminated * so the alive flag will be false if this happens. A forced unmount will - * will occur when an external disk is unplugged before the user does an + * will occur when an external disk is unplugged before the user does an * eject, so we don't want to panic in that situation. 
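The cluster-size capping above relies on os_sub_overflow() to detect that (end_addr - va) does not fit the 32-bit upl_size_t, in which case it falls back to the largest page-aligned 32-bit value. A standalone sketch using the __builtin_sub_overflow primitive that os_sub_overflow() is understood to wrap; the typedef and page size are illustrative:

#include <stdint.h>

typedef uint32_t toy_upl_size_t; /* stand-in for upl_size_t */

#define TOY_PAGE_SIZE 0x1000u

static toy_upl_size_t
capped_cluster_size(uint64_t end_addr, uint64_t va)
{
	toy_upl_size_t cluster_size;

	if (__builtin_sub_overflow(end_addr, va, &cluster_size)) {
		/* too big for 32 bits: largest page-aligned value */
		cluster_size = 0 - (toy_upl_size_t)TOY_PAGE_SIZE;
	}
	return cluster_size;
}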
*/ - if (result == VM_FAULT_MEMORY_ERROR && !object->alive) + if (result == VM_FAULT_MEMORY_ERROR && !object->alive) { continue; + } if (result == VM_FAULT_MEMORY_ERROR && object == kernel_object) { @@ -5116,29 +6197,35 @@ vm_fault_unwire( continue; } - if (result != VM_FAULT_SUCCESS) + if (result != VM_FAULT_SUCCESS) { panic("vm_fault_unwire: failure"); + } - result_object = result_page->object; + result_object = VM_PAGE_OBJECT(result_page); if (deallocate) { - assert(result_page->phys_page != - vm_page_fictitious_addr); - pmap_disconnect(result_page->phys_page); + assert(VM_PAGE_GET_PHYS_PAGE(result_page) != + vm_page_fictitious_addr); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page)); + if (VM_PAGE_WIRED(result_page)) { + unwired_pages++; + } VM_PAGE_FREE(result_page); } else { - if ((pmap) && (result_page->phys_page != vm_page_guard_addr)) - pmap_change_wiring(pmap, + if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) { + pmap_change_wiring(pmap, pmap_addr + (va - entry->vme_start), FALSE); + } if (VM_PAGE_WIRED(result_page)) { vm_page_lockspin_queues(); vm_page_unwire(result_page, TRUE); vm_page_unlock_queues(); + unwired_pages++; } - if(entry->zero_wired_pages) { - pmap_zero_page(result_page->phys_page); + if (entry->zero_wired_pages) { + pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page)); entry->zero_wired_pages = FALSE; } @@ -5154,9 +6241,23 @@ vm_fault_unwire( * such may be unwired themselves. */ - pmap_pageable(pmap, pmap_addr, - pmap_addr + (end_addr - entry->vme_start), TRUE); + pmap_pageable(pmap, pmap_addr, + pmap_addr + (end_addr - entry->vme_start), TRUE); + if (kernel_object == object) { + /* + * Would like to make user_tag in vm_object_fault_info + * vm_tag_t (unsigned short) but user_tag derives its value from + * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts + * to an _unsigned int_ which is used by non-fault_info paths throughout the + * code at many places. + * + * So, for now, an explicit truncation to unsigned short (vm_tag_t). 
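The truncation the comment above justifies, as a compilable helper: user_tag travels as an unsigned int but must fit the 16-bit tag type before the accounting call, and the assert mirrors the assertf below. The mask value is an assumption for illustration, not the kernel's VME_ALIAS_MASK:

#include <assert.h>
#include <stdint.h>

typedef uint16_t toy_vm_tag_t;

#define TOY_ALIAS_MASK 0xFFFu /* illustrative; not the kernel's value */

static toy_vm_tag_t
narrow_tag(unsigned int user_tag)
{
	/* no bits may be lost by the narrowing */
	assert((user_tag & TOY_ALIAS_MASK) == user_tag);
	return (toy_vm_tag_t)user_tag;
}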
+ */ + assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag, + "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK)); + vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages)); + } } /* @@ -5181,58 +6282,63 @@ vm_fault_unwire( */ static kern_return_t vm_fault_wire_fast( - __unused vm_map_t map, - vm_map_offset_t va, - vm_prot_t caller_prot, - vm_map_entry_t entry, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p) + __unused vm_map_t map, + vm_map_offset_t va, + __unused vm_prot_t caller_prot, + vm_tag_t wire_tag, + vm_map_entry_t entry, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - vm_object_t object; - vm_object_offset_t offset; - register vm_page_t m; - vm_prot_t prot; - thread_t thread = current_thread(); - int type_of_fault; - kern_return_t kr; + vm_object_t object; + vm_object_offset_t offset; + vm_page_t m; + vm_prot_t prot; + thread_t thread = current_thread(); + int type_of_fault; + kern_return_t kr; + vm_map_size_t fault_page_size; + vm_map_offset_t fault_phys_offset; + struct vm_object_fault_info fault_info = {}; VM_STAT_INCR(faults); - if (thread != THREAD_NULL && thread->task != TASK_NULL) - thread->task->faults++; + if (thread != THREAD_NULL && thread->task != TASK_NULL) { + thread->task->faults++; + } /* * Recovery actions */ -#undef RELEASE_PAGE -#define RELEASE_PAGE(m) { \ - PAGE_WAKEUP_DONE(m); \ - vm_page_lockspin_queues(); \ - vm_page_unwire(m, TRUE); \ - vm_page_unlock_queues(); \ +#undef RELEASE_PAGE +#define RELEASE_PAGE(m) { \ + PAGE_WAKEUP_DONE(m); \ + vm_page_lockspin_queues(); \ + vm_page_unwire(m, TRUE); \ + vm_page_unlock_queues(); \ } -#undef UNLOCK_THINGS -#define UNLOCK_THINGS { \ - vm_object_paging_end(object); \ - vm_object_unlock(object); \ +#undef UNLOCK_THINGS +#define UNLOCK_THINGS { \ + vm_object_paging_end(object); \ + vm_object_unlock(object); \ } -#undef UNLOCK_AND_DEALLOCATE -#define UNLOCK_AND_DEALLOCATE { \ - UNLOCK_THINGS; \ - vm_object_deallocate(object); \ +#undef UNLOCK_AND_DEALLOCATE +#define UNLOCK_AND_DEALLOCATE { \ + UNLOCK_THINGS; \ + vm_object_deallocate(object); \ } /* * Give up and have caller do things the hard way. */ -#define GIVE_UP { \ - UNLOCK_AND_DEALLOCATE; \ - return(KERN_FAILURE); \ +#define GIVE_UP { \ + UNLOCK_AND_DEALLOCATE; \ + return(KERN_FAILURE); \ } @@ -5241,7 +6347,7 @@ vm_fault_wire_fast( */ if (entry->is_sub_map) { assert(physpage_p == NULL); - return(KERN_FAILURE); + return KERN_FAILURE; } /* @@ -5252,7 +6358,7 @@ vm_fault_wire_fast( offset = (va - entry->vme_start) + VME_OFFSET(entry); prot = entry->protection; - /* + /* * Make a reference to this object to prevent its * disposal while we are messing with it. */ @@ -5278,19 +6384,14 @@ vm_fault_wire_fast( /* * Look for page in top-level object. If it's not there or * there's something going on, give up. - * ENCRYPTED SWAP: use the slow fault path, since we'll need to - * decrypt the page before wiring it down. 
*/ - m = vm_page_lookup(object, offset); - if ((m == VM_PAGE_NULL) || (m->busy) || (m->encrypted) || - (m->unusual && ( m->error || m->restart || m->absent))) { - + m = vm_page_lookup(object, vm_object_trunc_page(offset)); + if ((m == VM_PAGE_NULL) || (m->vmp_busy) || + (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) { GIVE_UP; } - ASSERT_PAGE_DECRYPTED(m); - - if (m->fictitious && - m->phys_page == vm_page_guard_addr) { + if (m->vmp_fictitious && + VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { /* * Guard pages are fictitious pages and are never * entered into a pmap, so let's say it's been wired... @@ -5301,19 +6402,19 @@ vm_fault_wire_fast( /* * Wire the page down now. All bail outs beyond this - * point must unwire the page. + * point must unwire the page. */ vm_page_lockspin_queues(); - vm_page_wire(m, VM_PROT_MEMORY_TAG(caller_prot), TRUE); + vm_page_wire(m, wire_tag, TRUE); vm_page_unlock_queues(); /* * Mark page busy for other threads. */ - assert(!m->busy); - m->busy = TRUE; - assert(!m->absent); + assert(!m->vmp_busy); + m->vmp_busy = TRUE; + assert(!m->vmp_absent); /* * Give up if the page is being written and there's a copy object @@ -5323,26 +6424,37 @@ vm_fault_wire_fast( GIVE_UP; } + fault_info.user_tag = VME_ALIAS(entry); + fault_info.pmap_options = 0; + if (entry->iokit_acct || + (!entry->is_sub_map && !entry->use_pmap)) { + fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; + } + + fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE); + fault_phys_offset = offset - vm_object_trunc_page(offset); + /* * Put this page into the physical map. */ type_of_fault = DBG_CACHE_HIT_FAULT; kr = vm_fault_enter(m, - pmap, - pmap_addr, - prot, - prot, - TRUE, - FALSE, - FALSE, - FALSE, - VME_ALIAS(entry), - ((entry->iokit_acct || - (!entry->is_sub_map && !entry->use_pmap)) - ? 
PMAP_OPTIONS_ALT_ACCT - : 0), - NULL, - &type_of_fault); + pmap, + pmap_addr, + fault_page_size, + fault_phys_offset, + prot, + prot, + TRUE, /* wired */ + FALSE, /* change_wiring */ + wire_tag, + &fault_info, + NULL, + &type_of_fault); + if (kr != KERN_SUCCESS) { + RELEASE_PAGE(m); + GIVE_UP; + } done: /* @@ -5352,10 +6464,11 @@ done: if (physpage_p) { /* for vm_map_wire_and_extract() */ if (kr == KERN_SUCCESS) { - *physpage_p = m->phys_page; + assert(object == VM_PAGE_OBJECT(m)); + *physpage_p = VM_PAGE_GET_PHYS_PAGE(m); if (prot & VM_PROT_WRITE) { - vm_object_lock_assert_exclusive(m->object); - m->dirty = TRUE; + vm_object_lock_assert_exclusive(object); + m->vmp_dirty = TRUE; } } else { *physpage_p = 0; @@ -5366,7 +6479,6 @@ done: UNLOCK_AND_DEALLOCATE; return kr; - } /* @@ -5377,17 +6489,18 @@ done: static void vm_fault_copy_cleanup( - vm_page_t page, - vm_page_t top_page) + vm_page_t page, + vm_page_t top_page) { - vm_object_t object = page->object; + vm_object_t object = VM_PAGE_OBJECT(page); vm_object_lock(object); PAGE_WAKEUP_DONE(page); - if (!page->active && !page->inactive && !page->throttled) { + if (!VM_PAGE_PAGEABLE(page)) { vm_page_lockspin_queues(); - if (!page->active && !page->inactive && !page->throttled) + if (!VM_PAGE_PAGEABLE(page)) { vm_page_activate(page); + } vm_page_unlock_queues(); } vm_fault_cleanup(object, top_page); @@ -5395,17 +6508,17 @@ vm_fault_copy_cleanup( static void vm_fault_copy_dst_cleanup( - vm_page_t page) + vm_page_t page) { - vm_object_t object; + vm_object_t object; if (page != VM_PAGE_NULL) { - object = page->object; + object = VM_PAGE_OBJECT(page); vm_object_lock(object); vm_page_lockspin_queues(); vm_page_unwire(page, TRUE); vm_page_unlock_queues(); - vm_object_paging_end(object); + vm_object_paging_end(object); vm_object_unlock(object); } } @@ -5439,72 +6552,59 @@ vm_fault_copy_dst_cleanup( */ kern_return_t vm_fault_copy( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_map_size_t *copy_size, /* INOUT */ - vm_object_t dst_object, - vm_object_offset_t dst_offset, - vm_map_t dst_map, - vm_map_version_t *dst_version, - int interruptible) + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_map_size_t *copy_size, /* INOUT */ + vm_object_t dst_object, + vm_object_offset_t dst_offset, + vm_map_t dst_map, + vm_map_version_t *dst_version, + int interruptible) { - vm_page_t result_page; - - vm_page_t src_page; - vm_page_t src_top_page; - vm_prot_t src_prot; + vm_page_t result_page; + + vm_page_t src_page; + vm_page_t src_top_page; + vm_prot_t src_prot; - vm_page_t dst_page; - vm_page_t dst_top_page; - vm_prot_t dst_prot; + vm_page_t dst_page; + vm_page_t dst_top_page; + vm_prot_t dst_prot; - vm_map_size_t amount_left; - vm_object_t old_copy_object; - kern_return_t error = 0; - vm_fault_return_t result; + vm_map_size_t amount_left; + vm_object_t old_copy_object; + vm_object_t result_page_object = NULL; + kern_return_t error = 0; + vm_fault_return_t result; - vm_map_size_t part_size; - struct vm_object_fault_info fault_info_src; - struct vm_object_fault_info fault_info_dst; + vm_map_size_t part_size; + struct vm_object_fault_info fault_info_src = {}; + struct vm_object_fault_info fault_info_dst = {}; /* * In order not to confuse the clustered pageins, align * the different offsets on a page boundary. 
*/ -#define RETURN(x) \ - MACRO_BEGIN \ - *copy_size -= amount_left; \ - MACRO_RETURN(x); \ +#define RETURN(x) \ + MACRO_BEGIN \ + *copy_size -= amount_left; \ + MACRO_RETURN(x); \ MACRO_END amount_left = *copy_size; fault_info_src.interruptible = interruptible; fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL; - fault_info_src.user_tag = 0; - fault_info_src.pmap_options = 0; fault_info_src.lo_offset = vm_object_trunc_page(src_offset); fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left; - fault_info_src.no_cache = FALSE; fault_info_src.stealth = TRUE; - fault_info_src.io_sync = FALSE; - fault_info_src.cs_bypass = FALSE; - fault_info_src.mark_zf_absent = FALSE; - fault_info_src.batch_pmap_op = FALSE; fault_info_dst.interruptible = interruptible; fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL; - fault_info_dst.user_tag = 0; - fault_info_dst.pmap_options = 0; fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset); fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left; - fault_info_dst.no_cache = FALSE; fault_info_dst.stealth = TRUE; - fault_info_dst.io_sync = FALSE; - fault_info_dst.cs_bypass = FALSE; - fault_info_dst.mark_zf_absent = FALSE; - fault_info_dst.batch_pmap_op = FALSE; do { /* while (amount_left > 0) */ /* @@ -5514,61 +6614,62 @@ vm_fault_copy( * COW semantics if any. */ - RetryDestinationFault: ; +RetryDestinationFault:; - dst_prot = VM_PROT_WRITE|VM_PROT_READ; + dst_prot = VM_PROT_WRITE | VM_PROT_READ; vm_object_lock(dst_object); vm_object_paging_begin(dst_object); - if (amount_left > (vm_size_t) -1) { - /* 32-bit overflow */ - fault_info_dst.cluster_size = (vm_size_t) (0 - PAGE_SIZE); - } else { - fault_info_dst.cluster_size = (vm_size_t) amount_left; - assert(fault_info_dst.cluster_size == amount_left); + /* cap cluster size at maximum UPL size */ + upl_size_t cluster_size; + if (os_convert_overflow(amount_left, &cluster_size)) { + cluster_size = 0 - (upl_size_t)PAGE_SIZE; } + fault_info_dst.cluster_size = cluster_size; - XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0); dst_page = VM_PAGE_NULL; result = vm_fault_page(dst_object, - vm_object_trunc_page(dst_offset), - VM_PROT_WRITE|VM_PROT_READ, - FALSE, - FALSE, /* page not looked up */ - &dst_prot, &dst_page, &dst_top_page, - (int *)0, - &error, - dst_map->no_zero_fill, - FALSE, &fault_info_dst); + vm_object_trunc_page(dst_offset), + VM_PROT_WRITE | VM_PROT_READ, + FALSE, + FALSE, /* page not looked up */ + &dst_prot, &dst_page, &dst_top_page, + (int *)0, + &error, + dst_map->no_zero_fill, + FALSE, &fault_info_dst); switch (result) { case VM_FAULT_SUCCESS: break; case VM_FAULT_RETRY: goto RetryDestinationFault; case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait(interruptible)) + if (vm_page_wait(interruptible)) { goto RetryDestinationFault; - /* fall thru */ + } + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: RETURN(MACH_SEND_INTERRUPTED); case VM_FAULT_SUCCESS_NO_VM_PAGE: /* success but no VM page: fail the copy */ vm_object_paging_end(dst_object); vm_object_unlock(dst_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: - if (error) - return (error); - else - return(KERN_MEMORY_ERROR); + if (error) { + return error; + } else { + return KERN_MEMORY_ERROR; + } default: panic("vm_fault_copy: unexpected error 0x%x from " - "vm_fault_page()\n", result); + "vm_fault_page()\n", result); } - assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); + assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); - old_copy_object = dst_page->object->copy; + assert(dst_object == 
VM_PAGE_OBJECT(dst_page)); + old_copy_object = dst_object->copy; /* * There exists the possiblity that the source and @@ -5577,7 +6678,7 @@ vm_fault_copy( * same, the call to vm_fault_page() for the * destination page will deadlock. To prevent this we * wire the page so we can drop busy without having - * the page daemon steal the page. We clean up the + * the page daemon steal the page. We clean up the * top page but keep the paging reference on the object * holding the dest page so it doesn't go away. */ @@ -5586,7 +6687,7 @@ vm_fault_copy( vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE); vm_page_unlock_queues(); PAGE_WAKEUP_DONE(dst_page); - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); if (dst_top_page != VM_PAGE_NULL) { vm_object_lock(dst_object); @@ -5595,7 +6696,7 @@ vm_fault_copy( vm_object_unlock(dst_object); } - RetrySourceFault: ; +RetrySourceFault:; if (src_object == VM_OBJECT_NULL) { /* @@ -5607,7 +6708,7 @@ vm_fault_copy( } else { vm_object_lock(src_object); src_page = vm_page_lookup(src_object, - vm_object_trunc_page(src_offset)); + vm_object_trunc_page(src_offset)); if (src_page == dst_page) { src_prot = dst_prot; result_page = VM_PAGE_NULL; @@ -5615,24 +6716,19 @@ vm_fault_copy( src_prot = VM_PROT_READ; vm_object_paging_begin(src_object); - if (amount_left > (vm_size_t) -1) { - /* 32-bit overflow */ - fault_info_src.cluster_size = (vm_size_t) (0 - PAGE_SIZE); - } else { - fault_info_src.cluster_size = (vm_size_t) amount_left; - assert(fault_info_src.cluster_size == amount_left); + /* cap cluster size at maximum UPL size */ + if (os_convert_overflow(amount_left, &cluster_size)) { + cluster_size = 0 - (upl_size_t)PAGE_SIZE; } + fault_info_src.cluster_size = cluster_size; - XPR(XPR_VM_FAULT, - "vm_fault_copy(2) -> vm_fault_page\n", - 0,0,0,0,0); result_page = VM_PAGE_NULL; result = vm_fault_page( - src_object, + src_object, vm_object_trunc_page(src_offset), VM_PROT_READ, FALSE, FALSE, /* page not looked up */ - &src_prot, + &src_prot, &result_page, &src_top_page, (int *)0, &error, FALSE, FALSE, &fault_info_src); @@ -5643,9 +6739,10 @@ vm_fault_copy( case VM_FAULT_RETRY: goto RetrySourceFault; case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait(interruptible)) + if (vm_page_wait(interruptible)) { goto RetrySourceFault; - /* fall thru */ + } + OS_FALLTHROUGH; case VM_FAULT_INTERRUPTED: vm_fault_copy_dst_cleanup(dst_page); RETURN(MACH_SEND_INTERRUPTED); @@ -5653,45 +6750,52 @@ vm_fault_copy( /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + OS_FALLTHROUGH; case VM_FAULT_MEMORY_ERROR: vm_fault_copy_dst_cleanup(dst_page); - if (error) - return (error); - else - return(KERN_MEMORY_ERROR); + if (error) { + return error; + } else { + return KERN_MEMORY_ERROR; + } default: panic("vm_fault_copy(2): unexpected " - "error 0x%x from " - "vm_fault_page()\n", result); + "error 0x%x from " + "vm_fault_page()\n", result); } - + result_page_object = VM_PAGE_OBJECT(result_page); assert((src_top_page == VM_PAGE_NULL) == - (result_page->object == src_object)); + (result_page_object == src_object)); } - assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE); - vm_object_unlock(result_page->object); + assert((src_prot & VM_PROT_READ) != VM_PROT_NONE); + vm_object_unlock(result_page_object); } + vm_map_lock_read(dst_map); + if (!vm_map_verify(dst_map, dst_version)) { - if (result_page != VM_PAGE_NULL && src_page != dst_page) + vm_map_unlock_read(dst_map); + if (result_page != VM_PAGE_NULL && src_page != dst_page) 
{ vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); break; } + assert(dst_object == VM_PAGE_OBJECT(dst_page)); - vm_object_lock(dst_page->object); + vm_object_lock(dst_object); - if (dst_page->object->copy != old_copy_object) { - vm_object_unlock(dst_page->object); - vm_map_verify_done(dst_map, dst_version); - if (result_page != VM_PAGE_NULL && src_page != dst_page) + if (dst_object->copy != old_copy_object) { + vm_object_unlock(dst_object); + vm_map_unlock_read(dst_map); + if (result_page != VM_PAGE_NULL && src_page != dst_page) { vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); break; } - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); /* * Copy the page, and note that it is dirty @@ -5699,11 +6803,10 @@ vm_fault_copy( */ if (!page_aligned(src_offset) || - !page_aligned(dst_offset) || - !page_aligned(amount_left)) { - - vm_object_offset_t src_po, - dst_po; + !page_aligned(dst_offset) || + !page_aligned(amount_left)) { + vm_object_offset_t src_po, + dst_po; src_po = src_offset - vm_object_trunc_page(src_offset); dst_po = dst_offset - vm_object_trunc_page(dst_offset); @@ -5713,7 +6816,7 @@ vm_fault_copy( } else { part_size = PAGE_SIZE - src_po; } - if (part_size > (amount_left)){ + if (part_size > (amount_left)) { part_size = amount_left; } @@ -5721,51 +6824,50 @@ vm_fault_copy( assert((vm_offset_t) dst_po == dst_po); assert((vm_size_t) part_size == part_size); vm_page_part_zero_fill(dst_page, - (vm_offset_t) dst_po, - (vm_size_t) part_size); + (vm_offset_t) dst_po, + (vm_size_t) part_size); } else { assert((vm_offset_t) src_po == src_po); assert((vm_offset_t) dst_po == dst_po); assert((vm_size_t) part_size == part_size); vm_page_part_copy(result_page, - (vm_offset_t) src_po, - dst_page, - (vm_offset_t) dst_po, - (vm_size_t)part_size); - if(!dst_page->dirty){ + (vm_offset_t) src_po, + dst_page, + (vm_offset_t) dst_po, + (vm_size_t)part_size); + if (!dst_page->vmp_dirty) { vm_object_lock(dst_object); SET_PAGE_DIRTY(dst_page, TRUE); - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); } - } } else { part_size = PAGE_SIZE; - if (result_page == VM_PAGE_NULL) + if (result_page == VM_PAGE_NULL) { vm_page_zero_fill(dst_page); - else{ - vm_object_lock(result_page->object); + } else { + vm_object_lock(result_page_object); vm_page_copy(result_page, dst_page); - vm_object_unlock(result_page->object); + vm_object_unlock(result_page_object); - if(!dst_page->dirty){ + if (!dst_page->vmp_dirty) { vm_object_lock(dst_object); SET_PAGE_DIRTY(dst_page, TRUE); - vm_object_unlock(dst_page->object); + vm_object_unlock(dst_object); } } - } /* * Unlock everything, and return */ - vm_map_verify_done(dst_map, dst_version); + vm_map_unlock_read(dst_map); - if (result_page != VM_PAGE_NULL && src_page != dst_page) + if (result_page != VM_PAGE_NULL && src_page != dst_page) { vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); amount_left -= part_size; @@ -5774,12 +6876,12 @@ vm_fault_copy( } while (amount_left > 0); RETURN(KERN_SUCCESS); -#undef RETURN +#undef RETURN - /*NOTREACHED*/ + /*NOTREACHED*/ } -#if VM_FAULT_CLASSIFY +#if VM_FAULT_CLASSIFY /* * Temporary statistics gathering support. 
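The partial-copy sizing in the unaligned path above, restated: copy at most up to the next page boundary on either side, and never more than what remains. A standalone sketch with an illustrative 4 KB page:

#include <stdint.h>

#define TOY_PAGE_SIZE 0x1000ULL

static uint64_t
part_size_for(uint64_t src_offset, uint64_t dst_offset, uint64_t amount_left)
{
	uint64_t src_po = src_offset & (TOY_PAGE_SIZE - 1);
	uint64_t dst_po = dst_offset & (TOY_PAGE_SIZE - 1);
	uint64_t part_size = TOY_PAGE_SIZE - dst_po;

	if (TOY_PAGE_SIZE - src_po < part_size) {
		part_size = TOY_PAGE_SIZE - src_po; /* source page ends first */
	}
	if (part_size > amount_left) {
		part_size = amount_left;            /* don't copy past the end */
	}
	return part_size;
}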
*/ @@ -5787,42 +6889,41 @@ vm_fault_copy( /* * Statistics arrays: */ -#define VM_FAULT_TYPES_MAX 5 -#define VM_FAULT_LEVEL_MAX 8 +#define VM_FAULT_TYPES_MAX 5 +#define VM_FAULT_LEVEL_MAX 8 -int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; +int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; -#define VM_FAULT_TYPE_ZERO_FILL 0 -#define VM_FAULT_TYPE_MAP_IN 1 -#define VM_FAULT_TYPE_PAGER 2 -#define VM_FAULT_TYPE_COPY 3 -#define VM_FAULT_TYPE_OTHER 4 +#define VM_FAULT_TYPE_ZERO_FILL 0 +#define VM_FAULT_TYPE_MAP_IN 1 +#define VM_FAULT_TYPE_PAGER 2 +#define VM_FAULT_TYPE_COPY 3 +#define VM_FAULT_TYPE_OTHER 4 void -vm_fault_classify(vm_object_t object, - vm_object_offset_t offset, - vm_prot_t fault_type) +vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type) { - int type, level = 0; - vm_page_t m; + int type, level = 0; + vm_page_t m; while (TRUE) { m = vm_page_lookup(object, offset); - if (m != VM_PAGE_NULL) { - if (m->busy || m->error || m->restart || m->absent) { + if (m != VM_PAGE_NULL) { + if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) { type = VM_FAULT_TYPE_OTHER; break; } if (((fault_type & VM_PROT_WRITE) == 0) || ((level == 0) && object->copy == VM_OBJECT_NULL)) { type = VM_FAULT_TYPE_MAP_IN; - break; + break; } type = VM_FAULT_TYPE_COPY; break; - } - else { + } else { if (object->pager_created) { type = VM_FAULT_TYPE_PAGER; break; @@ -5830,7 +6931,7 @@ vm_fault_classify(vm_object_t object, if (object->shadow == VM_OBJECT_NULL) { type = VM_FAULT_TYPE_ZERO_FILL; break; - } + } offset += object->vo_shadow_offset; object = object->shadow; @@ -5839,8 +6940,9 @@ vm_fault_classify(vm_object_t object, } } - if (level > VM_FAULT_LEVEL_MAX) + if (level > VM_FAULT_LEVEL_MAX) { level = VM_FAULT_LEVEL_MAX; + } vm_fault_stats[type][level] += 1; @@ -5862,24 +6964,28 @@ vm_fault_classify_init(void) return; } -#endif /* VM_FAULT_CLASSIFY */ +#endif /* VM_FAULT_CLASSIFY */ vm_offset_t -kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault_results) +kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) { -#pragma unused(map, cur_target_addr, fault_results) - - return 0; -#if 0 - vm_map_entry_t entry; - vm_object_t object; - vm_offset_t object_offset; - vm_page_t m; - int compressor_external_state, compressed_count_delta; - int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); - int my_fault_type = VM_PROT_READ; - kern_return_t kr; - + vm_map_entry_t entry; + vm_object_t object; + vm_offset_t object_offset; + vm_page_t m; + int compressor_external_state, compressed_count_delta; + int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); + int my_fault_type = VM_PROT_READ; + kern_return_t kr; + int effective_page_mask, effective_page_size; + + if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) { + effective_page_mask = VM_MAP_PAGE_MASK(map); + effective_page_size = VM_MAP_PAGE_SIZE(map); + } else { + effective_page_mask = PAGE_MASK; + effective_page_size = PAGE_SIZE; + } if (not_in_kdp) { panic("kdp_lightweight_fault called from outside of debugger context"); @@ -5887,8 +6993,8 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault assert(map != VM_MAP_NULL); - assert((cur_target_addr & PAGE_MASK) == 0); - if ((cur_target_addr & PAGE_MASK) != 0) { + assert((cur_target_addr & effective_page_mask) == 0); + if ((cur_target_addr & effective_page_mask) != 0) { return 0; } @@ -5917,65 +7023,49 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault } if 
    if (object->pager_created && (object->paging_in_progress ||
-       object->activity_in_progress)) {
+       object->activity_in_progress)) {
        return 0;
    }

-   m = kdp_vm_page_lookup(object, object_offset);
+   m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));

    if (m != VM_PAGE_NULL) {
-
        if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
            return 0;
        }

-       if (m->laundry || m->busy || m->pageout || m->absent || m->error || m->cleaning ||
-           m->overwriting || m->restart || m->unusual) {
-           return 0;
-       }
-
-       assert(!m->private);
-       if (m->private) {
-           return 0;
-       }
-
-       assert(!m->fictitious);
-       if (m->fictitious) {
+       if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning ||
+           m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
            return 0;
        }

-       assert(!m->encrypted);
-       if (m->encrypted) {
+       assert(!m->vmp_private);
+       if (m->vmp_private) {
            return 0;
        }

-       assert(!m->encrypted_cleaning);
-       if (m->encrypted_cleaning) {
+       assert(!m->vmp_fictitious);
+       if (m->vmp_fictitious) {
            return 0;
        }

-       assert(!m->compressor);
-       if (m->compressor) {
+       assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
+       if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
            return 0;
        }

-       if (fault_results) {
-           *fault_results |= kThreadFaultedBT;
-       }
-       return ptoa(m->phys_page);
+       return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
    }

    compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;

    if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
        if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
-           kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
-                           kdp_compressor_decompressed_page_ppnum, &my_fault_type,
-                           compressor_flags, &compressed_count_delta);
+           kr = vm_compressor_pager_get(object->pager,
+               vm_object_trunc_page(object_offset + object->paging_offset),
+               kdp_compressor_decompressed_page_ppnum, &my_fault_type,
+               compressor_flags, &compressed_count_delta);
            if (kr == KERN_SUCCESS) {
-               if (fault_results) {
-                   *fault_results |= kThreadDecompressedBT;
-               }
                return kdp_compressor_decompressed_page_paddr;
            } else {
                return 0;
@@ -5990,33 +7080,31 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, uint32_t *fault
        object_offset += object->vo_shadow_offset;
        object = object->shadow;
    }
-#endif /* 0 */
 }

-
-#define CODE_SIGNING_CHUNK_SIZE 4096
-
-void
-vm_page_validate_cs_mapped(
-   vm_page_t   page,
-   const void  *kaddr)
+/*
+ * vm_page_validate_cs_fast():
+ * Performs a few quick checks to determine if the page's code signature
+ * really needs to be fully validated.  It could:
+ * 1. have been modified (i.e. automatically tainted),
+ * 2. have already been validated,
+ * 3. have already been found to be tainted,
+ * 4. no longer have a backing store.
+ * Returns FALSE if the page needs to be fully validated.
+ */
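Before the function itself, a compilable userspace model of the four cases the comment above enumerates; the struct and its flags are stand-ins for the vm_page bits, and this is a sketch of the decision logic, not the kernel's implementation:

#include <stdbool.h>
#include <stdio.h>

struct cs_page {
    bool wpmapped;      /* ever mapped writable */
    bool cs_validated;  /* signature already checked */
    bool cs_tainted;    /* known to be modified */
    bool has_backing;   /* pager still alive */
};

/* true: verdict already known, skip the expensive hash check */
static bool
cs_fast_path(struct cs_page *p)
{
    if (p->wpmapped && !p->cs_tainted) {
        /* a writable mapping existed: conservatively mark tainted (case 1) */
        p->cs_validated = true;
        p->cs_tainted = true;
    }
    if (p->cs_validated || p->cs_tainted) {
        return true;    /* cases 2 and 3 */
    }
    if (!p->has_backing) {
        return true;    /* case 4: nothing left to validate against */
    }
    return false;       /* caller must run the full validation */
}

int
main(void)
{
    struct cs_page p = { .wpmapped = false, .has_backing = true };
    printf("%d\n", cs_fast_path(&p)); /* 0: needs full validation */
    p.wpmapped = true;
    printf("%d\n", cs_fast_path(&p)); /* 1: auto-tainted, verdict known */
    return 0;
}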
+static boolean_t
+vm_page_validate_cs_fast(
+   vm_page_t page,
+   vm_map_size_t fault_page_size,
+   vm_map_offset_t fault_phys_offset)
 {
-   vm_object_t     object;
-   vm_object_offset_t  offset, offset_in_page;
-   kern_return_t       kr;
-   memory_object_t     pager;
-   void            *blobs;
-   boolean_t       validated;
-   unsigned        tainted;
-   int         num_chunks, num_chunks_validated;
-
-   assert(page->busy);
-   vm_object_lock_assert_exclusive(page->object);
-
-   if (!cs_validation) {
-       return;
-   }
+   vm_object_t     object;
+
+   object = VM_PAGE_OBJECT(page);
+   vm_object_lock_assert_held(object);

-   if (page->wpmapped && !page->cs_tainted) {
+   if (page->vmp_wpmapped &&
+       !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
        /*
         * This page was mapped for "write" access sometime in the
         * past and could still be modifiable in the future.
@@ -6024,149 +7112,155 @@ vm_page_validate_cs_mapped(
         * [ If the page was already found to be "tainted", no
         * need to re-validate. ]
         */
-       page->cs_validated = TRUE;
-       page->cs_tainted = TRUE;
+       vm_object_lock_assert_exclusive(object);
+       VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
+       VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
        if (cs_debug) {
-           printf("CODESIGNING: vm_page_validate_cs: "
-                  "page %p obj %p off 0x%llx "
-                  "was modified\n",
-                  page, page->object, page->offset);
+           printf("CODESIGNING: %s: "
+               "page %p obj %p off 0x%llx "
+               "was modified\n",
+               __FUNCTION__,
+               page, object, page->vmp_offset);
        }
        vm_cs_validated_dirtied++;
    }

-   if (page->cs_validated || page->cs_tainted) {
-       return;
+   if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
+       VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
+       return TRUE;
    }
+   vm_object_lock_assert_exclusive(object);

-   vm_cs_validates++;
+#if CHECK_CS_VALIDATION_BITMAP
+   kern_return_t kr;

-   object = page->object;
-   assert(object->code_signed);
-   offset = page->offset;
+   kr = vnode_pager_cs_check_validation_bitmap(
+       object->pager,
+       page->vmp_offset + object->paging_offset,
+       CS_BITMAP_CHECK);
+   if (kr == KERN_SUCCESS) {
+       page->vmp_cs_validated = VMP_CS_ALL_TRUE;
+       page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
+       vm_cs_bitmap_validated++;
+       return TRUE;
+   }
+#endif /* CHECK_CS_VALIDATION_BITMAP */

    if (!object->alive || object->terminating || object->pager == NULL) {
        /*
         * The object is terminating and we don't have its pager
         * so we can't validate the data...
         */
-       return;
+       return TRUE;
    }
+
+   /* we need to really validate this page */
+   vm_object_lock_assert_exclusive(object);
+   return FALSE;
+}
+
+void
+vm_page_validate_cs_mapped_slow(
+   vm_page_t page,
+   const void *kaddr)
+{
+   vm_object_t             object;
+   memory_object_offset_t  mo_offset;
+   memory_object_t         pager;
+   struct vnode            *vnode;
+   int                     validated, tainted, nx;
+
+   assert(page->vmp_busy);
+   object = VM_PAGE_OBJECT(page);
+   vm_object_lock_assert_exclusive(object);
+
+   vm_cs_validates++;
+
    /*
     * Since we get here to validate a page that was brought in by
     * the pager, we know that this pager is all setup and ready
     * by now.
     */
+   assert(object->code_signed);
    assert(!object->internal);
    assert(object->pager != NULL);
    assert(object->pager_ready);

    pager = object->pager;
    assert(object->paging_in_progress);
-   kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
-   if (kr != KERN_SUCCESS) {
-       blobs = NULL;
-   }
+   vnode = vnode_pager_lookup_vnode(pager);
+   mo_offset = page->vmp_offset + object->paging_offset;

    /* verify the SHA1 hash for this page */
-   num_chunks_validated = 0;
-   for (offset_in_page = 0, num_chunks = 0;
-        offset_in_page < PAGE_SIZE_64;
-        offset_in_page += CODE_SIGNING_CHUNK_SIZE, num_chunks++) {
-       tainted = 0;
-       validated = cs_validate_page(blobs,
-                        pager,
-                        (object->paging_offset +
-                         offset +
-                         offset_in_page),
-                        (const void *)((const char *)kaddr
-                               + offset_in_page),
-                        &tainted);
-       if (validated) {
-           num_chunks_validated++;
-       }
-       if (tainted & CS_VALIDATE_TAINTED) {
-           page->cs_tainted = TRUE;
-       }
-       if (tainted & CS_VALIDATE_NX) {
-           page->cs_nx = TRUE;
-       }
-   }
-   /* page is validated only if all its chunks are */
-   if (num_chunks_validated == num_chunks) {
-       page->cs_validated = TRUE;
+   validated = 0;
+   tainted = 0;
+   nx = 0;
+   cs_validate_page(vnode,
+       pager,
+       mo_offset,
+       (const void *)((const char *)kaddr),
+       &validated,
+       &tainted,
+       &nx);
+
+   page->vmp_cs_validated |= validated;
+   page->vmp_cs_tainted |= tainted;
+   page->vmp_cs_nx |= nx;
+
+#if CHECK_CS_VALIDATION_BITMAP
+   if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
+       page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
+       vnode_pager_cs_check_validation_bitmap(object->pager,
+           mo_offset,
+           CS_BITMAP_SET);
    }
+#endif /* CHECK_CS_VALIDATION_BITMAP */
 }

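vm_page_validate_cs_mapped_slow() above ORs the validation results into the page's running vmp_cs_* state and records the page in the validation bitmap only once every part of it is proven clean. A self-contained sketch of that accumulation pattern, assuming a 16 KB page split into four 4 KB sub-pages with one bit each; the masks, struct and names below are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define SUBPAGES      4u                        /* 16K page / 4K sub-page (assumed) */
#define VMP_ALL_TRUE  ((1u << SUBPAGES) - 1)    /* 0b1111: every sub-page set */
#define VMP_ALL_FALSE 0u

struct cs_state {
    uint8_t validated;  /* bit i: sub-page i verified against its hash */
    uint8_t tainted;    /* bit i: sub-page i found modified */
};

static void
accumulate(struct cs_state *s, uint8_t validated, uint8_t tainted)
{
    /* results only ever accumulate; a later pass can't un-validate a sub-page */
    s->validated |= validated;
    s->tainted   |= tainted;
    if (s->validated == VMP_ALL_TRUE && s->tainted == VMP_ALL_FALSE) {
        printf("whole page clean: record it in the validation bitmap\n");
    }
}

int
main(void)
{
    struct cs_state s = { 0, 0 };
    accumulate(&s, 0x3, 0x0);   /* first two sub-pages verified */
    accumulate(&s, 0xc, 0x0);   /* remaining two: triggers the bitmap update */
    return 0;
}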
 void
-vm_page_validate_cs(
-   vm_page_t   page)
+vm_page_validate_cs_mapped(
+   vm_page_t page,
+   vm_map_size_t fault_page_size,
+   vm_map_offset_t fault_phys_offset,
+   const void *kaddr)
 {
-   vm_object_t     object;
-   vm_object_offset_t  offset;
-   vm_map_offset_t     koffset;
-   vm_map_size_t       ksize;
-   vm_offset_t     kaddr;
-   kern_return_t       kr;
-   boolean_t       busy_page;
-   boolean_t       need_unmap;
-
-   vm_object_lock_assert_held(page->object);
-
-   if (!cs_validation) {
-       return;
-   }
-
-   if (page->wpmapped && !page->cs_tainted) {
-       vm_object_lock_assert_exclusive(page->object);
-
-       /*
-        * This page was mapped for "write" access sometime in the
-        * past and could still be modifiable in the future.
-        * Consider it tainted.
-        * [ If the page was already found to be "tainted", no
-        * need to re-validate. ]
-        */
-       page->cs_validated = TRUE;
-       page->cs_tainted = TRUE;
-       if (cs_debug) {
-           printf("CODESIGNING: vm_page_validate_cs: "
-                  "page %p obj %p off 0x%llx "
-                  "was modified\n",
-                  page, page->object, page->offset);
-       }
-       vm_cs_validated_dirtied++;
-   }
-
-   if (page->cs_validated || page->cs_tainted) {
-       return;
-   }
-
-   if (page->slid) {
-       panic("vm_page_validate_cs(%p): page is slid\n", page);
+   if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
+       vm_page_validate_cs_mapped_slow(page, kaddr);
    }
-   assert(!page->slid);
+}

-#if CHECK_CS_VALIDATION_BITMAP
-   if ( vnode_pager_cs_check_validation_bitmap( page->object->pager, trunc_page(page->offset + page->object->paging_offset), CS_BITMAP_CHECK ) == KERN_SUCCESS) {
-       page->cs_validated = TRUE;
-       page->cs_tainted = FALSE;
-       vm_cs_bitmap_validated++;
+void
+vm_page_validate_cs(
+   vm_page_t page,
+   vm_map_size_t fault_page_size,
+   vm_map_offset_t fault_phys_offset)
+{
+   vm_object_t             object;
+   vm_object_offset_t      offset;
+   vm_map_offset_t         koffset;
+   vm_map_size_t           ksize;
+   vm_offset_t             kaddr;
+   kern_return_t           kr;
+   boolean_t               busy_page;
+   boolean_t               need_unmap;
+
+   object = VM_PAGE_OBJECT(page);
+   vm_object_lock_assert_held(object);
+
+   if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
        return;
    }
-#endif
-   vm_object_lock_assert_exclusive(page->object);
+   vm_object_lock_assert_exclusive(object);

-   object = page->object;
    assert(object->code_signed);
-   offset = page->offset;
+   offset = page->vmp_offset;

-   busy_page = page->busy;
+   busy_page = page->vmp_busy;
    if (!busy_page) {
        /* keep page busy while we map (and unlock) the VM object */
-       page->busy = TRUE;
+       page->vmp_busy = TRUE;
    }
-
+
    /*
     * Take a paging reference on the VM object
     * to protect it from collapse or bypass,
@@ -6179,28 +7273,23 @@ vm_page_validate_cs(
    koffset = 0;
    need_unmap = FALSE;
    kr = vm_paging_map_object(page,
-                 object,
-                 offset,
-                 VM_PROT_READ,
-                 FALSE, /* can't unlock object ! */
-                 &ksize,
-                 &koffset,
-                 &need_unmap);
+       object,
+       offset,
+       VM_PROT_READ,
+       FALSE, /* can't unlock object ! */
+       &ksize,
+       &koffset,
+       &need_unmap);
    if (kr != KERN_SUCCESS) {
-       panic("vm_page_validate_cs: could not map page: 0x%x\n", kr);
+       panic("%s: could not map page: 0x%x\n", __FUNCTION__, kr);
    }
    kaddr = CAST_DOWN(vm_offset_t, koffset);

    /* validate the mapped page */
-   vm_page_validate_cs_mapped(page, (const void *) kaddr);
+   vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);

-#if CHECK_CS_VALIDATION_BITMAP
-   if ( page->cs_validated == TRUE && page->cs_tainted == FALSE ) {
-       vnode_pager_cs_check_validation_bitmap( object->pager, trunc_page( offset + object->paging_offset), CS_BITMAP_SET );
-   }
-#endif
-   assert(page->busy);
-   assert(object == page->object);
+   assert(page->vmp_busy);
+   assert(object == VM_PAGE_OBJECT(page));
    vm_object_lock_assert_exclusive(object);

    if (!busy_page) {
@@ -6218,33 +7307,29 @@ vm_page_validate_cs(

 void
 vm_page_validate_cs_mapped_chunk(
-   vm_page_t   page,
-   const void  *kaddr,
-   vm_offset_t chunk_offset,
-   boolean_t   *validated_p,
-   unsigned    *tainted_p)
+   vm_page_t page,
+   const void *kaddr,
+   vm_offset_t chunk_offset,
+   vm_size_t chunk_size,
+   boolean_t *validated_p,
+   unsigned *tainted_p)
 {
-   vm_object_t     object;
-   vm_object_offset_t  offset, offset_in_page;
-   kern_return_t       kr;
-   memory_object_t     pager;
-   void            *blobs;
-   boolean_t       validated;
-   unsigned        tainted;
+   vm_object_t             object;
+   vm_object_offset_t      offset, offset_in_page;
+   memory_object_t         pager;
+   struct vnode            *vnode;
+   boolean_t               validated;
+   unsigned                tainted;

    *validated_p = FALSE;
    *tainted_p = 0;

-   assert(page->busy);
-   vm_object_lock_assert_exclusive(page->object);
-
-   if (!cs_validation) {
-       return;
-   }
+   assert(page->vmp_busy);
+   object = VM_PAGE_OBJECT(page);
+   vm_object_lock_assert_exclusive(object);

-   object = page->object;
    assert(object->code_signed);
-   offset = page->offset;
+   offset = page->vmp_offset;

    if (!object->alive || object->terminating || object->pager == NULL) {
        /*
@@ -6264,25 +7349,22 @@ vm_page_validate_cs_mapped_chunk(
    pager = object->pager;
    assert(object->paging_in_progress);
-   kr = vnode_pager_get_object_cs_blobs(pager, &blobs);
-   if (kr != KERN_SUCCESS) {
-       blobs = NULL;
-   }
+   vnode = vnode_pager_lookup_vnode(pager);

    /* verify the signature for this chunk */
    offset_in_page = chunk_offset;
    assert(offset_in_page < PAGE_SIZE);
-   assert((offset_in_page & (CODE_SIGNING_CHUNK_SIZE-1)) == 0);

    tainted = 0;
-   validated = cs_validate_page(blobs,
-                    pager,
-                    (object->paging_offset +
-                     offset +
-                     offset_in_page),
-                    (const void *)((const char *)kaddr
-                           + offset_in_page),
-                    &tainted);
+   validated = cs_validate_range(vnode,
+       pager,
+       (object->paging_offset +
+       offset +
+       offset_in_page),
+       (const void *)((const char *)kaddr
+       + offset_in_page),
+       chunk_size,
+       &tainted);
    if (validated) {
        *validated_p = TRUE;
    }
@@ -6290,3 +7372,108 @@ vm_page_validate_cs_mapped_chunk(
        *tainted_p = tainted;
    }
 }
+
+static void
+vm_rtfrecord_lock(void)
+{
+   lck_spin_lock(&vm_rtfr_slock);
+}
+
+static void
+vm_rtfrecord_unlock(void)
+{
+   lck_spin_unlock(&vm_rtfr_slock);
+}
+
+unsigned int
+vmrtfaultinfo_bufsz(void)
+{
+   return vmrtf_num_records * sizeof(vm_rtfault_record_t);
+}
+
+#include <kern/backtrace.h>
+
+__attribute__((noinline))
+static void
+vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
+{
+   uint64_t fend = mach_continuous_time();
+
+   uint64_t cfpc = 0;
+   uint64_t ctid = cthread->thread_id;
+   uint64_t cupid = get_current_unique_pid();
+
+   uintptr_t bpc = 0;
+   int btr = 0;
+   bool u64 = false;
+
+   /* Capture a single-frame backtrace; this extracts just the program
+    * counter at the point of the fault into "bpc", and should perform no
+    * further user stack traversals, thus avoiding copyin()s and further
+    * faults.
+    */
+   unsigned int bfrs = backtrace_thread_user(cthread, &bpc, 1U, &btr, &u64, NULL, false);
+
+   if ((btr == 0) && (bfrs > 0)) {
+       cfpc = bpc;
+   }
+
+   assert((fstart != 0) && fend >= fstart);
+   vm_rtfrecord_lock();
+   assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
+
+   vmrtfrs.vmrtf_total++;
+   vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
+
+   cvmr->rtfabstime = fstart;
+   cvmr->rtfduration = fend - fstart;
+   cvmr->rtfaddr = fault_vaddr;
+   cvmr->rtfpc = cfpc;
+   cvmr->rtftype = type_of_fault;
+   cvmr->rtfupid = cupid;
+   cvmr->rtftid = ctid;
+
+   if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
+       vmrtfrs.vmrtfr_curi = 0;
+   }
+
+   vm_rtfrecord_unlock();
+}
+
+int
+vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
+{
+   vm_rtfault_record_t *cvmrd = vrecords;
+   size_t residue = vrecordsz;
+   size_t numextracted = 0;
+   boolean_t early_exit = FALSE;
+
+   vm_rtfrecord_lock();
+
+   for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
+       if (residue < sizeof(vm_rtfault_record_t)) {
+           early_exit = TRUE;
+           break;
+       }
+
+       if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
+#if DEVELOPMENT || DEBUG
+           if (isroot == FALSE) {
+               continue;
+           }
+#else
+           continue;
+#endif /* DEVELOPMENT || DEBUG */
+       }
+
+       *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
+       cvmrd++;
+       residue -= sizeof(vm_rtfault_record_t);
+       numextracted++;
+   }
+
+   vm_rtfrecord_unlock();
+
+   *vmrtfrv = numextracted;
+   return early_exit;
+}
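Taken together, vm_record_rtfault() and vmrtf_extract() above implement a fixed-size ring of fault records with a wrapping cursor, filtered on extraction by the caller's unique pid and bounded by the caller's buffer. A userspace model of that ring under assumed names and sizes (none of these identifiers are the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NRECORDS 8

struct rtf_rec { uint64_t upid; uint64_t addr; };

static struct rtf_rec ring[NRECORDS];
static int cur;     /* next slot to overwrite */

static void
record(uint64_t upid, uint64_t addr)
{
    ring[cur].upid = upid;
    ring[cur].addr = addr;
    if (++cur >= NRECORDS) {
        cur = 0;    /* wrap: the oldest entries are overwritten */
    }
}

/* returns true if it stopped early because the output buffer filled up */
static bool
extract(uint64_t upid, struct rtf_rec *out, size_t outsz, size_t *nout)
{
    size_t residue = outsz, n = 0;

    for (int i = 0; i < NRECORDS; i++) {
        if (residue < sizeof(struct rtf_rec)) {
            *nout = n;
            return true;
        }
        if (ring[i].upid != upid) {
            continue;   /* another task's fault record */
        }
        memcpy(out++, &ring[i], sizeof(struct rtf_rec));
        residue -= sizeof(struct rtf_rec);
        n++;
    }
    *nout = n;
    return false;
}

int
main(void)
{
    for (uint64_t i = 0; i < 12; i++) {
        record(i % 2, 0x1000 + i);  /* two tasks interleaved; the ring wraps */
    }
    struct rtf_rec buf[NRECORDS];
    size_t n;
    bool early = extract(1, buf, sizeof(buf), &n);
    printf("extracted %zu records, early=%d\n", n, early); /* 4 records, early=0 */
    return 0;
}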