/*
- * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
-#include <kern/xpr.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc.h>
#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
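+/*
+ * Tunable: can be overridden with the
+ * "vm_protect_privileged_from_untrusted" boot-arg parsed in
+ * vm_fault_init() below.
+ */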
+int vm_protect_privileged_from_untrusted = 1;
+
unsigned int vm_object_pagein_throttle = 16;
/*
uint64_t vm_hard_throttle_threshold;
-
-#define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \
- ((vm_page_free_count < vm_page_throttle_limit || \
- HARD_THROTTLE_LIMIT_REACHED()) && \
- proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED))
-
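+/*
+ * Should the current task be hard-throttled while servicing this
+ * fault? True when the task has been flagged for throttling, or
+ * when free pages are scarce and the current thread's effective
+ * I/O policy is THROTTLE_LEVEL_THROTTLED or above.
+ */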
+OS_ALWAYS_INLINE
+boolean_t
+NEED_TO_HARD_THROTTLE_THIS_TASK(void)
+{
+ return vm_wants_task_throttled(current_task()) ||
+ ((vm_page_free_count < vm_page_throttle_limit ||
+ HARD_THROTTLE_LIMIT_REACHED()) &&
+ proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
+}
#define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
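+/*
+ * Count a decompression both in the global VM statistics and
+ * against the current thread.
+ */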
+#define VM_STAT_DECOMPRESSIONS() \
+MACRO_BEGIN \
+ VM_STAT_INCR(decompressions); \
+ current_thread()->decompressions++; \
+MACRO_END
+
boolean_t current_thread_aborted(void);
/* Forward declarations of internal routines. */
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
unsigned long vm_cs_bitmap_validated = 0;
-#if PMAP_CS
-uint64_t vm_cs_defer_to_pmap_cs = 0;
-uint64_t vm_cs_defer_to_pmap_cs_not = 0;
-#endif /* PMAP_CS */
-void vm_pre_fault(vm_map_offset_t);
+void vm_pre_fault(vm_map_offset_t, vm_prot_t);
extern char *kdp_compressor_decompressed_page;
extern addr64_t kdp_compressor_decompressed_page_paddr;
} vmrtfrs;
#define VMRTF_DEFAULT_BUFSIZE (4096)
#define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
-int vmrtf_num_records = VMRTF_NUM_RECORDS_DEFAULT;
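+/* Number of rtfault records; tunable via the "vm_rtfault_records" boot-arg. */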
+TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
static void vm_rtfrecord_lock(void);
static void vm_rtfrecord_unlock(void);
static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
-lck_spin_t vm_rtfr_slock;
extern lck_grp_t vm_page_lck_grp_bucket;
extern lck_attr_t vm_page_lck_attr;
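+/*
+ * Spinlock taken by vm_rtfrecord_lock()/vm_rtfrecord_unlock() to
+ * serialize rtfault recording.
+ */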
+LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
/*
* Routine: vm_fault_init
* Purpose:
* Initialize our private data structures.
*/
+__startup_func
void
vm_fault_init(void)
{
if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
- if (vm_compressor_temp > 0 &&
- ((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
+ if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
need_default_val = FALSE;
vm_compressor_mode = vm_compressor_temp;
break;
PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
}
printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
+
+ PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
+ &vm_protect_privileged_from_untrusted,
+ sizeof(vm_protect_privileged_from_untrusted));
}
-void
+__startup_func
+static void
vm_rtfault_record_init(void)
{
- PE_parse_boot_argn("vm_rtfault_records", &vmrtf_num_records, sizeof(vmrtf_num_records));
+ size_t size;
- assert(vmrtf_num_records >= 1);
vmrtf_num_records = MAX(vmrtf_num_records, 1);
- size_t kallocsz = vmrtf_num_records * sizeof(vm_rtfault_record_t);
- vmrtfrs.vm_rtf_records = kalloc(kallocsz);
- bzero(vmrtfrs.vm_rtf_records, kallocsz);
+ size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
+ vmrtfrs.vm_rtf_records = zalloc_permanent(size,
+ ZALIGN(vm_rtfault_record_t));
vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
- lck_spin_init(&vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
}
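+/* Run vm_rtfault_record_init() during the ZALLOC phase of startup. */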
+STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
+
/*
* Routine: vm_fault_cleanup
* Purpose:
sequential = object->sequential;
orig_sequential = sequential;
+ offset = vm_object_trunc_page(offset);
+ if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
+ /* re-faulting in the same page: no change in behavior */
+ return;
+ }
+
switch (behavior) {
case VM_BEHAVIOR_RANDOM:
/*
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
#endif
-
- if (object == kernel_object || vm_page_deactivate_behind == FALSE) {
+ if (object == kernel_object || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
/*
* Do not deactivate pages from the kernel object: they
* are not intended to become pageable.
* or we've disabled the deactivate behind mechanism
+ * or we are dealing with an offset that is not aligned to
+ * the system's PAGE_SIZE: in that case we handle the
+ * deactivation once, at the aligned offset, and thus for the
+ * full PAGE_SIZE page. This avoids redundant deactivations
+ * and extra faults.
*/
return FALSE;
}
return VM_FAULT_SUCCESS;
}
+/*
+ * Clear the code signing bits on the given page_t
+ */
+static void
+vm_fault_cs_clear(vm_page_t m)
+{
+ m->vmp_cs_validated = VMP_CS_ALL_FALSE;
+ m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
+ m->vmp_cs_nx = VMP_CS_ALL_FALSE;
+}
+
+/*
+ * Enqueues the given page on the throttled queue.
+ * The caller must hold the vm_page_queue_lock and it will be held on return.
+ */
+static void
+vm_fault_enqueue_throttled_locked(vm_page_t m)
+{
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ assert(!VM_PAGE_WIRED(m));
+
+ /*
+ * can't be on the pageout queue since we don't
+ * have a pager to try and clean to
+ */
+ vm_page_queues_remove(m, TRUE);
+ vm_page_check_pageable_safe(m);
+ vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
+ m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
+ vm_page_throttled_count++;
+}
/*
* do the work to zero fill a page and
* sending a program into this area. We
* choose this approach for performance
*/
+ vm_fault_cs_clear(m);
m->vmp_pmapped = TRUE;
- m->vmp_cs_validated = FALSE;
- m->vmp_cs_tainted = FALSE;
- m->vmp_cs_nx = FALSE;
-
if (no_zero_fill == TRUE) {
my_fault = DBG_NZF_PAGE_FAULT;
assert(!m->vmp_laundry);
assert(object != kernel_object);
//assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
-
if (!VM_DYNAMIC_PAGING_ENABLED() &&
(object->purgable == VM_PURGABLE_DENY ||
object->purgable == VM_PURGABLE_NONVOLATILE ||
object->purgable == VM_PURGABLE_VOLATILE)) {
vm_page_lockspin_queues();
-
if (!VM_DYNAMIC_PAGING_ENABLED()) {
- assert(!VM_PAGE_WIRED(m));
-
- /*
- * can't be on the pageout queue since we don't
- * have a pager to try and clean to
- */
- vm_page_queues_remove(m, TRUE);
- vm_page_check_pageable_safe(m);
- vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
- m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
- vm_page_throttled_count++;
+ vm_fault_enqueue_throttled_locked(m);
}
vm_page_unlock_queues();
}
first_m = VM_PAGE_NULL;
access_required = fault_type;
-
- XPR(XPR_VM_FAULT,
- "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n",
- object, offset, fault_type, *protection, 0);
-
/*
* default type of fault
*/
m = *result_page;
caller_lookup = FALSE; /* no longer valid after that */
} else {
- m = vm_page_lookup(object, offset);
+ m = vm_page_lookup(object, vm_object_trunc_page(offset));
}
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
#endif
wait_result = PAGE_SLEEP(object, m, interruptible);
- XPR(XPR_VM_FAULT,
- "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n",
- object, offset,
- m, 0, 0);
counter(c_vm_fault_page_block_busy_kernel++);
if (wait_result != THREAD_AWAKENED) {
return error;
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n",
- object, offset,
- m,
- first_object, 0);
-
if (object != first_object) {
/*
* free the absent page we just found
vm_page_queues_remove(m, FALSE);
vm_page_unlock_queues();
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n",
- object, offset,
- next_object,
- offset + object->vo_shadow_offset, 0);
offset += object->vo_shadow_offset;
fault_info->lo_offset += object->vo_shadow_offset;
#if TRACEFAULTPAGE
dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
#endif
- XPR(XPR_VM_FAULT,
- "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n",
- object, offset,
- m, 0, 0);
/*
* take an extra ref so that object won't die
*/
vm_object_lock(object);
assert(object->ref_count > 0);
- m = vm_page_lookup(object, offset);
+ m = vm_page_lookup(object, vm_object_trunc_page(offset));
if (m != VM_PAGE_NULL && m->vmp_cleaning) {
PAGE_ASSERT_WAIT(m, interruptible);
#if TRACEFAULTPAGE
dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
#endif
- XPR(XPR_VM_FAULT,
- "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n",
- object, offset, m, 0, 0);
assert(!m->vmp_busy);
assert(!m->vmp_absent);
}
if (fault_info && fault_info->batch_pmap_op == TRUE) {
- vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
+ vm_page_insert_internal(m, object,
+ vm_object_trunc_page(offset),
+ VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
} else {
- vm_page_insert(m, object, offset);
+ vm_page_insert(m, object, vm_object_trunc_page(offset));
}
}
if (look_for_page) {
VM_PAGE_FREE(m);
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: ready wait obj 0x%X, offset 0x%X\n",
- object, offset, 0, 0, 0);
-
/*
* take an extra ref so object won't die
*/
m->vmp_absent = TRUE;
if (fault_info && fault_info->batch_pmap_op == TRUE) {
- vm_page_insert_internal(m, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
+ vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
} else {
- vm_page_insert(m, object, offset);
+ vm_page_insert(m, object, vm_object_trunc_page(offset));
}
}
assert(m->vmp_busy);
wants_copy_flag = VM_PROT_NONE;
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n",
- object, offset, m,
- access_required | wants_copy_flag, 0);
-
if (object->copy == first_object) {
/*
* if we issue the memory_object_data_request in
*/
rc = memory_object_data_request(
pager,
- offset + object->paging_offset,
+ vm_object_trunc_page(offset) + object->paging_offset,
PAGE_SIZE,
access_required | wants_copy_flag,
(memory_object_fault_info_t)fault_info);
assert(m == VM_PAGE_NULL);
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n",
- object, offset, m,
- object->shadow, 0);
-
next_object = object->shadow;
if (next_object == VM_OBJECT_NULL) {
return VM_FAULT_MEMORY_SHORTAGE;
}
- vm_page_insert(m, object, offset);
+ vm_page_insert(m, object, vm_object_trunc_page(offset));
}
if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
m->vmp_absent = TRUE;
!first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
#endif /* EXTRA_ASSERTIONS */
- XPR(XPR_VM_FAULT,
- "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n",
- object, offset, m,
- first_object, first_m);
-
/*
* If the page is being written, but isn't
* already owned by the top-level object,
return VM_FAULT_MEMORY_SHORTAGE;
}
- XPR(XPR_VM_FAULT,
- "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n",
- object, offset,
- m, copy_m, 0);
vm_page_copy(m, copy_m);
* page we just copied into
*/
assert(copy_m->vmp_busy);
- vm_page_insert(copy_m, object, offset);
+ vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
SET_PAGE_DIRTY(copy_m, TRUE);
m = copy_m;
* paging_in_progress to do that...
*/
vm_object_paging_end(object);
- vm_object_collapse(object, offset, TRUE);
+ vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
vm_object_paging_begin(object);
} else {
*protection &= (~VM_PROT_WRITE);
* Does the page exist in the copy?
*/
copy_offset = first_offset - copy_object->vo_shadow_offset;
+ copy_offset = vm_object_trunc_page(copy_offset);
if (copy_object->vo_size <= copy_offset) {
/*
*result_page = m;
*top_page = first_m;
- XPR(XPR_VM_FAULT,
- "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
- object, offset, m, first_m, 0);
-
if (m != VM_PAGE_NULL) {
assert(VM_PAGE_OBJECT(m) == object);
vm_fault_is_sequential(object, offset, fault_info->behavior);
vm_fault_deactivate_behind(object, offset, fault_info->behavior);
} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
- VM_STAT_INCR(decompressions);
+ VM_STAT_DECOMPRESSIONS();
}
if (type_of_fault) {
*type_of_fault = my_fault;
}
+extern int panic_on_cs_killed;
+extern int proc_selfpid(void);
+extern char *proc_name_address(void *p);
+unsigned long cs_enter_tainted_rejected = 0;
+unsigned long cs_enter_tainted_accepted = 0;
/*
* CODE SIGNING:
* 3. the page belongs to a code-signed object
* 4. the page has not been validated yet or has been mapped for write.
*/
-#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \
- ((pmap) != kernel_pmap /*1*/ && \
- !(page)->vmp_cs_tainted /*2*/ && \
- (page_obj)->code_signed /*3*/ && \
- (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/ ))
-
-
-/*
- * page queue lock must NOT be held
- * m->vmp_object must be locked
- *
- * NOTE: m->vmp_object could be locked "shared" only if we are called
- * from vm_fault() as part of a soft fault. If so, we must be
- * careful not to modify the VM object in any way that is not
- * legal under a shared lock...
- */
-extern int panic_on_cs_killed;
-extern int proc_selfpid(void);
-extern char *proc_name_address(void *p);
-unsigned long cs_enter_tainted_rejected = 0;
-unsigned long cs_enter_tainted_accepted = 0;
-kern_return_t
-vm_fault_enter(vm_page_t m,
- pmap_t pmap,
- vm_map_offset_t vaddr,
- vm_prot_t prot,
- vm_prot_t caller_prot,
- boolean_t wired,
- boolean_t change_wiring,
- vm_tag_t wire_tag,
- vm_object_fault_info_t fault_info,
- boolean_t *need_retry,
- int *type_of_fault)
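+/*
+ * Does the page still need its code signature validated before
+ * being entered into the given pmap? Evaluates the numbered
+ * conditions from the "CODE SIGNING" comment above, at either
+ * whole-page or sub-page granularity depending on fault_page_size.
+ */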
+static bool
+vm_fault_cs_need_validation(
+ pmap_t pmap,
+ vm_page_t page,
+ vm_object_t page_obj,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset)
{
- kern_return_t kr, pe_result;
- boolean_t previously_pmapped = m->vmp_pmapped;
- boolean_t must_disconnect = 0;
- boolean_t map_is_switched, map_is_switch_protected;
- boolean_t cs_violation;
- int cs_enforcement_enabled;
- vm_prot_t fault_type;
- vm_object_t object;
- boolean_t no_cache = fault_info->no_cache;
- boolean_t cs_bypass = fault_info->cs_bypass;
- int pmap_options = fault_info->pmap_options;
-
- fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
- object = VM_PAGE_OBJECT(m);
-
- vm_object_lock_assert_held(object);
-
-#if KASAN
if (pmap == kernel_pmap) {
- kasan_notify_address(vaddr, PAGE_SIZE);
+ /* 1 - not user space */
+ return false;
}
-#endif
-
- LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
-
- if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
- assert(m->vmp_fictitious);
- return KERN_SUCCESS;
+ if (!page_obj->code_signed) {
+ /* 3 - page does not belong to a code-signed object */
+ return false;
}
-
- if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
- vm_object_lock_assert_exclusive(object);
- } else if ((fault_type & VM_PROT_WRITE) == 0 &&
- (!m->vmp_wpmapped
-#if VM_OBJECT_ACCESS_TRACKING
- || object->access_tracking
-#endif /* VM_OBJECT_ACCESS_TRACKING */
- )) {
- /*
- * This is not a "write" fault, so we
- * might not have taken the object lock
- * exclusively and we might not be able
- * to update the "wpmapped" bit in
- * vm_fault_enter().
- * Let's just grant read access to
- * the page for now and we'll
- * soft-fault again if we need write
- * access later...
- */
-
- /* This had better not be a JIT page. */
- if (!pmap_has_prot_policy(prot)) {
- prot &= ~VM_PROT_WRITE;
- } else {
- assert(cs_bypass);
+ if (fault_page_size == PAGE_SIZE) {
+ /* looking at the whole page */
+ assertf(fault_phys_offset == 0,
+ "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
+ (uint64_t)fault_page_size,
+ (uint64_t)fault_phys_offset);
+ if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
+ /* 2 - page is all tainted */
+ return false;
}
- }
- if (m->vmp_pmapped == FALSE) {
- if (m->vmp_clustered) {
- if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
- /*
- * found it in the cache, but this
- * is the first fault-in of the page (m->vmp_pmapped == FALSE)
- * so it must have come in as part of
- * a cluster... account 1 pagein against it
- */
- if (object->internal) {
- *type_of_fault = DBG_PAGEIND_FAULT;
- } else {
- *type_of_fault = DBG_PAGEINV_FAULT;
- }
-
- VM_PAGE_COUNT_AS_PAGEIN(m);
- }
- VM_PAGE_CONSUME_CLUSTERED(m);
+ if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
+ !page->vmp_wpmapped) {
+ /* 4 - already fully validated and never mapped writable */
+ return false;
+ }
+ } else {
+ /* looking at a specific sub-page */
+ if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
+ /* 2 - sub-page was already marked as tainted */
+ return false;
+ }
+ if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
+ !page->vmp_wpmapped) {
+ /* 4 - already validated and never mapped writable */
+ return false;
}
}
+ /* page needs to be validated */
+ return true;
+}
- if (*type_of_fault != DBG_COW_FAULT) {
- DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
- if (pmap == kernel_pmap) {
- DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
- }
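+/*
+ * Is the (sub-)page treated as immutable for code-signing
+ * purposes? A CS-validated page may not be overwritten while a
+ * switch-protected map is active.
+ */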
+static bool
+vm_fault_cs_page_immutable(
+ vm_page_t m,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_prot_t prot __unused)
+{
+ if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
+ /*&& ((prot) & VM_PROT_EXECUTE)*/) {
+ return true;
}
+ return false;
+}
- /* Validate code signature if necessary. */
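+/* Has the (sub-)page been marked non-executable by its code signature? */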
+static bool
+vm_fault_cs_page_nx(
+ vm_page_t m,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset)
+{
+ return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
+}
+
+/*
+ * Check if the page being entered into the pmap violates code signing.
+ */
+static kern_return_t
+vm_fault_cs_check_violation(
+ bool cs_bypass,
+ vm_object_t object,
+ vm_page_t m,
+ pmap_t pmap,
+ vm_prot_t prot,
+ vm_prot_t caller_prot,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_object_fault_info_t fault_info,
+ bool map_is_switched,
+ bool map_is_switch_protected,
+ bool *cs_violation)
+{
+#if !PMAP_CS
+#pragma unused(caller_prot)
+#pragma unused(fault_info)
+#endif /* !PMAP_CS */
+ int cs_enforcement_enabled;
if (!cs_bypass &&
- VM_FAULT_NEED_CS_VALIDATION(pmap, m, object)) {
+ vm_fault_cs_need_validation(pmap, m, object,
+ fault_page_size, fault_phys_offset)) {
vm_object_lock_assert_exclusive(object);
- if (m->vmp_cs_validated) {
+ if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
vm_cs_revalidates++;
}
/* VM map is locked, so 1 ref will remain on VM object -
* so no harm if vm_page_validate_cs drops the object lock */
-#if PMAP_CS
- if (fault_info->pmap_cs_associated &&
- pmap_cs_enforced(pmap) &&
- !m->vmp_cs_validated &&
- !m->vmp_cs_tainted &&
- !m->vmp_cs_nx &&
- (prot & VM_PROT_EXECUTE) &&
- (caller_prot & VM_PROT_EXECUTE)) {
- /*
- * With pmap_cs, the pmap layer will validate the
- * code signature for any executable pmap mapping.
- * No need for us to validate this page too:
- * in pmap_cs we trust...
- */
- vm_cs_defer_to_pmap_cs++;
- } else {
- vm_cs_defer_to_pmap_cs_not++;
- vm_page_validate_cs(m);
- }
-#else /* PMAP_CS */
- vm_page_validate_cs(m);
-#endif /* PMAP_CS */
+ vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
}
-#define page_immutable(m, prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/ )
-#define page_nx(m) ((m)->vmp_cs_nx)
-
- map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
- (pmap == vm_map_pmap(current_thread()->map)));
- map_is_switch_protected = current_thread()->map->switch_protect;
-
/* If the map is switched, and is switch-protected, we must protect
* some pages from being write-faulted: immutable pages because by
* definition they may not be written, and executable pages because that
* from the current map. We do that below right before we do the
* PMAP_ENTER.
*/
- cs_enforcement_enabled = cs_process_enforcement(NULL);
+ if (pmap == kernel_pmap) {
+ /* kernel fault: cs_enforcement does not apply */
+ cs_enforcement_enabled = 0;
+ } else {
+ cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
+ }
if (cs_enforcement_enabled && map_is_switched &&
- map_is_switch_protected && page_immutable(m, prot) &&
+ map_is_switch_protected &&
+ vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
(prot & VM_PROT_WRITE)) {
return KERN_CODESIGN_ERROR;
}
- if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) {
+ if (cs_enforcement_enabled &&
+ vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
+ (prot & VM_PROT_EXECUTE)) {
if (cs_debug) {
printf("page marked to be NX, not letting it be mapped EXEC\n");
}
*/
if (cs_bypass) {
/* code-signing is bypassed */
- cs_violation = FALSE;
- } else if (m->vmp_cs_tainted) {
+ *cs_violation = FALSE;
+ } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
/* tainted page */
- cs_violation = TRUE;
+ *cs_violation = TRUE;
} else if (!cs_enforcement_enabled) {
/* no further code-signing enforcement */
- cs_violation = FALSE;
- } else if (page_immutable(m, prot) &&
+ *cs_violation = FALSE;
+ } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
((prot & VM_PROT_WRITE) ||
m->vmp_wpmapped)) {
/*
* another map later, we will disconnect it from this pmap so
* we'll notice the change.
*/
- cs_violation = TRUE;
- } else if (!m->vmp_cs_validated &&
+ *cs_violation = TRUE;
+ } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
(prot & VM_PROT_EXECUTE)
-#if PMAP_CS
- /*
- * Executable pages will be validated by pmap_cs;
- * in pmap_cs we trust...
- * If pmap_cs is turned off, this is a code-signing
- * violation.
- */
- && !(pmap_cs_enforced(pmap))
-#endif /* PMAP_CS */
) {
- cs_violation = TRUE;
+ *cs_violation = TRUE;
} else {
- cs_violation = FALSE;
+ *cs_violation = FALSE;
}
+ return KERN_SUCCESS;
+}
- if (cs_violation) {
- /* We will have a tainted page. Have to handle the special case
- * of a switched map now. If the map is not switched, standard
- * procedure applies - call cs_invalid_page().
- * If the map is switched, the real owner is invalid already.
- * There is no point in invalidating the switching process since
- * it will not be executing from the map. So we don't call
- * cs_invalid_page() in that case. */
- boolean_t reject_page, cs_killed;
- if (map_is_switched) {
- assert(pmap == vm_map_pmap(current_thread()->map));
- assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
- reject_page = FALSE;
- } else {
- if (cs_debug > 5) {
- printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
- object->code_signed ? "yes" : "no",
- m->vmp_cs_validated ? "yes" : "no",
- m->vmp_cs_tainted ? "yes" : "no",
- m->vmp_wpmapped ? "yes" : "no",
- (int)prot);
- }
- reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
+/*
+ * Handles a code signing violation by either rejecting the page or forcing a disconnect.
+ * @param must_disconnect This value will be set to true if the caller must disconnect
+ * this page.
+ * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
+ */
+static kern_return_t
+vm_fault_cs_handle_violation(
+ vm_object_t object,
+ vm_page_t m,
+ pmap_t pmap,
+ vm_prot_t prot,
+ vm_map_offset_t vaddr,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ bool map_is_switched,
+ bool map_is_switch_protected,
+ bool *must_disconnect)
+{
+#if !MACH_ASSERT
+#pragma unused(pmap)
+#pragma unused(map_is_switch_protected)
+#endif /* !MACH_ASSERT */
+ /*
+ * We will have a tainted page. Have to handle the special case
+ * of a switched map now. If the map is not switched, standard
+ * procedure applies - call cs_invalid_page().
+ * If the map is switched, the real owner is invalid already.
+ * There is no point in invalidating the switching process since
+ * it will not be executing from the map. So we don't call
+ * cs_invalid_page() in that case.
+ */
+ boolean_t reject_page, cs_killed;
+ kern_return_t kr;
+ if (map_is_switched) {
+ assert(pmap == vm_map_pmap(current_thread()->map));
+ assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
+ reject_page = FALSE;
+ } else {
+ if (cs_debug > 5) {
+ printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
+ object->code_signed ? "yes" : "no",
+ VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
+ VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
+ m->vmp_wpmapped ? "yes" : "no",
+ (int)prot);
}
+ reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
+ }
- if (reject_page) {
- /* reject the invalid page: abort the page fault */
- int pid;
- const char *procname;
- task_t task;
- vm_object_t file_object, shadow;
- vm_object_offset_t file_offset;
- char *pathname, *filename;
- vm_size_t pathname_len, filename_len;
- boolean_t truncated_path;
+ if (reject_page) {
+ /* reject the invalid page: abort the page fault */
+ int pid;
+ const char *procname;
+ task_t task;
+ vm_object_t file_object, shadow;
+ vm_object_offset_t file_offset;
+ char *pathname, *filename;
+ vm_size_t pathname_len, filename_len;
+ boolean_t truncated_path;
#define __PATH_MAX 1024
- struct timespec mtime, cs_mtime;
- int shadow_depth;
- os_reason_t codesigning_exit_reason = OS_REASON_NULL;
-
- kr = KERN_CODESIGN_ERROR;
- cs_enter_tainted_rejected++;
-
- /* get process name and pid */
- procname = "?";
- task = current_task();
- pid = proc_selfpid();
- if (task->bsd_info != NULL) {
- procname = proc_name_address(task->bsd_info);
- }
-
- /* get file's VM object */
- file_object = object;
- file_offset = m->vmp_offset;
- for (shadow = file_object->shadow,
- shadow_depth = 0;
- shadow != VM_OBJECT_NULL;
- shadow = file_object->shadow,
- shadow_depth++) {
- vm_object_lock_shared(shadow);
- if (file_object != object) {
- vm_object_unlock(file_object);
- }
- file_offset += file_object->vo_shadow_offset;
- file_object = shadow;
- }
+ struct timespec mtime, cs_mtime;
+ int shadow_depth;
+ os_reason_t codesigning_exit_reason = OS_REASON_NULL;
+
+ kr = KERN_CODESIGN_ERROR;
+ cs_enter_tainted_rejected++;
+
+ /* get process name and pid */
+ procname = "?";
+ task = current_task();
+ pid = proc_selfpid();
+ if (task->bsd_info != NULL) {
+ procname = proc_name_address(task->bsd_info);
+ }
- mtime.tv_sec = 0;
- mtime.tv_nsec = 0;
- cs_mtime.tv_sec = 0;
- cs_mtime.tv_nsec = 0;
+ /* get file's VM object */
+ file_object = object;
+ file_offset = m->vmp_offset;
+ for (shadow = file_object->shadow,
+ shadow_depth = 0;
+ shadow != VM_OBJECT_NULL;
+ shadow = file_object->shadow,
+ shadow_depth++) {
+ vm_object_lock_shared(shadow);
+ if (file_object != object) {
+ vm_object_unlock(file_object);
+ }
+ file_offset += file_object->vo_shadow_offset;
+ file_object = shadow;
+ }
- /* get file's pathname and/or filename */
- pathname = NULL;
- filename = NULL;
- pathname_len = 0;
- filename_len = 0;
- truncated_path = FALSE;
- /* no pager -> no file -> no pathname, use "<nil>" in that case */
- if (file_object->pager != NULL) {
- pathname = (char *)kalloc(__PATH_MAX * 2);
- if (pathname) {
- pathname[0] = '\0';
- pathname_len = __PATH_MAX;
- filename = pathname + pathname_len;
- filename_len = __PATH_MAX;
- }
- vnode_pager_get_object_name(file_object->pager,
+ mtime.tv_sec = 0;
+ mtime.tv_nsec = 0;
+ cs_mtime.tv_sec = 0;
+ cs_mtime.tv_nsec = 0;
+
+ /* get file's pathname and/or filename */
+ pathname = NULL;
+ filename = NULL;
+ pathname_len = 0;
+ filename_len = 0;
+ truncated_path = FALSE;
+ /* no pager -> no file -> no pathname, use "<nil>" in that case */
+ if (file_object->pager != NULL) {
+ pathname = kheap_alloc(KHEAP_TEMP, __PATH_MAX * 2, Z_WAITOK);
+ if (pathname) {
+ pathname[0] = '\0';
+ pathname_len = __PATH_MAX;
+ filename = pathname + pathname_len;
+ filename_len = __PATH_MAX;
+
+ if (vnode_pager_get_object_name(file_object->pager,
pathname,
pathname_len,
filename,
filename_len,
- &truncated_path);
- if (pathname) {
+ &truncated_path) == KERN_SUCCESS) {
/* safety first... */
pathname[__PATH_MAX - 1] = '\0';
filename[__PATH_MAX - 1] = '\0';
+
+ vnode_pager_get_object_mtime(file_object->pager,
+ &mtime,
+ &cs_mtime);
+ } else {
+ kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2);
+ pathname = NULL;
+ filename = NULL;
+ pathname_len = 0;
+ filename_len = 0;
+ truncated_path = FALSE;
+ }
+ }
+ }
+ printf("CODE SIGNING: process %d[%s]: "
+ "rejecting invalid page at address 0x%llx "
+ "from offset 0x%llx in file \"%s%s%s\" "
+ "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+ "(signed:%d validated:%d tainted:%d nx:%d "
+ "wpmapped:%d dirty:%d depth:%d)\n",
+ pid, procname, (addr64_t) vaddr,
+ file_offset,
+ (pathname ? pathname : "<nil>"),
+ (truncated_path ? "/.../" : ""),
+ (truncated_path ? filename : ""),
+ cs_mtime.tv_sec, cs_mtime.tv_nsec,
+ ((cs_mtime.tv_sec == mtime.tv_sec &&
+ cs_mtime.tv_nsec == mtime.tv_nsec)
+ ? "=="
+ : "!="),
+ mtime.tv_sec, mtime.tv_nsec,
+ object->code_signed,
+ VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
+ VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
+ VMP_CS_NX(m, fault_page_size, fault_phys_offset),
+ m->vmp_wpmapped,
+ m->vmp_dirty,
+ shadow_depth);
+
+ /*
+ * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
+ * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
+ * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
+ * will deal with the segmentation fault.
+ */
+ if (cs_killed) {
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
+ pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
+
+ codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
+ if (codesigning_exit_reason == NULL) {
+ printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
+ } else {
+ mach_vm_address_t data_addr = 0;
+ struct codesigning_exit_reason_info *ceri = NULL;
+ uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
+
+ if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
+ printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
+ } else {
+ if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
+ EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
+ ceri = (struct codesigning_exit_reason_info *)data_addr;
+ static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
+
+ ceri->ceri_virt_addr = vaddr;
+ ceri->ceri_file_offset = file_offset;
+ if (pathname) {
+ strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
+ } else {
+ ceri->ceri_pathname[0] = '\0';
+ }
+ if (filename) {
+ strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
+ } else {
+ ceri->ceri_filename[0] = '\0';
+ }
+ ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
+ ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
+ ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
+ ceri->ceri_page_modtime_secs = mtime.tv_sec;
+ ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
+ ceri->ceri_object_codesigned = (object->code_signed);
+ ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
+ ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
+ ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
+ ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
+ ceri->ceri_page_slid = 0;
+ ceri->ceri_page_dirty = (m->vmp_dirty);
+ ceri->ceri_page_shadow_depth = shadow_depth;
+ } else {
+#if DEBUG || DEVELOPMENT
+ panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
+#else
+ printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
+#endif /* DEBUG || DEVELOPMENT */
+ /* Free the buffer */
+ os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
+ }
}
- vnode_pager_get_object_mtime(file_object->pager,
- &mtime,
- &cs_mtime);
}
- printf("CODE SIGNING: process %d[%s]: "
- "rejecting invalid page at address 0x%llx "
+
+ set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
+ }
+ if (panic_on_cs_killed &&
+ object->object_is_shared_cache) {
+ char *tainted_contents;
+ vm_map_offset_t src_vaddr;
+ src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
+ tainted_contents = kalloc(PAGE_SIZE);
+ bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
+ printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
+ panic("CODE SIGNING: process %d[%s]: "
+ "rejecting invalid page (phys#0x%x) at address 0x%llx "
"from offset 0x%llx in file \"%s%s%s\" "
"(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
- "(signed:%d validated:%d tainted:%d nx:%d "
+ "(signed:%d validated:%d tainted:%d nx:%d"
"wpmapped:%d dirty:%d depth:%d)\n",
- pid, procname, (addr64_t) vaddr,
+ pid, procname,
+ VM_PAGE_GET_PHYS_PAGE(m),
+ (addr64_t) vaddr,
file_offset,
(pathname ? pathname : "<nil>"),
(truncated_path ? "/.../" : ""),
: "!="),
mtime.tv_sec, mtime.tv_nsec,
object->code_signed,
- m->vmp_cs_validated,
- m->vmp_cs_tainted,
- m->vmp_cs_nx,
+ VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
+ VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
+ VMP_CS_NX(m, fault_page_size, fault_phys_offset),
m->vmp_wpmapped,
m->vmp_dirty,
shadow_depth);
+ }
+ if (file_object != object) {
+ vm_object_unlock(file_object);
+ }
+ if (pathname_len != 0) {
+ kheap_free(KHEAP_TEMP, pathname, __PATH_MAX * 2);
+ pathname = NULL;
+ filename = NULL;
+ }
+ } else {
+ /* proceed with the invalid page */
+ kr = KERN_SUCCESS;
+ if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
+ !object->code_signed) {
/*
- * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
- * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
- * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
- * will deal with the segmentation fault.
+ * This page has not been (fully) validated but
+ * does not belong to a code-signed object
+ * so it should not be forcefully considered
+ * as tainted.
+ * We're just concerned about it here because
+ * we've been asked to "execute" it but that
+ * does not mean that it should cause other
+ * accesses to fail.
+ * This happens when a debugger sets a
+ * breakpoint and we then execute code in
+ * that page. Marking the page as "tainted"
+ * would cause any inspection tool ("leaks",
+ * "vmmap", "CrashReporter", ...) to get killed
+ * due to code-signing violation on that page,
+ * even though they're just reading it and not
+ * executing from it.
*/
- if (cs_killed) {
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
- pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
-
- codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
- if (codesigning_exit_reason == NULL) {
- printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
- } else {
- mach_vm_address_t data_addr = 0;
- struct codesigning_exit_reason_info *ceri = NULL;
- uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
-
- if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
- printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
- } else {
- if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
- EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
- ceri = (struct codesigning_exit_reason_info *)data_addr;
- static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
-
- ceri->ceri_virt_addr = vaddr;
- ceri->ceri_file_offset = file_offset;
- if (pathname) {
- strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
- } else {
- ceri->ceri_pathname[0] = '\0';
- }
- if (filename) {
- strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
- } else {
- ceri->ceri_filename[0] = '\0';
- }
- ceri->ceri_path_truncated = (truncated_path);
- ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
- ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
- ceri->ceri_page_modtime_secs = mtime.tv_sec;
- ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
- ceri->ceri_object_codesigned = (object->code_signed);
- ceri->ceri_page_codesig_validated = (m->vmp_cs_validated);
- ceri->ceri_page_codesig_tainted = (m->vmp_cs_tainted);
- ceri->ceri_page_codesig_nx = (m->vmp_cs_nx);
- ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
- ceri->ceri_page_slid = 0;
- ceri->ceri_page_dirty = (m->vmp_dirty);
- ceri->ceri_page_shadow_depth = shadow_depth;
- } else {
-#if DEBUG || DEVELOPMENT
- panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
-#else
- printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
-#endif /* DEBUG || DEVELOPMENT */
- /* Free the buffer */
- os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
- }
- }
- }
-
- set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
- }
- if (panic_on_cs_killed &&
- object->object_is_shared_cache) {
- panic("CODE SIGNING: process %d[%s]: "
- "rejecting invalid page at address 0x%llx "
- "from offset 0x%llx in file \"%s%s%s\" "
- "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
- "(signed:%d validated:%d tainted:%d nx:%d"
- "wpmapped:%d dirty:%d depth:%d)\n",
- pid, procname, (addr64_t) vaddr,
- file_offset,
- (pathname ? pathname : "<nil>"),
- (truncated_path ? "/.../" : ""),
- (truncated_path ? filename : ""),
- cs_mtime.tv_sec, cs_mtime.tv_nsec,
- ((cs_mtime.tv_sec == mtime.tv_sec &&
- cs_mtime.tv_nsec == mtime.tv_nsec)
- ? "=="
- : "!="),
- mtime.tv_sec, mtime.tv_nsec,
- object->code_signed,
- m->vmp_cs_validated,
- m->vmp_cs_tainted,
- m->vmp_cs_nx,
- m->vmp_wpmapped,
- m->vmp_dirty,
- shadow_depth);
- }
-
- if (file_object != object) {
- vm_object_unlock(file_object);
- }
- if (pathname_len != 0) {
- kfree(pathname, __PATH_MAX * 2);
- pathname = NULL;
- filename = NULL;
- }
} else {
- /* proceed with the invalid page */
- kr = KERN_SUCCESS;
- if (!m->vmp_cs_validated &&
- !object->code_signed) {
- /*
- * This page has not been (fully) validated but
- * does not belong to a code-signed object
- * so it should not be forcefully considered
- * as tainted.
- * We're just concerned about it here because
- * we've been asked to "execute" it but that
- * does not mean that it should cause other
- * accesses to fail.
- * This happens when a debugger sets a
- * breakpoint and we then execute code in
- * that page. Marking the page as "tainted"
- * would cause any inspection tool ("leaks",
- * "vmmap", "CrashReporter", ...) to get killed
- * due to code-signing violation on that page,
- * even though they're just reading it and not
- * executing from it.
- */
- } else {
- /*
- * Page might have been tainted before or not;
- * now it definitively is. If the page wasn't
- * tainted, we must disconnect it from all
- * pmaps later, to force existing mappings
- * through that code path for re-consideration
- * of the validity of that page.
- */
- must_disconnect = !m->vmp_cs_tainted;
- m->vmp_cs_tainted = TRUE;
+ /*
+ * Page might have been tainted before or not;
+ * now it definitively is. If the page wasn't
+ * tainted, we must disconnect it from all
+ * pmaps later, to force existing mappings
+ * through that code path for re-consideration
+ * of the validity of that page.
+ */
+ if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
+ *must_disconnect = TRUE;
+ VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
}
- cs_enter_tainted_accepted++;
}
- if (kr != KERN_SUCCESS) {
- if (cs_debug) {
- printf("CODESIGNING: vm_fault_enter(0x%llx): "
- "*** INVALID PAGE ***\n",
- (long long)vaddr);
- }
+ cs_enter_tainted_accepted++;
+ }
+ if (kr != KERN_SUCCESS) {
+ if (cs_debug) {
+ printf("CODESIGNING: vm_fault_enter(0x%llx): "
+ "*** INVALID PAGE ***\n",
+ (long long)vaddr);
+ }
#if !SECURE_KERNEL
- if (cs_enforcement_panic) {
- panic("CODESIGNING: panicking on invalid page\n");
- }
-#endif
+ if (cs_enforcement_panic) {
+ panic("CODESIGNING: panicking on invalid page\n");
}
- } else {
- /* proceed with the valid page */
- kr = KERN_SUCCESS;
+#endif
}
+ return kr;
+}
+/*
+ * Check that the code signature is valid for the given page being inserted into
+ * the pmap.
+ *
+ * @param must_disconnect This value will be set to true if the caller must disconnect
+ * this page.
+ * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
+ */
+static kern_return_t
+vm_fault_validate_cs(
+ bool cs_bypass,
+ vm_object_t object,
+ vm_page_t m,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_prot_t prot,
+ vm_prot_t caller_prot,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_object_fault_info_t fault_info,
+ bool *must_disconnect)
+{
+ bool map_is_switched, map_is_switch_protected, cs_violation;
+ kern_return_t kr;
+ /* Validate code signature if necessary. */
+ map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
+ (pmap == vm_map_pmap(current_thread()->map)));
+ map_is_switch_protected = current_thread()->map->switch_protect;
+ kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
+ prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
+ map_is_switched, map_is_switch_protected, &cs_violation);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ if (cs_violation) {
+ kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
+ fault_page_size, fault_phys_offset,
+ map_is_switched, map_is_switch_protected, must_disconnect);
+ }
+ return kr;
+}
+
+/*
+ * Enqueue the page on the appropriate paging queue.
+ */
+static void
+vm_fault_enqueue_page(
+ vm_object_t object,
+ vm_page_t m,
+ bool wired,
+ bool change_wiring,
+ vm_tag_t wire_tag,
+ bool no_cache,
+ int *type_of_fault,
+ kern_return_t kr)
+{
+ assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
boolean_t page_queues_locked = FALSE;
+ boolean_t previously_pmapped = m->vmp_pmapped;
#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
MACRO_BEGIN \
if (! page_queues_locked) { \
} \
MACRO_END
- /*
- * Hold queues lock to manipulate
- * the page queues. Change wiring
- * case is obvious.
- */
- assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
-
#if CONFIG_BACKGROUND_QUEUE
vm_page_update_background_state(m);
#endif
*/
lid = cpu_number();
- lq = &vm_page_local_q[lid].vpl_un.vpl;
+ lq = zpercpu_get_cpu(vm_page_local_q, lid);
VPL_LOCK(&lq->vpl_lock);
}
/* we're done with the page queues lock, if we ever took it */
__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+}
-
- /* If we have a KERN_SUCCESS from the previous checks, we either have
- * a good page, or a tainted page that has been accepted by the process.
- * In both cases the page will be entered into the pmap.
- * If the page is writeable, we need to disconnect it from other pmaps
- * now so those processes can take note.
+/*
+ * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
+ * @return true if the page needs to be synced via pmap_sync_page_data_phys()
+ * before being inserted into the pmap.
+ */
+static bool
+vm_fault_enter_set_mapped(
+ vm_object_t object,
+ vm_page_t m,
+ vm_prot_t prot,
+ vm_prot_t fault_type)
+{
+ bool page_needs_sync = false;
+ /*
+ * NOTE: we may only hold the vm_object lock SHARED
+ * at this point, so we need the phys_page lock to
+ * properly serialize updating the pmapped and
+ * xpmapped bits
*/
- if (kr == KERN_SUCCESS) {
- /*
- * NOTE: we may only hold the vm_object lock SHARED
- * at this point, so we need the phys_page lock to
- * properly serialize updating the pmapped and
- * xpmapped bits
- */
- if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
- ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+ if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
- pmap_lock_phys_page(phys_page);
- /*
- * go ahead and take the opportunity
- * to set 'pmapped' here so that we don't
- * need to grab this lock a 2nd time
- * just below
- */
- m->vmp_pmapped = TRUE;
+ pmap_lock_phys_page(phys_page);
+ m->vmp_pmapped = TRUE;
- if (!m->vmp_xpmapped) {
- m->vmp_xpmapped = TRUE;
+ if (!m->vmp_xpmapped) {
+ m->vmp_xpmapped = TRUE;
- pmap_unlock_phys_page(phys_page);
+ pmap_unlock_phys_page(phys_page);
- if (!object->internal) {
- OSAddAtomic(1, &vm_page_xpmapped_external_count);
- }
+ if (!object->internal) {
+ OSAddAtomic(1, &vm_page_xpmapped_external_count);
+ }
#if defined(__arm__) || defined(__arm64__)
- pmap_sync_page_data_phys(phys_page);
+ page_needs_sync = true;
#else
- if (object->internal &&
- object->pager != NULL) {
- /*
- * This page could have been
- * uncompressed by the
- * compressor pager and its
- * contents might be only in
- * the data cache.
- * Since it's being mapped for
- * "execute" for the fist time,
- * make sure the icache is in
- * sync.
- */
- assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
- pmap_sync_page_data_phys(phys_page);
- }
-#endif
- } else {
- pmap_unlock_phys_page(phys_page);
+ if (object->internal &&
+ object->pager != NULL) {
+ /*
+ * This page could have been
+ * uncompressed by the
+ * compressor pager and its
+ * contents might be only in
+ * the data cache.
+ * Since it's being mapped for
+ * "execute" for the fist time,
+ * make sure the icache is in
+ * sync.
+ */
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+ page_needs_sync = true;
}
+#endif
} else {
- if (m->vmp_pmapped == FALSE) {
- ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+ pmap_unlock_phys_page(phys_page);
+ }
+ } else {
+ if (m->vmp_pmapped == FALSE) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
- pmap_lock_phys_page(phys_page);
- m->vmp_pmapped = TRUE;
- pmap_unlock_phys_page(phys_page);
- }
+ pmap_lock_phys_page(phys_page);
+ m->vmp_pmapped = TRUE;
+ pmap_unlock_phys_page(phys_page);
}
+ }
- if (fault_type & VM_PROT_WRITE) {
- if (m->vmp_wpmapped == FALSE) {
- vm_object_lock_assert_exclusive(object);
- if (!object->internal && object->pager) {
- task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
- }
- m->vmp_wpmapped = TRUE;
+ if (fault_type & VM_PROT_WRITE) {
+ if (m->vmp_wpmapped == FALSE) {
+ vm_object_lock_assert_exclusive(object);
+ if (!object->internal && object->pager) {
+ task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
}
- if (must_disconnect) {
- /*
- * We can only get here
- * because of the CSE logic
- */
- assert(cs_enforcement_enabled);
- pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ m->vmp_wpmapped = TRUE;
+ }
+ }
+ return page_needs_sync;
+}
+
+/*
+ * Try to enter the given page into the pmap.
+ * Will retry without execute permission iff PMAP_CS is enabled and we encounter
+ * a codesigning failure on a non-execute fault.
+ */
+static kern_return_t
+vm_fault_attempt_pmap_enter(
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_page_t m,
+ vm_prot_t *prot,
+ vm_prot_t caller_prot,
+ vm_prot_t fault_type,
+ bool wired,
+ int pmap_options)
+{
+#if !PMAP_CS
+#pragma unused(caller_prot)
+#endif /* !PMAP_CS */
+ kern_return_t kr;
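+ /*
+ * For a sub-page fault, fault_phys_offset must be aligned to the
+ * 4K sub-page size and fall within the full page; for a
+ * full-page fault, it must be zero.
+ */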
+ if (fault_page_size != PAGE_SIZE) {
+ DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
+ assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
+ fault_phys_offset < PAGE_SIZE),
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ } else {
+ assertf(fault_phys_offset == 0,
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ }
+
+ PMAP_ENTER_OPTIONS(pmap, vaddr,
+ fault_phys_offset,
+ m, *prot, fault_type, 0,
+ wired,
+ pmap_options,
+ kr);
+ return kr;
+}
+
+/*
+ * Enter the given page into the pmap.
+ * The map must be locked shared.
+ * The vm object must NOT be locked.
+ *
+ * @param need_retry if not null, avoid making a (potentially) blocking call into
+ * the pmap layer. When such a call would be necessary, return true in this boolean instead.
+ */
+static kern_return_t
+vm_fault_pmap_enter(
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_page_t m,
+ vm_prot_t *prot,
+ vm_prot_t caller_prot,
+ vm_prot_t fault_type,
+ bool wired,
+ int pmap_options,
+ boolean_t *need_retry)
+{
+ kern_return_t kr;
+ if (need_retry != NULL) {
+ /*
+ * Although we don't hold a lock on this object, we hold a lock
+ * on the top object in the chain. To prevent a deadlock, we
+ * can't allow the pmap layer to block.
+ */
+ pmap_options |= PMAP_OPTIONS_NOWAIT;
+ }
+ kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
+ fault_page_size, fault_phys_offset,
+ m, prot, caller_prot, fault_type, wired, pmap_options);
+ if (kr == KERN_RESOURCE_SHORTAGE) {
+ if (need_retry) {
+ /*
+ * There's nothing we can do here since we hold the
+ * lock on the top object in the chain. The caller
+ * will need to deal with this by dropping that lock and retrying.
+ */
+ *need_retry = TRUE;
+ vm_pmap_enter_retried++;
+ }
+ }
+ return kr;
+}
+
+/*
+ * Enter the given page into the pmap.
+ * The vm map must be locked shared.
+ * The vm object must be locked exclusive, unless this is a soft fault.
+ * For a soft fault, the object must be locked shared or exclusive.
+ *
+ * @param need_retry if not null, avoid making a (potentially) blocking call into
+ * the pmap layer. When such a call would be necessary, return true in this boolean instead.
+ */
+static kern_return_t
+vm_fault_pmap_enter_with_object_lock(
+ vm_object_t object,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_page_t m,
+ vm_prot_t *prot,
+ vm_prot_t caller_prot,
+ vm_prot_t fault_type,
+ bool wired,
+ int pmap_options,
+ boolean_t *need_retry)
+{
+ kern_return_t kr;
+ /*
+ * Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958>
+ */
+ kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
+ fault_page_size, fault_phys_offset,
+ m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
+#if __x86_64__
+ if (kr == KERN_INVALID_ARGUMENT &&
+ pmap == PMAP_NULL &&
+ wired) {
+ /*
+ * Wiring a page in a pmap-less VM map:
+ * VMware's "vmmon" kernel extension does this
+ * to grab pages.
+ * Let it proceed even though the PMAP_ENTER() failed.
+ */
+ kr = KERN_SUCCESS;
+ }
+#endif /* __x86_64__ */
+
+ if (kr == KERN_RESOURCE_SHORTAGE) {
+ if (need_retry) {
+ /*
+ * this will be non-null in the case where we hold the lock
+ * on the top-object in this chain... we can't just drop
+ * the lock on the object we're inserting the page into
+ * and recall the PMAP_ENTER since we can still cause
+ * a deadlock if one of the critical paths tries to
+ * acquire the lock on the top-object and we're blocked
+ * in PMAP_ENTER waiting for memory... our only recourse
+ * is to deal with it at a higher level where we can
+ * drop both locks.
+ */
+ *need_retry = TRUE;
+ vm_pmap_enter_retried++;
+ goto done;
+ }
+ /*
+ * The nonblocking version of pmap_enter did not succeed,
+ * and we don't need to drop other locks and retry
+ * at the level above us, so
+ * use the blocking version instead. This requires marking
+ * the page busy and unlocking the object.
+ */
+ boolean_t was_busy = m->vmp_busy;
+
+ vm_object_lock_assert_exclusive(object);
+
+ m->vmp_busy = TRUE;
+ vm_object_unlock(object);
+
+ PMAP_ENTER_OPTIONS(pmap, vaddr,
+ fault_phys_offset,
+ m, *prot, fault_type,
+ 0, wired,
+ pmap_options, kr);
+
+ assert(VM_PAGE_OBJECT(m) == object);
+
+ /* Take the object lock again. */
+ vm_object_lock(object);
+
+ /* If the page was busy, someone else will wake it up.
+ * Otherwise, we have to do it now. */
+ assert(m->vmp_busy);
+ if (!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ vm_pmap_enter_blocked++;
+ }
+
+done:
+ return kr;
+}
+
+/*
+ * Prepare to enter a page into the pmap by checking CS, protection bits,
+ * and setting mapped bits on the page_t.
+ * Does not modify the page's paging queue.
+ *
+ * page queue lock must NOT be held
+ * m->vmp_object must be locked
+ *
+ * NOTE: m->vmp_object could be locked "shared" only if we are called
+ * from vm_fault() as part of a soft fault.
+ */
+static kern_return_t
+vm_fault_enter_prepare(
+ vm_page_t m,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_prot_t *prot,
+ vm_prot_t caller_prot,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ boolean_t change_wiring,
+ vm_prot_t fault_type,
+ vm_object_fault_info_t fault_info,
+ int *type_of_fault,
+ bool *page_needs_data_sync)
+{
+ kern_return_t kr;
+ bool is_tainted = false;
+ vm_object_t object;
+ boolean_t cs_bypass = fault_info->cs_bypass;
+
+ object = VM_PAGE_OBJECT(m);
+
+ vm_object_lock_assert_held(object);
+
+#if KASAN
+ if (pmap == kernel_pmap) {
+ kasan_notify_address(vaddr, PAGE_SIZE);
+ }
+#endif
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+
+ if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
+ vm_object_lock_assert_exclusive(object);
+ } else if ((fault_type & VM_PROT_WRITE) == 0 &&
+ !change_wiring &&
+ (!m->vmp_wpmapped
+#if VM_OBJECT_ACCESS_TRACKING
+ || object->access_tracking
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+ )) {
+ /*
+ * This is not a "write" fault, so we
+ * might not have taken the object lock
+ * exclusively and we might not be able
+ * to update the "wpmapped" bit in
+ * vm_fault_enter().
+ * Let's just grant read access to
+ * the page for now and we'll
+ * soft-fault again if we need write
+ * access later...
+ */
+
+ /* This had better not be a JIT page. */
+ if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
+ *prot &= ~VM_PROT_WRITE;
+ } else {
+ assert(cs_bypass);
+ }
+ }
+ if (m->vmp_pmapped == FALSE) {
+ if (m->vmp_clustered) {
+ if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
/*
- * If we are faulting for a write, we can clear
- * the execute bit - that will ensure the page is
- * checked again before being executable, which
- * protects against a map switch.
- * This only happens the first time the page
- * gets tainted, so we won't get stuck here
- * to make an already writeable page executable.
+ * found it in the cache, but this
+ * is the first fault-in of the page (m->vmp_pmapped == FALSE)
+ * so it must have come in as part of
+ * a cluster... account 1 pagein against it
*/
- if (!cs_bypass) {
- assert(!pmap_has_prot_policy(prot));
- prot &= ~VM_PROT_EXECUTE;
+ if (object->internal) {
+ *type_of_fault = DBG_PAGEIND_FAULT;
+ } else {
+ *type_of_fault = DBG_PAGEINV_FAULT;
}
+
+ VM_PAGE_COUNT_AS_PAGEIN(m);
+ }
+ VM_PAGE_CONSUME_CLUSTERED(m);
+ }
+ }
+
+ if (*type_of_fault != DBG_COW_FAULT) {
+ DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
+
+ if (pmap == kernel_pmap) {
+ DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
+ }
+ }
+
+ kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
+ *prot, caller_prot, fault_page_size, fault_phys_offset,
+ fault_info, &is_tainted);
+ if (kr == KERN_SUCCESS) {
+ /*
+ * We either have a good page, or a tainted page that has been accepted by the process.
+ * In both cases the page will be entered into the pmap.
+ */
+ *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
+ if ((fault_type & VM_PROT_WRITE) && is_tainted) {
+ /*
+ * This page is tainted but we're inserting it anyways.
+ * Since it's writeable, we need to disconnect it from other pmaps
+ * now so those processes can take note.
+ */
+
+ /*
+ * We can only get here
+ * because of the CSE logic
+ */
+ assert(pmap_get_vm_map_cs_enforced(pmap));
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ /*
+ * If we are faulting for a write, we can clear
+ * the execute bit - that will ensure the page is
+ * checked again before being executable, which
+ * protects against a map switch.
+ * This only happens the first time the page
+ * gets tainted, so we won't get stuck here
+ * to make an already writeable page executable.
+ */
+ if (!cs_bypass) {
+ assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot));
+ *prot &= ~VM_PROT_EXECUTE;
}
}
assert(VM_PAGE_OBJECT(m) == object);
}
}
#endif /* VM_OBJECT_ACCESS_TRACKING */
+ }
-#if PMAP_CS
- /*
- * If CS enforcement is on, we don't ask for an executable page if the
- * fault does not call for execution, because that can fail in
- * situations where the caller only actually wanted read access.
- * However, it may be better to instead retry without execute on
- * failure, or pass a flag into pmap_enter to do the right thing.
- */
- // TODO: <rdar://problem/30997388> maybe do something better than masking out VM_PROT_EXECUTE on non-execute faults
- if (pmap_cs_enforced(pmap) && !(caller_prot & VM_PROT_EXECUTE)) {
- prot &= ~VM_PROT_EXECUTE;
- }
-#endif
-
- /* Prevent a deadlock by not
- * holding the object lock if we need to wait for a page in
- * pmap_enter() - <rdar://problem/7138958> */
- PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
- wired,
- pmap_options | PMAP_OPTIONS_NOWAIT,
- pe_result);
-#if __x86_64__
- if (pe_result == KERN_INVALID_ARGUMENT &&
- pmap == PMAP_NULL &&
- wired) {
- /*
- * Wiring a page in a pmap-less VM map:
- * VMware's "vmmon" kernel extension does this
- * to grab pages.
- * Let it proceed even though the PMAP_ENTER() failed.
- */
- pe_result = KERN_SUCCESS;
- }
-#endif /* __x86_64__ */
-
- if (pe_result == KERN_RESOURCE_SHORTAGE) {
- if (need_retry) {
- /*
- * this will be non-null in the case where we hold the lock
- * on the top-object in this chain... we can't just drop
- * the lock on the object we're inserting the page into
- * and recall the PMAP_ENTER since we can still cause
- * a deadlock if one of the critical paths tries to
- * acquire the lock on the top-object and we're blocked
- * in PMAP_ENTER waiting for memory... our only recourse
- * is to deal with it at a higher level where we can
- * drop both locks.
- */
- *need_retry = TRUE;
- vm_pmap_enter_retried++;
- goto after_the_pmap_enter;
- }
- /* The nonblocking version of pmap_enter did not succeed.
- * and we don't need to drop other locks and retry
- * at the level above us, so
- * use the blocking version instead. Requires marking
- * the page busy and unlocking the object */
- boolean_t was_busy = m->vmp_busy;
+ return kr;
+}
- vm_object_lock_assert_exclusive(object);
+/*
+ * page queue lock must NOT be held
+ * m->vmp_object must be locked
+ *
+ * NOTE: m->vmp_object could be locked "shared" only if we are called
+ * from vm_fault() as part of a soft fault. If so, we must be
+ * careful not to modify the VM object in any way that is not
+ * legal under a shared lock...
+ */
+kern_return_t
+vm_fault_enter(
+ vm_page_t m,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
+ vm_prot_t prot,
+ vm_prot_t caller_prot,
+ boolean_t wired,
+ boolean_t change_wiring,
+ vm_tag_t wire_tag,
+ vm_object_fault_info_t fault_info,
+ boolean_t *need_retry,
+ int *type_of_fault)
+{
+ kern_return_t kr;
+ vm_object_t object;
+ bool page_needs_data_sync;
+ vm_prot_t fault_type;
+ int pmap_options = fault_info->pmap_options;
- m->vmp_busy = TRUE;
- vm_object_unlock(object);
+ if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
+ assert(m->vmp_fictitious);
+ return KERN_SUCCESS;
+ }
- PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
- 0, wired,
- pmap_options, pe_result);
+ fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
- assert(VM_PAGE_OBJECT(m) == object);
+ kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
+ fault_page_size, fault_phys_offset, change_wiring, fault_type,
+ fault_info, type_of_fault, &page_needs_data_sync);
+ object = VM_PAGE_OBJECT(m);
- /* Take the object lock again. */
- vm_object_lock(object);
+ vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
- /* If the page was busy, someone else will wake it up.
- * Otherwise, we have to do it now. */
- assert(m->vmp_busy);
- if (!was_busy) {
- PAGE_WAKEUP_DONE(m);
- }
- vm_pmap_enter_blocked++;
+ if (kr == KERN_SUCCESS) {
+ if (page_needs_data_sync) {
+ pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
}
- kr = pe_result;
+ kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
+ fault_page_size, fault_phys_offset, m,
+ &prot, caller_prot, fault_type, wired, pmap_options, need_retry);
}
-after_the_pmap_enter:
return kr;
}
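
/*
 * Usage sketch (illustrative only, error handling elided):
 * vm_fault_enter_prepare() is meant to be paired with one of the
 * pmap-enter helpers while honoring the lock contract above:
 *
 *	bool needs_sync;
 *	kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
 *	    fault_page_size, fault_phys_offset, change_wiring,
 *	    fault_type, fault_info, &type_of_fault, &needs_sync);
 *	if (kr == KERN_SUCCESS) {
 *		if (needs_sync) {
 *			pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
 *		}
 *		kr = vm_fault_pmap_enter_with_object_lock(object, pmap,
 *		    vaddr, fault_page_size, fault_phys_offset, m, &prot,
 *		    caller_prot, fault_type, wired, pmap_options,
 *		    need_retry);
 *	}
 *
 * vm_fault_enter() above and the zero-fill fast path in
 * vm_fault_internal() below both follow this pattern.
 */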
void
-vm_pre_fault(vm_map_offset_t vaddr)
+vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
{
if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
vm_fault(current_map(), /* map */
vaddr, /* vaddr */
- VM_PROT_READ, /* fault_type */
+ prot, /* fault_type */
FALSE, /* change_wiring */
VM_KERN_MEMORY_NONE, /* tag - not wiring */
THREAD_UNINT, /* interruptible */
* and deallocated when leaving vm_fault.
*/
-extern int _map_enter_debug;
extern uint64_t get_current_unique_pid(void);
unsigned long vm_fault_collapse_total = 0;
pmap_t caller_pmap,
vm_map_offset_t caller_pmap_addr)
{
- return vm_fault_internal(map, vaddr, fault_type, change_wiring, vm_tag_bt(),
+ return vm_fault_internal(map, vaddr, fault_type, change_wiring,
+ change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
interruptible, caller_pmap, caller_pmap_addr,
NULL);
}
NULL);
}
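
/*
 * vm_tag_bt() derives a wire tag by walking the kernel backtrace,
 * which is comparatively expensive; computing it only when
 * change_wiring is set keeps that cost off the ordinary fault path,
 * where no wire tag is needed (VM_KERN_MEMORY_NONE).
 */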
+static boolean_t
+current_proc_is_privileged(void)
+{
+ return csproc_get_platform_binary(current_proc());
+}
+
+uint64_t vm_copied_on_read = 0;
+
+/*
+ * Cleanup after a vm_fault_enter.
+ * At this point, the fault should either have failed (kr != KERN_SUCCESS)
+ * or the page should be in the pmap and on the correct paging queue.
+ *
+ * Precondition:
+ * map must be locked shared.
+ * m_object must be locked.
+ * If top_object != VM_OBJECT_NULL, it must be locked.
+ * real_map must be locked.
+ *
+ * Postcondition:
+ * map will be unlocked
+ * m_object will be unlocked
+ * top_object will be unlocked
+ * If real_map != map, it will be unlocked
+ */
+static void
+vm_fault_complete(
+ vm_map_t map,
+ vm_map_t real_map,
+ vm_object_t object,
+ vm_object_t m_object,
+ vm_page_t m,
+ vm_map_offset_t offset,
+ vm_map_offset_t trace_real_vaddr,
+ vm_object_fault_info_t fault_info,
+ vm_prot_t caller_prot,
+#if CONFIG_DTRACE
+ vm_map_offset_t real_vaddr,
+#else
+ __unused vm_map_offset_t real_vaddr,
+#endif /* CONFIG_DTRACE */
+ int type_of_fault,
+ boolean_t need_retry,
+ kern_return_t kr,
+ ppnum_t *physpage_p,
+ vm_prot_t prot,
+ vm_object_t top_object,
+ boolean_t need_collapse,
+ vm_map_offset_t cur_offset,
+ vm_prot_t fault_type,
+ vm_object_t *written_on_object,
+ memory_object_t *written_on_pager,
+ vm_object_offset_t *written_on_offset)
+{
+ int event_code = 0;
+ vm_map_lock_assert_shared(map);
+ vm_object_lock_assert_held(m_object);
+ if (top_object != VM_OBJECT_NULL) {
+ vm_object_lock_assert_held(top_object);
+ }
+ vm_map_lock_assert_held(real_map);
+
+ if (m_object->internal) {
+ event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
+ } else if (m_object->object_is_shared_cache) {
+ event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
+ } else {
+ event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
+ }
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
+ if (need_retry == FALSE) {
+ KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0);
+ }
+ DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
+ if (kr == KERN_SUCCESS &&
+ physpage_p != NULL) {
+ /* for vm_map_wire_and_extract() */
+ *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
+ if (prot & VM_PROT_WRITE) {
+ vm_object_lock_assert_exclusive(m_object);
+ m->vmp_dirty = TRUE;
+ }
+ }
+
+ if (top_object != VM_OBJECT_NULL) {
+ /*
+ * It's safe to drop the top object
+ * now that we've done our
+ * vm_fault_enter(). Any other fault
+ * in progress for that virtual
+ * address will either find our page
+ * and translation or put in a new page
+ * and translation.
+ */
+ vm_object_unlock(top_object);
+ top_object = VM_OBJECT_NULL;
+ }
+
+ if (need_collapse == TRUE) {
+ vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
+ }
+
+ if (need_retry == FALSE &&
+ (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
+ /*
+ * evaluate access pattern and update state
+ * vm_fault_deactivate_behind depends on the
+ * state being up to date
+ */
+ vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
+
+ vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
+ }
+ /*
+ * That's it, clean up and return.
+ */
+ if (m->vmp_busy) {
+ vm_object_lock_assert_exclusive(m_object);
+ PAGE_WAKEUP_DONE(m);
+ }
+
+ if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
+ vm_object_paging_begin(m_object);
+
+ assert(*written_on_object == VM_OBJECT_NULL);
+ *written_on_object = m_object;
+ *written_on_pager = m_object->pager;
+ *written_on_offset = m_object->paging_offset + m->vmp_offset;
+ }
+ vm_object_unlock(object);
+
+ vm_map_unlock_read(map);
+ if (real_map != map) {
+ vm_map_unlock(real_map);
+ }
+}
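+
+/*
+ * Note for callers: on the "external object written" path above,
+ * vm_fault_complete() leaves *written_on_object holding a paging
+ * reference (vm_object_paging_begin()); the fault's exit path is
+ * expected to notify the pager of the write and balance it with
+ * vm_object_paging_end() once all locks have been dropped.
+ */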
+
+static inline int
+vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
+{
+ if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
+ return DBG_COR_FAULT;
+ }
+ return type_of_fault;
+}
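+
+/*
+ * e.g. a fault that went down the copy-on-write path only because the
+ * copy-on-read policy set "need_copy" is reported as DBG_COR_FAULT
+ * rather than DBG_COW_FAULT, so the two cases can be told apart in
+ * kdebug traces.
+ */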
+
kern_return_t
vm_fault_internal(
vm_map_t map,
wait_interrupt_t interruptible_state;
vm_map_t real_map = map;
vm_map_t original_map = map;
- boolean_t object_locks_dropped = FALSE;
+ bool object_locks_dropped = FALSE;
vm_prot_t fault_type;
vm_prot_t original_fault_type;
struct vm_object_fault_info fault_info = {};
- boolean_t need_collapse = FALSE;
+ bool need_collapse = FALSE;
boolean_t need_retry = FALSE;
boolean_t *need_retry_ptr = NULL;
- int object_lock_type = 0;
- int cur_object_lock_type;
+ uint8_t object_lock_type = 0;
+ uint8_t cur_object_lock_type;
vm_object_t top_object = VM_OBJECT_NULL;
vm_object_t written_on_object = VM_OBJECT_NULL;
memory_object_t written_on_pager = NULL;
vm_object_offset_t written_on_offset = 0;
int throttle_delay;
int compressed_count_delta;
- int grab_options;
+ uint8_t grab_options;
+ bool need_copy;
+ bool need_copy_on_read;
vm_map_offset_t trace_vaddr;
vm_map_offset_t trace_real_vaddr;
-#if DEVELOPMENT || DEBUG
+ vm_map_size_t fault_page_size;
+ vm_map_size_t fault_page_mask;
+ vm_map_offset_t fault_phys_offset;
vm_map_offset_t real_vaddr;
+ bool resilient_media_retry = FALSE;
+ vm_object_t resilient_media_object = VM_OBJECT_NULL;
+ vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
+ bool page_needs_data_sync = false;
+ /*
+ * Was the VM object contended when vm_map_lookup_locked locked it?
+ * If so, the zero fill path will drop the lock.
+ * NB: Ideally we would always drop the lock rather than rely on
+ * this heuristic, but vm_object_unlock currently takes > 30 cycles.
+ */
+ bool object_is_contended = false;
real_vaddr = vaddr;
-#endif /* DEVELOPMENT || DEBUG */
trace_real_vaddr = vaddr;
- vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
+
+ if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
+ fault_phys_offset = (vm_map_offset_t)-1;
+ fault_page_size = VM_MAP_PAGE_SIZE(original_map);
+ fault_page_mask = VM_MAP_PAGE_MASK(original_map);
+ if (fault_page_size < PAGE_SIZE) {
+ DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
+ vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
+ }
+ } else {
+ fault_phys_offset = 0;
+ fault_page_size = PAGE_SIZE;
+ fault_page_mask = PAGE_MASK;
+ vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
+ }
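+
+ /*
+ * Example (illustrative): on a 16K-page kernel, a fault at
+ * vaddr 0x7000 in a 4K submap truncates to 0x7000 (4K mask)
+ * rather than 0x4000 (16K mask). Once the object offset is
+ * known, fault_phys_offset selects the 4K chunk within the
+ * 16K physical page:
+ *
+ *	fault_phys_offset = offset - vm_map_trunc_page(offset, PAGE_MASK);
+ *	// offset 0x13000 -> 0x13000 - 0x10000 == 0x3000,
+ *	// i.e. the fourth 4K chunk of the 16K page
+ */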
if (map == kernel_map) {
trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
}
thread_t cthread = current_thread();
- boolean_t rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
+ bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
uint64_t fstart = 0;
if (rtfault) {
current_task()->faults++;
original_fault_type = fault_type;
+ need_copy = FALSE;
if (fault_type & VM_PROT_WRITE) {
+ need_copy = TRUE;
+ }
+
+ if (need_copy || change_wiring) {
object_lock_type = OBJECT_LOCK_EXCLUSIVE;
} else {
object_lock_type = OBJECT_LOCK_SHARED;
map = original_map;
vm_map_lock_read(map);
- kr = vm_map_lookup_locked(&map, vaddr, fault_type,
+ if (resilient_media_retry) {
+ /*
+ * If we have to insert a fake zero-filled page to hide
+ * a media failure to provide the real page, we need to
+ * resolve any pending copy-on-write on this mapping.
+ * VM_PROT_COPY tells vm_map_lookup_locked() to deal
+ * with that even if this is not a "write" fault.
+ */
+ need_copy = TRUE;
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ }
+
+ kr = vm_map_lookup_locked(&map, vaddr,
+ (fault_type | (need_copy ? VM_PROT_COPY : 0)),
object_lock_type, &version,
&object, &offset, &prot, &wired,
&fault_info,
- &real_map);
+ &real_map,
+ &object_is_contended);
if (kr != KERN_SUCCESS) {
vm_map_unlock_read(map);
goto done;
}
+
pmap = real_map->pmap;
fault_info.interruptible = interruptible;
fault_info.stealth = FALSE;
fault_info.mark_zf_absent = FALSE;
fault_info.batch_pmap_op = FALSE;
+ if (resilient_media_retry) {
+ /*
+ * We're retrying this fault after having detected a media
+ * failure from a "resilient_media" mapping.
+ * Check that the mapping is still pointing at the object
+ * that just failed to provide a page.
+ */
+ assert(resilient_media_object != VM_OBJECT_NULL);
+ assert(resilient_media_offset != (vm_object_offset_t)-1);
+ if (object != VM_OBJECT_NULL &&
+ object == resilient_media_object &&
+ offset == resilient_media_offset &&
+ fault_info.resilient_media) {
+ /*
+ * This mapping still points at the same object
+ * and is still "resilient_media": proceed in
+ * "recovery-from-media-failure" mode, where we'll
+ * insert a zero-filled page in the top object.
+ */
+// printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
+ } else {
+ /* not recovering: reset state */
+// printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
+ resilient_media_retry = FALSE;
+ /* release our extra reference on failed object */
+// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
+ vm_object_deallocate(resilient_media_object);
+ resilient_media_object = VM_OBJECT_NULL;
+ resilient_media_offset = (vm_object_offset_t)-1;
+ }
+ } else {
+ assert(resilient_media_object == VM_OBJECT_NULL);
+ resilient_media_offset = (vm_object_offset_t)-1;
+ }
+
/*
* If the page is wired, we must fault for the current protection
* value, to avoid further faults.
*/
if (wired) {
fault_type = prot | VM_PROT_WRITE;
+ }
+ if (wired || need_copy) {
/*
* since we're treating this fault as a 'write'
* we must hold the top object lock exclusively
}
#endif
+ fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
+
/*
* If this page is to be inserted in a copy delay object
* for writing, and if the object has a copy, then the
break;
}
- m = vm_page_lookup(cur_object, cur_offset);
+ m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
m_object = NULL;
if (m != VM_PAGE_NULL) {
}
assert(m_object == VM_PAGE_OBJECT(m));
- if (VM_FAULT_NEED_CS_VALIDATION(map->pmap, m, m_object) ||
+ if (vm_fault_cs_need_validation(map->pmap, m, m_object,
+ PAGE_SIZE, 0) ||
(physpage_p != NULL && (prot & VM_PROT_WRITE))) {
-upgrade_for_validation:
+upgrade_lock_and_retry:
/*
* We might need to validate this page
* against its code signature, so we
goto FastPmapEnter;
}
- if ((fault_type & VM_PROT_WRITE) == 0) {
- if (!pmap_has_prot_policy(prot)) {
+ if (!need_copy &&
+ !fault_info.no_copy_on_read &&
+ cur_object != object &&
+ !cur_object->internal &&
+ !cur_object->pager_trusted &&
+ vm_protect_privileged_from_untrusted &&
+ !((prot & VM_PROT_EXECUTE) &&
+ cur_object->code_signed &&
+ pmap_get_vm_map_cs_enforced(caller_pmap ? caller_pmap : pmap)) &&
+ current_proc_is_privileged()) {
+ /*
+ * We're faulting on a page in "object" and
+ * went down the shadow chain to "cur_object"
+ * to find out that "cur_object"'s pager
+ * is not "trusted", i.e. we can not trust it
+ * to always return the same contents.
+ * Since the target is a "privileged" process,
+ * let's treat this as a copy-on-read fault, as
+ * if it was a copy-on-write fault.
+ * Once "object" gets a copy of this page, it
+ * won't have to rely on "cur_object" to
+ * provide the contents again.
+ *
+ * This is done by setting "need_copy" and
+ * retrying the fault from the top with the
+ * appropriate locking.
+ *
+ * Special case: if the mapping is executable
+ * and the untrusted object is code-signed and
+ * the process is "cs_enforced", we do not
+ * copy-on-read because that would break
+ * code-signing enforcement expectations (an
+ * executable page must belong to a code-signed
+ * object) and we can rely on code-signing
+ * to re-validate the page if it gets evicted
+ * and paged back in.
+ */
+// printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
+ vm_copied_on_read++;
+ need_copy = TRUE;
+
+ vm_object_unlock(object);
+ vm_object_unlock(cur_object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ vm_map_unlock_read(map);
+ if (real_map != map) {
+ vm_map_unlock(real_map);
+ }
+ goto RetryFault;
+ }
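+
+ /*
+ * Condensed form of the guard above (illustrative only):
+ *
+ *	copy_on_read = !need_copy && !fault_info.no_copy_on_read &&
+ *	    cur_object != object &&        // page found down the shadow chain
+ *	    !cur_object->internal &&       // pager-backed
+ *	    !cur_object->pager_trusted &&  // pager may change contents
+ *	    vm_protect_privileged_from_untrusted &&
+ *	    current_proc_is_privileged() &&
+ *	    !(executable && code_signed && cs_enforced); // CS carve-out
+ */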
+
+ if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
+ if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
prot &= ~VM_PROT_WRITE;
} else {
/*
need_retry_ptr = NULL;
}
+ if (fault_page_size < PAGE_SIZE) {
+ DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
+ assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
+ fault_phys_offset < PAGE_SIZE),
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ } else {
+ assertf(fault_phys_offset == 0,
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ }
+
if (caller_pmap) {
kr = vm_fault_enter(m,
caller_pmap,
caller_pmap_addr,
+ fault_page_size,
+ fault_phys_offset,
prot,
caller_prot,
wired,
kr = vm_fault_enter(m,
pmap,
vaddr,
+ fault_page_size,
+ fault_phys_offset,
prot,
caller_prot,
wired,
need_retry_ptr,
&type_of_fault);
}
-#if DEVELOPMENT || DEBUG
- {
- int event_code = 0;
-
- if (m_object->internal) {
- event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
- } else if (m_object->object_is_shared_cache) {
- event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
- } else {
- event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
- }
-
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
-
- DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
- }
-#endif
- if (kr == KERN_SUCCESS &&
- physpage_p != NULL) {
- /* for vm_map_wire_and_extract() */
- *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
- if (prot & VM_PROT_WRITE) {
- vm_object_lock_assert_exclusive(m_object);
- m->vmp_dirty = TRUE;
- }
- }
-
- if (top_object != VM_OBJECT_NULL) {
- /*
- * It's safe to drop the top object
- * now that we've done our
- * vm_fault_enter(). Any other fault
- * in progress for that virtual
- * address will either find our page
- * and translation or put in a new page
- * and translation.
- */
- vm_object_unlock(top_object);
- top_object = VM_OBJECT_NULL;
- }
-
- if (need_collapse == TRUE) {
- vm_object_collapse(object, offset, TRUE);
- }
-
- if (need_retry == FALSE &&
- (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
- /*
- * evaluate access pattern and update state
- * vm_fault_deactivate_behind depends on the
- * state being up to date
- */
- vm_fault_is_sequential(m_object, cur_offset, fault_info.behavior);
-
- vm_fault_deactivate_behind(m_object, cur_offset, fault_info.behavior);
- }
- /*
- * That's it, clean up and return.
- */
- if (m->vmp_busy) {
- PAGE_WAKEUP_DONE(m);
- }
-
- if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
- vm_object_paging_begin(m_object);
-
- assert(written_on_object == VM_OBJECT_NULL);
- written_on_object = m_object;
- written_on_pager = m_object->pager;
- written_on_offset = m_object->paging_offset + m->vmp_offset;
- }
- vm_object_unlock(object);
-
- vm_map_unlock_read(map);
- if (real_map != map) {
- vm_map_unlock(real_map);
- }
+ vm_fault_complete(
+ map,
+ real_map,
+ object,
+ m_object,
+ m,
+ offset,
+ trace_real_vaddr,
+ &fault_info,
+ caller_prot,
+ real_vaddr,
+ vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
+ need_retry,
+ kr,
+ physpage_p,
+ prot,
+ top_object,
+ need_collapse,
+ cur_offset,
+ fault_type,
+ &written_on_object,
+ &written_on_pager,
+ &written_on_offset);
+ top_object = VM_OBJECT_NULL;
if (need_retry == TRUE) {
/*
* vm_fault_enter couldn't complete the PMAP_ENTER...
assert(m_object == VM_PAGE_OBJECT(m));
if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
- VM_FAULT_NEED_CS_VALIDATION(NULL, m, m_object)) {
- goto upgrade_for_validation;
+ vm_fault_cs_need_validation(NULL, m, m_object,
+ PAGE_SIZE, 0)) {
+ goto upgrade_lock_and_retry;
}
/*
* the page copy.
*/
vm_page_copy(cur_m, m);
- vm_page_insert(m, object, offset);
+ vm_page_insert(m, object, vm_object_trunc_page(offset));
+ if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
+ DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
+ }
m_object = object;
SET_PAGE_DIRTY(m, FALSE);
*/
if (object->ref_count > 1 && cur_m->vmp_pmapped) {
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
+ } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
+ /*
+ * We've copied the full 16K page but we're
+ * about to call vm_fault_enter() only for
+ * the 4K chunk we're faulting on. The other
+ * three 4K chunks in that page could still
+ * be pmapped in this pmap.
+ * Since the VM object layer thinks that the
+ * entire page has been dealt with and the
+ * original page might no longer be needed,
+ * it might collapse/bypass the original VM
+ * object and free its pages, which would be
+ * bad (and would trigger pmap_verify_free()
+ * assertions) if the other 4K chunks are still
+ * pmapped.
+ */
+ /*
+ * XXX FBDP TODO4K: to be revisited
+ * Technically, we need to pmap_disconnect()
+ * only the target pmap's mappings for the 4K
+ * chunks of this 16K VM page. If other pmaps
+ * have PTEs on these chunks, that means that
+ * the associated VM map must have a reference
+ * on the VM object, so no need to worry about
+ * those.
+ * pmap_protect() for each 4K chunk would be
+ * better but we'd have to check which chunks
+ * are actually mapped before and after this
+ * one.
+ * A full-blown pmap_disconnect() is easier
+ * for now but not efficient.
+ */
+ DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
}
if (cur_m->vmp_clustered) {
* No page at cur_object, cur_offset... m == NULL
*/
if (cur_object->pager_created) {
- int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
+ vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
int my_fault_type;
- int c_flags = C_DONT_BLOCK;
- boolean_t insert_cur_object = FALSE;
+ uint8_t c_flags = C_DONT_BLOCK;
+ bool insert_cur_object = FALSE;
/*
* May have to talk to a pager...
* so no need to take a
* "paging_in_progress" reference.
*/
- boolean_t shared_lock;
+ bool shared_lock;
if ((object == cur_object &&
object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
(object != cur_object &&
kr = vm_compressor_pager_get(
cur_object->pager,
- (cur_offset +
- cur_object->paging_offset),
+ (vm_object_trunc_page(cur_offset)
+ + cur_object->paging_offset),
VM_PAGE_GET_PHYS_PAGE(m),
&my_fault_type,
c_flags,
if (kr != KERN_SUCCESS) {
vm_page_release(m, FALSE);
m = VM_PAGE_NULL;
+ }
+ /*
+ * If vm_compressor_pager_get() returns
+ * KERN_MEMORY_FAILURE, then the
+ * compressed data is permanently lost,
+ * so return this error immediately.
+ */
+ if (kr == KERN_MEMORY_FAILURE) {
+ if (object != cur_object) {
+ vm_object_unlock(cur_object);
+ }
+ vm_object_unlock(object);
+ vm_map_unlock_read(map);
+ if (real_map != map) {
+ vm_map_unlock(real_map);
+ }
+ goto done;
+ } else if (kr != KERN_SUCCESS) {
break;
}
m->vmp_dirty = TRUE;
}
if (insert_cur_object) {
- vm_page_insert(m, cur_object, cur_offset);
+ vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
m_object = cur_object;
} else {
- vm_page_insert(m, object, offset);
+ vm_page_insert(m, object, vm_object_trunc_page(offset));
m_object = object;
}
type_of_fault = my_fault_type;
- VM_STAT_INCR(decompressions);
+ VM_STAT_DECOMPRESSIONS();
if (cur_object != object) {
if (insert_cur_object) {
* that the pager doesn't have this page
*/
}
- if (cur_object->shadow == VM_OBJECT_NULL) {
+ if (cur_object->shadow == VM_OBJECT_NULL ||
+ resilient_media_retry) {
/*
* Zero fill fault. Page gets
* inserted into the original object.
goto RetryFault;
}
}
- m = vm_page_alloc(object, offset);
+ if (!object->internal) {
+ panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
+ }
+ m = vm_page_alloc(object, vm_object_trunc_page(offset));
m_object = NULL;
if (m == VM_PAGE_NULL) {
m_object = object;
/*
- * Now zero fill page...
- * the page is probably going to
- * be written soon, so don't bother
- * to clear the modified bit
+ * Zeroing the page and entering it into the pmap
+ * represent a significant amount of the zero fill fault handler's work.
*
- * NOTE: This code holds the map
- * lock across the zero fill.
+ * To improve fault scalability, we'll drop the object lock if it
+ * appeared contended at lookup time, now that we've inserted the
+ * page into the VM object.
+ * Before dropping the lock, we need to check protection bits and set the
+ * mapped bits on the page. Then we can mark the page busy, drop the lock,
+ * zero it, and do the pmap enter. We'll need to reacquire the lock
+ * to clear the busy bit and wake up any waiters.
*/
- type_of_fault = vm_fault_zero_page(m, map->no_zero_fill);
+ vm_fault_cs_clear(m);
+ m->vmp_pmapped = TRUE;
+ if (map->no_zero_fill) {
+ type_of_fault = DBG_NZF_PAGE_FAULT;
+ } else {
+ type_of_fault = DBG_ZERO_FILL_FAULT;
+ }
+ {
+ pmap_t destination_pmap;
+ vm_map_offset_t destination_pmap_vaddr;
+ vm_prot_t enter_fault_type;
+ if (caller_pmap) {
+ destination_pmap = caller_pmap;
+ destination_pmap_vaddr = caller_pmap_addr;
+ } else {
+ destination_pmap = pmap;
+ destination_pmap_vaddr = vaddr;
+ }
+ if (change_wiring) {
+ enter_fault_type = VM_PROT_NONE;
+ } else {
+ enter_fault_type = caller_prot;
+ }
+ kr = vm_fault_enter_prepare(m,
+ destination_pmap,
+ destination_pmap_vaddr,
+ &prot,
+ caller_prot,
+ fault_page_size,
+ fault_phys_offset,
+ change_wiring,
+ enter_fault_type,
+ &fault_info,
+ &type_of_fault,
+ &page_needs_data_sync);
+ if (kr != KERN_SUCCESS) {
+ goto zero_fill_cleanup;
+ }
- goto FastPmapEnter;
+ if (object_is_contended) {
+ /*
+ * At this point the page is in the vm object, but not on a paging queue.
+ * Since it's accessible to another thread but its contents are invalid
+ * (it hasn't been zeroed) mark it busy before dropping the object lock.
+ */
+ m->vmp_busy = TRUE;
+ vm_object_unlock(object);
+ }
+ if (type_of_fault == DBG_ZERO_FILL_FAULT) {
+ /*
+ * Now zero fill page...
+ * the page is probably going to
+ * be written soon, so don't bother
+ * to clear the modified bit
+ *
+ * NOTE: This code holds the map
+ * lock across the zero fill.
+ */
+ vm_page_zero_fill(m);
+ VM_STAT_INCR(zero_fill_count);
+ DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
+ }
+ if (page_needs_data_sync) {
+ pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
+ }
+
+ if (top_object != VM_OBJECT_NULL) {
+ need_retry_ptr = &need_retry;
+ } else {
+ need_retry_ptr = NULL;
+ }
+ if (object_is_contended) {
+ kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
+ fault_page_size, fault_phys_offset,
+ m, &prot, caller_prot, enter_fault_type, wired,
+ fault_info.pmap_options, need_retry_ptr);
+ vm_object_lock(object);
+ } else {
+ kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
+ fault_page_size, fault_phys_offset,
+ m, &prot, caller_prot, enter_fault_type, wired,
+ fault_info.pmap_options, need_retry_ptr);
+ }
+ }
+zero_fill_cleanup:
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_lockspin_queues();
+ if (!VM_DYNAMIC_PAGING_ENABLED()) {
+ vm_fault_enqueue_throttled_locked(m);
+ }
+ vm_page_unlock_queues();
+ }
+ vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr);
+
+ vm_fault_complete(
+ map,
+ real_map,
+ object,
+ m_object,
+ m,
+ offset,
+ trace_real_vaddr,
+ &fault_info,
+ caller_prot,
+ real_vaddr,
+ type_of_fault,
+ need_retry,
+ kr,
+ physpage_p,
+ prot,
+ top_object,
+ need_collapse,
+ cur_offset,
+ fault_type,
+ &written_on_object,
+ &written_on_pager,
+ &written_on_offset);
+ top_object = VM_OBJECT_NULL;
+ if (need_retry == TRUE) {
+ /*
+ * vm_fault_enter couldn't complete the PMAP_ENTER...
+ * at this point we don't hold any locks so it's safe
+ * to ask the pmap layer to expand the page table to
+ * accommodate this mapping... once expanded, we'll
+ * re-drive the fault which should result in vm_fault_enter
+ * being able to successfully enter the mapping this time around
+ */
+ (void)pmap_enter_options(
+ pmap, vaddr, 0, 0, 0, 0, 0,
+ PMAP_OPTIONS_NOENTER, NULL);
+
+ need_retry = FALSE;
+ goto RetryFault;
+ }
+ goto done;
}
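
/*
 * Sequence sketch for the contended zero-fill case above:
 *
 *	vm_fault_enter_prepare(...);	// checks done while lock is held
 *	m->vmp_busy = TRUE;		// page visible, contents not valid yet
 *	vm_object_unlock(object);
 *	vm_page_zero_fill(m);		// zero without the object lock
 *	vm_fault_pmap_enter(...);	// pmap enter, lock not held
 *	vm_object_lock(object);		// reacquire for cleanup
 *	...
 *	vm_fault_complete(...);		// clears vmp_busy, wakes waiters
 *
 * The uncontended case keeps the object lock throughout and calls
 * vm_fault_pmap_enter_with_object_lock() instead.
 */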
/*
* On to the next level in the shadow chain
*/
cur_offset += cur_object->vo_shadow_offset;
new_object = cur_object->shadow;
+ fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
/*
* take the new_object's lock with the indicated state
assert(object != kernel_object);
assert(object != vm_submap_object);
+ if (resilient_media_retry) {
+ /*
+ * We could get here if we failed to get a free page
+ * to zero-fill and had to take the slow path again.
+ * Reset our "recovery-from-failed-media" state.
+ */
+ assert(resilient_media_object != VM_OBJECT_NULL);
+ assert(resilient_media_offset != (vm_object_offset_t)-1);
+ /* release our extra reference on failed object */
+// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
+ vm_object_deallocate(resilient_media_object);
+ resilient_media_object = VM_OBJECT_NULL;
+ resilient_media_offset = (vm_object_offset_t)-1;
+ resilient_media_retry = FALSE;
+ }
+
/*
* Make a reference to this object to
* prevent its disposal while we are messing with
vm_object_reference_locked(object);
vm_object_paging_begin(object);
- XPR(XPR_VM_FAULT, "vm_fault -> vm_fault_page\n", 0, 0, 0, 0, 0);
-
+ set_thread_pagein_error(cthread, 0);
error_code = 0;
result_page = VM_PAGE_NULL;
*/
if (kr != VM_FAULT_SUCCESS &&
kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
- /*
- * we didn't succeed, lose the object reference immediately.
- */
- vm_object_deallocate(object);
+ if (kr == VM_FAULT_MEMORY_ERROR &&
+ fault_info.resilient_media) {
+ assertf(object->internal, "object %p", object);
+ /*
+ * This fault failed but the mapping was
+ * "media resilient", so we'll retry the fault in
+ * recovery mode to get a zero-filled page in the
+ * top object.
+ * Keep the reference on the failing object so
+ * that we can check that the mapping is still
+ * pointing to it when we retry the fault.
+ */
+// printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
+ assert(!resilient_media_retry); /* no double retry */
+ assert(resilient_media_object == VM_OBJECT_NULL);
+ assert(resilient_media_offset == (vm_object_offset_t)-1);
+ resilient_media_retry = TRUE;
+ resilient_media_object = object;
+ resilient_media_offset = offset;
+// printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_mmedia_offset);
+ goto RetryFault;
+ } else {
+ /*
+ * we didn't succeed, lose the object reference
+ * immediately.
+ */
+ vm_object_deallocate(object);
+ object = VM_OBJECT_NULL; /* no longer valid */
+ }
/*
* See why we failed, and take corrective action.
THREAD_ABORTSAFE)) {
goto RetryFault;
}
- /*
- * fall thru
- */
+ OS_FALLTHROUGH;
case VM_FAULT_INTERRUPTED:
kr = KERN_ABORTED;
goto done;
&retry_object, &retry_offset, &retry_prot,
&wired,
&fault_info,
- &real_map);
+ &real_map,
+ NULL);
pmap = real_map->pmap;
if (kr != KERN_SUCCESS) {
* Check whether the protection has changed or the object
* has been copied while we left the map unlocked.
*/
- if (pmap_has_prot_policy(retry_prot)) {
+ if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
/* If the pmap layer cares, pass the full set. */
prot = retry_prot;
} else {
* The copy object changed while the top-level object
* was unlocked, so take away write permission.
*/
- assert(!pmap_has_prot_policy(prot));
+ assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot));
prot &= ~VM_PROT_WRITE;
}
} else {
object_locks_dropped = FALSE;
}
+ if (!need_copy &&
+ !fault_info.no_copy_on_read &&
+ m != VM_PAGE_NULL &&
+ VM_PAGE_OBJECT(m) != object &&
+ !VM_PAGE_OBJECT(m)->pager_trusted &&
+ vm_protect_privileged_from_untrusted &&
+ !((prot & VM_PROT_EXECUTE) &&
+ VM_PAGE_OBJECT(m)->code_signed &&
+ pmap_get_vm_map_cs_enforced(caller_pmap ? caller_pmap : pmap)) &&
+ current_proc_is_privileged()) {
+ /*
+ * We found the page we want in an "untrusted" VM object
+ * down the shadow chain. Since the target is "privileged"
+ * we want to perform a copy-on-read of that page, so that the
+ * mapped object gets a stable copy and does not have to
+ * rely on the "untrusted" object to provide the same
+ * contents if the page gets reclaimed and has to be paged
+ * in again later on.
+ *
+ * Special case: if the mapping is executable and the untrusted
+ * object is code-signed and the process is "cs_enforced", we
+ * do not copy-on-read because that would break code-signing
+ * enforcement expectations (an executable page must belong
+ * to a code-signed object) and we can rely on code-signing
+ * to re-validate the page if it gets evicted and paged back in.
+ */
+// printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
+ vm_copied_on_read++;
+ need_copy_on_read = TRUE;
+ need_copy = TRUE;
+ } else {
+ need_copy_on_read = FALSE;
+ }
+
/*
* If we want to wire down this page, but no longer have
* adequate permissions, we must start all over.
+ * If we decided to copy-on-read, we must also start all over.
*/
- if (wired && (fault_type != (prot | VM_PROT_WRITE))) {
+ if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
+ need_copy_on_read) {
vm_map_unlock_read(map);
if (real_map != map) {
vm_map_unlock(real_map);
* the pageout queues. If the pageout daemon comes
* across the page, it will remove it from the queues.
*/
+ if (fault_page_size < PAGE_SIZE) {
+ DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
+ assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
+ fault_phys_offset < PAGE_SIZE),
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ } else {
+ assertf(fault_phys_offset == 0,
+ "0x%llx\n", (uint64_t)fault_phys_offset);
+ }
if (caller_pmap) {
kr = vm_fault_enter(m,
caller_pmap,
caller_pmap_addr,
+ fault_page_size,
+ fault_phys_offset,
prot,
caller_prot,
wired,
kr = vm_fault_enter(m,
pmap,
vaddr,
+ fault_page_size,
+ fault_phys_offset,
prot,
caller_prot,
wired,
}
assert(VM_PAGE_OBJECT(m) == m_object);
-#if DEVELOPMENT || DEBUG
{
int event_code = 0;
event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
}
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid(), 0);
+ KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0);
DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
}
-#endif
if (kr != KERN_SUCCESS) {
/* abort this page fault */
vm_map_unlock_read(map);
if (vm_map_lookup_entry(map, laddr, &entry) &&
(VME_OBJECT(entry) != NULL) &&
(VME_OBJECT(entry) == object)) {
- int superpage;
+ uint16_t superpage;
if (!object->pager_created &&
object->phys_contiguous &&
done:
thread_interrupt_level(interruptible_state);
+ if (resilient_media_object != VM_OBJECT_NULL) {
+ assert(resilient_media_retry);
+ assert(resilient_media_offset != (vm_object_offset_t)-1);
+ /* release extra reference on failed object */
+// printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
+ vm_object_deallocate(resilient_media_object);
+ resilient_media_object = VM_OBJECT_NULL;
+ resilient_media_offset = (vm_object_offset_t)-1;
+ resilient_media_retry = FALSE;
+ }
+ assert(!resilient_media_retry);
+
/*
* Only I/O throttle on faults which cause a pagein/swapin.
*/
((uint64_t)trace_vaddr >> 32),
trace_vaddr,
kr,
- type_of_fault,
+ vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
0);
+ if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
+ DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
+ }
+
return kr;
}
vm_map_offset_t va;
vm_map_offset_t end_addr = entry->vme_end;
kern_return_t rc;
+ vm_map_size_t effective_page_size;
assert(entry->in_transition);
* in the physical map.
*/
- for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+ effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
+ for (va = entry->vme_start;
+ va < end_addr;
+ va += effective_page_size) {
rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
pmap_addr + (va - entry->vme_start),
physpage_p);
vm_object_t object;
struct vm_object_fault_info fault_info = {};
unsigned int unwired_pages;
+ vm_map_size_t effective_page_size;
object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
* get their mappings from the physical map system.
*/
- for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
+ effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
+ for (va = entry->vme_start;
+ va < end_addr;
+ va += effective_page_size) {
if (object == VM_OBJECT_NULL) {
if (pmap) {
pmap_change_wiring(pmap,
vm_object_lock(object);
vm_object_paging_begin(object);
- XPR(XPR_VM_FAULT,
- "vm_fault_unwire -> vm_fault_page\n",
- 0, 0, 0, 0, 0);
result_page = VM_PAGE_NULL;
result = vm_fault_page(
object,
pmap_addr + (end_addr - entry->vme_start), TRUE);
if (kernel_object == object) {
- vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages));
+ /*
+ * Would like to make user_tag in vm_object_fault_info
+ * vm_tag_t (unsigned short), but user_tag derives its value from
+ * VME_ALIAS(entry) in a few places, and VME_ALIAS, in turn, casts
+ * to an _unsigned int_ that is used by non-fault_info paths
+ * throughout the code.
+ *
+ * So, for now, an explicit truncation to unsigned short (vm_tag_t).
+ */
+ assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
+ "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
+ vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages));
}
}
thread_t thread = current_thread();
int type_of_fault;
kern_return_t kr;
+ vm_map_size_t fault_page_size;
+ vm_map_offset_t fault_phys_offset;
struct vm_object_fault_info fault_info = {};
VM_STAT_INCR(faults);
* Look for page in top-level object. If it's not there or
* there's something going on, give up.
*/
- m = vm_page_lookup(object, offset);
+ m = vm_page_lookup(object, vm_object_trunc_page(offset));
if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
(m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
GIVE_UP;
fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
}
+ fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
+ fault_phys_offset = offset - vm_object_trunc_page(offset);
+
/*
* Put this page into the physical map.
*/
kr = vm_fault_enter(m,
pmap,
pmap_addr,
+ fault_page_size,
+ fault_phys_offset,
prot,
prot,
TRUE, /* wired */
}
fault_info_dst.cluster_size = cluster_size;
- XPR(XPR_VM_FAULT, "vm_fault_copy -> vm_fault_page\n", 0, 0, 0, 0, 0);
dst_page = VM_PAGE_NULL;
result = vm_fault_page(dst_object,
vm_object_trunc_page(dst_offset),
if (vm_page_wait(interruptible)) {
goto RetryDestinationFault;
}
- /* fall thru */
+ OS_FALLTHROUGH;
case VM_FAULT_INTERRUPTED:
RETURN(MACH_SEND_INTERRUPTED);
case VM_FAULT_SUCCESS_NO_VM_PAGE:
/* success but no VM page: fail the copy */
vm_object_paging_end(dst_object);
vm_object_unlock(dst_object);
- /*FALLTHROUGH*/
+ OS_FALLTHROUGH;
case VM_FAULT_MEMORY_ERROR:
if (error) {
return error;
}
fault_info_src.cluster_size = cluster_size;
- XPR(XPR_VM_FAULT,
- "vm_fault_copy(2) -> vm_fault_page\n",
- 0, 0, 0, 0, 0);
result_page = VM_PAGE_NULL;
result = vm_fault_page(
src_object,
if (vm_page_wait(interruptible)) {
goto RetrySourceFault;
}
- /* fall thru */
+ OS_FALLTHROUGH;
case VM_FAULT_INTERRUPTED:
vm_fault_copy_dst_cleanup(dst_page);
RETURN(MACH_SEND_INTERRUPTED);
/* success but no VM page: fail */
vm_object_paging_end(src_object);
vm_object_unlock(src_object);
- /*FALLTHROUGH*/
+ OS_FALLTHROUGH;
case VM_FAULT_MEMORY_ERROR:
vm_fault_copy_dst_cleanup(dst_page);
if (error) {
int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
int my_fault_type = VM_PROT_READ;
kern_return_t kr;
+ int effective_page_mask, effective_page_size;
+
+ if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
+ effective_page_mask = VM_MAP_PAGE_MASK(map);
+ effective_page_size = VM_MAP_PAGE_SIZE(map);
+ } else {
+ effective_page_mask = PAGE_MASK;
+ effective_page_size = PAGE_SIZE;
+ }
if (not_in_kdp) {
panic("kdp_lightweight_fault called from outside of debugger context");
assert(map != VM_MAP_NULL);
- assert((cur_target_addr & PAGE_MASK) == 0);
- if ((cur_target_addr & PAGE_MASK) != 0) {
+ assert((cur_target_addr & effective_page_mask) == 0);
+ if ((cur_target_addr & effective_page_mask) != 0) {
return 0;
}
return 0;
}
- m = kdp_vm_page_lookup(object, object_offset);
+ m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
if (m != VM_PAGE_NULL) {
if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
- kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset),
+ kr = vm_compressor_pager_get(object->pager,
+ vm_object_trunc_page(object_offset + object->paging_offset),
kdp_compressor_decompressed_page_ppnum, &my_fault_type,
compressor_flags, &compressed_count_delta);
if (kr == KERN_SUCCESS) {
*/
static boolean_t
vm_page_validate_cs_fast(
- vm_page_t page)
+ vm_page_t page,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset)
{
vm_object_t object;
object = VM_PAGE_OBJECT(page);
vm_object_lock_assert_held(object);
- if (page->vmp_wpmapped && !page->vmp_cs_tainted) {
+ if (page->vmp_wpmapped &&
+ !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
/*
* This page was mapped for "write" access sometime in the
* past and could still be modifiable in the future.
* need to re-validate. ]
*/
vm_object_lock_assert_exclusive(object);
- page->vmp_cs_validated = TRUE;
- page->vmp_cs_tainted = TRUE;
+ VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
+ VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
if (cs_debug) {
printf("CODESIGNING: %s: "
"page %p obj %p off 0x%llx "
vm_cs_validated_dirtied++;
}
- if (page->vmp_cs_validated || page->vmp_cs_tainted) {
+ if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
+ VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
return TRUE;
}
vm_object_lock_assert_exclusive(object);
page->vmp_offset + object->paging_offset,
CS_BITMAP_CHECK);
if (kr == KERN_SUCCESS) {
- page->vmp_cs_validated = TRUE;
- page->vmp_cs_tainted = FALSE;
+ page->vmp_cs_validated = VMP_CS_ALL_TRUE;
+ page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
vm_cs_bitmap_validated++;
return TRUE;
}
memory_object_offset_t mo_offset;
memory_object_t pager;
struct vnode *vnode;
- boolean_t validated;
- unsigned tainted;
+ int validated, tainted, nx;
assert(page->vmp_busy);
object = VM_PAGE_OBJECT(page);
mo_offset = page->vmp_offset + object->paging_offset;
/* verify the SHA1 hash for this page */
+ validated = 0;
tainted = 0;
- validated = cs_validate_range(vnode,
+ nx = 0;
+ cs_validate_page(vnode,
pager,
mo_offset,
(const void *)((const char *)kaddr),
- PAGE_SIZE_64,
- &tainted);
+ &validated,
+ &tainted,
+ &nx);
- if (tainted & CS_VALIDATE_TAINTED) {
- page->vmp_cs_tainted = TRUE;
- }
- if (tainted & CS_VALIDATE_NX) {
- page->vmp_cs_nx = TRUE;
- }
- if (validated) {
- page->vmp_cs_validated = TRUE;
- }
+ page->vmp_cs_validated |= validated;
+ page->vmp_cs_tainted |= tainted;
+ page->vmp_cs_nx |= nx;
#if CHECK_CS_VALIDATION_BITMAP
- if (page->vmp_cs_validated && !page->vmp_cs_tainted) {
+ if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
+ page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
vnode_pager_cs_check_validation_bitmap(object->pager,
mo_offset,
CS_BITMAP_SET);
void
vm_page_validate_cs_mapped(
vm_page_t page,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset,
const void *kaddr)
{
- if (!vm_page_validate_cs_fast(page)) {
+ if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
vm_page_validate_cs_mapped_slow(page, kaddr);
}
}
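
/*
 * With sub-page (4K) mappings of 16K pages, vmp_cs_validated,
 * vmp_cs_tainted and vmp_cs_nx are per-4K-chunk bit fields rather
 * than single booleans; VMP_CS_ALL_TRUE and VMP_CS_ALL_FALSE name the
 * fully-set and fully-clear values.  Illustrative reading, assuming
 * the VMP_CS_*() accessors test the bit for the chunk being faulted:
 *
 *	// whole-page fault (fault_page_size == PAGE_SIZE):
 *	//   VMP_CS_VALIDATED() is TRUE only if every chunk's bit is set
 *	// 4K fault (fault_page_size < PAGE_SIZE):
 *	//   only the bit covering fault_phys_offset is consulted
 */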
void
vm_page_validate_cs(
- vm_page_t page)
+ vm_page_t page,
+ vm_map_size_t fault_page_size,
+ vm_map_offset_t fault_phys_offset)
{
vm_object_t object;
vm_object_offset_t offset;
object = VM_PAGE_OBJECT(page);
vm_object_lock_assert_held(object);
- if (vm_page_validate_cs_fast(page)) {
+ if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
return;
}
vm_object_lock_assert_exclusive(object);
#include <kern/backtrace.h>
+__attribute__((noinline))
static void
vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
{
uint64_t cupid = get_current_unique_pid();
uintptr_t bpc = 0;
- uint32_t bfrs = 0;
+ int btr = 0;
bool u64 = false;
/* Capture a single-frame backtrace; this extracts just the program
* further user stack traversals, thus avoiding copyin()s and further
* faults.
*/
- int btr = backtrace_thread_user(cthread, &bpc, 1U, &bfrs, &u64);
+ unsigned int bfrs = backtrace_thread_user(cthread, &bpc, 1U, &btr, &u64, NULL, false);
if ((btr == 0) && (bfrs > 0)) {
cfpc = bpc;
}
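
/*
 * Note the inverted contract: backtrace_thread_user() now returns the
 * captured frame count and reports errors through an out parameter,
 * where the old interface returned the error and wrote the frame count
 * to a "bfrs" out parameter.  Illustrative:
 *
 *	int err = 0;
 *	unsigned int frames = backtrace_thread_user(cthread, &bpc, 1U,
 *	    &err, &u64, NULL, false);
 *	if (err == 0 && frames > 0) {
 *		cfpc = bpc;	// the single captured user PC
 *	}
 */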
int
-vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv)
+vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
{
vm_rtfault_record_t *cvmrd = vrecords;
size_t residue = vrecordsz;
- int numextracted = 0;
+ size_t numextracted = 0;
boolean_t early_exit = FALSE;
vm_rtfrecord_lock();