+ /* If we have a KERN_SUCCESS from the previous checks, we either have
+ * a good page, or a tainted page that has been accepted by the process.
+ * In both cases the page will be entered into the pmap.
+ * If the page is writeable, we need to disconnect it from other pmaps
+ * now so those processes can take note.
+ */
+ if (kr == KERN_SUCCESS) {
+ /*
+ * NOTE: we may only hold the vm_object lock SHARED
+ * at this point, so we need the phys_page lock to
+ * properly serialize updating the pmapped and
+ * xpmapped bits
+ */
+ if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+ pmap_lock_phys_page(phys_page);
+ /*
+ * go ahead and take the opportunity
+ * to set 'pmapped' here so that we don't
+ * need to grab this lock a 2nd time
+ * just below
+ */
+ m->vmp_pmapped = TRUE;
+
+ if (!m->vmp_xpmapped) {
+ m->vmp_xpmapped = TRUE;
+
+ pmap_unlock_phys_page(phys_page);
+
+ if (!object->internal) {
+ OSAddAtomic(1, &vm_page_xpmapped_external_count);
+ }
+
+#if defined(__arm__) || defined(__arm64__)
+ pmap_sync_page_data_phys(phys_page);
+#else
+ if (object->internal &&
+ object->pager != NULL) {
+ /*
+ * This page could have been
+ * uncompressed by the
+ * compressor pager and its
+ * contents might be only in
+ * the data cache.
+ * Since it's being mapped for
+ * "execute" for the first time,
+ * make sure the icache is in
+ * sync.
+ */
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+ pmap_sync_page_data_phys(phys_page);
+ }
+#endif
+ } else {
+ pmap_unlock_phys_page(phys_page);
+ }
+ } else {
+ if (m->vmp_pmapped == FALSE) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+ pmap_lock_phys_page(phys_page);
+ m->vmp_pmapped = TRUE;
+ pmap_unlock_phys_page(phys_page);
+ }
+ }
+
+ if (fault_type & VM_PROT_WRITE) {
+ if (m->vmp_wpmapped == FALSE) {
+ vm_object_lock_assert_exclusive(object);
+ if (!object->internal && object->pager) {
+ task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
+ }
+ m->vmp_wpmapped = TRUE;
+ }
+ if (must_disconnect) {
+ /*
+ * We can only get here
+ * because of the CSE logic
+ */
+ assert(cs_enforcement_enabled);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ /*
+ * If we are faulting for a write, we can clear
+ * the execute bit - that will ensure the page is
+ * checked again before being executable, which
+ * protects against a map switch.
+ * This only happens the first time the page
+ * gets tainted, so we won't get stuck here
+ * to make an already writeable page executable.
+ */
+ if (!cs_bypass) {
+ assert(!pmap_has_prot_policy(prot));
+ prot &= ~VM_PROT_EXECUTE;
+ }
+ }
+ }
+ assert(VM_PAGE_OBJECT(m) == object);
+
+#if VM_OBJECT_ACCESS_TRACKING
+ if (object->access_tracking) {
+ DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
+ if (fault_type & VM_PROT_WRITE) {
+ object->access_tracking_writes++;
+ vm_object_access_tracking_writes++;
+ } else {
+ object->access_tracking_reads++;
+ vm_object_access_tracking_reads++;
+ }
+ }
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
+
+#if PMAP_CS
+pmap_enter_retry:
+#endif
+ /* Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958> */
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
+ wired,
+ pmap_options | PMAP_OPTIONS_NOWAIT,
+ pe_result);
+#if PMAP_CS
+ /*
+ * Retry without execute permission if we encountered a codesigning
+ * failure on a non-execute fault. This allows applications which
+ * don't actually need to execute code to still map it for read access.
+ */
+ if ((pe_result == KERN_CODESIGN_ERROR) && pmap_cs_enforced(pmap) &&
+ (prot & VM_PROT_EXECUTE) && !(caller_prot & VM_PROT_EXECUTE)) {
+ prot &= ~VM_PROT_EXECUTE;
+ goto pmap_enter_retry;
+ }
+#endif
+#if __x86_64__
+ if (pe_result == KERN_INVALID_ARGUMENT &&
+ pmap == PMAP_NULL &&
+ wired) {
+ /*
+ * Wiring a page in a pmap-less VM map:
+ * VMware's "vmmon" kernel extension does this
+ * to grab pages.
+ * Let it proceed even though the PMAP_ENTER() failed.
+ */
+ pe_result = KERN_SUCCESS;
+ }
+#endif /* __x86_64__ */
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+ if (need_retry) {
+ /*
+ * this will be non-null in the case where we hold the lock
+ * on the top-object in this chain... we can't just drop
+ * the lock on the object we're inserting the page into
+ * and recall the PMAP_ENTER since we can still cause
+ * a deadlock if one of the critical paths tries to
+ * acquire the lock on the top-object and we're blocked
+ * in PMAP_ENTER waiting for memory... our only recourse
+ * is to deal with it at a higher level where we can
+ * drop both locks.
+ */
+ *need_retry = TRUE;
+ vm_pmap_enter_retried++;
+ goto after_the_pmap_enter;
+ }
+ /* The nonblocking version of pmap_enter did not succeed.
+ * and we don't need to drop other locks and retry
+ * at the level above us, so
+ * use the blocking version instead. Requires marking
+ * the page busy and unlocking the object */
+ boolean_t was_busy = m->vmp_busy;
+
+ vm_object_lock_assert_exclusive(object);
+
+ m->vmp_busy = TRUE;
+ vm_object_unlock(object);
+
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
+ 0, wired,
+ pmap_options, pe_result);
+
+ assert(VM_PAGE_OBJECT(m) == object);
+
+ /* Take the object lock again. */
+ vm_object_lock(object);
+
+ /* If the page was busy, someone else will wake it up.
+ * Otherwise, we have to do it now. */
+ assert(m->vmp_busy);
+ if (!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ vm_pmap_enter_blocked++;
+ }
+
+ kr = pe_result;
+ }
+
+after_the_pmap_enter:
+ return kr;
+}
+
+void
+vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
+{
+ if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
+ vm_fault(current_map(), /* map */
+ vaddr, /* vaddr */
+ prot, /* fault_type */
+ FALSE, /* change_wiring */
+ VM_KERN_MEMORY_NONE, /* tag - not wiring */
+ THREAD_UNINT, /* interruptible */
+ NULL, /* caller_pmap */
+ 0 /* caller_pmap_addr */);
+ }
+}
+
+
+/*
+ * Routine: vm_fault
+ * Purpose:
+ * Handle page faults, including pseudo-faults
+ * used to change the wiring status of pages.