+ * If the old page was in use by any users
+ * of the copy-object, it must be removed
+ * from all pmaps. (We can't know which
+ * pmaps use it.)
+ */
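+ /*
+ * pmap_disconnect() strips every existing mapping of
+ * the physical page; those users will re-fault and
+ * find the pushed copy in the copy-object instead.
+ */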
+ if (m->pmapped)
+ pmap_disconnect(m->phys_page);
+
+ /*
+ * If the copy_object already has a pager, then
+ * immediately page this page out to it, using the
+ * "initialize" option. Otherwise, we just keep the copy.
+ */
+ if ((!copy_object->pager_created)
+#if MACH_PAGEMAP
+ || vm_external_state_get(copy_object->existence_map, copy_offset) == VM_EXTERNAL_STATE_ABSENT
+#endif
+ ) {
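+ /*
+ * No pager has been created for the copy_object yet
+ * (or, with MACH_PAGEMAP, the backing store is known
+ * not to hold this page), so keep the copy resident:
+ * activate it, mark it dirty and wake any waiters.
+ * It will get cleaned later through the normal
+ * pageout path.
+ */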
+
+ vm_page_lockspin_queues();
+ assert(!m->cleaning);
+ vm_page_activate(copy_m);
+ vm_page_unlock_queues();
+
+ copy_m->dirty = TRUE;
+ PAGE_WAKEUP_DONE(copy_m);
+ }
+ else {
+ assert(copy_m->busy == TRUE);
+ assert(!m->cleaning);
+
+ /*
+ * dirty is protected by the object lock
+ */
+ copy_m->dirty = TRUE;
+
+ /*
+ * The page is already ready for pageout:
+ * not on pageout queues and busy.
+ * Unlock everything except the
+ * copy_object itself.
+ */
+ vm_object_unlock(object);
+
+ /*
+ * Write the page to the copy-object,
+ * flushing it from the kernel.
+ */
+ vm_pageout_initialize_page(copy_m);
+
+ /*
+ * Since the pageout may have
+ * temporarily dropped the
+ * copy_object's lock, we
+ * check whether we'll have
+ * to deallocate the hard way.
+ */
+ if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
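+ /*
+ * Either copy_object no longer shadows our object,
+ * or we hold its only remaining reference: drop our
+ * reference with vm_object_deallocate() (which may
+ * terminate the object), retake the original
+ * object's lock and retry the copy push.
+ */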
+ vm_object_unlock(copy_object);
+ vm_object_deallocate(copy_object);
+ vm_object_lock(object);
+
+ continue;
+ }
+ /*
+ * Pick back up the old object's
+ * lock. [It is safe to do so,
+ * since it must be deeper in the
+ * object tree.]
+ */
+ vm_object_lock(object);
+ }
+ /*
+ * Because we're pushing a page upward
+ * in the object tree, we must restart
+ * any faults that are waiting here.
+ * [Note that this is an expansion of
+ * PAGE_WAKEUP that uses the THREAD_RESTART
+ * wait result]. Can't turn off the page's
+ * busy bit because we're not done with it.
+ */
+ if (m->wanted) {
+ m->wanted = FALSE;
+ thread_wakeup_with_result((event_t) m, THREAD_RESTART);
+ }
+ }
+ /*
+ * The reference count on copy_object must be
+ * at least 2: one for our extra reference,
+ * and at least one from the outside world
+ * (we checked that when we last locked
+ * copy_object).
+ */
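+ /*
+ * Dropping that extra reference is therefore just a
+ * decrement under the exclusive lock: an outside
+ * reference survives, so the object cannot be
+ * terminated here and vm_object_deallocate() isn't
+ * needed.
+ */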
+ vm_object_lock_assert_exclusive(copy_object);
+ copy_object->ref_count--;
+ assert(copy_object->ref_count > 0);
+
+ VM_OBJ_RES_DECR(copy_object);
+ vm_object_unlock(copy_object);
+
+ break;
+ }
+
+done:
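+ /*
+ * Hand the page we settled on (still busy, in its
+ * locked object) back to the caller, together with
+ * the placeholder page left busy in the top-level
+ * object, if any; the caller must release both.
+ */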
+ *result_page = m;
+ *top_page = first_m;
+
+ XPR(XPR_VM_FAULT,
+ "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n",
+ object, offset, m, first_m, 0);
+
+ if (m != VM_PAGE_NULL) {
+ retval = VM_FAULT_SUCCESS;
+ if (my_fault == DBG_PAGEIN_FAULT) {
+
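+ /*
+ * This fault had to go to the pager for the page:
+ * bump the global and per-task pagein counters, fire
+ * the DTrace probes, and refine the fault type to
+ * distinguish anonymous (internal) from file-backed
+ * pageins.
+ */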
+ VM_STAT_INCR(pageins);
+ DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
+ current_task()->pageins++;
+
+ if (m->object->internal) {
+ DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL);
+ my_fault = DBG_PAGEIND_FAULT;
+ } else {
+ DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL);
+ my_fault = DBG_PAGEINV_FAULT;
+ }
+
+ /*
+ * evaluate the access pattern and update the
+ * sequential-access state; vm_fault_deactivate_behind
+ * depends on that state being up to date
+ */
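+ /*
+ * vm_fault_is_sequential() records the direction and
+ * run length of recent faults on this object; when the
+ * pattern looks sequential, vm_fault_deactivate_behind()
+ * ages the pages behind the faulting offset so a long
+ * streaming access doesn't crowd out the active queue.
+ */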
+ vm_fault_is_sequential(object, offset, fault_info->behavior);
+
+ vm_fault_deactivate_behind(object, offset, fault_info->behavior);
+ }
+ if (type_of_fault)
+ *type_of_fault = my_fault;
+ } else {
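+ /*
+ * The fault was handled without a pageable VM page
+ * (e.g. a physically contiguous object), so there is
+ * nothing to hand back and no placeholder page to
+ * clean up.
+ */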
+ retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
+ assert(first_m == VM_PAGE_NULL);
+ assert(object == first_object);
+ }
+
+ thread_interrupt_level(interruptible_state);
+
+#if TRACEFAULTPAGE
+ dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
+#endif
+ return retval;
+
+backoff:
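+ /*
+ * Common exit for paths that blocked and were not
+ * simply awakened: restore the caller's
+ * interruptibility and translate the wait result into
+ * "interrupted" vs. "retry".
+ */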
+ thread_interrupt_level(interruptible_state);
+
+ if (wait_result == THREAD_INTERRUPTED)
+ return (VM_FAULT_INTERRUPTED);
+ return (VM_FAULT_RETRY);
+
+#undef RELEASE_PAGE
+}
+
+
+
+/*
+ * CODE SIGNING:
+ * When soft faulting a page, we have to validate the page if:
+ * 1. the page is being mapped in user space
+ * 2. the page hasn't already been found to be "tainted"
+ * 3. the page belongs to a code-signed object
+ * 4. the page has not been validated yet or has been mapped for write.
+ */
+#define VM_FAULT_NEED_CS_VALIDATION(pmap, page) \
+ ((pmap) != kernel_pmap /*1*/ && \
+ !(page)->cs_tainted /*2*/ && \
+ (page)->object->code_signed /*3*/ && \
+ (!(page)->cs_validated || (page)->wpmapped /*4*/))
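+/*
+ * Note on (4): "wpmapped" is set once the page has been
+ * entered into some pmap with write permission, so a page
+ * that was validated before it became writable must be
+ * re-validated here.
+ */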
+
+
+/*
+ * page queue lock must NOT be held
+ * m->object must be locked
+ *
+ * NOTE: m->object could be locked "shared" only if we are called
+ * from vm_fault() as part of a soft fault. If so, we must be
+ * careful not to modify the VM object in any way that is not
+ * legal under a shared lock...
+ */
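+/*
+ * Counters for the code-signing checks below: pages found
+ * tainted at fault time that were rejected vs. tolerated.
+ */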
+unsigned long cs_enter_tainted_rejected = 0;
+unsigned long cs_enter_tainted_accepted = 0;
+kern_return_t
+vm_fault_enter(vm_page_t m,
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ vm_prot_t prot,
+ vm_prot_t fault_type,
+ boolean_t wired,
+ boolean_t change_wiring,
+ boolean_t no_cache,
+ boolean_t cs_bypass,
+ int *type_of_fault)
+{
+ kern_return_t kr, pe_result;
+ boolean_t previously_pmapped = m->pmapped;
+ boolean_t must_disconnect = FALSE;
+ boolean_t map_is_switched, map_is_switch_protected;
+
+ vm_object_lock_assert_held(m->object);
+#if DEBUG
+ lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+#endif /* DEBUG */
+
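+ /*
+ * A fictitious guard page is never entered into the
+ * pmap; just report success without mapping anything.
+ */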
+ if (m->phys_page == vm_page_guard_addr) {
+ assert(m->fictitious);
+ return KERN_SUCCESS;
+ }
+
+ if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
+
+ vm_object_lock_assert_exclusive(m->object);
+
+ } else if ((fault_type & VM_PROT_WRITE) == 0) {
+ /*
+ * This is not a "write" fault, so we
+ * might not have taken the object lock
+ * exclusively and we might not be able
+ * to update the "wpmapped" bit in
+ * vm_fault_enter().
+ * Let's just grant read access to
+ * the page for now and we'll
+ * soft-fault again if we need write
+ * access later...
+ */
+ prot &= ~VM_PROT_WRITE;
+ }
+ if (m->pmapped == FALSE) {
+
+ if ((*type_of_fault == DBG_CACHE_HIT_FAULT) && m->clustered) {
+ /*
+ * found it in the cache, but this
+ * is the first fault-in of the page (m->pmapped == FALSE)
+ * so it must have come in as part of
+ * a cluster... account 1 pagein against it