+ if (kr != KERN_SUCCESS) {
+ if (cs_debug) {
+ printf("CODESIGNING: vm_fault_enter(0x%llx): "
+ "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
+ (long long)vaddr, m, m->object, m->offset);
+ }
+#if !SECURE_KERNEL
+ if (cs_enforcement_panic) {
+ panic("CODESIGNING: panicking on invalid page\n");
+ }
+#endif
+ }
+
+ } else {
+ /* proceed with the valid page */
+ kr = KERN_SUCCESS;
+ }
+
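+ /*
+ * "page_queues_locked" tracks whether we currently hold the
+ * page queues lock; the macros below take and drop the lock
+ * lazily, so it is only acquired when a page queue actually
+ * has to be manipulated and is never taken or released twice.
+ */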
+ boolean_t page_queues_locked = FALSE;
+#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (! page_queues_locked) { \
+ page_queues_locked = TRUE; \
+ vm_page_lockspin_queues(); \
+ } \
+MACRO_END
+#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (page_queues_locked) { \
+ page_queues_locked = FALSE; \
+ vm_page_unlock_queues(); \
+ } \
+MACRO_END
+
+ /*
+ * Hold the page queues lock to manipulate
+ * the page queues. The change-wiring
+ * case is obvious.
+ */
+ assert(m->compressor || m->object != compressor_object);
+ if (m->compressor) {
+ /*
+ * Compressor pages are neither wired
+ * nor pageable and should never change.
+ */
+ assert(m->object == compressor_object);
+ } else if (change_wiring) {
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
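+ /*
+ * wire the page only if the fault succeeded;
+ * the unwire case is unconditional
+ */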
+ if (wired) {
+ if (kr == KERN_SUCCESS) {
+ vm_page_wire(m);
+ }
+ } else {
+ vm_page_unwire(m, TRUE);
+ }
+ /* we keep the page queues lock, if we need it later */
+
+ } else {
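+ /*
+ * not a wiring change: if the fault failed, deactivate the
+ * page; otherwise figure out which paging queue it belongs on
+ */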
+ if (kr != KERN_SUCCESS) {
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+ vm_page_deactivate(m);
+ /* we keep the page queues lock, if we need it later */
+ } else if (((!m->active && !m->inactive) ||
+ m->clean_queue ||
+ no_cache) &&
+ !VM_PAGE_WIRED(m) && !m->throttled) {
+
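+ /*
+ * freshly created pages (COW or zero-fill faults) that aren't
+ * no_cache mappings can go on the current CPU's local queue
+ * rather than the global active queue
+ */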
+ if (vm_page_local_q &&
+ !no_cache &&
+ (*type_of_fault == DBG_COW_FAULT ||
+ *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
+ struct vpl *lq;
+ uint32_t lid;
+
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+ vm_object_lock_assert_exclusive(m->object);
+
+ /*
+ * we got a local queue to stuff this
+ * new page on...
+ * it's safe to manipulate local and
+ * local_id at this point since we're
+ * behind an exclusive object lock and
+ * the page is not on any global queue.
+ *
+ * we'll use the current cpu number to
+ * select the queue. note that we don't
+ * need to disable preemption... we're
+ * going to be behind the local queue's
+ * lock to do the real work
+ */
+ lid = cpu_number();
+
+ lq = &vm_page_local_q[lid].vpl_un.vpl;
+
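+ /*
+ * enqueue the page on this CPU's local queue, mark it as
+ * local and record which local queue it went on
+ */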
+ VPL_LOCK(&lq->vpl_lock);
+
+ queue_enter(&lq->vpl_queue, m,
+ vm_page_t, pageq);
+ m->local = TRUE;
+ m->local_id = lid;
+ lq->vpl_count++;
+
+ if (m->object->internal)
+ lq->vpl_internal_count++;
+ else
+ lq->vpl_external_count++;
+
+ VPL_UNLOCK(&lq->vpl_lock);
+
+ if (lq->vpl_count > vm_page_local_q_soft_limit)
+ {
+ /*
+ * we're beyond the soft limit
+ * for the local queue.
+ * vm_page_reactivate_local will
+ * 'try' to take the global page
+ * queue lock... if it can't,
+ * that's ok... we'll let the
+ * queue continue to grow up
+ * to the hard limit... at that
+ * point we'll wait for the
+ * lock... once we've got the
+ * lock, we'll transfer all of
+ * the pages from the local
+ * queue to the global active
+ * queue
+ */
+ vm_page_reactivate_local(lid, FALSE, FALSE);
+ }
+ } else {
+
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+ /*
+ * test again now that we hold the
+ * page queue lock
+ */
+ if (!VM_PAGE_WIRED(m)) {
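+ /*
+ * a page still sitting on the cleaned queue has just been
+ * reactivated by this fault; pull it off that queue and
+ * count the reactivation
+ */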
+ if (m->clean_queue) {
+ VM_PAGE_QUEUES_REMOVE(m);
+
+ vm_pageout_cleaned_reactivated++;
+ vm_pageout_cleaned_fault_reactivated++;
+ }
+
+ if ((!m->active &&
+ !m->inactive) ||
+ no_cache) {
+ /*
+ * If this is a no_cache mapping
+ * and the page has never been
+ * mapped before or was
+ * previously a no_cache page,
+ * then we want to leave pages
+ * in the speculative state so
+ * that they can be readily
+ * recycled if free memory runs
+ * low. Otherwise the page is
+ * activated as normal.
+ */
+
+ if (no_cache &&
+ (!previously_pmapped ||
+ m->no_cache)) {
+ m->no_cache = TRUE;
+
+ if (!m->speculative)
+ vm_page_speculate(m, FALSE);
+
+ } else if (!m->active &&
+ !m->inactive) {
+
+ vm_page_activate(m);
+ }
+ }
+ }
+ /* we keep the page queues lock, if we need it later */
+ }
+ }
+ }
+
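+ /*
+ * first executable mapping of this page: remember that via
+ * "xpmapped" and make sure the icache is in sync if needed
+ */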
+ if ((prot & VM_PROT_EXECUTE) &&
+ ! m->xpmapped) {
+
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+ /*
+ * xpmapped is protected by the page queues lock,
+ * so it does not matter that we might only hold
+ * the object lock in the shared state
+ */
+
+ if (! m->xpmapped) {
+
+ m->xpmapped = TRUE;
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+
+ if ((COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
+ m->object->internal &&
+ m->object->pager != NULL) {
+ /*
+ * This page could have been
+ * uncompressed by the
+ * compressor pager and its
+ * contents might be only in
+ * the data cache.
+ * Since it's being mapped for
+ * "execute" for the fist time,
+ * make sure the icache is in
+ * sync.
+ */
+ pmap_sync_page_data_phys(m->phys_page);
+ }
+