+ printf("CODE SIGNING: process %d[%s]: "
+ "rejecting invalid page at address 0x%llx "
+ "from offset 0x%llx in file \"%s%s%s\" "
+ "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+ "(signed:%d validated:%d tainted:%d nx:%d "
+ "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+ pid, procname, (addr64_t) vaddr,
+ file_offset,
+ (pathname ? pathname : "<nil>"),
+ (truncated_path ? "/.../" : ""),
+ (truncated_path ? filename : ""),
+ cs_mtime.tv_sec, cs_mtime.tv_nsec,
+ ((cs_mtime.tv_sec == mtime.tv_sec &&
+ cs_mtime.tv_nsec == mtime.tv_nsec)
+ ? "=="
+ : "!="),
+ mtime.tv_sec, mtime.tv_nsec,
+ object->code_signed,
+ m->cs_validated,
+ m->cs_tainted,
+ m->cs_nx,
+ m->wpmapped,
+ m->slid,
+ m->dirty,
+ shadow_depth);
+
+ /*
+ * We currently only generate an exit reason if cs_invalid_page directly killed the process. If cs_invalid_page
+ * did not kill the process (more common on desktop), vm_fault_enter will not satisfy the fault, and whether the
+ * process dies depends on whether a signal handler is registered for SIGSEGV and how that handler
+ * deals with the segmentation fault.
+ */
+ if (cs_killed) {
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
+ pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
+
+ codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
+ if (codesigning_exit_reason == NULL) {
+ printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
+ } else {
+ mach_vm_address_t data_addr = 0;
+ struct codesigning_exit_reason_info *ceri = NULL;
+ uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
+
+ if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
+ printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
+ } else {
+ if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
+ EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
+ ceri = (struct codesigning_exit_reason_info *)data_addr;
+ static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
+
+ ceri->ceri_virt_addr = vaddr;
+ ceri->ceri_file_offset = file_offset;
+ if (pathname)
+ strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
+ else
+ ceri->ceri_pathname[0] = '\0';
+ if (filename)
+ strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
+ else
+ ceri->ceri_filename[0] = '\0';
+ ceri->ceri_path_truncated = (truncated_path);
+ ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
+ ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
+ ceri->ceri_page_modtime_secs = mtime.tv_sec;
+ ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
+ ceri->ceri_object_codesigned = (object->code_signed);
+ ceri->ceri_page_codesig_validated = (m->cs_validated);
+ ceri->ceri_page_codesig_tainted = (m->cs_tainted);
+ ceri->ceri_page_codesig_nx = (m->cs_nx);
+ ceri->ceri_page_wpmapped = (m->wpmapped);
+ ceri->ceri_page_slid = (m->slid);
+ ceri->ceri_page_dirty = (m->dirty);
+ ceri->ceri_page_shadow_depth = shadow_depth;
+ } else {
+#if DEBUG || DEVELOPMENT
+ panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
+#else
+ printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
+#endif /* DEBUG || DEVELOPMENT */
+ /* Free the buffer */
+ os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
+ }
+ }
+ }
+
+ set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
+ }
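+ /*
+ * If panic_on_cs_killed is set and the rejected page belongs to a
+ * slid object, panic with the same diagnostic information.
+ */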
+ if (panic_on_cs_killed &&
+ object->object_slid) {
+ panic("CODE SIGNING: process %d[%s]: "
+ "rejecting invalid page at address 0x%llx "
+ "from offset 0x%llx in file \"%s%s%s\" "
+ "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
+ "(signed:%d validated:%d tainted:%d nx:%d"
+ "wpmapped:%d slid:%d dirty:%d depth:%d)\n",
+ pid, procname, (addr64_t) vaddr,
+ file_offset,
+ (pathname ? pathname : "<nil>"),
+ (truncated_path ? "/.../" : ""),
+ (truncated_path ? filename : ""),
+ cs_mtime.tv_sec, cs_mtime.tv_nsec,
+ ((cs_mtime.tv_sec == mtime.tv_sec &&
+ cs_mtime.tv_nsec == mtime.tv_nsec)
+ ? "=="
+ : "!="),
+ mtime.tv_sec, mtime.tv_nsec,
+ object->code_signed,
+ m->cs_validated,
+ m->cs_tainted,
+ m->cs_nx,
+ m->wpmapped,
+ m->slid,
+ m->dirty,
+ shadow_depth);
+ }
+
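+ /*
+ * Done with the diagnostics: drop the file object's lock if it is not
+ * the object we faulted on, and free the pathname buffer used for logging.
+ */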
+ if (file_object != object) {
+ vm_object_unlock(file_object);
+ }
+ if (pathname_len != 0) {
+ kfree(pathname, __PATH_MAX * 2);
+ pathname = NULL;
+ filename = NULL;
+ }
+ } else {
+ /* proceed with the invalid page */
+ kr = KERN_SUCCESS;
+ if (!m->cs_validated &&
+ !object->code_signed) {
+ /*
+ * This page has not been (fully) validated but
+ * does not belong to a code-signed object
+ * so it should not be forcefully considered
+ * as tainted.
+ * We're just concerned about it here because
+ * we've been asked to "execute" it but that
+ * does not mean that it should cause other
+ * accesses to fail.
+ * This happens when a debugger sets a
+ * breakpoint and we then execute code in
+ * that page. Marking the page as "tainted"
+ * would cause any inspection tool ("leaks",
+ * "vmmap", "CrashReporter", ...) to get killed
+ * due to code-signing violation on that page,
+ * even though they're just reading it and not
+ * executing from it.
+ */
+ } else {
+ /*
+ * Page might have been tainted before or not;
+ * now it definitively is. If the page wasn't
+ * tainted, we must disconnect it from all
+ * pmaps later, to force existing mappings
+ * through that code path for re-consideration
+ * of the validity of that page.
+ */
+ must_disconnect = !m->cs_tainted;
+ m->cs_tainted = TRUE;
+ }
+ cs_enter_tainted_accepted++;
+ }
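+ /*
+ * The page was rejected: log it when cs_debug is set and, on
+ * non-SECURE_KERNEL builds, panic if cs_enforcement_panic is enabled.
+ */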
+ if (kr != KERN_SUCCESS) {
+ if (cs_debug) {
+ printf("CODESIGNING: vm_fault_enter(0x%llx): "
+ "*** INVALID PAGE ***\n",
+ (long long)vaddr);
+ }
+#if !SECURE_KERNEL
+ if (cs_enforcement_panic) {
+ panic("CODESIGNING: panicking on invalid page\n");
+ }
+#endif
+ }
+
+ } else {
+ /* proceed with the valid page */
+ kr = KERN_SUCCESS;
+ }
+
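+ /*
+ * Take the page queues lock lazily, only when it is first needed, and
+ * drop it only if this code path actually took it.
+ */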
+ boolean_t page_queues_locked = FALSE;
+#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (! page_queues_locked) { \
+ page_queues_locked = TRUE; \
+ vm_page_lockspin_queues(); \
+ } \
+MACRO_END
+#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
+MACRO_BEGIN \
+ if (page_queues_locked) { \
+ page_queues_locked = FALSE; \
+ vm_page_unlock_queues(); \
+ } \
+MACRO_END
+
+ /*
+ * Hold the page queues lock to manipulate
+ * the page queues. The change_wiring
+ * case is obvious.
+ */
+ assert((m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
+
+#if CONFIG_BACKGROUND_QUEUE
+ vm_page_update_background_state(m);
+#endif
+ if (m->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
+ /*
+ * Compressor pages are neither wired
+ * nor pageable and should never change.
+ */
+ assert(object == compressor_object);
+ } else if (change_wiring) {
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+ if (wired) {
+ if (kr == KERN_SUCCESS) {
+ vm_page_wire(m, wire_tag, TRUE);
+ }
+ } else {
+ vm_page_unwire(m, TRUE);
+ }
+ /* we keep the page queues lock, in case we need it later */
+
+ } else {
+ if (object->internal == TRUE) {
+ /*
+ * don't allow anonymous pages on
+ * the speculative queues
+ */
+ no_cache = FALSE;
+ }
+ if (kr != KERN_SUCCESS) {
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+ vm_page_deactivate(m);
+ /* we keep the page queues lock, in case we need it later */
+ } else if (((m->vm_page_q_state == VM_PAGE_NOT_ON_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
+ (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
+ ((m->vm_page_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
+ !VM_PAGE_WIRED(m)) {
+
+ if (vm_page_local_q &&
+ (*type_of_fault == DBG_COW_FAULT ||
+ *type_of_fault == DBG_ZERO_FILL_FAULT) ) {
+ struct vpl *lq;
+ uint32_t lid;
+
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+ vm_object_lock_assert_exclusive(object);
+
+ /*
+ * we got a local queue to stuff this
+ * new page on...
+ * it's safe to manipulate local and
+ * local_id at this point since we're
+ * behind an exclusive object lock and
+ * the page is not on any global queue.
+ *
+ * we'll use the current cpu number to
+ * select the queue... note that we don't
+ * need to disable preemption... we're
+ * going to be behind the local queue's
+ * lock to do the real work
+ */
+ lid = cpu_number();
+
+ lq = &vm_page_local_q[lid].vpl_un.vpl;
+
+ VPL_LOCK(&lq->vpl_lock);
+
+ vm_page_check_pageable_safe(m);
+ vm_page_queue_enter(&lq->vpl_queue, m,
+ vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
+ m->local_id = lid;
+ lq->vpl_count++;
+
+ if (object->internal)
+ lq->vpl_internal_count++;
+ else
+ lq->vpl_external_count++;
+
+ VPL_UNLOCK(&lq->vpl_lock);
+
+ if (lq->vpl_count > vm_page_local_q_soft_limit) {
+ /*
+ * we're beyond the soft limit
+ * for the local queue...
+ * vm_page_reactivate_local will
+ * 'try' to take the global page
+ * queue lock... if it can't
+ * that's ok... we'll let the
+ * queue continue to grow up
+ * to the hard limit... at that
+ * point we'll wait for the
+ * lock... once we've got the
+ * lock, we'll transfer all of
+ * the pages from the local
+ * queue to the global active
+ * queue
+ */
+ vm_page_reactivate_local(lid, FALSE, FALSE);
+ }
+ } else {
+
+ __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
+
+ /*
+ * test again now that we hold the
+ * page queue lock
+ */
+ if (!VM_PAGE_WIRED(m)) {
+ if (m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ vm_page_queues_remove(m, FALSE);
+
+ vm_pageout_cleaned_reactivated++;
+ vm_pageout_cleaned_fault_reactivated++;
+ }
+
+ if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
+ no_cache) {
+ /*
+ * If this is a no_cache mapping
+ * and the page has never been
+ * mapped before or was
+ * previously a no_cache page,
+ * then we want to leave pages
+ * in the speculative state so
+ * that they can be readily
+ * recycled if free memory runs
+ * low. Otherwise the page is
+ * activated as normal.
+ */
+
+ if (no_cache &&
+ (!previously_pmapped ||
+ m->no_cache)) {
+ m->no_cache = TRUE;
+
+ if (m->vm_page_q_state != VM_PAGE_ON_SPECULATIVE_Q)
+ vm_page_speculate(m, FALSE);
+
+ } else if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
+ vm_page_activate(m);
+ }
+ }
+ }
+ /* we keep the page queues lock, in case we need it later */
+ }
+ }
+ }
+ /* we're done with the page queues lock, if we ever took it */
+ __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
+
+
+ /*
+ * If we have a KERN_SUCCESS from the previous checks, we either have
+ * a good page, or a tainted page that has been accepted by the process.
+ * In both cases the page will be entered into the pmap.
+ * If the page is writeable, we need to disconnect it from other pmaps
+ * now so those processes can take note.
+ */
+ if (kr == KERN_SUCCESS) {
+ /*
+ * NOTE: we may only hold the vm_object lock SHARED
+ * at this point, so we need the phys_page lock to
+ * properly serialize updating the pmapped and
+ * xpmapped bits
+ */
+ if ((prot & VM_PROT_EXECUTE) && !m->xpmapped) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+ pmap_lock_phys_page(phys_page);
+ /*
+ * go ahead and take the opportunity
+ * to set 'pmapped' here so that we don't
+ * need to grab this lock a 2nd time
+ * just below
+ */
+ m->pmapped = TRUE;
+
+ if (!m->xpmapped) {
+
+ m->xpmapped = TRUE;
+
+ pmap_unlock_phys_page(phys_page);
+
+ if (!object->internal)
+ OSAddAtomic(1, &vm_page_xpmapped_external_count);
+
+#if defined(__arm__) || defined(__arm64__)
+ pmap_sync_page_data_phys(phys_page);
+#else
+ if (object->internal &&
+ object->pager != NULL) {
+ /*
+ * This page could have been
+ * uncompressed by the
+ * compressor pager and its
+ * contents might be only in
+ * the data cache.
+ * Since it's being mapped for
+ * "execute" for the fist time,
+ * make sure the icache is in
+ * sync.
+ */
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+ pmap_sync_page_data_phys(phys_page);
+ }
+#endif
+ } else
+ pmap_unlock_phys_page(phys_page);
+ } else {
+ if (m->pmapped == FALSE) {
+ ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
+
+ pmap_lock_phys_page(phys_page);
+ m->pmapped = TRUE;
+ pmap_unlock_phys_page(phys_page);
+ }
+ }
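+ /*
+ * If the page still needs to be slid, slide it now (marking it busy
+ * around the operation) before it can be entered in the pmap.
+ */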
+ if (vm_page_is_slideable(m)) {
+ boolean_t was_busy = m->busy;
+
+ vm_object_lock_assert_exclusive(object);
+
+ m->busy = TRUE;
+ kr = vm_page_slide(m, 0);
+ assert(m->busy);
+ if (!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ if (kr != KERN_SUCCESS) {
+ /*
+ * This page has not been slid correctly,
+ * so do not do the pmap_enter()!
+ * Let vm_fault_enter() return the error
+ * so the caller can fail the fault.
+ */
+ goto after_the_pmap_enter;
+ }
+ }
+
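+ /*
+ * For a write fault: remember that the page has been mapped writable
+ * (wpmapped) and, if it was just marked tainted, disconnect it from
+ * all pmaps so existing mappings get re-validated.
+ */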
+ if (fault_type & VM_PROT_WRITE) {
+
+ if (m->wpmapped == FALSE) {
+ vm_object_lock_assert_exclusive(object);
+ if (!object->internal && object->pager) {
+ task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
+ }
+ m->wpmapped = TRUE;
+ }
+ if (must_disconnect) {
+ /*
+ * We can only get here
+ * because of the CSE logic
+ */
+ assert(cs_enforcement_enabled);
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ /*
+ * If we are faulting for a write, we can clear
+ * the execute bit - that will ensure the page is
+ * checked again before being executable, which
+ * protects against a map switch.
+ * This only happens the first time the page
+ * gets tainted, so we won't get stuck here
+ * to make an already writeable page executable.
+ */
+ if (!cs_bypass) {
+ assert(!pmap_has_prot_policy(prot));
+ prot &= ~VM_PROT_EXECUTE;
+ }
+ }
+ }
+ assert(VM_PAGE_OBJECT(m) == object);
+
+ /* Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958> */
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0,
+ wired,
+ pmap_options | PMAP_OPTIONS_NOWAIT,
+ pe_result);
+#if __x86_64__
+ if (pe_result == KERN_INVALID_ARGUMENT &&
+ pmap == PMAP_NULL &&
+ wired) {
+ /*
+ * Wiring a page in a pmap-less VM map:
+ * VMware's "vmmon" kernel extension does this
+ * to grab pages.
+ * Let it proceed even though the PMAP_ENTER() failed.
+ */
+ pe_result = KERN_SUCCESS;
+ }
+#endif /* __x86_64__ */
+
+ if (pe_result == KERN_RESOURCE_SHORTAGE) {
+
+ if (need_retry) {
+ /*
+ * this will be non-null in the case where we hold the lock
+ * on the top-object in this chain... we can't just drop
+ * the lock on the object we're inserting the page into
+ * and recall the PMAP_ENTER since we can still cause
+ * a deadlock if one of the critical paths tries to
+ * acquire the lock on the top-object and we're blocked
+ * in PMAP_ENTER waiting for memory... our only recourse
+ * is to deal with it at a higher level where we can
+ * drop both locks.
+ */
+ *need_retry = TRUE;
+ vm_pmap_enter_retried++;
+ goto after_the_pmap_enter;
+ }
+ /*
+ * The nonblocking version of pmap_enter did not succeed, and we
+ * don't need to drop other locks and retry at the level above us,
+ * so use the blocking version instead. This requires marking the
+ * page busy and unlocking the object.
+ */
+ boolean_t was_busy = m->busy;
+
+ vm_object_lock_assert_exclusive(object);
+
+ m->busy = TRUE;
+ vm_object_unlock(object);
+
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type,
+ 0, wired,
+ pmap_options, pe_result);
+
+ assert(VM_PAGE_OBJECT(m) == object);
+
+ /* Take the object lock again. */