+ /* A page could be tainted, or pose a risk of being tainted later.
+ * Check whether the receiving process wants it, and make it feel
+ * the consequences (that happens in cs_invalid_page()).
+ * For CS Enforcement, two other conditions will
+ * cause that page to be tainted as well:
+ * - pmapping an unsigned page executable - this means unsigned code;
+ * - writeable mapping of a validated page - the content of that page
+ * can be changed without the kernel noticing, therefore unsigned
+ * code can be created
+ */
+ if (m->cs_tainted ||
+ (( !cs_enforcement_disable && !cs_bypass ) &&
+ (/* The page is unsigned and wants to be executable */
+ (!m->cs_validated && (prot & VM_PROT_EXECUTE)) ||
+ /* The page should be immutable, but is in danger of being modified.
+ * This is the case where we want policy from the code directory -
+ * is the page immutable or not? For now we have to assume that
+ * code pages will be immutable, data pages not.
+ * We'll assume a page is a code page if it has a code directory
+ * and we fault for execution.
+ * That is good enough since if we faulted the code page for
+ * writing in another map before, it is wpmapped; if we fault
+ * it for writing in this map later it will also be faulted for executing
+ * at the same time; and if we fault for writing in another map
+ * later, we will disconnect it from this pmap so we'll notice
+ * the change.
+ */
+ (page_immutable(m, prot) && ((prot & VM_PROT_WRITE) || m->wpmapped))
+ ))
+ )
+ {
+ /* We will have a tainted page. Have to handle the special case
+ * of a switched map now. If the map is not switched, standard
+ * procedure applies - call cs_invalid_page().
+ * If the map is switched, the real owner is invalid already.
+ * There is no point in invalidating the switching process since
+ * it will not be executing from the map. So we don't call
+ * cs_invalid_page() in that case. */
+ boolean_t reject_page;
+ if(map_is_switched) {
+ /* The switched map must belong to the current thread; a write
+ * fault on a switch-protected map would be a CS violation. */
+ assert(pmap==vm_map_pmap(current_thread()->map));
+ assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
+ reject_page = FALSE;
+ } else {
+ /* Ask the code-signing policy whether the process accepts
+ * the invalid page (may also kill the process). */
+ reject_page = cs_invalid_page((addr64_t) vaddr);
+ }
+
+ if (reject_page) {
+ /* reject the tainted page: abort the page fault */
+ kr = KERN_CODESIGN_ERROR;
+ cs_enter_tainted_rejected++;
+ } else {
+ /* proceed with the tainted page */
+ kr = KERN_SUCCESS;
+ /* Page might have been tainted before or not; now it
+ * definitively is. If the page wasn't tainted, we must
+ * disconnect it from all pmaps later. */
+ must_disconnect = !m->cs_tainted;
+ m->cs_tainted = TRUE;
+ cs_enter_tainted_accepted++;
+ }
+ if (cs_debug || kr != KERN_SUCCESS) {
+ printf("CODESIGNING: vm_fault_enter(0x%llx): "
+ "page %p obj %p off 0x%llx *** INVALID PAGE ***\n",
+ (long long)vaddr, m, m->object, m->offset);
+ }
+
+ } else {
+ /* proceed with the valid page */
+ kr = KERN_SUCCESS;
+ }
+
+ /* If we have a KERN_SUCCESS from the previous checks, we either have
+ * a good page, or a tainted page that has been accepted by the process.
+ * In both cases the page will be entered into the pmap.
+ * If the page is writeable, we need to disconnect it from other pmaps
+ * now so those processes can take note.
+ */
+ if (kr == KERN_SUCCESS) {
+ /*
+ * NOTE: we may only hold the vm_object lock SHARED
+ * at this point, but the update of pmapped is ok
+ * since this is the ONLY bit updated behind the SHARED
+ * lock... however, we need to figure out how to do an atomic
+ * update on a bit field to make this less fragile... right
+ * now I don't know how to coerce 'C' to give me the offset info
+ * that's needed for an AtomicCompareAndSwap
+ */
+ m->pmapped = TRUE;
+ /* Slide (relocate) the page before it can be entered into the
+ * pmap. Mark it busy across the slide so other threads wait;
+ * restore the previous busy state afterwards. */
+ if(vm_page_is_slideable(m)) {
+ boolean_t was_busy = m->busy;
+ m->busy = TRUE;
+ kr = vm_page_slide(m, 0);
+ assert(m->busy);
+ if(!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ if (kr != KERN_SUCCESS) {
+ /*
+ * This page has not been slid correctly,
+ * do not do the pmap_enter() !
+ * Let vm_fault_enter() return the error
+ * so the caller can fail the fault.
+ */
+ goto after_the_pmap_enter;
+ }
+ }
+
+ if (fault_type & VM_PROT_WRITE) {
+
+ if (m->wpmapped == FALSE) {
+ /* Unlike pmapped above, updating wpmapped requires
+ * the object lock held exclusive. */
+ vm_object_lock_assert_exclusive(m->object);
+
+ m->wpmapped = TRUE;
+ }
+ if (must_disconnect) {
+ /*
+ * We can only get here
+ * because of the CSE logic
+ */
+ assert(cs_enforcement_disable == FALSE);
+ pmap_disconnect(m->phys_page);
+ /*
+ * If we are faulting for a write, we can clear
+ * the execute bit - that will ensure the page is
+ * checked again before being executable, which
+ * protects against a map switch.
+ * This only happens the first time the page
+ * gets tainted, so we won't get stuck here
+ * to make an already writeable page executable.
+ */
+ if (!cs_bypass){
+ prot &= ~VM_PROT_EXECUTE;
+ }
+ }
+ }
+
+ /* Prevent a deadlock by not
+ * holding the object lock if we need to wait for a page in
+ * pmap_enter() - <rdar://problem/7138958> */
+ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, 0,
+ wired, PMAP_OPTIONS_NOWAIT, pe_result);
+
+ if(pe_result == KERN_RESOURCE_SHORTAGE) {
+ /* The nonblocking version of pmap_enter did not succeed.
+ * Use the blocking version instead. Requires marking
+ * the page busy and unlocking the object */
+ boolean_t was_busy = m->busy;
+ m->busy = TRUE;
+ vm_object_unlock(m->object);
+
+ PMAP_ENTER(pmap, vaddr, m, prot, 0, wired);
+
+ /* Take the object lock again. */
+ vm_object_lock(m->object);
+
+ /* If the page was busy, someone else will wake it up.
+ * Otherwise, we have to do it now. */
+ assert(m->busy);
+ if(!was_busy) {
+ PAGE_WAKEUP_DONE(m);
+ }
+ vm_pmap_enter_blocked++;
+ }
+ }