diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c
index 0cedaa19dd6d9268b06abb3a646c1142662d0caa..ad07c88874d3da1fca1c985719f2fd2dd1e34f7a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -116,7 +116,7 @@ extern void kprint_state(x86_saved_state64_t *saved_state);
  * Forward declarations
  */
 static void user_page_fault_continue(kern_return_t kret);
-static void panic_trap(x86_saved_state64_t *saved_state);
+static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result);
 static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
 
 volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
@@ -131,6 +131,7 @@ extern boolean_t dtrace_tally_fault(user_addr_t);
 extern boolean_t pmap_smep_enabled;
 extern boolean_t pmap_smap_enabled;
 
+__attribute__((noreturn))
 void
 thread_syscall_return(
         kern_return_t ret)
@@ -191,6 +192,13 @@ thread_syscall_return(
                                ret);
 #endif
        }
+
+#if DEBUG || DEVELOPMENT
+       kern_allocation_name_t
+       prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
+       assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
        throttle_lowpri_io(1);
 
        thread_exception_return();
@@ -348,39 +356,24 @@ interrupt(x86_saved_state_t *state)
        int             ipl;
        int             cnum = cpu_number();
        cpu_data_t      *cdp = cpu_data_ptr[cnum];
-       int             itype = 0;
+       int             itype = DBG_INTR_TYPE_UNKNOWN;
 
-       if (is_saved_state64(state) == TRUE) {
-               x86_saved_state64_t     *state64;
-
-               state64 = saved_state64(state);
-               rip = state64->isf.rip;
-               rsp = state64->isf.rsp;
-               interrupt_num = state64->isf.trapno;
-#ifdef __x86_64__
-               if(state64->isf.cs & 0x03)
-#endif
-                       user_mode = TRUE;
-       } else {
-               x86_saved_state32_t     *state32;
-
-               state32 = saved_state32(state);
-               if (state32->cs & 0x03)
-                       user_mode = TRUE;
-               rip = state32->eip;
-               rsp = state32->uesp;
-               interrupt_num = state32->trapno;
-       }
+        x86_saved_state64_t    *state64 = saved_state64(state);
+       rip = state64->isf.rip;
+       rsp = state64->isf.rsp;
+       interrupt_num = state64->isf.trapno;
+       if(state64->isf.cs & 0x03)
+               user_mode = TRUE;
 
        if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage)
                cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
 
        if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT))
-               itype = 1;
+               itype = DBG_INTR_TYPE_IPI;
        else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT))
-               itype = 2;
+               itype = DBG_INTR_TYPE_TIMER;
        else
-               itype = 3;
+               itype = DBG_INTR_TYPE_OTHER;
 
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
@@ -391,12 +384,7 @@ interrupt(x86_saved_state_t *state)
        SCHED_STATS_INTERRUPT(current_processor());
 
 #if CONFIG_TELEMETRY
-       if (telemetry_needs_record
-               && (current_task() != kernel_task)
-#if CONFIG_SCHED_IDLE_IN_PLACE
-               && ((current_thread()->state & TH_IDLE) == 0) /* idle-in-place should be treated like the idle thread */
-#endif
-               ) {
+       if (telemetry_needs_record) {
                telemetry_mark_curthread(user_mode);
        }
 #endif
@@ -458,7 +446,7 @@ interrupt(x86_saved_state_t *state)
         */
        if (!user_mode) {
                uint64_t depth = cdp->cpu_kernel_stack
-                                + sizeof(struct x86_kernel_state)
+                                + sizeof(struct thread_kernel_state)
                                 + sizeof(struct i386_exception_link *)
                                 - rsp;
                if (__improbable(depth > kernel_stack_depth_max)) {
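
As an illustrative aside (not part of this diff): the depth computed in the hunk above
measures how much of the kernel stack the interrupted context has consumed. The usable
stack top sits just above the thread_kernel_state save area, and the stack grows downward
toward RSP, so the in-use byte count is the distance from that top down to RSP. A minimal
standalone sketch of the arithmetic, with illustrative parameter names:

    #include <stdint.h>

    /* Illustrative only: mirrors the depth calculation in interrupt() above. */
    static uint64_t
    stack_depth_sketch(uint64_t kernel_stack,   /* cdp->cpu_kernel_stack */
                       uint64_t state_size,     /* sizeof(struct thread_kernel_state) */
                       uint64_t link_size,      /* sizeof(struct i386_exception_link *) */
                       uint64_t rsp)            /* RSP at the time of the interrupt */
    {
            uint64_t stack_top = kernel_stack + state_size + link_size;
            return stack_top - rsp;             /* bytes of kernel stack in use */
    }
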
@@ -476,6 +464,7 @@ interrupt(x86_saved_state_t *state)
                MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
                interrupt_num, 0, 0, 0, 0);
 
+       assert(ml_get_interrupts_enabled() == FALSE);
 }
 
 static inline void
@@ -507,6 +496,7 @@ kernel_trap(
        int                     type;
        vm_map_t                map = 0;        /* protected by T_PAGE_FAULT */
        kern_return_t           result = KERN_FAILURE;
+       kern_return_t           fault_result = KERN_SUCCESS;
        thread_t                thread;
        ast_t                   *myast;
        boolean_t               intr;
@@ -516,8 +506,9 @@ kernel_trap(
 #if NCOPY_WINDOWS > 0
        int                     fault_in_copy_window = -1;
 #endif
-       int                     is_user = 0;
-       
+       int                     is_user;
+       int                     trap_pl = get_preemption_level();
+
        thread = current_thread();
 
        if (__improbable(is_saved_state32(state)))
@@ -535,6 +526,8 @@ kernel_trap(
 
        myast = ast_pending();
 
+       is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
+
        perfASTCallback astfn = perfASTHook;
        if (__improbable(astfn != NULL)) {
                if (*myast & AST_CHUD_ALL)
@@ -563,14 +556,21 @@ kernel_trap(
         * as soon we possibly can to hold latency down
         */
        if (__improbable(T_PREEMPT == type)) {
-               ast_taken(AST_PREEMPTION, FALSE);
+               ast_taken_kernel();
 
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                        (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                        0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
                return;
        }
-       
+
+       user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+               (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
+               (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
+               VM_KERNEL_UNSLIDE(kern_ip), 0);
+
+
        if (T_PAGE_FAULT == type) {
                /*
                 * assume we're faulting in the kernel map
@@ -605,13 +605,11 @@ kernel_trap(
                                        map = thread->map;
                                        fault_in_copy_window = window_index;
                                }
-                               is_user = -1;
                        }
 #else
                        if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) {
                                /* fault occurred in userspace */
                                map = thread->map;
-                               is_user = -1;
 
                                /* Intercept a potential Supervisor Mode Execute
                                 * Protection fault. These criteria identify
@@ -620,7 +618,19 @@ kernel_trap(
                                 * (The VM could just redrive a SMEP fault, hence
                                 * the intercept).
                                 */
-                               if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
+                               if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
+                                       (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
+                                       goto debugger_entry;
+                               }
+
+                               /*
+                                * Additionally check for SMAP faults...
+                                * which are characterized by page-present and
+                                * the AC bit unset (i.e. not from copyin/out path).
+                                */
+                               if (__improbable(code & T_PF_PROT &&
+                                                pmap_smap_enabled &&
+                                                (saved_state->isf.rflags & EFL_AC) == 0)) {
                                        goto debugger_entry;
                                }
 
@@ -636,17 +646,14 @@ kernel_trap(
                                        set_cr3_raw(map->pmap->pm_cr3);
                                        return;
                                }
-
+                               if (__improbable(vaddr < PAGE_SIZE) &&
+                                   ((thread->machine.specFlags & CopyIOActive) == 0)) {
+                                       goto debugger_entry;
+                               }
                        }
 #endif
                }
        }
-       user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);  
-       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
-               (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
-               (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
-               VM_KERNEL_UNSLIDE(kern_ip), 0);
-
 
        (void) ml_set_interrupts_enabled(intr);
 
@@ -705,11 +712,10 @@ kernel_trap(
                if (code & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
 
-               result = vm_fault(map,
-                                 vm_map_trunc_page(vaddr,
-                                                   PAGE_MASK),
+               fault_result = result = vm_fault(map,
+                                 vaddr,
                                  prot,
-                                 FALSE, 
+                                 FALSE, VM_KERN_MEMORY_NONE,
                                  THREAD_UNINT, NULL, 0);
 
                if (result == KERN_SUCCESS) {
@@ -773,14 +779,12 @@ debugger_entry:
                 */
                sync_iss_to_iks(state);
 #if  MACH_KDP
-               if (current_debugger != KDB_CUR_DB) {
-                       if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
-                               return;
-               }
+               if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
+                       return;
 #endif
        }
        pal_cli();
-       panic_trap(saved_state);
+       panic_trap(saved_state, trap_pl, fault_result);
        /*
         * NO RETURN
         */
@@ -793,15 +797,13 @@ set_recovery_ip(x86_saved_state64_t  *saved_state, vm_offset_t ip)
         saved_state->isf.rip = ip;
 }
 
-
-
-
 static void
-panic_trap(x86_saved_state64_t *regs)
+panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
 {
        const char      *trapname = "Unknown";
        pal_cr_t        cr0, cr2, cr3, cr4;
        boolean_t       potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
+       boolean_t       potential_smap_fault = FALSE;
 
        pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 );
        assert(ml_get_interrupts_enabled() == FALSE);
@@ -826,6 +828,12 @@ panic_trap(x86_saved_state64_t *regs)
                } else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
                        potential_kernel_NX_fault = TRUE;
                }
+       } else if (pmap_smap_enabled &&
+                  regs->isf.trapno == T_PAGE_FAULT &&
+                  regs->isf.err & T_PF_PROT &&
+                  regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
+                  regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
+               potential_smap_fault = TRUE;
        }
 
 #undef panic
@@ -836,7 +844,7 @@ panic_trap(x86_saved_state64_t *regs)
              "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
              "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
              "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
-             "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s\n",
+             "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
              regs->isf.rip, regs->isf.trapno, trapname,
              cr0, cr2, cr3, cr4,
              regs->rax, regs->rbx, regs->rcx, regs->rdx,
@@ -848,7 +856,9 @@ panic_trap(x86_saved_state64_t *regs)
              virtualized ? " VMM" : "",
              potential_kernel_NX_fault ? " Kernel NX fault" : "",
              potential_smep_fault ? " SMEP/User NX fault" : "",
-             "");
+             potential_smap_fault ? " SMAP fault" : "",
+             pl,
+             fault_result);
        /*
         * This next statement is not executed,
         * but it's needed to stop the compiler using tail call optimization
@@ -1016,6 +1026,9 @@ user_trap(
                break;
 
            case T_INVALID_OPCODE:
+#if !defined(RC_HIDE_XNU_J137)
+               fpUDflt(rip);   /* May return from exception directly */
+#endif
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_INVOP;
                break;
@@ -1075,9 +1088,8 @@ user_trap(
                if (__improbable(err & T_PF_EXECUTE))
                        prot |= VM_PROT_EXECUTE;
                kret = vm_fault(thread->map,
-                               vm_map_trunc_page(vaddr,
-                                                 PAGE_MASK),
-                               prot, FALSE,
+                               vaddr,
+                               prot, FALSE, VM_KERN_MEMORY_NONE,
                                THREAD_ABORTSAFE, NULL, 0);
 
                if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
@@ -1122,29 +1134,6 @@ user_trap(
        /* NOTREACHED */
 }
 
-
-/*
- * Handle AST traps for i386.
- */
-
-extern void     log_thread_action (thread_t, char *);
-
-void
-i386_astintr(int preemption)
-{
-       ast_t           mask = AST_ALL;
-       spl_t           s;
-
-       if (preemption)
-               mask = AST_PREEMPTION;
-
-       s = splsched();
-
-       ast_taken(mask, s);
-
-       splx(s);
-}
-
 /*
  * Handle exceptions for i386.
  *
@@ -1182,7 +1171,7 @@ i386_exception(
 void
 sync_iss_to_iks(x86_saved_state_t *saved_state)
 {
-       struct x86_kernel_state *iks;
+       struct x86_kernel_state *iks = NULL;
        vm_offset_t kstack;
        boolean_t record_active_regs = FALSE;
 
@@ -1190,7 +1179,8 @@ sync_iss_to_iks(x86_saved_state_t *saved_state)
        if (saved_state && saved_state->flavor == THREAD_STATE_NONE)
                pal_get_kern_regs( saved_state );
 
-       if ((kstack = current_thread()->kernel_stack) != 0) {
+       if (current_thread() != NULL && 
+           (kstack = current_thread()->kernel_stack) != 0) {
                x86_saved_state64_t     *regs = saved_state64(saved_state);
 
                iks = STACK_IKS(kstack);
@@ -1253,3 +1243,27 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
                __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
        }
 }
+
+#if DEBUG
+extern void    thread_exception_return_internal(void) __dead2;
+
+void thread_exception_return(void) {
+       thread_t thread = current_thread();
+       ml_set_interrupts_enabled(FALSE);
+       if (thread_is_64bit(thread) != task_has_64BitAddr(thread->task)) {
+               panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit(thread),  task_has_64BitAddr(thread->task));
+       }
+
+       if (thread_is_64bit(thread)) {
+               if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
+                       panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
+               }
+       } else {
+               if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
+                       panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
+               }
+
+       }
+       thread_exception_return_internal();
+}
+#endif
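
As an illustrative aside (not part of this diff): the DEBUG-only thread_exception_return()
wrapper added above sanity-checks the user code-segment descriptors before dropping back
to user space. If the access byte of USER_CS or USER64_CS no longer carries the user
privilege level (ACC_PL_U, i.e. DPL 3), the GDT has been corrupted and the kernel panics
rather than returning. A minimal sketch of the descriptor check; ACC_PL_U mirrors
osfmk/i386/seg.h and the helper name is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define ACC_PL_U 0x60   /* DPL 3 bits in a descriptor access byte (user-reachable) */

    /* Illustrative only: what the DEBUG wrapper above asserts for USER_CS/USER64_CS. */
    static bool
    user_cs_descriptor_ok(uint8_t access_byte)
    {
            return (access_byte & ACC_PL_U) != 0;   /* still reachable from ring 3 */
    }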