diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c
index 3a99e32a7de3cc328f70cf115a0a67db5e88cf05..7924f4f7dd5f0ce543e13640e96759c0de1fe40d 100644
--- a/osfmk/i386/trap.c
+++ b/osfmk/i386/trap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -116,7 +116,7 @@ extern void kprint_state(x86_saved_state64_t *saved_state);
  * Forward declarations
  */
 static void user_page_fault_continue(kern_return_t kret);
-static void panic_trap(x86_saved_state64_t *saved_state);
+static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result);
 static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
 
 volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
@@ -131,6 +131,7 @@ extern boolean_t dtrace_tally_fault(user_addr_t);
 extern boolean_t pmap_smep_enabled;
 extern boolean_t pmap_smap_enabled;
 
+__attribute__((noreturn))
 void
 thread_syscall_return(
         kern_return_t ret)
@@ -391,12 +392,7 @@ interrupt(x86_saved_state_t *state)
        SCHED_STATS_INTERRUPT(current_processor());
 
 #if CONFIG_TELEMETRY
-       if (telemetry_needs_record
-               && (current_task() != kernel_task)
-#if CONFIG_SCHED_IDLE_IN_PLACE
-               && ((current_thread()->state & TH_IDLE) == 0) /* idle-in-place should be treated like the idle thread */
-#endif
-               ) {
+       if (telemetry_needs_record) {
                telemetry_mark_curthread(user_mode);
        }
 #endif
@@ -476,6 +472,7 @@ interrupt(x86_saved_state_t *state)
                MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
                interrupt_num, 0, 0, 0, 0);
 
+       assert(ml_get_interrupts_enabled() == FALSE);
 }
 
 static inline void
@@ -507,6 +504,7 @@ kernel_trap(
        int                     type;
        vm_map_t                map = 0;        /* protected by T_PAGE_FAULT */
        kern_return_t           result = KERN_FAILURE;
+       kern_return_t           fault_result = KERN_SUCCESS;
        thread_t                thread;
        ast_t                   *myast;
        boolean_t               intr;
@@ -516,8 +514,9 @@ kernel_trap(
 #if NCOPY_WINDOWS > 0
        int                     fault_in_copy_window = -1;
 #endif
-       int                     is_user = 0;
-       
+       int                     is_user;
+       int                     trap_pl = get_preemption_level();
+
        thread = current_thread();
 
        if (__improbable(is_saved_state32(state)))
@@ -535,6 +534,8 @@ kernel_trap(
 
        myast = ast_pending();
 
+       is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
+
        perfASTCallback astfn = perfASTHook;
        if (__improbable(astfn != NULL)) {
                if (*myast & AST_CHUD_ALL)
@@ -570,7 +571,14 @@ kernel_trap(
                        0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
                return;
        }
-       
+
+       user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
+       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+               (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
+               (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
+               VM_KERNEL_UNSLIDE(kern_ip), 0);
+
+
        if (T_PAGE_FAULT == type) {
                /*
                 * assume we're faulting in the kernel map
@@ -605,13 +613,11 @@ kernel_trap(
                                        map = thread->map;
                                        fault_in_copy_window = window_index;
                                }
-                               is_user = -1;
                        }
 #else
                        if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) {
                                /* fault occurred in userspace */
                                map = thread->map;
-                               is_user = -1;
 
                                /* Intercept a potential Supervisor Mode Execute
                                 * Protection fault. These criteria identify
@@ -620,7 +626,8 @@ kernel_trap(
                                 * (The VM could just redrive a SMEP fault, hence
                                 * the intercept).
                                 */
-                               if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
+                               if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
+                                       (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
                                        goto debugger_entry;
                                }
 
@@ -647,17 +654,14 @@ kernel_trap(
                                        set_cr3_raw(map->pmap->pm_cr3);
                                        return;
                                }
-
+                               if (__improbable(vaddr < PAGE_SIZE) &&
+                                   ((thread->machine.specFlags & CopyIOActive) == 0)) {
+                                       goto debugger_entry;
+                               }
                        }
 #endif
                }
        }
-       user_addr_t     kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);  
-       KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
-               (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
-               (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
-               VM_KERNEL_UNSLIDE(kern_ip), 0);
-
 
        (void) ml_set_interrupts_enabled(intr);
 
@@ -716,9 +720,8 @@ kernel_trap(
                if (code & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
 
-               result = vm_fault(map,
-                                 vm_map_trunc_page(vaddr,
-                                                   PAGE_MASK),
+               fault_result = result = vm_fault(map,
+                                 vaddr,
                                  prot,
                                  FALSE, 
                                  THREAD_UNINT, NULL, 0);
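
Note on the vm_fault() change above (and the matching change at the user_trap() call site further down): both now pass the raw faulting address instead of first rounding it down with vm_map_trunc_page(vaddr, PAGE_MASK). As a minimal standalone sketch of the arithmetic that dropped truncation performed, assuming 4 KiB pages and using a hypothetical trunc_to_page() helper in place of the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ULL                /* assumed page size */
#define SKETCH_PAGE_MASK (SKETCH_PAGE_SIZE - 1) /* low-order offset bits */

/* Hypothetical stand-in for vm_map_trunc_page(vaddr, PAGE_MASK):
 * clears the offset bits so the address names the base of its page. */
static uint64_t trunc_to_page(uint64_t vaddr)
{
	return vaddr & ~SKETCH_PAGE_MASK;
}

int main(void)
{
	uint64_t vaddr = 0x7fff5fbff8a4ULL;
	printf("fault address 0x%llx lies in the page at 0x%llx\n",
	    (unsigned long long)vaddr,
	    (unsigned long long)trunc_to_page(vaddr));
	return 0;
}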
@@ -784,14 +787,12 @@ debugger_entry:
                 */
                sync_iss_to_iks(state);
 #if  MACH_KDP
-               if (current_debugger != KDB_CUR_DB) {
-                       if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
-                               return;
-               }
+               if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
+                       return;
 #endif
        }
        pal_cli();
-       panic_trap(saved_state);
+       panic_trap(saved_state, trap_pl, fault_result);
        /*
         * NO RETURN
         */
@@ -804,11 +805,8 @@ set_recovery_ip(x86_saved_state64_t  *saved_state, vm_offset_t ip)
         saved_state->isf.rip = ip;
 }
 
-
-
-
 static void
-panic_trap(x86_saved_state64_t *regs)
+panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
 {
        const char      *trapname = "Unknown";
        pal_cr_t        cr0, cr2, cr3, cr4;
@@ -854,7 +852,7 @@ panic_trap(x86_saved_state64_t *regs)
              "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
              "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
              "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
-             "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s\n",
+             "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
              regs->isf.rip, regs->isf.trapno, trapname,
              cr0, cr2, cr3, cr4,
              regs->rax, regs->rbx, regs->rcx, regs->rdx,
@@ -866,7 +864,9 @@ panic_trap(x86_saved_state64_t *regs)
              virtualized ? " VMM" : "",
              potential_kernel_NX_fault ? " Kernel NX fault" : "",
              potential_smep_fault ? " SMEP/User NX fault" : "",
-             potential_smap_fault ? " SMAP fault" : "");
+             potential_smap_fault ? " SMAP fault" : "",
+             pl,
+             fault_result);
        /*
         * This next statement is not executed,
         * but it's needed to stop the compiler using tail call optimization
@@ -1093,8 +1093,7 @@ user_trap(
                if (__improbable(err & T_PF_EXECUTE))
                        prot |= VM_PROT_EXECUTE;
                kret = vm_fault(thread->map,
-                               vm_map_trunc_page(vaddr,
-                                                 PAGE_MASK),
+                               vaddr,
                                prot, FALSE,
                                THREAD_ABORTSAFE, NULL, 0);
 
@@ -1271,3 +1270,27 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
                __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
        }
 }
+
+#if DEBUG
+extern void    thread_exception_return_internal(void) __dead2;
+
+void thread_exception_return(void) {
+       thread_t thread = current_thread();
+       ml_set_interrupts_enabled(FALSE);
+       if (thread_is_64bit(thread) != task_has_64BitAddr(thread->task)) {
+               panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit(thread),  task_has_64BitAddr(thread->task));
+       }
+
+       if (thread_is_64bit(thread)) {
+               if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
+                       panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
+               }
+       } else {
+               if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
+                       panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
+
+               }
+       }
+       thread_exception_return_internal();
+}
+#endif
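
For context on the DEBUG-only block added above: before handing control back to user space it panics if the thread's 64-bit flag disagrees with its task's address-space width, or if the GDT code-segment descriptor it is about to return through is not marked user-privileged (ACC_PL_U). A minimal standalone sketch of that access-byte check follows, assuming the conventional x86 layout where bits 5 and 6 of the access byte hold the DPL; the SKETCH_* names and the example byte values are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed x86 access-byte layout: bits 5 and 6 hold the descriptor
 * privilege level (DPL); DPL 3 means user-accessible. */
#define SKETCH_DPL_SHIFT 5
#define SKETCH_DPL_MASK  (3u << SKETCH_DPL_SHIFT)
#define SKETCH_DPL_USER  (3u << SKETCH_DPL_SHIFT)  /* plays the role of ACC_PL_U */

/* Returns nonzero if the access byte describes a user-privileged segment. */
static int access_byte_is_user(uint8_t access)
{
	return (access & SKETCH_DPL_MASK) == SKETCH_DPL_USER;
}

int main(void)
{
	uint8_t user_code = 0xFA;  /* present, DPL 3, readable code segment */
	uint8_t kern_code = 0x9A;  /* present, DPL 0, readable code segment */

	printf("access 0x%02x user-privileged? %d\n", user_code, access_byte_is_user(user_code));
	printf("access 0x%02x user-privileged? %d\n", kern_code, access_byte_is_user(kern_code));
	return 0;
}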