X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/fe8ab488e9161c46dd9885d58fc52996dc0249ff..008676633c2ad2c325837c2b64915f7ded690a8f:/osfmk/i386/trap.c

diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c
index 0cedaa19d..7924f4f7d 100644
--- a/osfmk/i386/trap.c
+++ b/osfmk/i386/trap.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
@@ -116,7 +116,7 @@ extern void kprint_state(x86_saved_state64_t *saved_state);
 * Forward declarations
 */
 static void user_page_fault_continue(kern_return_t kret);
-static void panic_trap(x86_saved_state64_t *saved_state);
+static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result);
 static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
 
 volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
@@ -131,6 +131,7 @@ extern boolean_t dtrace_tally_fault(user_addr_t);
 extern boolean_t pmap_smep_enabled;
 extern boolean_t pmap_smap_enabled;
 
+__attribute__((noreturn))
 void
 thread_syscall_return(
         kern_return_t ret)
@@ -391,12 +392,7 @@ interrupt(x86_saved_state_t *state)
 	SCHED_STATS_INTERRUPT(current_processor());
 
 #if CONFIG_TELEMETRY
-	if (telemetry_needs_record
-	    && (current_task() != kernel_task)
-#if CONFIG_SCHED_IDLE_IN_PLACE
-	    && ((current_thread()->state & TH_IDLE) == 0) /* idle-in-place should be treated like the idle thread */
-#endif
-	    ) {
+	if (telemetry_needs_record) {
 		telemetry_mark_curthread(user_mode);
 	}
 #endif
@@ -476,6 +472,7 @@ interrupt(x86_saved_state_t *state)
 		MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
 		interrupt_num, 0, 0, 0, 0);
 
+	assert(ml_get_interrupts_enabled() == FALSE);
 }
 
 static inline void
@@ -507,6 +504,7 @@ kernel_trap(
 	int			type;
 	vm_map_t		map = 0;	/* protected by T_PAGE_FAULT */
 	kern_return_t		result = KERN_FAILURE;
+	kern_return_t		fault_result = KERN_SUCCESS;
 	thread_t		thread;
 	ast_t			*myast;
 	boolean_t		intr;
@@ -516,8 +514,9 @@ kernel_trap(
 #if NCOPY_WINDOWS > 0
 	int			fault_in_copy_window = -1;
 #endif
-	int			is_user = 0;
-
+	int			is_user;
+	int			trap_pl = get_preemption_level();
+
 	thread = current_thread();
 
 	if (__improbable(is_saved_state32(state)))
@@ -535,6 +534,8 @@ kernel_trap(
 
 	myast = ast_pending();
 
+	is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
+
 	perfASTCallback astfn = perfASTHook;
 	if (__improbable(astfn != NULL)) {
 		if (*myast & AST_CHUD_ALL)
@@ -570,7 +571,14 @@ kernel_trap(
 			0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
 		return;
 	}
-	
+
+	user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
+	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+		(MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
+		(unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
+		VM_KERNEL_UNSLIDE(kern_ip), 0);
+
+
 	if (T_PAGE_FAULT == type) {
 		/*
 		 * assume we're faulting in the kernel map
@@ -605,13 +613,11 @@ kernel_trap(
 			map = thread->map;
 			fault_in_copy_window = window_index;
 		}
-		is_user = -1;
 	}
 #else
 	if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) {
 		/* fault occurred in userspace */
 		map = thread->map;
-		is_user = -1;
 
 		/* Intercept a potential Supervisor Mode Execute
 		 * Protection fault. These criteria identify
@@ -620,7 +626,19 @@ kernel_trap(
 		 * (The VM could just redrive a SMEP fault, hence
 		 * the intercept).
 		 */
-		if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
+			if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
+				(pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
+				goto debugger_entry;
+			}
+
+			/*
+			 * Additionally check for SMAP faults...
+			 * which are characterized by page-present and
+			 * the AC bit unset (i.e. not from copyin/out path).
+			 */
+			if (__improbable(code & T_PF_PROT &&
+				pmap_smap_enabled &&
+				(saved_state->isf.rflags & EFL_AC) == 0)) {
 				goto debugger_entry;
 			}
 
@@ -636,17 +654,14 @@ kernel_trap(
 			set_cr3_raw(map->pmap->pm_cr3);
 			return;
 		}
-		
+		if (__improbable(vaddr < PAGE_SIZE) &&
+			((thread->machine.specFlags & CopyIOActive) == 0)) {
+			goto debugger_entry;
+		}
 	}
 #endif
 	}
 }
-	user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
-	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
-		(MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
-		(unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
-		VM_KERNEL_UNSLIDE(kern_ip), 0);
-
 
 	(void) ml_set_interrupts_enabled(intr);
 
@@ -705,9 +720,8 @@ kernel_trap(
 		if (code & T_PF_EXECUTE)
 			prot |= VM_PROT_EXECUTE;
 
-		result = vm_fault(map,
-				  vm_map_trunc_page(vaddr,
-						    PAGE_MASK),
+		fault_result = result = vm_fault(map,
+				  vaddr,
 				  prot,
 				  FALSE,
 				  THREAD_UNINT, NULL, 0);
@@ -773,14 +787,12 @@ debugger_entry:
 		 */
 		sync_iss_to_iks(state);
 #if MACH_KDP
-		if (current_debugger != KDB_CUR_DB) {
-			if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
-				return;
-		}
+		if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
+			return;
 #endif
 	}
 	pal_cli();
-	panic_trap(saved_state);
+	panic_trap(saved_state, trap_pl, fault_result);
 	/*
 	 * NO RETURN
 	 */
@@ -793,15 +805,13 @@ set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
 	saved_state->isf.rip = ip;
 }
 
-
-
-
 static void
-panic_trap(x86_saved_state64_t *regs)
+panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
 {
 	const char *trapname = "Unknown";
 	pal_cr_t	cr0, cr2, cr3, cr4;
 	boolean_t	potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
+	boolean_t	potential_smap_fault = FALSE;
 
 	pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 );
 	assert(ml_get_interrupts_enabled() == FALSE);
@@ -826,6 +836,12 @@ panic_trap(x86_saved_state64_t *regs)
 		} else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
 			potential_kernel_NX_fault = TRUE;
 		}
+	} else if (pmap_smap_enabled &&
+		   regs->isf.trapno == T_PAGE_FAULT &&
+		   regs->isf.err & T_PF_PROT &&
+		   regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
+		   regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
+		potential_smap_fault = TRUE;
 	}
 
 #undef panic
@@ -836,7 +852,7 @@ panic_trap(x86_saved_state64_t *regs)
 	      "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
 	      "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
 	      "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
-	      "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s\n",
+	      "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
 	      regs->isf.rip, regs->isf.trapno, trapname,
 	      cr0, cr2, cr3, cr4,
 	      regs->rax, regs->rbx, regs->rcx, regs->rdx,
@@ -848,7 +864,9 @@ panic_trap(x86_saved_state64_t *regs)
 	      virtualized ? " VMM" : "",
 	      potential_kernel_NX_fault ? " Kernel NX fault" : "",
 	      potential_smep_fault ? " SMEP/User NX fault" : "",
-	      "");
+	      potential_smap_fault ? " SMAP fault" : "",
+	      pl,
+	      fault_result);
 	/*
 	 * This next statement is not executed,
 	 * but it's needed to stop the compiler using tail call optimization
@@ -1075,8 +1093,7 @@ user_trap(
 			if (__improbable(err & T_PF_EXECUTE))
 				prot |= VM_PROT_EXECUTE;
 			kret = vm_fault(thread->map,
-					vm_map_trunc_page(vaddr,
-							  PAGE_MASK),
+					vaddr,
 					prot, FALSE,
 					THREAD_ABORTSAFE, NULL, 0);
 
@@ -1253,3 +1270,27 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
 		__asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
 	}
 }
+
+#if DEBUG
+extern void thread_exception_return_internal(void) __dead2;
+
+void thread_exception_return(void) {
+	thread_t thread = current_thread();
+	ml_set_interrupts_enabled(FALSE);
+	if (thread_is_64bit(thread) != task_has_64BitAddr(thread->task)) {
+		panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit(thread), task_has_64BitAddr(thread->task));
+	}
+
+	if (thread_is_64bit(thread)) {
+		if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
+			panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
+		}
+	} else {
+			if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
+				panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
+
+			}
+	}
+	thread_exception_return_internal();
+}
+#endif