X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..143464d58d2bd6378e74eec636961ceb0d32fb91:/osfmk/i386/trap.c?ds=sidebyside diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c index 55be4fc75..e15f40b05 100644 --- a/osfmk/i386/trap.c +++ b/osfmk/i386/trap.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 Apple Inc. All rights reserved. + * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -60,8 +60,6 @@ * Hardware trap/fault handler. */ -#include -#include #include #include @@ -91,21 +89,11 @@ #include #include #include - +#if CONFIG_TELEMETRY +#include +#endif #include -#if MACH_KGDB -#include -#endif /* MACH_KGDB */ - -#if MACH_KDB -#include -#include -#include -#include -#include -#endif /* MACH_KDB */ - #include #include @@ -117,7 +105,7 @@ #include #include - +#include #include extern void throttle_lowpri_io(int); @@ -127,14 +115,8 @@ extern void kprint_state(x86_saved_state64_t *saved_state); * Forward declarations */ static void user_page_fault_continue(kern_return_t kret); -#ifdef __i386__ -static void panic_trap(x86_saved_state32_t *saved_state); -static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip); -extern void panic_64(x86_saved_state_t *, int, const char *, boolean_t); -#else static void panic_trap(x86_saved_state64_t *saved_state); static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip); -#endif volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */ @@ -145,6 +127,8 @@ perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routi extern boolean_t dtrace_tally_fault(user_addr_t); #endif +extern boolean_t pmap_smep_enabled; + void thread_syscall_return( kern_return_t ret) @@ -165,7 +149,7 @@ thread_syscall_return( == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT); if (kdebug_enable && is_mach) { /* Mach trap */ - KERNEL_DEBUG_CONSTANT( + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_EXCP_SC,code)|DBG_FUNC_END, ret, 0, 0, 0, 0); } @@ -189,7 +173,7 @@ thread_syscall_return( is_mach = (code < 0); if (kdebug_enable && is_mach) { /* Mach trap */ - KERNEL_DEBUG_CONSTANT( + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_EXCP_SC,-code)|DBG_FUNC_END, ret, 0, 0, 0, 0); } @@ -205,51 +189,13 @@ thread_syscall_return( ret); #endif } - throttle_lowpri_io(TRUE); + throttle_lowpri_io(1); thread_exception_return(); /*NOTREACHED*/ } -#if MACH_KDB -boolean_t debug_all_traps_with_kdb = FALSE; -extern struct db_watchpoint *db_watchpoint_list; -extern boolean_t db_watchpoints_inserted; -extern boolean_t db_breakpoints_inserted; - -void -thread_kdb_return(void) -{ - thread_t thr_act = current_thread(); - x86_saved_state_t *iss = USER_STATE(thr_act); - - pal_register_cache_state(thr_act, DIRTY); - - if (is_saved_state64(iss)) { - x86_saved_state64_t *regs; - - regs = saved_state64(iss); - - if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) { - thread_exception_return(); - /*NOTREACHED*/ - } - - } else { - x86_saved_state32_t *regs; - - regs = saved_state32(iss); - - if (kdb_trap(regs->trapno, regs->err, (void *)regs)) { - thread_exception_return(); - /*NOTREACHED*/ - } - } -} - -#endif /* MACH_KDB */ - static inline void user_page_fault_continue( kern_return_t kr) @@ -257,61 +203,20 @@ user_page_fault_continue( thread_t thread = current_thread(); user_addr_t vaddr; -#if MACH_KDB - x86_saved_state_t *regs = USER_STATE(thread); - int err; - int trapno; - - 
assert((is_saved_state32(regs) && !thread_is_64bit(thread)) || - (is_saved_state64(regs) && thread_is_64bit(thread))); -#endif - - if (thread_is_64bit(thread)) { - x86_saved_state64_t *uregs; + if (thread_is_64bit(thread)) { + x86_saved_state64_t *uregs; uregs = USER_REGS64(thread); -#if MACH_KDB - trapno = uregs->isf.trapno; - err = (int)uregs->isf.err; -#endif vaddr = (user_addr_t)uregs->cr2; } else { x86_saved_state32_t *uregs; uregs = USER_REGS32(thread); -#if MACH_KDB - trapno = uregs->trapno; - err = uregs->err; -#endif vaddr = uregs->cr2; } - if (__probable((kr == KERN_SUCCESS) || (kr == KERN_ABORTED))) { -#if MACH_KDB - if (!db_breakpoints_inserted) { - db_set_breakpoints(); - } - if (db_watchpoint_list && - db_watchpoints_inserted && - (err & T_PF_WRITE) && - db_find_watchpoint(thread->map, - (vm_offset_t)vaddr, - saved_state32(regs))) - kdb_trap(T_WATCHPOINT, 0, saved_state32(regs)); -#endif /* MACH_KDB */ - thread_exception_return(); - /*NOTREACHED*/ - } - -#if MACH_KDB - if (debug_all_traps_with_kdb && - kdb_trap(trapno, err, saved_state32(regs))) { - thread_exception_return(); - /*NOTREACHED*/ - } -#endif /* MACH_KDB */ /* PAL debug hook */ pal_dbg_page_fault( thread, vaddr, kr ); @@ -426,6 +331,9 @@ void interrupt_populate_latency_stats(char *buf, unsigned bufsize) { snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency); } +uint32_t interrupt_timer_coalescing_enabled = 1; +uint64_t interrupt_coalesced_timers; + /* * Handle interrupts: * - local APIC interrupts (IPIs, timers, etc) are handled by the kernel, @@ -440,6 +348,8 @@ interrupt(x86_saved_state_t *state) boolean_t user_mode = FALSE; int ipl; int cnum = cpu_number(); + cpu_data_t *cdp = cpu_data_ptr[cnum]; + int itype = 0; if (is_saved_state64(state) == TRUE) { x86_saved_state64_t *state64; @@ -463,14 +373,37 @@ interrupt(x86_saved_state_t *state) interrupt_num = state32->trapno; } - KERNEL_DEBUG_CONSTANT( + if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) + cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++; + + if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) + itype = 1; + else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) + itype = 2; + else + itype = 3; + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - interrupt_num, rip, user_mode, 0, 0); + interrupt_num, + (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)), + user_mode, itype, 0); SCHED_STATS_INTERRUPT(current_processor()); - ipl = get_preemption_level(); +#if CONFIG_TELEMETRY + if (telemetry_needs_record + && (current_task() != kernel_task) +#if CONFIG_SCHED_IDLE_IN_PLACE + && ((current_thread()->state & TH_IDLE) == 0) /* idle-in-place should be treated like the idle thread */ +#endif + ) { + telemetry_mark_curthread(user_mode); + } +#endif + ipl = get_preemption_level(); + /* * Handle local APIC interrupts * else call platform expert for devices. 
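
The new interrupt() prologue above tags each kdebug trace record with a coarse interrupt class: 1 for an interprocessor interrupt, 2 for the local APIC timer, 3 for anything else. A minimal standalone sketch of that classification; the LAPIC_* names are the kernel's (osfmk/i386/lapic.h) but the numeric values shown here are illustrative assumptions, not the real ones.

/* Illustrative values; the real constants come from osfmk/i386/lapic.h. */
#define LAPIC_DEFAULT_INTERRUPT_BASE    0xD0
#define LAPIC_INTERPROCESSOR_INTERRUPT  0x08    /* offset: assumed for illustration */
#define LAPIC_TIMER_INTERRUPT           0x0C    /* offset: assumed for illustration */

/* Classify a vector the way interrupt() does for its trace record:
 * 1 = IPI, 2 = local APIC timer, 3 = any other device interrupt. */
static int
interrupt_type(int interrupt_num)
{
	if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT))
		return 1;
	if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT))
		return 2;
	return 3;
}
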
@@ -482,21 +415,41 @@ interrupt(x86_saved_state_t *state) panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x\n", interrupt_num, ipl, get_preemption_level()); } - KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END, - interrupt_num, 0, 0, 0, 0); - if (cpu_data_ptr[cnum]->cpu_nested_istack) { - cpu_data_ptr[cnum]->cpu_nested_istack_events++; + if (__improbable(cdp->cpu_nested_istack)) { + cdp->cpu_nested_istack_events++; } else { - uint64_t int_latency = mach_absolute_time() - cpu_data_ptr[cnum]->cpu_int_event_time; - if (ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended()) { - panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cpu_data_ptr[cnum]->cpu_prior_signals, cpu_data_ptr[cnum]->cpu_signals); + uint64_t ctime = mach_absolute_time(); + uint64_t int_latency = ctime - cdp->cpu_int_event_time; + uint64_t esdeadline, ehdeadline; + /* Attempt to process deferred timers in the context of + * this interrupt, unless interrupt time has already exceeded + * TCOAL_ILAT_THRESHOLD. + */ +#define TCOAL_ILAT_THRESHOLD (30000ULL) + + if ((int_latency < TCOAL_ILAT_THRESHOLD) && + interrupt_timer_coalescing_enabled) { + esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline; + ehdeadline = cdp->rtclock_timer.deadline; + if ((ctime >= esdeadline) && (ctime < ehdeadline)) { + interrupt_coalesced_timers++; + TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0); + rtclock_intr(state); + TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0); + } else { + TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0); + } } - if (int_latency > cpu_data_ptr[cnum]->cpu_max_observed_int_latency) { - cpu_data_ptr[cnum]->cpu_max_observed_int_latency = int_latency; - cpu_data_ptr[cnum]->cpu_max_observed_int_latency_vector = interrupt_num; + + if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) { + panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals); + } + + if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) { + cdp->cpu_max_observed_int_latency = int_latency; + cdp->cpu_max_observed_int_latency_vector = interrupt_num; } } @@ -504,17 +457,22 @@ interrupt(x86_saved_state_t *state) * Having serviced the interrupt first, look at the interrupted stack depth. 
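
The coalescing hunk above lets an unrelated device interrupt service a pending timer early: if handling the interrupt has so far cost less than TCOAL_ILAT_THRESHOLD units of absolute time, and the clock has passed the earliest soft deadline but not yet the hard deadline, rtclock_intr() is invoked in place and interrupt_coalesced_timers is bumped, saving a dedicated timer interrupt later. A standalone sketch of just the decision predicate, with simplified scalar parameters standing in for the per-CPU rtclock_timer fields:

#include <stdbool.h>
#include <stdint.h>

#define TCOAL_ILAT_THRESHOLD 30000ULL	/* interrupt-latency budget, absolute-time units */

/* Mirror the check added to interrupt(): only fire a deferred timer from
 * this interrupt if we are still within the latency budget and the timer
 * is past its soft deadline but before its hard deadline. */
static bool
should_coalesce_timer(uint64_t now, uint64_t int_latency,
    uint64_t earliest_soft_deadline, uint64_t hard_deadline,
    bool coalescing_enabled)
{
	if (!coalescing_enabled || int_latency >= TCOAL_ILAT_THRESHOLD)
		return false;
	return (now >= earliest_soft_deadline) && (now < hard_deadline);
}
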
*/ if (!user_mode) { - uint64_t depth = cpu_data_ptr[cnum]->cpu_kernel_stack + uint64_t depth = cdp->cpu_kernel_stack + sizeof(struct x86_kernel_state) + sizeof(struct i386_exception_link *) - rsp; - if (depth > kernel_stack_depth_max) { + if (__improbable(depth > kernel_stack_depth_max)) { kernel_stack_depth_max = (vm_offset_t)depth; KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH), - (long) depth, (long) rip, 0, 0, 0); + (long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0); } } + + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END, + interrupt_num, 0, 0, 0, 0); + } static inline void @@ -540,11 +498,7 @@ kernel_trap( x86_saved_state_t *state, uintptr_t *lo_spp) { -#ifdef __i386__ - x86_saved_state32_t *saved_state; -#else x86_saved_state64_t *saved_state; -#endif int code; user_addr_t vaddr; int type; @@ -560,28 +514,9 @@ kernel_trap( int fault_in_copy_window = -1; #endif int is_user = 0; -#if MACH_KDB - pt_entry_t *pte; -#endif /* MACH_KDB */ thread = current_thread(); -#ifdef __i386__ - if (__improbable(is_saved_state64(state))) { - panic_64(state, 0, "Kernel trap with 64-bit state", FALSE); - } - - saved_state = saved_state32(state); - - /* Record cpu where state was captured (trampolines don't set this) */ - saved_state->cpu = cpu_number(); - - vaddr = (user_addr_t)saved_state->cr2; - type = saved_state->trapno; - code = saved_state->err & 0xffff; - intr = (saved_state->efl & EFL_IF) != 0; /* state of ints at trap */ - kern_ip = (vm_offset_t)saved_state->eip; -#else if (__improbable(is_saved_state32(state))) panic("kernel_trap(%p) with 32-bit state", state); saved_state = saved_state64(state); @@ -594,7 +529,6 @@ kernel_trap( code = (int)(saved_state->isf.err & 0xffff); intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */ kern_ip = (vm_offset_t)saved_state->isf.rip; -#endif myast = ast_pending(); @@ -637,8 +571,9 @@ kernel_trap( if (__improbable(T_PREEMPT == type)) { ast_taken(AST_PREEMPTION, FALSE); - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, - 0, 0, 0, kern_ip, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, + 0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0); return; } @@ -679,10 +614,22 @@ kernel_trap( is_user = -1; } #else - if (vaddr < VM_MAX_USER_PAGE_ADDRESS) { + if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) { /* fault occurred in userspace */ map = thread->map; is_user = -1; + + /* Intercept a potential Supervisor Mode Execute + * Protection fault. These criteria identify + * both NX faults and SMEP faults, but both + * are fatal. We avoid checking PTEs (racy). + * (The VM could just redrive a SMEP fault, hence + * the intercept). + */ + if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) { + goto debugger_entry; + } + /* * If we're not sharing cr3 with the user * and we faulted in copyio, @@ -699,9 +646,11 @@ kernel_trap( #endif } } - KERNEL_DEBUG_CONSTANT( + user_addr_t kd_vaddr = is_user ? 
vaddr : VM_KERNEL_UNSLIDE(vaddr); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, - (unsigned)(vaddr >> 32), (unsigned)vaddr, is_user, kern_ip, 0); + (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user, + VM_KERNEL_UNSLIDE(kern_ip), 0); (void) ml_set_interrupts_enabled(intr); @@ -724,11 +673,7 @@ kernel_trap( fpSSEexterrflt(); return; case T_DEBUG: -#ifdef __i386__ - if ((saved_state->efl & EFL_TF) == 0 && NO_WATCHPOINTS) -#else if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) -#endif { /* We've somehow encountered a debug * register match that does not belong @@ -744,40 +689,6 @@ kernel_trap( goto debugger_entry; #endif case T_PAGE_FAULT: - /* - * If the current map is a submap of the kernel map, - * and the address is within that map, fault on that - * map. If the same check is done in vm_fault - * (vm_map_lookup), we may deadlock on the kernel map - * lock. - */ - - prot = VM_PROT_READ; - - if (code & T_PF_WRITE) - prot |= VM_PROT_WRITE; -#if PAE - if (code & T_PF_EXECUTE) - prot |= VM_PROT_EXECUTE; -#endif - -#if MACH_KDB - /* - * Check for watchpoint on kernel static data. - * vm_fault would fail in this case - */ - if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && - (code & T_PF_WRITE) && vaddr < vm_map_max(map) && - ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) { - pmap_store_pte( - pte, - *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE); - /* XXX need invltlb here? */ - - result = KERN_SUCCESS; - goto look_for_watchpoints; - } -#endif /* MACH_KDB */ #if CONFIG_DTRACE if (thread != THREAD_NULL && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ @@ -791,25 +702,23 @@ kernel_trap( } } #endif /* CONFIG_DTRACE */ + + prot = VM_PROT_READ; + + if (code & T_PF_WRITE) + prot |= VM_PROT_WRITE; +#if PAE + if (code & T_PF_EXECUTE) + prot |= VM_PROT_EXECUTE; +#endif result = vm_fault(map, - vm_map_trunc_page(vaddr), + vm_map_trunc_page(vaddr, + PAGE_MASK), prot, FALSE, THREAD_UNINT, NULL, 0); -#if MACH_KDB - if (result == KERN_SUCCESS) { - /* - * Look for watchpoints - */ -look_for_watchpoints: - if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) && - db_find_watchpoint(map, vaddr, saved_state)) - kdb_trap(T_WATCHPOINT, 0, saved_state); - } -#endif /* MACH_KDB */ - if (result == KERN_SUCCESS) { #if NCOPY_WINDOWS > 0 if (fault_in_copy_window != -1) { @@ -863,9 +772,6 @@ FALL_THROUGH: kprintf("kernel_trap() ignoring spurious trap 15\n"); return; } -#if defined(__x86_64__) && DEBUG - kprint_state(saved_state); -#endif debugger_entry: /* Ensure that the i386_kernel_state at the base of the * current thread's stack (if any) is synchronized with the @@ -873,30 +779,14 @@ debugger_entry: * access through the debugger. 
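
Both kernel_trap() and user_trap() translate the hardware page-fault error code into the vm_prot_t that vm_fault() should test. A standalone sketch of that translation; the T_PF_* bit values match the architectural x86 #PF error-code layout, and the VM_PROT_* values are Mach's:

/* x86 page-fault error-code bits (see osfmk/i386/trap.h). */
#define T_PF_PROT	0x1	/* protection violation (vs. page not present) */
#define T_PF_WRITE	0x2	/* fault was on a write access */
#define T_PF_EXECUTE	0x10	/* fault was on an instruction fetch */

typedef int vm_prot_t;
#define VM_PROT_READ	0x1
#define VM_PROT_WRITE	0x2
#define VM_PROT_EXECUTE	0x4

/* Map the CPU's error code to the protection vm_fault() must satisfy;
 * the execute bit is only meaningful when NX/PAE paging is enabled,
 * hence the #if PAE guard in the kernel source. */
static vm_prot_t
fault_prot(int err)
{
	vm_prot_t prot = VM_PROT_READ;

	if (err & T_PF_WRITE)
		prot |= VM_PROT_WRITE;
	if (err & T_PF_EXECUTE)
		prot |= VM_PROT_EXECUTE;
	return prot;
}
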
*/ sync_iss_to_iks(state); -#if MACH_KDB -restart_debugger: -#endif /* MACH_KDB */ #if MACH_KDP - if (current_debugger != KDB_CUR_DB) { + if (current_debugger != KDB_CUR_DB) { if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) return; - } else { -#endif /* MACH_KDP */ -#if MACH_KDB - if (kdb_trap(type, code, saved_state)) { - if (switch_debugger) { - current_debugger = KDP_CUR_DB; - switch_debugger = 0; - goto restart_debugger; - } - return; - } -#endif /* MACH_KDB */ -#if MACH_KDP } #endif } - __asm__ volatile("cli":::"cc"); + pal_cli(); panic_trap(saved_state); /* * NO RETURN @@ -904,66 +794,21 @@ restart_debugger: } -#ifdef __i386__ -static void -set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip) -{ - saved_state->eip = ip; -} -#else static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip) { saved_state->isf.rip = ip; } -#endif -#ifdef __i386__ -static void -panic_trap(x86_saved_state32_t *regs) -{ - const char *trapname = "Unknown"; - pal_cr_t cr0, cr2, cr3, cr4; - - pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 ); - - /* - * Issue an I/O port read if one has been requested - this is an - * event logic analyzers can use as a trigger point. - */ - panic_io_port_read(); - kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip); - kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n", - cr0, cr2, cr3, cr4); - if (regs->trapno < TRAP_TYPES) - trapname = trap_type[regs->trapno]; -#undef panic - panic("Kernel trap at 0x%08x, type %d=%s, registers:\n" - "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n" - "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n" - "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n" - "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n" - "Error code: 0x%08x\n", - regs->eip, regs->trapno, trapname, cr0, cr2, cr3, cr4, - regs->eax,regs->ebx,regs->ecx,regs->edx, - regs->cr2,regs->ebp,regs->esi,regs->edi, - regs->efl,regs->eip,regs->cs & 0xFFFF, regs->ds & 0xFFFF, regs->err); - /* - * This next statement is not executed, - * but it's needed to stop the compiler using tail call optimization - * for the panic call - which confuses the subsequent backtrace. 
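
Throughout this change, kernel instruction pointers handed to kdebug are wrapped in VM_KERNEL_UNSLIDE() so trace records carry build-static addresses rather than leaking the KASLR slide. A simplified sketch of the idea; the variables and bounds below are hypothetical stand-ins, and the real macro also handles ranges (such as kext address space) that this version ignores.

#include <stdint.h>

/* Hypothetical stand-ins: xnu derives the slide and image bounds at boot. */
static uint64_t vm_kernel_slide = 0x0000000014000000ULL;	/* example only */
static uint64_t vm_kernel_base  = 0xFFFFFF8000200000ULL;	/* example only */
static uint64_t vm_kernel_top   = 0xFFFFFF8000A00000ULL;	/* example only */

/* Strip the ASLR slide from addresses inside the slid kernel image;
 * anything else (user RIPs, values already unslid) passes through. */
static uint64_t
kernel_unslide(uint64_t addr)
{
	if (addr >= vm_kernel_base && addr < vm_kernel_top)
		return addr - vm_kernel_slide;
	return addr;
}
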
- */ - cr0 = 0; -} -#else static void panic_trap(x86_saved_state64_t *regs) { const char *trapname = "Unknown"; pal_cr_t cr0, cr2, cr3, cr4; + boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE; pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 ); assert(ml_get_interrupts_enabled() == FALSE); @@ -981,6 +826,15 @@ panic_trap(x86_saved_state64_t *regs) if (regs->isf.trapno < TRAP_TYPES) trapname = trap_type[regs->isf.trapno]; + + if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) { + if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) { + potential_smep_fault = TRUE; + } else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) { + potential_kernel_NX_fault = TRUE; + } + } + #undef panic panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n" "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n" @@ -989,7 +843,7 @@ panic_trap(x86_saved_state64_t *regs) "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" - "CR2: 0x%016llx, Error code: 0x%016llx, Faulting CPU: 0x%x\n", + "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s\n", regs->isf.rip, regs->isf.trapno, trapname, cr0, cr2, cr3, cr4, regs->rax, regs->rbx, regs->rcx, regs->rdx, @@ -997,7 +851,10 @@ panic_trap(x86_saved_state64_t *regs) regs->r8, regs->r9, regs->r10, regs->r11, regs->r12, regs->r13, regs->r14, regs->r15, regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF, - regs->isf.ss & 0xFFFF,regs->cr2, regs->isf.err, regs->isf.cpu); + regs->isf.ss & 0xFFFF,regs->cr2, regs->isf.err, regs->isf.cpu, + virtualized ? " VMM" : "", + potential_kernel_NX_fault ? " Kernel NX fault" : "", + potential_smep_fault ? 
" SMEP/User NX fault" : ""); /* * This next statement is not executed, * but it's needed to stop the compiler using tail call optimization @@ -1005,7 +862,6 @@ panic_trap(x86_saved_state64_t *regs) */ cr0 = 0; } -#endif #if CONFIG_DTRACE extern kern_return_t dtrace_user_probe(x86_saved_state_t *); @@ -1071,7 +927,7 @@ user_trap( pal_sti(); - KERNEL_DEBUG_CONSTANT( + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE, (unsigned)(vaddr>>32), (unsigned)vaddr, (unsigned)(rip>>32), (unsigned)rip, 0); @@ -1214,7 +1070,8 @@ user_trap( break; case T_PAGE_FAULT: - prot = VM_PROT_READ; + { + prot = VM_PROT_READ; if (err & T_PF_WRITE) prot |= VM_PROT_WRITE; @@ -1222,13 +1079,19 @@ user_trap( if (__improbable(err & T_PF_EXECUTE)) prot |= VM_PROT_EXECUTE; #endif - kret = vm_fault(thread->map, vm_map_trunc_page(vaddr), - prot, FALSE, - THREAD_ABORTSAFE, NULL, 0); + kret = vm_fault(thread->map, + vm_map_trunc_page(vaddr, + PAGE_MASK), + prot, FALSE, + THREAD_ABORTSAFE, NULL, 0); + + if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) { + thread_exception_return(); + /*NOTREACHED*/ + } user_page_fault_continue(kret); - - /* NOTREACHED */ + } /* NOTREACHED */ break; case T_SSE_FLOAT_ERROR: @@ -1254,14 +1117,6 @@ user_trap( break; default: -#if MACH_KGDB - Debugger("Unanticipated user trap"); - return; -#endif /* MACH_KGDB */ -#if MACH_KDB - if (kdb_trap(type, err, saved_state32(saved_state))) - return; -#endif /* MACH_KDB */ panic("Unexpected user trap, type %d", type); return; } @@ -1322,39 +1177,6 @@ i386_exception( } -#if MACH_KDB - -extern void db_i386_state(x86_saved_state32_t *regs); - -#include - -void -db_i386_state( - x86_saved_state32_t *regs) -{ - db_printf("eip %8x\n", regs->eip); - db_printf("trap %8x\n", regs->trapno); - db_printf("err %8x\n", regs->err); - db_printf("efl %8x\n", regs->efl); - db_printf("ebp %8x\n", regs->ebp); - db_printf("esp %8x\n", regs->cr2); - db_printf("uesp %8x\n", regs->uesp); - db_printf("cs %8x\n", regs->cs & 0xff); - db_printf("ds %8x\n", regs->ds & 0xff); - db_printf("es %8x\n", regs->es & 0xff); - db_printf("fs %8x\n", regs->fs & 0xff); - db_printf("gs %8x\n", regs->gs & 0xff); - db_printf("ss %8x\n", regs->ss & 0xff); - db_printf("eax %8x\n", regs->eax); - db_printf("ebx %8x\n", regs->ebx); - db_printf("ecx %8x\n", regs->ecx); - db_printf("edx %8x\n", regs->edx); - db_printf("esi %8x\n", regs->esi); - db_printf("edi %8x\n", regs->edi); -} - -#endif /* MACH_KDB */ - /* Synchronize a thread's i386_kernel_state (if any) with the given * i386_saved_state_t obtained from the trap/IPI handler; called in * kernel_trap() prior to entering the debugger, and when receiving @@ -1373,27 +1195,11 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) pal_get_kern_regs( saved_state ); if ((kstack = current_thread()->kernel_stack) != 0) { -#ifdef __i386__ - x86_saved_state32_t *regs = saved_state32(saved_state); -#else x86_saved_state64_t *regs = saved_state64(saved_state); -#endif iks = STACK_IKS(kstack); /* Did we take the trap/interrupt in kernel mode? 
*/ -#ifdef __i386__ - if (regs == USER_REGS32(current_thread())) - record_active_regs = TRUE; - else { - iks->k_ebx = regs->ebx; - iks->k_esp = (int)regs; - iks->k_ebp = regs->ebp; - iks->k_edi = regs->edi; - iks->k_esi = regs->esi; - iks->k_eip = regs->eip; - } -#else if (regs == USER_REGS64(current_thread())) record_active_regs = TRUE; else { @@ -1406,20 +1212,9 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) iks->k_r15 = regs->r15; iks->k_rip = regs->isf.rip; } -#endif } if (record_active_regs == TRUE) { -#ifdef __i386__ - /* Show the trap handler path */ - __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx)); - __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp)); - __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp)); - __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi)); - __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi)); - /* "Current" instruction pointer */ - __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip)); -#else /* Show the trap handler path */ __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx)); __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp)); @@ -1433,7 +1228,6 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) : "=m" (iks->k_rip) : : "rax"); -#endif } } @@ -1450,16 +1244,6 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) { if ((kstack = current_thread()->kernel_stack) != 0) { iks = STACK_IKS(kstack); -#ifdef __i386__ - /* Display the trap handler path */ - __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx)); - __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp)); - __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp)); - __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi)); - __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi)); - /* "Current" instruction pointer */ - __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip)); -#else /* Display the trap handler path */ __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx)); __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp)); @@ -1470,6 +1254,5 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) { __asm__ volatile("movq %%r15, %0" : "=m" (iks->k_r15)); /* "Current" instruction pointer */ __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax"); -#endif } }
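
sync_iss_to_iks() and sync_iss_to_iks_unconditionally() snapshot the live callee-saved registers, plus a "current" RIP taken via a RIP-relative lea of a local label, so a debugger attached later can walk the trap-handler path. A self-contained, x86_64-only user-space sketch of the same capture technique; the field names mirror the kernel's x86_kernel_state, trimmed to four registers.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's x86_kernel_state. */
struct regsnap {
	uint64_t k_rbx, k_rsp, k_rbp, k_rip;
};

/* Capture live registers the way the sync routines do: store each
 * register to memory, then take the address of a local label with a
 * RIP-relative LEA as the "current" instruction pointer. */
static void
capture_regs(struct regsnap *s)
{
	__asm__ volatile("movq %%rbx, %0" : "=m" (s->k_rbx));
	__asm__ volatile("movq %%rsp, %0" : "=m" (s->k_rsp));
	__asm__ volatile("movq %%rbp, %0" : "=m" (s->k_rbp));
	__asm__ volatile("leaq 1f(%%rip), %%rax; movq %%rax, %0\n1:"
	    : "=m" (s->k_rip) : : "rax");
}

int
main(void)
{
	struct regsnap s;
	capture_regs(&s);
	printf("rip=%#llx rsp=%#llx\n",
	    (unsigned long long)s.k_rip, (unsigned long long)s.k_rsp);
	return 0;
}
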