/*
- * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/telemetry.h>
#endif
#include <sys/kdebug.h>
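+/* for kperf_interrupt(), the new profiling hook called from interrupt() below */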
+#include <kperf/kperf.h>
#include <prng/random.h>
#include <string.h>
static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result);
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
-volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
-
#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */
pal_register_cache_state(thr_act, DIRTY);
- if (thread_is_64bit(thr_act)) {
+ if (thread_is_64bit_addr(thr_act)) {
x86_saved_state64_t *regs;
regs = USER_REGS64(thr_act);
ret);
#endif
}
+
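+ /* Debug-only leak check: a tag set with thread_set_allocation_name() must be cleared before returning to user space. */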
+#if DEBUG || DEVELOPMENT
+ kern_allocation_name_t
+ prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
+ assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
+#endif /* DEBUG || DEVELOPMENT */
+
throttle_lowpri_io(1);
thread_exception_return();
thread_t thread = current_thread();
user_addr_t vaddr;
- if (thread_is_64bit(thread)) {
+ if (thread_is_64bit_addr(thread)) {
x86_saved_state64_t *uregs;
uregs = USER_REGS64(thread);
int ipl;
int cnum = cpu_number();
cpu_data_t *cdp = cpu_data_ptr[cnum];
- int itype = 0;
+ int itype = DBG_INTR_TYPE_UNKNOWN;

- if (is_saved_state64(state) == TRUE) {
- x86_saved_state64_t *state64;
-
- state64 = saved_state64(state);
- rip = state64->isf.rip;
- rsp = state64->isf.rsp;
- interrupt_num = state64->isf.trapno;
-#ifdef __x86_64__
- if(state64->isf.cs & 0x03)
-#endif
- user_mode = TRUE;
- } else {
- x86_saved_state32_t *state32;
-
- state32 = saved_state32(state);
- if (state32->cs & 0x03)
- user_mode = TRUE;
- rip = state32->eip;
- rsp = state32->uesp;
- interrupt_num = state32->trapno;
- }
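+ /* The 32-bit saved-state branch above is gone: the kernel is 64-bit only, so the interrupt state is always 64-bit. */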
+ x86_saved_state64_t *state64 = saved_state64(state);
+ rip = state64->isf.rip;
+ rsp = state64->isf.rsp;
+ interrupt_num = state64->isf.trapno;
+ if(state64->isf.cs & 0x03)
+ user_mode = TRUE;
if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage)
cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT))
- itype = 1;
+ itype = DBG_INTR_TYPE_IPI;
else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT))
- itype = 2;
+ itype = DBG_INTR_TYPE_TIMER;
else
- itype = 3;
+ itype = DBG_INTR_TYPE_OTHER;
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
#if CONFIG_TELEMETRY
if (telemetry_needs_record) {
- telemetry_mark_curthread(user_mode);
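+ /* note the new second argument; presumably it distinguishes PMI-driven telemetry samples */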
+ telemetry_mark_curthread(user_mode, FALSE);
}
#endif
*/
if (!user_mode) {
uint64_t depth = cdp->cpu_kernel_stack
- + sizeof(struct x86_kernel_state)
+ + sizeof(struct thread_kernel_state)
+ sizeof(struct i386_exception_link *)
- rsp;
if (__improbable(depth > kernel_stack_depth_max)) {
(long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
}
}
-
+
if (cnum == master_cpu)
ml_entropy_collect();
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
- MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
- interrupt_num, 0, 0, 0, 0);
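+ /* new hook so kperf can attribute and sample interrupt activity */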
+#if KPERF
+ kperf_interrupt();
+#endif /* KPERF */
+
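+ /* KDBG_RELEASE zero-fills the unused trace arguments, replacing the explicit 0s above */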
+ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
+ interrupt_num);
assert(ml_get_interrupts_enabled() == FALSE);
}
kern_return_t result = KERN_FAILURE;
kern_return_t fault_result = KERN_SUCCESS;
thread_t thread;
- ast_t *myast;
boolean_t intr;
vm_prot_t prot;
struct recovery *rp;
intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */
kern_ip = (vm_offset_t)saved_state->isf.rip;
- myast = ast_pending();
-
is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);
- perfASTCallback astfn = perfASTHook;
- if (__improbable(astfn != NULL)) {
- if (*myast & AST_CHUD_ALL)
- astfn(AST_CHUD_ALL, myast);
- } else
- *myast &= ~AST_CHUD_ALL;
-
-
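+ /* the legacy CHUD AST/trap hooks are dropped throughout this file (apparently superseded by kperf) */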
#if CONFIG_DTRACE
/*
* Is there a DTrace hook?
* as soon we possibly can to hold latency down
*/
if (__improbable(T_PREEMPT == type)) {
- ast_taken(AST_PREEMPTION, FALSE);
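+ /* ast_taken() was split into kernel/user variants; kernel preemption now goes through ast_taken_kernel() */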
+ ast_taken_kernel();
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
return;
case T_SSE_FLOAT_ERROR:
fpSSEexterrflt();
return;
- case T_DEBUG:
+
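+ /* Presumably the AVX-512 on-demand path: fpUDflt() may handle a #UD from a valid but not-yet-enabled instruction; anything else falls through to the debugger. */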
+ case T_INVALID_OPCODE:
+ fpUDflt(kern_ip);
+ goto debugger_entry;
+
+ case T_DEBUG:
if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS)
{
/* We've somehow encountered a debug
return;
}
goto debugger_entry;
-#ifdef __x86_64__
case T_INT3:
goto debugger_entry;
-#endif
case T_PAGE_FAULT:
#if CONFIG_DTRACE
fault_result = result = vm_fault(map,
vaddr,
prot,
- FALSE,
+ FALSE, VM_KERN_MEMORY_NONE,
THREAD_UNINT, NULL, 0);
if (result == KERN_SUCCESS) {
*/
}
-
static void
set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
{
*/
panic_io_port_read();
- kprintf("panic trap number 0x%x, rip 0x%016llx\n",
- regs->isf.trapno, regs->isf.rip);
+ kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
+ cpu_number(), regs->isf.trapno, regs->isf.rip);
kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
cr0, cr2, cr3, cr4);
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif
+#if DEBUG
+uint32_t fsigs[2];
+uint32_t fsigns, fsigcs;
+#endif
+
/*
* Trap from user mode.
*/
user_addr_t vaddr;
vm_prot_t prot;
thread_t thread = current_thread();
- ast_t *myast;
kern_return_t kret;
user_addr_t rip;
unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
- assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
- (is_saved_state64(saved_state) && thread_is_64bit(thread)));
+ assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
+ (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));
if (is_saved_state64(saved_state)) {
x86_saved_state64_t *regs;
subcode = 0;
exc = 0;
-#if DEBUG_TRACE
- kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
- saved_state, type, vaddr);
-#endif
-
- perfASTCallback astfn = perfASTHook;
- if (__improbable(astfn != NULL)) {
- myast = ast_pending();
- if (*myast & AST_CHUD_ALL) {
- astfn(AST_CHUD_ALL, myast);
- }
- }
-
- /* Is there a hook? */
- perfCallback fn = perfTrapHook;
- if (__improbable(fn != NULL)) {
- if (fn(type, saved_state, 0, 0) == KERN_SUCCESS)
- return; /* If it succeeds, we are done... */
- }
-
#if CONFIG_DTRACE
/*
* DTrace does not consume all user traps, only INT_3's for now.
* because the high order bits are not
* used on x86_64
*/
- if (thread_is_64bit(thread)) {
+ if (thread_is_64bit_addr(thread)) {
x86_debug_state64_t *ids = pcb->ids;
ids->dr6 = dr6;
} else { /* 32 bit thread */
break;
case T_INVALID_OPCODE:
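+ /* RC_HIDE_XNU_J137 appears to gate AVX-512 support; fpUDflt() can enable the state and resume the thread without raising an exception */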
+#if !defined(RC_HIDE_XNU_J137)
+ fpUDflt(rip); /* May return from exception directly */
+#endif
exc = EXC_BAD_INSTRUCTION;
code = EXC_I386_INVOP;
break;
prot |= VM_PROT_WRITE;
if (__improbable(err & T_PF_EXECUTE))
prot |= VM_PROT_EXECUTE;
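+ /* DEVELOPMENT/DEBUG: hash the thread's FP/SIMD state before and after the fault and panic on mismatch (save-area corruption check) */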
+#if DEVELOPMENT || DEBUG
+ uint32_t fsig = 0;
+ fsig = thread_fpsimd_hash(thread);
+#if DEBUG
+ fsigs[0] = fsig;
+#endif
+#endif
kret = vm_fault(thread->map,
vaddr,
- prot, FALSE,
+ prot, FALSE, VM_KERN_MEMORY_NONE,
THREAD_ABORTSAFE, NULL, 0);
-
+#if DEVELOPMENT || DEBUG
+ if (fsig) {
+ uint32_t fsig2 = thread_fpsimd_hash(thread);
+#if DEBUG
+ fsigcs++;
+ fsigs[1] = fsig2;
+#endif
+ if (fsig != fsig2) {
+ panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
+ }
+ } else {
+#if DEBUG
+ fsigns++;
+#endif
+ }
+#endif
if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
thread_exception_return();
/*NOTREACHED*/
/* NOTREACHED */
}
-
-/*
- * Handle AST traps for i386.
- */
-
-extern void log_thread_action (thread_t, char *);
-
-void
-i386_astintr(int preemption)
-{
- ast_t mask = AST_ALL;
- spl_t s;
-
- if (preemption)
- mask = AST_PREEMPTION;
-
- s = splsched();
-
- ast_taken(mask, s);
-
- splx(s);
-}
-
/*
* Handle exceptions for i386.
*
void
sync_iss_to_iks(x86_saved_state_t *saved_state)
{
- struct x86_kernel_state *iks;
+ struct x86_kernel_state *iks = NULL;
vm_offset_t kstack;
boolean_t record_active_regs = FALSE;
if (saved_state && saved_state->flavor == THREAD_STATE_NONE)
pal_get_kern_regs( saved_state );
- if ((kstack = current_thread()->kernel_stack) != 0) {
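+ /* the extra check presumably tolerates early panic paths where current_thread() is not yet valid */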
+ if (current_thread() != NULL &&
+ (kstack = current_thread()->kernel_stack) != 0) {
x86_saved_state64_t *regs = saved_state64(saved_state);
iks = STACK_IKS(kstack);
}
#if DEBUG
+#define TERI 1
+#endif
+
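+/* TERI: debug-only wrapper that sanity-checks thread/task bitness and the GDT descriptors on exception return */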
+#if TERI
extern void thread_exception_return_internal(void) __dead2;
void thread_exception_return(void) {
thread_t thread = current_thread();
ml_set_interrupts_enabled(FALSE);
- if (thread_is_64bit(thread) != task_has_64BitAddr(thread->task)) {
- panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit(thread), task_has_64BitAddr(thread->task));
+ if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(thread->task)) {
+ panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit_addr(thread), task_has_64Bit_addr(thread->task));
}
- if (thread_is_64bit(thread)) {
+ if (thread_is_64bit_addr(thread)) {
if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
}