X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/5c9f46613a83ebfc29a5b1f099448259e96a98f0..cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e:/osfmk/i386/mp.c diff --git a/osfmk/i386/mp.c b/osfmk/i386/mp.c index 1c11d9601..b6654cc39 100644 --- a/osfmk/i386/mp.c +++ b/osfmk/i386/mp.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,7 +32,6 @@ #include #include #include -#include #include #include @@ -44,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -55,8 +55,6 @@ #include #include -#include - #include #include #include @@ -77,9 +75,6 @@ #endif #include -#include -#include - #include #include @@ -88,67 +83,67 @@ #include #endif /* MONOTONIC */ -#if MP_DEBUG -#define PAUSE delay(1000000) -#define DBG(x...) kprintf(x) +#if MP_DEBUG +#define PAUSE delay(1000000) +#define DBG(x...) kprintf(x) #else #define DBG(x...) 
#define PAUSE -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ /* Debugging/test trace events: */ -#define TRACE_MP_TLB_FLUSH MACHDBG_CODE(DBG_MACH_MP, 0) -#define TRACE_MP_CPUS_CALL MACHDBG_CODE(DBG_MACH_MP, 1) -#define TRACE_MP_CPUS_CALL_LOCAL MACHDBG_CODE(DBG_MACH_MP, 2) -#define TRACE_MP_CPUS_CALL_ACTION MACHDBG_CODE(DBG_MACH_MP, 3) -#define TRACE_MP_CPUS_CALL_NOBUF MACHDBG_CODE(DBG_MACH_MP, 4) -#define TRACE_MP_CPU_FAST_START MACHDBG_CODE(DBG_MACH_MP, 5) -#define TRACE_MP_CPU_START MACHDBG_CODE(DBG_MACH_MP, 6) -#define TRACE_MP_CPU_DEACTIVATE MACHDBG_CODE(DBG_MACH_MP, 7) +#define TRACE_MP_TLB_FLUSH MACHDBG_CODE(DBG_MACH_MP, 0) +#define TRACE_MP_CPUS_CALL MACHDBG_CODE(DBG_MACH_MP, 1) +#define TRACE_MP_CPUS_CALL_LOCAL MACHDBG_CODE(DBG_MACH_MP, 2) +#define TRACE_MP_CPUS_CALL_ACTION MACHDBG_CODE(DBG_MACH_MP, 3) +#define TRACE_MP_CPUS_CALL_NOBUF MACHDBG_CODE(DBG_MACH_MP, 4) +#define TRACE_MP_CPU_FAST_START MACHDBG_CODE(DBG_MACH_MP, 5) +#define TRACE_MP_CPU_START MACHDBG_CODE(DBG_MACH_MP, 6) +#define TRACE_MP_CPU_DEACTIVATE MACHDBG_CODE(DBG_MACH_MP, 7) -#define ABS(v) (((v) > 0)?(v):-(v)) +#define ABS(v) (((v) > 0)?(v):-(v)) -void slave_boot_init(void); -void i386_cpu_IPI(int cpu); +void slave_boot_init(void); +void i386_cpu_IPI(int cpu); #if MACH_KDP -static void mp_kdp_wait(boolean_t flush, boolean_t isNMI); +static void mp_kdp_wait(boolean_t flush, boolean_t isNMI); #endif /* MACH_KDP */ #if MACH_KDP -static boolean_t cpu_signal_pending(int cpu, mp_event_t event); +static boolean_t cpu_signal_pending(int cpu, mp_event_t event); #endif /* MACH_KDP */ -static int NMIInterruptHandler(x86_saved_state_t *regs); +static int NMIInterruptHandler(x86_saved_state_t *regs); -boolean_t smp_initialized = FALSE; -uint32_t TSC_sync_margin = 0xFFF; -volatile boolean_t force_immediate_debugger_NMI = FALSE; -volatile boolean_t pmap_tlb_flush_timeout = FALSE; +boolean_t smp_initialized = FALSE; +uint32_t TSC_sync_margin = 0xFFF; +volatile boolean_t force_immediate_debugger_NMI = FALSE; +volatile boolean_t pmap_tlb_flush_timeout = FALSE; #if DEBUG || DEVELOPMENT -boolean_t mp_interrupt_watchdog_enabled = TRUE; -uint32_t mp_interrupt_watchdog_events = 0; +boolean_t mp_interrupt_watchdog_enabled = TRUE; +uint32_t mp_interrupt_watchdog_events = 0; #endif -decl_simple_lock_data(,debugger_callback_lock); +decl_simple_lock_data(, debugger_callback_lock); struct debugger_callback *debugger_callback = NULL; decl_lck_mtx_data(static, mp_cpu_boot_lock); -lck_mtx_ext_t mp_cpu_boot_lock_ext; +lck_mtx_ext_t mp_cpu_boot_lock_ext; /* Variables needed for MP rendezvous. 
*/ -decl_simple_lock_data(,mp_rv_lock); -static void (*mp_rv_setup_func)(void *arg); -static void (*mp_rv_action_func)(void *arg); -static void (*mp_rv_teardown_func)(void *arg); -static void *mp_rv_func_arg; -static volatile int mp_rv_ncpus; - /* Cache-aligned barriers: */ -static volatile long mp_rv_entry __attribute__((aligned(64))); -static volatile long mp_rv_exit __attribute__((aligned(64))); -static volatile long mp_rv_complete __attribute__((aligned(64))); - -volatile uint64_t debugger_entry_time; -volatile uint64_t debugger_exit_time; +decl_simple_lock_data(, mp_rv_lock); +static void (*mp_rv_setup_func)(void *arg); +static void (*mp_rv_action_func)(void *arg); +static void (*mp_rv_teardown_func)(void *arg); +static void *mp_rv_func_arg; +static volatile int mp_rv_ncpus; +/* Cache-aligned barriers: */ +static volatile long mp_rv_entry __attribute__((aligned(64))); +static volatile long mp_rv_exit __attribute__((aligned(64))); +static volatile long mp_rv_complete __attribute__((aligned(64))); + +volatile uint64_t debugger_entry_time; +volatile uint64_t debugger_exit_time; #if MACH_KDP #include extern int kdp_snapshot; @@ -166,68 +161,50 @@ static struct _kdp_xcpu_call_func { /* Variables needed for MP broadcast. */ static void (*mp_bc_action_func)(void *arg); static void *mp_bc_func_arg; -static int mp_bc_ncpus; +static int mp_bc_ncpus; static volatile long mp_bc_count; decl_lck_mtx_data(static, mp_bc_lock); -lck_mtx_ext_t mp_bc_lock_ext; -static volatile int debugger_cpu = -1; -volatile long NMIPI_acks = 0; -volatile long NMI_count = 0; -static NMI_reason_t NMI_panic_reason = NONE; -static int vector_timed_out; +lck_mtx_ext_t mp_bc_lock_ext; +static volatile int debugger_cpu = -1; +volatile long NMIPI_acks = 0; +volatile long NMI_count = 0; +static NMI_reason_t NMI_panic_reason = NONE; +static int vector_timed_out; -extern void NMI_cpus(void); +extern void NMI_cpus(void); -static void mp_cpus_call_init(void); -static void mp_cpus_call_action(void); -static void mp_call_PM(void); +static void mp_cpus_call_init(void); +static void mp_cpus_call_action(void); +static void mp_call_PM(void); -char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init +char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init /* PAL-related routines */ -boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, - int ipi_vector, i386_intr_func_t ipi_handler); +boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, + int ipi_vector, i386_intr_func_t ipi_handler); void i386_start_cpu(int lapic_id, int cpu_num); void i386_send_NMI(int cpu); void NMIPI_enable(boolean_t); -#if GPROF -/* - * Initialize dummy structs for profiling. These aren't used but - * allows hertz_tick() to be built with GPROF defined. 
- */ -struct profile_vars _profile_vars; -struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars }; -#define GPROF_INIT() \ -{ \ - int i; \ - \ - /* Hack to initialize pointers to unused profiling structs */ \ - for (i = 1; i < MAX_CPUS; i++) \ - _profile_vars_cpus[i] = &_profile_vars; \ -} -#else -#define GPROF_INIT() -#endif /* GPROF */ -static lck_grp_t smp_lck_grp; -static lck_grp_attr_t smp_lck_grp_attr; +static lck_grp_t smp_lck_grp; +static lck_grp_attr_t smp_lck_grp_attr; -#define NUM_CPU_WARM_CALLS 20 -struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS]; -queue_head_t cpu_warm_call_list; +#define NUM_CPU_WARM_CALLS 20 +struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS]; +queue_head_t cpu_warm_call_list; decl_simple_lock_data(static, cpu_warm_lock); typedef struct cpu_warm_data { - timer_call_t cwd_call; - uint64_t cwd_deadline; - int cwd_result; + timer_call_t cwd_call; + uint64_t cwd_deadline; + int cwd_result; } *cpu_warm_data_t; -static void cpu_prewarm_init(void); -static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1); -static void _cpu_warm_setup(void *arg); -static timer_call_t grab_warm_timer_call(void); -static void free_warm_timer_call(timer_call_t call); +static void cpu_prewarm_init(void); +static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1); +static void _cpu_warm_setup(void *arg); +static timer_call_t grab_warm_timer_call(void); +static void free_warm_timer_call(timer_call_t call); void smp_init(void) @@ -240,13 +217,13 @@ smp_init(void) lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL); console_init(); - if(!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler, - LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler)) + if (!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler, + LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler)) { return; + } cpu_thread_init(); - GPROF_INIT(); DBGLOG_CPU_INIT(master_cpu); mp_cpus_call_init(); @@ -254,15 +231,15 @@ smp_init(void) #if DEBUG || DEVELOPMENT if (PE_parse_boot_argn("interrupt_watchdog", - &mp_interrupt_watchdog_enabled, - sizeof(mp_interrupt_watchdog_enabled))) { + &mp_interrupt_watchdog_enabled, + sizeof(mp_interrupt_watchdog_enabled))) { kprintf("Interrupt watchdog %sabled\n", - mp_interrupt_watchdog_enabled ? "en" : "dis"); + mp_interrupt_watchdog_enabled ? "en" : "dis"); } #endif if (PE_parse_boot_argn("TSC_sync_margin", - &TSC_sync_margin, sizeof(TSC_sync_margin))) { + &TSC_sync_margin, sizeof(TSC_sync_margin))) { kprintf("TSC sync Margin 0x%x\n", TSC_sync_margin); } else if (cpuid_vmm_present()) { kprintf("TSC sync margin disabled\n"); @@ -276,18 +253,18 @@ smp_init(void) } typedef struct { - int target_cpu; - int target_lapic; - int starter_cpu; + int target_cpu; + int target_lapic; + int starter_cpu; } processor_start_info_t; -static processor_start_info_t start_info __attribute__((aligned(64))); +static processor_start_info_t start_info __attribute__((aligned(64))); -/* +/* * Cache-alignment is to avoid cross-cpu false-sharing interference. */ -static volatile long tsc_entry_barrier __attribute__((aligned(64))); -static volatile long tsc_exit_barrier __attribute__((aligned(64))); -static volatile uint64_t tsc_target __attribute__((aligned(64))); +static volatile long tsc_entry_barrier __attribute__((aligned(64))); +static volatile long tsc_exit_barrier __attribute__((aligned(64))); +static volatile uint64_t tsc_target __attribute__((aligned(64))); /* * Poll a CPU to see when it has marked itself as running. 
@@ -296,8 +273,9 @@ static void mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay) { while (iters-- > 0) { - if (cpu_datap(slot_num)->cpu_running) + if (cpu_datap(slot_num)->cpu_running) { break; + } delay(usecdelay); } } @@ -308,17 +286,18 @@ mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay) kern_return_t intel_startCPU_fast(int slot_num) { - kern_return_t rc; + kern_return_t rc; /* * Try to perform a fast restart */ rc = pmCPUExitHalt(slot_num); - if (rc != KERN_SUCCESS) + if (rc != KERN_SUCCESS) { /* * The CPU was not eligible for a fast restart. */ - return(rc); + return rc; + } KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_FAST_START | DBG_FUNC_START, @@ -328,7 +307,7 @@ intel_startCPU_fast(int slot_num) * Wait until the CPU is back online. */ mp_disable_preemption(); - + /* * We use short pauses (1us) for low latency. 30,000 iterations is * longer than a full restart would require so it should be more @@ -346,10 +325,11 @@ intel_startCPU_fast(int slot_num) * Check to make sure that the CPU is really running. If not, * go through the slow path. */ - if (cpu_datap(slot_num)->cpu_running) - return(KERN_SUCCESS); - else - return(KERN_FAILURE); + if (cpu_datap(slot_num)->cpu_running) { + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } } static void @@ -365,8 +345,9 @@ started_cpu(void) */ tsc_target = 0; atomic_decl(&tsc_entry_barrier, 1); - while (tsc_entry_barrier != 0) - ; /* spin for starter and target at barrier */ + while (tsc_entry_barrier != 0) { + ; /* spin for starter and target at barrier */ + } tsc_target = rdtsc64(); atomic_decl(&tsc_exit_barrier, 1); } @@ -375,15 +356,16 @@ started_cpu(void) static void start_cpu(void *arg) { - int i = 1000; - processor_start_info_t *psip = (processor_start_info_t *) arg; + int i = 1000; + processor_start_info_t *psip = (processor_start_info_t *) arg; /* Ignore this if the current processor is not the starter */ - if (cpu_number() != psip->starter_cpu) + if (cpu_number() != psip->starter_cpu) { return; + } DBG("start_cpu(%p) about to start cpu %d, lapic %d\n", - arg, psip->target_cpu, psip->target_lapic); + arg, psip->target_cpu, psip->target_lapic); KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_START | DBG_FUNC_START, @@ -392,14 +374,14 @@ start_cpu(void *arg) i386_start_cpu(psip->target_lapic, psip->target_cpu); -#ifdef POSTCODE_DELAY +#ifdef POSTCODE_DELAY /* Wait much longer if postcodes are displayed for a delay period. */ i *= 10000; #endif DBG("start_cpu(%p) about to wait for cpu %d\n", - arg, psip->target_cpu); + arg, psip->target_cpu); - mp_wait_for_cpu_up(psip->target_cpu, i*100, 100); + mp_wait_for_cpu_up(psip->target_cpu, i * 100, 100); KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_START | DBG_FUNC_END, @@ -414,26 +396,28 @@ start_cpu(void *arg) * TSC_sync_margin (TSC_SYNC_MARGIN) ticks. This margin * can be overriden by boot-arg (with 0 meaning no checking). 
*/ - uint64_t tsc_starter; - int64_t tsc_delta; + uint64_t tsc_starter; + int64_t tsc_delta; atomic_decl(&tsc_entry_barrier, 1); - while (tsc_entry_barrier != 0) - ; /* spin for both processors at barrier */ + while (tsc_entry_barrier != 0) { + ; /* spin for both processors at barrier */ + } tsc_starter = rdtsc64(); atomic_decl(&tsc_exit_barrier, 1); - while (tsc_exit_barrier != 0) - ; /* spin for target to store its TSC */ + while (tsc_exit_barrier != 0) { + ; /* spin for target to store its TSC */ + } tsc_delta = tsc_target - tsc_starter; kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n", - psip->target_cpu, tsc_target, tsc_delta, tsc_delta); - if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) { + psip->target_cpu, tsc_target, tsc_delta, tsc_delta); + if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) { #if DEBUG panic( #else printf( #endif "Unsynchronized TSC for cpu %d: " - "0x%016llx, delta 0x%llx\n", + "0x%016llx, delta 0x%llx\n", psip->target_cpu, tsc_target, tsc_delta); } } @@ -441,10 +425,10 @@ start_cpu(void *arg) kern_return_t intel_startCPU( - int slot_num) + int slot_num) { - int lapic = cpu_to_lapic[slot_num]; - boolean_t istate; + int lapic = cpu_to_lapic[slot_num]; + boolean_t istate; assert(lapic != -1); @@ -499,13 +483,13 @@ intel_startCPU( } } -#if MP_DEBUG -cpu_signal_event_log_t *cpu_signal[MAX_CPUS]; -cpu_signal_event_log_t *cpu_handle[MAX_CPUS]; +#if MP_DEBUG +cpu_signal_event_log_t *cpu_signal[MAX_CPUS]; +cpu_signal_event_log_t *cpu_handle[MAX_CPUS]; MP_EVENT_NAME_DECL(); -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ /* * Note: called with NULL state when polling for TLB flush and cross-calls. @@ -513,11 +497,11 @@ MP_EVENT_NAME_DECL(); int cpu_signal_handler(x86_saved_state_t *regs) { -#if !MACH_KDP +#if !MACH_KDP #pragma unused (regs) #endif /* !MACH_KDP */ - int my_cpu; - volatile int *my_word; + int my_cpu; + volatile int *my_word; SCHED_STATS_IPI(current_processor()); @@ -531,9 +515,9 @@ cpu_signal_handler(x86_saved_state_t *regs) cpu_data_ptr[my_cpu]->cpu_prior_signals = *my_word; do { -#if MACH_KDP +#if MACH_KDP if (i_bit(MP_KDP, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_KDP); + DBGLOG(cpu_handle, my_cpu, MP_KDP); i_bit_clear(MP_KDP, my_word); /* Ensure that the i386_kernel_state at the base of the * current thread's stack (if any) is synchronized with the @@ -541,27 +525,25 @@ cpu_signal_handler(x86_saved_state_t *regs) * access through the debugger. 
*/ sync_iss_to_iks(regs); - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } mp_kdp_wait(TRUE, FALSE); - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } } else -#endif /* MACH_KDP */ +#endif /* MACH_KDP */ if (i_bit(MP_TLB_FLUSH, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH); + DBGLOG(cpu_handle, my_cpu, MP_TLB_FLUSH); i_bit_clear(MP_TLB_FLUSH, my_word); pmap_update_interrupt(); - } else if (i_bit(MP_CHUD, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_CHUD); - i_bit_clear(MP_CHUD, my_word); - chudxnu_cpu_signal_handler(); } else if (i_bit(MP_CALL, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_CALL); + DBGLOG(cpu_handle, my_cpu, MP_CALL); i_bit_clear(MP_CALL, my_word); mp_cpus_call_action(); } else if (i_bit(MP_CALL_PM, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_CALL_PM); + DBGLOG(cpu_handle, my_cpu, MP_CALL_PM); i_bit_clear(MP_CALL_PM, my_word); mp_call_PM(); } @@ -569,7 +551,7 @@ cpu_signal_handler(x86_saved_state_t *regs) /* Called to poll only for cross-calls and TLB flush */ break; } else if (i_bit(MP_AST, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_AST); + DBGLOG(cpu_handle, my_cpu, MP_AST); i_bit_clear(MP_AST, my_word); ast_check(cpu_to_processor(my_cpu)); } @@ -579,64 +561,68 @@ cpu_signal_handler(x86_saved_state_t *regs) } extern void kprintf_break_lock(void); -static int +int NMIInterruptHandler(x86_saved_state_t *regs) { - void *stackptr; - char pstr[192]; - uint64_t now = mach_absolute_time(); + void *stackptr; + char pstr[256]; + uint64_t now = mach_absolute_time(); if (panic_active() && !panicDebugging) { - if (pmsafe_debug) + if (pmsafe_debug) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); - for(;;) + } + for (;;) { cpu_pause(); + } } atomic_incl(&NMIPI_acks, 1); atomic_incl(&NMI_count, 1); sync_iss_to_iks_unconditionally(regs); - __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr)); - if (cpu_number() == debugger_cpu) + if (cpu_number() == debugger_cpu) { goto NMExit; + } if (NMI_panic_reason == SPINLOCK_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n", - cpu_number(), now, spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu); + "Panic(CPU %d, time %llu): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n", + cpu_number(), now, spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == TLB_FLUSH_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: TLB flush timeout, TLB state:0x%x\n", - cpu_number(), now, current_cpu_datap()->cpu_tlb_invalid); + "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: TLB flush timeout, TLB state:0x%x\n", + cpu_number(), now, current_cpu_datap()->cpu_tlb_invalid); panic_i386_backtrace(stackptr, 48, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == CROSSCALL_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: cross-call timeout\n", - cpu_number(), now); + "Panic(CPU %d, time %llu): NMIPI for unresponsive 
processor: cross-call timeout\n", + cpu_number(), now); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == INTERRUPT_WATCHDOG) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: interrupt watchdog for vector 0x%x\n", - cpu_number(), now, vector_timed_out); + "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: interrupt watchdog for vector 0x%x\n", + cpu_number(), now, vector_timed_out); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } - + #if MACH_KDP - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } current_cpu_datap()->cpu_NMI_acknowledged = TRUE; i_bit_clear(MP_KDP, ¤t_cpu_datap()->cpu_signals); if (panic_active() || NMI_panic_reason != NONE) { mp_kdp_wait(FALSE, TRUE); } else if (!mp_kdp_trap && - !mp_kdp_is_NMI && - virtualized && (debug_boot_arg & DB_NMI)) { + !mp_kdp_is_NMI && + virtualized && (debug_boot_arg & DB_NMI)) { /* * Under a VMM with the debug boot-arg set, drop into kdp. * Since an NMI is involved, there's a risk of contending with - * a panic. And side-effects of NMIs may result in entry into, + * a panic. And side-effects of NMIs may result in entry into, * and continuing from, the debugger being unreliable. */ if (__sync_bool_compare_and_swap(&mp_kdp_is_NMI, FALSE, TRUE)) { @@ -651,10 +637,11 @@ NMIInterruptHandler(x86_saved_state_t *regs) } else { mp_kdp_wait(FALSE, FALSE); } - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } #endif -NMExit: +NMExit: return 1; } @@ -692,41 +679,45 @@ cpu_NMI_interrupt(int cpu) void NMI_cpus(void) { - unsigned int cpu; - boolean_t intrs_enabled; - uint64_t tsc_timeout; + unsigned int cpu; + boolean_t intrs_enabled; + uint64_t tsc_timeout; intrs_enabled = ml_set_interrupts_enabled(FALSE); - + NMIPI_enable(TRUE); for (cpu = 0; cpu < real_ncpus; cpu++) { - if (!cpu_datap(cpu)->cpu_running) + if (!cpu_is_running(cpu)) { continue; + } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); tsc_timeout = !machine_timeout_suspended() ? 
- rdtsc64() + (1000 * 1000 * 1000 * 10ULL) : - ~0ULL; + rdtsc64() + (1000 * 1000 * 1000 * 10ULL) : + ~0ULL; while (!cpu_datap(cpu)->cpu_NMI_acknowledged) { handle_pending_TLB_flushes(); cpu_pause(); - if (rdtsc64() > tsc_timeout) + if (rdtsc64() > tsc_timeout) { panic("NMI_cpus() timeout cpu %d", cpu); + } } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; } + NMIPI_enable(FALSE); ml_set_interrupts_enabled(intrs_enabled); } -static void (* volatile mp_PM_func)(void) = NULL; +static void(*volatile mp_PM_func)(void) = NULL; static void mp_call_PM(void) { assert(!ml_get_interrupts_enabled()); - if (mp_PM_func != NULL) + if (mp_PM_func != NULL) { mp_PM_func(); + } } void @@ -735,10 +726,11 @@ cpu_PM_interrupt(int cpu) assert(!ml_get_interrupts_enabled()); if (mp_PM_func != NULL) { - if (cpu == cpu_number()) + if (cpu == cpu_number()) { mp_PM_func(); - else + } else { i386_signal_cpu(cpu, MP_CALL_PM, ASYNC); + } } } @@ -751,36 +743,39 @@ PM_interrupt_register(void (*fn)(void)) void i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode) { - volatile int *signals = &cpu_datap(cpu)->cpu_signals; - uint64_t tsc_timeout; + volatile int *signals = &cpu_datap(cpu)->cpu_signals; + uint64_t tsc_timeout; + - - if (!cpu_datap(cpu)->cpu_running) + if (!cpu_datap(cpu)->cpu_running) { return; + } - if (event == MP_TLB_FLUSH) - KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0); + if (event == MP_TLB_FLUSH) { + KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0); + } DBGLOG(cpu_signal, cpu, event); - + i_bit_set(event, signals); i386_cpu_IPI(cpu); if (mode == SYNC) { - again: +again: tsc_timeout = !machine_timeout_suspended() ? - rdtsc64() + (1000*1000*1000) : - ~0ULL; + rdtsc64() + (1000 * 1000 * 1000) : + ~0ULL; while (i_bit(event, signals) && rdtsc64() < tsc_timeout) { cpu_pause(); } if (i_bit(event, signals)) { DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n", - cpu, event); + cpu, event); goto again; } } - if (event == MP_TLB_FLUSH) - KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0); + if (event == MP_TLB_FLUSH) { + KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0); + } } /* @@ -790,11 +785,12 @@ i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode) static boolean_t mp_spin_timeout(uint64_t tsc_start) { - uint64_t tsc_timeout; + uint64_t tsc_timeout; cpu_pause(); - if (machine_timeout_suspended()) + if (machine_timeout_suspended()) { return FALSE; + } /* * The timeout is 4 * the spinlock timeout period @@ -802,8 +798,8 @@ mp_spin_timeout(uint64_t tsc_start) * in which case we allow an even greater margin. */ tsc_timeout = disable_serial_output ? 
LockTimeOutTSC << 2 - : LockTimeOutTSC << 4; - return (rdtsc64() > tsc_start + tsc_timeout); + : LockTimeOutTSC << 4; + return rdtsc64() > tsc_start + tsc_timeout; } /* @@ -815,30 +811,30 @@ boolean_t mp_safe_spin_lock(usimple_lock_t lock) { if (ml_get_interrupts_enabled()) { - simple_lock(lock); + simple_lock(lock, LCK_GRP_NULL); return TRUE; } else { uint64_t tsc_spin_start = rdtsc64(); - while (!simple_lock_try(lock)) { + while (!simple_lock_try(lock, LCK_GRP_NULL)) { cpu_signal_handler(NULL); if (mp_spin_timeout(tsc_spin_start)) { uint32_t lock_cpu; uintptr_t lowner = (uintptr_t) - lock->interlock.lock_data; + lock->interlock.lock_data; spinlock_timed_out = lock; lock_cpu = spinlock_timeout_NMI(lowner); NMIPI_panic(cpu_to_cpumask(lock_cpu), SPINLOCK_TIMEOUT); panic("mp_safe_spin_lock() timed out, lock: %p, owner thread: 0x%lx, current_thread: %p, owner on CPU 0x%x, time: %llu", - lock, lowner, current_thread(), lock_cpu, mach_absolute_time()); + lock, lowner, current_thread(), lock_cpu, mach_absolute_time()); } } return FALSE; - } + } } /* * All-CPU rendezvous: - * - CPUs are signalled, + * - CPUs are signalled, * - all execute the setup function (if specified), * - rendezvous (i.e. all cpus reach a barrier), * - all execute the action function (if specified), @@ -853,12 +849,20 @@ mp_safe_spin_lock(usimple_lock_t lock) static void mp_rendezvous_action(__unused void *null) { - boolean_t intrs_enabled; - uint64_t tsc_spin_start; + boolean_t intrs_enabled; + uint64_t tsc_spin_start; + + /* + * Note that mp_rv_lock was acquired by the thread that initiated the + * rendezvous and must have been acquired before we enter + * mp_rendezvous_action(). + */ + current_cpu_datap()->cpu_rendezvous_in_progress = TRUE; /* setup function */ - if (mp_rv_setup_func != NULL) + if (mp_rv_setup_func != NULL) { mp_rv_setup_func(mp_rv_func_arg); + } intrs_enabled = ml_get_interrupts_enabled(); @@ -868,55 +872,65 @@ mp_rendezvous_action(__unused void *null) while (mp_rv_entry < mp_rv_ncpus) { /* poll for pesky tlb flushes if interrupts disabled */ - if (!intrs_enabled) + if (!intrs_enabled) { handle_pending_TLB_flushes(); + } if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rv_action() entry: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_entry, mp_rv_ncpus, tsc_spin_start, rdtsc64()); } } /* action function */ - if (mp_rv_action_func != NULL) + if (mp_rv_action_func != NULL) { mp_rv_action_func(mp_rv_func_arg); + } /* spin on exit rendezvous */ atomic_incl(&mp_rv_exit, 1); tsc_spin_start = rdtsc64(); while (mp_rv_exit < mp_rv_ncpus) { - if (!intrs_enabled) + if (!intrs_enabled) { handle_pending_TLB_flushes(); - if (mp_spin_timeout(tsc_spin_start)) + } + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rv_action() exit: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_exit, mp_rv_ncpus, tsc_spin_start, rdtsc64()); + } } /* teardown function */ - if (mp_rv_teardown_func != NULL) + if (mp_rv_teardown_func != NULL) { mp_rv_teardown_func(mp_rv_func_arg); + } + + current_cpu_datap()->cpu_rendezvous_in_progress = FALSE; /* Bump completion count */ atomic_incl(&mp_rv_complete, 1); } void -mp_rendezvous(void (*setup_func)(void *), - void (*action_func)(void *), - void (*teardown_func)(void *), - void *arg) +mp_rendezvous(void (*setup_func)(void *), + void (*action_func)(void *), + void (*teardown_func)(void *), + void *arg) { - uint64_t tsc_spin_start; + uint64_t tsc_spin_start; if (!smp_initialized) { - if (setup_func != NULL) + if (setup_func != NULL) { setup_func(arg); - if (action_func != NULL) + 
} + if (action_func != NULL) { action_func(arg); - if (teardown_func != NULL) + } + if (teardown_func != NULL) { teardown_func(arg); + } return; } - + /* obtain rendezvous lock */ - (void) mp_safe_spin_lock(&mp_rv_lock); + mp_rendezvous_lock(); /* set static function pointers */ mp_rv_setup_func = setup_func; @@ -944,10 +958,11 @@ mp_rendezvous(void (*setup_func)(void *), */ tsc_spin_start = rdtsc64(); while (mp_rv_complete < mp_rv_ncpus) { - if (mp_spin_timeout(tsc_spin_start)) + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rendezvous() timeout: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_complete, mp_rv_ncpus, tsc_spin_start, rdtsc64()); + } } - + /* Tidy up */ mp_rv_setup_func = NULL; mp_rv_action_func = NULL; @@ -955,6 +970,18 @@ mp_rendezvous(void (*setup_func)(void *), mp_rv_func_arg = NULL; /* release lock */ + mp_rendezvous_unlock(); +} + +void +mp_rendezvous_lock(void) +{ + (void) mp_safe_spin_lock(&mp_rv_lock); +} + +void +mp_rendezvous_unlock(void) +{ simple_unlock(&mp_rv_lock); } @@ -988,40 +1015,40 @@ teardown_restore_intrs(__unused void * param_not_used) */ void mp_rendezvous_no_intrs( - void (*action_func)(void *), - void *arg) + void (*action_func)(void *), + void *arg) { mp_rendezvous(setup_disable_intrs, - action_func, - teardown_restore_intrs, - arg); + action_func, + teardown_restore_intrs, + arg); } typedef struct { - queue_chain_t link; /* queue linkage */ - void (*func)(void *,void *); /* routine to call */ - void *arg0; /* routine's 1st arg */ - void *arg1; /* routine's 2nd arg */ - cpumask_t *maskp; /* completion response mask */ + queue_chain_t link; /* queue linkage */ + void (*func)(void *, void *); /* routine to call */ + void *arg0; /* routine's 1st arg */ + void *arg1; /* routine's 2nd arg */ + cpumask_t *maskp; /* completion response mask */ } mp_call_t; typedef struct { - queue_head_t queue; - decl_simple_lock_data(, lock); + queue_head_t queue; + decl_simple_lock_data(, lock); } mp_call_queue_t; -#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS -static mp_call_queue_t mp_cpus_call_freelist; -static mp_call_queue_t mp_cpus_call_head[MAX_CPUS]; +#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS +static mp_call_queue_t mp_cpus_call_freelist; +static mp_call_queue_t mp_cpus_call_head[MAX_CPUS]; static inline boolean_t mp_call_head_lock(mp_call_queue_t *cqp) { - boolean_t intrs_enabled; + boolean_t intrs_enabled; intrs_enabled = ml_set_interrupts_enabled(FALSE); - simple_lock(&cqp->lock); + simple_lock(&cqp->lock, LCK_GRP_NULL); return intrs_enabled; } @@ -1030,16 +1057,19 @@ mp_call_head_lock(mp_call_queue_t *cqp) * Deliver an NMIPI to a set of processors to cause them to panic . 
*/ void -NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) { - unsigned int cpu, cpu_bit; +NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) +{ + unsigned int cpu; + cpumask_t cpu_bit; uint64_t deadline; NMIPI_enable(TRUE); NMI_panic_reason = why; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - if ((cpu_mask & cpu_bit) == 0) + if ((cpu_mask & cpu_bit) == 0) { continue; + } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); } @@ -1047,10 +1077,11 @@ NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) { /* Wait (only so long) for NMi'ed cpus to respond */ deadline = mach_absolute_time() + LockTimeOut; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - if ((cpu_mask & cpu_bit) == 0) + if ((cpu_mask & cpu_bit) == 0) { continue; + } while (!cpu_datap(cpu)->cpu_NMI_acknowledged && - mach_absolute_time() < deadline) { + mach_absolute_time() < deadline) { cpu_pause(); } } @@ -1061,7 +1092,7 @@ static inline boolean_t mp_call_head_is_locked(mp_call_queue_t *cqp) { return !ml_get_interrupts_enabled() && - hw_lock_held((hw_lock_t)&cqp->lock); + hw_lock_held((hw_lock_t)&cqp->lock); } #endif @@ -1075,13 +1106,14 @@ mp_call_head_unlock(mp_call_queue_t *cqp, boolean_t intrs_enabled) static inline mp_call_t * mp_call_alloc(void) { - mp_call_t *callp = NULL; - boolean_t intrs_enabled; - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + mp_call_t *callp = NULL; + boolean_t intrs_enabled; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; intrs_enabled = mp_call_head_lock(cqp); - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { queue_remove_first(&cqp->queue, callp, typeof(callp), link); + } mp_call_head_unlock(cqp, intrs_enabled); return callp; @@ -1090,8 +1122,8 @@ mp_call_alloc(void) static inline void mp_call_free(mp_call_t *callp) { - boolean_t intrs_enabled; - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + boolean_t intrs_enabled; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; intrs_enabled = mp_call_head_lock(cqp); queue_enter_first(&cqp->queue, callp, typeof(callp), link); @@ -1101,18 +1133,19 @@ mp_call_free(mp_call_t *callp) static inline mp_call_t * mp_call_dequeue_locked(mp_call_queue_t *cqp) { - mp_call_t *callp = NULL; + mp_call_t *callp = NULL; assert(mp_call_head_is_locked(cqp)); - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { queue_remove_first(&cqp->queue, callp, typeof(callp), link); + } return callp; } static inline void mp_call_enqueue_locked( - mp_call_queue_t *cqp, - mp_call_t *callp) + mp_call_queue_t *cqp, + mp_call_t *callp) { queue_enter(&cqp->queue, callp, typeof(callp), link); } @@ -1121,7 +1154,7 @@ mp_call_enqueue_locked( static void mp_cpus_call_init(void) { - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; DBG("mp_cpus_call_init()\n"); simple_lock_init(&cqp->lock, 0); @@ -1135,9 +1168,9 @@ mp_cpus_call_init(void) void mp_cpus_call_cpu_init(int cpu) { - int i; - mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; - mp_call_t *callp; + int i; + mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; + mp_call_t *callp; simple_lock_init(&cqp->lock, 0); queue_init(&cqp->queue); @@ -1156,10 +1189,10 @@ mp_cpus_call_cpu_init(int cpu) static void mp_cpus_call_action(void) { - mp_call_queue_t *cqp; - boolean_t intrs_enabled; - mp_call_t *callp; - mp_call_t call; + mp_call_queue_t *cqp; + boolean_t intrs_enabled; + mp_call_t *callp; + mp_call_t call; assert(!ml_get_interrupts_enabled()); cqp = &mp_cpus_call_head[cpu_number()]; @@ -1177,8 +1210,9 @@ 
mp_cpus_call_action(void) call.func(call.arg0, call.arg1); (void) mp_call_head_lock(cqp); } - if (call.maskp != NULL) + if (call.maskp != NULL) { i_bit_set(cpu_number(), call.maskp); + } } mp_call_head_unlock(cqp, intrs_enabled); } @@ -1191,34 +1225,34 @@ mp_cpus_call_action(void) * ASYNC: function call is queued to the specified cpus * waiting for all calls to complete in parallel before returning * NOSYNC: function calls are queued - * but we return before confirmation of calls completing. + * but we return before confirmation of calls completing. * The action function may be NULL. * The cpu mask may include the local cpu. Offline cpus are ignored. * The return value is the number of cpus on which the call was made or queued. */ cpu_t mp_cpus_call( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *), - void *arg) + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *), + void *arg) { return mp_cpus_call1( - cpus, - mode, - (void (*)(void *,void *))action_func, - arg, - NULL, - NULL); + cpus, + mode, + (void (*)(void *, void *))action_func, + arg, + NULL, + NULL); } static void -mp_cpus_call_wait(boolean_t intrs_enabled, - cpumask_t cpus_called, - cpumask_t *cpus_responded) +mp_cpus_call_wait(boolean_t intrs_enabled, + cpumask_t cpus_called, + cpumask_t *cpus_responded) { - mp_call_queue_t *cqp; - uint64_t tsc_spin_start; + mp_call_queue_t *cqp; + uint64_t tsc_spin_start; assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); cqp = &mp_cpus_call_head[cpu_number()]; @@ -1227,46 +1261,48 @@ mp_cpus_call_wait(boolean_t intrs_enabled, while (*cpus_responded != cpus_called) { if (!intrs_enabled) { /* Sniffing w/o locking */ - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { mp_cpus_call_action(); + } cpu_signal_handler(NULL); } if (mp_spin_timeout(tsc_spin_start)) { - cpumask_t cpus_unresponsive; + cpumask_t cpus_unresponsive; cpus_unresponsive = cpus_called & ~(*cpus_responded); NMIPI_panic(cpus_unresponsive, CROSSCALL_TIMEOUT); panic("mp_cpus_call_wait() timeout, cpus: 0x%llx", - cpus_unresponsive); + cpus_unresponsive); } } } cpu_t mp_cpus_call1( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *, void *), - void *arg0, - void *arg1, - cpumask_t *cpus_calledp) -{ - cpu_t cpu = 0; - boolean_t intrs_enabled = FALSE; - boolean_t call_self = FALSE; - cpumask_t cpus_called = 0; - cpumask_t cpus_responded = 0; - long cpus_call_count = 0; - uint64_t tsc_spin_start; - boolean_t topo_lock; + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *, void *), + void *arg0, + void *arg1, + cpumask_t *cpus_calledp) +{ + cpu_t cpu = 0; + boolean_t intrs_enabled = FALSE; + boolean_t call_self = FALSE; + cpumask_t cpus_called = 0; + cpumask_t cpus_responded = 0; + long cpus_call_count = 0; + uint64_t tsc_spin_start; + boolean_t topo_lock; KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL | DBG_FUNC_START, cpus, mode, VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1)); if (!smp_initialized) { - if ((cpus & CPUMASK_SELF) == 0) + if ((cpus & CPUMASK_SELF) == 0) { goto out; + } if (action_func != NULL) { intrs_enabled = ml_set_interrupts_enabled(FALSE); action_func(arg0, arg1); @@ -1283,7 +1319,7 @@ mp_cpus_call1( * although an exception is made if we're calling only the master * processor since that always remains active. Note: this exception * is expected for longterm timer nosync cross-calls to the master cpu. 
- */ + */ mp_disable_preemption(); intrs_enabled = ml_get_interrupts_enabled(); topo_lock = (cpus != cpu_to_cpumask(master_cpu)); @@ -1293,8 +1329,9 @@ mp_cpus_call1( } for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) { if (((cpu_to_cpumask(cpu) & cpus) == 0) || - !cpu_datap(cpu)->cpu_running) + !cpu_is_running(cpu)) { continue; + } tsc_spin_start = rdtsc64(); if (cpu == (cpu_t) cpu_number()) { /* @@ -1313,13 +1350,14 @@ mp_cpus_call1( /* * Here to queue a call to cpu and IPI. */ - mp_call_t *callp = NULL; - mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; - boolean_t intrs_inner; + mp_call_t *callp = NULL; + mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; + boolean_t intrs_inner; - queue_call: - if (callp == NULL) +queue_call: + if (callp == NULL) { callp = mp_call_alloc(); + } intrs_inner = mp_call_head_lock(cqp); if (callp == NULL) { mp_call_head_unlock(cqp, intrs_inner); @@ -1328,13 +1366,15 @@ mp_cpus_call1( cpu, 0, 0, 0, 0); if (!intrs_inner) { /* Sniffing w/o locking */ - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { mp_cpus_call_action(); + } handle_pending_TLB_flushes(); } - if (mp_spin_timeout(tsc_spin_start)) + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_cpus_call1() timeout start: 0x%llx, cur: 0x%llx", - tsc_spin_start, rdtsc64()); + tsc_spin_start, rdtsc64()); + } goto queue_call; } callp->maskp = (mode == NOSYNC) ? NULL : &cpus_responded; @@ -1357,7 +1397,7 @@ mp_cpus_call1( } /* Call locally if mode not SYNC */ - if (mode != SYNC && call_self ) { + if (mode != SYNC && call_self) { KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL_LOCAL, VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1), 0, 0); @@ -1369,20 +1409,22 @@ mp_cpus_call1( } /* For ASYNC, now wait for all signaled cpus to complete their calls */ - if (mode == ASYNC) + if (mode == ASYNC) { mp_cpus_call_wait(intrs_enabled, cpus_called, &cpus_responded); + } /* Safe to allow pre-emption now */ mp_enable_preemption(); out: - if (call_self){ + if (call_self) { cpus_called |= cpu_to_cpumask(cpu); cpus_call_count++; } - if (cpus_calledp) + if (cpus_calledp) { *cpus_calledp = cpus_called; + } KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL | DBG_FUNC_END, @@ -1395,75 +1437,75 @@ out: static void mp_broadcast_action(__unused void *null) { - /* call action function */ - if (mp_bc_action_func != NULL) - mp_bc_action_func(mp_bc_func_arg); + /* call action function */ + if (mp_bc_action_func != NULL) { + mp_bc_action_func(mp_bc_func_arg); + } - /* if we're the last one through, wake up the instigator */ - if (atomic_decl_and_test(&mp_bc_count, 1)) - thread_wakeup(((event_t)(uintptr_t) &mp_bc_count)); + /* if we're the last one through, wake up the instigator */ + if (atomic_decl_and_test(&mp_bc_count, 1)) { + thread_wakeup(((event_t)(uintptr_t) &mp_bc_count)); + } } /* * mp_broadcast() runs a given function on all active cpus. * The caller blocks until the functions has run on all cpus. - * The caller will also block if there is another pending braodcast. + * The caller will also block if there is another pending broadcast. 
*/ void mp_broadcast( - void (*action_func)(void *), - void *arg) -{ - if (!smp_initialized) { - if (action_func != NULL) - action_func(arg); - return; - } - - /* obtain broadcast lock */ - lck_mtx_lock(&mp_bc_lock); - - /* set static function pointers */ - mp_bc_action_func = action_func; - mp_bc_func_arg = arg; - - assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT); - - /* - * signal other processors, which will call mp_broadcast_action() - */ - mp_bc_count = real_ncpus; /* assume max possible active */ - mp_bc_ncpus = mp_cpus_call(CPUMASK_OTHERS, NOSYNC, *mp_broadcast_action, NULL) + 1; - atomic_decl(&mp_bc_count, real_ncpus - mp_bc_ncpus); /* subtract inactive */ - - /* call executor function on this cpu */ - mp_broadcast_action(NULL); - - /* block for other cpus to have run action_func */ - if (mp_bc_ncpus > 1) - thread_block(THREAD_CONTINUE_NULL); - else - clear_wait(current_thread(), THREAD_AWAKENED); - - /* release lock */ - lck_mtx_unlock(&mp_bc_lock); + void (*action_func)(void *), + void *arg) +{ + if (!smp_initialized) { + if (action_func != NULL) { + action_func(arg); + } + return; + } + + /* obtain broadcast lock */ + lck_mtx_lock(&mp_bc_lock); + + /* set static function pointers */ + mp_bc_action_func = action_func; + mp_bc_func_arg = arg; + + assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT); + + /* + * signal other processors, which will call mp_broadcast_action() + */ + mp_bc_count = real_ncpus; /* assume max possible active */ + mp_bc_ncpus = mp_cpus_call(CPUMASK_ALL, NOSYNC, *mp_broadcast_action, NULL); + atomic_decl(&mp_bc_count, real_ncpus - mp_bc_ncpus); /* subtract inactive */ + + /* block for other cpus to have run action_func */ + if (mp_bc_ncpus > 1) { + thread_block(THREAD_CONTINUE_NULL); + } else { + clear_wait(current_thread(), THREAD_AWAKENED); + } + + /* release lock */ + lck_mtx_unlock(&mp_bc_lock); } void mp_cpus_kick(cpumask_t cpus) { - cpu_t cpu; - boolean_t intrs_enabled = FALSE; + cpu_t cpu; + boolean_t intrs_enabled = FALSE; intrs_enabled = ml_set_interrupts_enabled(FALSE); mp_safe_spin_lock(&x86_topo_lock); for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) { if ((cpu == (cpu_t) cpu_number()) - || ((cpu_to_cpumask(cpu) & cpus) == 0) - || (!cpu_datap(cpu)->cpu_running)) - { - continue; + || ((cpu_to_cpumask(cpu) & cpus) == 0) + || !cpu_is_running(cpu)) { + continue; } lapic_send_ipi(cpu, LAPIC_VECTOR(KICK)); @@ -1476,7 +1518,7 @@ mp_cpus_kick(cpumask_t cpus) void i386_activate_cpu(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); assert(!ml_get_interrupts_enabled()); @@ -1488,17 +1530,17 @@ i386_activate_cpu(void) mp_safe_spin_lock(&x86_topo_lock); cdp->cpu_running = TRUE; started_cpu(); + pmap_tlbi_range(0, ~0ULL, true, 0); simple_unlock(&x86_topo_lock); - flush_tlb_raw(); } void i386_deactivate_cpu(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); assert(!ml_get_interrupts_enabled()); - + KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_DEACTIVATE | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -1525,8 +1567,9 @@ i386_deactivate_cpu(void) mp_disable_preemption(); ml_set_interrupts_enabled(TRUE); - while (cdp->cpu_signals && x86_lcpu()->rtcDeadline != EndOfAllTime) + while (cdp->cpu_signals && x86_lcpu()->rtcDeadline != EndOfAllTime) { cpu_pause(); + } /* * Ensure there's no remaining timer deadline set * - AICPM may have left one active. 
@@ -1541,22 +1584,22 @@ i386_deactivate_cpu(void) 0, 0, 0, 0, 0); } -int pmsafe_debug = 1; +int pmsafe_debug = 1; -#if MACH_KDP -volatile boolean_t mp_kdp_trap = FALSE; -volatile boolean_t mp_kdp_is_NMI = FALSE; -volatile unsigned long mp_kdp_ncpus; -boolean_t mp_kdp_state; +#if MACH_KDP +volatile boolean_t mp_kdp_trap = FALSE; +volatile boolean_t mp_kdp_is_NMI = FALSE; +volatile unsigned long mp_kdp_ncpus; +boolean_t mp_kdp_state; void mp_kdp_enter(boolean_t proceed_on_failure) { - unsigned int cpu; - unsigned int ncpus = 0; - unsigned int my_cpu; - uint64_t tsc_timeout; + unsigned int cpu; + unsigned int ncpus = 0; + unsigned int my_cpu; + uint64_t tsc_timeout; DBG("mp_kdp_enter()\n"); @@ -1582,10 +1625,10 @@ mp_kdp_enter(boolean_t proceed_on_failure) } if (proceed_on_failure) { if (mach_absolute_time() - start_time > 500000000ll) { - kprintf("mp_kdp_enter() can't get x86_topo_lock! Debugging anyway! #YOLO\n"); + paniclog_append_noflush("mp_kdp_enter() can't get x86_topo_lock! Debugging anyway! #YOLO\n"); break; } - locked = simple_lock_try(&x86_topo_lock); + locked = simple_lock_try(&x86_topo_lock, LCK_GRP_NULL); if (!locked) { cpu_pause(); } @@ -1604,8 +1647,9 @@ mp_kdp_enter(boolean_t proceed_on_failure) } } - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } debugger_cpu = my_cpu; ncpus = 1; @@ -1619,8 +1663,9 @@ mp_kdp_enter(boolean_t proceed_on_failure) DBG("mp_kdp_enter() signaling other processors\n"); if (force_immediate_debugger_NMI == FALSE) { for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } ncpus++; i386_signal_cpu(cpu, MP_KDP, ASYNC); } @@ -1655,30 +1700,55 @@ mp_kdp_enter(boolean_t proceed_on_failure) NMIPI_enable(TRUE); } if (mp_kdp_ncpus != ncpus) { - DBG("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu); + unsigned int wait_cycles = 0; + if (proceed_on_failure) { + paniclog_append_noflush("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu); + } else { + DBG("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu); + } for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; - if (cpu_signal_pending(cpu, MP_KDP)) + } + if (cpu_signal_pending(cpu, MP_KDP)) { + cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); + } } /* Wait again for the same timeout */ tsc_timeout = rdtsc64() + (LockTimeOutTSC); while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) { handle_pending_TLB_flushes(); cpu_pause(); + ++wait_cycles; } if (mp_kdp_ncpus != ncpus) { - panic("mp_kdp_enter() timed-out waiting after NMI"); + paniclog_append_noflush("mp_kdp_enter() NMI pending on cpus:"); + for (cpu = 0; cpu < real_ncpus; cpu++) { + if (cpu_is_running(cpu) && !cpu_datap(cpu)->cpu_NMI_acknowledged) { + paniclog_append_noflush(" %d", cpu); + } + } + paniclog_append_noflush("\n"); + if (proceed_on_failure) { + paniclog_append_noflush("mp_kdp_enter() timed-out during %s wait after NMI;" + "expected %u acks but received %lu after %u loops in %llu ticks\n", + (locked ? "locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC); + } else { + panic("mp_kdp_enter() timed-out during %s wait after NMI;" + "expected %u acks but received %lu after %u loops in %llu ticks", + (locked ? 
"locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC); + } } } - } - else + } else { for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } cpu_NMI_interrupt(cpu); } + } if (locked) { simple_unlock(&x86_topo_lock); @@ -1686,49 +1756,71 @@ mp_kdp_enter(boolean_t proceed_on_failure) DBG("mp_kdp_enter() %d processors done %s\n", (int)mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out"); - + postcode(MP_KDP_ENTER); } +boolean_t +mp_kdp_all_cpus_halted() +{ + unsigned int ncpus = 0, cpu = 0, my_cpu = 0; + + my_cpu = cpu_number(); + ncpus = 1; /* current CPU */ + for (cpu = 0; cpu < real_ncpus; cpu++) { + if (cpu == my_cpu || !cpu_is_running(cpu)) { + continue; + } + ncpus++; + } + + return mp_kdp_ncpus == ncpus; +} + static boolean_t cpu_signal_pending(int cpu, mp_event_t event) { - volatile int *signals = &cpu_datap(cpu)->cpu_signals; + volatile int *signals = &cpu_datap(cpu)->cpu_signals; boolean_t retval = FALSE; - if (i_bit(event, signals)) + if (i_bit(event, signals)) { retval = TRUE; + } return retval; } -long kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func, - void *arg0, void *arg1) +long +kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func, + void *arg0, void *arg1) { - if (lcpu > (real_ncpus - 1)) + if (lcpu > (real_ncpus - 1)) { return -1; + } - if (func == NULL) + if (func == NULL) { return -1; + } kdp_xcpu_call_func.func = func; - kdp_xcpu_call_func.ret = -1; + kdp_xcpu_call_func.ret = -1; kdp_xcpu_call_func.arg0 = arg0; kdp_xcpu_call_func.arg1 = arg1; kdp_xcpu_call_func.cpu = lcpu; DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu); - while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE) + while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE) { cpu_pause(); - return kdp_xcpu_call_func.ret; + } + return kdp_xcpu_call_func.ret; } static void kdp_x86_xcpu_poll(void) { if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) { - kdp_xcpu_call_func.ret = + kdp_xcpu_call_func.ret = kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0, - kdp_xcpu_call_func.arg1, - cpu_number()); + kdp_xcpu_call_func.arg1, + cpu_number()); kdp_xcpu_call_func.cpu = KDP_XCPU_NONE; } } @@ -1746,14 +1838,15 @@ mp_kdp_wait(boolean_t flush, boolean_t isNMI) atomic_incl((volatile long *)&mp_kdp_ncpus, 1); while (mp_kdp_trap || (isNMI == TRUE)) { - /* + /* * A TLB shootdown request may be pending--this would result * in the requesting processor waiting in PMAP_UPDATE_TLBS() * until this processor handles it. * Process it, so it can now enter mp_kdp_wait() */ - if (flush) + if (flush) { handle_pending_TLB_flushes(); + } kdp_x86_xcpu_poll(); cpu_pause(); @@ -1778,18 +1871,19 @@ mp_kdp_exit(void) /* Wait other processors to stop spinning. XXX needs timeout */ DBG("mp_kdp_exit() waiting for processors to resume\n"); while (mp_kdp_ncpus > 0) { - /* + /* * a TLB shootdown request may be pending... this would result in the requesting * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it. 
* Process it, so it can now enter mp_kdp_wait() */ - handle_pending_TLB_flushes(); + handle_pending_TLB_flushes(); cpu_pause(); } - if (pmsafe_debug && !kdp_snapshot) - pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + if (pmsafe_debug && !kdp_snapshot) { + pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } debugger_exit_time = mach_absolute_time(); @@ -1798,27 +1892,28 @@ mp_kdp_exit(void) postcode(MP_KDP_EXIT); } -#endif /* MACH_KDP */ +#endif /* MACH_KDP */ boolean_t -mp_recent_debugger_activity(void) { +mp_recent_debugger_activity(void) +{ uint64_t abstime = mach_absolute_time(); - return (((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) || - ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance)); + return ((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) || + ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance); } /*ARGSUSED*/ void init_ast_check( - __unused processor_t processor) + __unused processor_t processor) { } void cause_ast_check( - processor_t processor) + processor_t processor) { - int cpu = processor->cpu_id; + int cpu = processor->cpu_id; if (cpu != cpu_number()) { i386_signal_cpu(cpu, MP_AST, ASYNC); @@ -1830,7 +1925,7 @@ void slave_machine_init(void *param) { /* - * Here in process context, but with interrupts disabled. + * Here in process context, but with interrupts disabled. */ DBG("slave_machine_init() CPU%d\n", get_cpu_number()); @@ -1840,11 +1935,12 @@ slave_machine_init(void *param) */ clock_init(); } - cpu_machine_init(); /* Interrupts enabled hereafter */ + cpu_machine_init(); /* Interrupts enabled hereafter */ } #undef cpu_number -int cpu_number(void) +int +cpu_number(void) { return get_cpu_number(); } @@ -1868,7 +1964,7 @@ grab_warm_timer_call() timer_call_t call = NULL; x = splsched(); - simple_lock(&cpu_warm_lock); + simple_lock(&cpu_warm_lock, LCK_GRP_NULL); if (!queue_empty(&cpu_warm_call_list)) { call = (timer_call_t) dequeue_head(&cpu_warm_call_list); } @@ -1884,7 +1980,7 @@ free_warm_timer_call(timer_call_t call) spl_t x; x = splsched(); - simple_lock(&cpu_warm_lock); + simple_lock(&cpu_warm_lock, LCK_GRP_NULL); enqueue_head(&cpu_warm_call_list, (queue_entry_t)call); simple_unlock(&cpu_warm_lock); splx(x); @@ -1895,8 +1991,8 @@ free_warm_timer_call(timer_call_t call) */ static void cpu_warm_timer_call_func( - call_entry_param_t p0, - __unused call_entry_param_t p1) + call_entry_param_t p0, + __unused call_entry_param_t p1) { free_warm_timer_call((timer_call_t)p0); return; @@ -1907,7 +2003,7 @@ cpu_warm_timer_call_func( */ static void _cpu_warm_setup( - void *arg) + void *arg) { cpu_warm_data_t cwdp = (cpu_warm_data_t)arg; @@ -1922,7 +2018,7 @@ _cpu_warm_setup( */ kern_return_t ml_interrupt_prewarm( - uint64_t deadline) + uint64_t deadline) { struct cpu_warm_data cwd; timer_call_t call; @@ -1932,8 +2028,8 @@ ml_interrupt_prewarm( panic("%s: Interrupts disabled?\n", __FUNCTION__); } - /* - * If the platform doesn't need our help, say that we succeeded. + /* + * If the platform doesn't need our help, say that we succeeded. 
*/ if (!ml_get_interrupt_prewake_applicable()) { return KERN_SUCCESS; @@ -1968,10 +2064,10 @@ ml_interrupt_prewarm( void kernel_spin(uint64_t spin_ns) { - boolean_t istate; - uint64_t spin_abs; - uint64_t deadline; - cpu_data_t *cdp; + boolean_t istate; + uint64_t spin_abs; + uint64_t deadline; + cpu_data_t *cdp; kprintf("kernel_spin(%llu) spinning uninterruptibly\n", spin_ns); istate = ml_set_interrupts_enabled(FALSE); @@ -1983,8 +2079,9 @@ kernel_spin(uint64_t spin_ns) cdp->cpu_int_state = (void *) USER_STATE(current_thread()); deadline = mach_absolute_time() + spin_ns; - while (mach_absolute_time() < deadline) + while (mach_absolute_time() < deadline) { cpu_pause(); + } cdp->cpu_int_event_time = 0; cdp->cpu_int_state = NULL; @@ -2002,17 +2099,18 @@ kernel_spin(uint64_t spin_ns) void mp_interrupt_watchdog(void) { - cpu_t cpu; - boolean_t intrs_enabled = FALSE; - uint16_t cpu_int_num; - uint64_t cpu_int_event_time; - uint64_t cpu_rip; - uint64_t cpu_int_duration; - uint64_t now; - x86_saved_state_t *cpu_int_state; - - if (__improbable(!mp_interrupt_watchdog_enabled)) + cpu_t cpu; + boolean_t intrs_enabled = FALSE; + uint16_t cpu_int_num; + uint64_t cpu_int_event_time; + uint64_t cpu_rip; + uint64_t cpu_int_duration; + uint64_t now; + x86_saved_state_t *cpu_int_state; + + if (__improbable(!mp_interrupt_watchdog_enabled)) { return; + } intrs_enabled = ml_set_interrupts_enabled(FALSE); now = mach_absolute_time(); @@ -2021,20 +2119,24 @@ mp_interrupt_watchdog(void) * check all other processors for long outstanding interrupt handling. */ for (cpu = 0; - cpu < (cpu_t) real_ncpus && !machine_timeout_suspended(); - cpu++) { + cpu < (cpu_t) real_ncpus && !machine_timeout_suspended(); + cpu++) { if ((cpu == (cpu_t) cpu_number()) || - (!cpu_datap(cpu)->cpu_running)) + (!cpu_is_running(cpu))) { continue; + } cpu_int_event_time = cpu_datap(cpu)->cpu_int_event_time; - if (cpu_int_event_time == 0) + if (cpu_int_event_time == 0) { continue; - if (__improbable(now < cpu_int_event_time)) - continue; /* skip due to inter-processor skew */ + } + if (__improbable(now < cpu_int_event_time)) { + continue; /* skip due to inter-processor skew */ + } cpu_int_state = cpu_datap(cpu)->cpu_int_state; - if (__improbable(cpu_int_state == NULL)) + if (__improbable(cpu_int_state == NULL)) { /* The interrupt may have been dismissed */ continue; + } /* Here with a cpu handling an interrupt */ @@ -2045,8 +2147,8 @@ mp_interrupt_watchdog(void) vector_timed_out = cpu_int_num; NMIPI_panic(cpu_to_cpumask(cpu), INTERRUPT_WATCHDOG); panic("Interrupt watchdog, " - "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx", - cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip); + "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx", + cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip); /* NOT REACHED */ } else if (__improbable(cpu_int_duration > (uint64_t) std_quantum)) { mp_interrupt_watchdog_events++; @@ -2054,8 +2156,8 @@ mp_interrupt_watchdog(void) cpu_rip = saved_state64(cpu_int_state)->isf.rip; ml_set_interrupts_enabled(intrs_enabled); printf("Interrupt watchdog, " - "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n", - cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip); + "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n", + cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip); return; } }