/* apple/xnu (xnu-6153.141.1): osfmk/i386/mp.c */
/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach_kdp.h>
#include <kdp/kdp_internal.h>
#include <mach_ldebug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/startup.h>
#include <kern/timer_queue.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/lock_group.h>
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/misc_protos.h>
#include <kern/timer_call.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <prng/random.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/bit_routines.h>
#include <i386/proc_reg.h>
#include <i386/cpu_threads.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/trap.h>
#include <i386/postcode.h>
#include <i386/machine_routines.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <i386/machine_cpu.h>
#include <i386/pmCPU.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/acpi.h>

#include <sys/kdebug.h>

#include <console/serial_protos.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#if MP_DEBUG
#define PAUSE delay(1000000)
#define DBG(x...) kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif /* MP_DEBUG */

/* Debugging/test trace events: */
#define TRACE_MP_TLB_FLUSH		MACHDBG_CODE(DBG_MACH_MP, 0)
#define TRACE_MP_CPUS_CALL		MACHDBG_CODE(DBG_MACH_MP, 1)
#define TRACE_MP_CPUS_CALL_LOCAL	MACHDBG_CODE(DBG_MACH_MP, 2)
#define TRACE_MP_CPUS_CALL_ACTION	MACHDBG_CODE(DBG_MACH_MP, 3)
#define TRACE_MP_CPUS_CALL_NOBUF	MACHDBG_CODE(DBG_MACH_MP, 4)
#define TRACE_MP_CPU_FAST_START		MACHDBG_CODE(DBG_MACH_MP, 5)
#define TRACE_MP_CPU_START		MACHDBG_CODE(DBG_MACH_MP, 6)
#define TRACE_MP_CPU_DEACTIVATE		MACHDBG_CODE(DBG_MACH_MP, 7)

#define ABS(v) (((v) > 0)?(v):-(v))

void slave_boot_init(void);
void i386_cpu_IPI(int cpu);

#if MACH_KDP
static void mp_kdp_wait(boolean_t flush, boolean_t isNMI);
#endif /* MACH_KDP */

#if MACH_KDP
static boolean_t cpu_signal_pending(int cpu, mp_event_t event);
#endif /* MACH_KDP */
static int NMIInterruptHandler(x86_saved_state_t *regs);

boolean_t smp_initialized = FALSE;
uint32_t TSC_sync_margin = 0xFFF;
volatile boolean_t force_immediate_debugger_NMI = FALSE;
volatile boolean_t pmap_tlb_flush_timeout = FALSE;
#if DEBUG || DEVELOPMENT
boolean_t mp_interrupt_watchdog_enabled = TRUE;
uint32_t mp_interrupt_watchdog_events = 0;
#endif

decl_simple_lock_data(, debugger_callback_lock);
struct debugger_callback *debugger_callback = NULL;

decl_lck_mtx_data(static, mp_cpu_boot_lock);
lck_mtx_ext_t mp_cpu_boot_lock_ext;

/* Variables needed for MP rendezvous. */
decl_simple_lock_data(, mp_rv_lock);
static void (*mp_rv_setup_func)(void *arg);
static void (*mp_rv_action_func)(void *arg);
static void (*mp_rv_teardown_func)(void *arg);
static void *mp_rv_func_arg;
static volatile int mp_rv_ncpus;
/* Cache-aligned barriers: */
static volatile long mp_rv_entry __attribute__((aligned(64)));
static volatile long mp_rv_exit __attribute__((aligned(64)));
static volatile long mp_rv_complete __attribute__((aligned(64)));

volatile uint64_t debugger_entry_time;
volatile uint64_t debugger_exit_time;
#if MACH_KDP
#include <kdp/kdp.h>
extern int kdp_snapshot;
static struct _kdp_xcpu_call_func {
	kdp_x86_xcpu_func_t func;
	void *arg0, *arg1;
	volatile long ret;
	volatile uint16_t cpu;
} kdp_xcpu_call_func = {
	.cpu = KDP_XCPU_NONE
};

#endif

/* Variables needed for MP broadcast. */
static void (*mp_bc_action_func)(void *arg);
static void *mp_bc_func_arg;
static int mp_bc_ncpus;
static volatile long mp_bc_count;
decl_lck_mtx_data(static, mp_bc_lock);
lck_mtx_ext_t mp_bc_lock_ext;
static volatile int debugger_cpu = -1;
volatile long NMIPI_acks = 0;
volatile long NMI_count = 0;
static NMI_reason_t NMI_panic_reason = NONE;
static int vector_timed_out;

extern void NMI_cpus(void);

static void mp_cpus_call_init(void);
static void mp_cpus_call_action(void);
static void mp_call_PM(void);

char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init

/* PAL-related routines */
boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler,
    int ipi_vector, i386_intr_func_t ipi_handler);
void i386_start_cpu(int lapic_id, int cpu_num);
void i386_send_NMI(int cpu);
void NMIPI_enable(boolean_t);

static lck_grp_t smp_lck_grp;
static lck_grp_attr_t smp_lck_grp_attr;

#define NUM_CPU_WARM_CALLS 20
struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS];
queue_head_t cpu_warm_call_list;
decl_simple_lock_data(static, cpu_warm_lock);

typedef struct cpu_warm_data {
	timer_call_t cwd_call;
	uint64_t cwd_deadline;
	int cwd_result;
} *cpu_warm_data_t;

static void cpu_prewarm_init(void);
static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1);
static void _cpu_warm_setup(void *arg);
static timer_call_t grab_warm_timer_call(void);
static void free_warm_timer_call(timer_call_t call);

void
smp_init(void)
{
	simple_lock_init(&mp_rv_lock, 0);
	simple_lock_init(&debugger_callback_lock, 0);
	lck_grp_attr_setdefault(&smp_lck_grp_attr);
	lck_grp_init(&smp_lck_grp, "i386_smp", &smp_lck_grp_attr);
	lck_mtx_init_ext(&mp_cpu_boot_lock, &mp_cpu_boot_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
	lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
	console_init();

	if (!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler,
	    LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler)) {
		return;
	}

	cpu_thread_init();

	DBGLOG_CPU_INIT(master_cpu);

	mp_cpus_call_init();
	mp_cpus_call_cpu_init(master_cpu);

#if DEBUG || DEVELOPMENT
	if (PE_parse_boot_argn("interrupt_watchdog",
	    &mp_interrupt_watchdog_enabled,
	    sizeof(mp_interrupt_watchdog_enabled))) {
		kprintf("Interrupt watchdog %sabled\n",
		    mp_interrupt_watchdog_enabled ? "en" : "dis");
	}
#endif

	if (PE_parse_boot_argn("TSC_sync_margin",
	    &TSC_sync_margin, sizeof(TSC_sync_margin))) {
		kprintf("TSC sync Margin 0x%x\n", TSC_sync_margin);
	} else if (cpuid_vmm_present()) {
		kprintf("TSC sync margin disabled\n");
		TSC_sync_margin = 0;
	}
	smp_initialized = TRUE;

	cpu_prewarm_init();

	return;
}

typedef struct {
	int target_cpu;
	int target_lapic;
	int starter_cpu;
} processor_start_info_t;
static processor_start_info_t start_info __attribute__((aligned(64)));

/*
 * Cache-alignment is to avoid cross-cpu false-sharing interference.
 */
static volatile long tsc_entry_barrier __attribute__((aligned(64)));
static volatile long tsc_exit_barrier __attribute__((aligned(64)));
static volatile uint64_t tsc_target __attribute__((aligned(64)));

/*
 * Poll a CPU to see when it has marked itself as running.
 */
static void
mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay)
{
	while (iters-- > 0) {
		if (cpu_datap(slot_num)->cpu_running) {
			break;
		}
		delay(usecdelay);
	}
}

/*
 * Quickly bring a CPU back online which has been halted.
 */
kern_return_t
intel_startCPU_fast(int slot_num)
{
	kern_return_t rc;

	/*
	 * Try to perform a fast restart
	 */
	rc = pmCPUExitHalt(slot_num);
	if (rc != KERN_SUCCESS) {
		/*
		 * The CPU was not eligible for a fast restart.
		 */
		return rc;
	}

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPU_FAST_START | DBG_FUNC_START,
		slot_num, 0, 0, 0, 0);

	/*
	 * Wait until the CPU is back online.
	 */
	mp_disable_preemption();

	/*
	 * We use short pauses (1us) for low latency. 30,000 iterations is
	 * longer than a full restart would require so it should be more
	 * than long enough.
	 */

	mp_wait_for_cpu_up(slot_num, 30000, 1);
	mp_enable_preemption();

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPU_FAST_START | DBG_FUNC_END,
		slot_num, cpu_datap(slot_num)->cpu_running, 0, 0, 0);

	/*
	 * Check to make sure that the CPU is really running. If not,
	 * go through the slow path.
	 */
	if (cpu_datap(slot_num)->cpu_running) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

static void
started_cpu(void)
{
	/* Here on the started cpu with cpu_running set TRUE */

	if (TSC_sync_margin &&
	    start_info.target_cpu == cpu_number()) {
		/*
		 * I've just started-up, synchronize again with the starter cpu
		 * and then snap my TSC.
		 */
		tsc_target = 0;
		atomic_decl(&tsc_entry_barrier, 1);
		while (tsc_entry_barrier != 0) {
			;	/* spin for starter and target at barrier */
		}
		tsc_target = rdtsc64();
		atomic_decl(&tsc_exit_barrier, 1);
	}
}

static void
start_cpu(void *arg)
{
	int i = 1000;
	processor_start_info_t *psip = (processor_start_info_t *) arg;

	/* Ignore this if the current processor is not the starter */
	if (cpu_number() != psip->starter_cpu) {
		return;
	}

	DBG("start_cpu(%p) about to start cpu %d, lapic %d\n",
	    arg, psip->target_cpu, psip->target_lapic);

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPU_START | DBG_FUNC_START,
		psip->target_cpu,
		psip->target_lapic, 0, 0, 0);

	i386_start_cpu(psip->target_lapic, psip->target_cpu);

#ifdef POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	DBG("start_cpu(%p) about to wait for cpu %d\n",
	    arg, psip->target_cpu);

	mp_wait_for_cpu_up(psip->target_cpu, i * 100, 100);

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPU_START | DBG_FUNC_END,
		psip->target_cpu,
		cpu_datap(psip->target_cpu)->cpu_running, 0, 0, 0);

	if (TSC_sync_margin &&
	    cpu_datap(psip->target_cpu)->cpu_running) {
		/*
		 * Compare the TSC from the started processor with ours.
		 * Report and log/panic if it diverges by more than
		 * TSC_sync_margin (TSC_SYNC_MARGIN) ticks. This margin
		 * can be overridden by boot-arg (with 0 meaning no checking).
		 */
		uint64_t tsc_starter;
		int64_t tsc_delta;
		atomic_decl(&tsc_entry_barrier, 1);
		while (tsc_entry_barrier != 0) {
			;	/* spin for both processors at barrier */
		}
		tsc_starter = rdtsc64();
		atomic_decl(&tsc_exit_barrier, 1);
		while (tsc_exit_barrier != 0) {
			;	/* spin for target to store its TSC */
		}
		tsc_delta = tsc_target - tsc_starter;
		kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n",
		    psip->target_cpu, tsc_target, tsc_delta, tsc_delta);
		if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) {
#if DEBUG
			panic(
#else
			printf(
#endif
				"Unsynchronized TSC for cpu %d: "
				"0x%016llx, delta 0x%llx\n",
				psip->target_cpu, tsc_target, tsc_delta);
		}
	}
}

kern_return_t
intel_startCPU(
	int slot_num)
{
	int lapic = cpu_to_lapic[slot_num];
	boolean_t istate;

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) (uintptr_t)IdlePTD);

	/*
	 * Initialize (or re-initialize) the descriptor tables for this cpu.
	 * Propagate processor mode to slave.
	 */
	cpu_desc_init(cpu_datap(slot_num));

	/* Serialize use of the slave boot stack, etc. */
	lck_mtx_lock(&mp_cpu_boot_lock);

	istate = ml_set_interrupts_enabled(FALSE);
	if (slot_num == get_cpu_number()) {
		ml_set_interrupts_enabled(istate);
		lck_mtx_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

	start_info.starter_cpu = cpu_number();
	start_info.target_cpu = slot_num;
	start_info.target_lapic = lapic;
	tsc_entry_barrier = 2;
	tsc_exit_barrier = 2;

	/*
	 * Perform the processor startup sequence with all running
	 * processors rendezvous'ed. This is required during periods when
	 * the cache-disable bit is set for MTRR/PAT initialization.
	 */
	mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);

	start_info.target_cpu = 0;

	ml_set_interrupts_enabled(istate);
	lck_mtx_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		kprintf("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
		delay(1000000);
		halt_cpu();
		return KERN_SUCCESS;
	} else {
		kprintf("Started cpu %d (lapic id %08x)\n", slot_num, lapic);
		return KERN_SUCCESS;
	}
}

#if MP_DEBUG
cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
cpu_signal_event_log_t *cpu_handle[MAX_CPUS];

MP_EVENT_NAME_DECL();

#endif /* MP_DEBUG */

/*
 * Note: called with NULL state when polling for TLB flush and cross-calls.
 */
int
cpu_signal_handler(x86_saved_state_t *regs)
{
#if !MACH_KDP
#pragma unused (regs)
#endif /* !MACH_KDP */
	int my_cpu;
	volatile int *my_word;

	SCHED_STATS_IPI(current_processor());

	my_cpu = cpu_number();
	my_word = &cpu_data_ptr[my_cpu]->cpu_signals;
	/* Store the initial set of signals for diagnostics. New
	 * signals could arrive while these are being processed
	 * so it's no more than a hint.
	 */

	cpu_data_ptr[my_cpu]->cpu_prior_signals = *my_word;

	do {
#if MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_KDP);
			i_bit_clear(MP_KDP, my_word);
/* Ensure that the i386_kernel_state at the base of the
 * current thread's stack (if any) is synchronized with the
 * context at the moment of the interrupt, to facilitate
 * access through the debugger.
 */
			sync_iss_to_iks(regs);
			if (pmsafe_debug && !kdp_snapshot) {
				pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
			}
			mp_kdp_wait(TRUE, FALSE);
			if (pmsafe_debug && !kdp_snapshot) {
				pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
			}
		} else
#endif /* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_CALL, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_CALL);
			i_bit_clear(MP_CALL, my_word);
			mp_cpus_call_action();
		} else if (i_bit(MP_CALL_PM, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_CALL_PM);
			i_bit_clear(MP_CALL_PM, my_word);
			mp_call_PM();
		}
		if (regs == NULL) {
			/* Called to poll only for cross-calls and TLB flush */
			break;
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
		}
	} while (*my_word);

	return 0;
}

extern void kprintf_break_lock(void);
int
NMIInterruptHandler(x86_saved_state_t *regs)
{
	void *stackptr;
	char pstr[256];
	uint64_t now = mach_absolute_time();

	if (panic_active() && !panicDebugging) {
		if (pmsafe_debug) {
			pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
		}
		for (;;) {
			cpu_pause();
		}
	}

	atomic_incl(&NMIPI_acks, 1);
	atomic_incl(&NMI_count, 1);
	sync_iss_to_iks_unconditionally(regs);
	__asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr));

	if (cpu_number() == debugger_cpu) {
		goto NMExit;
	}

	if (NMI_panic_reason == SPINLOCK_TIMEOUT) {
		snprintf(&pstr[0], sizeof(pstr),
		    "Panic(CPU %d, time %llu): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n",
		    cpu_number(), now, spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu);
		panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs);
	} else if (NMI_panic_reason == TLB_FLUSH_TIMEOUT) {
		snprintf(&pstr[0], sizeof(pstr),
		    "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: TLB flush timeout, TLB state:0x%x\n",
		    cpu_number(), now, current_cpu_datap()->cpu_tlb_invalid);
		panic_i386_backtrace(stackptr, 48, &pstr[0], TRUE, regs);
	} else if (NMI_panic_reason == CROSSCALL_TIMEOUT) {
		snprintf(&pstr[0], sizeof(pstr),
		    "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: cross-call timeout\n",
		    cpu_number(), now);
		panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs);
	} else if (NMI_panic_reason == INTERRUPT_WATCHDOG) {
		snprintf(&pstr[0], sizeof(pstr),
		    "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: interrupt watchdog for vector 0x%x\n",
		    cpu_number(), now, vector_timed_out);
		panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs);
	}

#if MACH_KDP
	if (pmsafe_debug && !kdp_snapshot) {
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
	}
	current_cpu_datap()->cpu_NMI_acknowledged = TRUE;
	i_bit_clear(MP_KDP, &current_cpu_datap()->cpu_signals);
	if (panic_active() || NMI_panic_reason != NONE) {
		mp_kdp_wait(FALSE, TRUE);
	} else if (!mp_kdp_trap &&
	    !mp_kdp_is_NMI &&
	    virtualized && (debug_boot_arg & DB_NMI)) {
		/*
		 * Under a VMM with the debug boot-arg set, drop into kdp.
		 * Since an NMI is involved, there's a risk of contending with
		 * a panic. And side-effects of NMIs may result in entry into,
		 * and continuing from, the debugger being unreliable.
		 */
		if (__sync_bool_compare_and_swap(&mp_kdp_is_NMI, FALSE, TRUE)) {
			kprintf_break_lock();
			kprintf("Debugger entry requested by NMI\n");
			kdp_i386_trap(T_DEBUG, saved_state64(regs), 0, 0);
			printf("Debugger entry requested by NMI\n");
			mp_kdp_is_NMI = FALSE;
		} else {
			mp_kdp_wait(FALSE, FALSE);
		}
	} else {
		mp_kdp_wait(FALSE, FALSE);
	}
	if (pmsafe_debug && !kdp_snapshot) {
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
	}
#endif
NMExit:
	return 1;
}


/*
 * cpu_interrupt is really just to be used by the scheduler to
 * get a CPU's attention; it may not always issue an IPI. If an
 * IPI is always needed then use i386_cpu_IPI.
 */
void
cpu_interrupt(int cpu)
{
	boolean_t did_IPI = FALSE;

	if (smp_initialized
	    && pmCPUExitIdle(cpu_datap(cpu))) {
		i386_cpu_IPI(cpu);
		did_IPI = TRUE;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), cpu, did_IPI, 0, 0, 0);
}

/*
 * Send a true NMI via the local APIC to the specified CPU.
 */
void
cpu_NMI_interrupt(int cpu)
{
	if (smp_initialized) {
		i386_send_NMI(cpu);
	}
}

void
NMI_cpus(void)
{
	unsigned int cpu;
	boolean_t intrs_enabled;
	uint64_t tsc_timeout;

	intrs_enabled = ml_set_interrupts_enabled(FALSE);
	NMIPI_enable(TRUE);
	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (!cpu_is_running(cpu)) {
			continue;
		}
		cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE;
		cpu_NMI_interrupt(cpu);
		tsc_timeout = !machine_timeout_suspended() ?
		    rdtsc64() + (1000 * 1000 * 1000 * 10ULL) :
		    ~0ULL;
		while (!cpu_datap(cpu)->cpu_NMI_acknowledged) {
			handle_pending_TLB_flushes();
			cpu_pause();
			if (rdtsc64() > tsc_timeout) {
				panic("NMI_cpus() timeout cpu %d", cpu);
			}
		}
		cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE;
	}
	NMIPI_enable(FALSE);

	ml_set_interrupts_enabled(intrs_enabled);
}

static void(*volatile mp_PM_func)(void) = NULL;

static void
mp_call_PM(void)
{
	assert(!ml_get_interrupts_enabled());

	if (mp_PM_func != NULL) {
		mp_PM_func();
	}
}

void
cpu_PM_interrupt(int cpu)
{
	assert(!ml_get_interrupts_enabled());

	if (mp_PM_func != NULL) {
		if (cpu == cpu_number()) {
			mp_PM_func();
		} else {
			i386_signal_cpu(cpu, MP_CALL_PM, ASYNC);
		}
	}
}

void
PM_interrupt_register(void (*fn)(void))
{
	mp_PM_func = fn;
}
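
/*
 * Illustrative sketch (not part of the original file): how a power-management
 * driver might hook this interface. The handler and init routine names below
 * are hypothetical; the only behaviour assumed is what is visible above, i.e.
 * the registered callback runs with interrupts disabled on the CPU that
 * cpu_PM_interrupt() targets (directly, or via an MP_CALL_PM IPI).
 */
#if 0	/* example only, not built */
static void
example_pm_handler(void)
{
	/* Runs on the interrupted CPU with interrupts masked. */
	kprintf("PM event on cpu %d\n", cpu_number());
}

static void
example_pm_init(void)
{
	PM_interrupt_register(example_pm_handler);
	/* Policy code may later poke a specific CPU with cpu_PM_interrupt(). */
}
#endif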

void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int *signals = &cpu_datap(cpu)->cpu_signals;
	uint64_t tsc_timeout;


	if (!cpu_datap(cpu)->cpu_running) {
		return;
	}

	if (event == MP_TLB_FLUSH) {
		KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0);
	}

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	i386_cpu_IPI(cpu);
	if (mode == SYNC) {
again:
		tsc_timeout = !machine_timeout_suspended() ?
		    rdtsc64() + (1000 * 1000 * 1000) :
		    ~0ULL;
		while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
			    cpu, event);
			goto again;
		}
	}
	if (event == MP_TLB_FLUSH) {
		KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0);
	}
}

/*
 * Helper function called when busy-waiting: panic if too long
 * a TSC-based time has elapsed since the start of the spin.
 */
static boolean_t
mp_spin_timeout(uint64_t tsc_start)
{
	uint64_t tsc_timeout;

	cpu_pause();
	if (machine_timeout_suspended()) {
		return FALSE;
	}

	/*
	 * The timeout is 4 * the spinlock timeout period
	 * unless we have serial console printing (kprintf) enabled
	 * in which case we allow an even greater margin.
	 */
	tsc_timeout = disable_serial_output ? LockTimeOutTSC << 2
	    : LockTimeOutTSC << 4;
	return rdtsc64() > tsc_start + tsc_timeout;
}
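
/*
 * Illustrative sketch (not part of the original file): the canonical pattern
 * the callers below follow. A caller snapshots the TSC once at the start of
 * the spin and then checks mp_spin_timeout() on every iteration. The flag and
 * panic string are hypothetical.
 */
#if 0	/* example only, not built */
static void
example_busy_wait(volatile long *flag)
{
	uint64_t tsc_spin_start = rdtsc64();

	while (*flag == 0) {
		if (mp_spin_timeout(tsc_spin_start)) {
			panic("example_busy_wait() timed out");
		}
	}
}
#endif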

/*
 * Helper function to take a spinlock while ensuring that incoming IPIs
 * are still serviced if interrupts are masked while we spin.
 * Returns current interrupt state.
 */
boolean_t
mp_safe_spin_lock(usimple_lock_t lock)
{
	if (ml_get_interrupts_enabled()) {
		simple_lock(lock, LCK_GRP_NULL);
		return TRUE;
	} else {
		uint64_t tsc_spin_start = rdtsc64();
		while (!simple_lock_try(lock, LCK_GRP_NULL)) {
			cpu_signal_handler(NULL);
			if (mp_spin_timeout(tsc_spin_start)) {
				uint32_t lock_cpu;
				uintptr_t lowner = (uintptr_t)
				    lock->interlock.lock_data;
				spinlock_timed_out = lock;
				lock_cpu = spinlock_timeout_NMI(lowner);
				NMIPI_panic(cpu_to_cpumask(lock_cpu), SPINLOCK_TIMEOUT);
				panic("mp_safe_spin_lock() timed out, lock: %p, owner thread: 0x%lx, current_thread: %p, owner on CPU 0x%x, time: %llu",
				    lock, lowner, current_thread(), lock_cpu, mach_absolute_time());
			}
		}
		return FALSE;
	}
}
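
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * mp_safe_spin_lock() with simple_unlock(), as done with x86_topo_lock later
 * in this file. The lock parameter and function name are hypothetical.
 */
#if 0	/* example only, not built */
static void
example_locked_update(usimple_lock_t example_lock)
{
	boolean_t intrs = ml_set_interrupts_enabled(FALSE);

	/* Services pending IPIs/TLB flushes while spinning with interrupts off. */
	(void) mp_safe_spin_lock(example_lock);
	/* ... critical section ... */
	simple_unlock(example_lock);
	ml_set_interrupts_enabled(intrs);
}
#endif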

/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(__unused void *null)
{
	boolean_t intrs_enabled;
	uint64_t tsc_spin_start;

	/*
	 * Note that mp_rv_lock was acquired by the thread that initiated the
	 * rendezvous and must have been acquired before we enter
	 * mp_rendezvous_action().
	 */
	current_cpu_datap()->cpu_rendezvous_in_progress = TRUE;

	/* setup function */
	if (mp_rv_setup_func != NULL) {
		mp_rv_setup_func(mp_rv_func_arg);
	}

	intrs_enabled = ml_get_interrupts_enabled();

	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_entry, 1);
	tsc_spin_start = rdtsc64();

	while (mp_rv_entry < mp_rv_ncpus) {
		/* poll for pesky tlb flushes if interrupts disabled */
		if (!intrs_enabled) {
			handle_pending_TLB_flushes();
		}
		if (mp_spin_timeout(tsc_spin_start)) {
			panic("mp_rv_action() entry: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_entry, mp_rv_ncpus, tsc_spin_start, rdtsc64());
		}
	}

	/* action function */
	if (mp_rv_action_func != NULL) {
		mp_rv_action_func(mp_rv_func_arg);
	}

	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_exit, 1);
	tsc_spin_start = rdtsc64();
	while (mp_rv_exit < mp_rv_ncpus) {
		if (!intrs_enabled) {
			handle_pending_TLB_flushes();
		}
		if (mp_spin_timeout(tsc_spin_start)) {
			panic("mp_rv_action() exit: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_exit, mp_rv_ncpus, tsc_spin_start, rdtsc64());
		}
	}

	/* teardown function */
	if (mp_rv_teardown_func != NULL) {
		mp_rv_teardown_func(mp_rv_func_arg);
	}

	current_cpu_datap()->cpu_rendezvous_in_progress = FALSE;

	/* Bump completion count */
	atomic_incl(&mp_rv_complete, 1);
}

void
mp_rendezvous(void (*setup_func)(void *),
    void (*action_func)(void *),
    void (*teardown_func)(void *),
    void *arg)
{
	uint64_t tsc_spin_start;

	if (!smp_initialized) {
		if (setup_func != NULL) {
			setup_func(arg);
		}
		if (action_func != NULL) {
			action_func(arg);
		}
		if (teardown_func != NULL) {
			teardown_func(arg);
		}
		return;
	}

	/* obtain rendezvous lock */
	mp_rendezvous_lock();

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_entry = 0;
	mp_rv_exit = 0;
	mp_rv_complete = 0;

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	mp_rv_ncpus = mp_cpus_call(CPUMASK_OTHERS, NOSYNC, &mp_rendezvous_action, NULL) + 1;

	/* call executor function on this cpu */
	mp_rendezvous_action(NULL);

	/*
	 * Spin for everyone to complete.
	 * This is necessary to ensure that all processors have proceeded
	 * from the exit barrier before we release the rendezvous structure.
	 */
	tsc_spin_start = rdtsc64();
	while (mp_rv_complete < mp_rv_ncpus) {
		if (mp_spin_timeout(tsc_spin_start)) {
			panic("mp_rendezvous() timeout: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_complete, mp_rv_ncpus, tsc_spin_start, rdtsc64());
		}
	}

	/* Tidy up */
	mp_rv_setup_func = NULL;
	mp_rv_action_func = NULL;
	mp_rv_teardown_func = NULL;
	mp_rv_func_arg = NULL;

	/* release lock */
	mp_rendezvous_unlock();
}

void
mp_rendezvous_lock(void)
{
	(void) mp_safe_spin_lock(&mp_rv_lock);
}

void
mp_rendezvous_unlock(void)
{
	simple_unlock(&mp_rv_lock);
}

void
mp_rendezvous_break_lock(void)
{
	simple_lock_init(&mp_rv_lock, 0);
}

static void
setup_disable_intrs(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);

	current_cpu_datap()->cpu_iflag = intr;
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
teardown_restore_intrs(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
 * This is exported for use by kexts.
 */
void
mp_rendezvous_no_intrs(
	void (*action_func)(void *),
	void *arg)
{
	mp_rendezvous(setup_disable_intrs,
	    action_func,
	    teardown_restore_intrs,
	    arg);
}
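
/*
 * Illustrative sketch (not part of the original file): running one action on
 * every CPU with interrupts disabled, the way the MTRR/PAT and CPU-start code
 * drives this interface. The action below is hypothetical; the real constraint
 * is the one documented above -- it must be reentrant and safe to run in an
 * unknown lock context on every CPU at once.
 */
#if 0	/* example only, not built */
static void
example_rv_action(__unused void *arg)
{
	/* Executed once on each CPU; interrupts are masked by the wrapper. */
	kprintf("rendezvous on cpu %d\n", cpu_number());
}

static void
example_run_everywhere(void)
{
	mp_rendezvous_no_intrs(example_rv_action, NULL);
}
#endif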


typedef struct {
	queue_chain_t link;			/* queue linkage */
	void (*func)(void *, void *);		/* routine to call */
	void *arg0;				/* routine's 1st arg */
	void *arg1;				/* routine's 2nd arg */
	cpumask_t *maskp;			/* completion response mask */
} mp_call_t;


typedef struct {
	queue_head_t queue;
	decl_simple_lock_data(, lock);
} mp_call_queue_t;
#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS
static mp_call_queue_t mp_cpus_call_freelist;
static mp_call_queue_t mp_cpus_call_head[MAX_CPUS];

static inline boolean_t
mp_call_head_lock(mp_call_queue_t *cqp)
{
	boolean_t intrs_enabled;

	intrs_enabled = ml_set_interrupts_enabled(FALSE);
	simple_lock(&cqp->lock, LCK_GRP_NULL);

	return intrs_enabled;
}

/*
 * Deliver an NMIPI to a set of processors to cause them to panic.
 */
void
NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why)
{
	unsigned int cpu;
	cpumask_t cpu_bit;
	uint64_t deadline;

	NMIPI_enable(TRUE);
	NMI_panic_reason = why;

	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
		if ((cpu_mask & cpu_bit) == 0) {
			continue;
		}
		cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE;
		cpu_NMI_interrupt(cpu);
	}

	/* Wait (only so long) for NMI'ed cpus to respond */
	deadline = mach_absolute_time() + LockTimeOut;
	for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) {
		if ((cpu_mask & cpu_bit) == 0) {
			continue;
		}
		while (!cpu_datap(cpu)->cpu_NMI_acknowledged &&
		    mach_absolute_time() < deadline) {
			cpu_pause();
		}
	}
}

#if MACH_ASSERT
static inline boolean_t
mp_call_head_is_locked(mp_call_queue_t *cqp)
{
	return !ml_get_interrupts_enabled() &&
	       hw_lock_held((hw_lock_t)&cqp->lock);
}
#endif

static inline void
mp_call_head_unlock(mp_call_queue_t *cqp, boolean_t intrs_enabled)
{
	simple_unlock(&cqp->lock);
	ml_set_interrupts_enabled(intrs_enabled);
}

static inline mp_call_t *
mp_call_alloc(void)
{
	mp_call_t *callp = NULL;
	boolean_t intrs_enabled;
	mp_call_queue_t *cqp = &mp_cpus_call_freelist;

	intrs_enabled = mp_call_head_lock(cqp);
	if (!queue_empty(&cqp->queue)) {
		queue_remove_first(&cqp->queue, callp, typeof(callp), link);
	}
	mp_call_head_unlock(cqp, intrs_enabled);

	return callp;
}

static inline void
mp_call_free(mp_call_t *callp)
{
	boolean_t intrs_enabled;
	mp_call_queue_t *cqp = &mp_cpus_call_freelist;

	intrs_enabled = mp_call_head_lock(cqp);
	queue_enter_first(&cqp->queue, callp, typeof(callp), link);
	mp_call_head_unlock(cqp, intrs_enabled);
}

static inline mp_call_t *
mp_call_dequeue_locked(mp_call_queue_t *cqp)
{
	mp_call_t *callp = NULL;

	assert(mp_call_head_is_locked(cqp));
	if (!queue_empty(&cqp->queue)) {
		queue_remove_first(&cqp->queue, callp, typeof(callp), link);
	}
	return callp;
}

static inline void
mp_call_enqueue_locked(
	mp_call_queue_t *cqp,
	mp_call_t *callp)
{
	queue_enter(&cqp->queue, callp, typeof(callp), link);
}

/* Called on the boot processor to initialize global structures */
static void
mp_cpus_call_init(void)
{
	mp_call_queue_t *cqp = &mp_cpus_call_freelist;

	DBG("mp_cpus_call_init()\n");
	simple_lock_init(&cqp->lock, 0);
	queue_init(&cqp->queue);
}

/*
 * Called at processor registration to add call buffers to the free list
 * and to initialize the per-cpu call queue.
 */
void
mp_cpus_call_cpu_init(int cpu)
{
	int i;
	mp_call_queue_t *cqp = &mp_cpus_call_head[cpu];
	mp_call_t *callp;

	simple_lock_init(&cqp->lock, 0);
	queue_init(&cqp->queue);
	for (i = 0; i < MP_CPUS_CALL_BUFS_PER_CPU; i++) {
		callp = (mp_call_t *) kalloc(sizeof(mp_call_t));
		mp_call_free(callp);
	}

	DBG("mp_cpus_call_init(%d) done\n", cpu);
}

/*
 * This is called from cpu_signal_handler() to process an MP_CALL signal.
 * And also from i386_deactivate_cpu() when a cpu is being taken offline.
 */
static void
mp_cpus_call_action(void)
{
	mp_call_queue_t *cqp;
	boolean_t intrs_enabled;
	mp_call_t *callp;
	mp_call_t call;

	assert(!ml_get_interrupts_enabled());
	cqp = &mp_cpus_call_head[cpu_number()];
	intrs_enabled = mp_call_head_lock(cqp);
	while ((callp = mp_call_dequeue_locked(cqp)) != NULL) {
		/* Copy call request to the stack to free buffer */
		call = *callp;
		mp_call_free(callp);
		if (call.func != NULL) {
			mp_call_head_unlock(cqp, intrs_enabled);
			KERNEL_DEBUG_CONSTANT(
				TRACE_MP_CPUS_CALL_ACTION,
				VM_KERNEL_UNSLIDE(call.func), VM_KERNEL_UNSLIDE_OR_PERM(call.arg0),
				VM_KERNEL_UNSLIDE_OR_PERM(call.arg1), VM_KERNEL_ADDRPERM(call.maskp), 0);
			call.func(call.arg0, call.arg1);
			(void) mp_call_head_lock(cqp);
		}
		if (call.maskp != NULL) {
			i_bit_set(cpu_number(), call.maskp);
		}
	}
	mp_call_head_unlock(cqp, intrs_enabled);
}

/*
 * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
 * Possible modes are:
 *  SYNC:   function is called serially on target cpus in logical cpu order
 *          waiting for each call to be acknowledged before proceeding
 *  ASYNC:  function call is queued to the specified cpus
 *          waiting for all calls to complete in parallel before returning
 *  NOSYNC: function calls are queued
 *          but we return before confirmation of calls completing.
 * The action function may be NULL.
 * The cpu mask may include the local cpu. Offline cpus are ignored.
 * The return value is the number of cpus on which the call was made or queued.
 */
cpu_t
mp_cpus_call(
	cpumask_t cpus,
	mp_sync_t mode,
	void (*action_func)(void *),
	void *arg)
{
	return mp_cpus_call1(
		cpus,
		mode,
		(void (*)(void *, void *))action_func,
		arg,
		NULL,
		NULL);
}
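
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * cross-call to every running CPU, including the caller. The action function
 * and counter are hypothetical; the mask value and modes are the ones
 * documented above (CPUMASK_ALL / CPUMASK_OTHERS / cpu_to_cpumask(), and
 * SYNC / ASYNC / NOSYNC).
 */
#if 0	/* example only, not built */
static void
example_count_cpu(void *arg)
{
	/* Runs on each target CPU with interrupts disabled. */
	atomic_incl((volatile long *) arg, 1);
}

static void
example_cross_call(void)
{
	long count = 0;
	cpu_t ncalled;

	ncalled = mp_cpus_call(CPUMASK_ALL, SYNC, example_count_cpu, (void *) &count);
	kprintf("example_cross_call: ran on %d cpus, count %ld\n", ncalled, count);
}
#endif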

static void
mp_cpus_call_wait(boolean_t intrs_enabled,
    cpumask_t cpus_called,
    cpumask_t *cpus_responded)
{
	mp_call_queue_t *cqp;
	uint64_t tsc_spin_start;

	assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0);
	cqp = &mp_cpus_call_head[cpu_number()];

	tsc_spin_start = rdtsc64();
	while (*cpus_responded != cpus_called) {
		if (!intrs_enabled) {
			/* Sniffing w/o locking */
			if (!queue_empty(&cqp->queue)) {
				mp_cpus_call_action();
			}
			cpu_signal_handler(NULL);
		}
		if (mp_spin_timeout(tsc_spin_start)) {
			cpumask_t cpus_unresponsive;

			cpus_unresponsive = cpus_called & ~(*cpus_responded);
			NMIPI_panic(cpus_unresponsive, CROSSCALL_TIMEOUT);
			panic("mp_cpus_call_wait() timeout, cpus: 0x%llx",
			    cpus_unresponsive);
		}
	}
}

cpu_t
mp_cpus_call1(
	cpumask_t cpus,
	mp_sync_t mode,
	void (*action_func)(void *, void *),
	void *arg0,
	void *arg1,
	cpumask_t *cpus_calledp)
{
	cpu_t cpu = 0;
	boolean_t intrs_enabled = FALSE;
	boolean_t call_self = FALSE;
	cpumask_t cpus_called = 0;
	cpumask_t cpus_responded = 0;
	long cpus_call_count = 0;
	uint64_t tsc_spin_start;
	boolean_t topo_lock;

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPUS_CALL | DBG_FUNC_START,
		cpus, mode, VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1));

	if (!smp_initialized) {
		if ((cpus & CPUMASK_SELF) == 0) {
			goto out;
		}
		if (action_func != NULL) {
			intrs_enabled = ml_set_interrupts_enabled(FALSE);
			action_func(arg0, arg1);
			ml_set_interrupts_enabled(intrs_enabled);
		}
		call_self = TRUE;
		goto out;
	}

	/*
	 * Queue the call for each non-local requested cpu.
	 * This is performed under the topo lock to prevent changes to
	 * cpus online state and to prevent concurrent rendezvouses --
	 * although an exception is made if we're calling only the master
	 * processor since that always remains active. Note: this exception
	 * is expected for longterm timer nosync cross-calls to the master cpu.
	 */
	mp_disable_preemption();
	intrs_enabled = ml_get_interrupts_enabled();
	topo_lock = (cpus != cpu_to_cpumask(master_cpu));
	if (topo_lock) {
		ml_set_interrupts_enabled(FALSE);
		(void) mp_safe_spin_lock(&x86_topo_lock);
	}
	for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
		if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
		    !cpu_is_running(cpu)) {
			continue;
		}
		tsc_spin_start = rdtsc64();
		if (cpu == (cpu_t) cpu_number()) {
			/*
			 * We don't IPI ourself and if calling asynchronously,
			 * we defer our call until we have signalled all others.
			 */
			call_self = TRUE;
			if (mode == SYNC && action_func != NULL) {
				KERNEL_DEBUG_CONSTANT(
					TRACE_MP_CPUS_CALL_LOCAL,
					VM_KERNEL_UNSLIDE(action_func),
					VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1), 0, 0);
				action_func(arg0, arg1);
			}
		} else {
			/*
			 * Here to queue a call to cpu and IPI.
			 */
			mp_call_t *callp = NULL;
			mp_call_queue_t *cqp = &mp_cpus_call_head[cpu];
			boolean_t intrs_inner;

queue_call:
			if (callp == NULL) {
				callp = mp_call_alloc();
			}
			intrs_inner = mp_call_head_lock(cqp);
			if (callp == NULL) {
				mp_call_head_unlock(cqp, intrs_inner);
				KERNEL_DEBUG_CONSTANT(
					TRACE_MP_CPUS_CALL_NOBUF,
					cpu, 0, 0, 0, 0);
				if (!intrs_inner) {
					/* Sniffing w/o locking */
					if (!queue_empty(&cqp->queue)) {
						mp_cpus_call_action();
					}
					handle_pending_TLB_flushes();
				}
				if (mp_spin_timeout(tsc_spin_start)) {
					panic("mp_cpus_call1() timeout start: 0x%llx, cur: 0x%llx",
					    tsc_spin_start, rdtsc64());
				}
				goto queue_call;
			}
			callp->maskp = (mode == NOSYNC) ? NULL : &cpus_responded;
			callp->func = action_func;
			callp->arg0 = arg0;
			callp->arg1 = arg1;
			mp_call_enqueue_locked(cqp, callp);
			cpus_call_count++;
			cpus_called |= cpu_to_cpumask(cpu);
			i386_signal_cpu(cpu, MP_CALL, ASYNC);
			mp_call_head_unlock(cqp, intrs_inner);
			if (mode == SYNC) {
				mp_cpus_call_wait(intrs_inner, cpus_called, &cpus_responded);
			}
		}
	}
	if (topo_lock) {
		simple_unlock(&x86_topo_lock);
		ml_set_interrupts_enabled(intrs_enabled);
	}

	/* Call locally if mode not SYNC */
	if (mode != SYNC && call_self) {
		KERNEL_DEBUG_CONSTANT(
			TRACE_MP_CPUS_CALL_LOCAL,
			VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1), 0, 0);
		if (action_func != NULL) {
			ml_set_interrupts_enabled(FALSE);
			action_func(arg0, arg1);
			ml_set_interrupts_enabled(intrs_enabled);
		}
	}

	/* For ASYNC, now wait for all signaled cpus to complete their calls */
	if (mode == ASYNC) {
		mp_cpus_call_wait(intrs_enabled, cpus_called, &cpus_responded);
	}

	/* Safe to allow pre-emption now */
	mp_enable_preemption();

out:
	if (call_self) {
		cpus_called |= cpu_to_cpumask(cpu);
		cpus_call_count++;
	}

	if (cpus_calledp) {
		*cpus_calledp = cpus_called;
	}

	KERNEL_DEBUG_CONSTANT(
		TRACE_MP_CPUS_CALL | DBG_FUNC_END,
		cpus_call_count, cpus_called, 0, 0, 0);

	return (cpu_t) cpus_call_count;
}


static void
mp_broadcast_action(__unused void *null)
{
	/* call action function */
	if (mp_bc_action_func != NULL) {
		mp_bc_action_func(mp_bc_func_arg);
	}

	/* if we're the last one through, wake up the instigator */
	if (atomic_decl_and_test(&mp_bc_count, 1)) {
		thread_wakeup(((event_t)(uintptr_t) &mp_bc_count));
	}
}

/*
 * mp_broadcast() runs a given function on all active cpus.
 * The caller blocks until the function has run on all cpus.
 * The caller will also block if there is another pending broadcast.
 */
void
mp_broadcast(
	void (*action_func)(void *),
	void *arg)
{
	if (!smp_initialized) {
		if (action_func != NULL) {
			action_func(arg);
		}
		return;
	}

	/* obtain broadcast lock */
	lck_mtx_lock(&mp_bc_lock);

	/* set static function pointers */
	mp_bc_action_func = action_func;
	mp_bc_func_arg = arg;

	assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT);

	/*
	 * signal other processors, which will call mp_broadcast_action()
	 */
	mp_bc_count = real_ncpus;		/* assume max possible active */
	mp_bc_ncpus = mp_cpus_call(CPUMASK_ALL, NOSYNC, *mp_broadcast_action, NULL);
	atomic_decl(&mp_bc_count, real_ncpus - mp_bc_ncpus); /* subtract inactive */

	/* block for other cpus to have run action_func */
	if (mp_bc_ncpus > 1) {
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		clear_wait(current_thread(), THREAD_AWAKENED);
	}

	/* release lock */
	lck_mtx_unlock(&mp_bc_lock);
}
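
/*
 * Illustrative sketch (not part of the original file): mp_broadcast() used
 * from a thread context that is allowed to block. The action routine is
 * hypothetical; unlike mp_cpus_call(), the caller here sleeps until every
 * active CPU has run it.
 */
#if 0	/* example only, not built */
static void
example_broadcast_action(__unused void *arg)
{
	/* Runs once on every active CPU. */
}

static void
example_broadcast(void)
{
	mp_broadcast(example_broadcast_action, NULL);
}
#endif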
1494
fe8ab488
A
1495void
1496mp_cpus_kick(cpumask_t cpus)
1497{
0a7de745
A
1498 cpu_t cpu;
1499 boolean_t intrs_enabled = FALSE;
fe8ab488
A
1500
1501 intrs_enabled = ml_set_interrupts_enabled(FALSE);
1502 mp_safe_spin_lock(&x86_topo_lock);
1503
1504 for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
1505 if ((cpu == (cpu_t) cpu_number())
0a7de745
A
1506 || ((cpu_to_cpumask(cpu) & cpus) == 0)
1507 || !cpu_is_running(cpu)) {
1508 continue;
fe8ab488
A
1509 }
1510
1511 lapic_send_ipi(cpu, LAPIC_VECTOR(KICK));
1512 }
1513
1514 simple_unlock(&x86_topo_lock);
1515 ml_set_interrupts_enabled(intrs_enabled);
1516}
1517
2d21ac55
A
1518void
1519i386_activate_cpu(void)
1520{
0a7de745 1521 cpu_data_t *cdp = current_cpu_datap();
2d21ac55
A
1522
1523 assert(!ml_get_interrupts_enabled());
1524
1525 if (!smp_initialized) {
1526 cdp->cpu_running = TRUE;
1527 return;
1528 }
1529
5ba3f43e 1530 mp_safe_spin_lock(&x86_topo_lock);
2d21ac55 1531 cdp->cpu_running = TRUE;
7e4a7d39 1532 started_cpu();
0a7de745 1533 pmap_tlbi_range(0, ~0ULL, true, 0);
2d21ac55
A
1534 simple_unlock(&x86_topo_lock);
1535}
1536
1537void
1538i386_deactivate_cpu(void)
1539{
0a7de745 1540 cpu_data_t *cdp = current_cpu_datap();
2d21ac55
A
1541
1542 assert(!ml_get_interrupts_enabled());
0a7de745 1543
bd504ef0
A
1544 KERNEL_DEBUG_CONSTANT(
1545 TRACE_MP_CPU_DEACTIVATE | DBG_FUNC_START,
1546 0, 0, 0, 0, 0);
2d21ac55 1547
5ba3f43e 1548 mp_safe_spin_lock(&x86_topo_lock);
2d21ac55
A
1549 cdp->cpu_running = FALSE;
1550 simple_unlock(&x86_topo_lock);
1551
bd504ef0
A
1552 /*
1553 * Move all of this cpu's timers to the master/boot cpu,
1554 * and poke it in case there's a sooner deadline for it to schedule.
1555 */
c910b4d9 1556 timer_queue_shutdown(&cdp->rtclock_timer.queue);
39236c6e 1557 mp_cpus_call(cpu_to_cpumask(master_cpu), ASYNC, timer_queue_expire_local, NULL);
c910b4d9 1558
5ba3f43e
A
1559#if MONOTONIC
1560 mt_cpu_down(cdp);
1561#endif /* MONOTONIC */
1562
2d21ac55 1563 /*
bd504ef0
A
1564 * Open an interrupt window
1565 * and ensure any pending IPI or timer is serviced
2d21ac55 1566 */
bd504ef0
A
1567 mp_disable_preemption();
1568 ml_set_interrupts_enabled(TRUE);
1569
0a7de745 1570 while (cdp->cpu_signals && x86_lcpu()->rtcDeadline != EndOfAllTime) {
bd504ef0 1571 cpu_pause();
0a7de745 1572 }
bd504ef0
A
1573 /*
1574 * Ensure there's no remaining timer deadline set
1575 * - AICPM may have left one active.
1576 */
1577 setPop(0);
1578
1579 ml_set_interrupts_enabled(FALSE);
1580 mp_enable_preemption();
1581
1582 KERNEL_DEBUG_CONSTANT(
1583 TRACE_MP_CPU_DEACTIVATE | DBG_FUNC_END,
1584 0, 0, 0, 0, 0);
2d21ac55
A
1585}
1586
0a7de745 1587int pmsafe_debug = 1;
2d21ac55 1588
0a7de745
A
1589#if MACH_KDP
1590volatile boolean_t mp_kdp_trap = FALSE;
1591volatile boolean_t mp_kdp_is_NMI = FALSE;
1592volatile unsigned long mp_kdp_ncpus;
1593boolean_t mp_kdp_state;
91447636 1594
55e303ae
A
1595
1596void
5ba3f43e 1597mp_kdp_enter(boolean_t proceed_on_failure)
55e303ae 1598{
0a7de745
A
1599 unsigned int cpu;
1600 unsigned int ncpus = 0;
1601 unsigned int my_cpu;
1602 uint64_t tsc_timeout;
55e303ae
A
1603
1604 DBG("mp_kdp_enter()\n");
1605
1606 /*
1607 * Here to enter the debugger.
1608 * In case of races, only one cpu is allowed to enter kdp after
1609 * stopping others.
1610 */
91447636 1611 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
060df5ea 1612 my_cpu = cpu_number();
7ddcb079
A
1613
1614 if (my_cpu == (unsigned) debugger_cpu) {
1615 kprintf("\n\nRECURSIVE DEBUGGER ENTRY DETECTED\n\n");
1616 kdp_reset();
1617 return;
1618 }
1619
5ba3f43e
A
1620 uint64_t start_time = cpu_datap(my_cpu)->debugger_entry_time = mach_absolute_time();
1621 int locked = 0;
1622 while (!locked || mp_kdp_trap) {
1623 if (locked) {
1624 simple_unlock(&x86_topo_lock);
1625 }
1626 if (proceed_on_failure) {
1627 if (mach_absolute_time() - start_time > 500000000ll) {
d9a64523 1628 paniclog_append_noflush("mp_kdp_enter() can't get x86_topo_lock! Debugging anyway! #YOLO\n");
5ba3f43e
A
1629 break;
1630 }
0a7de745 1631 locked = simple_lock_try(&x86_topo_lock, LCK_GRP_NULL);
5ba3f43e
A
1632 if (!locked) {
1633 cpu_pause();
1634 }
1635 } else {
1636 mp_safe_spin_lock(&x86_topo_lock);
1637 locked = TRUE;
1638 }
2d21ac55 1639
5ba3f43e
A
1640 if (locked && mp_kdp_trap) {
1641 simple_unlock(&x86_topo_lock);
1642 DBG("mp_kdp_enter() race lost\n");
b0d623f7 1643#if MACH_KDP
5ba3f43e 1644 mp_kdp_wait(TRUE, FALSE);
b0d623f7 1645#endif
5ba3f43e
A
1646 locked = FALSE;
1647 }
55e303ae 1648 }
5ba3f43e 1649
0a7de745 1650 if (pmsafe_debug && !kdp_snapshot) {
5ba3f43e 1651 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
0a7de745 1652 }
5ba3f43e 1653
593a1d5f 1654 debugger_cpu = my_cpu;
060df5ea 1655 ncpus = 1;
5ba3f43e 1656 atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
55e303ae 1657 mp_kdp_trap = TRUE;
060df5ea 1658 debugger_entry_time = cpu_datap(my_cpu)->debugger_entry_time;
55e303ae 1659
0c530ab8
A
1660 /*
1661 * Deliver a nudge to other cpus, counting how many
1662 */
55e303ae 1663 DBG("mp_kdp_enter() signaling other processors\n");
2d21ac55 1664 if (force_immediate_debugger_NMI == FALSE) {
060df5ea 1665 for (cpu = 0; cpu < real_ncpus; cpu++) {
0a7de745 1666 if (cpu == my_cpu || !cpu_is_running(cpu)) {
2d21ac55 1667 continue;
0a7de745 1668 }
2d21ac55
A
1669 ncpus++;
1670 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1671 }
1672 /*
1673 * Wait other processors to synchronize
1674 */
1675 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
0c530ab8 1676
2d21ac55
A
1677 /*
1678 * This timeout is rather arbitrary; we don't want to NMI
1679 * processors that are executing at potentially
1680 * "unsafe-to-interrupt" points such as the trampolines,
1681 * but neither do we want to lose state by waiting too long.
1682 */
39037602 1683 tsc_timeout = rdtsc64() + (LockTimeOutTSC);
0c530ab8 1684
2d21ac55
A
1685 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
1686 /*
1687 * A TLB shootdown request may be pending--this would
1688 * result in the requesting processor waiting in
1689 * PMAP_UPDATE_TLBS() until this processor deals with it.
1690 * Process it, so it can now enter mp_kdp_wait()
1691 */
1692 handle_pending_TLB_flushes();
1693 cpu_pause();
1694 }
1695 /* If we've timed out, and some processor(s) are still unresponsive,
5c9f4661
A
1696 * interrupt them with an NMI via the local APIC, iff a panic is
1697 * in progress.
0c530ab8 1698 */
5c9f4661
A
1699 if (panic_active()) {
1700 NMIPI_enable(TRUE);
1701 }
2d21ac55 1702 if (mp_kdp_ncpus != ncpus) {
d9a64523 1703 unsigned int wait_cycles = 0;
0a7de745 1704 if (proceed_on_failure) {
d9a64523 1705 paniclog_append_noflush("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu);
0a7de745 1706 } else {
d9a64523 1707 DBG("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu);
0a7de745 1708 }
2d21ac55 1709 for (cpu = 0; cpu < real_ncpus; cpu++) {
0a7de745 1710 if (cpu == my_cpu || !cpu_is_running(cpu)) {
2d21ac55 1711 continue;
0a7de745 1712 }
a39ff7e2 1713 if (cpu_signal_pending(cpu, MP_KDP)) {
d9a64523 1714 cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE;
2d21ac55 1715 cpu_NMI_interrupt(cpu);
a39ff7e2 1716 }
2d21ac55 1717 }
39037602
A
1718 /* Wait again for the same timeout */
1719 tsc_timeout = rdtsc64() + (LockTimeOutTSC);
1720 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
1721 handle_pending_TLB_flushes();
1722 cpu_pause();
d9a64523 1723 ++wait_cycles;
39037602
A
1724 }
1725 if (mp_kdp_ncpus != ncpus) {
d9a64523
A
1726 paniclog_append_noflush("mp_kdp_enter() NMI pending on cpus:");
1727 for (cpu = 0; cpu < real_ncpus; cpu++) {
0a7de745 1728 if (cpu_is_running(cpu) && !cpu_datap(cpu)->cpu_NMI_acknowledged) {
d9a64523 1729 paniclog_append_noflush(" %d", cpu);
0a7de745 1730 }
d9a64523
A
1731 }
1732 paniclog_append_noflush("\n");
1733 if (proceed_on_failure) {
1734 paniclog_append_noflush("mp_kdp_enter() timed-out during %s wait after NMI;"
1735 "expected %u acks but received %lu after %u loops in %llu ticks\n",
0a7de745 1736 (locked ? "locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC);
d9a64523
A
1737 } else {
1738 panic("mp_kdp_enter() timed-out during %s wait after NMI;"
1739 "expected %u acks but received %lu after %u loops in %llu ticks",
0a7de745 1740 (locked ? "locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC);
d9a64523 1741 }
39037602 1742 }
2d21ac55 1743 }
0a7de745 1744 } else {
0c530ab8 1745 for (cpu = 0; cpu < real_ncpus; cpu++) {
0a7de745 1746 if (cpu == my_cpu || !cpu_is_running(cpu)) {
0c530ab8 1747 continue;
0a7de745 1748 }
2d21ac55 1749 cpu_NMI_interrupt(cpu);
0c530ab8 1750 }
0a7de745 1751 }
0c530ab8 1752
5ba3f43e
A
1753 if (locked) {
1754 simple_unlock(&x86_topo_lock);
1755 }
1756
bd504ef0 1757 DBG("mp_kdp_enter() %d processors done %s\n",
6d2010ae 1758 (int)mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
0a7de745 1759
91447636 1760 postcode(MP_KDP_ENTER);
55e303ae
A
1761}
1762
d9a64523
A
1763boolean_t
1764mp_kdp_all_cpus_halted()
1765{
1766 unsigned int ncpus = 0, cpu = 0, my_cpu = 0;
1767
1768 my_cpu = cpu_number();
1769 ncpus = 1; /* current CPU */
1770 for (cpu = 0; cpu < real_ncpus; cpu++) {
0a7de745 1771 if (cpu == my_cpu || !cpu_is_running(cpu)) {
d9a64523 1772 continue;
0a7de745 1773 }
d9a64523
A
1774 ncpus++;
1775 }
1776
0a7de745 1777 return mp_kdp_ncpus == ncpus;
d9a64523
A
1778}
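
/*
 * Editor's sketch (not part of the original source): one way a debugger
 * entry path might combine the routines above -- quiesce the other CPUs,
 * check that they all rendezvoused, do the debugger work, then release
 * them. The function name and the message are hypothetical; only
 * mp_kdp_enter(), mp_kdp_all_cpus_halted() and mp_kdp_exit() come from
 * this file.
 */
#if 0   /* illustrative only -- never compiled */
static void
example_debugger_session(void)
{
	/* Quiesce the other processors; keep going even if the x86
	 * topology lock cannot be taken within the timeout. */
	mp_kdp_enter(TRUE);

	if (!mp_kdp_all_cpus_halted()) {
		/* Some CPU never checked in (e.g. wedged with NMIs
		 * blocked); remote state may still be changing. */
		kprintf("example_debugger_session: not all CPUs halted\n");
	}

	/* ... inspect state while the other CPUs spin in mp_kdp_wait() ... */

	/* Release the spinning processors and restore the saved
	 * interrupt state. */
	mp_kdp_exit();
}
#endif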
1779
0c530ab8
A
1780static boolean_t
1781cpu_signal_pending(int cpu, mp_event_t event)
1782{
0a7de745 1783 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
0c530ab8
A
1784 boolean_t retval = FALSE;
1785
0a7de745 1786 if (i_bit(event, signals)) {
0c530ab8 1787 retval = TRUE;
0a7de745 1788 }
0c530ab8
A
1789 return retval;
1790}
b0d623f7 1791
0a7de745
A
1792long
1793kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func,
1794 void *arg0, void *arg1)
b0d623f7 1795{
0a7de745 1796 if (lcpu > (real_ncpus - 1)) {
b0d623f7 1797 return -1;
0a7de745 1798 }
b0d623f7 1799
0a7de745 1800 if (func == NULL) {
b0d623f7 1801 return -1;
0a7de745 1802 }
b0d623f7
A
1803
1804 kdp_xcpu_call_func.func = func;
0a7de745 1805 kdp_xcpu_call_func.ret = -1;
b0d623f7
A
1806 kdp_xcpu_call_func.arg0 = arg0;
1807 kdp_xcpu_call_func.arg1 = arg1;
1808 kdp_xcpu_call_func.cpu = lcpu;
1809 DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu);
0a7de745 1810 while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE) {
b0d623f7 1811 cpu_pause();
0a7de745
A
1812 }
1813 return kdp_xcpu_call_func.ret;
b0d623f7
A
1814}
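
/*
 * Editor's sketch (not part of the original source): invoking a callback on
 * another CPU while it is parked in mp_kdp_wait(). The callback and caller
 * names are hypothetical, and the callback signature (arg0, arg1, logical
 * cpu, returning long) is assumed from the dispatch in kdp_x86_xcpu_poll()
 * below.
 */
#if 0   /* illustrative only -- never compiled */
static long
read_remote_tsc(void *arg0, __unused void *arg1, uint16_t lcpu)
{
	/* Runs on the target CPU, called from its mp_kdp_wait() poll loop. */
	*(uint64_t *)arg0 = rdtsc64();
	return (long)lcpu;
}

static void
example_xcpu_call(uint16_t target_cpu)
{
	uint64_t remote_tsc = 0;
	long ret;

	/* Spins until the target CPU has picked up and run the callback. */
	ret = kdp_x86_xcpu_invoke(target_cpu, read_remote_tsc, &remote_tsc, NULL);
	kprintf("cpu %d TSC 0x%llx (ret %ld)\n", (int)target_cpu, remote_tsc, ret);
}
#endif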
1815
1816static void
1817kdp_x86_xcpu_poll(void)
1818{
1819 if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) {
0a7de745 1820 kdp_xcpu_call_func.ret =
b0d623f7 1821 kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0,
0a7de745
A
1822 kdp_xcpu_call_func.arg1,
1823 cpu_number());
b0d623f7
A
1824 kdp_xcpu_call_func.cpu = KDP_XCPU_NONE;
1825 }
1826}
0c530ab8 1827
55e303ae 1828static void
b0d623f7 1829mp_kdp_wait(boolean_t flush, boolean_t isNMI)
55e303ae 1830{
6601e61a 1831 DBG("mp_kdp_wait()\n");
813fb2f6 1832
bd504ef0 1833 current_cpu_datap()->debugger_ipi_time = mach_absolute_time();
b0d623f7 1834#if CONFIG_MCA
2d21ac55
A
1835 /* If we've trapped due to a machine-check, save MCA registers */
1836 mca_check_save();
b0d623f7 1837#endif
2d21ac55 1838
2d21ac55 1839 atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
b0d623f7 1840 while (mp_kdp_trap || (isNMI == TRUE)) {
0a7de745 1841 /*
2d21ac55
A
1842 * A TLB shootdown request may be pending--this would result
1843 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
1844 * until this processor handles it.
0c530ab8
A
1845 * Process it, so the requesting processor can proceed and enter mp_kdp_wait()
1846 */
0a7de745 1847 if (flush) {
2d21ac55 1848 handle_pending_TLB_flushes();
0a7de745 1849 }
b0d623f7
A
1850
1851 kdp_x86_xcpu_poll();
55e303ae
A
1852 cpu_pause();
1853 }
2d21ac55 1854
0c530ab8 1855 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
55e303ae
A
1856 DBG("mp_kdp_wait() done\n");
1857}
1858
1859void
1860mp_kdp_exit(void)
1861{
1862 DBG("mp_kdp_exit()\n");
593a1d5f 1863 debugger_cpu = -1;
0c530ab8 1864 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
b0d623f7
A
1865
1866 debugger_exit_time = mach_absolute_time();
1867
55e303ae 1868 mp_kdp_trap = FALSE;
39236c6e 1869 mfence();
55e303ae
A
1870
1871 /* Wait for other processors to stop spinning. XXX needs timeout */
1872 DBG("mp_kdp_exit() waiting for processors to resume\n");
0c530ab8 1873 while (mp_kdp_ncpus > 0) {
0a7de745 1874 /*
0c530ab8
A
1875 * a TLB shootdown request may be pending... this would result in the requesting
1876 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1877 * Process it, so the requesting processor can proceed
1878 */
0a7de745 1879 handle_pending_TLB_flushes();
0c530ab8 1880
55e303ae
A
1881 cpu_pause();
1882 }
2d21ac55 1883
0a7de745
A
1884 if (pmsafe_debug && !kdp_snapshot) {
1885 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
1886 }
2d21ac55 1887
6d2010ae
A
1888 debugger_exit_time = mach_absolute_time();
1889
55e303ae 1890 DBG("mp_kdp_exit() done\n");
91447636 1891 (void) ml_set_interrupts_enabled(mp_kdp_state);
5ba3f43e 1892 postcode(MP_KDP_EXIT);
39037602
A
1893}
1894
0a7de745 1895#endif /* MACH_KDP */
55e303ae 1896
b0d623f7 1897boolean_t
0a7de745
A
1898mp_recent_debugger_activity(void)
1899{
060df5ea 1900 uint64_t abstime = mach_absolute_time();
0a7de745
A
1901 return ((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) ||
1902 ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance);
b0d623f7
A
1903}
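
/*
 * Editor's sketch (not part of the original source): one plausible use of
 * mp_recent_debugger_activity() -- excusing an apparent stall that overlaps
 * a recent debugger entry/exit, during which the other processors are
 * deliberately frozen. The caller and its thresholds are hypothetical.
 */
#if 0   /* illustrative only -- never compiled */
static void
example_stall_check(uint64_t stall_abs, uint64_t limit_abs)
{
	if (stall_abs > limit_abs && !mp_recent_debugger_activity()) {
		panic("example_stall_check: stalled for %llu ticks", stall_abs);
	}
}
#endif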
1904
55e303ae
A
1905/*ARGSUSED*/
1906void
1907init_ast_check(
0a7de745 1908 __unused processor_t processor)
55e303ae
A
1909{
1910}
1911
1912void
1913cause_ast_check(
0a7de745 1914 processor_t processor)
55e303ae 1915{
0a7de745 1916 int cpu = processor->cpu_id;
55e303ae
A
1917
1918 if (cpu != cpu_number()) {
1919 i386_signal_cpu(cpu, MP_AST, ASYNC);
6d2010ae 1920 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), cpu, 1, 0, 0, 0);
55e303ae
A
1921 }
1922}
1923
593a1d5f
A
1924void
1925slave_machine_init(void *param)
91447636
A
1926{
1927 /*
0a7de745 1928 * Here in process context, but with interrupts disabled.
91447636
A
1929 */
1930 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1931
593a1d5f
A
1932 if (param == FULL_SLAVE_INIT) {
1933 /*
1934 * Cold start
1935 */
1936 clock_init();
593a1d5f 1937 }
0a7de745 1938 cpu_machine_init(); /* Interrupts enabled hereafter */
55e303ae
A
1939}
1940
b0d623f7 1941#undef cpu_number
0a7de745
A
1942int
1943cpu_number(void)
55e303ae
A
1944{
1945 return get_cpu_number();
1946}
1947
6d2010ae
A
1948static void
1949cpu_prewarm_init()
1950{
1951 int i;
1952
1953 simple_lock_init(&cpu_warm_lock, 0);
1954 queue_init(&cpu_warm_call_list);
1955 for (i = 0; i < NUM_CPU_WARM_CALLS; i++) {
1956 enqueue_head(&cpu_warm_call_list, (queue_entry_t)&cpu_warm_call_arr[i]);
1957 }
1958}
1959
1960static timer_call_t
1961grab_warm_timer_call()
1962{
1963 spl_t x;
1964 timer_call_t call = NULL;
1965
1966 x = splsched();
0a7de745 1967 simple_lock(&cpu_warm_lock, LCK_GRP_NULL);
6d2010ae
A
1968 if (!queue_empty(&cpu_warm_call_list)) {
1969 call = (timer_call_t) dequeue_head(&cpu_warm_call_list);
1970 }
1971 simple_unlock(&cpu_warm_lock);
1972 splx(x);
1973
1974 return call;
1975}
1976
1977static void
1978free_warm_timer_call(timer_call_t call)
1979{
1980 spl_t x;
1981
1982 x = splsched();
0a7de745 1983 simple_lock(&cpu_warm_lock, LCK_GRP_NULL);
6d2010ae
A
1984 enqueue_head(&cpu_warm_call_list, (queue_entry_t)call);
1985 simple_unlock(&cpu_warm_lock);
1986 splx(x);
1987}
1988
1989/*
1990 * Runs in timer call context (interrupts disabled).
1991 */
1992static void
1993cpu_warm_timer_call_func(
0a7de745
A
1994 call_entry_param_t p0,
1995 __unused call_entry_param_t p1)
6d2010ae
A
1996{
1997 free_warm_timer_call((timer_call_t)p0);
1998 return;
1999}
2000
2001/*
2002 * Runs with interrupts disabled on the CPU we wish to warm (i.e. CPU 0).
2003 */
2004static void
2005_cpu_warm_setup(
0a7de745 2006 void *arg)
6d2010ae
A
2007{
2008 cpu_warm_data_t cwdp = (cpu_warm_data_t)arg;
2009
39236c6e 2010 timer_call_enter(cwdp->cwd_call, cwdp->cwd_deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
6d2010ae
A
2011 cwdp->cwd_result = 0;
2012
2013 return;
2014}
2015
2016/*
2017 * Not safe to call with interrupts disabled.
2018 */
2019kern_return_t
2020ml_interrupt_prewarm(
0a7de745 2021 uint64_t deadline)
6d2010ae
A
2022{
2023 struct cpu_warm_data cwd;
2024 timer_call_t call;
2025 cpu_t ct;
2026
2027 if (ml_get_interrupts_enabled() == FALSE) {
2028 panic("%s: Interrupts disabled?\n", __FUNCTION__);
2029 }
2030
0a7de745
A
2031 /*
2032 * If the platform doesn't need our help, say that we succeeded.
6d2010ae
A
2033 */
2034 if (!ml_get_interrupt_prewake_applicable()) {
2035 return KERN_SUCCESS;
2036 }
2037
2038 /*
2039 * Grab a timer call to use.
2040 */
2041 call = grab_warm_timer_call();
2042 if (call == NULL) {
2043 return KERN_RESOURCE_SHORTAGE;
2044 }
2045
2046 timer_call_setup(call, cpu_warm_timer_call_func, call);
2047 cwd.cwd_call = call;
2048 cwd.cwd_deadline = deadline;
2049 cwd.cwd_result = 0;
2050
2051 /*
2052 * For now, non-local interrupts happen on the master processor.
2053 */
2054 ct = mp_cpus_call(cpu_to_cpumask(master_cpu), SYNC, _cpu_warm_setup, &cwd);
2055 if (ct == 0) {
2056 free_warm_timer_call(call);
2057 return KERN_FAILURE;
2058 } else {
2059 return cwd.cwd_result;
2060 }
2061}
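
/*
 * Editor's sketch (not part of the original source): arming the prewarm
 * timer for a deadline ~5 ms out. The wrapper name and the 5 ms figure are
 * arbitrary; ml_interrupt_prewarm() must be called with interrupts enabled,
 * per the check at the top of the routine above.
 */
#if 0   /* illustrative only -- never compiled */
static kern_return_t
example_prewarm_in_5ms(void)
{
	uint64_t delta_abs;

	/* Convert 5 ms to absolute-time units, then arm the warm timer. */
	nanoseconds_to_absolutetime(5000000ULL /* 5 ms in ns */, &delta_abs);
	return ml_interrupt_prewarm(mach_absolute_time() + delta_abs);
}
#endif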
39037602
A
2062
2063#if DEBUG || DEVELOPMENT
2064void
2065kernel_spin(uint64_t spin_ns)
2066{
0a7de745
A
2067 boolean_t istate;
2068 uint64_t spin_abs;
2069 uint64_t deadline;
2070 cpu_data_t *cdp;
39037602
A
2071
2072 kprintf("kernel_spin(%llu) spinning uninterruptibly\n", spin_ns);
2073 istate = ml_set_interrupts_enabled(FALSE);
5ba3f43e 2074 cdp = current_cpu_datap();
39037602 2075 nanoseconds_to_absolutetime(spin_ns, &spin_abs);
5ba3f43e
A
2076
2077 /* Fake interrupt handler entry for testing mp_interrupt_watchdog() */
2078 cdp->cpu_int_event_time = mach_absolute_time();
2079 cdp->cpu_int_state = (void *) USER_STATE(current_thread());
2080
39037602 2081 deadline = mach_absolute_time() + spin_ns;
0a7de745 2082 while (mach_absolute_time() < deadline) {
39037602 2083 cpu_pause();
0a7de745 2084 }
5ba3f43e
A
2085
2086 cdp->cpu_int_event_time = 0;
2087 cdp->cpu_int_state = NULL;
2088
39037602
A
2089 ml_set_interrupts_enabled(istate);
2090 kprintf("kernel_spin() continuing\n");
2091}
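
/*
 * Editor's sketch (not part of the original source): kernel_spin() is a
 * DEBUG/DEVELOPMENT test hook; a typical use is to fake a long-running
 * interrupt so that mp_interrupt_watchdog() below, running on another CPU,
 * has something to report. The wrapper name and the 200 ms figure are
 * arbitrary.
 */
#if 0   /* illustrative only -- never compiled */
static void
example_fake_long_interrupt(void)
{
	/* Spins ~200 ms with interrupts off and a faked interrupt frame;
	 * expect a watchdog log once this exceeds a quantum, and a panic
	 * if it exceeds LockTimeOut. */
	kernel_spin(200000000ULL);      /* 200 ms in nanoseconds */
}
#endif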
5ba3f43e
A
2092
2093/*
2094 * Called from the scheduler's maintenance thread;
2095 * scans running processors for long-running ISRs and will:
2096 * - panic if longer than LockTimeOut, or
2097 * - log if more than a quantum.
2098 */
2099void
2100mp_interrupt_watchdog(void)
2101{
0a7de745
A
2102 cpu_t cpu;
2103 boolean_t intrs_enabled = FALSE;
2104 uint16_t cpu_int_num;
2105 uint64_t cpu_int_event_time;
2106 uint64_t cpu_rip;
2107 uint64_t cpu_int_duration;
2108 uint64_t now;
2109 x86_saved_state_t *cpu_int_state;
2110
2111 if (__improbable(!mp_interrupt_watchdog_enabled)) {
5ba3f43e 2112 return;
0a7de745 2113 }
5ba3f43e
A
2114
2115 intrs_enabled = ml_set_interrupts_enabled(FALSE);
2116 now = mach_absolute_time();
2117 /*
2118 * While timeouts are not suspended,
2119 * check all other processors for long outstanding interrupt handling.
2120 */
2121 for (cpu = 0;
0a7de745
A
2122 cpu < (cpu_t) real_ncpus && !machine_timeout_suspended();
2123 cpu++) {
5ba3f43e 2124 if ((cpu == (cpu_t) cpu_number()) ||
0a7de745 2125 (!cpu_is_running(cpu))) {
5ba3f43e 2126 continue;
0a7de745 2127 }
5ba3f43e 2128 cpu_int_event_time = cpu_datap(cpu)->cpu_int_event_time;
0a7de745 2129 if (cpu_int_event_time == 0) {
5ba3f43e 2130 continue;
0a7de745
A
2131 }
2132 if (__improbable(now < cpu_int_event_time)) {
2133 continue; /* skip due to inter-processor skew */
2134 }
5ba3f43e 2135 cpu_int_state = cpu_datap(cpu)->cpu_int_state;
0a7de745 2136 if (__improbable(cpu_int_state == NULL)) {
5ba3f43e
A
2137 /* The interrupt may have been dismissed */
2138 continue;
0a7de745 2139 }
5ba3f43e
A
2140
2141 /* Here with a cpu handling an interrupt */
2142
2143 cpu_int_duration = now - cpu_int_event_time;
2144 if (__improbable(cpu_int_duration > LockTimeOut)) {
2145 cpu_int_num = saved_state64(cpu_int_state)->isf.trapno;
2146 cpu_rip = saved_state64(cpu_int_state)->isf.rip;
2147 vector_timed_out = cpu_int_num;
2148 NMIPI_panic(cpu_to_cpumask(cpu), INTERRUPT_WATCHDOG);
2149 panic("Interrupt watchdog, "
0a7de745
A
2150 "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx",
2151 cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip);
5ba3f43e
A
2152 /* NOT REACHED */
2153 } else if (__improbable(cpu_int_duration > (uint64_t) std_quantum)) {
2154 mp_interrupt_watchdog_events++;
2155 cpu_int_num = saved_state64(cpu_int_state)->isf.trapno;
2156 cpu_rip = saved_state64(cpu_int_state)->isf.rip;
2157 ml_set_interrupts_enabled(intrs_enabled);
2158 printf("Interrupt watchdog, "
0a7de745
A
2159 "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n",
2160 cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip);
5ba3f43e
A
2161 return;
2162 }
2163 }
2164
2165 ml_set_interrupts_enabled(intrs_enabled);
2166}
39037602 2167#endif