osfmk/i386/mp.c (apple/xnu, xnu-1699.22.73)
55e303ae 1/*
55e303ae 2 *
2d21ac55 3 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
55e303ae 4 *
5 * This file contains Original Code and/or Modifications of Original Code
6 * as defined in and that are subject to the Apple Public Source License
7 * Version 2.0 (the 'License'). You may not use this file except in
8 * compliance with the License. The rights granted to you under the License
9 * may not be used to create, or enable the creation or redistribution of,
10 * unlawful or unlicensed copies of an Apple operating system, or to
11 * circumvent, violate, or enable the circumvention or violation of, any
12 * terms of an Apple operating system software license agreement.
8f6c56a5 13 *
14 * Please obtain a copy of the License at
15 * http://www.opensource.apple.com/apsl/ and read it before using this file.
16 *
17 * The Original Code and all software distributed under the License are
18 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 * Please see the License for the specific language governing rights and
23 * limitations under the License.
8f6c56a5 24 *
2d21ac55 25 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26 */
27/*
28 * @OSF_COPYRIGHT@
29 */
30
31#include <mach_rt.h>
32#include <mach_kdb.h>
33#include <mach_kdp.h>
34#include <mach_ldebug.h>
35#include <gprof.h>
36
37#include <mach/mach_types.h>
38#include <mach/kern_return.h>
39
40#include <kern/kern_types.h>
41#include <kern/startup.h>
c910b4d9 42#include <kern/timer_queue.h>
43#include <kern/processor.h>
44#include <kern/cpu_number.h>
45#include <kern/cpu_data.h>
46#include <kern/assert.h>
47#include <kern/machine.h>
0c530ab8 48#include <kern/pms.h>
593a1d5f 49#include <kern/misc_protos.h>
7e4a7d39 50#include <kern/etimer.h>
51#include <kern/kalloc.h>
52#include <kern/queue.h>
53
54#include <vm/vm_map.h>
55#include <vm/vm_kern.h>
56
57#include <profiling/profile-mk.h>
55e303ae 58
59#include <i386/proc_reg.h>
60#include <i386/cpu_threads.h>
61#include <i386/mp_desc.h>
62#include <i386/misc_protos.h>
63#include <i386/trap.h>
64#include <i386/postcode.h>
65#include <i386/machine_routines.h>
66#include <i386/mp.h>
67#include <i386/mp_events.h>
593a1d5f 68#include <i386/lapic.h>
55e303ae 69#include <i386/cpuid.h>
b0d623f7 70#include <i386/fpu.h>
55e303ae 71#include <i386/machine_cpu.h>
0c530ab8 72#include <i386/pmCPU.h>
b0d623f7 73#if CONFIG_MCA
2d21ac55 74#include <i386/machine_check.h>
75#endif
76#include <i386/acpi.h>
77
78#include <chud/chud_xnu.h>
79#include <chud/chud_xnu_private.h>
80
81#include <sys/kdebug.h>
82#if MACH_KDB
b0d623f7 83#include <machine/db_machdep.h>
84#include <ddb/db_aout.h>
85#include <ddb/db_access.h>
86#include <ddb/db_sym.h>
87#include <ddb/db_variables.h>
88#include <ddb/db_command.h>
89#include <ddb/db_output.h>
90#include <ddb/db_expr.h>
91#endif
92
93#if MP_DEBUG
94#define PAUSE delay(1000000)
95#define DBG(x...) kprintf(x)
96#else
97#define DBG(x...)
98#define PAUSE
99#endif /* MP_DEBUG */
100
101/* Debugging/test trace events: */
102#define TRACE_MP_TLB_FLUSH MACHDBG_CODE(DBG_MACH_MP, 0)
103#define TRACE_MP_CPUS_CALL MACHDBG_CODE(DBG_MACH_MP, 1)
104#define TRACE_MP_CPUS_CALL_LOCAL MACHDBG_CODE(DBG_MACH_MP, 2)
105#define TRACE_MP_CPUS_CALL_ACTION MACHDBG_CODE(DBG_MACH_MP, 3)
106#define TRACE_MP_CPUS_CALL_NOBUF MACHDBG_CODE(DBG_MACH_MP, 4)
55e303ae 107
108#define ABS(v) (((v) > 0)?(v):-(v))
109
55e303ae 110void slave_boot_init(void);
6d2010ae 111void i386_cpu_IPI(int cpu);
55e303ae 112
113#if MACH_KDB
114static void mp_kdb_wait(void);
115volatile boolean_t mp_kdb_trap = FALSE;
116volatile long mp_kdb_ncpus = 0;
117#endif
118
b0d623f7 119static void mp_kdp_wait(boolean_t flush, boolean_t isNMI);
55e303ae 120static void mp_rendezvous_action(void);
2d21ac55 121static void mp_broadcast_action(void);
55e303ae 122
0c530ab8 123static boolean_t cpu_signal_pending(int cpu, mp_event_t event);
593a1d5f 124static int NMIInterruptHandler(x86_saved_state_t *regs);
0c530ab8 125
b0d623f7 126boolean_t smp_initialized = FALSE;
7e4a7d39 127uint32_t TSC_sync_margin = 0xFFF;
128volatile boolean_t force_immediate_debugger_NMI = FALSE;
129volatile boolean_t pmap_tlb_flush_timeout = FALSE;
55e303ae 130decl_simple_lock_data(,mp_kdp_lock);
91447636 131
132decl_lck_mtx_data(static, mp_cpu_boot_lock);
133lck_mtx_ext_t mp_cpu_boot_lock_ext;
134
135/* Variables needed for MP rendezvous. */
0c530ab8 136decl_simple_lock_data(,mp_rv_lock);
137static void (*mp_rv_setup_func)(void *arg);
138static void (*mp_rv_action_func)(void *arg);
139static void (*mp_rv_teardown_func)(void *arg);
140static void *mp_rv_func_arg;
141static volatile int mp_rv_ncpus;
142 /* Cache-aligned barriers: */
143static volatile long mp_rv_entry __attribute__((aligned(64)));
144static volatile long mp_rv_exit __attribute__((aligned(64)));
145static volatile long mp_rv_complete __attribute__((aligned(64)));
55e303ae 146
147volatile uint64_t debugger_entry_time;
148volatile uint64_t debugger_exit_time;
149#if MACH_KDP
150
d41d1dae 151extern int kdp_snapshot;
152static struct _kdp_xcpu_call_func {
153 kdp_x86_xcpu_func_t func;
154 void *arg0, *arg1;
155 volatile long ret;
156 volatile uint16_t cpu;
157} kdp_xcpu_call_func = {
158 .cpu = KDP_XCPU_NONE
159};
160
161#endif
162
163/* Variables needed for MP broadcast. */
164static void (*mp_bc_action_func)(void *arg);
165static void *mp_bc_func_arg;
593a1d5f 166static int mp_bc_ncpus;
2d21ac55 167static volatile long mp_bc_count;
168decl_lck_mtx_data(static, mp_bc_lock);
169lck_mtx_ext_t mp_bc_lock_ext;
593a1d5f 170static volatile int debugger_cpu = -1;
060df5ea 171volatile long NMIPI_acks = 0;
2d21ac55 172
173static void mp_cpus_call_init(void);
174static void mp_cpus_call_cpu_init(void);
2d21ac55 175static void mp_cpus_call_action(void);
c910b4d9 176static void mp_call_PM(void);
2d21ac55 177
178char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init
179
180/* PAL-related routines */
181boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler,
182 int ipi_vector, i386_intr_func_t ipi_handler);
183void i386_start_cpu(int lapic_id, int cpu_num);
184void i386_send_NMI(int cpu);
b0d623f7 185
186#if GPROF
187/*
188 * Initialize dummy structs for profiling. These aren't used but
189 * allow hertz_tick() to be built with GPROF defined.
190 */
191struct profile_vars _profile_vars;
192struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
193#define GPROF_INIT() \
194{ \
195 int i; \
196 \
197 /* Hack to initialize pointers to unused profiling structs */ \
198 for (i = 1; i < MAX_CPUS; i++) \
199 _profile_vars_cpus[i] = &_profile_vars; \
200}
201#else
202#define GPROF_INIT()
203#endif /* GPROF */
204
205static lck_grp_t smp_lck_grp;
206static lck_grp_attr_t smp_lck_grp_attr;
207
208#define NUM_CPU_WARM_CALLS 20
209struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS];
210queue_head_t cpu_warm_call_list;
211decl_simple_lock_data(static, cpu_warm_lock);
212
213typedef struct cpu_warm_data {
214 timer_call_t cwd_call;
215 uint64_t cwd_deadline;
216 int cwd_result;
217} *cpu_warm_data_t;
218
219static void cpu_prewarm_init(void);
220static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1);
221static void _cpu_warm_setup(void *arg);
222static timer_call_t grab_warm_timer_call(void);
223static void free_warm_timer_call(timer_call_t call);
b0d623f7 224
225void
226smp_init(void)
55e303ae 227{
228 simple_lock_init(&mp_kdp_lock, 0);
229 simple_lock_init(&mp_rv_lock, 0);
230 lck_grp_attr_setdefault(&smp_lck_grp_attr);
231 lck_grp_init(&smp_lck_grp, "i386_smp", &smp_lck_grp_attr);
232 lck_mtx_init_ext(&mp_cpu_boot_lock, &mp_cpu_boot_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
233 lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
91447636 234 console_init();
55e303ae 235
236 if(!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler,
237 LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler))
238 return;
239
240 cpu_thread_init();
241
242 GPROF_INIT();
243 DBGLOG_CPU_INIT(master_cpu);
244
245 mp_cpus_call_init();
246 mp_cpus_call_cpu_init();
55e303ae 247
248 if (PE_parse_boot_argn("TSC_sync_margin",
249 &TSC_sync_margin, sizeof(TSC_sync_margin)))
250 kprintf("TSC sync Margin 0x%x\n", TSC_sync_margin);
251 smp_initialized = TRUE;
252
253 cpu_prewarm_init();
254
255 return;
256}
257
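/*
 * Note added for illustration (not in the original source): TSC_sync_margin
 * above is controlled by a boot-arg; per the comment in start_cpu() below, a
 * value of 0 (e.g. booting with "TSC_sync_margin=0") disables the TSC
 * synchronization check entirely.
 */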
258typedef struct {
259 int target_cpu;
260 int target_lapic;
261 int starter_cpu;
262} processor_start_info_t;
263static processor_start_info_t start_info __attribute__((aligned(64)));
264
265/*
266 * Cache-alignment is to avoid cross-cpu false-sharing interference.
267 */
268static volatile long tsc_entry_barrier __attribute__((aligned(64)));
269static volatile long tsc_exit_barrier __attribute__((aligned(64)));
270static volatile uint64_t tsc_target __attribute__((aligned(64)));
271
0c530ab8 272/*
593a1d5f 273 * Poll a CPU to see when it has marked itself as running.
0c530ab8 274 */
275static void
276mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay)
91447636 277{
7e4a7d39 278 while (iters-- > 0) {
593a1d5f 279 if (cpu_datap(slot_num)->cpu_running)
7e4a7d39 280 break;
593a1d5f 281 delay(usecdelay);
91447636 282 }
283}
284
285/*
286 * Quickly bring a CPU back online which has been halted.
287 */
288kern_return_t
289intel_startCPU_fast(int slot_num)
290{
7e4a7d39 291 kern_return_t rc;
292
293 /*
294 * Try to perform a fast restart
295 */
296 rc = pmCPUExitHalt(slot_num);
297 if (rc != KERN_SUCCESS)
298 /*
299 * The CPU was not eligible for a fast restart.
300 */
301 return(rc);
302
303 /*
304 * Wait until the CPU is back online.
305 */
306 mp_disable_preemption();
307
308 /*
309 * We use short pauses (1us) for low latency. 30,000 iterations is
310 * longer than a full restart would require so it should be more
311 * than long enough.
312 */
6d2010ae 313
314 mp_wait_for_cpu_up(slot_num, 30000, 1);
315 mp_enable_preemption();
316
317 /*
318 * Check to make sure that the CPU is really running. If not,
319 * go through the slow path.
320 */
321 if (cpu_datap(slot_num)->cpu_running)
322 return(KERN_SUCCESS);
7e4a7d39 323 else
324 return(KERN_FAILURE);
325}
326
327static void
328started_cpu(void)
329{
330 /* Here on the started cpu with cpu_running set TRUE */
c910b4d9 331
332 if (TSC_sync_margin &&
333 start_info.target_cpu == cpu_number()) {
334 /*
335 * I've just started-up, synchronize again with the starter cpu
336 * and then snap my TSC.
337 */
338 tsc_target = 0;
339 atomic_decl(&tsc_entry_barrier, 1);
340 while (tsc_entry_barrier != 0)
341 ; /* spin for starter and target at barrier */
342 tsc_target = rdtsc64();
343 atomic_decl(&tsc_exit_barrier, 1);
344 }
345}
346
347static void
348start_cpu(void *arg)
349{
350 int i = 1000;
351 processor_start_info_t *psip = (processor_start_info_t *) arg;
352
353 /* Ignore this if the current processor is not the starter */
354 if (cpu_number() != psip->starter_cpu)
355 return;
356
6d2010ae 357 i386_start_cpu(psip->target_lapic, psip->target_cpu);
358
359#ifdef POSTCODE_DELAY
360 /* Wait much longer if postcodes are displayed for a delay period. */
361 i *= 10000;
362#endif
363 mp_wait_for_cpu_up(psip->target_cpu, i*100, 100);
364 if (TSC_sync_margin &&
365 cpu_datap(psip->target_cpu)->cpu_running) {
366 /*
367 * Compare the TSC from the started processor with ours.
368 * Report and log/panic if it diverges by more than
369 * TSC_sync_margin (TSC_SYNC_MARGIN) ticks. This margin
370 * can be overridden by boot-arg (with 0 meaning no checking).
371 */
372 uint64_t tsc_starter;
373 int64_t tsc_delta;
374 atomic_decl(&tsc_entry_barrier, 1);
375 while (tsc_entry_barrier != 0)
376 ; /* spin for both processors at barrier */
377 tsc_starter = rdtsc64();
378 atomic_decl(&tsc_exit_barrier, 1);
379 while (tsc_exit_barrier != 0)
380 ; /* spin for target to store its TSC */
381 tsc_delta = tsc_target - tsc_starter;
382 kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n",
383 psip->target_cpu, tsc_target, tsc_delta, tsc_delta);
384 if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) {
385#if DEBUG
386 panic(
387#else
388 printf(
389#endif
390 "Unsynchronized TSC for cpu %d: "
391 "0x%016llx, delta 0x%llx\n",
392 psip->target_cpu, tsc_target, tsc_delta);
393 }
394 }
395}
396
397extern char prot_mode_gdt[];
398extern char slave_boot_base[];
399extern char real_mode_bootstrap_base[];
400extern char real_mode_bootstrap_end[];
401extern char slave_boot_end[];
402
403kern_return_t
404intel_startCPU(
405 int slot_num)
406{
407 int lapic = cpu_to_lapic[slot_num];
408 boolean_t istate;
55e303ae 409
410 assert(lapic != -1);
411
412 DBGLOG_CPU_INIT(slot_num);
55e303ae 413
91447636 414 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
6d2010ae 415 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) (uintptr_t)IdlePTD);
55e303ae 416
417 /*
418 * Initialize (or re-initialize) the descriptor tables for this cpu.
419 * Propagate processor mode to slave.
420 */
421 if (cpu_mode_is64bit())
b0d623f7 422 cpu_desc_init64(cpu_datap(slot_num));
0c530ab8 423 else
b0d623f7 424 cpu_desc_init(cpu_datap(slot_num));
91447636 425
c910b4d9 426 /* Serialize use of the slave boot stack, etc. */
b0d623f7 427 lck_mtx_lock(&mp_cpu_boot_lock);
55e303ae 428
c910b4d9 429 istate = ml_set_interrupts_enabled(FALSE);
91447636 430 if (slot_num == get_cpu_number()) {
c910b4d9 431 ml_set_interrupts_enabled(istate);
b0d623f7 432 lck_mtx_unlock(&mp_cpu_boot_lock);
433 return KERN_SUCCESS;
434 }
55e303ae 435
436 start_info.starter_cpu = cpu_number();
437 start_info.target_cpu = slot_num;
c910b4d9 438 start_info.target_lapic = lapic;
439 tsc_entry_barrier = 2;
440 tsc_exit_barrier = 2;
55e303ae 441
c910b4d9 442 /*
b0d623f7 443 * Perform the processor startup sequence with all running
444 * processors rendezvous'ed. This is required during periods when
445 * the cache-disable bit is set for MTRR/PAT initialization.
446 */
b0d623f7 447 mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);
55e303ae 448
449 start_info.target_cpu = 0;
450
c910b4d9 451 ml_set_interrupts_enabled(istate);
b0d623f7 452 lck_mtx_unlock(&mp_cpu_boot_lock);
55e303ae 453
91447636 454 if (!cpu_datap(slot_num)->cpu_running) {
0c530ab8 455 kprintf("Failed to start CPU %02d\n", slot_num);
456 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
457 delay(1000000);
b0d623f7 458 halt_cpu();
459 return KERN_SUCCESS;
460 } else {
2d21ac55 461 kprintf("Started cpu %d (lapic id %08x)\n", slot_num, lapic);
462 return KERN_SUCCESS;
463 }
464}
465
55e303ae 466#if MP_DEBUG
467cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
468cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
469
470MP_EVENT_NAME_DECL();
471
472#endif /* MP_DEBUG */
473
593a1d5f 474int
0c530ab8 475cpu_signal_handler(x86_saved_state_t *regs)
55e303ae 476{
91447636 477 int my_cpu;
478 volatile int *my_word;
479#if MACH_KDB && MACH_ASSERT
480 int i=100;
481#endif /* MACH_KDB && MACH_ASSERT */
482
6d2010ae 483 SCHED_STATS_IPI(current_processor());
484
485 my_cpu = cpu_number();
486 my_word = &cpu_data_ptr[my_cpu]->cpu_signals;
487 /* Store the initial set of signals for diagnostics. New
488 * signals could arrive while these are being processed
489 * so it's no more than a hint.
490 */
6d2010ae 491
060df5ea 492 cpu_data_ptr[my_cpu]->cpu_prior_signals = *my_word;
493
494 do {
495#if MACH_KDB && MACH_ASSERT
496 if (i-- <= 0)
0c530ab8 497 Debugger("cpu_signal_handler: signals did not clear");
498#endif /* MACH_KDB && MACH_ASSERT */
499#if MACH_KDP
500 if (i_bit(MP_KDP, my_word)) {
501 DBGLOG(cpu_handle,my_cpu,MP_KDP);
502 i_bit_clear(MP_KDP, my_word);
503/* Ensure that the i386_kernel_state at the base of the
504 * current thread's stack (if any) is synchronized with the
505 * context at the moment of the interrupt, to facilitate
506 * access through the debugger.
0c530ab8 507 */
b0d623f7 508 sync_iss_to_iks(regs);
509 if (pmsafe_debug && !kdp_snapshot)
510 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
b0d623f7 511 mp_kdp_wait(TRUE, FALSE);
512 if (pmsafe_debug && !kdp_snapshot)
513 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
514 } else
515#endif /* MACH_KDP */
91447636 516 if (i_bit(MP_TLB_FLUSH, my_word)) {
517 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
518 i_bit_clear(MP_TLB_FLUSH, my_word);
519 pmap_update_interrupt();
520 } else if (i_bit(MP_AST, my_word)) {
521 DBGLOG(cpu_handle,my_cpu,MP_AST);
522 i_bit_clear(MP_AST, my_word);
523 ast_check(cpu_to_processor(my_cpu));
524#if MACH_KDB
525 } else if (i_bit(MP_KDB, my_word)) {
526
527 i_bit_clear(MP_KDB, my_word);
528 current_cpu_datap()->cpu_kdb_is_slave++;
529 mp_kdb_wait();
530 current_cpu_datap()->cpu_kdb_is_slave--;
531#endif /* MACH_KDB */
532 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
533 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
534 i_bit_clear(MP_RENDEZVOUS, my_word);
535 mp_rendezvous_action();
536 } else if (i_bit(MP_BROADCAST, my_word)) {
537 DBGLOG(cpu_handle,my_cpu,MP_BROADCAST);
538 i_bit_clear(MP_BROADCAST, my_word);
539 mp_broadcast_action();
540 } else if (i_bit(MP_CHUD, my_word)) {
541 DBGLOG(cpu_handle,my_cpu,MP_CHUD);
542 i_bit_clear(MP_CHUD, my_word);
543 chudxnu_cpu_signal_handler();
544 } else if (i_bit(MP_CALL, my_word)) {
545 DBGLOG(cpu_handle,my_cpu,MP_CALL);
546 i_bit_clear(MP_CALL, my_word);
547 mp_cpus_call_action();
548 } else if (i_bit(MP_CALL_PM, my_word)) {
549 DBGLOG(cpu_handle,my_cpu,MP_CALL_PM);
550 i_bit_clear(MP_CALL_PM, my_word);
551 mp_call_PM();
552 }
553 } while (*my_word);
554
593a1d5f 555 return 0;
556}
557
593a1d5f 558static int
2d21ac55 559NMIInterruptHandler(x86_saved_state_t *regs)
0c530ab8 560{
935ed37a 561 void *stackptr;
060df5ea 562
563 if (panic_active() && !panicDebugging) {
564 if (pmsafe_debug)
565 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
566 for(;;)
567 cpu_pause();
568 }
569
060df5ea 570 atomic_incl(&NMIPI_acks, 1);
0c530ab8 571 sync_iss_to_iks_unconditionally(regs);
b0d623f7 572#if defined (__i386__)
935ed37a 573 __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
574#elif defined (__x86_64__)
575 __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
576#endif
935ed37a 577
578 if (cpu_number() == debugger_cpu)
579 goto NMExit;
580
581 if (spinlock_timed_out) {
582 char pstr[160];
583 snprintf(&pstr[0], sizeof(pstr), "Panic(CPU %d): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n", cpu_number(), spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu);
584 panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs);
060df5ea 585 } else if (pmap_tlb_flush_timeout == TRUE) {
593a1d5f 586 char pstr[128];
587 snprintf(&pstr[0], sizeof(pstr), "Panic(CPU %d): Unresponsive processor (this CPU did not acknowledge interrupts) TLB state:%d\n", cpu_number(), current_cpu_datap()->cpu_tlb_invalid);
588 panic_i386_backtrace(stackptr, 48, &pstr[0], TRUE, regs);
935ed37a 589 }
590
591#if MACH_KDP
592 if (pmsafe_debug && !kdp_snapshot)
593 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
594 current_cpu_datap()->cpu_NMI_acknowledged = TRUE;
595 mp_kdp_wait(FALSE, pmap_tlb_flush_timeout || spinlock_timed_out || panic_active());
596 if (pmsafe_debug && !kdp_snapshot)
597 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
b0d623f7 598#endif
593a1d5f 599NMExit:
600 return 1;
601}
602
603
604/*
605 * cpu_interrupt is really just to be used by the scheduler to
606 * get a CPU's attention; it may not always issue an IPI. If an
607 * IPI is always needed then use i386_cpu_IPI.
608 */
609void
610cpu_interrupt(int cpu)
611{
612 boolean_t did_IPI = FALSE;
613
614 if (smp_initialized
615 && pmCPUExitIdle(cpu_datap(cpu))) {
616 i386_cpu_IPI(cpu);
6d2010ae 617 did_IPI = TRUE;
2d21ac55 618 }
619
620 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), cpu, did_IPI, 0, 0, 0);
621}
622
623/*
624 * Send a true NMI via the local APIC to the specified CPU.
625 */
935ed37a 626void
627cpu_NMI_interrupt(int cpu)
628{
0c530ab8 629 if (smp_initialized) {
6d2010ae 630 i386_send_NMI(cpu);
0c530ab8 631 }
632}
633
b0d623f7 634static void (* volatile mp_PM_func)(void) = NULL;
635
636static void
637mp_call_PM(void)
638{
639 assert(!ml_get_interrupts_enabled());
640
641 if (mp_PM_func != NULL)
642 mp_PM_func();
643}
644
645void
646cpu_PM_interrupt(int cpu)
647{
648 assert(!ml_get_interrupts_enabled());
649
650 if (mp_PM_func != NULL) {
651 if (cpu == cpu_number())
652 mp_PM_func();
653 else
654 i386_signal_cpu(cpu, MP_CALL_PM, ASYNC);
655 }
656}
657
658void
659PM_interrupt_register(void (*fn)(void))
660{
661 mp_PM_func = fn;
662}
663
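/*
 * Illustrative sketch (not part of the original file): how a power-management
 * component might use PM_interrupt_register() and cpu_PM_interrupt().  The
 * example_* names are hypothetical; the registration and the cross-cpu
 * delivery via MP_CALL_PM follow the routines above.
 */
#if 0	/* example only */
static void
example_PM_handler(void)
{
	/* Called with interrupts disabled, on the cpu named in cpu_PM_interrupt() */
}

static void
example_PM_setup(void)
{
	boolean_t intrs = ml_set_interrupts_enabled(FALSE);

	PM_interrupt_register(example_PM_handler);
	cpu_PM_interrupt(1);	/* run the handler on cpu 1 (direct call if cpu 1 is self) */

	ml_set_interrupts_enabled(intrs);
}
#endif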
664void
665i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
666{
667 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
668 uint64_t tsc_timeout;
6601e61a 669
0c530ab8 670
91447636 671 if (!cpu_datap(cpu)->cpu_running)
672 return;
673
0c530ab8 674 if (event == MP_TLB_FLUSH)
6d2010ae 675 KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0);
6601e61a 676
677 DBGLOG(cpu_signal, cpu, event);
678
55e303ae 679 i_bit_set(event, signals);
2d21ac55 680 i386_cpu_IPI(cpu);
681 if (mode == SYNC) {
682 again:
683 tsc_timeout = rdtsc64() + (1000*1000*1000);
684 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
685 cpu_pause();
686 }
687 if (i_bit(event, signals)) {
688 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
689 cpu, event);
690 goto again;
691 }
692 }
0c530ab8 693 if (event == MP_TLB_FLUSH)
6d2010ae 694 KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0);
695}
696
697/*
698 * Send event to all running cpus.
699 * Called with the topology locked.
700 */
701void
702i386_signal_cpus(mp_event_t event, mp_sync_t mode)
703{
704 unsigned int cpu;
705 unsigned int my_cpu = cpu_number();
55e303ae 706
b0d623f7 707 assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
2d21ac55 708
709 for (cpu = 0; cpu < real_ncpus; cpu++) {
710 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
711 continue;
712 i386_signal_cpu(cpu, event, mode);
713 }
714}
715
716/*
717 * Return the number of running cpus.
718 * Called with the topology locked.
719 */
720int
721i386_active_cpus(void)
722{
723 unsigned int cpu;
724 unsigned int ncpus = 0;
55e303ae 725
b0d623f7 726 assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
2d21ac55 727
728 for (cpu = 0; cpu < real_ncpus; cpu++) {
729 if (cpu_datap(cpu)->cpu_running)
730 ncpus++;
731 }
732 return(ncpus);
733}
734
735/*
736 * All-CPU rendezvous:
737 * - CPUs are signalled,
738 * - all execute the setup function (if specified),
739 * - rendezvous (i.e. all cpus reach a barrier),
740 * - all execute the action function (if specified),
741 * - rendezvous again,
742 * - execute the teardown function (if specified), and then
743 * - resume.
744 *
745 * Note that the supplied external functions _must_ be reentrant and aware
746 * that they are running in parallel and in an unknown lock context.
747 */
748
749static void
750mp_rendezvous_action(void)
751{
2d21ac55 752 boolean_t intrs_enabled;
753
754 /* setup function */
755 if (mp_rv_setup_func != NULL)
756 mp_rv_setup_func(mp_rv_func_arg);
757
758 intrs_enabled = ml_get_interrupts_enabled();
759
55e303ae 760 /* spin on entry rendezvous */
761 atomic_incl(&mp_rv_entry, 1);
762 while (mp_rv_entry < mp_rv_ncpus) {
763 /* poll for pesky tlb flushes if interrupts disabled */
764 if (!intrs_enabled)
765 handle_pending_TLB_flushes();
55e303ae 766 cpu_pause();
0c530ab8 767 }
6d2010ae 768
769 /* action function */
770 if (mp_rv_action_func != NULL)
771 mp_rv_action_func(mp_rv_func_arg);
6d2010ae 772
55e303ae 773 /* spin on exit rendezvous */
0c530ab8 774 atomic_incl(&mp_rv_exit, 1);
775 while (mp_rv_exit < mp_rv_ncpus) {
776 if (!intrs_enabled)
777 handle_pending_TLB_flushes();
55e303ae 778 cpu_pause();
2d21ac55 779 }
6d2010ae 780
781 /* teardown function */
782 if (mp_rv_teardown_func != NULL)
783 mp_rv_teardown_func(mp_rv_func_arg);
784
785 /* Bump completion count */
786 atomic_incl(&mp_rv_complete, 1);
787}
788
789void
790mp_rendezvous(void (*setup_func)(void *),
791 void (*action_func)(void *),
792 void (*teardown_func)(void *),
793 void *arg)
794{
795
796 if (!smp_initialized) {
797 if (setup_func != NULL)
798 setup_func(arg);
799 if (action_func != NULL)
800 action_func(arg);
801 if (teardown_func != NULL)
802 teardown_func(arg);
803 return;
804 }
805
806 /* obtain rendezvous lock */
807 simple_lock(&mp_rv_lock);
808
809 /* set static function pointers */
810 mp_rv_setup_func = setup_func;
811 mp_rv_action_func = action_func;
812 mp_rv_teardown_func = teardown_func;
813 mp_rv_func_arg = arg;
814
815 mp_rv_entry = 0;
816 mp_rv_exit = 0;
817 mp_rv_complete = 0;
818
819 /*
820 * signal other processors, which will call mp_rendezvous_action()
2d21ac55 821 * with interrupts disabled
55e303ae 822 */
2d21ac55 823 simple_lock(&x86_topo_lock);
0c530ab8 824 mp_rv_ncpus = i386_active_cpus();
55e303ae 825 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
2d21ac55 826 simple_unlock(&x86_topo_lock);
827
828 /* call executor function on this cpu */
829 mp_rendezvous_action();
830
831 /*
832 * Spin for everyone to complete.
833 * This is necessary to ensure that all processors have proceeded
834 * from the exit barrier before we release the rendezvous structure.
835 */
836 while (mp_rv_complete < mp_rv_ncpus) {
837 cpu_pause();
838 }
839
840 /* Tidy up */
841 mp_rv_setup_func = NULL;
842 mp_rv_action_func = NULL;
843 mp_rv_teardown_func = NULL;
844 mp_rv_func_arg = NULL;
845
846 /* release lock */
847 simple_unlock(&mp_rv_lock);
848}
849
850void
851mp_rendezvous_break_lock(void)
852{
853 simple_lock_init(&mp_rv_lock, 0);
854}
855
856static void
857setup_disable_intrs(__unused void * param_not_used)
858{
859 /* disable interrupts before the first barrier */
860 boolean_t intr = ml_set_interrupts_enabled(FALSE);
861
862 current_cpu_datap()->cpu_iflag = intr;
863 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
864}
865
866static void
867teardown_restore_intrs(__unused void * param_not_used)
868{
869 /* restore interrupt flag following MTRR changes */
870 ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
871 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
872}
873
874/*
875 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
876 * This is exported for use by kexts.
877 */
878void
879mp_rendezvous_no_intrs(
880 void (*action_func)(void *),
881 void *arg)
882{
883 mp_rendezvous(setup_disable_intrs,
884 action_func,
885 teardown_restore_intrs,
886 arg);
887}
888
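/*
 * Illustrative sketch (not from the original source): a caller-side use of
 * mp_rendezvous_no_intrs().  The action routine runs once on every running
 * cpu with interrupts disabled; the example_* names are hypothetical.
 */
#if 0	/* example only */
static volatile long example_rv_count;

static void
example_rv_action(void *arg)
{
	/* Executed in parallel on each cpu participating in the rendezvous */
	atomic_incl((volatile long *) arg, 1);
}

static void
example_rv_run(void)
{
	example_rv_count = 0;
	mp_rendezvous_no_intrs(example_rv_action, (void *) &example_rv_count);
	/* On return, every running cpu has executed example_rv_action() */
}
#endif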
889
890typedef struct {
891 queue_chain_t link; /* queue linkage */
892 void (*func)(void *,void *); /* routine to call */
893 void *arg0; /* routine's 1st arg */
894 void *arg1; /* routine's 2nd arg */
895 volatile long *countp; /* completion counter */
896} mp_call_t;
897
898#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS
899static queue_head_t mp_cpus_call_freelist;
900static queue_head_t mp_cpus_call_queue[MAX_CPUS];
901/*
902 * The free list and the per-cpu call queues are protected by the following
903 * lock which is taken with interrupts disabled.
904 */
905decl_simple_lock_data(,mp_cpus_call_lock);
906
907static inline boolean_t
908mp_call_lock(void)
909{
910 boolean_t intrs_enabled;
911
912 intrs_enabled = ml_set_interrupts_enabled(FALSE);
913 simple_lock(&mp_cpus_call_lock);
914
915 return intrs_enabled;
916}
917
918static inline boolean_t
919mp_call_is_locked(void)
920{
921 return !ml_get_interrupts_enabled() &&
922 hw_lock_held((hw_lock_t)&mp_cpus_call_lock);
923}
924
925static inline void
926mp_call_unlock(boolean_t intrs_enabled)
927{
928 simple_unlock(&mp_cpus_call_lock);
929 ml_set_interrupts_enabled(intrs_enabled);
930}
931
932static inline mp_call_t *
933mp_call_alloc(void)
934{
935 mp_call_t *callp;
936
937 assert(mp_call_is_locked());
938 if (queue_empty(&mp_cpus_call_freelist))
939 return NULL;
940 queue_remove_first(&mp_cpus_call_freelist, callp, typeof(callp), link);
941 return callp;
942}
943
944static inline void
945mp_call_free(mp_call_t *callp)
0c530ab8 946{
947 assert(mp_call_is_locked());
948 queue_enter_first(&mp_cpus_call_freelist, callp, typeof(callp), link);
949}
950
951static inline mp_call_t *
952mp_call_dequeue(queue_t call_queue)
953{
954 mp_call_t *callp;
0c530ab8 955
956 assert(mp_call_is_locked());
957 if (queue_empty(call_queue))
958 return NULL;
959 queue_remove_first(call_queue, callp, typeof(callp), link);
960 return callp;
961}
962
963/* Called on the boot processor to initialize global structures */
964static void
965mp_cpus_call_init(void)
966{
967 DBG("mp_cpus_call_init()\n");
968 simple_lock_init(&mp_cpus_call_lock, 0);
969 queue_init(&mp_cpus_call_freelist);
970}
971
972/*
973 * Called by each processor to add call buffers to the free list
974 * and to initialize the per-cpu call queue.
975 * Also called but ignored on slave processors on re-start/wake.
976 */
977static void
978mp_cpus_call_cpu_init(void)
979{
980 boolean_t intrs_enabled;
981 int i;
982 mp_call_t *callp;
983
984 if (mp_cpus_call_queue[cpu_number()].next != NULL)
985 return; /* restart/wake case: called already */
986
987 queue_init(&mp_cpus_call_queue[cpu_number()]);
988 for (i = 0; i < MP_CPUS_CALL_BUFS_PER_CPU; i++) {
989 callp = (mp_call_t *) kalloc(sizeof(mp_call_t));
990 intrs_enabled = mp_call_lock();
991 mp_call_free(callp);
992 mp_call_unlock(intrs_enabled);
0c530ab8 993 }
994
995 DBG("mp_cpus_call_cpu_init() done on cpu %d\n", cpu_number());
996}
997
998/*
999 * This is called from cpu_signal_handler() to process an MP_CALL signal.
6d2010ae 1000 * And also from i386_deactivate_cpu() when a cpu is being taken offline.
1001 */
1002static void
1003mp_cpus_call_action(void)
1004{
1005 queue_t cpu_head;
1006 boolean_t intrs_enabled;
1007 mp_call_t *callp;
1008 mp_call_t call;
1009
1010 assert(!ml_get_interrupts_enabled());
1011 cpu_head = &mp_cpus_call_queue[cpu_number()];
1012 intrs_enabled = mp_call_lock();
1013 while ((callp = mp_call_dequeue(cpu_head)) != NULL) {
1014 /* Copy call request to the stack to free buffer */
1015 call = *callp;
1016 mp_call_free(callp);
1017 if (call.func != NULL) {
1018 mp_call_unlock(intrs_enabled);
1019 KERNEL_DEBUG_CONSTANT(
1020 TRACE_MP_CPUS_CALL_ACTION,
1021 call.func, call.arg0, call.arg1, call.countp, 0);
1022 call.func(call.arg0, call.arg1);
1023 (void) mp_call_lock();
1024 }
1025 if (call.countp != NULL)
1026 atomic_incl(call.countp, 1);
1027 }
1028 mp_call_unlock(intrs_enabled);
1029}
1030
1031static boolean_t
1032mp_call_queue(
1033 int cpu,
1034 void (*action_func)(void *, void *),
1035 void *arg0,
1036 void *arg1,
1037 volatile long *countp)
1038{
1039 queue_t cpu_head = &mp_cpus_call_queue[cpu];
1040 mp_call_t *callp;
1041
1042 assert(mp_call_is_locked());
1043 callp = mp_call_alloc();
1044 if (callp == NULL)
1045 return FALSE;
1046
1047 callp->func = action_func;
1048 callp->arg0 = arg0;
1049 callp->arg1 = arg1;
1050 callp->countp = countp;
1051
1052 queue_enter(cpu_head, callp, typeof(callp), link);
1053
1054 return TRUE;
1055}
1056
1057/*
1058 * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
1059 * Possible modes are:
1060 * SYNC: function is called serially on target cpus in logical cpu order
1061 * waiting for each call to be acknowledged before proceeding
1062 * ASYNC: function call is queued to the specified cpus
1063 * waiting for all calls to complete in parallel before returning
1064 * NOSYNC: function calls are queued
1065 * but we return before confirmation of calls completing.
1066 * The action function may be NULL.
1067 * The cpu mask may include the local cpu. Offline cpus are ignored.
6d2010ae 1068 * The return value is the number of cpus on which the call was made or queued.
1069 */
1070cpu_t
1071mp_cpus_call(
1072 cpumask_t cpus,
1073 mp_sync_t mode,
1074 void (*action_func)(void *),
1075 void *arg)
1076{
1077 return mp_cpus_call1(
1078 cpus,
1079 mode,
1080 (void (*)(void *,void *))action_func,
1081 arg,
1082 NULL,
1083 NULL,
1084 NULL);
1085}
1086
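/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * mp_cpus_call().  The action routine and argument are hypothetical; the
 * call pattern mirrors ml_interrupt_prewarm() below, which targets the
 * master cpu synchronously.
 */
#if 0	/* example only */
static void
example_call_action(void *arg)
{
	/* Runs on the target cpu; with SYNC the caller waits for completion */
	*(int *) arg = cpu_number();
}

static void
example_call_master(void)
{
	int	where = -1;
	cpu_t	count;

	count = mp_cpus_call(cpu_to_cpumask(master_cpu), SYNC,
			     example_call_action, &where);
	/* count is the number of cpus on which the action was called */
}
#endif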
1087static void
1088mp_cpus_call_wait(boolean_t intrs_enabled,
1089 long mp_cpus_signals,
1090 volatile long *mp_cpus_calls)
1091{
1092 queue_t cpu_head;
1093
1094 cpu_head = &mp_cpus_call_queue[cpu_number()];
1095
1096 while (*mp_cpus_calls < mp_cpus_signals) {
1097 if (!intrs_enabled) {
1098 if (!queue_empty(cpu_head))
1099 mp_cpus_call_action();
1100
1101 handle_pending_TLB_flushes();
1102 }
1103 cpu_pause();
1104 }
1105}
1106
1107cpu_t
1108mp_cpus_call1(
1109 cpumask_t cpus,
1110 mp_sync_t mode,
1111 void (*action_func)(void *, void *),
1112 void *arg0,
1113 void *arg1,
1114 cpumask_t *cpus_calledp,
1115 cpumask_t *cpus_notcalledp)
1116{
1117 cpu_t cpu;
6d2010ae 1118 boolean_t intrs_enabled = FALSE;
2d21ac55 1119 boolean_t call_self = FALSE;
1120 cpumask_t cpus_called = 0;
1121 cpumask_t cpus_notcalled = 0;
1122 long mp_cpus_signals = 0;
1123 volatile long mp_cpus_calls = 0;
1124
1125 KERNEL_DEBUG_CONSTANT(
1126 TRACE_MP_CPUS_CALL | DBG_FUNC_START,
1127 cpus, mode, action_func, arg0, arg1);
1128
1129 if (!smp_initialized) {
1130 if ((cpus & CPUMASK_SELF) == 0)
6d2010ae 1131 goto out;
2d21ac55 1132 if (action_func != NULL) {
1133 intrs_enabled = ml_set_interrupts_enabled(FALSE);
1134 action_func(arg0, arg1);
1135 ml_set_interrupts_enabled(intrs_enabled);
1136 }
1137 call_self = TRUE;
1138 goto out;
2d21ac55 1139 }
2d21ac55 1140
1141 /*
1142 * Queue the call for each non-local requested cpu.
1143 * The topo lock is not taken. Instead we sniff the cpu_running state
1144 * and then re-check it after taking the call lock. A cpu being taken
1145 * offline runs the action function after clearing the cpu_running.
1146 */
1147 for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
1148 if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
1149 !cpu_datap(cpu)->cpu_running)
1150 continue;
1151 if (cpu == (cpu_t) cpu_number()) {
1152 /*
1153 * We don't IPI ourself and if calling asynchronously,
1154 * we defer our call until we have signalled all others.
1155 */
1156 call_self = TRUE;
6d2010ae 1157 cpus_called |= cpu_to_cpumask(cpu);
2d21ac55 1158 if (mode == SYNC && action_func != NULL) {
1159 KERNEL_DEBUG_CONSTANT(
1160 TRACE_MP_CPUS_CALL_LOCAL,
1161 action_func, arg0, arg1, 0, 0);
1162 action_func(arg0, arg1);
1163 }
1164 } else {
1165 /*
1166 * Here to queue a call to cpu and IPI.
1167 * Spinning for request buffer unless NOSYNC.
2d21ac55 1168 */
1169 queue_call:
1170 intrs_enabled = mp_call_lock();
1171 if (!cpu_datap(cpu)->cpu_running) {
1172 mp_call_unlock(intrs_enabled);
1173 continue;
1174 }
1175 if (mode == NOSYNC) {
1176 if (!mp_call_queue(cpu, action_func, arg0, arg1,
1177 NULL)) {
1178 cpus_notcalled |= cpu_to_cpumask(cpu);
1179 mp_call_unlock(intrs_enabled);
1180 KERNEL_DEBUG_CONSTANT(
1181 TRACE_MP_CPUS_CALL_NOBUF,
1182 cpu, 0, 0, 0, 0);
1183 continue;
1184 }
1185 } else {
1186 if (!mp_call_queue(cpu, action_func, arg0, arg1,
1187 &mp_cpus_calls)) {
1188 mp_call_unlock(intrs_enabled);
1189 KERNEL_DEBUG_CONSTANT(
1190 TRACE_MP_CPUS_CALL_NOBUF,
1191 cpu, 0, 0, 0, 0);
1192 if (!intrs_enabled) {
1193 mp_cpus_call_action();
2d21ac55 1194 handle_pending_TLB_flushes();
6d2010ae 1195 }
2d21ac55 1196 cpu_pause();
6d2010ae 1197 goto queue_call;
2d21ac55 1198 }
1199 }
1200 mp_cpus_signals++;
1201 cpus_called |= cpu_to_cpumask(cpu);
1202 i386_signal_cpu(cpu, MP_CALL, ASYNC);
1203 mp_call_unlock(intrs_enabled);
1204 if (mode == SYNC) {
1205 mp_cpus_call_wait(intrs_enabled, mp_cpus_signals, &mp_cpus_calls);
1206 }
1207 }
1208 }
2d21ac55 1209
1210 /* Call locally if mode not SYNC */
1211 if (mode != SYNC && call_self ) {
1212 KERNEL_DEBUG_CONSTANT(
1213 TRACE_MP_CPUS_CALL_LOCAL,
1214 action_func, arg0, arg1, 0, 0);
1215 if (action_func != NULL) {
1216 ml_set_interrupts_enabled(FALSE);
1217 action_func(arg0, arg1);
1218 ml_set_interrupts_enabled(intrs_enabled);
1219 }
2d21ac55 1220 }
2d21ac55 1221
1222 /* For ASYNC, now wait for all signaled cpus to complete their calls */
1223 if (mode == ASYNC) {
1224 mp_cpus_call_wait(intrs_enabled, mp_cpus_signals, &mp_cpus_calls);
1225 }
1226
1227out:
1228 cpu = (cpu_t) mp_cpus_signals + (call_self ? 1 : 0);
1229
1230 if (cpus_calledp)
1231 *cpus_calledp = cpus_called;
1232 if (cpus_notcalledp)
1233 *cpus_notcalledp = cpus_notcalled;
1234
1235 KERNEL_DEBUG_CONSTANT(
1236 TRACE_MP_CPUS_CALL | DBG_FUNC_END,
1237 cpu, cpus_called, cpus_notcalled, 0, 0);
1238
1239 return cpu;
1240}
1241
6d2010ae 1242
1243static void
1244mp_broadcast_action(void)
1245{
1246 /* call action function */
1247 if (mp_bc_action_func != NULL)
1248 mp_bc_action_func(mp_bc_func_arg);
1249
1250 /* if we're the last one through, wake up the instigator */
1251 if (atomic_decl_and_test(&mp_bc_count, 1))
1252 thread_wakeup(((event_t)(uintptr_t) &mp_bc_count));
1253}
1254
1255/*
1256 * mp_broadcast() runs a given function on all active cpus.
1257 * The caller blocks until the function has run on all cpus.
1258 * The caller will also block if there is another pending broadcast.
1259 */
1260void
1261mp_broadcast(
1262 void (*action_func)(void *),
1263 void *arg)
1264{
1265 if (!smp_initialized) {
1266 if (action_func != NULL)
1267 action_func(arg);
1268 return;
1269 }
1270
1271 /* obtain broadcast lock */
b0d623f7 1272 lck_mtx_lock(&mp_bc_lock);
1273
1274 /* set static function pointers */
1275 mp_bc_action_func = action_func;
1276 mp_bc_func_arg = arg;
1277
b0d623f7 1278 assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT);
1279
1280 /*
1281 * signal other processors, which will call mp_broadcast_action()
1282 */
1283 simple_lock(&x86_topo_lock);
1284 mp_bc_ncpus = i386_active_cpus(); /* total including this cpu */
1285 mp_bc_count = mp_bc_ncpus;
1286 i386_signal_cpus(MP_BROADCAST, ASYNC);
1287
1288 /* call executor function on this cpu */
1289 mp_broadcast_action();
1290 simple_unlock(&x86_topo_lock);
1291
1292 /* block for all cpus to have run action_func */
1293 if (mp_bc_ncpus > 1)
1294 thread_block(THREAD_CONTINUE_NULL);
1295 else
1296 clear_wait(current_thread(), THREAD_AWAKENED);
1297
1298 /* release lock */
b0d623f7 1299 lck_mtx_unlock(&mp_bc_lock);
1300}
1301
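/*
 * Illustrative sketch (not in the original source): using mp_broadcast() to
 * run a routine once on every active cpu and block until all have finished.
 * The example_* names are hypothetical.
 */
#if 0	/* example only */
static volatile long example_bc_visits;

static void
example_bc_action(void *arg)
{
	/* Runs on every active cpu, including the caller's */
	atomic_incl((volatile long *) arg, 1);
}

static void
example_bc_run(void)
{
	example_bc_visits = 0;
	mp_broadcast(example_bc_action, (void *) &example_bc_visits);
	/* All active cpus have run example_bc_action() when we get here */
}
#endif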
1302void
1303i386_activate_cpu(void)
1304{
1305 cpu_data_t *cdp = current_cpu_datap();
1306
1307 assert(!ml_get_interrupts_enabled());
1308
1309 if (!smp_initialized) {
1310 cdp->cpu_running = TRUE;
1311 return;
1312 }
1313
1314 simple_lock(&x86_topo_lock);
1315 cdp->cpu_running = TRUE;
7e4a7d39 1316 started_cpu();
1317 simple_unlock(&x86_topo_lock);
1318}
1319
1320extern void etimer_timer_expire(void *arg);
1321
1322void
1323i386_deactivate_cpu(void)
1324{
1325 cpu_data_t *cdp = current_cpu_datap();
1326
1327 assert(!ml_get_interrupts_enabled());
1328
1329 simple_lock(&x86_topo_lock);
1330 cdp->cpu_running = FALSE;
1331 simple_unlock(&x86_topo_lock);
1332
1333 timer_queue_shutdown(&cdp->rtclock_timer.queue);
1334 cdp->rtclock_timer.deadline = EndOfAllTime;
1335 mp_cpus_call(cpu_to_cpumask(master_cpu), ASYNC, etimer_timer_expire, NULL);
1336
1337 /*
1338 * In case a rendezvous/broadcast/call was initiated to this cpu
1339 * before we cleared cpu_running, we must perform any actions due.
1340 */
1341 if (i_bit(MP_RENDEZVOUS, &cdp->cpu_signals))
1342 mp_rendezvous_action();
1343 if (i_bit(MP_BROADCAST, &cdp->cpu_signals))
1344 mp_broadcast_action();
1345 if (i_bit(MP_CALL, &cdp->cpu_signals))
1346 mp_cpus_call_action();
1347 cdp->cpu_signals = 0; /* all clear */
1348}
1349
1350int pmsafe_debug = 1;
1351
1352#if MACH_KDP
1353volatile boolean_t mp_kdp_trap = FALSE;
593a1d5f 1354volatile unsigned long mp_kdp_ncpus;
1355boolean_t mp_kdp_state;
1356
1357
1358void
1359mp_kdp_enter(void)
1360{
91447636 1361 unsigned int cpu;
6d2010ae 1362 unsigned int ncpus = 0;
593a1d5f 1363 unsigned int my_cpu;
91447636 1364 uint64_t tsc_timeout;
1365
1366 DBG("mp_kdp_enter()\n");
1367
1368 /*
1369 * Here to enter the debugger.
1370 * In case of races, only one cpu is allowed to enter kdp after
1371 * stopping others.
1372 */
91447636 1373 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
1374 my_cpu = cpu_number();
1375 cpu_datap(my_cpu)->debugger_entry_time = mach_absolute_time();
55e303ae 1376 simple_lock(&mp_kdp_lock);
060df5ea 1377
d41d1dae 1378 if (pmsafe_debug && !kdp_snapshot)
1379 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
1380
1381 while (mp_kdp_trap) {
1382 simple_unlock(&mp_kdp_lock);
1383 DBG("mp_kdp_enter() race lost\n");
1384#if MACH_KDP
1385 mp_kdp_wait(TRUE, FALSE);
1386#endif
1387 simple_lock(&mp_kdp_lock);
1388 }
593a1d5f 1389 debugger_cpu = my_cpu;
060df5ea 1390 ncpus = 1;
1391 mp_kdp_ncpus = 1; /* self */
1392 mp_kdp_trap = TRUE;
060df5ea 1393 debugger_entry_time = cpu_datap(my_cpu)->debugger_entry_time;
55e303ae 1394 simple_unlock(&mp_kdp_lock);
55e303ae 1395
1396 /*
1397 * Deliver a nudge to other cpus, counting how many
1398 */
55e303ae 1399 DBG("mp_kdp_enter() signaling other processors\n");
2d21ac55 1400 if (force_immediate_debugger_NMI == FALSE) {
060df5ea 1401 for (cpu = 0; cpu < real_ncpus; cpu++) {
1402 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1403 continue;
1404 ncpus++;
1405 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1406 }
1407 /*
1408 * Wait for the other processors to synchronize.
1409 */
1410 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
0c530ab8 1411
1412 /*
1413 * This timeout is rather arbitrary; we don't want to NMI
1414 * processors that are executing at potentially
1415 * "unsafe-to-interrupt" points such as the trampolines,
1416 * but neither do we want to lose state by waiting too long.
1417 */
1418 tsc_timeout = rdtsc64() + (ncpus * 1000 * 1000);
0c530ab8 1419
1420 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
1421 /*
1422 * A TLB shootdown request may be pending--this would
1423 * result in the requesting processor waiting in
1424 * PMAP_UPDATE_TLBS() until this processor deals with it.
1425 * Process it, so it can now enter mp_kdp_wait()
1426 */
1427 handle_pending_TLB_flushes();
1428 cpu_pause();
1429 }
1430 /* If we've timed out, and some processor(s) are still unresponsive,
1431 * interrupt them with an NMI via the local APIC.
0c530ab8 1432 */
1433 if (mp_kdp_ncpus != ncpus) {
1434 for (cpu = 0; cpu < real_ncpus; cpu++) {
1435 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1436 continue;
1437 if (cpu_signal_pending(cpu, MP_KDP))
1438 cpu_NMI_interrupt(cpu);
1439 }
1440 }
55e303ae 1441 }
2d21ac55 1442 else
1443 for (cpu = 0; cpu < real_ncpus; cpu++) {
1444 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1445 continue;
2d21ac55 1446 cpu_NMI_interrupt(cpu);
0c530ab8 1447 }
0c530ab8 1448
060df5ea 1449 DBG("mp_kdp_enter() %lu processors done %s\n",
6d2010ae 1450 (unsigned long) mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
0c530ab8 1451
91447636 1452 postcode(MP_KDP_ENTER);
1453}
1454
1455static boolean_t
1456cpu_signal_pending(int cpu, mp_event_t event)
1457{
1458 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
1459 boolean_t retval = FALSE;
1460
1461 if (i_bit(event, signals))
1462 retval = TRUE;
1463 return retval;
1464}
1465
1466long kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func,
1467 void *arg0, void *arg1)
1468{
1469 if (lcpu > (real_ncpus - 1))
1470 return -1;
1471
1472 if (func == NULL)
1473 return -1;
1474
1475 kdp_xcpu_call_func.func = func;
1476 kdp_xcpu_call_func.ret = -1;
1477 kdp_xcpu_call_func.arg0 = arg0;
1478 kdp_xcpu_call_func.arg1 = arg1;
1479 kdp_xcpu_call_func.cpu = lcpu;
1480 DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu);
1481 while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE)
1482 cpu_pause();
1483 return kdp_xcpu_call_func.ret;
1484}
1485
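/*
 * Illustrative sketch (not part of the original file): invoking a routine on
 * another cpu while stopped in the debugger.  The callback's parameter list
 * is inferred from kdp_x86_xcpu_poll() below; the example_* names are
 * hypothetical.
 */
#if 0	/* example only */
static long
example_xcpu_read(void *arg0, __unused void *arg1, __unused uint16_t lcpu)
{
	/* Runs on the target cpu while it spins in mp_kdp_wait() */
	return (long) *(volatile int *) arg0;
}

static long
example_xcpu_query(uint16_t target_cpu, volatile int *addr)
{
	/* Returns -1 if target_cpu is out of range (see kdp_x86_xcpu_invoke) */
	return kdp_x86_xcpu_invoke(target_cpu, example_xcpu_read,
				   (void *) addr, NULL);
}
#endif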
1486static void
1487kdp_x86_xcpu_poll(void)
1488{
1489 if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) {
1490 kdp_xcpu_call_func.ret =
1491 kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0,
1492 kdp_xcpu_call_func.arg1,
1493 cpu_number());
1494 kdp_xcpu_call_func.cpu = KDP_XCPU_NONE;
1495 }
1496}
0c530ab8 1497
55e303ae 1498static void
b0d623f7 1499mp_kdp_wait(boolean_t flush, boolean_t isNMI)
55e303ae 1500{
6601e61a 1501 DBG("mp_kdp_wait()\n");
2d21ac55 1502 /* If an I/O port has been specified as a debugging aid, issue a read */
1503 panic_io_port_read();
1504
b0d623f7 1505#if CONFIG_MCA
1506 /* If we've trapped due to a machine-check, save MCA registers */
1507 mca_check_save();
b0d623f7 1508#endif
2d21ac55 1509
2d21ac55 1510 atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
b0d623f7 1511 while (mp_kdp_trap || (isNMI == TRUE)) {
0c530ab8 1512 /*
1513 * A TLB shootdown request may be pending--this would result
1514 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
1515 * until this processor handles it.
1516 * Process it, so it can now enter mp_kdp_wait()
1517 */
1518 if (flush)
1519 handle_pending_TLB_flushes();
1520
1521 kdp_x86_xcpu_poll();
1522 cpu_pause();
1523 }
2d21ac55 1524
0c530ab8 1525 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1526 DBG("mp_kdp_wait() done\n");
1527}
1528
1529void
1530mp_kdp_exit(void)
1531{
1532 DBG("mp_kdp_exit()\n");
593a1d5f 1533 debugger_cpu = -1;
0c530ab8 1534 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1535
1536 debugger_exit_time = mach_absolute_time();
1537
55e303ae 1538 mp_kdp_trap = FALSE;
0c530ab8 1539 __asm__ volatile("mfence");
1540
1541 /* Wait for other processors to stop spinning. XXX needs timeout */
1542 DBG("mp_kdp_exit() waiting for processors to resume\n");
1543 while (mp_kdp_ncpus > 0) {
1544 /*
1545 * a TLB shootdown request may be pending... this would result in the requesting
1546 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1547 * Process it, so it can now enter mp_kdp_wait()
1548 */
1549 handle_pending_TLB_flushes();
1550
1551 cpu_pause();
1552 }
2d21ac55 1553
d41d1dae 1554 if (pmsafe_debug && !kdp_snapshot)
1555 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
1556
1557 debugger_exit_time = mach_absolute_time();
1558
55e303ae 1559 DBG("mp_kdp_exit() done\n");
1560 (void) ml_set_interrupts_enabled(mp_kdp_state);
1561 postcode(0);
1562}
1563#endif /* MACH_KDP */
1564
1565boolean_t
1566mp_recent_debugger_activity() {
1567 uint64_t abstime = mach_absolute_time();
1568 return (((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) ||
1569 ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance));
1570}
1571
1572/*ARGSUSED*/
1573void
1574init_ast_check(
91447636 1575 __unused processor_t processor)
1576{
1577}
1578
1579void
1580cause_ast_check(
1581 processor_t processor)
1582{
b0d623f7 1583 int cpu = processor->cpu_id;
1584
1585 if (cpu != cpu_number()) {
1586 i386_signal_cpu(cpu, MP_AST, ASYNC);
6d2010ae 1587 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), cpu, 1, 0, 0, 0);
1588 }
1589}
1590
0c530ab8 1591#if MACH_KDB
1592/*
1593 * invoke kdb on slave processors
1594 */
1595
1596void
1597remote_kdb(void)
1598{
1599 unsigned int my_cpu = cpu_number();
1600 unsigned int cpu;
1601 int kdb_ncpus;
1602 uint64_t tsc_timeout = 0;
55e303ae 1603
1604 mp_kdb_trap = TRUE;
1605 mp_kdb_ncpus = 1;
1606 for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
91447636 1607 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
55e303ae 1608 continue;
1609 kdb_ncpus++;
1610 i386_signal_cpu(cpu, MP_KDB, ASYNC);
89b3af67 1611 }
1612 DBG("remote_kdb() waiting for (%d) processors to suspend\n",kdb_ncpus);
1613
1614 tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);
1615
1616 while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
1617 /*
1618 * a TLB shootdown request may be pending... this would result in the requesting
1619 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1620 * Process it, so it can now enter mp_kdp_wait()
1621 */
1622 handle_pending_TLB_flushes();
1623
1624 cpu_pause();
1625 }
6d2010ae 1626 DBG("remote_kdb() %ld processors done %s\n",
1627 mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
1628}
1629
1630static void
1631mp_kdb_wait(void)
1632{
1633 DBG("mp_kdb_wait()\n");
1634
2d21ac55 1635 /* If an I/O port has been specified as a debugging aid, issue a read */
1636 panic_io_port_read();
1637
1638 atomic_incl(&mp_kdb_ncpus, 1);
1639 while (mp_kdb_trap) {
1640 /*
1641 * a TLB shootdown request may be pending... this would result in the requesting
1642 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1643 * Process it, so it can now enter mp_kdp_wait()
1644 */
1645 handle_pending_TLB_flushes();
1646
1647 cpu_pause();
1648 }
1649 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1650 DBG("mp_kdb_wait() done\n");
1651}
1652
1653/*
1654 * Clear kdb interrupt
1655 */
1656
1657void
1658clear_kdb_intr(void)
1659{
1660 mp_disable_preemption();
91447636 1661 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1662 mp_enable_preemption();
1663}
1664
1665void
1666mp_kdb_exit(void)
1667{
1668 DBG("mp_kdb_exit()\n");
1669 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1670 mp_kdb_trap = FALSE;
1671 __asm__ volatile("mfence");
1672
1673 while (mp_kdb_ncpus > 0) {
1674 /*
1675 * a TLB shootdown request may be pending... this would result in the requesting
1676 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1677 * Process it, so it can now enter mp_kdp_wait()
1678 */
1679 handle_pending_TLB_flushes();
1680
1681 cpu_pause();
1682 }
2d21ac55 1683
1684 DBG("mp_kdb_exit() done\n");
1685}
1686
1687#endif /* MACH_KDB */
1688
1689void
1690slave_machine_init(void *param)
1691{
1692 /*
0c530ab8 1693 * Here in process context, but with interrupts disabled.
1694 */
1695 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1696
1697 if (param == FULL_SLAVE_INIT) {
1698 /*
1699 * Cold start
1700 */
1701 clock_init();
593a1d5f 1702 cpu_machine_init(); /* Interrupts enabled hereafter */
6d2010ae 1703 mp_cpus_call_cpu_init();
593a1d5f 1704 }
1705}
1706
b0d623f7 1707#undef cpu_number
1708int cpu_number(void)
1709{
1710 return get_cpu_number();
1711}
1712
1713#if MACH_KDB
1714#include <ddb/db_output.h>
1715
1716#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1717
1718
1719#if TRAP_DEBUG
1720#define MTRAPS 100
1721struct mp_trap_hist_struct {
1722 unsigned char type;
1723 unsigned char data[5];
1724} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1725 *max_trap_hist = &trap_hist[MTRAPS];
1726
1727void db_trap_hist(void);
1728
1729/*
1730 * SPL:
1731 * 1: new spl
1732 * 2: old spl
1733 * 3: new tpr
1734 * 4: old tpr
1735 * INT:
1736 * 1: int vec
1737 * 2: old spl
1738 * 3: new spl
1739 * 4: post eoi tpr
1740 * 5: exit tpr
1741 */
1742
1743void
1744db_trap_hist(void)
1745{
1746 int i,j;
1747 for(i=0;i<MTRAPS;i++)
1748 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1749 db_printf("%s%s",
1750 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1751 (trap_hist[i].type == 1)?"SPL":"INT");
1752 for(j=0;j<5;j++)
1753 db_printf(" %02x", trap_hist[i].data[j]);
1754 db_printf("\n");
1755 }
1756
1757}
1758#endif /* TRAP_DEBUG */
1759#endif /* MACH_KDB */
1760
1761static void
1762cpu_prewarm_init()
1763{
1764 int i;
1765
1766 simple_lock_init(&cpu_warm_lock, 0);
1767 queue_init(&cpu_warm_call_list);
1768 for (i = 0; i < NUM_CPU_WARM_CALLS; i++) {
1769 enqueue_head(&cpu_warm_call_list, (queue_entry_t)&cpu_warm_call_arr[i]);
1770 }
1771}
1772
1773static timer_call_t
1774grab_warm_timer_call()
1775{
1776 spl_t x;
1777 timer_call_t call = NULL;
1778
1779 x = splsched();
1780 simple_lock(&cpu_warm_lock);
1781 if (!queue_empty(&cpu_warm_call_list)) {
1782 call = (timer_call_t) dequeue_head(&cpu_warm_call_list);
1783 }
1784 simple_unlock(&cpu_warm_lock);
1785 splx(x);
1786
1787 return call;
1788}
1789
1790static void
1791free_warm_timer_call(timer_call_t call)
1792{
1793 spl_t x;
1794
1795 x = splsched();
1796 simple_lock(&cpu_warm_lock);
1797 enqueue_head(&cpu_warm_call_list, (queue_entry_t)call);
1798 simple_unlock(&cpu_warm_lock);
1799 splx(x);
1800}
1801
1802/*
1803 * Runs in timer call context (interrupts disabled).
1804 */
1805static void
1806cpu_warm_timer_call_func(
1807 call_entry_param_t p0,
1808 __unused call_entry_param_t p1)
1809{
1810 free_warm_timer_call((timer_call_t)p0);
1811 return;
1812}
1813
1814/*
1815 * Runs with interrupts disabled on the CPU we wish to warm (i.e. CPU 0).
1816 */
1817static void
1818_cpu_warm_setup(
1819 void *arg)
1820{
1821 cpu_warm_data_t cwdp = (cpu_warm_data_t)arg;
1822
1823 timer_call_enter(cwdp->cwd_call, cwdp->cwd_deadline, TIMER_CALL_CRITICAL | TIMER_CALL_LOCAL);
1824 cwdp->cwd_result = 0;
1825
1826 return;
1827}
1828
1829/*
1830 * Not safe to call with interrupts disabled.
1831 */
1832kern_return_t
1833ml_interrupt_prewarm(
1834 uint64_t deadline)
1835{
1836 struct cpu_warm_data cwd;
1837 timer_call_t call;
1838 cpu_t ct;
1839
1840 if (ml_get_interrupts_enabled() == FALSE) {
1841 panic("%s: Interrupts disabled?\n", __FUNCTION__);
1842 }
1843
1844 /*
1845 * If the platform doesn't need our help, say that we succeeded.
1846 */
1847 if (!ml_get_interrupt_prewake_applicable()) {
1848 return KERN_SUCCESS;
1849 }
1850
1851 /*
1852 * Grab a timer call to use.
1853 */
1854 call = grab_warm_timer_call();
1855 if (call == NULL) {
1856 return KERN_RESOURCE_SHORTAGE;
1857 }
1858
1859 timer_call_setup(call, cpu_warm_timer_call_func, call);
1860 cwd.cwd_call = call;
1861 cwd.cwd_deadline = deadline;
1862 cwd.cwd_result = 0;
1863
1864 /*
1865 * For now, non-local interrupts happen on the master processor.
1866 */
1867 ct = mp_cpus_call(cpu_to_cpumask(master_cpu), SYNC, _cpu_warm_setup, &cwd);
1868 if (ct == 0) {
1869 free_warm_timer_call(call);
1870 return KERN_FAILURE;
1871 } else {
1872 return cwd.cwd_result;
1873 }
1874}
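/*
 * Illustrative sketch (not part of the original file): how platform idle code
 * might use ml_interrupt_prewarm() before entering a deep idle state.  The
 * deadline delta below is an arbitrary, hypothetical absolute-time value.
 */
#if 0	/* example only */
static void
example_prewarm_before_idle(void)
{
	uint64_t	deadline = mach_absolute_time() + 1000000;
	kern_return_t	kr;

	/* Must be called with interrupts enabled; see the panic check above */
	kr = ml_interrupt_prewarm(deadline);
	if (kr != KERN_SUCCESS) {
		/* No warm timer armed; proceed without the prewarm hint */
	}
}
#endif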