55e303ae 1/*
b0d623f7 2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
55e303ae 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
55e303ae 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32#include <mach_rt.h>
33#include <mach_kdb.h>
34#include <mach_kdp.h>
35#include <mach_ldebug.h>
36#include <gprof.h>
37
38#include <mach/mach_types.h>
39#include <mach/kern_return.h>
40
41#include <kern/kern_types.h>
42#include <kern/startup.h>
c910b4d9 43#include <kern/timer_queue.h>
44#include <kern/processor.h>
45#include <kern/cpu_number.h>
46#include <kern/cpu_data.h>
47#include <kern/assert.h>
48#include <kern/machine.h>
0c530ab8 49#include <kern/pms.h>
593a1d5f 50#include <kern/misc_protos.h>
51
52#include <vm/vm_map.h>
53#include <vm/vm_kern.h>
54
55#include <profiling/profile-mk.h>
55e303ae 56
57#include <i386/proc_reg.h>
58#include <i386/cpu_threads.h>
59#include <i386/mp_desc.h>
60#include <i386/misc_protos.h>
61#include <i386/trap.h>
62#include <i386/postcode.h>
63#include <i386/machine_routines.h>
64#include <i386/mp.h>
65#include <i386/mp_events.h>
593a1d5f 66#include <i386/lapic.h>
55e303ae 67#include <i386/ipl.h>
55e303ae 68#include <i386/cpuid.h>
b0d623f7 69#include <i386/fpu.h>
55e303ae 70#include <i386/machine_cpu.h>
91447636 71#include <i386/mtrr.h>
0c530ab8 72#include <i386/pmCPU.h>
b0d623f7 73#if CONFIG_MCA
2d21ac55 74#include <i386/machine_check.h>
75#endif
76#include <i386/acpi.h>
77
78#include <chud/chud_xnu.h>
79#include <chud/chud_xnu_private.h>
80
81#include <sys/kdebug.h>
82#if MACH_KDB
b0d623f7 83#include <machine/db_machdep.h>
84#include <ddb/db_aout.h>
85#include <ddb/db_access.h>
86#include <ddb/db_sym.h>
87#include <ddb/db_variables.h>
88#include <ddb/db_command.h>
89#include <ddb/db_output.h>
90#include <ddb/db_expr.h>
91#endif
92
93#if MP_DEBUG
94#define PAUSE delay(1000000)
95#define DBG(x...) kprintf(x)
96#else
97#define DBG(x...)
98#define PAUSE
99#endif /* MP_DEBUG */
100
55e303ae 101
102void slave_boot_init(void);
103
104#if MACH_KDB
105static void mp_kdb_wait(void);
106volatile boolean_t mp_kdb_trap = FALSE;
107volatile long mp_kdb_ncpus = 0;
108#endif
109
b0d623f7 110static void mp_kdp_wait(boolean_t flush, boolean_t isNMI);
55e303ae 111static void mp_rendezvous_action(void);
2d21ac55 112static void mp_broadcast_action(void);
55e303ae 113
0c530ab8 114static boolean_t cpu_signal_pending(int cpu, mp_event_t event);
115static int cpu_signal_handler(x86_saved_state_t *regs);
116static int NMIInterruptHandler(x86_saved_state_t *regs);
0c530ab8 117
b0d623f7 118boolean_t smp_initialized = FALSE;
119volatile boolean_t force_immediate_debugger_NMI = FALSE;
120volatile boolean_t pmap_tlb_flush_timeout = FALSE;
55e303ae 121decl_simple_lock_data(,mp_kdp_lock);
91447636 122
123decl_lck_mtx_data(static, mp_cpu_boot_lock);
124lck_mtx_ext_t mp_cpu_boot_lock_ext;
125
126/* Variables needed for MP rendezvous. */
0c530ab8 127decl_simple_lock_data(,mp_rv_lock);
128static void (*mp_rv_setup_func)(void *arg);
129static void (*mp_rv_action_func)(void *arg);
130static void (*mp_rv_teardown_func)(void *arg);
131static void *mp_rv_func_arg;
132static volatile int mp_rv_ncpus;
133 /* Cache-aligned barriers: */
134static volatile long mp_rv_entry __attribute__((aligned(64)));
135static volatile long mp_rv_exit __attribute__((aligned(64)));
136static volatile long mp_rv_complete __attribute__((aligned(64)));
55e303ae 137
138volatile uint64_t debugger_entry_time;
139volatile uint64_t debugger_exit_time;
140#if MACH_KDP
141
142static struct _kdp_xcpu_call_func {
143 kdp_x86_xcpu_func_t func;
144 void *arg0, *arg1;
145 volatile long ret;
146 volatile uint16_t cpu;
147} kdp_xcpu_call_func = {
148 .cpu = KDP_XCPU_NONE
149};
150
151#endif
152
153/* Variables needed for MP broadcast. */
154static void (*mp_bc_action_func)(void *arg);
155static void *mp_bc_func_arg;
593a1d5f 156static int mp_bc_ncpus;
2d21ac55 157static volatile long mp_bc_count;
158decl_lck_mtx_data(static, mp_bc_lock);
159lck_mtx_ext_t mp_bc_lock_ext;
593a1d5f 160static volatile int debugger_cpu = -1;
161
162static void mp_cpus_call_action(void);
c910b4d9 163static void mp_call_PM(void);
2d21ac55 164
165char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init
166
167
168#if GPROF
169/*
170 * Initialize dummy structs for profiling. These aren't used but
171 * allows hertz_tick() to be built with GPROF defined.
172 */
173struct profile_vars _profile_vars;
174struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
175#define GPROF_INIT() \
176{ \
177 int i; \
178 \
179 /* Hack to initialize pointers to unused profiling structs */ \
180 for (i = 1; i < MAX_CPUS; i++) \
181 _profile_vars_cpus[i] = &_profile_vars; \
182}
183#else
184#define GPROF_INIT()
185#endif /* GPROF */
186
187static lck_grp_t smp_lck_grp;
188static lck_grp_attr_t smp_lck_grp_attr;
189
190extern void slave_pstart(void);
191
192void
193smp_init(void)
55e303ae 194{
195 simple_lock_init(&mp_kdp_lock, 0);
196 simple_lock_init(&mp_rv_lock, 0);
197 lck_grp_attr_setdefault(&smp_lck_grp_attr);
198 lck_grp_init(&smp_lck_grp, "i386_smp", &smp_lck_grp_attr);
199 lck_mtx_init_ext(&mp_cpu_boot_lock, &mp_cpu_boot_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
200 lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL);
91447636 201 console_init();
202
203 /* Local APIC? */
91447636 204 if (!lapic_probe())
205 return;
206
55e303ae 207 lapic_init();
208 lapic_configure();
209 lapic_set_intr_func(LAPIC_NMI_INTERRUPT, NMIInterruptHandler);
210 lapic_set_intr_func(LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler);
55e303ae 211
212 cpu_thread_init();
213
214 GPROF_INIT();
215 DBGLOG_CPU_INIT(master_cpu);
216
b0d623f7 217 install_real_mode_bootstrap(slave_pstart);
218
219 smp_initialized = TRUE;
220
221 return;
222}
223
0c530ab8 224/*
593a1d5f 225 * Poll a CPU to see when it has marked itself as running.
0c530ab8 226 */
227static void
228mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay)
91447636 229{
230 while (iters-- > 0) {
231 if (cpu_datap(slot_num)->cpu_running)
232 break;
233 delay(usecdelay);
91447636 234 }
235}
236
237/*
238 * Quickly bring a CPU back online which has been halted.
239 */
240kern_return_t
241intel_startCPU_fast(int slot_num)
242{
243 kern_return_t rc;
244
245 /*
246 * Try to perform a fast restart
247 */
248 rc = pmCPUExitHalt(slot_num);
249 if (rc != KERN_SUCCESS)
250 /*
251 * The CPU was not eligible for a fast restart.
252 */
253 return(rc);
254
255 /*
256 * Wait until the CPU is back online.
257 */
258 mp_disable_preemption();
259
260 /*
261 * We use short pauses (1us) for low latency. 30,000 iterations is
262 * longer than a full restart would require so it should be more
263 * than long enough.
264 */
265 mp_wait_for_cpu_up(slot_num, 30000, 1);
266 mp_enable_preemption();
267
268 /*
269 * Check to make sure that the CPU is really running. If not,
270 * go through the slow path.
271 */
272 if (cpu_datap(slot_num)->cpu_running)
273 return(KERN_SUCCESS);
274 else
275 return(KERN_FAILURE);
276}
277
c910b4d9 278typedef struct {
279 int target_cpu;
280 int target_lapic;
281 int starter_cpu;
282} processor_start_info_t;
283
284static processor_start_info_t start_info;
285
286static void
287start_cpu(void *arg)
288{
289 int i = 1000;
290 processor_start_info_t *psip = (processor_start_info_t *) arg;
291
292 /* Ignore this if the current processor is not the starter */
293 if (cpu_number() != psip->starter_cpu)
294 return;
295
296 LAPIC_WRITE(ICRD, psip->target_lapic << LAPIC_ICRD_DEST_SHIFT);
297 LAPIC_WRITE(ICR, LAPIC_ICR_DM_INIT);
b0d623f7 298 delay(100);
299
300 LAPIC_WRITE(ICRD, psip->target_lapic << LAPIC_ICRD_DEST_SHIFT);
b0d623f7 301 LAPIC_WRITE(ICR, LAPIC_ICR_DM_STARTUP|(REAL_MODE_BOOTSTRAP_OFFSET>>12));
302
303#ifdef POSTCODE_DELAY
304 /* Wait much longer if postcodes are displayed for a delay period. */
305 i *= 10000;
306#endif
307 mp_wait_for_cpu_up(psip->target_cpu, i*100, 100);
308}
309
310extern char prot_mode_gdt[];
311extern char slave_boot_base[];
312extern char real_mode_bootstrap_base[];
313extern char real_mode_bootstrap_end[];
314extern char slave_boot_end[];
315
316kern_return_t
317intel_startCPU(
318 int slot_num)
319{
320 int lapic = cpu_to_lapic[slot_num];
321 boolean_t istate;
55e303ae 322
323 assert(lapic != -1);
324
325 DBGLOG_CPU_INIT(slot_num);
55e303ae 326
327 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
328 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
55e303ae 329
330 /*
331 * Initialize (or re-initialize) the descriptor tables for this cpu.
332 * Propagate processor mode to slave.
333 */
334 if (cpu_mode_is64bit())
b0d623f7 335 cpu_desc_init64(cpu_datap(slot_num));
0c530ab8 336 else
b0d623f7 337 cpu_desc_init(cpu_datap(slot_num));
91447636 338
c910b4d9 339 /* Serialize use of the slave boot stack, etc. */
b0d623f7 340 lck_mtx_lock(&mp_cpu_boot_lock);
55e303ae 341
c910b4d9 342 istate = ml_set_interrupts_enabled(FALSE);
91447636 343 if (slot_num == get_cpu_number()) {
c910b4d9 344 ml_set_interrupts_enabled(istate);
b0d623f7 345 lck_mtx_unlock(&mp_cpu_boot_lock);
346 return KERN_SUCCESS;
347 }
55e303ae 348
349 start_info.starter_cpu = cpu_number();
350 start_info.target_cpu = slot_num;
c910b4d9 351 start_info.target_lapic = lapic;
55e303ae 352
c910b4d9 353 /*
b0d623f7 354 * Perform the processor startup sequence with all running
355 * processors rendezvous'ed. This is required during periods when
356 * the cache-disable bit is set for MTRR/PAT initialization.
357 */
b0d623f7 358 mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);
55e303ae 359
c910b4d9 360 ml_set_interrupts_enabled(istate);
b0d623f7 361 lck_mtx_unlock(&mp_cpu_boot_lock);
55e303ae 362
91447636 363 if (!cpu_datap(slot_num)->cpu_running) {
0c530ab8 364 kprintf("Failed to start CPU %02d\n", slot_num);
365 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
366 delay(1000000);
b0d623f7 367 halt_cpu();
368 return KERN_SUCCESS;
369 } else {
2d21ac55 370 kprintf("Started cpu %d (lapic id %08x)\n", slot_num, lapic);
371 return KERN_SUCCESS;
372 }
373}
374
55e303ae 375#if MP_DEBUG
376cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
377cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
378
379MP_EVENT_NAME_DECL();
380
381#endif /* MP_DEBUG */
382
593a1d5f 383int
0c530ab8 384cpu_signal_handler(x86_saved_state_t *regs)
55e303ae 385{
91447636 386 int my_cpu;
387 volatile int *my_word;
388#if MACH_KDB && MACH_ASSERT
389 int i=100;
390#endif /* MACH_KDB && MACH_ASSERT */
391
392 mp_disable_preemption();
393
394 my_cpu = cpu_number();
91447636 395 my_word = &current_cpu_datap()->cpu_signals;
396
397 do {
398#if MACH_KDB && MACH_ASSERT
399 if (i-- <= 0)
0c530ab8 400 Debugger("cpu_signal_handler: signals did not clear");
401#endif /* MACH_KDB && MACH_ASSERT */
402#if MACH_KDP
403 if (i_bit(MP_KDP, my_word)) {
404 DBGLOG(cpu_handle,my_cpu,MP_KDP);
405 i_bit_clear(MP_KDP, my_word);
406/* Ensure that the i386_kernel_state at the base of the
407 * current thread's stack (if any) is synchronized with the
408 * context at the moment of the interrupt, to facilitate
409 * access through the debugger.
0c530ab8 410 */
411 sync_iss_to_iks(regs);
412 mp_kdp_wait(TRUE, FALSE);
413 } else
414#endif /* MACH_KDP */
91447636 415 if (i_bit(MP_TLB_FLUSH, my_word)) {
416 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
417 i_bit_clear(MP_TLB_FLUSH, my_word);
418 pmap_update_interrupt();
419 } else if (i_bit(MP_AST, my_word)) {
420 DBGLOG(cpu_handle,my_cpu,MP_AST);
421 i_bit_clear(MP_AST, my_word);
422 ast_check(cpu_to_processor(my_cpu));
423#if MACH_KDB
424 } else if (i_bit(MP_KDB, my_word)) {
425
426 i_bit_clear(MP_KDB, my_word);
427 current_cpu_datap()->cpu_kdb_is_slave++;
428 mp_kdb_wait();
429 current_cpu_datap()->cpu_kdb_is_slave--;
430#endif /* MACH_KDB */
431 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
432 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
433 i_bit_clear(MP_RENDEZVOUS, my_word);
434 mp_rendezvous_action();
435 } else if (i_bit(MP_BROADCAST, my_word)) {
436 DBGLOG(cpu_handle,my_cpu,MP_BROADCAST);
437 i_bit_clear(MP_BROADCAST, my_word);
438 mp_broadcast_action();
439 } else if (i_bit(MP_CHUD, my_word)) {
440 DBGLOG(cpu_handle,my_cpu,MP_CHUD);
441 i_bit_clear(MP_CHUD, my_word);
442 chudxnu_cpu_signal_handler();
443 } else if (i_bit(MP_CALL, my_word)) {
444 DBGLOG(cpu_handle,my_cpu,MP_CALL);
445 i_bit_clear(MP_CALL, my_word);
446 mp_cpus_call_action();
447 } else if (i_bit(MP_CALL_PM, my_word)) {
448 DBGLOG(cpu_handle,my_cpu,MP_CALL_PM);
449 i_bit_clear(MP_CALL_PM, my_word);
450 mp_call_PM();
451 }
452 } while (*my_word);
453
454 mp_enable_preemption();
455
593a1d5f 456 return 0;
457}
458
593a1d5f 459static int
2d21ac55 460NMIInterruptHandler(x86_saved_state_t *regs)
0c530ab8 461{
462 void *stackptr;
463
0c530ab8 464 sync_iss_to_iks_unconditionally(regs);
b0d623f7 465#if defined (__i386__)
935ed37a 466 __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
467#elif defined (__x86_64__)
468 __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
469#endif
935ed37a 470
471 if (cpu_number() == debugger_cpu)
472 goto NMExit;
473
935ed37a 474 if (pmap_tlb_flush_timeout == TRUE && current_cpu_datap()->cpu_tlb_invalid) {
475 char pstr[128];
476 snprintf(&pstr[0], sizeof(pstr), "Panic(CPU %d): Unresponsive processor\n", cpu_number());
b0d623f7 477 panic_i386_backtrace(stackptr, 16, &pstr[0], TRUE, regs);
935ed37a 478 }
479
480#if MACH_KDP
481 mp_kdp_wait(FALSE, pmap_tlb_flush_timeout);
482#endif
593a1d5f 483NMExit:
484 return 1;
485}
486
91447636 487#ifdef MP_DEBUG
488int max_lock_loops = 100000000;
489int trappedalready = 0; /* (BRINGUP) */
91447636 490#endif /* MP_DEBUG */
0c530ab8 491
492static void
493i386_cpu_IPI(int cpu)
494{
495 boolean_t state;
0c530ab8 496
2d21ac55 497#ifdef MP_DEBUG
0c530ab8 498 if(cpu_datap(cpu)->cpu_signals & 6) { /* (BRINGUP) */
2d21ac55 499 kprintf("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
0c530ab8 500 }
2d21ac55 501#endif /* MP_DEBUG */
55e303ae 502
0c530ab8 503#if MACH_KDB
504#ifdef MP_DEBUG
505 if(!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) { /* (BRINGUP) */
506 if(kdb_cpu != cpu_number()) {
507 trappedalready = 1;
508 panic("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n",
509 cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
510 }
511 }
512#endif /* MP_DEBUG */
513#endif
514
2d21ac55 515 /* Wait for previous interrupt to be delivered... */
91447636 516#ifdef MP_DEBUG
2d21ac55 517 int pending_busy_count = 0;
593a1d5f 518 while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
519 if (++pending_busy_count > max_lock_loops)
520 panic("i386_cpu_IPI() deadlock\n");
91447636 521#else
593a1d5f 522 while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
91447636 523#endif /* MP_DEBUG */
2d21ac55 524 cpu_pause();
525 }
526
2d21ac55 527 state = ml_set_interrupts_enabled(FALSE);
528 LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
529 LAPIC_WRITE(ICR, LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED);
530 (void) ml_set_interrupts_enabled(state);
531}
532
533/*
534 * cpu_interrupt is really just to be used by the scheduler to
535 * get a CPU's attention; it may not always issue an IPI. If an
536 * IPI is always needed then use i386_cpu_IPI.
537 */
538void
539cpu_interrupt(int cpu)
540{
541 if (smp_initialized
542 && pmCPUExitIdle(cpu_datap(cpu))) {
543 i386_cpu_IPI(cpu);
544 }
545}
546
547/*
548 * Send a true NMI via the local APIC to the specified CPU.
549 */
935ed37a 550void
551cpu_NMI_interrupt(int cpu)
552{
553 boolean_t state;
554
555 if (smp_initialized) {
556 state = ml_set_interrupts_enabled(FALSE);
2d21ac55 557/* Program the interrupt command register */
593a1d5f 558 LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
2d21ac55 559/* The vector is ignored in this case--the target CPU will enter on the
560 * NMI vector.
561 */
593a1d5f 562 LAPIC_WRITE(ICR, LAPIC_VECTOR(INTERPROCESSOR)|LAPIC_ICR_DM_NMI);
563 (void) ml_set_interrupts_enabled(state);
564 }
565}
566
b0d623f7 567static void (* volatile mp_PM_func)(void) = NULL;
568
569static void
570mp_call_PM(void)
571{
572 assert(!ml_get_interrupts_enabled());
573
574 if (mp_PM_func != NULL)
575 mp_PM_func();
576}
577
578void
579cpu_PM_interrupt(int cpu)
580{
581 assert(!ml_get_interrupts_enabled());
582
583 if (mp_PM_func != NULL) {
584 if (cpu == cpu_number())
585 mp_PM_func();
586 else
587 i386_signal_cpu(cpu, MP_CALL_PM, ASYNC);
588 }
589}
590
591void
592PM_interrupt_register(void (*fn)(void))
593{
594 mp_PM_func = fn;
595}
596
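/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a power-management driver would typically register its callback once at
 * init time; cpu_PM_interrupt() then routes MP_CALL_PM signals to it.
 * The names below are hypothetical.
 */
#if 0	/* example only */
static void
example_pm_callback(void)
{
	/* Runs with interrupts disabled on the cpu receiving MP_CALL_PM */
}

static void
example_pm_init(void)
{
	PM_interrupt_register(example_pm_callback);
}
#endif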
597void
598i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
599{
600 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
601 uint64_t tsc_timeout;
6601e61a 602
0c530ab8 603
91447636 604 if (!cpu_datap(cpu)->cpu_running)
605 return;
606
607 if (event == MP_TLB_FLUSH)
608 KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);
6601e61a 609
610 DBGLOG(cpu_signal, cpu, event);
611
55e303ae 612 i_bit_set(event, signals);
2d21ac55 613 i386_cpu_IPI(cpu);
614 if (mode == SYNC) {
615 again:
616 tsc_timeout = rdtsc64() + (1000*1000*1000);
617 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
618 cpu_pause();
619 }
620 if (i_bit(event, signals)) {
621 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
622 cpu, event);
623 goto again;
624 }
625 }
626 if (event == MP_TLB_FLUSH)
627 KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
628}
629
630/*
631 * Send event to all running cpus.
632 * Called with the topology locked.
633 */
634void
635i386_signal_cpus(mp_event_t event, mp_sync_t mode)
636{
637 unsigned int cpu;
638 unsigned int my_cpu = cpu_number();
55e303ae 639
b0d623f7 640 assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
2d21ac55 641
642 for (cpu = 0; cpu < real_ncpus; cpu++) {
643 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
644 continue;
645 i386_signal_cpu(cpu, event, mode);
646 }
647}
648
649/*
650 * Return the number of running cpus.
651 * Called with the topology locked.
652 */
653int
654i386_active_cpus(void)
655{
656 unsigned int cpu;
657 unsigned int ncpus = 0;
55e303ae 658
b0d623f7 659 assert(hw_lock_held((hw_lock_t)&x86_topo_lock));
2d21ac55 660
661 for (cpu = 0; cpu < real_ncpus; cpu++) {
662 if (cpu_datap(cpu)->cpu_running)
663 ncpus++;
664 }
665 return(ncpus);
666}
667
668/*
669 * All-CPU rendezvous:
670 * - CPUs are signalled,
671 * - all execute the setup function (if specified),
672 * - rendezvous (i.e. all cpus reach a barrier),
673 * - all execute the action function (if specified),
674 * - rendezvous again,
675 * - execute the teardown function (if specified), and then
676 * - resume.
677 *
678 * Note that the supplied external functions _must_ be reentrant and aware
679 * that they are running in parallel and in an unknown lock context.
680 */
681
682static void
683mp_rendezvous_action(void)
684{
2d21ac55 685 boolean_t intrs_enabled;
686
687 /* setup function */
688 if (mp_rv_setup_func != NULL)
689 mp_rv_setup_func(mp_rv_func_arg);
690
691 intrs_enabled = ml_get_interrupts_enabled();
692
b0d623f7 693
55e303ae 694 /* spin on entry rendezvous */
695 atomic_incl(&mp_rv_entry, 1);
696 while (mp_rv_entry < mp_rv_ncpus) {
697 /* poll for pesky tlb flushes if interrupts disabled */
698 if (!intrs_enabled)
699 handle_pending_TLB_flushes();
55e303ae 700 cpu_pause();
0c530ab8 701 }
702 /* action function */
703 if (mp_rv_action_func != NULL)
704 mp_rv_action_func(mp_rv_func_arg);
705 /* spin on exit rendezvous */
0c530ab8 706 atomic_incl(&mp_rv_exit, 1);
707 while (mp_rv_exit < mp_rv_ncpus) {
708 if (!intrs_enabled)
709 handle_pending_TLB_flushes();
55e303ae 710 cpu_pause();
2d21ac55 711 }
712 /* teardown function */
713 if (mp_rv_teardown_func != NULL)
714 mp_rv_teardown_func(mp_rv_func_arg);
715
716 /* Bump completion count */
717 atomic_incl(&mp_rv_complete, 1);
718}
719
720void
721mp_rendezvous(void (*setup_func)(void *),
722 void (*action_func)(void *),
723 void (*teardown_func)(void *),
724 void *arg)
725{
726
727 if (!smp_initialized) {
728 if (setup_func != NULL)
729 setup_func(arg);
730 if (action_func != NULL)
731 action_func(arg);
732 if (teardown_func != NULL)
733 teardown_func(arg);
734 return;
735 }
736
737 /* obtain rendezvous lock */
738 simple_lock(&mp_rv_lock);
739
740 /* set static function pointers */
741 mp_rv_setup_func = setup_func;
742 mp_rv_action_func = action_func;
743 mp_rv_teardown_func = teardown_func;
744 mp_rv_func_arg = arg;
745
746 mp_rv_entry = 0;
747 mp_rv_exit = 0;
748 mp_rv_complete = 0;
749
750 /*
751 * signal other processors, which will call mp_rendezvous_action()
2d21ac55 752 * with interrupts disabled
55e303ae 753 */
2d21ac55 754 simple_lock(&x86_topo_lock);
0c530ab8 755 mp_rv_ncpus = i386_active_cpus();
55e303ae 756 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
2d21ac55 757 simple_unlock(&x86_topo_lock);
758
759 /* call executor function on this cpu */
760 mp_rendezvous_action();
761
762 /*
763 * Spin for everyone to complete.
764 * This is necessary to ensure that all processors have proceeded
765 * from the exit barrier before we release the rendezvous structure.
766 */
767 while (mp_rv_complete < mp_rv_ncpus) {
768 cpu_pause();
769 }
770
771 /* Tidy up */
772 mp_rv_setup_func = NULL;
773 mp_rv_action_func = NULL;
774 mp_rv_teardown_func = NULL;
775 mp_rv_func_arg = NULL;
776
777 /* release lock */
778 simple_unlock(&mp_rv_lock);
779}
780
781void
782mp_rendezvous_break_lock(void)
783{
784 simple_lock_init(&mp_rv_lock, 0);
785}
786
787static void
788setup_disable_intrs(__unused void * param_not_used)
789{
790 /* disable interrupts before the first barrier */
791 boolean_t intr = ml_set_interrupts_enabled(FALSE);
792
793 current_cpu_datap()->cpu_iflag = intr;
794 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
795}
796
797static void
798teardown_restore_intrs(__unused void * param_not_used)
799{
800 /* restore interrupt flag following MTRR changes */
801 ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
802 DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
803}
804
805/*
806 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
807 * This is exported for use by kexts.
808 */
809void
810mp_rendezvous_no_intrs(
811 void (*action_func)(void *),
812 void *arg)
813{
814 mp_rendezvous(setup_disable_intrs,
815 action_func,
816 teardown_restore_intrs,
817 arg);
818}
819
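/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how a caller, e.g. MTRR/PAT setup code or a kext, might use
 * mp_rendezvous_no_intrs() to apply a change on every running cpu with
 * interrupts disabled. The action routine and argument type are hypothetical.
 */
#if 0	/* example only */
typedef struct {
	uint64_t	new_value;
} example_update_t;

static void
example_update_action(void *arg)
{
	example_update_t	*up = (example_update_t *) arg;

	/* Executed on every running cpu, with interrupts disabled */
	(void) up->new_value;
}

static void
example_update_all_cpus(uint64_t value)
{
	example_update_t	up = { .new_value = value };

	mp_rendezvous_no_intrs(example_update_action, (void *) &up);
}
#endif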
820void
821handle_pending_TLB_flushes(void)
822{
823 volatile int *my_word = &current_cpu_datap()->cpu_signals;
824
825 if (i_bit(MP_TLB_FLUSH, my_word)) {
826 DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
827 i_bit_clear(MP_TLB_FLUSH, my_word);
828 pmap_update_interrupt();
829 }
830}
831
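/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the spin-wait idiom used throughout this file -- while waiting with
 * interrupts disabled, keep servicing TLB shootdown requests so that other
 * processors are not left stalled in PMAP_UPDATE_TLBS(). The helper below
 * is hypothetical.
 */
#if 0	/* example only */
static void
example_spin_until(volatile long *counter, long target, boolean_t intrs_enabled)
{
	while (*counter < target) {
		if (!intrs_enabled)
			handle_pending_TLB_flushes();
		cpu_pause();
	}
}
#endif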
832/*
833 * This is called from cpu_signal_handler() to process an MP_CALL signal.
834 */
835static void
836mp_cpus_call_action(void)
837{
838 if (mp_rv_action_func != NULL)
839 mp_rv_action_func(mp_rv_func_arg);
840 atomic_incl(&mp_rv_complete, 1);
841}
842
843/*
844 * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
845 * If the mode is SYNC, the function is called serially on the target cpus
846 * in logical cpu order. If the mode is ASYNC, the function is called in
847 * parallel over the specified cpus.
848 * The action function may be NULL.
849 * The cpu mask may include the local cpu. Offline cpus are ignored.
850 * Return does not occur until the function has completed on all cpus.
851 * The return value is the number of cpus on which the function was called.
852 */
853cpu_t
854mp_cpus_call(
855 cpumask_t cpus,
856 mp_sync_t mode,
857 void (*action_func)(void *),
858 void *arg)
859{
860 cpu_t cpu;
861 boolean_t intrs_enabled = ml_get_interrupts_enabled();
862 boolean_t call_self = FALSE;
863
864 if (!smp_initialized) {
865 if ((cpus & CPUMASK_SELF) == 0)
866 return 0;
867 if (action_func != NULL) {
868 (void) ml_set_interrupts_enabled(FALSE);
869 action_func(arg);
870 ml_set_interrupts_enabled(intrs_enabled);
871 }
872 return 1;
873 }
874
875 /* obtain rendezvous lock */
876 simple_lock(&mp_rv_lock);
877
878 /* Use the rendezvous data structures for this call */
879 mp_rv_action_func = action_func;
880 mp_rv_func_arg = arg;
881 mp_rv_ncpus = 0;
882 mp_rv_complete = 0;
883
884 simple_lock(&x86_topo_lock);
885 for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
886 if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
887 !cpu_datap(cpu)->cpu_running)
888 continue;
889 if (cpu == (cpu_t) cpu_number()) {
890 /*
891 * We don't IPI ourself and if calling asynchronously,
892 * we defer our call until we have signalled all others.
893 */
894 call_self = TRUE;
895 if (mode == SYNC && action_func != NULL) {
896 (void) ml_set_interrupts_enabled(FALSE);
897 action_func(arg);
898 ml_set_interrupts_enabled(intrs_enabled);
899 }
900 } else {
901 /*
902 * Bump count of other cpus called and signal this cpu.
903 * Note: we signal asynchronously regardless of mode
904 * because we wait on mp_rv_complete either here
905 * (if mode == SYNC) or later (if mode == ASYNC).
906 * While spinning, poll for TLB flushes if interrupts
907 * are disabled.
908 */
909 mp_rv_ncpus++;
910 i386_signal_cpu(cpu, MP_CALL, ASYNC);
911 if (mode == SYNC) {
912 simple_unlock(&x86_topo_lock);
913 while (mp_rv_complete < mp_rv_ncpus) {
914 if (!intrs_enabled)
915 handle_pending_TLB_flushes();
916 cpu_pause();
917 }
918 simple_lock(&x86_topo_lock);
919 }
920 }
921 }
922 simple_unlock(&x86_topo_lock);
923
924 /*
925 * If calls are being made asynchronously,
926 * make the local call now if needed, and then
927 * wait for all other cpus to finish their calls.
928 */
929 if (mode == ASYNC) {
930 if (call_self && action_func != NULL) {
931 (void) ml_set_interrupts_enabled(FALSE);
932 action_func(arg);
933 ml_set_interrupts_enabled(intrs_enabled);
934 }
935 while (mp_rv_complete < mp_rv_ncpus) {
936 if (!intrs_enabled)
937 handle_pending_TLB_flushes();
938 cpu_pause();
939 }
940 }
941
942 /* Determine the number of cpus called */
943 cpu = mp_rv_ncpus + (call_self ? 1 : 0);
944
945 simple_unlock(&mp_rv_lock);
946
947 return cpu;
948}
949
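/*
 * Illustrative sketch (added commentary, not part of the original file):
 * invoking a routine synchronously on one other cpu plus the local cpu with
 * mp_cpus_call(); i386_deactivate_cpu() below uses the same interface to run
 * etimer_timer_expire() on the master cpu. The action routine here is
 * hypothetical.
 */
#if 0	/* example only */
static void
example_call_action(void *arg)
{
	/* Runs with interrupts disabled on each cpu in the mask */
	(void) arg;
}

static void
example_call_cpu_and_self(cpu_t other_cpu)
{
	cpu_t	ncalled;

	ncalled = mp_cpus_call(cpu_to_cpumask(other_cpu) | CPUMASK_SELF,
			       SYNC, example_call_action, NULL);
	kprintf("example: action ran on %d cpus\n", ncalled);
}
#endif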
950static void
951mp_broadcast_action(void)
952{
953 /* call action function */
954 if (mp_bc_action_func != NULL)
955 mp_bc_action_func(mp_bc_func_arg);
956
957 /* if we're the last one through, wake up the instigator */
958 if (atomic_decl_and_test(&mp_bc_count, 1))
959 thread_wakeup(((event_t)(uintptr_t) &mp_bc_count));
960}
961
962/*
963 * mp_broadcast() runs a given function on all active cpus.
964 * The caller blocks until the function has run on all cpus.
965 * The caller will also block if there is another pending broadcast.
966 */
967void
968mp_broadcast(
969 void (*action_func)(void *),
970 void *arg)
971{
972 if (!smp_initialized) {
973 if (action_func != NULL)
974 action_func(arg);
975 return;
976 }
977
978 /* obtain broadcast lock */
b0d623f7 979 lck_mtx_lock(&mp_bc_lock);
980
981 /* set static function pointers */
982 mp_bc_action_func = action_func;
983 mp_bc_func_arg = arg;
984
b0d623f7 985 assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT);
986
987 /*
988 * signal other processors, which will call mp_broadcast_action()
989 */
990 simple_lock(&x86_topo_lock);
991 mp_bc_ncpus = i386_active_cpus(); /* total including this cpu */
992 mp_bc_count = mp_bc_ncpus;
993 i386_signal_cpus(MP_BROADCAST, ASYNC);
994
995 /* call executor function on this cpu */
996 mp_broadcast_action();
997 simple_unlock(&x86_topo_lock);
998
999 /* block for all cpus to have run action_func */
1000 if (mp_bc_ncpus > 1)
1001 thread_block(THREAD_CONTINUE_NULL);
1002 else
1003 clear_wait(current_thread(), THREAD_AWAKENED);
1004
1005 /* release lock */
b0d623f7 1006 lck_mtx_unlock(&mp_bc_lock);
1007}
1008
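/*
 * Illustrative sketch (added commentary, not part of the original file):
 * using mp_broadcast() to run a routine once on every active cpu and block
 * until all of them have finished. The counter and routine are hypothetical.
 */
#if 0	/* example only */
static volatile long example_visit_count;

static void
example_visit_cpu(__unused void *arg)
{
	atomic_incl(&example_visit_count, 1);
}

static void
example_visit_all_cpus(void)
{
	example_visit_count = 0;
	mp_broadcast(example_visit_cpu, NULL);
	kprintf("example: visited %ld cpus\n", example_visit_count);
}
#endif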
1009void
1010i386_activate_cpu(void)
1011{
1012 cpu_data_t *cdp = current_cpu_datap();
1013
1014 assert(!ml_get_interrupts_enabled());
1015
1016 if (!smp_initialized) {
1017 cdp->cpu_running = TRUE;
1018 return;
1019 }
1020
1021 simple_lock(&x86_topo_lock);
1022 cdp->cpu_running = TRUE;
1023 simple_unlock(&x86_topo_lock);
1024}
1025
1026extern void etimer_timer_expire(void *arg);
1027
1028void
1029i386_deactivate_cpu(void)
1030{
1031 cpu_data_t *cdp = current_cpu_datap();
1032
1033 assert(!ml_get_interrupts_enabled());
1034
1035 simple_lock(&x86_topo_lock);
1036 cdp->cpu_running = FALSE;
1037 simple_unlock(&x86_topo_lock);
1038
1039 timer_queue_shutdown(&cdp->rtclock_timer.queue);
1040 cdp->rtclock_timer.deadline = EndOfAllTime;
1041 mp_cpus_call(cpu_to_cpumask(master_cpu), ASYNC, etimer_timer_expire, NULL);
1042
1043 /*
1044 * In case a rendezvous/broadcast/call was initiated to this cpu
1045 * before we cleared cpu_running, we must perform any actions due.
1046 */
1047 if (i_bit(MP_RENDEZVOUS, &cdp->cpu_signals))
1048 mp_rendezvous_action();
1049 if (i_bit(MP_BROADCAST, &cdp->cpu_signals))
1050 mp_broadcast_action();
1051 if (i_bit(MP_CALL, &cdp->cpu_signals))
1052 mp_cpus_call_action();
1053 cdp->cpu_signals = 0; /* all clear */
1054}
1055
1056int pmsafe_debug = 1;
1057
1058#if MACH_KDP
1059volatile boolean_t mp_kdp_trap = FALSE;
593a1d5f 1060volatile unsigned long mp_kdp_ncpus;
1061boolean_t mp_kdp_state;
1062
1063
1064void
1065mp_kdp_enter(void)
1066{
1067 unsigned int cpu;
1068 unsigned int ncpus;
593a1d5f 1069 unsigned int my_cpu;
91447636 1070 uint64_t tsc_timeout;
1071
1072 DBG("mp_kdp_enter()\n");
1073
1074 /*
1075 * Here to enter the debugger.
1076 * In case of races, only one cpu is allowed to enter kdp after
1077 * stopping others.
1078 */
91447636 1079 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
55e303ae 1080 simple_lock(&mp_kdp_lock);
b0d623f7 1081 debugger_entry_time = mach_absolute_time();
1082 if (pmsafe_debug)
1083 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
1084
1085 while (mp_kdp_trap) {
1086 simple_unlock(&mp_kdp_lock);
1087 DBG("mp_kdp_enter() race lost\n");
1088#if MACH_KDP
1089 mp_kdp_wait(TRUE, FALSE);
1090#endif
1091 simple_lock(&mp_kdp_lock);
1092 }
1093 my_cpu = cpu_number();
1094 debugger_cpu = my_cpu;
1095 mp_kdp_ncpus = 1; /* self */
1096 mp_kdp_trap = TRUE;
1097 simple_unlock(&mp_kdp_lock);
55e303ae 1098
1099 /*
1100 * Deliver a nudge to other cpus, counting how many
1101 */
55e303ae 1102 DBG("mp_kdp_enter() signaling other processors\n");
1103 if (force_immediate_debugger_NMI == FALSE) {
1104 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
1105 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1106 continue;
1107 ncpus++;
1108 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1109 }
1110 /*
1111 * Wait for other processors to synchronize
1112 */
1113 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
0c530ab8 1114
1115 /*
1116 * This timeout is rather arbitrary; we don't want to NMI
1117 * processors that are executing at potentially
1118 * "unsafe-to-interrupt" points such as the trampolines,
1119 * but neither do we want to lose state by waiting too long.
1120 */
1121 tsc_timeout = rdtsc64() + (ncpus * 1000 * 1000);
0c530ab8 1122
1123 while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
1124 /*
1125 * A TLB shootdown request may be pending--this would
1126 * result in the requesting processor waiting in
1127 * PMAP_UPDATE_TLBS() until this processor deals with it.
1128 * Process it, so it can now enter mp_kdp_wait()
1129 */
1130 handle_pending_TLB_flushes();
1131 cpu_pause();
1132 }
1133 /* If we've timed out, and some processor(s) are still unresponsive,
1134 * interrupt them with an NMI via the local APIC.
0c530ab8 1135 */
1136 if (mp_kdp_ncpus != ncpus) {
1137 for (cpu = 0; cpu < real_ncpus; cpu++) {
1138 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1139 continue;
1140 if (cpu_signal_pending(cpu, MP_KDP))
1141 cpu_NMI_interrupt(cpu);
1142 }
1143 }
55e303ae 1144 }
2d21ac55 1145 else
1146 for (cpu = 0; cpu < real_ncpus; cpu++) {
1147 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1148 continue;
2d21ac55 1149 cpu_NMI_interrupt(cpu);
0c530ab8 1150 }
0c530ab8 1151
1152 DBG("mp_kdp_enter() %u processors done %s\n",
1153 mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
0c530ab8 1154
91447636 1155 postcode(MP_KDP_ENTER);
1156}
1157
1158static boolean_t
1159cpu_signal_pending(int cpu, mp_event_t event)
1160{
1161 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
1162 boolean_t retval = FALSE;
1163
1164 if (i_bit(event, signals))
1165 retval = TRUE;
1166 return retval;
1167}
1168
1169long kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func,
1170 void *arg0, void *arg1)
1171{
1172 if (lcpu > (real_ncpus - 1))
1173 return -1;
1174
1175 if (func == NULL)
1176 return -1;
1177
1178 kdp_xcpu_call_func.func = func;
1179 kdp_xcpu_call_func.ret = -1;
1180 kdp_xcpu_call_func.arg0 = arg0;
1181 kdp_xcpu_call_func.arg1 = arg1;
1182 kdp_xcpu_call_func.cpu = lcpu;
1183 DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu);
1184 while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE)
1185 cpu_pause();
1186 return kdp_xcpu_call_func.ret;
1187}
1188
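/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how debugger-side code might use kdp_x86_xcpu_invoke() to run a small
 * routine on another cpu while every cpu is held in mp_kdp_wait(). The
 * helpers below are hypothetical and assume the kdp_x86_xcpu_func_t
 * signature (arg0, arg1, cpu).
 */
#if 0	/* example only */
static long
example_remote_read(void *arg0, __unused void *arg1, __unused uint16_t lcpu)
{
	return (long) *(int *) arg0;
}

static long
example_xcpu_read(uint16_t target_cpu, int *addr)
{
	return kdp_x86_xcpu_invoke(target_cpu, example_remote_read, addr, NULL);
}
#endif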
1189static void
1190kdp_x86_xcpu_poll(void)
1191{
1192 if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) {
1193 kdp_xcpu_call_func.ret =
1194 kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0,
1195 kdp_xcpu_call_func.arg1,
1196 cpu_number());
1197 kdp_xcpu_call_func.cpu = KDP_XCPU_NONE;
1198 }
1199}
0c530ab8 1200
55e303ae 1201static void
b0d623f7 1202mp_kdp_wait(boolean_t flush, boolean_t isNMI)
55e303ae 1203{
6601e61a 1204 DBG("mp_kdp_wait()\n");
2d21ac55 1205 /* If an I/O port has been specified as a debugging aid, issue a read */
1206 panic_io_port_read();
1207
b0d623f7 1208#if CONFIG_MCA
1209 /* If we've trapped due to a machine-check, save MCA registers */
1210 mca_check_save();
b0d623f7 1211#endif
1212
1213 if (pmsafe_debug)
1214 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
1215
1216 atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
b0d623f7 1217 while (mp_kdp_trap || (isNMI == TRUE)) {
0c530ab8 1218 /*
1219 * A TLB shootdown request may be pending--this would result
1220 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
1221 * until this processor handles it.
1222 * Process it, so it can now enter mp_kdp_wait()
1223 */
1224 if (flush)
1225 handle_pending_TLB_flushes();
1226
1227 kdp_x86_xcpu_poll();
1228 cpu_pause();
1229 }
1230
1231 if (pmsafe_debug)
1232 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
1233
0c530ab8 1234 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1235 DBG("mp_kdp_wait() done\n");
1236}
1237
1238void
1239mp_kdp_exit(void)
1240{
1241 DBG("mp_kdp_exit()\n");
593a1d5f 1242 debugger_cpu = -1;
0c530ab8 1243 atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
1244
1245 debugger_exit_time = mach_absolute_time();
1246
55e303ae 1247 mp_kdp_trap = FALSE;
0c530ab8 1248 __asm__ volatile("mfence");
1249
1250 /* Wait for other processors to stop spinning. XXX needs timeout */
1251 DBG("mp_kdp_exit() waiting for processors to resume\n");
1252 while (mp_kdp_ncpus > 0) {
1253 /*
1254 * a TLB shootdown request may be pending... this would result in the requesting
1255 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1256 * Process it, so it can now enter mp_kdp_wait()
1257 */
1258 handle_pending_TLB_flushes();
1259
1260 cpu_pause();
1261 }
1262
1263 if (pmsafe_debug)
1264 pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);
1265
55e303ae 1266 DBG("mp_kdp_exit() done\n");
1267 (void) ml_set_interrupts_enabled(mp_kdp_state);
1268 postcode(0);
1269}
1270#endif /* MACH_KDP */
1271
1272boolean_t
1273mp_recent_debugger_activity() {
1274 return (((mach_absolute_time() - debugger_entry_time) < LastDebuggerEntryAllowance) ||
1275 ((mach_absolute_time() - debugger_exit_time) < LastDebuggerEntryAllowance));
1276}
1277
1278/*ARGSUSED*/
1279void
1280init_ast_check(
91447636 1281 __unused processor_t processor)
1282{
1283}
1284
1285void
1286cause_ast_check(
1287 processor_t processor)
1288{
b0d623f7 1289 int cpu = processor->cpu_id;
1290
1291 if (cpu != cpu_number()) {
1292 i386_signal_cpu(cpu, MP_AST, ASYNC);
1293 }
1294}
1295
0c530ab8 1296#if MACH_KDB
1297/*
1298 * invoke kdb on slave processors
1299 */
1300
1301void
1302remote_kdb(void)
1303{
1304 unsigned int my_cpu = cpu_number();
1305 unsigned int cpu;
1306 int kdb_ncpus;
1307 uint64_t tsc_timeout = 0;
55e303ae 1308
1309 mp_kdb_trap = TRUE;
1310 mp_kdb_ncpus = 1;
1311 for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
91447636 1312 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
55e303ae 1313 continue;
1314 kdb_ncpus++;
1315 i386_signal_cpu(cpu, MP_KDB, ASYNC);
89b3af67 1316 }
1317 DBG("remote_kdb() waiting for (%d) processors to suspend\n",kdb_ncpus);
1318
1319 tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);
1320
1321 while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
1322 /*
1323 * a TLB shootdown request may be pending... this would result in the requesting
1324 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1325 * Process it, so it can now enter mp_kdp_wait()
1326 */
1327 handle_pending_TLB_flushes();
1328
1329 cpu_pause();
1330 }
1331 DBG("mp_kdp_enter() %d processors done %s\n",
1332 mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
1333}
1334
1335static void
1336mp_kdb_wait(void)
1337{
1338 DBG("mp_kdb_wait()\n");
1339
2d21ac55 1340 /* If an I/O port has been specified as a debugging aid, issue a read */
1341 panic_io_port_read();
1342
1343 atomic_incl(&mp_kdb_ncpus, 1);
1344 while (mp_kdb_trap) {
1345 /*
1346 * a TLB shootdown request may be pending... this would result in the requesting
1347 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1348 * Process it, so it can now enter mp_kdp_wait()
1349 */
1350 handle_pending_TLB_flushes();
1351
1352 cpu_pause();
1353 }
1354 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1355 DBG("mp_kdb_wait() done\n");
1356}
1357
1358/*
1359 * Clear kdb interrupt
1360 */
1361
1362void
1363clear_kdb_intr(void)
1364{
1365 mp_disable_preemption();
91447636 1366 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1367 mp_enable_preemption();
1368}
1369
1370void
1371mp_kdb_exit(void)
1372{
1373 DBG("mp_kdb_exit()\n");
1374 atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
1375 mp_kdb_trap = FALSE;
1376 __asm__ volatile("mfence");
1377
1378 while (mp_kdb_ncpus > 0) {
1379 /*
1380 * a TLB shootdown request may be pending... this would result in the requesting
1381 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
1382 * Process it, so it can now enter mp_kdp_wait()
1383 */
1384 handle_pending_TLB_flushes();
1385
1386 cpu_pause();
1387 }
2d21ac55 1388
1389 DBG("mp_kdb_exit() done\n");
1390}
1391
1392#endif /* MACH_KDB */
1393
1394void
1395slave_machine_init(void *param)
1396{
1397 /*
0c530ab8 1398 * Here in process context, but with interrupts disabled.
1399 */
1400 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1401
1402 if (param == FULL_SLAVE_INIT) {
1403 /*
1404 * Cold start
1405 */
1406 clock_init();
0c530ab8 1407
1408 cpu_machine_init(); /* Interrupts enabled hereafter */
1409 }
1410}
1411
b0d623f7 1412#undef cpu_number
1413int cpu_number(void)
1414{
1415 return get_cpu_number();
1416}
1417
1418#if MACH_KDB
1419#include <ddb/db_output.h>
1420
1421#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1422
1423
1424#if TRAP_DEBUG
1425#define MTRAPS 100
1426struct mp_trap_hist_struct {
1427 unsigned char type;
1428 unsigned char data[5];
1429} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1430 *max_trap_hist = &trap_hist[MTRAPS];
1431
1432void db_trap_hist(void);
1433
1434/*
1435 * SPL:
1436 * 1: new spl
1437 * 2: old spl
1438 * 3: new tpr
1439 * 4: old tpr
1440 * INT:
1441 * 1: int vec
1442 * 2: old spl
1443 * 3: new spl
1444 * 4: post eoi tpr
1445 * 5: exit tpr
1446 */
1447
1448void
1449db_trap_hist(void)
1450{
1451 int i,j;
1452 for(i=0;i<MTRAPS;i++)
1453 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1454 db_printf("%s%s",
1455 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1456 (trap_hist[i].type == 1)?"SPL":"INT");
1457 for(j=0;j<5;j++)
1458 db_printf(" %02x", trap_hist[i].data[j]);
1459 db_printf("\n");
1460 }
1461
1462}
1463#endif /* TRAP_DEBUG */
1464#endif /* MACH_KDB */
1465