/* osfmk/i386/mp.c (xnu-792.10.96) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach_rt.h>
#include <mach_kdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>
#include <gprof.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/pms.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <profiling/profile-mk.h>

#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/ipl.h>
#include <i386/fpu.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mtrr.h>
#include <i386/postcode.h>
#include <i386/perfmon.h>
#include <i386/cpu_threads.h>
#include <i386/mp_desc.h>
#include <i386/trap.h>
#include <i386/machine_routines.h>
#include <i386/pmCPU.h>
#include <i386/hpet.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <sys/kdebug.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#if MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif	/* MP_DEBUG */

/* Initialize lapic_id so cpu_number() works on non-SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

static i386_intr_func_t	lapic_timer_func;
static i386_intr_func_t	lapic_pmi_func;
static i386_intr_func_t	lapic_thermal_func;

/* TRUE if the local APIC was enabled by the OS, not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;

void	slave_boot_init(void);

#if MACH_KDB
static void	mp_kdb_wait(void);
volatile boolean_t	mp_kdb_trap = FALSE;
volatile long	mp_kdb_ncpus = 0;
#endif

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

static int		NMIInterruptHandler(void *regs);
static boolean_t	cpu_signal_pending(int cpu, mp_event_t event);
static void		cpu_NMI_interrupt(int cpu);

boolean_t	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);

decl_mutex_data(static, mp_cpu_boot_lock);

/* Variables needed for MP rendezvous. */
static void	(*mp_rv_setup_func)(void *arg);
static void	(*mp_rv_action_func)(void *arg);
static void	(*mp_rv_teardown_func)(void *arg);
static void	*mp_rv_func_arg;
static int	mp_rv_ncpus;
static volatile long	mp_rv_waiters[2];
decl_simple_lock_data(,mp_rv_lock);

int	lapic_to_cpu[MAX_CPUS];
int	cpu_to_lapic[MAX_CPUS];

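/*
 * lapic_to_cpu[] and cpu_to_lapic[] form a bidirectional map between
 * kernel cpu numbers and local APIC IDs; -1 marks an unused slot.
 */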
static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		lapic_to_cpu[i] = -1;
		cpu_to_lapic[i] = -1;
	}
}

void
lapic_cpu_map(int apic_id, int cpu)
{
	cpu_to_lapic[cpu] = apic_id;
	lapic_to_cpu[apic_id] = cpu;
}

/*
 * Retrieve the local apic ID of a cpu.
 *
 * Returns the local apic ID for the given processor.
 * If the processor does not exist or the apic is not configured, returns -1.
 */

uint32_t
ml_get_apicid(uint32_t cpu)
{
	if (cpu >= (uint32_t)MAX_CPUS)
		return 0xFFFFFFFF;	/* Return -1 if cpu too big */

	/* Return the apic ID (or -1 if not configured) */
	return (uint32_t)cpu_to_lapic[cpu];

}

#ifdef MP_DEBUG
static void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_CPUS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#define LAPIC_CPU_MAP_DUMP()	lapic_cpu_map_dump()
#define LAPIC_DUMP()		lapic_dump()
#else
#define LAPIC_CPU_MAP_DUMP()
#define LAPIC_DUMP()
#endif	/* MP_DEBUG */

#if GPROF
/*
 * Initialize dummy structs for profiling. These aren't used but
 * allow hertz_tick() to be built with GPROF defined.
 */
struct profile_vars _profile_vars;
struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
#define GPROF_INIT()						\
{								\
	int	i;						\
								\
	/* Hack to initialize pointers to unused profiling structs */	\
	for (i = 1; i < MAX_CPUS; i++)				\
		_profile_vars_cpus[i] = &_profile_vars;		\
}
#else
#define GPROF_INIT()
#endif /* GPROF */

55e303ae
A
221void
222smp_init(void)
55e303ae
A
223{
224 int result;
225 vm_map_entry_t entry;
226 uint32_t lo;
227 uint32_t hi;
228 boolean_t is_boot_processor;
229 boolean_t is_lapic_enabled;
91447636
A
230 vm_offset_t lapic_base;
231
232 simple_lock_init(&mp_kdp_lock, 0);
233 simple_lock_init(&mp_rv_lock, 0);
234 mutex_init(&mp_cpu_boot_lock, 0);
235 console_init();
55e303ae
A
236
237 /* Local APIC? */
91447636 238 if (!lapic_probe())
55e303ae
A
239 return;
240
55e303ae
A
241 /* Examine the local APIC state */
242 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
243 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
244 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
91447636
A
245 lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
246 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
55e303ae
A
247 is_lapic_enabled ? "enabled" : "disabled",
248 is_boot_processor ? "BSP" : "AP");
91447636
A
249 if (!is_boot_processor || !is_lapic_enabled)
250 panic("Unexpected local APIC state\n");
55e303ae
A
251
252 /* Establish a map to the local apic */
253 lapic_start = vm_map_min(kernel_map);
254 result = vm_map_find_space(kernel_map, &lapic_start,
c0fea474
A
255 round_page(LAPIC_SIZE), 0,
256 VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
55e303ae 257 if (result != KERN_SUCCESS) {
91447636 258 panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
55e303ae
A
259 }
260 vm_map_unlock(kernel_map);
c0fea474
A
261/* Map in the local APIC non-cacheable, as recommended by Intel
262 * in section 8.4.1 of the "System Programming Guide".
263 */
55e303ae
A
264 pmap_enter(pmap_kernel(),
265 lapic_start,
91447636 266 (ppnum_t) i386_btop(lapic_base),
c0fea474
A
267 VM_PROT_READ|VM_PROT_WRITE,
268 VM_WIMG_IO,
55e303ae
A
269 TRUE);
270 lapic_id = (unsigned long)(lapic_start + LAPIC_ID);
271
91447636
A
272 if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
273 printf("Local APIC version not 0x14 as expected\n");
274 }
275
55e303ae
A
276 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
277 lapic_cpu_map_init();
278 lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
c0fea474 279 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
55e303ae
A
280
281 lapic_init();
282
91447636
A
283 cpu_thread_init();
284
91447636
A
285 GPROF_INIT();
286 DBGLOG_CPU_INIT(master_cpu);
287
55e303ae 288 slave_boot_init();
55e303ae
A
289
290 smp_initialized = TRUE;
291
292 return;
293}
294
295
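/*
 * The ESR is a write/read register: per Intel's SDM, software writes it
 * once to latch the current error bits before reading, and back-to-back
 * writes (as in lapic_esr_clear below) clear them.
 */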
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}

static const char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};

void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("  Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_THERMAL)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	for (i = 0xf; i >= 0; i--)
		kprintf("%x%x%x%x", i, i, i, i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}

#if MACH_KDB
/*
 *	Displays apic junk
 *
 *	da
 */
void
db_apic(__unused db_expr_t addr,
	__unused int have_addr,
	__unused db_expr_t count,
	__unused char *modif)
{

	lapic_dump();

	return;
}

#endif

boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * Mobile Pentiums:
		 * There may be a local APIC which wasn't enabled by the BIOS.
		 * So we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		lo |= MSR_IA32_APIC_BASE_ENABLE;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}

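/*
 * Mask all local APIC interrupt sources, software-disable the APIC and
 * clear the enable bit in the APIC base MSR.  Only done if it was this
 * OS (see lapic_probe above) that enabled the APIC in the first place.
 */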
55e303ae 454void
91447636 455lapic_shutdown(void)
55e303ae 456{
91447636
A
457 uint32_t lo;
458 uint32_t hi;
459 uint32_t value;
460
461 /* Shutdown if local APIC was enabled by OS */
462 if (lapic_os_enabled == FALSE)
463 return;
55e303ae
A
464
465 mp_disable_preemption();
466
91447636
A
467 /* ExtINT: masked */
468 if (get_cpu_number() == master_cpu) {
469 value = LAPIC_REG(LVT_LINT0);
470 value |= LAPIC_LVT_MASKED;
471 LAPIC_REG(LVT_LINT0) = value;
472 }
473
474 /* Timer: masked */
475 LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;
476
477 /* Perfmon: masked */
478 LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;
479
480 /* Error: masked */
481 LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;
482
483 /* APIC software disabled */
484 LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;
485
486 /* Bypass the APIC completely and update cpu features */
487 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
488 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
489 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
c0fea474 490 cpuid_set_info();
91447636
A
491
492 mp_enable_preemption();
493}

void
lapic_init(void)
{
	int	value;

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Accept all */
	LAPIC_REG(TPR) = 0;

	LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: unmasked, one-shot */
	LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

	/* Perfmon: unmasked */
	LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

	/* Thermal: unmasked */
	LAPIC_REG(LVT_THERMAL) = LAPIC_VECTOR(THERMAL);

	lapic_esr_clear();

	LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
}

void
lapic_set_timer_func(i386_intr_func_t func)
{
	lapic_timer_func = func;
}

void
lapic_set_timer(
	boolean_t		interrupt,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	boolean_t	state;
	uint32_t	timer_vector;

	state = ml_set_interrupts_enabled(FALSE);
	timer_vector = LAPIC_REG(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_REG(LVT_TIMER) = timer_vector;
	LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
	LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
	ml_set_interrupts_enabled(state);
}
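
/*
 * Illustrative use (a sketch, not a call made in this file; the count
 * value is arbitrary and the divide enumerator spelling is assumed
 * from i386/apic.h):
 *
 *	lapic_set_timer(TRUE, one_shot, divide_by_1, 100000);
 *
 * unmasks LVT_TIMER and arms a single countdown of 100000 ticks, after
 * which LAPIC_VECTOR(TIMER) is delivered and lapic_timer_func runs.
 */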

void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(FALSE);
	if (mode)
		*mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
				periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
	ml_set_interrupts_enabled(state);
}

void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_pmi_func = func;
}

void
lapic_set_thermal_func(i386_intr_func_t func)
{
	lapic_thermal_func = func;
}

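/*
 * Note: lapic_interrupt() below invokes the registered pmi and thermal
 * handlers with a NULL argument; only the timer handler is passed the
 * interrupted state.
 */
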
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}

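/*
 * Top-level local APIC interrupt dispatcher.  Returns 1 if the vector
 * was recognized and handled here (HPET comparator, NMI, or one of the
 * LVT/IPI sources switched on below) and 0 otherwise.
 */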
int
lapic_interrupt(int interrupt, x86_saved_state_t *state)
{
	int	retval = 0;

	/* Did we just field an interruption for the HPET comparator? */
	if (current_cpu_datap()->cpu_pmHpetVec == ((uint32_t)interrupt - 0x40)) {
		/* Yes, go handle it... */
		retval = HPETInterrupt();
		/* Was it really handled? */
		if (retval) {
			/* If so, EOI the 'rupt */
			_lapic_end_of_interrupt();
			/*
			 * and then leave,
			 * indicating that this has been handled
			 */
			return 1;
		}
	}

	interrupt -= lapic_interrupt_base;
	if (interrupt < 0) {
		if (interrupt == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base)) {
			retval = NMIInterruptHandler(state);
			_lapic_end_of_interrupt();
			return retval;
		}
		else
			return 0;
	}

	switch(interrupt) {
	case LAPIC_PERFCNT_INTERRUPT:
		if (lapic_pmi_func != NULL)
			(*lapic_pmi_func)(NULL);
		/* Clear interrupt masked */
		LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_TIMER_INTERRUPT:
		_lapic_end_of_interrupt();
		if (lapic_timer_func != NULL)
			(*lapic_timer_func)(state);
		retval = 1;
		break;
	case LAPIC_THERMAL_INTERRUPT:
		if (lapic_thermal_func != NULL)
			(*lapic_thermal_func)(NULL);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_ERROR_INTERRUPT:
		lapic_dump();
		panic("Local APIC error\n");
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_INTERPROCESSOR_INTERRUPT:
		_lapic_end_of_interrupt();
		cpu_signal_handler(state);
		retval = 1;
		break;
	}

	return retval;
}

void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * The timer is one-shot; start another quick countdown so
		 * that a fresh timer interrupt is delivered.
		 */
		if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}

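/*
 * Start the cpu in the given slot using the MP-spec INIT/STARTUP/STARTUP
 * IPI sequence, with the startup vector pointing at the real-mode
 * trampoline that slave_boot_init() copied to MP_BOOT.  We then poll
 * cpu_running, which the slave sets once it completes cpu_init().
 */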
kern_return_t
intel_startCPU(
	int slot_num)
{

	int	i = 1000;
	int	lapic = cpu_to_lapic[slot_num];

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

	/*
	 * Initialize (or re-initialize) the descriptor tables for this cpu.
	 * Propagate processor mode to slave.
	 */
	if (cpu_mode_is64bit())
		cpu_desc_init64(cpu_datap(slot_num), FALSE);
	else
		cpu_desc_init(cpu_datap(slot_num), FALSE);

	/* Serialize use of the slave boot stack. */
	mutex_lock(&mp_cpu_boot_lock);

	mp_disable_preemption();
	if (slot_num == get_cpu_number()) {
		mp_enable_preemption();
		mutex_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
	delay(10000);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

#ifdef	POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	while (i-- > 0) {
		if (cpu_datap(slot_num)->cpu_running)
			break;
		delay(10000);
	}

	mp_enable_preemption();
	mutex_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		kprintf("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
		delay(1000000);
		cpu_shutdown();
		return KERN_SUCCESS;
	} else {
		kprintf("Started cpu %d (lapic id %d)\n", slot_num, lapic);
		printf("Started CPU %02d\n", slot_num);
		return KERN_SUCCESS;
	}
}

extern char	slave_boot_base[];
extern char	slave_boot_end[];
extern void	slave_pstart(void);

void
slave_boot_init(void)
{
	DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
		slave_boot_base,
		kvtophys((vm_offset_t) slave_boot_base),
		MP_BOOT,
		slave_boot_end-slave_boot_base);

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) slave_boot_base),
		   (addr64_t) MP_BOOT,
		   slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	DBG("bzero_phys 0x%x sz 0x%x\n", MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
	bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	DBG("writing 0x%x at phys 0x%x\n",
		kvtophys((vm_offset_t) &slave_pstart), MP_MACH_START+MP_BOOT);
	ml_phys_write_word(MP_MACH_START+MP_BOOT,
			   (unsigned int)kvtophys((vm_offset_t) &slave_pstart));

	/* Flush caches */
	__asm__("wbinvd");
}
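
/*
 * Rough physical layout at MP_BOOT after slave_boot_init() (offsets are
 * symbolic; the actual values come from mp_slave_boot.h):
 *
 *	MP_BOOT ..................... real-mode trampoline code
 *	MP_BOOT+MP_BOOTSTACK-0x400 .. 0x400-byte zeroed boot stack
 *	MP_BOOT+MP_MACH_START ....... physical address of slave_pstart()
 */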

#if MP_DEBUG
cpu_signal_event_log_t	*cpu_signal[MAX_CPUS];
cpu_signal_event_log_t	*cpu_handle[MAX_CPUS];

MP_EVENT_NAME_DECL();

#endif	/* MP_DEBUG */

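/*
 * Handle an inter-processor interrupt: service and clear each event bit
 * pending in this cpu's signal word until none remain.
 */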
void
cpu_signal_handler(x86_saved_state_t *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if	MACH_KDB && MACH_ASSERT
	int		i = 100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &current_cpu_datap()->cpu_signals;

	do {
#if	MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler: signals did not clear");
#endif	/* MACH_KDB && MACH_ASSERT */
#if	MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			/* Ensure that the i386_kernel_state at the base of the
			 * current thread's stack (if any) is synchronized with
			 * the context at the moment of the interrupt, to
			 * facilitate access through the debugger.
			 * XXX 64-bit state?
			 */
			sync_iss_to_iks(saved_state32(regs));
			mp_kdp_wait();
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if	MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {

			i_bit_clear(MP_KDB, my_word);
			current_cpu_datap()->cpu_kdb_is_slave++;
			mp_kdb_wait();
			current_cpu_datap()->cpu_kdb_is_slave--;
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		} else if (i_bit(MP_CHUD, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_CHUD);
			i_bit_clear(MP_CHUD, my_word);
			chudxnu_cpu_signal_handler();
		}
	} while (*my_word);

	mp_enable_preemption();

}

/* We want this to show up in backtraces, so mark it noinline
 */
static int __attribute__((noinline))
NMIInterruptHandler(void *regs)
{
	boolean_t state = ml_set_interrupts_enabled(FALSE);
	sync_iss_to_iks_unconditionally(regs);
	mp_kdp_wait();
	(void) ml_set_interrupts_enabled(state);
	return 1;
}

#ifdef	MP_DEBUG
extern int	max_lock_loops;
#endif	/* MP_DEBUG */

int trappedalready = 0;	/* (BRINGUP) */

void
cpu_interrupt(int cpu)
{
	boolean_t	state;

	if (cpu_datap(cpu)->cpu_signals & 6) {	/* (BRINGUP) */
		kprintf("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
	}

	if (smp_initialized) {

#if MACH_KDB
//		if (!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) {	/* (BRINGUP) */
//			if (kdb_cpu != cpu_number()) {
//				trappedalready = 1;
//				panic("cpu_interrupt: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n",
//					cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
//			}
//		}
#endif

		/* Wait for previous interrupt to be delivered... */
#ifdef	MP_DEBUG
		int	pending_busy_count = 0;
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
			if (++pending_busy_count > max_lock_loops)
				panic("cpu_interrupt() deadlock\n");
#else
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif	/* MP_DEBUG */
			cpu_pause();
		}

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR) =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}

}
959
c0fea474
A
960/*
961 * Send a true NMI via the local APIC to the specified CPU.
962 */
963static void
964cpu_NMI_interrupt(int cpu)
965{
966 boolean_t state;
967
968 if (smp_initialized) {
969 state = ml_set_interrupts_enabled(FALSE);
970 LAPIC_REG(ICRD) =
971 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
972/* The vector is ignored in this case, the other CPU will come in on the
973 * NMI vector.
974 */
975 LAPIC_REG(ICR) =
976 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI;
977 (void) ml_set_interrupts_enabled(state);
978 }
979
980}
981
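/*
 * Post an event to a cpu's signal word and kick it with an IPI.  In
 * SYNC mode, spin until the target clears the bit in its
 * cpu_signal_handler(), retrying with a fresh TSC-based timeout each
 * time the wait expires.
 */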
void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	uint64_t	tsc_timeout;


	if (!cpu_datap(cpu)->cpu_running)
		return;

	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	cpu_interrupt(cpu);
	if (mode == SYNC) {
	   again:
		tsc_timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
			goto again;
		}
	}
	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
}

void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	unsigned int	cpu;
	unsigned int	my_cpu = cpu_number();

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}

int
i386_active_cpus(void)
{
	unsigned int	cpu;
	unsigned int	ncpus = 0;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu_datap(cpu)->cpu_running)
			ncpus++;
	}
	return(ncpus);
}

/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{

	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (mp_rv_waiters[0] < mp_rv_ncpus) {
		boolean_t intr = ml_set_interrupts_enabled(FALSE);
		/* poll for pesky tlb flushes */
		handle_pending_TLB_flushes();
		ml_set_interrupts_enabled(intr);
		cpu_pause();
	}
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (mp_rv_waiters[1] < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}

void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{

	if (!smp_initialized) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;	/* entry rendezvous count */
	mp_rv_waiters[1] = 0;	/* exit  rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}

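/*
 * Illustrative caller (a sketch only; flush_cache_action and its use
 * here are hypothetical, not code from this file):
 *
 *	static void
 *	flush_cache_action(__unused void *arg)
 *	{
 *		__asm__("wbinvd");
 *	}
 *	...
 *	mp_rendezvous(NULL, flush_cache_action, NULL, NULL);
 *
 * Every active cpu executes flush_cache_action() between the two
 * barriers; NULL setup/teardown hooks are simply skipped.
 */
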
void
mp_rendezvous_break_lock(void)
{
	simple_lock_init(&mp_rv_lock, 0);
}

static void
setup_disable_intrs(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);

	current_cpu_datap()->cpu_iflag = intr;
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
teardown_restore_intrs(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

/*
 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
 * This is exported for use by kexts.
 */
void
mp_rendezvous_no_intrs(
	      void (*action_func)(void *),
	      void *arg)
{
	mp_rendezvous(setup_disable_intrs,
		      action_func,
		      teardown_restore_intrs,
		      arg);
}

void
handle_pending_TLB_flushes(void)
{
	volatile int	*my_word = &current_cpu_datap()->cpu_signals;

	if (i_bit(MP_TLB_FLUSH, my_word)) {
		DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
		i_bit_clear(MP_TLB_FLUSH, my_word);
		pmap_update_interrupt();
	}
}


#if	MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
volatile long		mp_kdp_ncpus;
boolean_t		mp_kdp_state;


void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);

	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;
	simple_unlock(&mp_kdp_lock);

	/*
	 * Deliver a nudge to other cpus, counting how many
	 */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}
	/*
	 * Wait for other processors to synchronize
	 */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);

	tsc_timeout = rdtsc64() + (ncpus * 100 * 1000 * 1000);

	while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
		/*
		 * A TLB shootdown request may be pending... this would
		 * result in the requesting processor waiting in
		 * PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}
	/* If we've timed out, and some processor(s) are still unresponsive,
	 * interrupt them with an NMI via the local APIC.
	 */
	if (mp_kdp_ncpus != ncpus) {
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
				continue;
			if (cpu_signal_pending(cpu, MP_KDP))
				cpu_NMI_interrupt(cpu);
		}
	}

	DBG("mp_kdp_enter() %d processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");

	postcode(MP_KDP_ENTER);
}

static boolean_t
cpu_signal_pending(int cpu, mp_event_t event)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	boolean_t retval = FALSE;

	if (i_bit(event, signals))
		retval = TRUE;
	return retval;
}

static void
mp_kdp_wait(void)
{
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
}

void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;
	__asm__ volatile("mfence");

	/* Wait for other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (mp_kdp_ncpus > 0) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
	(void) ml_set_interrupts_enabled(mp_kdp_state);
	postcode(0);
}
#endif	/* MACH_KDP */

/*ARGSUSED*/
void
init_ast_check(
	__unused processor_t processor)
{
}

void
cause_ast_check(
	processor_t processor)
{
	int	cpu = PROCESSOR_DATA(processor, slot_num);

	if (cpu != cpu_number()) {
		i386_signal_cpu(cpu, MP_AST, ASYNC);
	}
}

#if	MACH_KDB
/*
 * invoke kdb on slave processors
 */

void
remote_kdb(void)
{
	unsigned int	my_cpu = cpu_number();
	unsigned int	cpu;
	int		kdb_ncpus;
	uint64_t	tsc_timeout = 0;

	mp_kdb_trap = TRUE;
	mp_kdb_ncpus = 1;
	for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		kdb_ncpus++;
		i386_signal_cpu(cpu, MP_KDB, ASYNC);
	}
	DBG("remote_kdb() waiting for (%d) processors to suspend\n", kdb_ncpus);

	tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);

	while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it.
		 * Process it, so it can now enter mp_kdb_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("remote_kdb() %d processors done %s\n",
		mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
}

static void
mp_kdb_wait(void)
{
	DBG("mp_kdb_wait()\n");
	atomic_incl(&mp_kdb_ncpus, 1);
	while (mp_kdb_trap) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it.
		 * Process it, so it can now enter mp_kdb_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	atomic_decl(&mp_kdb_ncpus, 1);
	DBG("mp_kdb_wait() done\n");
}

/*
 * Clear kdb interrupt
 */

void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
	mp_enable_preemption();
}

void
mp_kdb_exit(void)
{
	DBG("mp_kdb_exit()\n");
	atomic_decl(&mp_kdb_ncpus, 1);
	mp_kdb_trap = FALSE;
	__asm__ volatile("mfence");

	while (mp_kdb_ncpus > 0) {
		/*
		 * A TLB shootdown request may be pending... this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor deals with it.
		 * Process it, so it can now enter mp_kdb_wait()
		 */
		handle_pending_TLB_flushes();

		cpu_pause();
	}
	DBG("mp_kdb_exit() done\n");
}

#endif /* MACH_KDB */

/*
 * i386_init_slave() is called from pstart.
 * We're on the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode.  We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		get_cpu_number(), get_cpu_phys_number());

	assert(!ml_get_interrupts_enabled());
	if (cpu_mode_is64bit()) {
		cpu_IA32e_enable(current_cpu_datap());
		cpu_desc_load64(current_cpu_datap());
		fast_syscall_init64();
	} else {
		fast_syscall_init();
	}

	lapic_init();

	LAPIC_DUMP();
	LAPIC_CPU_MAP_DUMP();

	init_fpu();

	mtrr_update_cpu();

	pat_init();

	cpu_thread_init();

	cpu_init();	/* Sets cpu_running which starter cpu waits for */

	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}

void
slave_machine_init(void)
{
	/*
	 * Here in process context, but with interrupts disabled.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());

	clock_init();

	cpu_machine_init();	/* Interrupts enabled hereafter */
}

#undef cpu_number
int cpu_number(void)
{
	return get_cpu_number();
}

#if	MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */


#if	TRAP_DEBUG
#define MTRAPS 100
struct mp_trap_hist_struct {
	unsigned char type;
	unsigned char data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
    *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

/*
 * SPL:
 *	1: new spl
 *	2: old spl
 *	3: new tpr
 *	4: old tpr
 * INT:
 *	1: int vec
 *	2: old spl
 *	3: new spl
 *	4: post eoi tpr
 *	5: exit tpr
 */

void
db_trap_hist(void)
{
	int i, j;

	for (i = 0; i < MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				  (&trap_hist[i] >= cur_trap_hist) ? "*" : " ",
				  (trap_hist[i].type == 1) ? "SPL" : "INT");
			for (j = 0; j < 5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}

}
#endif	/* TRAP_DEBUG */
#endif	/* MACH_KDB */