apple/xnu (xnu-792.6.56) / osfmk / i386 / mp.c
1/*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * @OSF_COPYRIGHT@
25 */
26
27#include <mach_rt.h>
28#include <mach_kdb.h>
29#include <mach_kdp.h>
30#include <mach_ldebug.h>
31#include <gprof.h>
32
33#include <mach/mach_types.h>
34#include <mach/kern_return.h>
35
36#include <kern/kern_types.h>
37#include <kern/startup.h>
38#include <kern/processor.h>
39#include <kern/cpu_number.h>
40#include <kern/cpu_data.h>
41#include <kern/assert.h>
42#include <kern/machine.h>
43
44#include <vm/vm_map.h>
45#include <vm/vm_kern.h>
46
47#include <profiling/profile-mk.h>
48
49#include <i386/mp.h>
50#include <i386/mp_events.h>
51#include <i386/mp_slave_boot.h>
52#include <i386/apic.h>
53#include <i386/ipl.h>
54#include <i386/fpu.h>
55#include <i386/pio.h>
56#include <i386/cpuid.h>
57#include <i386/proc_reg.h>
58#include <i386/machine_cpu.h>
59#include <i386/misc_protos.h>
60#include <i386/mtrr.h>
61#include <i386/postcode.h>
62#include <i386/perfmon.h>
63#include <i386/cpu_threads.h>
64#include <i386/mp_desc.h>
65
66#if MP_DEBUG
67#define PAUSE delay(1000000)
68#define DBG(x...) kprintf(x)
69#else
70#define DBG(x...)
71#define PAUSE
72#endif /* MP_DEBUG */
73
74/*
75 * By default, use high vectors to leave vector space for systems
76 * with multiple I/O APICs. However, some systems that boot with the
77 * local APIC disabled will hang in SMM when vectors greater than
78 * 0x5F are used. Those systems are not expected to have an I/O APIC,
79 * so 16 (0x50 - 0x40) vectors for legacy PIC support are sufficient.
80 */
81#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
82#define LAPIC_REDUCED_INTERRUPT_BASE 0x50
83/*
84 * Specific lapic interrupts are relative to this base:
85 */
86#define LAPIC_PERFCNT_INTERRUPT 0xB
87#define LAPIC_TIMER_INTERRUPT 0xC
88#define LAPIC_SPURIOUS_INTERRUPT 0xD
89#define LAPIC_INTERPROCESSOR_INTERRUPT 0xE
90#define LAPIC_ERROR_INTERRUPT 0xF
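/*
 * Worked example (editorial addition, not in the original source): with the
 * default base of 0xD0, LAPIC_VECTOR(TIMER) evaluates to 0xD0 + 0xC = 0xDC
 * and LAPIC_VECTOR(ERROR) to 0xDF.  With the reduced base of 0x50 the five
 * sources land at 0x5B-0x5F, at or below the 0x5F SMM limit noted above.
 */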
91
92/* Initialize lapic_id so cpu_number() works on non-SMP systems */
93unsigned long lapic_id_initdata = 0;
94unsigned long lapic_id = (unsigned long)&lapic_id_initdata;
95vm_offset_t lapic_start;
96
97static i386_intr_func_t lapic_timer_func;
98static i386_intr_func_t lapic_pmi_func;
99
100/* TRUE if local APIC was enabled by the OS, not by the BIOS */
101static boolean_t lapic_os_enabled = FALSE;
102
103/* Base vector for local APIC interrupt sources */
104int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
105
106void slave_boot_init(void);
107
108static void mp_kdp_wait(void);
109static void mp_rendezvous_action(void);
110
111boolean_t smp_initialized = FALSE;
112
113decl_simple_lock_data(,mp_kdp_lock);
114
115decl_mutex_data(static, mp_cpu_boot_lock);
116
117/* Variables needed for MP rendezvous. */
118static void (*mp_rv_setup_func)(void *arg);
119static void (*mp_rv_action_func)(void *arg);
120static void (*mp_rv_teardown_func)(void *arg);
121static void *mp_rv_func_arg;
122static int mp_rv_ncpus;
123static long mp_rv_waiters[2];
124decl_simple_lock_data(,mp_rv_lock);
125
126int lapic_to_cpu[MAX_CPUS];
127int cpu_to_lapic[MAX_CPUS];
128
129static void
130lapic_cpu_map_init(void)
131{
132 int i;
133
134 for (i = 0; i < MAX_CPUS; i++) {
135 lapic_to_cpu[i] = -1;
136 cpu_to_lapic[i] = -1;
137 }
138}
139
140void
141lapic_cpu_map(int apic_id, int cpu)
142{
143 cpu_to_lapic[cpu] = apic_id;
144 lapic_to_cpu[apic_id] = cpu;
145}
146
147#ifdef MP_DEBUG
148static void
149lapic_cpu_map_dump(void)
150{
151 int i;
152
153 for (i = 0; i < MAX_CPUS; i++) {
154 if (cpu_to_lapic[i] == -1)
155 continue;
156 kprintf("cpu_to_lapic[%d]: %d\n",
157 i, cpu_to_lapic[i]);
158 }
159 for (i = 0; i < MAX_CPUS; i++) {
160 if (lapic_to_cpu[i] == -1)
161 continue;
162 kprintf("lapic_to_cpu[%d]: %d\n",
163 i, lapic_to_cpu[i]);
164 }
165}
166#define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump()
167#define LAPIC_DUMP() lapic_dump()
168#else
169#define LAPIC_CPU_MAP_DUMP()
170#define LAPIC_DUMP()
171#endif /* MP_DEBUG */
172
173#define LAPIC_REG(reg) \
174 (*((volatile int *)(lapic_start + LAPIC_##reg)))
175#define LAPIC_REG_OFFSET(reg,off) \
176 (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
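/*
 * For example (editorial note, not in the original source), LAPIC_REG(ID)
 * expands to (*((volatile int *)(lapic_start + LAPIC_ID))): a volatile
 * 32-bit access at the register's offset within the mapped local APIC page.
 */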
177
178#define LAPIC_VECTOR(src) \
179 (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
180
181#define LAPIC_ISR_IS_SET(base,src) \
182 (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
183 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
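/*
 * Illustrative note (editorial addition): the 256 in-service bits are spread
 * across eight 32-bit registers spaced 0x10 apart, so vector v is found in
 * ISR word v/32 at bit v%32; e.g. the reduced-base TIMER vector 0x5C (92)
 * sits in the third ISR word (92/32 = 2) at bit 28 (92%32).
 */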
184
185#if GPROF
186/*
187 * Initialize dummy structs for profiling. These aren't used but
188 * allow hertz_tick() to be built with GPROF defined.
189 */
190struct profile_vars _profile_vars;
191struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
192#define GPROF_INIT() \
193{ \
194 int i; \
195 \
196 /* Hack to initialize pointers to unused profiling structs */ \
197 for (i = 1; i < MAX_CPUS; i++) \
198 _profile_vars_cpus[i] = &_profile_vars; \
199}
200#else
201#define GPROF_INIT()
202#endif /* GPROF */
203
204extern void master_up(void);
205
206void
207smp_init(void)
208{
209 int result;
210 vm_map_entry_t entry;
211 uint32_t lo;
212 uint32_t hi;
213 boolean_t is_boot_processor;
214 boolean_t is_lapic_enabled;
215 vm_offset_t lapic_base;
216
217 simple_lock_init(&mp_kdp_lock, 0);
218 simple_lock_init(&mp_rv_lock, 0);
219 mutex_init(&mp_cpu_boot_lock, 0);
220 console_init();
221
222 /* Local APIC? */
223 if (!lapic_probe())
224 return;
225
226 /* Examine the local APIC state */
227 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
228 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
229 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
230 lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
231 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
232 is_lapic_enabled ? "enabled" : "disabled",
233 is_boot_processor ? "BSP" : "AP");
234 if (!is_boot_processor || !is_lapic_enabled)
235 panic("Unexpected local APIC state\n");
236
237 /* Establish a map to the local apic */
238 lapic_start = vm_map_min(kernel_map);
239 result = vm_map_find_space(kernel_map, &lapic_start,
240 round_page(LAPIC_SIZE), 0, &entry);
241 if (result != KERN_SUCCESS) {
242 panic("smp_init: vm_map_find_space FAILED (err=%d)", result);
243 }
244 vm_map_unlock(kernel_map);
245 pmap_enter(pmap_kernel(),
246 lapic_start,
247 (ppnum_t) i386_btop(lapic_base),
248 VM_PROT_READ|VM_PROT_WRITE,
249 VM_WIMG_USE_DEFAULT,
250 TRUE);
251 lapic_id = (unsigned long)(lapic_start + LAPIC_ID);
252
253 if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
254 printf("Local APIC version not 0x14 as expected\n");
255 }
256
257 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
258 lapic_cpu_map_init();
259 lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
260
261 lapic_init();
262
263 cpu_thread_init();
264
265 if (pmc_init() != KERN_SUCCESS)
266 printf("Performance counters not available\n");
267
268 GPROF_INIT();
269 DBGLOG_CPU_INIT(master_cpu);
270
271 slave_boot_init();
272 master_up();
273
274 smp_initialized = TRUE;
275
276 return;
277}
278
279
280static int
281lapic_esr_read(void)
282{
283 /* write-read register */
284 LAPIC_REG(ERROR_STATUS) = 0;
285 return LAPIC_REG(ERROR_STATUS);
286}
287
288static void
289lapic_esr_clear(void)
290{
291 LAPIC_REG(ERROR_STATUS) = 0;
292 LAPIC_REG(ERROR_STATUS) = 0;
293}
294
295static const char *DM[8] = {
296 "Fixed",
297 "Lowest Priority",
298 "Invalid",
299 "Invalid",
300 "NMI",
301 "Reset",
302 "Invalid",
303 "ExtINT"};
304
305void
306lapic_dump(void)
307{
308 int i;
309
310#define BOOL(a) ((a)?' ':'!')
311
312 kprintf("LAPIC %d at 0x%x version 0x%x\n",
313 (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
314 lapic_start,
315 LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
316 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
317 LAPIC_REG(TPR)&LAPIC_TPR_MASK,
318 LAPIC_REG(APR)&LAPIC_APR_MASK,
319 LAPIC_REG(PPR)&LAPIC_PPR_MASK);
320 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
321 LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
322 LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
323 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
324 BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
325 BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
326 LAPIC_REG(SVR) & LAPIC_SVR_MASK);
327 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
328 LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
329 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
330 BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
331 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
332 kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
333 kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
334 kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
335 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
336 LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
337 DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
338 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
339 BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
340 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
341 LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
342 DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
343 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
344 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
345 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
346 BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
347 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
348 LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
349 DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
350 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
351 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
352 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
353 BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
354 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
355 LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
356 (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
357 BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
358 kprintf("ESR: %08x \n", lapic_esr_read());
359 kprintf(" ");
360 for(i=0xf; i>=0; i--)
361 kprintf("%x%x%x%x",i,i,i,i);
362 kprintf("\n");
363 kprintf("TMR: 0x");
364 for(i=7; i>=0; i--)
365 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
366 kprintf("\n");
367 kprintf("IRR: 0x");
368 for(i=7; i>=0; i--)
369 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
370 kprintf("\n");
371 kprintf("ISR: 0x");
372 for(i=7; i >= 0; i--)
373 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
374 kprintf("\n");
375}
376
377boolean_t
378lapic_probe(void)
379{
380 uint32_t lo;
381 uint32_t hi;
382
383 if (cpuid_features() & CPUID_FEATURE_APIC)
384 return TRUE;
385
386 if (cpuid_family() == 6 || cpuid_family() == 15) {
387 /*
388 * Mobile Pentiums:
389 * There may be a local APIC which wasn't enabled by BIOS.
390 * So we try to enable it explicitly.
391 */
392 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
393 lo &= ~MSR_IA32_APIC_BASE_BASE;
394 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
395 lo |= MSR_IA32_APIC_BASE_ENABLE;
396 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
397
398 /*
399 * Re-initialize cpu features info and re-check.
400 */
401 set_cpu_model();
402 if (cpuid_features() & CPUID_FEATURE_APIC) {
403 printf("Local APIC discovered and enabled\n");
404 lapic_os_enabled = TRUE;
405 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
406 return TRUE;
407 }
408 }
409
410 return FALSE;
411}
412
413void
414lapic_shutdown(void)
415{
416 uint32_t lo;
417 uint32_t hi;
418 uint32_t value;
419
420 /* Shutdown if local APIC was enabled by OS */
421 if (lapic_os_enabled == FALSE)
422 return;
423
424 mp_disable_preemption();
425
426 /* ExtINT: masked */
427 if (get_cpu_number() == master_cpu) {
428 value = LAPIC_REG(LVT_LINT0);
429 value |= LAPIC_LVT_MASKED;
430 LAPIC_REG(LVT_LINT0) = value;
431 }
432
433 /* Timer: masked */
434 LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;
435
436 /* Perfmon: masked */
437 LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;
438
439 /* Error: masked */
440 LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;
441
442 /* APIC software disabled */
443 LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;
444
445 /* Bypass the APIC completely and update cpu features */
446 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
447 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
448 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
449 set_cpu_model();
450
451 mp_enable_preemption();
452}
453
454void
455lapic_init(void)
456{
457 int value;
458
459 /* Set flat delivery model, logical processor id */
460 LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
461 LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;
462
463 /* Accept all */
464 LAPIC_REG(TPR) = 0;
465
466 LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;
467
468 /* ExtINT */
469 if (get_cpu_number() == master_cpu) {
470 value = LAPIC_REG(LVT_LINT0);
471 value &= ~LAPIC_LVT_MASKED;
472 value |= LAPIC_LVT_DM_EXTINT;
473 LAPIC_REG(LVT_LINT0) = value;
474 }
475
476 /* Timer: unmasked, one-shot */
477 LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);
478
479 /* Perfmon: unmasked */
480 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
481
482 lapic_esr_clear();
483
484 LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
485
486}
487
488void
489lapic_set_timer_func(i386_intr_func_t func)
490{
491 lapic_timer_func = func;
492}
493
494void
495lapic_set_timer(
496 boolean_t interrupt,
497 lapic_timer_mode_t mode,
498 lapic_timer_divide_t divisor,
499 lapic_timer_count_t initial_count)
500{
501 boolean_t state;
502 uint32_t timer_vector;
503
504 state = ml_set_interrupts_enabled(FALSE);
505 timer_vector = LAPIC_REG(LVT_TIMER);
506 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
507 timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
508 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
509 LAPIC_REG(LVT_TIMER) = timer_vector;
510 LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
511 LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
512 ml_set_interrupts_enabled(state);
513}
514
515void
516lapic_get_timer(
517 lapic_timer_mode_t *mode,
518 lapic_timer_divide_t *divisor,
519 lapic_timer_count_t *initial_count,
520 lapic_timer_count_t *current_count)
521{
522 boolean_t state;
523
524 state = ml_set_interrupts_enabled(FALSE);
525 if (mode)
526 *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
527 periodic : one_shot;
528 if (divisor)
529 *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
530 if (initial_count)
531 *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
532 if (current_count)
533 *current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
534 ml_set_interrupts_enabled(state);
535}
536
537void
538lapic_set_pmi_func(i386_intr_func_t func)
539{
540 lapic_pmi_func = func;
541}
542
543static inline void
544_lapic_end_of_interrupt(void)
545{
546 LAPIC_REG(EOI) = 0;
547}
548
549void
550lapic_end_of_interrupt(void)
551{
552 _lapic_end_of_interrupt();
553}
554
555int
556lapic_interrupt(int interrupt, void *state)
557{
558 interrupt -= lapic_interrupt_base;
559 if (interrupt < 0)
560 return 0;
561
562 switch(interrupt) {
563 case LAPIC_PERFCNT_INTERRUPT:
564 if (lapic_pmi_func != NULL)
565 (*lapic_pmi_func)(
566 (struct i386_interrupt_state *) state);
567 /* Clear interrupt masked */
568 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
569 _lapic_end_of_interrupt();
570 return 1;
571 case LAPIC_TIMER_INTERRUPT:
572 _lapic_end_of_interrupt();
573 if (lapic_timer_func != NULL)
574 (*lapic_timer_func)(
575 (struct i386_interrupt_state *) state);
576 return 1;
577 case LAPIC_ERROR_INTERRUPT:
578 lapic_dump();
579 panic("Local APIC error\n");
580 _lapic_end_of_interrupt();
581 return 1;
582 case LAPIC_SPURIOUS_INTERRUPT:
583 kprintf("SPIV\n");
584 /* No EOI required here */
585 return 1;
586 case LAPIC_INTERPROCESSOR_INTERRUPT:
587 cpu_signal_handler((struct i386_interrupt_state *) state);
588 _lapic_end_of_interrupt();
589 return 1;
590 }
591 return 0;
592}
593
594void
595lapic_smm_restore(void)
596{
597 boolean_t state;
598
599 if (lapic_os_enabled == FALSE)
600 return;
601
602 state = ml_set_interrupts_enabled(FALSE);
603
604 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
605 /*
606 * A bogus SMI handler enables interrupts but does not know about
607 * local APIC interrupt sources. When the APIC timer counts down to
608 * zero while in SMM, the local APIC ends up waiting for an EOI,
609 * but no interrupt was delivered to the OS.
610 */
611 _lapic_end_of_interrupt();
612
613 /*
614 * The timer is one-shot; start another quick countdown to trigger
615 * a fresh timer interrupt.
616 */
617 if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
618 LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
619 }
620
621 kprintf("lapic_smm_restore\n");
622 }
623
624 ml_set_interrupts_enabled(state);
625}
626
627kern_return_t
628intel_startCPU(
629 int slot_num)
630{
631
632 int i = 1000;
633 int lapic = cpu_to_lapic[slot_num];
634
635 assert(lapic != -1);
636
637 DBGLOG_CPU_INIT(slot_num);
638
639 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
640 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
641
642 /* Initialize (or re-initialize) the descriptor tables for this cpu. */
643 mp_desc_init(cpu_datap(slot_num), FALSE);
644
645 /* Serialize use of the slave boot stack. */
646 mutex_lock(&mp_cpu_boot_lock);
647
648 mp_disable_preemption();
649 if (slot_num == get_cpu_number()) {
650 mp_enable_preemption();
651 mutex_unlock(&mp_cpu_boot_lock);
652 return KERN_SUCCESS;
653 }
654
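/*
 * Descriptive note (editorial addition, not in the original source): the
 * writes below follow the standard Intel INIT/STARTUP-IPI sequence: an
 * INIT IPI resets the target CPU, then after a delay two STARTUP IPIs
 * point it at the real-mode trampoline (MP_BOOT >> 12 is the page number
 * at which the AP begins executing).
 */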
655 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
656 LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
657 delay(10000);
658
659 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
660 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
661 delay(200);
662
663 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
664 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
665 delay(200);
666
667#ifdef POSTCODE_DELAY
668 /* Wait much longer if postcodes are displayed for a delay period. */
669 i *= 10000;
670#endif
671 while(i-- > 0) {
672 if (cpu_datap(slot_num)->cpu_running)
673 break;
674 delay(10000);
675 }
676
677 mp_enable_preemption();
678 mutex_unlock(&mp_cpu_boot_lock);
679
680 if (!cpu_datap(slot_num)->cpu_running) {
681 DBG("Failed to start CPU %02d\n", slot_num);
682 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
683 delay(1000000);
684 cpu_shutdown();
685 return KERN_SUCCESS;
686 } else {
687 DBG("Started CPU %02d\n", slot_num);
688 printf("Started CPU %02d\n", slot_num);
689 return KERN_SUCCESS;
690 }
691}
692
693extern char slave_boot_base[];
694extern char slave_boot_end[];
695extern void pstart(void);
696
697void
698slave_boot_init(void)
699{
700 DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
701 slave_boot_base,
702 kvtophys((vm_offset_t) slave_boot_base),
703 MP_BOOT,
704 slave_boot_end-slave_boot_base);
705
706 /*
707 * Copy the boot entry code to the real-mode vector area MP_BOOT.
708 * This is in page 1 which has been reserved for this purpose by
709 * machine_startup() from the boot processor.
710 * The slave boot code is responsible for switching to protected
711 * mode and then jumping to the common startup, _start().
712 */
713 bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
714 (addr64_t) MP_BOOT,
715 slave_boot_end-slave_boot_base);
716
717 /*
718 * Zero a stack area above the boot code.
719 */
720 DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
721 bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
722
723 /*
724 * Set the location at the base of the stack to point to the
725 * common startup entry.
726 */
727 DBG("writing 0x%x at phys 0x%x\n",
728 kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
729 ml_phys_write_word(MP_MACH_START+MP_BOOT,
730 kvtophys((vm_offset_t) &pstart));
731
732 /* Flush caches */
733 __asm__("wbinvd");
734}
735
736#if MP_DEBUG
737cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
738cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
739
740MP_EVENT_NAME_DECL();
741
742#endif /* MP_DEBUG */
743
744void
745cpu_signal_handler(__unused struct i386_interrupt_state *regs)
746{
747 int my_cpu;
748 volatile int *my_word;
749#if MACH_KDB && MACH_ASSERT
750 int i=100;
751#endif /* MACH_KDB && MACH_ASSERT */
752
753 mp_disable_preemption();
754
755 my_cpu = cpu_number();
756 my_word = &current_cpu_datap()->cpu_signals;
757
758 do {
759#if MACH_KDB && MACH_ASSERT
760 if (i-- <= 0)
761 Debugger("cpu_signal_handler");
762#endif /* MACH_KDB && MACH_ASSERT */
763#if MACH_KDP
764 if (i_bit(MP_KDP, my_word)) {
765 DBGLOG(cpu_handle,my_cpu,MP_KDP);
766 i_bit_clear(MP_KDP, my_word);
767 mp_kdp_wait();
768 } else
769#endif /* MACH_KDP */
770 if (i_bit(MP_TLB_FLUSH, my_word)) {
771 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
772 i_bit_clear(MP_TLB_FLUSH, my_word);
773 pmap_update_interrupt();
774 } else if (i_bit(MP_AST, my_word)) {
775 DBGLOG(cpu_handle,my_cpu,MP_AST);
776 i_bit_clear(MP_AST, my_word);
777 ast_check(cpu_to_processor(my_cpu));
778#if MACH_KDB
779 } else if (i_bit(MP_KDB, my_word)) {
780 extern int kdb_is_slave[];
781
782 i_bit_clear(MP_KDB, my_word);
783 kdb_is_slave[my_cpu]++;
784 kdb_kintr();
785#endif /* MACH_KDB */
786 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
787 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
788 i_bit_clear(MP_RENDEZVOUS, my_word);
789 mp_rendezvous_action();
790 }
791 } while (*my_word);
792
793 mp_enable_preemption();
794
795}
796
797#ifdef MP_DEBUG
798extern int max_lock_loops;
799#endif /* MP_DEBUG */
800void
801cpu_interrupt(int cpu)
802{
803 boolean_t state;
804
805 if (smp_initialized) {
806
807 /* Wait for previous interrupt to be delivered... */
808#ifdef MP_DEBUG
809 int pending_busy_count = 0;
810 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
811 if (++pending_busy_count > max_lock_loops)
812 panic("cpu_interrupt() deadlock\n");
813#else
814 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
815#endif /* MP_DEBUG */
816 cpu_pause();
817 }
818
819 state = ml_set_interrupts_enabled(FALSE);
820 LAPIC_REG(ICRD) =
821 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
822 LAPIC_REG(ICR) =
823 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
824 (void) ml_set_interrupts_enabled(state);
825 }
826
827}
828
829void
830i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
831{
832 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
833 uint64_t tsc_timeout;
834
835
836 if (!cpu_datap(cpu)->cpu_running)
837 return;
838
839 DBGLOG(cpu_signal, cpu, event);
840
841 i_bit_set(event, signals);
842 cpu_interrupt(cpu);
843 if (mode == SYNC) {
844 again:
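/*
 * Editorial note (not in the original source): the SYNC path below spins
 * for up to roughly 10^9 TSC ticks, on the order of a second on a
 * GHz-class CPU, before logging the timeout (under MP_DEBUG) and retrying.
 */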
845 tsc_timeout = rdtsc64() + (1000*1000*1000);
846 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
847 cpu_pause();
848 }
849 if (i_bit(event, signals)) {
850 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
851 cpu, event);
852 goto again;
853 }
854 }
855}
856
857void
858i386_signal_cpus(mp_event_t event, mp_sync_t mode)
859{
860 unsigned int cpu;
861 unsigned int my_cpu = cpu_number();
862
863 for (cpu = 0; cpu < real_ncpus; cpu++) {
864 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
865 continue;
866 i386_signal_cpu(cpu, event, mode);
867 }
868}
869
870int
871i386_active_cpus(void)
872{
873 unsigned int cpu;
874 unsigned int ncpus = 0;
875
876 for (cpu = 0; cpu < real_ncpus; cpu++) {
877 if (cpu_datap(cpu)->cpu_running)
878 ncpus++;
879 }
880 return(ncpus);
881}
882
883/*
884 * All-CPU rendezvous:
885 * - CPUs are signalled,
886 * - all execute the setup function (if specified),
887 * - rendezvous (i.e. all cpus reach a barrier),
888 * - all execute the action function (if specified),
889 * - rendezvous again,
890 * - execute the teardown function (if specified), and then
891 * - resume.
892 *
893 * Note that the supplied external functions _must_ be reentrant and aware
894 * that they are running in parallel and in an unknown lock context.
895 */
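/*
 * Illustrative usage sketch (editorial addition; the callback name is
 * hypothetical and not part of this file).  A subsystem wanting every
 * active CPU to perform a local update in lockstep might write:
 *
 *	static void
 *	update_action(__unused void *arg)
 *	{
 *		(do the per-CPU work here, e.g. reprogram a per-CPU register)
 *	}
 *
 *	mp_rendezvous(NULL, update_action, NULL, NULL);
 *
 * Because the action runs on all CPUs concurrently, it must be reentrant
 * and must not acquire locks another rendezvousing CPU might hold.
 */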
896
897static void
898mp_rendezvous_action(void)
899{
900
901 /* setup function */
902 if (mp_rv_setup_func != NULL)
903 mp_rv_setup_func(mp_rv_func_arg);
904 /* spin on entry rendezvous */
905 atomic_incl(&mp_rv_waiters[0], 1);
906 while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
907 cpu_pause();
908 /* action function */
909 if (mp_rv_action_func != NULL)
910 mp_rv_action_func(mp_rv_func_arg);
911 /* spin on exit rendezvous */
912 atomic_incl(&mp_rv_waiters[1], 1);
913 while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
914 cpu_pause();
915 /* teardown function */
916 if (mp_rv_teardown_func != NULL)
917 mp_rv_teardown_func(mp_rv_func_arg);
918}
919
920void
921mp_rendezvous(void (*setup_func)(void *),
922 void (*action_func)(void *),
923 void (*teardown_func)(void *),
924 void *arg)
925{
926
927 if (!smp_initialized) {
928 if (setup_func != NULL)
929 setup_func(arg);
930 if (action_func != NULL)
931 action_func(arg);
932 if (teardown_func != NULL)
933 teardown_func(arg);
934 return;
935 }
936
937 /* obtain rendezvous lock */
938 simple_lock(&mp_rv_lock);
939
940 /* set static function pointers */
941 mp_rv_setup_func = setup_func;
942 mp_rv_action_func = action_func;
943 mp_rv_teardown_func = teardown_func;
944 mp_rv_func_arg = arg;
945
946 mp_rv_waiters[0] = 0; /* entry rendezvous count */
947 mp_rv_waiters[1] = 0; /* exit rendezvous count */
948 mp_rv_ncpus = i386_active_cpus();
949
950 /*
951 * signal other processors, which will call mp_rendezvous_action()
952 * with interrupts disabled
953 */
954 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
955
956 /* call executor function on this cpu */
957 mp_rendezvous_action();
958
959 /* release lock */
960 simple_unlock(&mp_rv_lock);
961}
962
963#if MACH_KDP
964volatile boolean_t mp_kdp_trap = FALSE;
965long mp_kdp_ncpus;
966boolean_t mp_kdp_state;
967
968
969void
970mp_kdp_enter(void)
971{
972 unsigned int cpu;
973 unsigned int ncpus;
974 unsigned int my_cpu = cpu_number();
975 uint64_t tsc_timeout;
976
977 DBG("mp_kdp_enter()\n");
978
979 /*
980 * Here to enter the debugger.
981 * In case of races, only one cpu is allowed to enter kdp after
982 * stopping others.
983 */
984 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
985 simple_lock(&mp_kdp_lock);
986 while (mp_kdp_trap) {
987 simple_unlock(&mp_kdp_lock);
988 DBG("mp_kdp_enter() race lost\n");
989 mp_kdp_wait();
990 simple_lock(&mp_kdp_lock);
991 }
992 mp_kdp_ncpus = 1; /* self */
993 mp_kdp_trap = TRUE;
994 simple_unlock(&mp_kdp_lock);
995
996 /* Deliver a nudge to other cpus, counting how many */
997 DBG("mp_kdp_enter() signaling other processors\n");
998 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
999 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1000 continue;
1001 ncpus++;
1002 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1003 }
1004
1005 /* Wait for the other processors to spin. */
1006 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
1007 tsc_timeout = rdtsc64() + (1000*1000*1000);
1008 while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
1009 && rdtsc64() < tsc_timeout) {
1010 cpu_pause();
1011 }
1012 DBG("mp_kdp_enter() %d processors done %s\n",
1013 mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
1014 postcode(MP_KDP_ENTER);
1015}
1016
1017static void
1018mp_kdp_wait(void)
1019{
1020 boolean_t state;
1021
1022 state = ml_set_interrupts_enabled(TRUE);
1023 DBG("mp_kdp_wait()\n");
1024 atomic_incl(&mp_kdp_ncpus, 1);
1025 while (mp_kdp_trap) {
1026 cpu_pause();
1027 }
1028 atomic_decl(&mp_kdp_ncpus, 1);
1029 DBG("mp_kdp_wait() done\n");
1030 (void) ml_set_interrupts_enabled(state);
1031}
1032
1033void
1034mp_kdp_exit(void)
1035{
1036 DBG("mp_kdp_exit()\n");
1037 atomic_decl(&mp_kdp_ncpus, 1);
1038 mp_kdp_trap = FALSE;
1039
1040 /* Wait for the other processors to stop spinning. XXX needs timeout */
1041 DBG("mp_kdp_exit() waiting for processors to resume\n");
1042 while (*((volatile long *) &mp_kdp_ncpus) > 0) {
1043 cpu_pause();
1044 }
1045 DBG("mp_kdp_exit() done\n");
1046 (void) ml_set_interrupts_enabled(mp_kdp_state);
1047 postcode(0);
1048}
1049#endif /* MACH_KDP */
1050
1051/*ARGSUSED*/
1052void
1053init_ast_check(
1054 __unused processor_t processor)
1055{
1056}
1057
1058void
1059cause_ast_check(
1060 processor_t processor)
1061{
1062 int cpu = PROCESSOR_DATA(processor, slot_num);
1063
1064 if (cpu != cpu_number()) {
1065 i386_signal_cpu(cpu, MP_AST, ASYNC);
1066 }
1067}
1068
1069/*
1070 * invoke kdb on slave processors
1071 */
1072
1073void
1074remote_kdb(void)
1075{
1076 unsigned int my_cpu = cpu_number();
1077 unsigned int cpu;
1078
1079 mp_disable_preemption();
1080 for (cpu = 0; cpu < real_ncpus; cpu++) {
1081 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1082 continue;
1083 i386_signal_cpu(cpu, MP_KDB, SYNC);
1084 }
1085 mp_enable_preemption();
1086}
1087
1088/*
1089 * Clear kdb interrupt
1090 */
1091
1092void
1093clear_kdb_intr(void)
1094{
1095 mp_disable_preemption();
1096 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1097 mp_enable_preemption();
1098}
1099
1100/*
1101 * i386_init_slave() is called from pstart.
1102 * We're in the cpu's interrupt stack with interrupts disabled.
1103 */
1104void
1105i386_init_slave(void)
1106{
1107 postcode(I386_INIT_SLAVE);
1108
1109 /* Ensure that caching and write-through are enabled */
1110 set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));
1111
1112 DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
1113 get_cpu_number(), get_cpu_phys_number());
1114
1115 lapic_init();
1116
1117 LAPIC_DUMP();
1118 LAPIC_CPU_MAP_DUMP();
1119
1120 mtrr_update_cpu();
1121
1122 pat_init();
1123
1124 cpu_init();
1125
1126 slave_main();
1127
1128 panic("i386_init_slave() returned from slave_main()");
1129}
1130
1131void
1132slave_machine_init(void)
1133{
1134 /*
1135 * Here in process context.
1136 */
1137 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1138
1139 init_fpu();
1140
1141 cpu_thread_init();
1142
1143 pmc_init();
1144
1145 cpu_machine_init();
1146
1147 clock_init();
1148}
1149
1150#undef cpu_number()
1151int cpu_number(void)
1152{
1153 return get_cpu_number();
1154}
1155
1156#if MACH_KDB
1157#include <ddb/db_output.h>
1158
1159#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1160
1161
1162#if TRAP_DEBUG
1163#define MTRAPS 100
1164struct mp_trap_hist_struct {
1165 unsigned char type;
1166 unsigned char data[5];
1167} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1168 *max_trap_hist = &trap_hist[MTRAPS];
1169
1170void db_trap_hist(void);
1171
1172/*
1173 * SPL:
1174 * 1: new spl
1175 * 2: old spl
1176 * 3: new tpr
1177 * 4: old tpr
1178 * INT:
1179 * 1: int vec
1180 * 2: old spl
1181 * 3: new spl
1182 * 4: post eoi tpr
1183 * 5: exit tpr
1184 */
1185
1186void
1187db_trap_hist(void)
1188{
1189 int i,j;
1190 for(i=0;i<MTRAPS;i++)
1191 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1192 db_printf("%s%s",
1193 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1194 (trap_hist[i].type == 1)?"SPL":"INT");
1195 for(j=0;j<5;j++)
1196 db_printf(" %02x", trap_hist[i].data[j]);
1197 db_printf("\n");
1198 }
1199
1200}
1201#endif /* TRAP_DEBUG */
1202
1203void db_lapic(int cpu);
1204unsigned int db_remote_read(int cpu, int reg);
1205void db_ioapic(unsigned int);
1206void kdb_console(void);
1207
1208void
1209kdb_console(void)
1210{
1211}
1212
1213#define BOOLP(a) ((a)?' ':'!')
1214
1215static char *DM[8] = {
1216 "Fixed",
1217 "Lowest Priority",
1218 "Invalid",
1219 "Invalid",
1220 "NMI",
1221 "Reset",
1222 "Invalid",
1223 "ExtINT"};
1224
1225unsigned int
1226db_remote_read(int cpu, int reg)
1227{
1228 return -1;
1229}
1230
1231void
1232db_lapic(int cpu)
1233{
1234}
1235
1236void
1237db_ioapic(unsigned int ind)
1238{
1239}
1240
1241#endif /* MACH_KDB */
1242