1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33
34 #include <mach_rt.h>
35 #include <mach_kdb.h>
36 #include <mach_kdp.h>
37 #include <mach_ldebug.h>
38 #include <gprof.h>
39
40 #include <mach/mach_types.h>
41 #include <mach/kern_return.h>
42
43 #include <kern/kern_types.h>
44 #include <kern/startup.h>
45 #include <kern/processor.h>
46 #include <kern/cpu_number.h>
47 #include <kern/cpu_data.h>
48 #include <kern/assert.h>
49 #include <kern/machine.h>
50
51 #include <vm/vm_map.h>
52 #include <vm/vm_kern.h>
53
54 #include <profiling/profile-mk.h>
55
56 #include <i386/mp.h>
57 #include <i386/mp_events.h>
58 #include <i386/mp_slave_boot.h>
59 #include <i386/apic.h>
60 #include <i386/ipl.h>
61 #include <i386/fpu.h>
62 #include <i386/pio.h>
63 #include <i386/cpuid.h>
64 #include <i386/proc_reg.h>
65 #include <i386/machine_cpu.h>
66 #include <i386/misc_protos.h>
67 #include <i386/mtrr.h>
68 #include <i386/postcode.h>
69 #include <i386/perfmon.h>
70 #include <i386/cpu_threads.h>
71 #include <i386/mp_desc.h>
72
73 #if MP_DEBUG
74 #define PAUSE delay(1000000)
75 #define DBG(x...) kprintf(x)
76 #else
77 #define DBG(x...)
78 #define PAUSE
79 #endif /* MP_DEBUG */
80
81 /*
82 * By default, use high vectors to leave vector space for systems
83 * with multiple I/O APICs. However, some systems that boot with the
84 * local APIC disabled will hang in SMM when vectors greater than
85 * 0x5F are used. Those systems are not expected to have an I/O APIC,
86 * so the 16 vectors 0x50 - 0x5F for legacy PIC support are perfect.
87 */
88 #define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
89 #define LAPIC_REDUCED_INTERRUPT_BASE 0x50
90 /*
91 * Specific lapic interrupts are relative to this base:
92 */
93 #define LAPIC_PERFCNT_INTERRUPT 0xB
94 #define LAPIC_TIMER_INTERRUPT 0xC
95 #define LAPIC_SPURIOUS_INTERRUPT 0xD
96 #define LAPIC_INTERPROCESSOR_INTERRUPT 0xE
97 #define LAPIC_ERROR_INTERRUPT 0xF
98
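/*
 * Illustrative sketch only (not compiled): how the runtime vector for a
 * local APIC source is composed from the base and the per-source offsets
 * above.  With the default base 0xD0 the timer vector is 0xD0 + 0xC = 0xDC;
 * with the reduced base 0x50 it is 0x5C, safely below the 0x5F SMM limit.
 * The LAPIC_VECTOR() macro below expands to the same expression.
 */
#if 0
static inline int
lapic_timer_vector_sketch(void)
{
	return lapic_interrupt_base + LAPIC_TIMER_INTERRUPT;
}
#endif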
99 /* Initialize lapic_id so cpu_number() works on non-SMP systems */
100 unsigned long lapic_id_initdata = 0;
101 unsigned long lapic_id = (unsigned long)&lapic_id_initdata;
102 vm_offset_t lapic_start;
103
104 static i386_intr_func_t lapic_timer_func;
105 static i386_intr_func_t lapic_pmi_func;
106
107 /* TRUE if the local APIC was enabled by the OS, not by the BIOS */
108 static boolean_t lapic_os_enabled = FALSE;
109
110 /* Base vector for local APIC interrupt sources */
111 int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
112
113 void slave_boot_init(void);
114
115 static void mp_kdp_wait(void);
116 static void mp_rendezvous_action(void);
117
118 boolean_t smp_initialized = FALSE;
119
120 decl_simple_lock_data(,mp_kdp_lock);
121
122 decl_mutex_data(static, mp_cpu_boot_lock);
123
124 /* Variables needed for MP rendezvous. */
125 static void (*mp_rv_setup_func)(void *arg);
126 static void (*mp_rv_action_func)(void *arg);
127 static void (*mp_rv_teardown_func)(void *arg);
128 static void *mp_rv_func_arg;
129 static int mp_rv_ncpus;
130 static long mp_rv_waiters[2];
131 decl_simple_lock_data(,mp_rv_lock);
132
133 int lapic_to_cpu[MAX_CPUS];
134 int cpu_to_lapic[MAX_CPUS];
135
136 static void
137 lapic_cpu_map_init(void)
138 {
139 int i;
140
141 for (i = 0; i < MAX_CPUS; i++) {
142 lapic_to_cpu[i] = -1;
143 cpu_to_lapic[i] = -1;
144 }
145 }
146
147 void
148 lapic_cpu_map(int apic_id, int cpu)
149 {
150 cpu_to_lapic[cpu] = apic_id;
151 lapic_to_cpu[apic_id] = cpu;
152 }
153
154 #ifdef MP_DEBUG
155 static void
156 lapic_cpu_map_dump(void)
157 {
158 int i;
159
160 for (i = 0; i < MAX_CPUS; i++) {
161 if (cpu_to_lapic[i] == -1)
162 continue;
163 kprintf("cpu_to_lapic[%d]: %d\n",
164 i, cpu_to_lapic[i]);
165 }
166 for (i = 0; i < MAX_CPUS; i++) {
167 if (lapic_to_cpu[i] == -1)
168 continue;
169 kprintf("lapic_to_cpu[%d]: %d\n",
170 i, lapic_to_cpu[i]);
171 }
172 }
173 #define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump()
174 #define LAPIC_DUMP() lapic_dump()
175 #else
176 #define LAPIC_CPU_MAP_DUMP()
177 #define LAPIC_DUMP()
178 #endif /* MP_DEBUG */
179
180 #define LAPIC_REG(reg) \
181 (*((volatile int *)(lapic_start + LAPIC_##reg)))
182 #define LAPIC_REG_OFFSET(reg,off) \
183 (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
184
185 #define LAPIC_VECTOR(src) \
186 (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
187
188 #define LAPIC_ISR_IS_SET(base,src) \
189 (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
190 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
191
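/*
 * Illustrative sketch only (not compiled): reading the local APIC ID
 * through the memory-mapped LAPIC_REG() window established in smp_init().
 * This mirrors the expression used by smp_init() and lapic_dump().
 */
#if 0
static inline int
lapic_read_id_sketch(void)
{
	return (LAPIC_REG(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
}
#endif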
192 #if GPROF
193 /*
194 * Initialize dummy structs for profiling. These aren't used, but
195 * they allow hertz_tick() to be built with GPROF defined.
196 */
197 struct profile_vars _profile_vars;
198 struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
199 #define GPROF_INIT() \
200 { \
201 int i; \
202 \
203 /* Hack to initialize pointers to unused profiling structs */ \
204 for (i = 1; i < MAX_CPUS; i++) \
205 _profile_vars_cpus[i] = &_profile_vars; \
206 }
207 #else
208 #define GPROF_INIT()
209 #endif /* GPROF */
210
211 extern void master_up(void);
212
213 void
214 smp_init(void)
215 {
216 int result;
217 vm_map_entry_t entry;
218 uint32_t lo;
219 uint32_t hi;
220 boolean_t is_boot_processor;
221 boolean_t is_lapic_enabled;
222 vm_offset_t lapic_base;
223
224 simple_lock_init(&mp_kdp_lock, 0);
225 simple_lock_init(&mp_rv_lock, 0);
226 mutex_init(&mp_cpu_boot_lock, 0);
227 console_init();
228
229 /* Local APIC? */
230 if (!lapic_probe())
231 return;
232
233 /* Examine the local APIC state */
234 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
235 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
236 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
237 lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
238 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
239 is_lapic_enabled ? "enabled" : "disabled",
240 is_boot_processor ? "BSP" : "AP");
241 if (!is_boot_processor || !is_lapic_enabled)
242 panic("Unexpected local APIC state\n");
243
244 /* Establish a map to the local apic */
245 lapic_start = vm_map_min(kernel_map);
246 result = vm_map_find_space(kernel_map, &lapic_start,
247 round_page(LAPIC_SIZE), 0, &entry);
248 if (result != KERN_SUCCESS) {
249 panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
250 }
251 vm_map_unlock(kernel_map);
252 pmap_enter(pmap_kernel(),
253 lapic_start,
254 (ppnum_t) i386_btop(lapic_base),
255 VM_PROT_READ|VM_PROT_WRITE,
256 VM_WIMG_USE_DEFAULT,
257 TRUE);
258 lapic_id = (unsigned long)(lapic_start + LAPIC_ID);
259
260 if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
261 printf("Local APIC version not 0x14 as expected\n");
262 }
263
264 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
265 lapic_cpu_map_init();
266 lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
267
268 lapic_init();
269
270 cpu_thread_init();
271
272 if (pmc_init() != KERN_SUCCESS)
273 printf("Performance counters not available\n");
274
275 GPROF_INIT();
276 DBGLOG_CPU_INIT(master_cpu);
277
278 slave_boot_init();
279 master_up();
280
281 smp_initialized = TRUE;
282
283 return;
284 }
285
286
287 static int
288 lapic_esr_read(void)
289 {
290 /* write-read register */
291 LAPIC_REG(ERROR_STATUS) = 0;
292 return LAPIC_REG(ERROR_STATUS);
293 }
294
295 static void
296 lapic_esr_clear(void)
297 {
298 LAPIC_REG(ERROR_STATUS) = 0;
299 LAPIC_REG(ERROR_STATUS) = 0;
300 }
301
302 static const char *DM[8] = {
303 "Fixed",
304 "Lowest Priority",
305 "Invalid",
306 "Invalid",
307 "NMI",
308 "Reset",
309 "Invalid",
310 "ExtINT"};
311
312 void
313 lapic_dump(void)
314 {
315 int i;
316
317 #define BOOL(a) ((a)?' ':'!')
318
319 kprintf("LAPIC %d at 0x%x version 0x%x\n",
320 (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
321 lapic_start,
322 LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
323 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
324 LAPIC_REG(TPR)&LAPIC_TPR_MASK,
325 LAPIC_REG(APR)&LAPIC_APR_MASK,
326 LAPIC_REG(PPR)&LAPIC_PPR_MASK);
327 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
328 LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
329 LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
330 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
331 BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
332 BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
333 LAPIC_REG(SVR) & LAPIC_SVR_MASK);
334 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
335 LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
336 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
337 BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
338 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
339 kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
340 kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
341 kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
342 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
343 LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
344 DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
345 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
346 BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
347 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
348 LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
349 DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
350 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
351 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
352 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
353 BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
354 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
355 LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
356 DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
357 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
358 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
359 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
360 BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
361 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
362 LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
363 (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
364 BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
365 kprintf("ESR: %08x \n", lapic_esr_read());
366 kprintf(" ");
367 for(i=0xf; i>=0; i--)
368 kprintf("%x%x%x%x",i,i,i,i);
369 kprintf("\n");
370 kprintf("TMR: 0x");
371 for(i=7; i>=0; i--)
372 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
373 kprintf("\n");
374 kprintf("IRR: 0x");
375 for(i=7; i>=0; i--)
376 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
377 kprintf("\n");
378 kprintf("ISR: 0x");
379 for(i=7; i >= 0; i--)
380 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
381 kprintf("\n");
382 }
383
384 boolean_t
385 lapic_probe(void)
386 {
387 uint32_t lo;
388 uint32_t hi;
389
390 if (cpuid_features() & CPUID_FEATURE_APIC)
391 return TRUE;
392
393 if (cpuid_family() == 6 || cpuid_family() == 15) {
394 /*
395 * Mobile Pentiums:
396 * There may be a local APIC which wasn't enabled by the BIOS,
397 * so we try to enable it explicitly.
398 */
399 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
400 lo &= ~MSR_IA32_APIC_BASE_BASE;
401 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
403 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
404
405 /*
406 * Re-initialize cpu features info and re-check.
407 */
408 set_cpu_model();
409 if (cpuid_features() & CPUID_FEATURE_APIC) {
410 printf("Local APIC discovered and enabled\n");
411 lapic_os_enabled = TRUE;
412 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
413 return TRUE;
414 }
415 }
416
417 return FALSE;
418 }
419
420 void
421 lapic_shutdown(void)
422 {
423 uint32_t lo;
424 uint32_t hi;
425 uint32_t value;
426
427 /* Shutdown if local APIC was enabled by OS */
428 if (lapic_os_enabled == FALSE)
429 return;
430
431 mp_disable_preemption();
432
433 /* ExtINT: masked */
434 if (get_cpu_number() == master_cpu) {
435 value = LAPIC_REG(LVT_LINT0);
436 value |= LAPIC_LVT_MASKED;
437 LAPIC_REG(LVT_LINT0) = value;
438 }
439
440 /* Timer: masked */
441 LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;
442
443 /* Perfmon: masked */
444 LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;
445
446 /* Error: masked */
447 LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;
448
449 /* APIC software disabled */
450 LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;
451
452 /* Bypass the APIC completely and update cpu features */
453 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
454 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
455 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
456 set_cpu_model();
457
458 mp_enable_preemption();
459 }
460
461 void
462 lapic_init(void)
463 {
464 int value;
465
466 /* Set flat delivery model, logical processor id */
467 LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
468 LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;
469
470 /* Accept all */
471 LAPIC_REG(TPR) = 0;
472
473 LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;
474
475 /* ExtINT */
476 if (get_cpu_number() == master_cpu) {
477 value = LAPIC_REG(LVT_LINT0);
478 value &= ~LAPIC_LVT_MASKED;
479 value |= LAPIC_LVT_DM_EXTINT;
480 LAPIC_REG(LVT_LINT0) = value;
481 }
482
483 /* Timer: unmasked, one-shot */
484 LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);
485
486 /* Perfmon: unmasked */
487 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
488
489 lapic_esr_clear();
490
491 LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
492
493 }
494
495 void
496 lapic_set_timer_func(i386_intr_func_t func)
497 {
498 lapic_timer_func = func;
499 }
500
501 void
502 lapic_set_timer(
503 boolean_t interrupt,
504 lapic_timer_mode_t mode,
505 lapic_timer_divide_t divisor,
506 lapic_timer_count_t initial_count)
507 {
508 boolean_t state;
509 uint32_t timer_vector;
510
511 state = ml_set_interrupts_enabled(FALSE);
512 timer_vector = LAPIC_REG(LVT_TIMER);
513 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);;
514 timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
515 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
516 LAPIC_REG(LVT_TIMER) = timer_vector;
517 LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
518 LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
519 ml_set_interrupts_enabled(state);
520 }
521
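/*
 * Illustrative sketch only (not compiled): programming an unmasked,
 * periodic timer tick with lapic_set_timer().  The divide_by_1
 * enumerator (assumed from mp.h) and the initial count are hypothetical
 * example values; real callers derive the count from the bus clock.
 */
#if 0
static void
lapic_set_timer_usage_sketch(void)
{
	lapic_set_timer(TRUE, periodic, divide_by_1, 100000);
}
#endif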
522 void
523 lapic_get_timer(
524 lapic_timer_mode_t *mode,
525 lapic_timer_divide_t *divisor,
526 lapic_timer_count_t *initial_count,
527 lapic_timer_count_t *current_count)
528 {
529 boolean_t state;
530
531 state = ml_set_interrupts_enabled(FALSE);
532 if (mode)
533 *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
534 periodic : one_shot;
535 if (divisor)
536 *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
537 if (initial_count)
538 *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
539 if (current_count)
540 *current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
541 ml_set_interrupts_enabled(state);
542 }
543
544 void
545 lapic_set_pmi_func(i386_intr_func_t func)
546 {
547 lapic_pmi_func = func;
548 }
549
550 static inline void
551 _lapic_end_of_interrupt(void)
552 {
553 LAPIC_REG(EOI) = 0;
554 }
555
556 void
557 lapic_end_of_interrupt(void)
558 {
559 _lapic_end_of_interrupt();
560 }
561
562 int
563 lapic_interrupt(int interrupt, void *state)
564 {
565 interrupt -= lapic_interrupt_base;
566 if (interrupt < 0)
567 return 0;
568
569 switch(interrupt) {
570 case LAPIC_PERFCNT_INTERRUPT:
571 if (lapic_pmi_func != NULL)
572 (*lapic_pmi_func)(
573 (struct i386_interrupt_state *) state);
574 /* Clear interrupt masked */
575 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
576 _lapic_end_of_interrupt();
577 return 1;
578 case LAPIC_TIMER_INTERRUPT:
579 _lapic_end_of_interrupt();
580 if (lapic_timer_func != NULL)
581 (*lapic_timer_func)(
582 (struct i386_interrupt_state *) state);
583 return 1;
584 case LAPIC_ERROR_INTERRUPT:
585 lapic_dump();
586 panic("Local APIC error\n");
587 _lapic_end_of_interrupt();
588 return 1;
589 case LAPIC_SPURIOUS_INTERRUPT:
590 kprintf("SPIV\n");
591 /* No EOI required here */
592 return 1;
593 case LAPIC_INTERPROCESSOR_INTERRUPT:
594 cpu_signal_handler((struct i386_interrupt_state *) state);
595 _lapic_end_of_interrupt();
596 return 1;
597 }
598 return 0;
599 }
600
601 void
602 lapic_smm_restore(void)
603 {
604 boolean_t state;
605
606 if (lapic_os_enabled == FALSE)
607 return;
608
609 state = ml_set_interrupts_enabled(FALSE);
610
611 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
612 /*
613 * A bogus SMI handler enables interrupts but does not know about
614 * local APIC interrupt sources. When the APIC timer counts down to
615 * zero while in SMM, the local APIC ends up waiting for an EOI,
616 * but no interrupt was ever delivered to the OS.
617 */
618 _lapic_end_of_interrupt();
619
620 /*
621 * The timer is one-shot; start another quick countdown to
622 * generate a fresh timer interrupt.
623 */
624 if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
625 LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
626 }
627
628 kprintf("lapic_smm_restore\n");
629 }
630
631 ml_set_interrupts_enabled(state);
632 }
633
634 kern_return_t
635 intel_startCPU(
636 int slot_num)
637 {
638
639 int i = 1000;
640 int lapic = cpu_to_lapic[slot_num];
641
642 assert(lapic != -1);
643
644 DBGLOG_CPU_INIT(slot_num);
645
646 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
647 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
648
649 /* Initialize (or re-initialize) the descriptor tables for this cpu. */
650 mp_desc_init(cpu_datap(slot_num), FALSE);
651
652 /* Serialize use of the slave boot stack. */
653 mutex_lock(&mp_cpu_boot_lock);
654
655 mp_disable_preemption();
656 if (slot_num == get_cpu_number()) {
657 mp_enable_preemption();
658 mutex_unlock(&mp_cpu_boot_lock);
659 return KERN_SUCCESS;
660 }
661
662 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
663 LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
664 delay(10000);
665
666 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
667 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
668 delay(200);
669
670 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
671 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
672 delay(200);
673
674 #ifdef POSTCODE_DELAY
675 /* Wait much longer if postcodes are displayed for a delay period. */
676 i *= 10000;
677 #endif
678 while(i-- > 0) {
679 if (cpu_datap(slot_num)->cpu_running)
680 break;
681 delay(10000);
682 }
683
684 mp_enable_preemption();
685 mutex_unlock(&mp_cpu_boot_lock);
686
687 if (!cpu_datap(slot_num)->cpu_running) {
688 DBG("Failed to start CPU %02d\n", slot_num);
689 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
690 delay(1000000);
691 cpu_shutdown();
692 return KERN_SUCCESS;
693 } else {
694 DBG("Started CPU %02d\n", slot_num);
695 printf("Started CPU %02d\n", slot_num);
696 return KERN_SUCCESS;
697 }
698 }
699
700 extern char slave_boot_base[];
701 extern char slave_boot_end[];
702 extern void pstart(void);
703
704 void
705 slave_boot_init(void)
706 {
707 DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
708 slave_boot_base,
709 kvtophys((vm_offset_t) slave_boot_base),
710 MP_BOOT,
711 slave_boot_end-slave_boot_base);
712
713 /*
714 * Copy the boot entry code to the real-mode vector area MP_BOOT.
715 * This is in page 1 which has been reserved for this purpose by
716 * machine_startup() from the boot processor.
717 * The slave boot code is responsible for switching to protected
718 * mode and then jumping to the common startup, _start().
719 */
720 bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
721 (addr64_t) MP_BOOT,
722 slave_boot_end-slave_boot_base);
723
724 /*
725 * Zero a stack area above the boot code.
726 */
727 DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
728 bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
729
730 /*
731 * Set the location at the base of the stack to point to the
732 * common startup entry.
733 */
734 DBG("writing 0x%x at phys 0x%x\n",
735 kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
736 ml_phys_write_word(MP_MACH_START+MP_BOOT,
737 kvtophys((vm_offset_t) &pstart));
738
739 /* Flush caches */
740 __asm__("wbinvd");
741 }
742
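/*
 * Sketch of the resulting MP_BOOT page layout, reconstructed from the
 * constants used above (all offsets relative to MP_BOOT; the actual
 * values live in mp_slave_boot.h):
 *
 *   MP_BOOT + 0              slave boot code copied from slave_boot_base
 *   MP_BOOT + MP_BOOTSTACK   top of the 0x400-byte zeroed slave boot stack
 *   MP_BOOT + MP_MACH_START  physical address of pstart(), picked up by
 *                            the slave to enter the common startup
 */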
743 #if MP_DEBUG
744 cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
745 cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
746
747 MP_EVENT_NAME_DECL();
748
749 #endif /* MP_DEBUG */
750
751 void
752 cpu_signal_handler(__unused struct i386_interrupt_state *regs)
753 {
754 int my_cpu;
755 volatile int *my_word;
756 #if MACH_KDB && MACH_ASSERT
757 int i=100;
758 #endif /* MACH_KDB && MACH_ASSERT */
759
760 mp_disable_preemption();
761
762 my_cpu = cpu_number();
763 my_word = &current_cpu_datap()->cpu_signals;
764
765 do {
766 #if MACH_KDB && MACH_ASSERT
767 if (i-- <= 0)
768 Debugger("cpu_signal_handler");
769 #endif /* MACH_KDB && MACH_ASSERT */
770 #if MACH_KDP
771 if (i_bit(MP_KDP, my_word)) {
772 DBGLOG(cpu_handle,my_cpu,MP_KDP);
773 i_bit_clear(MP_KDP, my_word);
774 mp_kdp_wait();
775 } else
776 #endif /* MACH_KDP */
777 if (i_bit(MP_TLB_FLUSH, my_word)) {
778 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
779 i_bit_clear(MP_TLB_FLUSH, my_word);
780 pmap_update_interrupt();
781 } else if (i_bit(MP_AST, my_word)) {
782 DBGLOG(cpu_handle,my_cpu,MP_AST);
783 i_bit_clear(MP_AST, my_word);
784 ast_check(cpu_to_processor(my_cpu));
785 #if MACH_KDB
786 } else if (i_bit(MP_KDB, my_word)) {
787 extern int kdb_is_slave[];
788
789 i_bit_clear(MP_KDB, my_word);
790 kdb_is_slave[my_cpu]++;
791 kdb_kintr();
792 #endif /* MACH_KDB */
793 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
794 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
795 i_bit_clear(MP_RENDEZVOUS, my_word);
796 mp_rendezvous_action();
797 }
798 } while (*my_word);
799
800 mp_enable_preemption();
801
802 }
803
804 #ifdef MP_DEBUG
805 extern int max_lock_loops;
806 #endif /* MP_DEBUG */
807 void
808 cpu_interrupt(int cpu)
809 {
810 boolean_t state;
811
812 if (smp_initialized) {
813
814 /* Wait for previous interrupt to be delivered... */
815 #ifdef MP_DEBUG
816 int pending_busy_count = 0;
817 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
818 if (++pending_busy_count > max_lock_loops)
819 panic("cpus_interrupt() deadlock\n");
820 #else
821 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
822 #endif /* MP_DEBUG */
823 cpu_pause();
824 }
825
826 state = ml_set_interrupts_enabled(FALSE);
827 LAPIC_REG(ICRD) =
828 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
829 LAPIC_REG(ICR) =
830 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
831 (void) ml_set_interrupts_enabled(state);
832 }
833
834 }
835
836 void
837 i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
838 {
839 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
840 uint64_t tsc_timeout;
841
842
843 if (!cpu_datap(cpu)->cpu_running)
844 return;
845
846 DBGLOG(cpu_signal, cpu, event);
847
848 i_bit_set(event, signals);
849 cpu_interrupt(cpu);
850 if (mode == SYNC) {
851 again:
852 tsc_timeout = rdtsc64() + (1000*1000*1000);
853 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
854 cpu_pause();
855 }
856 if (i_bit(event, signals)) {
857 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
858 cpu, event);
859 goto again;
860 }
861 }
862 }
863
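/*
 * Illustrative sketch only (not compiled): nudging another cpu to run
 * its AST check, as cause_ast_check() does below.  ASYNC returns once
 * the IPI is posted; SYNC would spin until the target clears the event
 * bit in its signal word.
 */
#if 0
static void
i386_signal_cpu_usage_sketch(int cpu)
{
	i386_signal_cpu(cpu, MP_AST, ASYNC);
}
#endif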
864 void
865 i386_signal_cpus(mp_event_t event, mp_sync_t mode)
866 {
867 unsigned int cpu;
868 unsigned int my_cpu = cpu_number();
869
870 for (cpu = 0; cpu < real_ncpus; cpu++) {
871 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
872 continue;
873 i386_signal_cpu(cpu, event, mode);
874 }
875 }
876
877 int
878 i386_active_cpus(void)
879 {
880 unsigned int cpu;
881 unsigned int ncpus = 0;
882
883 for (cpu = 0; cpu < real_ncpus; cpu++) {
884 if (cpu_datap(cpu)->cpu_running)
885 ncpus++;
886 }
887 return(ncpus);
888 }
889
890 /*
891 * All-CPU rendezvous:
892 * - CPUs are signalled,
893 * - all execute the setup function (if specified),
894 * - rendezvous (i.e. all cpus reach a barrier),
895 * - all execute the action function (if specified),
896 * - rendezvous again,
897 * - execute the teardown function (if specified), and then
898 * - resume.
899 *
900 * Note that the supplied external functions _must_ be reentrant and aware
901 * that they are running in parallel and in an unknown lock context.
902 */
903
904 static void
905 mp_rendezvous_action(void)
906 {
907
908 /* setup function */
909 if (mp_rv_setup_func != NULL)
910 mp_rv_setup_func(mp_rv_func_arg);
911 /* spin on entry rendezvous */
912 atomic_incl(&mp_rv_waiters[0], 1);
913 while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
914 cpu_pause();
915 /* action function */
916 if (mp_rv_action_func != NULL)
917 mp_rv_action_func(mp_rv_func_arg);
918 /* spin on exit rendezvous */
919 atomic_incl(&mp_rv_waiters[1], 1);
920 while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
921 cpu_pause();
922 /* teardown function */
923 if (mp_rv_teardown_func != NULL)
924 mp_rv_teardown_func(mp_rv_func_arg);
925 }
926
927 void
928 mp_rendezvous(void (*setup_func)(void *),
929 void (*action_func)(void *),
930 void (*teardown_func)(void *),
931 void *arg)
932 {
933
934 if (!smp_initialized) {
935 if (setup_func != NULL)
936 setup_func(arg);
937 if (action_func != NULL)
938 action_func(arg);
939 if (teardown_func != NULL)
940 teardown_func(arg);
941 return;
942 }
943
944 /* obtain rendezvous lock */
945 simple_lock(&mp_rv_lock);
946
947 /* set static function pointers */
948 mp_rv_setup_func = setup_func;
949 mp_rv_action_func = action_func;
950 mp_rv_teardown_func = teardown_func;
951 mp_rv_func_arg = arg;
952
953 mp_rv_waiters[0] = 0; /* entry rendezvous count */
954 mp_rv_waiters[1] = 0; /* exit rendezvous count */
955 mp_rv_ncpus = i386_active_cpus();
956
957 /*
958 * signal other processors, which will call mp_rendezvous_action()
959 * with interrupts disabled
960 */
961 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
962
963 /* call executor function on this cpu */
964 mp_rendezvous_action();
965
966 /* release lock */
967 simple_unlock(&mp_rv_lock);
968 }
969
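/*
 * Illustrative sketch only (not compiled): a typical mp_rendezvous()
 * client.  The hypothetical action routine runs on every active cpu
 * between the two barriers; per the note above it must be reentrant
 * and must not rely on any particular lock state.
 */
#if 0
static void
rendezvous_action_sketch(__unused void *arg)
{
	/* e.g. rewrite a control register identically on all cpus */
}

static void
rendezvous_usage_sketch(void)
{
	mp_rendezvous(NULL,			/* no setup */
		      rendezvous_action_sketch,	/* runs on all cpus */
		      NULL,			/* no teardown */
		      NULL);			/* no argument */
}
#endif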
970 #if MACH_KDP
971 volatile boolean_t mp_kdp_trap = FALSE;
972 long mp_kdp_ncpus;
973 boolean_t mp_kdp_state;
974
975
976 void
977 mp_kdp_enter(void)
978 {
979 unsigned int cpu;
980 unsigned int ncpus;
981 unsigned int my_cpu = cpu_number();
982 uint64_t tsc_timeout;
983
984 DBG("mp_kdp_enter()\n");
985
986 /*
987 * Here to enter the debugger.
988 * In case of races, only one cpu is allowed to enter kdp after
989 * stopping others.
990 */
991 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
992 simple_lock(&mp_kdp_lock);
993 while (mp_kdp_trap) {
994 simple_unlock(&mp_kdp_lock);
995 DBG("mp_kdp_enter() race lost\n");
996 mp_kdp_wait();
997 simple_lock(&mp_kdp_lock);
998 }
999 mp_kdp_ncpus = 1; /* self */
1000 mp_kdp_trap = TRUE;
1001 simple_unlock(&mp_kdp_lock);
1002
1003 /* Deliver a nudge to other cpus, counting how many */
1004 DBG("mp_kdp_enter() signaling other processors\n");
1005 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
1006 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1007 continue;
1008 ncpus++;
1009 i386_signal_cpu(cpu, MP_KDP, ASYNC);
1010 }
1011
1012 /* Wait for the other processors to spin. */
1013 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
1014 tsc_timeout = rdtsc64() + (1000*1000*1000);
1015 while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
1016 && rdtsc64() < tsc_timeout) {
1017 cpu_pause();
1018 }
1019 DBG("mp_kdp_enter() %d processors done %s\n",
1020 mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
1021 postcode(MP_KDP_ENTER);
1022 }
1023
1024 static void
1025 mp_kdp_wait(void)
1026 {
1027 boolean_t state;
1028
1029 state = ml_set_interrupts_enabled(TRUE);
1030 DBG("mp_kdp_wait()\n");
1031 atomic_incl(&mp_kdp_ncpus, 1);
1032 while (mp_kdp_trap) {
1033 cpu_pause();
1034 }
1035 atomic_decl(&mp_kdp_ncpus, 1);
1036 DBG("mp_kdp_wait() done\n");
1037 (void) ml_set_interrupts_enabled(state);
1038 }
1039
1040 void
1041 mp_kdp_exit(void)
1042 {
1043 DBG("mp_kdp_exit()\n");
1044 atomic_decl(&mp_kdp_ncpus, 1);
1045 mp_kdp_trap = FALSE;
1046
1047 /* Wait for other processors to stop spinning. XXX needs timeout */
1048 DBG("mp_kdp_exit() waiting for processors to resume\n");
1049 while (*((volatile long *) &mp_kdp_ncpus) > 0) {
1050 cpu_pause();
1051 }
1052 DBG("mp_kdp_exit() done\n");
1053 (void) ml_set_interrupts_enabled(mp_kdp_state);
1054 postcode(0);
1055 }
1056 #endif /* MACH_KDP */
1057
1058 /*ARGSUSED*/
1059 void
1060 init_ast_check(
1061 __unused processor_t processor)
1062 {
1063 }
1064
1065 void
1066 cause_ast_check(
1067 processor_t processor)
1068 {
1069 int cpu = PROCESSOR_DATA(processor, slot_num);
1070
1071 if (cpu != cpu_number()) {
1072 i386_signal_cpu(cpu, MP_AST, ASYNC);
1073 }
1074 }
1075
1076 /*
1077 * invoke kdb on slave processors
1078 */
1079
1080 void
1081 remote_kdb(void)
1082 {
1083 unsigned int my_cpu = cpu_number();
1084 unsigned int cpu;
1085
1086 mp_disable_preemption();
1087 for (cpu = 0; cpu < real_ncpus; cpu++) {
1088 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1089 continue;
1090 i386_signal_cpu(cpu, MP_KDB, SYNC);
1091 }
1092 mp_enable_preemption();
1093 }
1094
1095 /*
1096 * Clear kdb interrupt
1097 */
1098
1099 void
1100 clear_kdb_intr(void)
1101 {
1102 mp_disable_preemption();
1103 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1104 mp_enable_preemption();
1105 }
1106
1107 /*
1108 * i386_init_slave() is called from pstart.
1109 * We're on the cpu's interrupt stack with interrupts disabled.
1110 */
1111 void
1112 i386_init_slave(void)
1113 {
1114 postcode(I386_INIT_SLAVE);
1115
1116 /* Ensure that caching and write-through are enabled */
1117 set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));
1118
1119 DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
1120 get_cpu_number(), get_cpu_phys_number());
1121
1122 lapic_init();
1123
1124 LAPIC_DUMP();
1125 LAPIC_CPU_MAP_DUMP();
1126
1127 mtrr_update_cpu();
1128
1129 pat_init();
1130
1131 cpu_init();
1132
1133 slave_main();
1134
1135 panic("i386_init_slave() returned from slave_main()");
1136 }
1137
1138 void
1139 slave_machine_init(void)
1140 {
1141 /*
1142 * Here in process context.
1143 */
1144 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1145
1146 init_fpu();
1147
1148 cpu_thread_init();
1149
1150 pmc_init();
1151
1152 cpu_machine_init();
1153
1154 clock_init();
1155 }
1156
1157 #undef cpu_number
1158 int cpu_number(void)
1159 {
1160 return get_cpu_number();
1161 }
1162
1163 #if MACH_KDB
1164 #include <ddb/db_output.h>
1165
1166 #define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1167
1168
1169 #if TRAP_DEBUG
1170 #define MTRAPS 100
1171 struct mp_trap_hist_struct {
1172 unsigned char type;
1173 unsigned char data[5];
1174 } trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1175 *max_trap_hist = &trap_hist[MTRAPS];
1176
1177 void db_trap_hist(void);
1178
1179 /*
1180 * SPL:
1181 * 1: new spl
1182 * 2: old spl
1183 * 3: new tpr
1184 * 4: old tpr
1185 * INT:
1186 * 1: int vec
1187 * 2: old spl
1188 * 3: new spl
1189 * 4: post eoi tpr
1190 * 5: exit tpr
1191 */
1192
1193 void
1194 db_trap_hist(void)
1195 {
1196 int i,j;
1197 for(i=0;i<MTRAPS;i++)
1198 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1199 db_printf("%s%s",
1200 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1201 (trap_hist[i].type == 1)?"SPL":"INT");
1202 for(j=0;j<5;j++)
1203 db_printf(" %02x", trap_hist[i].data[j]);
1204 db_printf("\n");
1205 }
1206
1207 }
1208 #endif /* TRAP_DEBUG */
1209
1210 void db_lapic(int cpu);
1211 unsigned int db_remote_read(int cpu, int reg);
1212 void db_ioapic(unsigned int);
1213 void kdb_console(void);
1214
1215 void
1216 kdb_console(void)
1217 {
1218 }
1219
1220 #define BOOLP(a) ((a)?' ':'!')
1221
1231
1232 unsigned int
1233 db_remote_read(__unused int cpu, __unused int reg)
1234 {
1235 return -1;
1236 }
1237
1238 void
1239 db_lapic(__unused int cpu)
1240 {
1241 }
1242
1243 void
1244 db_ioapic(__unused unsigned int ind)
1245 {
1246 }
1247
1248 #endif /* MACH_KDB */
1249