[apple/xnu.git] / osfmk / i386 / mp.c (xnu-792.21.3)
55e303ae 1/*
21362eb3 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
55e303ae 3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
55e303ae 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32#include <mach_rt.h>
33#include <mach_kdb.h>
34#include <mach_kdp.h>
35#include <mach_ldebug.h>
36#include <gprof.h>
37
38#include <mach/mach_types.h>
39#include <mach/kern_return.h>
40
41#include <kern/kern_types.h>
42#include <kern/startup.h>
43#include <kern/processor.h>
44#include <kern/cpu_number.h>
45#include <kern/cpu_data.h>
46#include <kern/assert.h>
47#include <kern/machine.h>
48
49#include <vm/vm_map.h>
50#include <vm/vm_kern.h>
51
52#include <profiling/profile-mk.h>
53
54#include <i386/mp.h>
55#include <i386/mp_events.h>
56#include <i386/mp_slave_boot.h>
57#include <i386/apic.h>
58#include <i386/ipl.h>
59#include <i386/fpu.h>
21362eb3 60#include <i386/pio.h>
61#include <i386/cpuid.h>
62#include <i386/proc_reg.h>
63#include <i386/machine_cpu.h>
64#include <i386/misc_protos.h>
65#include <i386/mtrr.h>
66#include <i386/postcode.h>
67#include <i386/perfmon.h>
68#include <i386/cpu_threads.h>
69#include <i386/mp_desc.h>
70
71#if MP_DEBUG
72#define PAUSE delay(1000000)
73#define DBG(x...) kprintf(x)
74#else
75#define DBG(x...)
76#define PAUSE
77#endif /* MP_DEBUG */
78
79/*
80 * By default, use high vectors to leave vector space for systems
81 * with multiple I/O APIC's. However some systems that boot with
82 * local APIC disabled will hang in SMM when vectors greater than
83 * 0x5F are used. Those systems are not expected to have I/O APIC
84 * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
85 */
86#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
87#define LAPIC_REDUCED_INTERRUPT_BASE 0x50
88/*
89 * Specific lapic interrupts are relative to this base:
90 */
91#define LAPIC_PERFCNT_INTERRUPT 0xB
92#define LAPIC_TIMER_INTERRUPT 0xC
93#define LAPIC_SPURIOUS_INTERRUPT 0xD
94#define LAPIC_INTERPROCESSOR_INTERRUPT 0xE
95#define LAPIC_ERROR_INTERRUPT 0xF
96
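/*
 * Worked example of the two layouts, derived from the constants above:
 * with the default base 0xD0 the timer interrupt arrives on vector
 * 0xD0 + 0xC = 0xDC; with the reduced base it arrives on 0x50 + 0xC = 0x5C,
 * and the highest local APIC vector (ERROR, offset 0xF) becomes 0x5F,
 * staying within the 0x5F limit noted above for SMM-affected systems.
 */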
97/* Initialize lapic_id so cpu_number() works on non SMP systems */
98unsigned long lapic_id_initdata = 0;
99unsigned long lapic_id = (unsigned long)&lapic_id_initdata;
100vm_offset_t lapic_start;
101
102static i386_intr_func_t lapic_timer_func;
103static i386_intr_func_t lapic_pmi_func;
104
105/* TRUE if local APIC was enabled by the OS not by the BIOS */
106static boolean_t lapic_os_enabled = FALSE;
107
108/* Base vector for local APIC interrupt sources */
109int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
55e303ae 110
111void slave_boot_init(void);
112
113static void mp_kdp_wait(void);
114static void mp_rendezvous_action(void);
115
116boolean_t smp_initialized = FALSE;
117
118decl_simple_lock_data(,mp_kdp_lock);
119
120decl_mutex_data(static, mp_cpu_boot_lock);
121
122/* Variables needed for MP rendezvous. */
123static void (*mp_rv_setup_func)(void *arg);
124static void (*mp_rv_action_func)(void *arg);
125static void (*mp_rv_teardown_func)(void *arg);
126static void *mp_rv_func_arg;
127static int mp_rv_ncpus;
21362eb3 128static long mp_rv_waiters[2];
129decl_simple_lock_data(,mp_rv_lock);
130
131int lapic_to_cpu[MAX_CPUS];
132int cpu_to_lapic[MAX_CPUS];
133
134static void
135lapic_cpu_map_init(void)
136{
137 int i;
138
91447636 139 for (i = 0; i < MAX_CPUS; i++) {
55e303ae 140 lapic_to_cpu[i] = -1;
141 cpu_to_lapic[i] = -1;
142 }
143}
144
145void
91447636 146lapic_cpu_map(int apic_id, int cpu)
55e303ae 147{
148 cpu_to_lapic[cpu] = apic_id;
149 lapic_to_cpu[apic_id] = cpu;
150}
151
152#ifdef MP_DEBUG
153static void
154lapic_cpu_map_dump(void)
155{
156 int i;
157
91447636 158 for (i = 0; i < MAX_CPUS; i++) {
159 if (cpu_to_lapic[i] == -1)
160 continue;
161 kprintf("cpu_to_lapic[%d]: %d\n",
162 i, cpu_to_lapic[i]);
163 }
91447636 164 for (i = 0; i < MAX_CPUS; i++) {
165 if (lapic_to_cpu[i] == -1)
166 continue;
167 kprintf("lapic_to_cpu[%d]: %d\n",
168 i, lapic_to_cpu[i]);
169 }
170}
171#define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump()
172#define LAPIC_DUMP() lapic_dump()
173#else
174#define LAPIC_CPU_MAP_DUMP()
175#define LAPIC_DUMP()
176#endif /* MP_DEBUG */
177
178#define LAPIC_REG(reg) \
179 (*((volatile int *)(lapic_start + LAPIC_##reg)))
180#define LAPIC_REG_OFFSET(reg,off) \
181 (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
182
183#define LAPIC_VECTOR(src) \
184 (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
185
186#define LAPIC_ISR_IS_SET(base,src) \
187 (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
188 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
189
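/*
 * Example expansion, following directly from the macros above: for the
 * reduced base 0x50, LAPIC_ISR_IS_SET(0x50, TIMER) checks vector
 * 0x5C (decimal 92) by reading the in-service word at
 * ISR_BASE + (92/32)*0x10 and testing bit 92%32 = 28.
 */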
190#if GPROF
191/*
192 * Initialize dummy structs for profiling. These aren't used but
193 * allow hertz_tick() to be built with GPROF defined.
194 */
195struct profile_vars _profile_vars;
196struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
197#define GPROF_INIT() \
198{ \
199 int i; \
200 \
201 /* Hack to initialize pointers to unused profiling structs */ \
202 for (i = 1; i < MAX_CPUS; i++) \
203 _profile_vars_cpus[i] = &_profile_vars; \
204}
205#else
206#define GPROF_INIT()
207#endif /* GPROF */
208
209extern void master_up(void);
210
211void
212smp_init(void)
213{
214 int result;
215 vm_map_entry_t entry;
216 uint32_t lo;
217 uint32_t hi;
218 boolean_t is_boot_processor;
219 boolean_t is_lapic_enabled;
220 vm_offset_t lapic_base;
221
222 simple_lock_init(&mp_kdp_lock, 0);
223 simple_lock_init(&mp_rv_lock, 0);
224 mutex_init(&mp_cpu_boot_lock, 0);
225 console_init();
226
227 /* Local APIC? */
91447636 228 if (!lapic_probe())
229 return;
230
231 /* Examine the local APIC state */
232 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
233 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
234 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
235 lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
236 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
237 is_lapic_enabled ? "enabled" : "disabled",
238 is_boot_processor ? "BSP" : "AP");
239 if (!is_boot_processor || !is_lapic_enabled)
240 panic("Unexpected local APIC state\n");
241
242 /* Establish a map to the local apic */
243 lapic_start = vm_map_min(kernel_map);
244 result = vm_map_find_space(kernel_map, &lapic_start,
21362eb3 245 round_page(LAPIC_SIZE), 0, &entry);
55e303ae 246 if (result != KERN_SUCCESS) {
91447636 247 panic("smp_init: vm_map_find_space FAILED (err=%d)", result);
248 }
249 vm_map_unlock(kernel_map);
250 pmap_enter(pmap_kernel(),
251 lapic_start,
91447636 252 (ppnum_t) i386_btop(lapic_base),
253 VM_PROT_READ|VM_PROT_WRITE,
254 VM_WIMG_USE_DEFAULT,
255 TRUE);
256 lapic_id = (unsigned long)(lapic_start + LAPIC_ID);
257
258 if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
259 printf("Local APIC version not 0x14 as expected\n");
260 }
261
262 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
263 lapic_cpu_map_init();
264 lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
265
266 lapic_init();
267
268 cpu_thread_init();
269
270 if (pmc_init() != KERN_SUCCESS)
271 printf("Performance counters not available\n");
272
273 GPROF_INIT();
274 DBGLOG_CPU_INIT(master_cpu);
275
55e303ae 276 slave_boot_init();
21362eb3 277 master_up();
278
279 smp_initialized = TRUE;
280
281 return;
282}
283
284
91447636 285static int
286lapic_esr_read(void)
287{
288 /* write-read register */
289 LAPIC_REG(ERROR_STATUS) = 0;
290 return LAPIC_REG(ERROR_STATUS);
291}
292
91447636 293static void
294lapic_esr_clear(void)
295{
296 LAPIC_REG(ERROR_STATUS) = 0;
297 LAPIC_REG(ERROR_STATUS) = 0;
298}
299
91447636 300static const char *DM[8] = {
301 "Fixed",
302 "Lowest Priority",
303 "Invalid",
304 "Invalid",
305 "NMI",
306 "Reset",
307 "Invalid",
308 "ExtINT"};
309
310void
311lapic_dump(void)
312{
313 int i;
314
315#define BOOL(a) ((a)?' ':'!')
316
317 kprintf("LAPIC %d at 0x%x version 0x%x\n",
318 (LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
319 lapic_start,
320 LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
321 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
322 LAPIC_REG(TPR)&LAPIC_TPR_MASK,
323 LAPIC_REG(APR)&LAPIC_APR_MASK,
324 LAPIC_REG(PPR)&LAPIC_PPR_MASK);
325 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
326 LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
327 LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
328 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
329 BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
330 BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
331 LAPIC_REG(SVR) & LAPIC_SVR_MASK);
332 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
333 LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
334 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
335 BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
336 (LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
337 kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
338 kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
339 kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
340 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
341 LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
342 DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
343 (LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
344 BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
345 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
346 LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
347 DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
348 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
349 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
350 (LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
351 BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
352 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
353 LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
354 DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
355 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
356 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
357 (LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
358 BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
359 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
360 LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
361 (LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
362 BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
363 kprintf("ESR: %08x \n", lapic_esr_read());
364 kprintf(" ");
365 for(i=0xf; i>=0; i--)
366 kprintf("%x%x%x%x",i,i,i,i);
367 kprintf("\n");
368 kprintf("TMR: 0x");
369 for(i=7; i>=0; i--)
370 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
371 kprintf("\n");
372 kprintf("IRR: 0x");
373 for(i=7; i>=0; i--)
374 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
375 kprintf("\n");
376 kprintf("ISR: 0x");
377 for(i=7; i >= 0; i--)
378 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
379 kprintf("\n");
380}
381
382boolean_t
383lapic_probe(void)
384{
385 uint32_t lo;
386 uint32_t hi;
387
388 if (cpuid_features() & CPUID_FEATURE_APIC)
389 return TRUE;
390
391 if (cpuid_family() == 6 || cpuid_family() == 15) {
392 /*
393 * Mobile Pentiums:
394 * There may be a local APIC which wasn't enabled by BIOS.
395 * So we try to enable it explicitly.
396 */
397 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
398 lo &= ~MSR_IA32_APIC_BASE_BASE;
399 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
400 lo |= MSR_IA32_APIC_BASE_ENABLE;
401 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
402
403 /*
404 * Re-initialize cpu features info and re-check.
405 */
21362eb3 406 set_cpu_model();
407 if (cpuid_features() & CPUID_FEATURE_APIC) {
408 printf("Local APIC discovered and enabled\n");
409 lapic_os_enabled = TRUE;
410 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
411 return TRUE;
412 }
413 }
414
415 return FALSE;
416}
417
55e303ae 418void
91447636 419lapic_shutdown(void)
55e303ae 420{
421 uint32_t lo;
422 uint32_t hi;
423 uint32_t value;
424
425 /* Shutdown if local APIC was enabled by OS */
426 if (lapic_os_enabled == FALSE)
427 return;
428
429 mp_disable_preemption();
430
431 /* ExtINT: masked */
432 if (get_cpu_number() == master_cpu) {
433 value = LAPIC_REG(LVT_LINT0);
434 value |= LAPIC_LVT_MASKED;
435 LAPIC_REG(LVT_LINT0) = value;
436 }
437
438 /* Timer: masked */
439 LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;
440
441 /* Perfmon: masked */
442 LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;
443
444 /* Error: masked */
445 LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;
446
447 /* APIC software disabled */
448 LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;
449
450 /* Bypass the APIC completely and update cpu features */
451 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
452 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
453 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
21362eb3 454 set_cpu_model();
455
456 mp_enable_preemption();
457}
458
459void
460lapic_init(void)
461{
462 int value;
463
464 /* Set flat delivery model, logical processor id */
465 LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
466 LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;
467
468 /* Accept all */
469 LAPIC_REG(TPR) = 0;
470
91447636 471 LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;
472
473 /* ExtINT */
474 if (get_cpu_number() == master_cpu) {
475 value = LAPIC_REG(LVT_LINT0);
91447636 476 value &= ~LAPIC_LVT_MASKED;
477 value |= LAPIC_LVT_DM_EXTINT;
478 LAPIC_REG(LVT_LINT0) = value;
479 }
480
481 /* Timer: unmasked, one-shot */
482 LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);
483
484 /* Perfmon: unmasked */
485 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
486
487 lapic_esr_clear();
488
91447636 489 LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
21362eb3 490
491}
492
493void
494lapic_set_timer_func(i386_intr_func_t func)
495{
496 lapic_timer_func = func;
497}
498
499void
500lapic_set_timer(
501 boolean_t interrupt,
502 lapic_timer_mode_t mode,
503 lapic_timer_divide_t divisor,
504 lapic_timer_count_t initial_count)
505{
506 boolean_t state;
507 uint32_t timer_vector;
508
509 state = ml_set_interrupts_enabled(FALSE);
510 timer_vector = LAPIC_REG(LVT_TIMER);
511 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
512 timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
513 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
514 LAPIC_REG(LVT_TIMER) = timer_vector;
515 LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
516 LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
517 ml_set_interrupts_enabled(state);
518}
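/*
 * Usage sketch; the divide constant name is assumed from apic.h and is
 * not defined in this file.  Arm an unmasked one-shot countdown of
 * 10000 timer ticks:
 *
 *	lapic_set_timer(TRUE, one_shot, divide_by_1, 10000);
 */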
519
520void
521lapic_get_timer(
522 lapic_timer_mode_t *mode,
523 lapic_timer_divide_t *divisor,
524 lapic_timer_count_t *initial_count,
525 lapic_timer_count_t *current_count)
526{
527 boolean_t state;
528
529 state = ml_set_interrupts_enabled(FALSE);
530 if (mode)
531 *mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
532 periodic : one_shot;
533 if (divisor)
534 *divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
535 if (initial_count)
536 *initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
537 if (current_count)
538 *current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
539 ml_set_interrupts_enabled(state);
540}
541
542void
543lapic_set_pmi_func(i386_intr_func_t func)
544{
545 lapic_pmi_func = func;
546}
547
548static inline void
549_lapic_end_of_interrupt(void)
550{
551 LAPIC_REG(EOI) = 0;
552}
553
554void
555lapic_end_of_interrupt(void)
556{
557 _lapic_end_of_interrupt();
558}
559
560int
21362eb3 561lapic_interrupt(int interrupt, void *state)
55e303ae 562{
91447636 563 interrupt -= lapic_interrupt_base;
564 if (interrupt < 0)
565 return 0;
566
567 switch(interrupt) {
568 case LAPIC_PERFCNT_INTERRUPT:
569 if (lapic_pmi_func != NULL)
570 (*lapic_pmi_func)(
571 (struct i386_interrupt_state *) state);
572 /* Clear interrupt masked */
573 LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
574 _lapic_end_of_interrupt();
21362eb3 575 return 1;
576 case LAPIC_TIMER_INTERRUPT:
577 _lapic_end_of_interrupt();
578 if (lapic_timer_func != NULL)
579 (*lapic_timer_func)(
580 (struct i386_interrupt_state *) state);
581 return 1;
582 case LAPIC_ERROR_INTERRUPT:
583 lapic_dump();
55e303ae 584 panic("Local APIC error\n");
91447636 585 _lapic_end_of_interrupt();
21362eb3 586 return 1;
91447636 587 case LAPIC_SPURIOUS_INTERRUPT:
55e303ae 588 kprintf("SPIV\n");
91447636 589 /* No EOI required here */
21362eb3 590 return 1;
91447636 591 case LAPIC_INTERPROCESSOR_INTERRUPT:
21362eb3 592 cpu_signal_handler((struct i386_interrupt_state *) state);
91447636 593 _lapic_end_of_interrupt();
21362eb3 594 return 1;
55e303ae 595 }
21362eb3 596 return 0;
597}
598
599void
600lapic_smm_restore(void)
601{
602 boolean_t state;
603
604 if (lapic_os_enabled == FALSE)
605 return;
606
607 state = ml_set_interrupts_enabled(FALSE);
608
609 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
610 /*
611 * Bogus SMI handler enables interrupts but does not know about
612 * local APIC interrupt sources. When APIC timer counts down to
613 * zero while in SMM, local APIC will end up waiting for an EOI
614 * but no interrupt was delivered to the OS.
615 */
616 _lapic_end_of_interrupt();
617
618 /*
619 * timer is one-shot, trigger another quick countdown to trigger
620 * another timer interrupt.
621 */
622 if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
623 LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
624 }
625
626 kprintf("lapic_smm_restore\n");
627 }
628
629 ml_set_interrupts_enabled(state);
630}
631
632kern_return_t
633intel_startCPU(
634 int slot_num)
635{
636
637 int i = 1000;
91447636 638 int lapic = cpu_to_lapic[slot_num];
55e303ae 639
640 assert(lapic != -1);
641
642 DBGLOG_CPU_INIT(slot_num);
55e303ae 643
644 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
645 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);
55e303ae 646
647 /* Initialize (or re-initialize) the descriptor tables for this cpu. */
648 mp_desc_init(cpu_datap(slot_num), FALSE);
649
650 /* Serialize use of the slave boot stack. */
651 mutex_lock(&mp_cpu_boot_lock);
652
653 mp_disable_preemption();
654 if (slot_num == get_cpu_number()) {
655 mp_enable_preemption();
656 mutex_unlock(&mp_cpu_boot_lock);
657 return KERN_SUCCESS;
658 }
55e303ae 659
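	/*
	 * Wake the AP with the architectural INIT/STARTUP sequence: one
	 * INIT IPI followed by two STARTUP IPIs.  The STARTUP vector field
	 * carries the page number of the real-mode entry point, hence
	 * MP_BOOT >> 12.
	 */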
91447636 660 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
661 LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
662 delay(10000);
663
91447636 664 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
665 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
666 delay(200);
667
668 LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
669 LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
670 delay(200);
671
672#ifdef POSTCODE_DELAY
673 /* Wait much longer if postcodes are displayed for a delay period. */
674 i *= 10000;
675#endif
55e303ae 676 while(i-- > 0) {
91447636 677 if (cpu_datap(slot_num)->cpu_running)
55e303ae 678 break;
91447636 679 delay(10000);
680 }
681
682 mp_enable_preemption();
91447636 683 mutex_unlock(&mp_cpu_boot_lock);
55e303ae 684
91447636 685 if (!cpu_datap(slot_num)->cpu_running) {
21362eb3 686 DBG("Failed to start CPU %02d\n", slot_num);
687 printf("Failed to start CPU %02d, rebooting...\n", slot_num);
688 delay(1000000);
689 cpu_shutdown();
690 return KERN_SUCCESS;
691 } else {
21362eb3 692 DBG("Started CPU %02d\n", slot_num);
693 printf("Started CPU %02d\n", slot_num);
694 return KERN_SUCCESS;
695 }
696}
697
698extern char slave_boot_base[];
699extern char slave_boot_end[];
21362eb3 700extern void pstart(void);
91447636 701
702void
703slave_boot_init(void)
704{
705 DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
706 slave_boot_base,
707 kvtophys((vm_offset_t) slave_boot_base),
708 MP_BOOT,
709 slave_boot_end-slave_boot_base);
710
711 /*
712 * Copy the boot entry code to the real-mode vector area MP_BOOT.
713 * This is in page 1 which has been reserved for this purpose by
714 * machine_startup() from the boot processor.
715 * The slave boot code is responsible for switching to protected
91447636 716 * mode and then jumping to the common startup, _start().
55e303ae 717 */
21362eb3 718 bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
719 (addr64_t) MP_BOOT,
720 slave_boot_end-slave_boot_base);
721
722 /*
723 * Zero a stack area above the boot code.
724 */
725 DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
726 bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
727
728 /*
729 * Set the location at the base of the stack to point to the
730 * common startup entry.
731 */
91447636 732 DBG("writing 0x%x at phys 0x%x\n",
21362eb3 733 kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
91447636 734 ml_phys_write_word(MP_MACH_START+MP_BOOT,
21362eb3 735 kvtophys((vm_offset_t) &pstart));
736
737 /* Flush caches */
738 __asm__("wbinvd");
739}
740
741#if MP_DEBUG
742cpu_signal_event_log_t *cpu_signal[MAX_CPUS];
743cpu_signal_event_log_t *cpu_handle[MAX_CPUS];
744
745MP_EVENT_NAME_DECL();
746
747#endif /* MP_DEBUG */
748
749void
21362eb3 750cpu_signal_handler(__unused struct i386_interrupt_state *regs)
55e303ae 751{
91447636 752 int my_cpu;
753 volatile int *my_word;
754#if MACH_KDB && MACH_ASSERT
755 int i=100;
756#endif /* MACH_KDB && MACH_ASSERT */
757
758 mp_disable_preemption();
759
760 my_cpu = cpu_number();
91447636 761 my_word = &current_cpu_datap()->cpu_signals;
762
763 do {
764#if MACH_KDB && MACH_ASSERT
765 if (i-- <= 0)
21362eb3 766 Debugger("cpu_signal_handler");
767#endif /* MACH_KDB && MACH_ASSERT */
768#if MACH_KDP
769 if (i_bit(MP_KDP, my_word)) {
770 DBGLOG(cpu_handle,my_cpu,MP_KDP);
771 i_bit_clear(MP_KDP, my_word);
772 mp_kdp_wait();
773 } else
774#endif /* MACH_KDP */
91447636 775 if (i_bit(MP_TLB_FLUSH, my_word)) {
776 DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
777 i_bit_clear(MP_TLB_FLUSH, my_word);
778 pmap_update_interrupt();
779 } else if (i_bit(MP_AST, my_word)) {
780 DBGLOG(cpu_handle,my_cpu,MP_AST);
781 i_bit_clear(MP_AST, my_word);
782 ast_check(cpu_to_processor(my_cpu));
783#if MACH_KDB
784 } else if (i_bit(MP_KDB, my_word)) {
21362eb3 785 extern kdb_is_slave[];
786
787 i_bit_clear(MP_KDB, my_word);
788 kdb_is_slave[my_cpu]++;
789 kdb_kintr();
790#endif /* MACH_KDB */
791 } else if (i_bit(MP_RENDEZVOUS, my_word)) {
792 DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
793 i_bit_clear(MP_RENDEZVOUS, my_word);
794 mp_rendezvous_action();
795 }
796 } while (*my_word);
797
798 mp_enable_preemption();
799
800}
801
802#ifdef MP_DEBUG
803extern int max_lock_loops;
804#endif /* MP_DEBUG */
805void
806cpu_interrupt(int cpu)
807{
808 boolean_t state;
809
810 if (smp_initialized) {
811
812 /* Wait for previous interrupt to be delivered... */
91447636 813#ifdef MP_DEBUG
21362eb3 814 int pending_busy_count = 0;
815 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
816 if (++pending_busy_count > max_lock_loops)
817 panic("cpus_interrupt() deadlock\n");
818#else
819 while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
820#endif /* MP_DEBUG */
55e303ae 821 cpu_pause();
91447636 822 }
823
824 state = ml_set_interrupts_enabled(FALSE);
825 LAPIC_REG(ICRD) =
826 cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
827 LAPIC_REG(ICR) =
91447636 828 LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
829 (void) ml_set_interrupts_enabled(state);
830 }
831
832}
833
834void
835i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
836{
837 volatile int *signals = &cpu_datap(cpu)->cpu_signals;
838 uint64_t tsc_timeout;
89b3af67 839
21362eb3 840
91447636 841 if (!cpu_datap(cpu)->cpu_running)
842 return;
843
89b3af67 844 DBGLOG(cpu_signal, cpu, event);
21362eb3 845
846 i_bit_set(event, signals);
847 cpu_interrupt(cpu);
848 if (mode == SYNC) {
849 again:
850 tsc_timeout = rdtsc64() + (1000*1000*1000);
851 while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
852 cpu_pause();
853 }
854 if (i_bit(event, signals)) {
855 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
856 cpu, event);
857 goto again;
858 }
859 }
860}
861
862void
863i386_signal_cpus(mp_event_t event, mp_sync_t mode)
864{
865 unsigned int cpu;
866 unsigned int my_cpu = cpu_number();
55e303ae 867
868 for (cpu = 0; cpu < real_ncpus; cpu++) {
869 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
870 continue;
871 i386_signal_cpu(cpu, event, mode);
872 }
873}
874
875int
876i386_active_cpus(void)
877{
878 unsigned int cpu;
879 unsigned int ncpus = 0;
55e303ae 880
881 for (cpu = 0; cpu < real_ncpus; cpu++) {
882 if (cpu_datap(cpu)->cpu_running)
883 ncpus++;
884 }
885 return(ncpus);
886}
887
888/*
889 * All-CPU rendezvous:
890 * - CPUs are signalled,
891 * - all execute the setup function (if specified),
892 * - rendezvous (i.e. all cpus reach a barrier),
893 * - all execute the action function (if specified),
894 * - rendezvous again,
895 * - execute the teardown function (if specified), and then
896 * - resume.
897 *
898 * Note that the supplied external functions _must_ be reentrant and aware
899 * that they are running in parallel and in an unknown lock context.
900 */
901
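/*
 * Usage sketch (a hypothetical caller, not part of this file).  The action
 * routine runs on every active cpu with interrupts disabled, so it must be
 * reentrant and must not rely on any lock being available:
 *
 *	static void
 *	flush_caches_action(__unused void *arg)
 *	{
 *		__asm__("wbinvd");
 *	}
 *
 *	mp_rendezvous(NULL, flush_caches_action, NULL, NULL);
 */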
902static void
903mp_rendezvous_action(void)
904{
905
906 /* setup function */
907 if (mp_rv_setup_func != NULL)
908 mp_rv_setup_func(mp_rv_func_arg);
909 /* spin on entry rendezvous */
910 atomic_incl(&mp_rv_waiters[0], 1);
21362eb3 911 while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
912 cpu_pause();
913 /* action function */
914 if (mp_rv_action_func != NULL)
915 mp_rv_action_func(mp_rv_func_arg);
916 /* spin on exit rendezvous */
917 atomic_incl(&mp_rv_waiters[1], 1);
21362eb3 918 while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
919 cpu_pause();
920 /* teardown function */
921 if (mp_rv_teardown_func != NULL)
922 mp_rv_teardown_func(mp_rv_func_arg);
923}
924
925void
926mp_rendezvous(void (*setup_func)(void *),
927 void (*action_func)(void *),
928 void (*teardown_func)(void *),
929 void *arg)
930{
931
932 if (!smp_initialized) {
933 if (setup_func != NULL)
934 setup_func(arg);
935 if (action_func != NULL)
936 action_func(arg);
937 if (teardown_func != NULL)
938 teardown_func(arg);
939 return;
940 }
941
942 /* obtain rendezvous lock */
943 simple_lock(&mp_rv_lock);
944
945 /* set static function pointers */
946 mp_rv_setup_func = setup_func;
947 mp_rv_action_func = action_func;
948 mp_rv_teardown_func = teardown_func;
949 mp_rv_func_arg = arg;
950
951 mp_rv_waiters[0] = 0; /* entry rendezvous count */
952 mp_rv_waiters[1] = 0; /* exit rendezvous count */
953 mp_rv_ncpus = i386_active_cpus();
954
955 /*
956 * signal other processors, which will call mp_rendezvous_action()
957 * with interrupts disabled
958 */
959 i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
960
961 /* call executor function on this cpu */
962 mp_rendezvous_action();
963
964 /* release lock */
965 simple_unlock(&mp_rv_lock);
966}
967
968#if MACH_KDP
969volatile boolean_t mp_kdp_trap = FALSE;
21362eb3 970long mp_kdp_ncpus;
971boolean_t mp_kdp_state;
972
973
974void
975mp_kdp_enter(void)
976{
977 unsigned int cpu;
978 unsigned int ncpus;
979 unsigned int my_cpu = cpu_number();
980 uint64_t tsc_timeout;
981
982 DBG("mp_kdp_enter()\n");
983
984 /*
985 * Here to enter the debugger.
986 * In case of races, only one cpu is allowed to enter kdp after
987 * stopping others.
988 */
91447636 989 mp_kdp_state = ml_set_interrupts_enabled(FALSE);
990 simple_lock(&mp_kdp_lock);
991 while (mp_kdp_trap) {
992 simple_unlock(&mp_kdp_lock);
993 DBG("mp_kdp_enter() race lost\n");
994 mp_kdp_wait();
995 simple_lock(&mp_kdp_lock);
996 }
997 mp_kdp_ncpus = 1; /* self */
998 mp_kdp_trap = TRUE;
999 simple_unlock(&mp_kdp_lock);
55e303ae 1000
21362eb3 1001 /* Deliver a nudge to other cpus, counting how many */
55e303ae 1002 DBG("mp_kdp_enter() signaling other processors\n");
1003 for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
1004 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
1005 continue;
1006 ncpus++;
21362eb3 1007 i386_signal_cpu(cpu, MP_KDP, ASYNC);
55e303ae 1008 }
89b3af67 1009
1010 /* Wait for other processors to spin. */
1011 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
1012 tsc_timeout = rdtsc64() + (1000*1000*1000);
1013 while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
1014 && rdtsc64() < tsc_timeout) {
1015 cpu_pause();
1016 }
1017 DBG("mp_kdp_enter() %d processors done %s\n",
1018 mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
91447636 1019 postcode(MP_KDP_ENTER);
1020}
1021
1022static void
1023mp_kdp_wait(void)
1024{
21362eb3 1025 boolean_t state;
89b3af67 1026
1027 state = ml_set_interrupts_enabled(TRUE);
1028 DBG("mp_kdp_wait()\n");
1029 atomic_incl(&mp_kdp_ncpus, 1);
1030 while (mp_kdp_trap) {
1031 cpu_pause();
1032 }
1033 atomic_decl(&mp_kdp_ncpus, 1);
1034 DBG("mp_kdp_wait() done\n");
21362eb3 1035 (void) ml_set_interrupts_enabled(state);
1036}
1037
1038void
1039mp_kdp_exit(void)
1040{
1041 DBG("mp_kdp_exit()\n");
1042 atomic_decl(&mp_kdp_ncpus, 1);
1043 mp_kdp_trap = FALSE;
1044
1045 /* Wait for other processors to stop spinning. XXX needs timeout */
1046 DBG("mp_kdp_exit() waiting for processors to resume\n");
21362eb3 1047 while (*((volatile long *) &mp_kdp_ncpus) > 0) {
1048 cpu_pause();
1049 }
1050 DBG("mp_kdp_exit() done\n");
1051 (void) ml_set_interrupts_enabled(mp_kdp_state);
1052 postcode(0);
1053}
1054#endif /* MACH_KDP */
1055
1056/*ARGSUSED*/
1057void
1058init_ast_check(
91447636 1059 __unused processor_t processor)
1060{
1061}
1062
1063void
1064cause_ast_check(
1065 processor_t processor)
1066{
91447636 1067 int cpu = PROCESSOR_DATA(processor, slot_num);
1068
1069 if (cpu != cpu_number()) {
1070 i386_signal_cpu(cpu, MP_AST, ASYNC);
1071 }
1072}
1073
1074/*
1075 * invoke kdb on slave processors
1076 */
1077
1078void
1079remote_kdb(void)
1080{
1081 unsigned int my_cpu = cpu_number();
1082 unsigned int cpu;
55e303ae 1083
1084 mp_disable_preemption();
1085 for (cpu = 0; cpu < real_ncpus; cpu++) {
91447636 1086 if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
55e303ae 1087 continue;
21362eb3 1088 i386_signal_cpu(cpu, MP_KDB, SYNC);
89b3af67 1089 }
21362eb3 1090 mp_enable_preemption();
1091}
1092
1093/*
1094 * Clear kdb interrupt
1095 */
1096
1097void
1098clear_kdb_intr(void)
1099{
1100 mp_disable_preemption();
91447636 1101 i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
1102 mp_enable_preemption();
1103}
1104
1105/*
1106 * i386_init_slave() is called from pstart.
1107 * We're in the cpu's interrupt stack with interrupts disabled.
1108 */
55e303ae 1109void
91447636 1110i386_init_slave(void)
55e303ae 1111{
91447636 1112 postcode(I386_INIT_SLAVE);
1113
1114 /* Ensure that caching and write-through are enabled */
1115 set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));
1116
1117 DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
1118 get_cpu_number(), get_cpu_phys_number());
1119
1120 lapic_init();
1121
1122 LAPIC_DUMP();
1123 LAPIC_CPU_MAP_DUMP();
1124
1125 mtrr_update_cpu();
1126
1127 pat_init();
1128
21362eb3 1129 cpu_init();
1130
1131 slave_main();
1132
1133 panic("i386_init_slave() returned from slave_main()");
1134}
1135
1136void
1137slave_machine_init(void)
1138{
1139 /*
21362eb3 1140 * Here in process context.
1141 */
1142 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
1143
21362eb3 1144 init_fpu();
89b3af67 1145
1146 cpu_thread_init();
1147
1148 pmc_init();
1149
1150 cpu_machine_init();
1151
1152 clock_init();
1153}
1154
1155#undef cpu_number()
1156int cpu_number(void)
1157{
1158 return get_cpu_number();
1159}
1160
1161#if MACH_KDB
1162#include <ddb/db_output.h>
1163
1164#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1165
1166
1167#if TRAP_DEBUG
1168#define MTRAPS 100
1169struct mp_trap_hist_struct {
1170 unsigned char type;
1171 unsigned char data[5];
1172} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
1173 *max_trap_hist = &trap_hist[MTRAPS];
1174
1175void db_trap_hist(void);
1176
1177/*
1178 * SPL:
1179 * 1: new spl
1180 * 2: old spl
1181 * 3: new tpr
1182 * 4: old tpr
1183 * INT:
1184 * 1: int vec
1185 * 2: old spl
1186 * 3: new spl
1187 * 4: post eoi tpr
1188 * 5: exit tpr
1189 */
1190
1191void
1192db_trap_hist(void)
1193{
1194 int i,j;
1195 for(i=0;i<MTRAPS;i++)
1196 if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
1197 db_printf("%s%s",
1198 (&trap_hist[i]>=cur_trap_hist)?"*":" ",
1199 (trap_hist[i].type == 1)?"SPL":"INT");
1200 for(j=0;j<5;j++)
1201 db_printf(" %02x", trap_hist[i].data[j]);
1202 db_printf("\n");
1203 }
1204
1205}
1206#endif /* TRAP_DEBUG */
1207
1208void db_lapic(int cpu);
1209unsigned int db_remote_read(int cpu, int reg);
1210void db_ioapic(unsigned int);
1211void kdb_console(void);
1212
1213void
1214kdb_console(void)
1215{
1216}
1217
1218#define BOOLP(a) ((a)?' ':'!')
1219
1220static char *DM[8] = {
1221 "Fixed",
1222 "Lowest Priority",
1223 "Invalid",
1224 "Invalid",
1225 "NMI",
1226 "Reset",
1227 "Invalid",
1228 "ExtINT"};
1229
1230unsigned int
1231db_remote_read(int cpu, int reg)
1232{
1233 return -1;
1234}
1235
1236void
1237db_lapic(int cpu)
1238{
1239}
1240
1241void
1242db_ioapic(unsigned int ind)
1243{
1244}
1245
1246#endif /* MACH_KDB */
1247