/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach_ldebug.h>

#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <vm/vm_kern.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>

#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
/* Initialize lapic_id so cpu_number() works on non SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

void		lapic_init(void);
void		slave_boot_init(void);

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

boolean_t	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);
decl_simple_lock_data(,mp_putc_lock);

/* Variables needed for MP rendezvous. */
static void		(*mp_rv_setup_func)(void *arg);
static void		(*mp_rv_action_func)(void *arg);
static void		(*mp_rv_teardown_func)(void *arg);
static void		*mp_rv_func_arg;
static int		mp_rv_ncpus;
static volatile long	mp_rv_waiters[2];
decl_simple_lock_data(,mp_rv_lock);

int	lapic_to_cpu[LAPIC_ID_MAX+1];
int	cpu_to_lapic[NCPUS];
void
lapic_cpu_map_init(void)
{
	int	i;

	/* Mark every slot invalid until a cpu registers;
	 * the -1 sentinel is what lapic_cpu_map_dump() skips. */
	for (i = 0; i < NCPUS; i++)
		cpu_to_lapic[i] = -1;
	for (i = 0; i <= LAPIC_ID_MAX; i++)
		lapic_to_cpu[i] = -1;
}

void
lapic_cpu_map(int apic_id, int cpu_number)
{
	cpu_to_lapic[cpu_number] = apic_id;
	lapic_to_cpu[apic_id] = cpu_number;
}
#if MP_DEBUG
void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < NCPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i <= LAPIC_ID_MAX; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */
#define LAPIC_REG(reg) \
	(*((volatile int *)(lapic_start + LAPIC_##reg)))
#define LAPIC_REG_OFFSET(reg,off) \
	(*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
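
/*
 * Usage note: these macros turn a register mnemonic into a volatile
 * access to the memory-mapped local APIC page, e.g. (as used later in
 * this file):
 *
 *	id = (LAPIC_REG(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
 *	LAPIC_REG(ERROR_STATUS) = 0;
 *
 * The volatile cast keeps the compiler from caching or reordering these
 * device-register accesses.
 */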
void
smp_init(void)
{
	kern_return_t	result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;

	/* Local APIC? */
	if ((cpuid_features() & CPUID_FEATURE_APIC) == 0)
		return;

	simple_lock_init(&mp_kdp_lock, ETAP_MISC_PRINTF);
	simple_lock_init(&mp_rv_lock, ETAP_MISC_PRINTF);
	simple_lock_init(&mp_putc_lock, ETAP_MISC_PRINTF);

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	DBG("MSR_IA32_APIC_BASE 0x%x:0x%x %s %s\n", hi, lo,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	assert(is_boot_processor);
	assert(is_lapic_enabled);

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map, &lapic_start,
				   round_page(LAPIC_SIZE), 0, &entry);
	if (result != KERN_SUCCESS) {
		printf("smp_init: vm_map_find_entry FAILED (err=%d). "
			"Only supporting ONE cpu.\n", result);
		return;
	}
	vm_map_unlock(kernel_map);
	pmap_enter(pmap_kernel(),
		lapic_start,
		(ppnum_t) i386_btop(i386_trunc_page(LAPIC_START)),
		VM_PROT_READ|VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT,
		TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

	lapic_init();

	slave_boot_init();

	smp_initialized = TRUE;
}
int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}
static char *DM[8] = {
	/* ... delivery mode names (elided) ... */
};

#define BOOL(a) ((a)?' ':'!')

void
lapic_dump(void)
{
	int	i;

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}
void
lapic_init(void)
{
	int	value;

	mp_disable_preemption();

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Enable the APIC via the spurious-interrupt vector register */
	LAPIC_REG(SVR) = SPURIOUS_INTERRUPT | LAPIC_SVR_ENABLE;

	/* The master cpu takes ExtINT (8259) delivery on LINT0 */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	LAPIC_REG(LVT_ERROR) = APIC_ERROR_INTERRUPT;

	mp_enable_preemption();
}
void
lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_interrupt(int interrupt, void *state)
{
	switch (interrupt) {
	case APIC_ERROR_INTERRUPT:
		panic("Local APIC error\n");
		break;
	case SPURIOUS_INTERRUPT:
		break;
	case INTERPROCESS_INTERRUPT:
		cpu_signal_handler((struct i386_interrupt_state *) state);
		break;
	}
	lapic_end_of_interrupt();
}
kern_return_t
intel_startCPU(
	int	slot_num)
{
	int	i;
	int	lapic_id = cpu_to_lapic[slot_num];

	if (slot_num == get_cpu_number())
		return KERN_SUCCESS;

	assert(lapic_id != -1);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic_id);

	mp_disable_preemption();

	/* Reset the target with an INIT IPI */
	LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_INIT;
	delay(10000);

	/* Then point it at the boot page with a STARTUP IPI */
	LAPIC_REG(ICRD) = lapic_id << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);

	/* Wait, bounded, for the slave to come up */
	for (i = 0; i < 1000; i++) {
		if (machine_slot[slot_num].running)
			break;
		delay(1000);
	}

	mp_enable_preemption();

	if (!machine_slot[slot_num].running) {
		DBG("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d\n", slot_num);
	} else {
		DBG("Started CPU %02d\n", slot_num);
		printf("Started CPU %02d\n", slot_num);
	}
	return KERN_SUCCESS;
}
void
slave_boot_init(void)
{
	extern char	slave_boot_base[];
	extern char	slave_boot_end[];
	extern void	pstart(void);

	DBG("slave_base=%p slave_end=%p MP_BOOT P=%p V=%p\n",
		slave_boot_base, slave_boot_end, MP_BOOT, phystokv(MP_BOOT));

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, pstart().
	 */
	bcopy(slave_boot_base,
	      (char *)phystokv(MP_BOOT),
	      slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	bzero((char *)(phystokv(MP_BOOTSTACK+MP_BOOT)-0x400), 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	*((vm_offset_t *) phystokv(MP_MACH_START+MP_BOOT)) =
		kvtophys((vm_offset_t)&pstart);
}
#if MP_DEBUG
cpu_signal_event_log_t	cpu_signal[NCPUS] = { 0, 0, 0 };
cpu_signal_event_log_t	cpu_handle[NCPUS] = { 0, 0, 0 };

MP_EVENT_NAME_DECL();

void
cpu_signal_dump_last(int cpu)
{
	cpu_signal_event_log_t	*logp = &cpu_signal[cpu];
	int			last;
	cpu_signal_event_t	*eventp;

	last = (logp->next_entry == 0) ?
			LOG_NENTRIES - 1 : logp->next_entry - 1;

	eventp = &logp->entry[last];

	kprintf("cpu%d: tsc=%lld cpu_signal(%d,%s)\n",
		cpu, eventp->time, eventp->cpu, mp_event_name[eventp->event]);
}

void
cpu_handle_dump_last(int cpu)
{
	cpu_signal_event_log_t	*logp = &cpu_handle[cpu];
	int			last;
	cpu_signal_event_t	*eventp;

	last = (logp->next_entry == 0) ?
			LOG_NENTRIES - 1 : logp->next_entry - 1;

	eventp = &logp->entry[last];

	kprintf("cpu%d: tsc=%lld cpu_signal_handle%s\n",
		cpu, eventp->time, mp_event_name[eventp->event]);
}
#endif /* MP_DEBUG */
void
cpu_signal_handler(struct i386_interrupt_state *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if MACH_KDB && MACH_ASSERT
	int		i = 100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &cpu_data[my_cpu].cpu_signals;

	do {
#if MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler");
#endif	/* MACH_KDB && MACH_ASSERT */
#if MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			mp_kdp_wait();
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_CLOCK, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_CLOCK);
			i_bit_clear(MP_CLOCK, my_word);
			/* ... */
		} else if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {
			extern kdb_is_slave[];

			i_bit_clear(MP_KDB, my_word);
			kdb_is_slave[my_cpu]++;
			/* ... */
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		}
	} while (*my_word);

	mp_enable_preemption();
}
void
cpu_interrupt(int cpu)
{
	boolean_t	state;

	if (smp_initialized) {

		/* Wait for previous interrupt to be delivered... */
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING)
			;

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR) =
			INTERPROCESS_INTERRUPT | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}
}
/*
 * Clock interrupts are chained from the boot processor
 * to the next logical processor that is running and from
 * there on to any further running processor etc.
 */
void			/* (function name not preserved in this excerpt) */
clock_interrupt_chain(void)
{
	int	cpu;

	mp_disable_preemption();
	for (cpu = cpu_number()+1; cpu < NCPUS; cpu++)
		if (machine_slot[cpu].running) {
			i386_signal_cpu(cpu, MP_CLOCK, ASYNC);
			mp_enable_preemption();
			return;
		}
	mp_enable_preemption();
}
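
/*
 * Worked example: with cpus 0, 1 and 3 running, the chain is
 * 0 -> 1 -> 3; each cpu forwards MP_CLOCK to the next higher-numbered
 * running cpu, and cpu 3, finding none above it, ends the chain.
 */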
void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_data[cpu].cpu_signals;
	uint64_t	timeout;

	if (!cpu_data[cpu].cpu_status)
		return;

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	cpu_interrupt(cpu);
	if (mode == SYNC) {
		/* Spin until the target clears the bit, bounded by
		 * roughly a billion tsc ticks */
		timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < timeout)
			;
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
		}
	}
}
void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	int	cpu;
	int	my_cpu = cpu_number();

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}

int
i386_active_cpus(void)
{
	int	cpu;
	int	ncpus = 0;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (machine_slot[cpu].running)
			ncpus++;
	}
	return ncpus;
}
/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{
	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (mp_rv_waiters[0] < mp_rv_ncpus)
		;
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (mp_rv_waiters[1] < mp_rv_ncpus)
		;
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}
void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{
	if (!smp_initialized) {
		/* Not yet MP: just run the functions on this cpu */
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;		/* entry rendezvous count */
	mp_rv_waiters[1] = 0;		/* exit rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
#if MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
long			mp_kdp_ncpus;

void
mp_kdp_enter(void)
{
	int		cpu;
	int		ncpus;
	int		my_cpu = cpu_number();
	boolean_t	state;
	uint64_t	timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping the others.
	 */
	state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);
	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_ncpus = 1;	/* self */
	mp_kdp_trap = TRUE;
	simple_unlock(&mp_kdp_lock);
	(void) ml_set_interrupts_enabled(state);

	/* Deliver a nudge to other cpus, counting how many */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}

	/* Wait other processors to spin. */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
	timeout = rdtsc64() + (1000*1000*1000);
	while (*((volatile long *) &mp_kdp_ncpus) != ncpus
		&& rdtsc64() < timeout)
		;
	DBG("mp_kdp_enter() %d processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
}
756 DBG("mp_kdp_wait()\n");
757 atomic_incl(&mp_kdp_ncpus
, 1);
758 while (mp_kdp_trap
) {
761 atomic_decl(&mp_kdp_ncpus
, 1);
762 DBG("mp_kdp_wait() done\n");
768 DBG("mp_kdp_exit()\n");
769 atomic_decl(&mp_kdp_ncpus
, 1);
772 /* Wait other processors to stop spinning. XXX needs timeout */
773 DBG("mp_kdp_exit() waiting for processors to resume\n");
774 while (*((volatile long *) &mp_kdp_ncpus
) > 0) {
777 DBG("mp_kdp_exit() done\n");
779 #endif /* MACH_KDP */
787 i_bit_set(0, &cpu_data
[cpu
].cpu_signals
);
794 processor_t processor
)
800 processor_t processor
)
802 int cpu
= processor
->slot_num
;
804 if (cpu
!= cpu_number()) {
805 i386_signal_cpu(cpu
, MP_AST
, ASYNC
);
#if MACH_KDB
/*
 * invoke kdb on slave processors
 */
void
remote_kdb(void)
{
	int	my_cpu = cpu_number();
	int	cpu;

	mp_disable_preemption();
	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (cpu == my_cpu || !machine_slot[cpu].running)
			continue;
		i386_signal_cpu(cpu, MP_KDB, SYNC);
	}
	mp_enable_preemption();
}

/*
 * Clear kdb interrupt
 */
void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &cpu_data[cpu_number()].cpu_signals);
	mp_enable_preemption();
}
#endif /* MACH_KDB */
void
slave_machine_init(void)
{
	int	my_cpu;

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	mp_disable_preemption();
	my_cpu = get_cpu_number();

	DBG("slave_machine_init() CPU%d: phys (%d) active.\n",
		my_cpu, get_cpu_phys_number());

	mp_enable_preemption();

#if MP_DEBUG
	lapic_cpu_map_dump();
#endif /* MP_DEBUG */
}

int			/* (function name reconstructed) */
cpu_number(void)
{
	return get_cpu_number();
}
#if MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */

#if TRAP_DEBUG
struct mp_trap_hist_struct {
	unsigned char	type;
	unsigned char	data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
  *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

void
db_trap_hist(void)
{
	int	i, j;

	for (i = 0; i < MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				(&trap_hist[i] >= cur_trap_hist) ? "*" : " ",
				(trap_hist[i].type == 1) ? "SPL" : "INT");
			for (j = 0; j < 5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}
}

#endif /* TRAP_DEBUG */
void db_lapic(int cpu);
unsigned int db_remote_read(int cpu, int reg);
void db_ioapic(unsigned int);
void kdb_console(void);

#define BOOLP(a) ((a)?' ':'!')

static char *DM[8] = {
	/* ... delivery mode names (elided) ... */
};

unsigned int
db_remote_read(int cpu, int reg)
{
	/* ... */
}

void
db_ioapic(unsigned int ind)
{
	/* ... */
}

#endif /* MACH_KDB */