2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
29 #include <mach_ldebug.h>
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
35 #include <kern/kern_types.h>
36 #include <kern/startup.h>
37 #include <kern/processor.h>
38 #include <kern/cpu_number.h>
39 #include <kern/cpu_data.h>
40 #include <kern/assert.h>
41 #include <kern/machine.h>
43 #include <vm/vm_map.h>
44 #include <vm/vm_kern.h>
46 #include <profiling/profile-mk.h>
49 #include <i386/mp_events.h>
50 #include <i386/mp_slave_boot.h>
51 #include <i386/apic.h>
55 #include <i386/cpuid.h>
56 #include <i386/proc_reg.h>
57 #include <i386/machine_cpu.h>
58 #include <i386/misc_protos.h>
59 #include <i386/mtrr.h>
60 #include <i386/postcode.h>
61 #include <i386/perfmon.h>
62 #include <i386/cpu_threads.h>
63 #include <i386/mp_desc.h>
66 #define PAUSE delay(1000000)
67 #define DBG(x...) kprintf(x)
74 * By default, use high vectors to leave vector space for systems
75 * with multiple I/O APIC's. However some systems that boot with
76 * local APIC disabled will hang in SMM when vectors greater than
77 * 0x5F are used. Those systems are not expected to have I/O APIC
78 * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect.
80 #define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0
81 #define LAPIC_REDUCED_INTERRUPT_BASE 0x50
83 * Specific lapic interrupts are relative to this base:
85 #define LAPIC_PERFCNT_INTERRUPT 0xB
86 #define LAPIC_TIMER_INTERRUPT 0xC
87 #define LAPIC_SPURIOUS_INTERRUPT 0xD
88 #define LAPIC_INTERPROCESSOR_INTERRUPT 0xE
89 #define LAPIC_ERROR_INTERRUPT 0xF
91 /* Initialize lapic_id so cpu_number() works on non SMP systems */
92 unsigned long lapic_id_initdata
= 0;
93 unsigned long lapic_id
= (unsigned long)&lapic_id_initdata
;
94 vm_offset_t lapic_start
;
96 static i386_intr_func_t lapic_timer_func
;
97 static i386_intr_func_t lapic_pmi_func
;
99 /* TRUE if local APIC was enabled by the OS not by the BIOS */
100 static boolean_t lapic_os_enabled
= FALSE
;
102 /* Base vector for local APIC interrupt sources */
103 int lapic_interrupt_base
= LAPIC_DEFAULT_INTERRUPT_BASE
;
105 void slave_boot_init(void);
107 static void mp_kdp_wait(void);
108 static void mp_rendezvous_action(void);
110 boolean_t smp_initialized
= FALSE
;
112 decl_simple_lock_data(,mp_kdp_lock
);
114 decl_mutex_data(static, mp_cpu_boot_lock
);
116 /* Variables needed for MP rendezvous. */
117 static void (*mp_rv_setup_func
)(void *arg
);
118 static void (*mp_rv_action_func
)(void *arg
);
119 static void (*mp_rv_teardown_func
)(void *arg
);
120 static void *mp_rv_func_arg
;
121 static int mp_rv_ncpus
;
122 static long mp_rv_waiters
[2];
123 decl_simple_lock_data(,mp_rv_lock
);
125 int lapic_to_cpu
[MAX_CPUS
];
126 int cpu_to_lapic
[MAX_CPUS
];
129 lapic_cpu_map_init(void)
133 for (i
= 0; i
< MAX_CPUS
; i
++) {
134 lapic_to_cpu
[i
] = -1;
135 cpu_to_lapic
[i
] = -1;
140 lapic_cpu_map(int apic_id
, int cpu
)
142 cpu_to_lapic
[cpu
] = apic_id
;
143 lapic_to_cpu
[apic_id
] = cpu
;
148 lapic_cpu_map_dump(void)
152 for (i
= 0; i
< MAX_CPUS
; i
++) {
153 if (cpu_to_lapic
[i
] == -1)
155 kprintf("cpu_to_lapic[%d]: %d\n",
158 for (i
= 0; i
< MAX_CPUS
; i
++) {
159 if (lapic_to_cpu
[i
] == -1)
161 kprintf("lapic_to_cpu[%d]: %d\n",
165 #define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump()
166 #define LAPIC_DUMP() lapic_dump()
168 #define LAPIC_CPU_MAP_DUMP()
170 #endif /* MP_DEBUG */
172 #define LAPIC_REG(reg) \
173 (*((volatile int *)(lapic_start + LAPIC_##reg)))
174 #define LAPIC_REG_OFFSET(reg,off) \
175 (*((volatile int *)(lapic_start + LAPIC_##reg + (off))))
177 #define LAPIC_VECTOR(src) \
178 (lapic_interrupt_base + LAPIC_##src##_INTERRUPT)
180 #define LAPIC_ISR_IS_SET(base,src) \
181 (LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
182 (1 <<((base + LAPIC_##src##_INTERRUPT)%32)))
186 * Initialize dummy structs for profiling. These aren't used but
187 * allows hertz_tick() to be built with GPROF defined.
189 struct profile_vars _profile_vars
;
190 struct profile_vars
*_profile_vars_cpus
[MAX_CPUS
] = { &_profile_vars
};
191 #define GPROF_INIT() \
195 /* Hack to initialize pointers to unused profiling structs */ \
196 for (i = 1; i < MAX_CPUS; i++) \
197 _profile_vars_cpus[i] = &_profile_vars; \
203 extern void master_up(void);
209 vm_map_entry_t entry
;
212 boolean_t is_boot_processor
;
213 boolean_t is_lapic_enabled
;
214 vm_offset_t lapic_base
;
216 simple_lock_init(&mp_kdp_lock
, 0);
217 simple_lock_init(&mp_rv_lock
, 0);
218 mutex_init(&mp_cpu_boot_lock
, 0);
225 /* Examine the local APIC state */
226 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
227 is_boot_processor
= (lo
& MSR_IA32_APIC_BASE_BSP
) != 0;
228 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
229 lapic_base
= (lo
& MSR_IA32_APIC_BASE_BASE
);
230 kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base
,
231 is_lapic_enabled
? "enabled" : "disabled",
232 is_boot_processor
? "BSP" : "AP");
233 if (!is_boot_processor
|| !is_lapic_enabled
)
234 panic("Unexpected local APIC state\n");
236 /* Establish a map to the local apic */
237 lapic_start
= vm_map_min(kernel_map
);
238 result
= vm_map_find_space(kernel_map
, &lapic_start
,
239 round_page(LAPIC_SIZE
), 0, &entry
);
240 if (result
!= KERN_SUCCESS
) {
241 panic("smp_init: vm_map_find_entry FAILED (err=%d)", result
);
243 vm_map_unlock(kernel_map
);
244 pmap_enter(pmap_kernel(),
246 (ppnum_t
) i386_btop(lapic_base
),
247 VM_PROT_READ
|VM_PROT_WRITE
,
250 lapic_id
= (unsigned long)(lapic_start
+ LAPIC_ID
);
252 if ((LAPIC_REG(VERSION
)&LAPIC_VERSION_MASK
) != 0x14) {
253 printf("Local APIC version not 0x14 as expected\n");
256 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
257 lapic_cpu_map_init();
258 lapic_cpu_map((LAPIC_REG(ID
)>>LAPIC_ID_SHIFT
)&LAPIC_ID_MASK
, 0);
264 if (pmc_init() != KERN_SUCCESS
)
265 printf("Performance counters not available\n");
268 DBGLOG_CPU_INIT(master_cpu
);
273 smp_initialized
= TRUE
;
282 /* write-read register */
283 LAPIC_REG(ERROR_STATUS
) = 0;
284 return LAPIC_REG(ERROR_STATUS
);
288 lapic_esr_clear(void)
290 LAPIC_REG(ERROR_STATUS
) = 0;
291 LAPIC_REG(ERROR_STATUS
) = 0;
294 static const char *DM
[8] = {
309 #define BOOL(a) ((a)?' ':'!')
311 kprintf("LAPIC %d at 0x%x version 0x%x\n",
312 (LAPIC_REG(ID
)>>LAPIC_ID_SHIFT
)&LAPIC_ID_MASK
,
314 LAPIC_REG(VERSION
)&LAPIC_VERSION_MASK
);
315 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
316 LAPIC_REG(TPR
)&LAPIC_TPR_MASK
,
317 LAPIC_REG(APR
)&LAPIC_APR_MASK
,
318 LAPIC_REG(PPR
)&LAPIC_PPR_MASK
);
319 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
320 LAPIC_REG(DFR
)>>LAPIC_DFR_SHIFT
,
321 LAPIC_REG(LDR
)>>LAPIC_LDR_SHIFT
);
322 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
323 BOOL(LAPIC_REG(SVR
)&LAPIC_SVR_ENABLE
),
324 BOOL(!(LAPIC_REG(SVR
)&LAPIC_SVR_FOCUS_OFF
)),
325 LAPIC_REG(SVR
) & LAPIC_SVR_MASK
);
326 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
327 LAPIC_REG(LVT_TIMER
)&LAPIC_LVT_VECTOR_MASK
,
328 (LAPIC_REG(LVT_TIMER
)&LAPIC_LVT_DS_PENDING
)?"SendPending":"Idle",
329 BOOL(LAPIC_REG(LVT_TIMER
)&LAPIC_LVT_MASKED
),
330 (LAPIC_REG(LVT_TIMER
)&LAPIC_LVT_PERIODIC
)?"Periodic":"OneShot");
331 kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT
));
332 kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT
));
333 kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG
));
334 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
335 LAPIC_REG(LVT_PERFCNT
)&LAPIC_LVT_VECTOR_MASK
,
336 DM
[(LAPIC_REG(LVT_PERFCNT
)>>LAPIC_LVT_DM_SHIFT
)&LAPIC_LVT_DM_MASK
],
337 (LAPIC_REG(LVT_PERFCNT
)&LAPIC_LVT_DS_PENDING
)?"SendPending":"Idle",
338 BOOL(LAPIC_REG(LVT_PERFCNT
)&LAPIC_LVT_MASKED
));
339 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
340 LAPIC_REG(LVT_LINT0
)&LAPIC_LVT_VECTOR_MASK
,
341 DM
[(LAPIC_REG(LVT_LINT0
)>>LAPIC_LVT_DM_SHIFT
)&LAPIC_LVT_DM_MASK
],
342 (LAPIC_REG(LVT_LINT0
)&LAPIC_LVT_TM_LEVEL
)?"Level":"Edge ",
343 (LAPIC_REG(LVT_LINT0
)&LAPIC_LVT_IP_PLRITY_LOW
)?"Low ":"High",
344 (LAPIC_REG(LVT_LINT0
)&LAPIC_LVT_DS_PENDING
)?"SendPending":"Idle",
345 BOOL(LAPIC_REG(LVT_LINT0
)&LAPIC_LVT_MASKED
));
346 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
347 LAPIC_REG(LVT_LINT1
)&LAPIC_LVT_VECTOR_MASK
,
348 DM
[(LAPIC_REG(LVT_LINT1
)>>LAPIC_LVT_DM_SHIFT
)&LAPIC_LVT_DM_MASK
],
349 (LAPIC_REG(LVT_LINT1
)&LAPIC_LVT_TM_LEVEL
)?"Level":"Edge ",
350 (LAPIC_REG(LVT_LINT1
)&LAPIC_LVT_IP_PLRITY_LOW
)?"Low ":"High",
351 (LAPIC_REG(LVT_LINT1
)&LAPIC_LVT_DS_PENDING
)?"SendPending":"Idle",
352 BOOL(LAPIC_REG(LVT_LINT1
)&LAPIC_LVT_MASKED
));
353 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
354 LAPIC_REG(LVT_ERROR
)&LAPIC_LVT_VECTOR_MASK
,
355 (LAPIC_REG(LVT_ERROR
)&LAPIC_LVT_DS_PENDING
)?"SendPending":"Idle",
356 BOOL(LAPIC_REG(LVT_ERROR
)&LAPIC_LVT_MASKED
));
357 kprintf("ESR: %08x \n", lapic_esr_read());
359 for(i
=0xf; i
>=0; i
--)
360 kprintf("%x%x%x%x",i
,i
,i
,i
);
364 kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE
, i
*0x10));
368 kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE
, i
*0x10));
371 for(i
=7; i
>= 0; i
--)
372 kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE
, i
*0x10));
382 if (cpuid_features() & CPUID_FEATURE_APIC
)
385 if (cpuid_family() == 6 || cpuid_family() == 15) {
388 * There may be a local APIC which wasn't enabled by BIOS.
389 * So we try to enable it explicitly.
391 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
392 lo
&= ~MSR_IA32_APIC_BASE_BASE
;
393 lo
|= MSR_IA32_APIC_BASE_ENABLE
| LAPIC_START
;
394 lo
|= MSR_IA32_APIC_BASE_ENABLE
;
395 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
398 * Re-initialize cpu features info and re-check.
401 if (cpuid_features() & CPUID_FEATURE_APIC
) {
402 printf("Local APIC discovered and enabled\n");
403 lapic_os_enabled
= TRUE
;
404 lapic_interrupt_base
= LAPIC_REDUCED_INTERRUPT_BASE
;
419 /* Shutdown if local APIC was enabled by OS */
420 if (lapic_os_enabled
== FALSE
)
423 mp_disable_preemption();
426 if (get_cpu_number() == master_cpu
) {
427 value
= LAPIC_REG(LVT_LINT0
);
428 value
|= LAPIC_LVT_MASKED
;
429 LAPIC_REG(LVT_LINT0
) = value
;
433 LAPIC_REG(LVT_TIMER
) |= LAPIC_LVT_MASKED
;
435 /* Perfmon: masked */
436 LAPIC_REG(LVT_PERFCNT
) |= LAPIC_LVT_MASKED
;
439 LAPIC_REG(LVT_ERROR
) |= LAPIC_LVT_MASKED
;
441 /* APIC software disabled */
442 LAPIC_REG(SVR
) &= ~LAPIC_SVR_ENABLE
;
444 /* Bypass the APIC completely and update cpu features */
445 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
446 lo
&= ~MSR_IA32_APIC_BASE_ENABLE
;
447 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
450 mp_enable_preemption();
458 /* Set flat delivery model, logical processor id */
459 LAPIC_REG(DFR
) = LAPIC_DFR_FLAT
;
460 LAPIC_REG(LDR
) = (get_cpu_number()) << LAPIC_LDR_SHIFT
;
465 LAPIC_REG(SVR
) = LAPIC_VECTOR(SPURIOUS
) | LAPIC_SVR_ENABLE
;
468 if (get_cpu_number() == master_cpu
) {
469 value
= LAPIC_REG(LVT_LINT0
);
470 value
&= ~LAPIC_LVT_MASKED
;
471 value
|= LAPIC_LVT_DM_EXTINT
;
472 LAPIC_REG(LVT_LINT0
) = value
;
475 /* Timer: unmasked, one-shot */
476 LAPIC_REG(LVT_TIMER
) = LAPIC_VECTOR(TIMER
);
478 /* Perfmon: unmasked */
479 LAPIC_REG(LVT_PERFCNT
) = LAPIC_VECTOR(PERFCNT
);
483 LAPIC_REG(LVT_ERROR
) = LAPIC_VECTOR(ERROR
);
488 lapic_set_timer_func(i386_intr_func_t func
)
490 lapic_timer_func
= func
;
496 lapic_timer_mode_t mode
,
497 lapic_timer_divide_t divisor
,
498 lapic_timer_count_t initial_count
)
501 uint32_t timer_vector
;
503 state
= ml_set_interrupts_enabled(FALSE
);
504 timer_vector
= LAPIC_REG(LVT_TIMER
);
505 timer_vector
&= ~(LAPIC_LVT_MASKED
|LAPIC_LVT_PERIODIC
);;
506 timer_vector
|= interrupt
? 0 : LAPIC_LVT_MASKED
;
507 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
508 LAPIC_REG(LVT_TIMER
) = timer_vector
;
509 LAPIC_REG(TIMER_DIVIDE_CONFIG
) = divisor
;
510 LAPIC_REG(TIMER_INITIAL_COUNT
) = initial_count
;
511 ml_set_interrupts_enabled(state
);
516 lapic_timer_mode_t
*mode
,
517 lapic_timer_divide_t
*divisor
,
518 lapic_timer_count_t
*initial_count
,
519 lapic_timer_count_t
*current_count
)
523 state
= ml_set_interrupts_enabled(FALSE
);
525 *mode
= (LAPIC_REG(LVT_TIMER
) & LAPIC_LVT_PERIODIC
) ?
528 *divisor
= LAPIC_REG(TIMER_DIVIDE_CONFIG
) & LAPIC_TIMER_DIVIDE_MASK
;
530 *initial_count
= LAPIC_REG(TIMER_INITIAL_COUNT
);
532 *current_count
= LAPIC_REG(TIMER_CURRENT_COUNT
);
533 ml_set_interrupts_enabled(state
);
537 lapic_set_pmi_func(i386_intr_func_t func
)
539 lapic_pmi_func
= func
;
543 _lapic_end_of_interrupt(void)
549 lapic_end_of_interrupt(void)
551 _lapic_end_of_interrupt();
555 lapic_interrupt(int interrupt
, void *state
)
557 interrupt
-= lapic_interrupt_base
;
562 case LAPIC_PERFCNT_INTERRUPT
:
563 if (lapic_pmi_func
!= NULL
)
565 (struct i386_interrupt_state
*) state
);
566 /* Clear interrupt masked */
567 LAPIC_REG(LVT_PERFCNT
) = LAPIC_VECTOR(PERFCNT
);
568 _lapic_end_of_interrupt();
570 case LAPIC_TIMER_INTERRUPT
:
571 _lapic_end_of_interrupt();
572 if (lapic_timer_func
!= NULL
)
574 (struct i386_interrupt_state
*) state
);
576 case LAPIC_ERROR_INTERRUPT
:
578 panic("Local APIC error\n");
579 _lapic_end_of_interrupt();
581 case LAPIC_SPURIOUS_INTERRUPT
:
583 /* No EOI required here */
585 case LAPIC_INTERPROCESSOR_INTERRUPT
:
586 cpu_signal_handler((struct i386_interrupt_state
*) state
);
587 _lapic_end_of_interrupt();
594 lapic_smm_restore(void)
598 if (lapic_os_enabled
== FALSE
)
601 state
= ml_set_interrupts_enabled(FALSE
);
603 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE
, TIMER
)) {
605 * Bogus SMI handler enables interrupts but does not know about
606 * local APIC interrupt sources. When APIC timer counts down to
607 * zero while in SMM, local APIC will end up waiting for an EOI
608 * but no interrupt was delivered to the OS.
610 _lapic_end_of_interrupt();
613 * timer is one-shot, trigger another quick countdown to trigger
614 * another timer interrupt.
616 if (LAPIC_REG(TIMER_CURRENT_COUNT
) == 0) {
617 LAPIC_REG(TIMER_INITIAL_COUNT
) = 1;
620 kprintf("lapic_smm_restore\n");
623 ml_set_interrupts_enabled(state
);
632 int lapic
= cpu_to_lapic
[slot_num
];
636 DBGLOG_CPU_INIT(slot_num
);
638 DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num
, lapic
);
639 DBG("IdlePTD(%p): 0x%x\n", &IdlePTD
, (int) IdlePTD
);
641 /* Initialize (or re-initialize) the descriptor tables for this cpu. */
642 mp_desc_init(cpu_datap(slot_num
), FALSE
);
644 /* Serialize use of the slave boot stack. */
645 mutex_lock(&mp_cpu_boot_lock
);
647 mp_disable_preemption();
648 if (slot_num
== get_cpu_number()) {
649 mp_enable_preemption();
650 mutex_unlock(&mp_cpu_boot_lock
);
654 LAPIC_REG(ICRD
) = lapic
<< LAPIC_ICRD_DEST_SHIFT
;
655 LAPIC_REG(ICR
) = LAPIC_ICR_DM_INIT
;
658 LAPIC_REG(ICRD
) = lapic
<< LAPIC_ICRD_DEST_SHIFT
;
659 LAPIC_REG(ICR
) = LAPIC_ICR_DM_STARTUP
|(MP_BOOT
>>12);
662 LAPIC_REG(ICRD
) = lapic
<< LAPIC_ICRD_DEST_SHIFT
;
663 LAPIC_REG(ICR
) = LAPIC_ICR_DM_STARTUP
|(MP_BOOT
>>12);
666 #ifdef POSTCODE_DELAY
667 /* Wait much longer if postcodes are displayed for a delay period. */
671 if (cpu_datap(slot_num
)->cpu_running
)
676 mp_enable_preemption();
677 mutex_unlock(&mp_cpu_boot_lock
);
679 if (!cpu_datap(slot_num
)->cpu_running
) {
680 DBG("Failed to start CPU %02d\n", slot_num
);
681 printf("Failed to start CPU %02d, rebooting...\n", slot_num
);
686 DBG("Started CPU %02d\n", slot_num
);
687 printf("Started CPU %02d\n", slot_num
);
692 extern char slave_boot_base
[];
693 extern char slave_boot_end
[];
694 extern void pstart(void);
697 slave_boot_init(void)
699 DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
701 kvtophys((vm_offset_t
) slave_boot_base
),
703 slave_boot_end
-slave_boot_base
);
706 * Copy the boot entry code to the real-mode vector area MP_BOOT.
707 * This is in page 1 which has been reserved for this purpose by
708 * machine_startup() from the boot processor.
709 * The slave boot code is responsible for switching to protected
710 * mode and then jumping to the common startup, _start().
712 bcopy_phys((addr64_t
) kvtophys((vm_offset_t
) slave_boot_base
),
714 slave_boot_end
-slave_boot_base
);
717 * Zero a stack area above the boot code.
719 DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK
+MP_BOOT
-0x400, 0x400);
720 bzero_phys((addr64_t
)MP_BOOTSTACK
+MP_BOOT
-0x400, 0x400);
723 * Set the location at the base of the stack to point to the
724 * common startup entry.
726 DBG("writing 0x%x at phys 0x%x\n",
727 kvtophys((vm_offset_t
) &pstart
), MP_MACH_START
+MP_BOOT
);
728 ml_phys_write_word(MP_MACH_START
+MP_BOOT
,
729 kvtophys((vm_offset_t
) &pstart
));
736 cpu_signal_event_log_t
*cpu_signal
[MAX_CPUS
];
737 cpu_signal_event_log_t
*cpu_handle
[MAX_CPUS
];
739 MP_EVENT_NAME_DECL();
741 #endif /* MP_DEBUG */
744 cpu_signal_handler(__unused
struct i386_interrupt_state
*regs
)
747 volatile int *my_word
;
748 #if MACH_KDB && MACH_ASSERT
750 #endif /* MACH_KDB && MACH_ASSERT */
752 mp_disable_preemption();
754 my_cpu
= cpu_number();
755 my_word
= ¤t_cpu_datap()->cpu_signals
;
758 #if MACH_KDB && MACH_ASSERT
760 Debugger("cpu_signal_handler");
761 #endif /* MACH_KDB && MACH_ASSERT */
763 if (i_bit(MP_KDP
, my_word
)) {
764 DBGLOG(cpu_handle
,my_cpu
,MP_KDP
);
765 i_bit_clear(MP_KDP
, my_word
);
768 #endif /* MACH_KDP */
769 if (i_bit(MP_TLB_FLUSH
, my_word
)) {
770 DBGLOG(cpu_handle
,my_cpu
,MP_TLB_FLUSH
);
771 i_bit_clear(MP_TLB_FLUSH
, my_word
);
772 pmap_update_interrupt();
773 } else if (i_bit(MP_AST
, my_word
)) {
774 DBGLOG(cpu_handle
,my_cpu
,MP_AST
);
775 i_bit_clear(MP_AST
, my_word
);
776 ast_check(cpu_to_processor(my_cpu
));
778 } else if (i_bit(MP_KDB
, my_word
)) {
779 extern kdb_is_slave
[];
781 i_bit_clear(MP_KDB
, my_word
);
782 kdb_is_slave
[my_cpu
]++;
784 #endif /* MACH_KDB */
785 } else if (i_bit(MP_RENDEZVOUS
, my_word
)) {
786 DBGLOG(cpu_handle
,my_cpu
,MP_RENDEZVOUS
);
787 i_bit_clear(MP_RENDEZVOUS
, my_word
);
788 mp_rendezvous_action();
792 mp_enable_preemption();
797 extern int max_lock_loops
;
798 #endif /* MP_DEBUG */
800 cpu_interrupt(int cpu
)
804 if (smp_initialized
) {
806 /* Wait for previous interrupt to be delivered... */
808 int pending_busy_count
= 0;
809 while (LAPIC_REG(ICR
) & LAPIC_ICR_DS_PENDING
) {
810 if (++pending_busy_count
> max_lock_loops
)
811 panic("cpus_interrupt() deadlock\n");
813 while (LAPIC_REG(ICR
) & LAPIC_ICR_DS_PENDING
) {
814 #endif /* MP_DEBUG */
818 state
= ml_set_interrupts_enabled(FALSE
);
820 cpu_to_lapic
[cpu
] << LAPIC_ICRD_DEST_SHIFT
;
822 LAPIC_VECTOR(INTERPROCESSOR
) | LAPIC_ICR_DM_FIXED
;
823 (void) ml_set_interrupts_enabled(state
);
829 i386_signal_cpu(int cpu
, mp_event_t event
, mp_sync_t mode
)
831 volatile int *signals
= &cpu_datap(cpu
)->cpu_signals
;
832 uint64_t tsc_timeout
;
835 if (!cpu_datap(cpu
)->cpu_running
)
838 DBGLOG(cpu_signal
, cpu
, event
);
840 i_bit_set(event
, signals
);
844 tsc_timeout
= rdtsc64() + (1000*1000*1000);
845 while (i_bit(event
, signals
) && rdtsc64() < tsc_timeout
) {
848 if (i_bit(event
, signals
)) {
849 DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
857 i386_signal_cpus(mp_event_t event
, mp_sync_t mode
)
860 unsigned int my_cpu
= cpu_number();
862 for (cpu
= 0; cpu
< real_ncpus
; cpu
++) {
863 if (cpu
== my_cpu
|| !cpu_datap(cpu
)->cpu_running
)
865 i386_signal_cpu(cpu
, event
, mode
);
870 i386_active_cpus(void)
873 unsigned int ncpus
= 0;
875 for (cpu
= 0; cpu
< real_ncpus
; cpu
++) {
876 if (cpu_datap(cpu
)->cpu_running
)
883 * All-CPU rendezvous:
884 * - CPUs are signalled,
885 * - all execute the setup function (if specified),
886 * - rendezvous (i.e. all cpus reach a barrier),
887 * - all execute the action function (if specified),
888 * - rendezvous again,
889 * - execute the teardown function (if specified), and then
892 * Note that the supplied external functions _must_ be reentrant and aware
893 * that they are running in parallel and in an unknown lock context.
897 mp_rendezvous_action(void)
901 if (mp_rv_setup_func
!= NULL
)
902 mp_rv_setup_func(mp_rv_func_arg
);
903 /* spin on entry rendezvous */
904 atomic_incl(&mp_rv_waiters
[0], 1);
905 while (*((volatile long *) &mp_rv_waiters
[0]) < mp_rv_ncpus
)
907 /* action function */
908 if (mp_rv_action_func
!= NULL
)
909 mp_rv_action_func(mp_rv_func_arg
);
910 /* spin on exit rendezvous */
911 atomic_incl(&mp_rv_waiters
[1], 1);
912 while (*((volatile long *) &mp_rv_waiters
[1]) < mp_rv_ncpus
)
914 /* teardown function */
915 if (mp_rv_teardown_func
!= NULL
)
916 mp_rv_teardown_func(mp_rv_func_arg
);
920 mp_rendezvous(void (*setup_func
)(void *),
921 void (*action_func
)(void *),
922 void (*teardown_func
)(void *),
926 if (!smp_initialized
) {
927 if (setup_func
!= NULL
)
929 if (action_func
!= NULL
)
931 if (teardown_func
!= NULL
)
936 /* obtain rendezvous lock */
937 simple_lock(&mp_rv_lock
);
939 /* set static function pointers */
940 mp_rv_setup_func
= setup_func
;
941 mp_rv_action_func
= action_func
;
942 mp_rv_teardown_func
= teardown_func
;
943 mp_rv_func_arg
= arg
;
945 mp_rv_waiters
[0] = 0; /* entry rendezvous count */
946 mp_rv_waiters
[1] = 0; /* exit rendezvous count */
947 mp_rv_ncpus
= i386_active_cpus();
950 * signal other processors, which will call mp_rendezvous_action()
951 * with interrupts disabled
953 i386_signal_cpus(MP_RENDEZVOUS
, ASYNC
);
955 /* call executor function on this cpu */
956 mp_rendezvous_action();
959 simple_unlock(&mp_rv_lock
);
963 volatile boolean_t mp_kdp_trap
= FALSE
;
965 boolean_t mp_kdp_state
;
973 unsigned int my_cpu
= cpu_number();
974 uint64_t tsc_timeout
;
976 DBG("mp_kdp_enter()\n");
979 * Here to enter the debugger.
980 * In case of races, only one cpu is allowed to enter kdp after
983 mp_kdp_state
= ml_set_interrupts_enabled(FALSE
);
984 simple_lock(&mp_kdp_lock
);
985 while (mp_kdp_trap
) {
986 simple_unlock(&mp_kdp_lock
);
987 DBG("mp_kdp_enter() race lost\n");
989 simple_lock(&mp_kdp_lock
);
991 mp_kdp_ncpus
= 1; /* self */
993 simple_unlock(&mp_kdp_lock
);
995 /* Deliver a nudge to other cpus, counting how many */
996 DBG("mp_kdp_enter() signaling other processors\n");
997 for (ncpus
= 1, cpu
= 0; cpu
< real_ncpus
; cpu
++) {
998 if (cpu
== my_cpu
|| !cpu_datap(cpu
)->cpu_running
)
1001 i386_signal_cpu(cpu
, MP_KDP
, ASYNC
);
1004 /* Wait other processors to spin. */
1005 DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus
);
1006 tsc_timeout
= rdtsc64() + (1000*1000*1000);
1007 while (*((volatile unsigned int *) &mp_kdp_ncpus
) != ncpus
1008 && rdtsc64() < tsc_timeout
) {
1011 DBG("mp_kdp_enter() %d processors done %s\n",
1012 mp_kdp_ncpus
, (mp_kdp_ncpus
== ncpus
) ? "OK" : "timed out");
1013 postcode(MP_KDP_ENTER
);
1021 state
= ml_set_interrupts_enabled(TRUE
);
1022 DBG("mp_kdp_wait()\n");
1023 atomic_incl(&mp_kdp_ncpus
, 1);
1024 while (mp_kdp_trap
) {
1027 atomic_decl(&mp_kdp_ncpus
, 1);
1028 DBG("mp_kdp_wait() done\n");
1029 (void) ml_set_interrupts_enabled(state
);
1035 DBG("mp_kdp_exit()\n");
1036 atomic_decl(&mp_kdp_ncpus
, 1);
1037 mp_kdp_trap
= FALSE
;
1039 /* Wait other processors to stop spinning. XXX needs timeout */
1040 DBG("mp_kdp_exit() waiting for processors to resume\n");
1041 while (*((volatile long *) &mp_kdp_ncpus
) > 0) {
1044 DBG("mp_kdp_exit() done\n");
1045 (void) ml_set_interrupts_enabled(mp_kdp_state
);
1048 #endif /* MACH_KDP */
1053 __unused processor_t processor
)
1059 processor_t processor
)
1061 int cpu
= PROCESSOR_DATA(processor
, slot_num
);
1063 if (cpu
!= cpu_number()) {
1064 i386_signal_cpu(cpu
, MP_AST
, ASYNC
);
1069 * invoke kdb on slave processors
1075 unsigned int my_cpu
= cpu_number();
1078 mp_disable_preemption();
1079 for (cpu
= 0; cpu
< real_ncpus
; cpu
++) {
1080 if (cpu
== my_cpu
|| !cpu_datap(cpu
)->cpu_running
)
1082 i386_signal_cpu(cpu
, MP_KDB
, SYNC
);
1084 mp_enable_preemption();
1088 * Clear kdb interrupt
1092 clear_kdb_intr(void)
1094 mp_disable_preemption();
1095 i_bit_clear(MP_KDB
, ¤t_cpu_datap()->cpu_signals
);
1096 mp_enable_preemption();
1100 * i386_init_slave() is called from pstart.
1101 * We're in the cpu's interrupt stack with interrupts disabled.
1104 i386_init_slave(void)
1106 postcode(I386_INIT_SLAVE
);
1108 /* Ensure that caching and write-through are enabled */
1109 set_cr0(get_cr0() & ~(CR0_NW
|CR0_CD
));
1111 DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
1112 get_cpu_number(), get_cpu_phys_number());
1117 LAPIC_CPU_MAP_DUMP();
1127 panic("i386_init_slave() returned from slave_main()");
1131 slave_machine_init(void)
1134 * Here in process context.
1136 DBG("slave_machine_init() CPU%d\n", get_cpu_number());
/*
 * Out-of-line cpu_number() for callers (e.g. the debugger) that
 * cannot use the inline/macro form.
 */
int cpu_number(void)
{
	return get_cpu_number();
}
1156 #include <ddb/db_output.h>
1158 #define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */
1163 struct mp_trap_hist_struct
{
1165 unsigned char data
[5];
1166 } trap_hist
[MTRAPS
], *cur_trap_hist
= trap_hist
,
1167 *max_trap_hist
= &trap_hist
[MTRAPS
];
1169 void db_trap_hist(void);
1189 for(i
=0;i
<MTRAPS
;i
++)
1190 if (trap_hist
[i
].type
== 1 || trap_hist
[i
].type
== 2) {
1192 (&trap_hist
[i
]>=cur_trap_hist
)?"*":" ",
1193 (trap_hist
[i
].type
== 1)?"SPL":"INT");
1195 db_printf(" %02x", trap_hist
[i
].data
[j
]);
1200 #endif /* TRAP_DEBUG */
1202 void db_lapic(int cpu
);
1203 unsigned int db_remote_read(int cpu
, int reg
);
1204 void db_ioapic(unsigned int);
1205 void kdb_console(void);
1212 #define BOOLP(a) ((a)?' ':'!')
1214 static char *DM
[8] = {
1225 db_remote_read(int cpu
, int reg
)
1236 db_ioapic(unsigned int ind
)
1240 #endif /* MACH_KDB */