/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <mach_ldebug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <profiling/profile-mk.h>

#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mtrr.h>
#include <i386/postcode.h>
#include <i386/perfmon.h>
#include <i386/cpu_threads.h>
#include <i386/mp_desc.h>

#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)

/*
 * By default, use high vectors to leave vector space for systems
 * with multiple I/O APICs. However, some systems that boot with
 * the local APIC disabled will hang in SMM when vectors greater than
 * 0x5F are used. Those systems are not expected to have an I/O APIC,
 * so 16 (0x50 - 0x40) vectors for legacy PIC support are sufficient.
 */
#define LAPIC_DEFAULT_INTERRUPT_BASE	0xD0
#define LAPIC_REDUCED_INTERRUPT_BASE	0x50

/*
 * Specific lapic interrupts are relative to this base:
 */
#define LAPIC_PERFCNT_INTERRUPT		0xB
#define LAPIC_TIMER_INTERRUPT		0xC
#define LAPIC_SPURIOUS_INTERRUPT	0xD
#define LAPIC_INTERPROCESSOR_INTERRUPT	0xE
#define LAPIC_ERROR_INTERRUPT		0xF
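
/*
 * For example, with the default base of 0xD0 the timer interrupt is
 * delivered on vector 0xD0 + 0xC = 0xDC; with the reduced base of 0x50
 * it arrives on vector 0x5C.  See LAPIC_VECTOR() below.
 */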

/* Initialize lapic_id so cpu_number() works on non-SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

static i386_intr_func_t	lapic_timer_func;
static i386_intr_func_t	lapic_pmi_func;

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;

void		slave_boot_init(void);

static void	mp_kdp_wait(void);
static void	mp_rendezvous_action(void);

boolean_t	smp_initialized = FALSE;

decl_simple_lock_data(,mp_kdp_lock);

decl_mutex_data(static, mp_cpu_boot_lock);

/* Variables needed for MP rendezvous. */
static void	(*mp_rv_setup_func)(void *arg);
static void	(*mp_rv_action_func)(void *arg);
static void	(*mp_rv_teardown_func)(void *arg);
static void	*mp_rv_func_arg;
static int	mp_rv_ncpus;
static long	mp_rv_waiters[2];
decl_simple_lock_data(,mp_rv_lock);

int		lapic_to_cpu[MAX_CPUS];
int		cpu_to_lapic[MAX_CPUS];

static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		lapic_to_cpu[i] = -1;
		cpu_to_lapic[i] = -1;
	}
}

void
lapic_cpu_map(int apic_id, int cpu)
{
	cpu_to_lapic[cpu] = apic_id;
	lapic_to_cpu[apic_id] = cpu;
}

#if MP_DEBUG
static void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_CPUS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#define LAPIC_CPU_MAP_DUMP()	lapic_cpu_map_dump()
#define LAPIC_DUMP()		lapic_dump()
#else
#define LAPIC_CPU_MAP_DUMP()
#define LAPIC_DUMP()
#endif /* MP_DEBUG */

#define LAPIC_REG(reg) \
	(*((volatile int *)(lapic_start + LAPIC_##reg)))
#define LAPIC_REG_OFFSET(reg,off) \
	(*((volatile int *)(lapic_start + LAPIC_##reg + (off))))

#define LAPIC_VECTOR(src) \
	(lapic_interrupt_base + LAPIC_##src##_INTERRUPT)

#define LAPIC_ISR_IS_SET(base,src) \
	(LAPIC_REG_OFFSET(ISR_BASE,((base+LAPIC_##src##_INTERRUPT)/32)*0x10) & \
	 (1 << ((base + LAPIC_##src##_INTERRUPT) % 32)))
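
/*
 * Worked example: LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)
 * tests vector 0x50 + 0xC = 0x5C.  The in-service bits live in eight
 * 32-bit registers spaced 0x10 apart, so vector 0x5C is register
 * 0x5C / 32 = 2 (byte offset 2 * 0x10) at bit 0x5C % 32 = 28.
 */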

#if GPROF
/*
 * Initialize dummy structs for profiling. These aren't used but
 * allow hertz_tick() to be built with GPROF defined.
 */
struct profile_vars _profile_vars;
struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
#define GPROF_INIT()							\
{									\
	int	i;							\
									\
	/* Hack to initialize pointers to unused profiling structs */	\
	for (i = 1; i < MAX_CPUS; i++)					\
		_profile_vars_cpus[i] = &_profile_vars;			\
}
#else
#define GPROF_INIT()
#endif /* GPROF */

extern void master_up(void);

void
smp_init(void)
{
	kern_return_t	result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	vm_offset_t	lapic_base;

	simple_lock_init(&mp_kdp_lock, 0);
	simple_lock_init(&mp_rv_lock, 0);
	mutex_init(&mp_cpu_boot_lock, 0);

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map, &lapic_start,
				   round_page(LAPIC_SIZE), 0, &entry);
	if (result != KERN_SUCCESS) {
		panic("smp_init: vm_map_find_space FAILED (err=%d)", result);
	}
	vm_map_unlock(kernel_map);
	pmap_enter(pmap_kernel(),
		   lapic_start,
		   (ppnum_t) i386_btop(lapic_base),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_USE_DEFAULT,	/* assumed: cacheability arg elided in source */
		   TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
		printf("Local APIC version not 0x14 as expected\n");
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);

	if (pmc_init() != KERN_SUCCESS)
		printf("Performance counters not available\n");

	DBGLOG_CPU_INIT(master_cpu);

	smp_initialized = TRUE;
}

static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}

/* Standard APIC delivery-mode names; the initializer list was elided
 * in the source and is reconstructed here. */
static const char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"SMI",
	"Reserved",
	"NMI",
	"INIT",
	"Reserved",
	"ExtINT"};

void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf(" Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
	kprintf(" Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
	kprintf(" Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	for (i = 0xf; i >= 0; i--)
		kprintf("%x%x%x%x", i, i, i, i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
	kprintf("\n");
}

boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * There may be a local APIC which wasn't enabled by BIOS,
		 * so we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}

void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown only if the local APIC was enabled by the OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: masked */
	LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;

	/* Perfmon: masked */
	LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;

	/* Error: masked */
	LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;

	/* APIC software disabled */
	LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);

	mp_enable_preemption();
}

void
lapic_init(void)
{
	int	value;

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	/* Spurious vector, APIC software enabled */
	LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

	/* ExtINT: unmasked on the boot processor only */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: unmasked, one-shot */
	LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

	/* Perfmon: unmasked */
	LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

	/* Error: unmasked */
	LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
}

void
lapic_set_timer_func(i386_intr_func_t func)
{
	lapic_timer_func = func;
}

void
lapic_set_timer(
	boolean_t		interrupt,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	boolean_t	state;
	uint32_t	timer_vector;

	state = ml_set_interrupts_enabled(FALSE);
	timer_vector = LAPIC_REG(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_REG(LVT_TIMER) = timer_vector;
	LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
	LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
	ml_set_interrupts_enabled(state);
}
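
/*
 * Usage sketch (illustration only, not part of the original source):
 * arm a one-shot countdown that raises the timer interrupt.  The
 * 'divide_by_1' enumerator and the count are assumed example values.
 */
#if 0
static void
example_arm_timer(void)
{
	lapic_set_timer(TRUE, one_shot, divide_by_1, 100000);
}
#endif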

void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(FALSE);
	if (mode)
		*mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
				periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
	ml_set_interrupts_enabled(state);
}

void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_pmi_func = func;
}

static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}

int
lapic_interrupt(int interrupt, void *state)
{
	interrupt -= lapic_interrupt_base;
	if (interrupt < 0)
		return 0;

	switch (interrupt) {
	case LAPIC_PERFCNT_INTERRUPT:
		if (lapic_pmi_func != NULL)
			(*lapic_pmi_func)(
				(struct i386_interrupt_state *) state);
		/* Clear interrupt masked */
		LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
		_lapic_end_of_interrupt();
		break;
	case LAPIC_TIMER_INTERRUPT:
		_lapic_end_of_interrupt();
		if (lapic_timer_func != NULL)
			(*lapic_timer_func)(
				(struct i386_interrupt_state *) state);
		break;
	case LAPIC_ERROR_INTERRUPT:
		panic("Local APIC error\n");
		_lapic_end_of_interrupt();
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		/* No EOI required here */
		break;
	case LAPIC_INTERPROCESSOR_INTERRUPT:
		cpu_signal_handler((struct i386_interrupt_state *) state);
		_lapic_end_of_interrupt();
		break;
	default:
		return 0;
	}

	return 1;
}

void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * A bogus SMI handler enables interrupts but does not know
		 * about local APIC interrupt sources.  When the APIC timer
		 * counts down to zero while in SMM, the local APIC ends up
		 * waiting for an EOI but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * The timer is one-shot; trigger another quick countdown to
		 * generate another timer interrupt.
		 */
		if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}

kern_return_t
intel_startCPU(
	int	slot_num)
{
	int	i = 1000;
	int	lapic = cpu_to_lapic[slot_num];

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

	/* Initialize (or re-initialize) the descriptor tables for this cpu. */
	mp_desc_init(cpu_datap(slot_num), FALSE);

	/* Serialize use of the slave boot stack. */
	mutex_lock(&mp_cpu_boot_lock);

	mp_disable_preemption();
	if (slot_num == get_cpu_number()) {
		mp_enable_preemption();
		mutex_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_INIT;
	delay(10000);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);
	delay(200);
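
	/*
	 * The INIT IPI followed by two STARTUP IPIs above is the standard
	 * Intel MP wake-up sequence; the STARTUP vector field holds the
	 * page number of the real-mode entry point, hence MP_BOOT>>12.
	 */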

#ifdef	POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	while (i-- > 0) {
		if (cpu_datap(slot_num)->cpu_running)
			break;
		delay(10000);
	}

	mp_enable_preemption();
	mutex_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		DBG("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
		delay(1000000);
		cpu_shutdown();
		return KERN_SUCCESS;
	} else {
		DBG("Started CPU %02d\n", slot_num);
		printf("Started CPU %02d\n", slot_num);
		return KERN_SUCCESS;
	}
}

extern char	slave_boot_base[];
extern char	slave_boot_end[];
extern void	pstart(void);

void
slave_boot_init(void)
{
	DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
		slave_boot_base,
		kvtophys((vm_offset_t) slave_boot_base),
		MP_BOOT,
		slave_boot_end-slave_boot_base);

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys((addr64_t) kvtophys((vm_offset_t) slave_boot_base),
		   (addr64_t) MP_BOOT,
		   slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	DBG("bzero_phys 0x%x sz 0x%x\n", MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
	bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	DBG("writing 0x%x at phys 0x%x\n",
		kvtophys((vm_offset_t) &pstart), MP_MACH_START+MP_BOOT);
	ml_phys_write_word(MP_MACH_START+MP_BOOT,
			   kvtophys((vm_offset_t) &pstart));
}
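
/*
 * Resulting MP_BOOT page layout, as established above:
 *	MP_BOOT                     slave boot entry code (copied from
 *	                            slave_boot_base..slave_boot_end)
 *	MP_BOOT+MP_BOOTSTACK-0x400  0x400-byte zeroed slave stack area
 *	MP_BOOT+MP_MACH_START       physical address of pstart, the
 *	                            common startup entry
 */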

#if	MP_DEBUG
cpu_signal_event_log_t	*cpu_signal[MAX_CPUS];
cpu_signal_event_log_t	*cpu_handle[MAX_CPUS];

MP_EVENT_NAME_DECL();

#endif	/* MP_DEBUG */

void
cpu_signal_handler(__unused struct i386_interrupt_state *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if	MACH_KDB && MACH_ASSERT
	int		i = 100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &current_cpu_datap()->cpu_signals;

	do {
#if	MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler");
#endif	/* MACH_KDB && MACH_ASSERT */
#if	MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			mp_kdp_wait();
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if	MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {
			extern int kdb_is_slave[];

			i_bit_clear(MP_KDB, my_word);
			kdb_is_slave[my_cpu]++;
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle, my_cpu, MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		}
	} while (*my_word);

	mp_enable_preemption();
}

#ifdef	MP_DEBUG
extern int	max_lock_loops;
#endif	/* MP_DEBUG */

void
cpu_interrupt(int cpu)
{
	boolean_t	state;

	if (smp_initialized) {

		/* Wait for previous interrupt to be delivered... */
#ifdef	MP_DEBUG
		int	pending_busy_count = 0;
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
			if (++pending_busy_count > max_lock_loops)
				panic("cpu_interrupt() deadlock\n");
#else
		while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif	/* MP_DEBUG */
			cpu_pause();
		}

		state = ml_set_interrupts_enabled(FALSE);
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		LAPIC_REG(ICR)  =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
		(void) ml_set_interrupts_enabled(state);
	}
}

void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	uint64_t	tsc_timeout;

	if (!cpu_datap(cpu)->cpu_running)
		return;

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	cpu_interrupt(cpu);
	if (mode == SYNC) {
	   again:
		tsc_timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
			goto again;
		}
	}
}
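
/*
 * Example (illustration only, not part of the original source): nudge
 * cpu 1 to check for pending ASTs without waiting for acknowledgement.
 */
#if 0
static void
example_nudge(void)
{
	i386_signal_cpu(1, MP_AST, ASYNC);
}
#endif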

void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	unsigned int	cpu;
	unsigned int	my_cpu = cpu_number();

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}

int
i386_active_cpus(void)
{
	unsigned int	cpu;
	unsigned int	ncpus = 0;

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu_datap(cpu)->cpu_running)
			ncpus++;
	}
	return ncpus;
}

/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{
	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_waiters[0], 1);
	while (*((volatile long *) &mp_rv_waiters[0]) < mp_rv_ncpus)
		cpu_pause();
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_waiters[1], 1);
	while (*((volatile long *) &mp_rv_waiters[1]) < mp_rv_ncpus)
		cpu_pause();
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);
}

void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{

	if (!smp_initialized) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_waiters[0] = 0;		/* entry rendezvous count */
	mp_rv_waiters[1] = 0;		/* exit  rendezvous count */
	mp_rv_ncpus = i386_active_cpus();

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
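
/*
 * Usage sketch (illustration only; the example_* names are hypothetical
 * and not part of this file).  Every active cpu executes the action
 * between the two barriers:
 */
#if 0
static volatile long example_count;

static void
example_action(__unused void *arg)
{
	atomic_incl(&example_count, 1);	/* runs once on each active cpu */
}

static void
example_on_all_cpus(void)
{
	mp_rendezvous(NULL, example_action, NULL, NULL);
}
#endif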

#if	MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
static long		mp_kdp_ncpus;
boolean_t		mp_kdp_state;

void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);
	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait();
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_trap = TRUE;
	mp_kdp_ncpus = 1;	/* self */
	simple_unlock(&mp_kdp_lock);

	/* Deliver a nudge to other cpus, counting how many */
	DBG("mp_kdp_enter() signaling other processors\n");
	for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		ncpus++;
		i386_signal_cpu(cpu, MP_KDP, ASYNC);
	}

	/* Wait for the other processors to spin. */
	DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);
	tsc_timeout = rdtsc64() + (1000*1000*1000);
	while (*((volatile unsigned int *) &mp_kdp_ncpus) != ncpus
		&& rdtsc64() < tsc_timeout) {
		cpu_pause();
	}
	DBG("mp_kdp_enter() %d processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");
	postcode(MP_KDP_ENTER);
}

static void
mp_kdp_wait(void)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(TRUE);
	DBG("mp_kdp_wait()\n");
	atomic_incl(&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		cpu_pause();
	}
	atomic_decl(&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
	(void) ml_set_interrupts_enabled(state);
}

void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl(&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;

	/* Wait for the other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (*((volatile long *) &mp_kdp_ncpus) > 0) {
		cpu_pause();
	}
	DBG("mp_kdp_exit() done\n");
	(void) ml_set_interrupts_enabled(mp_kdp_state);
}
#endif	/* MACH_KDP */

/* Function names below were elided in the source; the standard Mach
 * processor-interface names are assumed. */
void
init_ast_check(
	__unused processor_t	processor)
{
}

void
cause_ast_check(
	processor_t	processor)
{
	int	cpu = PROCESSOR_DATA(processor, slot_num);

	if (cpu != cpu_number()) {
		i386_signal_cpu(cpu, MP_AST, ASYNC);
	}
}

/*
 * invoke kdb on slave processors
 */
void
remote_kdb(void)
{
	unsigned int	my_cpu = cpu_number();
	unsigned int	cpu;

	mp_disable_preemption();
	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		i386_signal_cpu(cpu, MP_KDB, SYNC);
	}
	mp_enable_preemption();
}

/*
 * Clear kdb interrupt
 */
void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
	mp_enable_preemption();
}

/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		get_cpu_number(), get_cpu_phys_number());

	LAPIC_CPU_MAP_DUMP();

	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}

void
slave_machine_init(void)
{
	/*
	 * Here in process context.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());
}

#undef cpu_number
int cpu_number(void)
{
	return get_cpu_number();
}

#if	MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */

#if	TRAP_DEBUG
#define MTRAPS 100	/* assumed size; define elided in source */
struct mp_trap_hist_struct {
	unsigned char	type;
	unsigned char	data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
  *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

void
db_trap_hist(void)
{
	int	i, j;

	for (i = 0; i < MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				(&trap_hist[i] >= cur_trap_hist) ? "*" : " ",
				(trap_hist[i].type == 1) ? "SPL" : "INT");
			for (j = 0; j < 5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}
}
#endif	/* TRAP_DEBUG */

void db_lapic(int cpu);
unsigned int db_remote_read(int cpu, int reg);
void db_ioapic(unsigned int);
void kdb_console(void);

#define BOOLP(a) ((a)?' ':'!')

static char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"SMI",
	"Reserved",
	"NMI",
	"INIT",
	"Reserved",
	"ExtINT"};

unsigned int
db_remote_read(int cpu, int reg)
{
	return -1;	/* stub; full body elided in source */
}

void
db_ioapic(unsigned int ind)
{
	/* body elided in source */
}

#endif	/* MACH_KDB */