/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_ldebug.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/startup.h>
#include <kern/processor.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <profiling/profile-mk.h>

#include <i386/mp_events.h>
#include <i386/mp_slave_boot.h>
#include <i386/apic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mtrr.h>
#include <i386/vmx/vmx_cpu.h>
#include <i386/postcode.h>
#include <i386/perfmon.h>
#include <i386/cpu_threads.h>
#include <i386/mp_desc.h>
#include <i386/trap.h>
#include <i386/machine_routines.h>
#include <i386/pmCPU.h>
#include <i386/hpet.h>
#include <i386/machine_check.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <sys/kdebug.h>

#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
/* Initialize lapic_id so cpu_number() works on non SMP systems */
unsigned long	lapic_id_initdata = 0;
unsigned long	lapic_id = (unsigned long)&lapic_id_initdata;
vm_offset_t	lapic_start;

static i386_intr_func_t	lapic_timer_func;
static i386_intr_func_t	lapic_pmi_func;
static i386_intr_func_t	lapic_thermal_func;

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;
void		slave_boot_init(void);

static void	mp_kdb_wait(void);
volatile boolean_t	mp_kdb_trap = FALSE;
volatile long	mp_kdb_ncpus = 0;

static void	mp_kdp_wait(boolean_t flush);
static void	mp_rendezvous_action(void);
static void	mp_broadcast_action(void);

static int		NMIInterruptHandler(x86_saved_state_t *regs);
static boolean_t	cpu_signal_pending(int cpu, mp_event_t event);

boolean_t		smp_initialized = FALSE;
volatile boolean_t	force_immediate_debugger_NMI = FALSE;
volatile boolean_t	pmap_tlb_flush_timeout = FALSE;

decl_simple_lock_data(,mp_kdp_lock);

decl_mutex_data(static, mp_cpu_boot_lock);
/* Variables needed for MP rendezvous. */
decl_simple_lock_data(,mp_rv_lock);
static void	(*mp_rv_setup_func)(void *arg);
static void	(*mp_rv_action_func)(void *arg);
static void	(*mp_rv_teardown_func)(void *arg);
static void	*mp_rv_func_arg;
static int	mp_rv_ncpus;
		/* Cache-aligned barriers: */
static volatile long	mp_rv_entry    __attribute__((aligned(64)));
static volatile long	mp_rv_exit     __attribute__((aligned(64)));
static volatile long	mp_rv_complete __attribute__((aligned(64)));

/* Variables needed for MP broadcast. */
static void	(*mp_bc_action_func)(void *arg);
static void	*mp_bc_func_arg;
static int	mp_bc_ncpus;
static volatile long	mp_bc_count;
decl_mutex_data(static, mp_bc_lock);
static void	mp_cpus_call_action(void);

int	lapic_to_cpu[MAX_CPUS];
int	cpu_to_lapic[MAX_CPUS];
static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		lapic_to_cpu[i] = -1;
		cpu_to_lapic[i] = -1;
	}
}

void
lapic_cpu_map(int apic_id, int cpu)
{
	cpu_to_lapic[cpu] = apic_id;
	lapic_to_cpu[apic_id] = cpu;
}
/*
 * Retrieve the local apic ID of a cpu.
 *
 * Returns the local apic ID for the given processor.
 * If the processor does not exist or the apic is not configured, returns -1.
 */
uint32_t
ml_get_apicid(uint32_t cpu)
{
	if (cpu >= (uint32_t)MAX_CPUS)
		return 0xFFFFFFFF;	/* Return -1 if cpu too big */

	/* Return the apic ID (or -1 if not configured) */
	return (uint32_t)cpu_to_lapic[cpu];
}
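/*
 * Illustrative sketch, not from the original sources: how a caller might use
 * ml_get_apicid(), treating 0xFFFFFFFF as "no APIC configured".  The kprintf()
 * shown here is hypothetical.
 *
 *	uint32_t apicid = ml_get_apicid(cpu_number());
 *	if (apicid == 0xFFFFFFFF)
 *		kprintf("cpu %d: local APIC not configured\n", cpu_number());
 */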
#ifdef MP_DEBUG
static void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_CPUS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#define LAPIC_CPU_MAP_DUMP()	lapic_cpu_map_dump()
#define LAPIC_DUMP()		lapic_dump()
#else
#define LAPIC_CPU_MAP_DUMP()
#define LAPIC_DUMP()
#endif /* MP_DEBUG */
/*
 * Initialize dummy structs for profiling. These aren't used but
 * allow hertz_tick() to be built with GPROF defined.
 */
struct profile_vars _profile_vars;
struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars };
#define GPROF_INIT()							\
{									\
	int	i;							\
	/* Hack to initialize pointers to unused profiling structs */	\
	for (i = 1; i < MAX_CPUS; i++)					\
		_profile_vars_cpus[i] = &_profile_vars;			\
}
void
smp_init(void)
{
	kern_return_t	result;
	vm_map_entry_t	entry;
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	vm_offset_t	lapic_base;

	simple_lock_init(&mp_kdp_lock, 0);
	simple_lock_init(&mp_rv_lock, 0);
	mutex_init(&mp_cpu_boot_lock, 0);
	mutex_init(&mp_bc_lock, 0);

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	lapic_base = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%x %s %s\n", lapic_base,
		is_lapic_enabled ? "enabled" : "disabled",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/* Establish a map to the local apic */
	lapic_start = vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map,
				   (vm_map_address_t *) &lapic_start,
				   round_page(LAPIC_SIZE), 0,
				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
	if (result != KERN_SUCCESS) {
		panic("smp_init: vm_map_find_entry FAILED (err=%d)", result);
	}
	vm_map_unlock(kernel_map);

	/*
	 * Map in the local APIC non-cacheable, as recommended by Intel
	 * in section 8.4.1 of the "System Programming Guide".
	 */
	pmap_enter(pmap_kernel(),
		   lapic_start,
		   (ppnum_t) i386_btop(lapic_base),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_IO,
		   TRUE);
	lapic_id = (unsigned long)(lapic_start + LAPIC_ID);

	if ((LAPIC_REG(VERSION)&LAPIC_VERSION_MASK) != 0x14) {
		printf("Local APIC version not 0x14 as expected\n");
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);

	DBGLOG_CPU_INIT(master_cpu);

	smp_initialized = TRUE;
}
static uint32_t
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_REG(ERROR_STATUS) = 0;
	return LAPIC_REG(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_REG(ERROR_STATUS) = 0;
	LAPIC_REG(ERROR_STATUS) = 0;
}
static const char *DM[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};
void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')

	kprintf("LAPIC %d at 0x%x version 0x%x\n",
		(LAPIC_REG(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		lapic_start,
		LAPIC_REG(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_REG(TPR)&LAPIC_TPR_MASK,
		LAPIC_REG(APR)&LAPIC_APR_MASK,
		LAPIC_REG(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_REG(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_REG(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_REG(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_REG(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_REG(SVR) & LAPIC_SVR_MASK);
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
		LAPIC_REG(LVT_TIMER)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_MASKED),
		(LAPIC_REG(LVT_TIMER)&LAPIC_LVT_PERIODIC)?"Periodic":"OneShot");
	kprintf("  Initial Count: 0x%08x \n", LAPIC_REG(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_REG(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_REG(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_PERFCNT)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_PERFCNT)&LAPIC_LVT_MASKED));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_THERMAL)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_THERMAL)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT0)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT0)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT0)&LAPIC_LVT_MASKED));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		LAPIC_REG(LVT_LINT1)&LAPIC_LVT_VECTOR_MASK,
		DM[(LAPIC_REG(LVT_LINT1)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK],
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_TM_LEVEL)?"Level":"Edge ",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_IP_PLRITY_LOW)?"Low ":"High",
		(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_LINT1)&LAPIC_LVT_MASKED));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
		LAPIC_REG(LVT_ERROR)&LAPIC_LVT_VECTOR_MASK,
		(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_DS_PENDING)?"SendPending":"Idle",
		BOOL(LAPIC_REG(LVT_ERROR)&LAPIC_LVT_MASKED));
	kprintf("ESR: %08x \n", lapic_esr_read());

	/* Dump the TMR, IRR and ISR register banks, highest bits first. */
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(TMR_BASE, i*0x10));
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(IRR_BASE, i*0x10));
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_REG_OFFSET(ISR_BASE, i*0x10));
}
void
db_apic(__unused db_expr_t addr,
	__unused int have_addr,
	__unused db_expr_t count,
	__unused char *modif)
{
	lapic_dump();
}
static boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * There may be a local APIC which wasn't enabled by BIOS.
		 * So we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		lo |= MSR_IA32_APIC_BASE_ENABLE;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: masked */
	LAPIC_REG(LVT_TIMER) |= LAPIC_LVT_MASKED;

	/* Perfmon: masked */
	LAPIC_REG(LVT_PERFCNT) |= LAPIC_LVT_MASKED;

	/* Error: masked */
	LAPIC_REG(LVT_ERROR) |= LAPIC_LVT_MASKED;

	/* APIC software disabled */
	LAPIC_REG(SVR) &= ~LAPIC_SVR_ENABLE;

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
void
lapic_init(void)
{
	int	value;

	/* Set flat delivery model, logical processor id */
	LAPIC_REG(DFR) = LAPIC_DFR_FLAT;
	LAPIC_REG(LDR) = (get_cpu_number()) << LAPIC_LDR_SHIFT;

	LAPIC_REG(SVR) = LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE;

	/* ExtINT: unmasked on the boot processor only */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_REG(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_REG(LVT_LINT0) = value;
	}

	/* Timer: unmasked, one-shot */
	LAPIC_REG(LVT_TIMER) = LAPIC_VECTOR(TIMER);

	/* Perfmon: unmasked */
	LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);

	/* Thermal: unmasked */
	LAPIC_REG(LVT_THERMAL) = LAPIC_VECTOR(THERMAL);

	/* Error: unmasked */
	LAPIC_REG(LVT_ERROR) = LAPIC_VECTOR(ERROR);
}
void
lapic_set_timer_func(i386_intr_func_t func)
{
	lapic_timer_func = func;
}
void
lapic_set_timer(
	boolean_t		interrupt,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	boolean_t	state;
	uint32_t	timer_vector;

	state = ml_set_interrupts_enabled(FALSE);
	timer_vector = LAPIC_REG(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_REG(LVT_TIMER) = timer_vector;
	LAPIC_REG(TIMER_DIVIDE_CONFIG) = divisor;
	LAPIC_REG(TIMER_INITIAL_COUNT) = initial_count;
	ml_set_interrupts_enabled(state);
}
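/*
 * Illustrative sketch, not from the original sources: arming the local APIC
 * timer through lapic_set_timer().  The enum values one_shot and divide_by_1,
 * and the 100000-tick count, are assumptions used only for illustration.
 *
 *	lapic_set_timer(TRUE,		// unmasked: interrupt on expiry
 *			one_shot,	// assumed lapic_timer_mode_t value
 *			divide_by_1,	// assumed lapic_timer_divide_t value
 *			100000);	// arbitrary initial count, in timer ticks
 */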
void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	boolean_t	state;

	state = ml_set_interrupts_enabled(FALSE);
	if (mode)
		*mode = (LAPIC_REG(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
				periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_REG(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_REG(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_REG(TIMER_CURRENT_COUNT);
	ml_set_interrupts_enabled(state);
}
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_pmi_func = func;
}

void
lapic_set_thermal_func(i386_intr_func_t func)
{
	lapic_thermal_func = func;
}
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_REG(EOI) = 0;
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
int
lapic_interrupt(int interrupt, x86_saved_state_t *state)
{
	int	retval = 0;

	/* Did we just field an interruption for the HPET comparator? */
	if (x86_core()->HpetVec == ((uint32_t)interrupt - 0x40)) {
		/* Yes, go handle it... */
		retval = HPETInterrupt();
		/* Was it really handled? */
		if (retval) {
			/* If so, EOI the 'rupt */
			_lapic_end_of_interrupt();
			/* ... and return, indicating that this has been handled */
			return retval;
		}
	}

	interrupt -= lapic_interrupt_base;

	if (interrupt == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base)) {
		retval = NMIInterruptHandler(state);
		_lapic_end_of_interrupt();
		return retval;
	}

	switch (interrupt) {
	case LAPIC_PERFCNT_INTERRUPT:
		if (lapic_pmi_func != NULL)
			(*lapic_pmi_func)(NULL);
		/* Clear interrupt masked */
		LAPIC_REG(LVT_PERFCNT) = LAPIC_VECTOR(PERFCNT);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_TIMER_INTERRUPT:
		_lapic_end_of_interrupt();
		if (lapic_timer_func != NULL)
			(*lapic_timer_func)(state);
		retval = 1;
		break;
	case LAPIC_THERMAL_INTERRUPT:
		if (lapic_thermal_func != NULL)
			(*lapic_thermal_func)(NULL);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_ERROR_INTERRUPT:
		panic("Local APIC error\n");
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_INTERPROCESSOR_INTERRUPT:
		_lapic_end_of_interrupt();
		cpu_signal_handler(state);
		retval = 1;
		break;
	}

	return retval;
}
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * The timer is one-shot; trigger another quick countdown to
		 * force another timer interrupt.
		 */
		if (LAPIC_REG(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_REG(TIMER_INITIAL_COUNT) = 1;
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
kern_return_t
intel_startCPU(
	int	slot_num)
{
	int	i = 1000;
	int	lapic = cpu_to_lapic[slot_num];

	assert(lapic != -1);

	DBGLOG_CPU_INIT(slot_num);

	DBG("intel_startCPU(%d) lapic_id=%d\n", slot_num, lapic);
	DBG("IdlePTD(%p): 0x%x\n", &IdlePTD, (int) IdlePTD);

	/*
	 * Initialize (or re-initialize) the descriptor tables for this cpu.
	 * Propagate processor mode to slave.
	 */
	if (cpu_mode_is64bit())
		cpu_desc_init64(cpu_datap(slot_num), FALSE);
	else
		cpu_desc_init(cpu_datap(slot_num), FALSE);

	/* Serialize use of the slave boot stack. */
	mutex_lock(&mp_cpu_boot_lock);

	mp_disable_preemption();
	if (slot_num == get_cpu_number()) {
		mp_enable_preemption();
		mutex_unlock(&mp_cpu_boot_lock);
		return KERN_SUCCESS;
	}

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_INIT;

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);

	LAPIC_REG(ICRD) = lapic << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR)  = LAPIC_ICR_DM_STARTUP|(MP_BOOT>>12);

#ifdef	POSTCODE_DELAY
	/* Wait much longer if postcodes are displayed for a delay period. */
	i *= 10000;
#endif
	while (i-- > 0) {
		if (cpu_datap(slot_num)->cpu_running)
			break;
		delay(10000);
	}

	mp_enable_preemption();
	mutex_unlock(&mp_cpu_boot_lock);

	if (!cpu_datap(slot_num)->cpu_running) {
		kprintf("Failed to start CPU %02d\n", slot_num);
		printf("Failed to start CPU %02d, rebooting...\n", slot_num);
	} else {
		kprintf("Started cpu %d (lapic id %08x)\n", slot_num, lapic);
	}
	return KERN_SUCCESS;
}
extern char	slave_boot_base[];
extern char	slave_boot_end[];
extern void	slave_pstart(void);

void
slave_boot_init(void)
{
	DBG("V(slave_boot_base)=%p P(slave_boot_base)=%p MP_BOOT=%p sz=0x%x\n",
		slave_boot_base,
		kvtophys((vm_offset_t) slave_boot_base),
		MP_BOOT,
		slave_boot_end-slave_boot_base);

	/*
	 * Copy the boot entry code to the real-mode vector area MP_BOOT.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) slave_boot_base),
		   (addr64_t) MP_BOOT,
		   slave_boot_end-slave_boot_base);

	/*
	 * Zero a stack area above the boot code.
	 */
	DBG("bzero_phys 0x%x sz 0x%x\n",MP_BOOTSTACK+MP_BOOT-0x400, 0x400);
	bzero_phys((addr64_t)MP_BOOTSTACK+MP_BOOT-0x400, 0x400);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	DBG("writing 0x%x at phys 0x%x\n",
		kvtophys((vm_offset_t) &slave_pstart), MP_MACH_START+MP_BOOT);
	ml_phys_write_word(MP_MACH_START+MP_BOOT,
			   (unsigned int)kvtophys((vm_offset_t) &slave_pstart));
}
#if	MP_DEBUG
cpu_signal_event_log_t	*cpu_signal[MAX_CPUS];
cpu_signal_event_log_t	*cpu_handle[MAX_CPUS];

MP_EVENT_NAME_DECL();

#endif	/* MP_DEBUG */
int
cpu_signal_handler(x86_saved_state_t *regs)
{
	int		my_cpu;
	volatile int	*my_word;
#if	MACH_KDB && MACH_ASSERT
	int		i = 100;
#endif	/* MACH_KDB && MACH_ASSERT */

	mp_disable_preemption();

	my_cpu = cpu_number();
	my_word = &current_cpu_datap()->cpu_signals;

	do {
#if	MACH_KDB && MACH_ASSERT
		if (i-- <= 0)
			Debugger("cpu_signal_handler: signals did not clear");
#endif	/* MACH_KDB && MACH_ASSERT */
#if	MACH_KDP
		if (i_bit(MP_KDP, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_KDP);
			i_bit_clear(MP_KDP, my_word);
			/* Ensure that the i386_kernel_state at the base of the
			 * current thread's stack (if any) is synchronized with the
			 * context at the moment of the interrupt, to facilitate
			 * access through the debugger.
			 */
			sync_iss_to_iks(saved_state32(regs));
			mp_kdp_wait(TRUE);
		} else
#endif	/* MACH_KDP */
		if (i_bit(MP_TLB_FLUSH, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH);
			i_bit_clear(MP_TLB_FLUSH, my_word);
			pmap_update_interrupt();
		} else if (i_bit(MP_AST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_AST);
			i_bit_clear(MP_AST, my_word);
			ast_check(cpu_to_processor(my_cpu));
#if	MACH_KDB
		} else if (i_bit(MP_KDB, my_word)) {

			i_bit_clear(MP_KDB, my_word);
			current_cpu_datap()->cpu_kdb_is_slave++;
			mp_kdb_wait();
			current_cpu_datap()->cpu_kdb_is_slave--;
#endif	/* MACH_KDB */
		} else if (i_bit(MP_RENDEZVOUS, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_RENDEZVOUS);
			i_bit_clear(MP_RENDEZVOUS, my_word);
			mp_rendezvous_action();
		} else if (i_bit(MP_BROADCAST, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_BROADCAST);
			i_bit_clear(MP_BROADCAST, my_word);
			mp_broadcast_action();
		} else if (i_bit(MP_CHUD, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_CHUD);
			i_bit_clear(MP_CHUD, my_word);
			chudxnu_cpu_signal_handler();
		} else if (i_bit(MP_CALL, my_word)) {
			DBGLOG(cpu_handle,my_cpu,MP_CALL);
			i_bit_clear(MP_CALL, my_word);
			mp_cpus_call_action();
		}
	} while (*my_word);

	mp_enable_preemption();

	return 0;
}
/* We want this to show up in backtraces, hence marked noinline.
 */
static int __attribute__((noinline))
NMIInterruptHandler(x86_saved_state_t *regs)
{
	void	*stackptr;

	sync_iss_to_iks_unconditionally(regs);
	__asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));

	if (pmap_tlb_flush_timeout == TRUE && current_cpu_datap()->cpu_tlb_invalid) {
		panic_i386_backtrace(stackptr, 10, "Panic: Unresponsive processor\n", TRUE, regs);
		panic_io_port_read();
	}

	pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);
	mp_kdp_wait(FALSE);

	return 1;
}
#ifdef	MP_DEBUG
extern int	max_lock_loops;
int		trappedalready = 0;	/* (BRINGUP) */
#endif	/* MP_DEBUG */

static void
i386_cpu_IPI(int cpu)
{
	boolean_t	state;

#ifdef	MP_DEBUG
	if (cpu_datap(cpu)->cpu_signals & 6) {	/* (BRINGUP) */
		kprintf("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
	}
#endif	/* MP_DEBUG */

#if MACH_KDB
#ifdef	MP_DEBUG
	if (!trappedalready && (cpu_datap(cpu)->cpu_signals & 6)) {	/* (BRINGUP) */
		if (kdb_cpu != cpu_number()) {
			trappedalready = 1;
			panic("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d and I do not own debugger, owner = %08X\n",
				cpu_datap(cpu)->cpu_signals, cpu, kdb_cpu);
		}
	}
#endif	/* MP_DEBUG */
#endif

	/* Wait for previous interrupt to be delivered... */
#ifdef	MP_DEBUG
	int	pending_busy_count = 0;
	while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
		if (++pending_busy_count > max_lock_loops)
			panic("i386_cpu_IPI() deadlock\n");
#else
	while (LAPIC_REG(ICR) & LAPIC_ICR_DS_PENDING) {
#endif	/* MP_DEBUG */
		cpu_pause();
	}

	state = ml_set_interrupts_enabled(FALSE);
	LAPIC_REG(ICRD) =
		cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
	LAPIC_REG(ICR) =
		LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_FIXED;
	(void) ml_set_interrupts_enabled(state);
}
/*
 * cpu_interrupt is really just to be used by the scheduler to
 * get a CPU's attention; it may not always issue an IPI.  If an
 * IPI is always needed then use i386_cpu_IPI.
 */
void
cpu_interrupt(int cpu)
{
	if (smp_initialized
	    && pmCPUExitIdle(cpu_datap(cpu))) {
		i386_cpu_IPI(cpu);
	}
}
/*
 * Send a true NMI via the local APIC to the specified CPU.
 */
void
cpu_NMI_interrupt(int cpu)
{
	boolean_t	state;

	if (smp_initialized) {
		state = ml_set_interrupts_enabled(FALSE);
		/* Program the interrupt command register */
		LAPIC_REG(ICRD) =
			cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT;
		/*
		 * The vector is ignored in this case--the target CPU will
		 * enter on the NMI vector.
		 */
		LAPIC_REG(ICR) =
			LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI;
		(void) ml_set_interrupts_enabled(state);
	}
}
void
i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	uint64_t	tsc_timeout;

	if (!cpu_datap(cpu)->cpu_running)
		return;

	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_START, cpu, 0, 0, 0, 0);

	DBGLOG(cpu_signal, cpu, event);

	i_bit_set(event, signals);
	i386_cpu_IPI(cpu);
	if (mode == SYNC) {
	   again:
		tsc_timeout = rdtsc64() + (1000*1000*1000);
		while (i_bit(event, signals) && rdtsc64() < tsc_timeout) {
			cpu_pause();
		}
		if (i_bit(event, signals)) {
			DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n",
				cpu, event);
			goto again;
		}
	}
	if (event == MP_TLB_FLUSH)
		KERNEL_DEBUG(0xef800020 | DBG_FUNC_END, cpu, 0, 0, 0, 0);
}
/*
 * Send event to all running cpus.
 * Called with the topology locked.
 */
void
i386_signal_cpus(mp_event_t event, mp_sync_t mode)
{
	unsigned int	cpu;
	unsigned int	my_cpu = cpu_number();

	assert(hw_lock_held(&x86_topo_lock));

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		i386_signal_cpu(cpu, event, mode);
	}
}
/*
 * Return the number of running cpus.
 * Called with the topology locked.
 */
int
i386_active_cpus(void)
{
	unsigned int	cpu;
	unsigned int	ncpus = 0;

	assert(hw_lock_held(&x86_topo_lock));

	for (cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu_datap(cpu)->cpu_running)
			ncpus++;
	}
	return ncpus;
}
/*
 * All-CPU rendezvous:
 *	- CPUs are signalled,
 *	- all execute the setup function (if specified),
 *	- rendezvous (i.e. all cpus reach a barrier),
 *	- all execute the action function (if specified),
 *	- rendezvous again,
 *	- execute the teardown function (if specified), and then
 *	- resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */

static void
mp_rendezvous_action(void)
{
	boolean_t	intrs_enabled;

	/* setup function */
	if (mp_rv_setup_func != NULL)
		mp_rv_setup_func(mp_rv_func_arg);

	intrs_enabled = ml_get_interrupts_enabled();

	/* spin on entry rendezvous */
	atomic_incl(&mp_rv_entry, 1);
	while (mp_rv_entry < mp_rv_ncpus) {
		/* poll for pesky tlb flushes if interrupts disabled */
		if (!intrs_enabled)
			handle_pending_TLB_flushes();
		cpu_pause();
	}
	/* action function */
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_incl(&mp_rv_exit, 1);
	while (mp_rv_exit < mp_rv_ncpus) {
		if (!intrs_enabled)
			handle_pending_TLB_flushes();
		cpu_pause();
	}
	/* teardown function */
	if (mp_rv_teardown_func != NULL)
		mp_rv_teardown_func(mp_rv_func_arg);

	/* Bump completion count */
	atomic_incl(&mp_rv_complete, 1);
}
void
mp_rendezvous(void (*setup_func)(void *),
	      void (*action_func)(void *),
	      void (*teardown_func)(void *),
	      void *arg)
{

	if (!smp_initialized) {
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		return;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* set static function pointers */
	mp_rv_setup_func = setup_func;
	mp_rv_action_func = action_func;
	mp_rv_teardown_func = teardown_func;
	mp_rv_func_arg = arg;

	mp_rv_entry    = 0;
	mp_rv_exit     = 0;
	mp_rv_complete = 0;

	/*
	 * signal other processors, which will call mp_rendezvous_action()
	 * with interrupts disabled
	 */
	simple_lock(&x86_topo_lock);
	mp_rv_ncpus = i386_active_cpus();
	i386_signal_cpus(MP_RENDEZVOUS, ASYNC);
	simple_unlock(&x86_topo_lock);

	/* call executor function on this cpu */
	mp_rendezvous_action();

	/*
	 * Spin for everyone to complete.
	 * This is necessary to ensure that all processors have proceeded
	 * from the exit barrier before we release the rendezvous structure.
	 */
	while (mp_rv_complete < mp_rv_ncpus) {
		cpu_pause();
	}

	/* Tidy up */
	mp_rv_setup_func = NULL;
	mp_rv_action_func = NULL;
	mp_rv_teardown_func = NULL;
	mp_rv_func_arg = NULL;

	/* release lock */
	simple_unlock(&mp_rv_lock);
}
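/*
 * Illustrative sketch, not from the original sources: a typical rendezvous.
 * All running cpus spin at the entry barrier, run do_work() in parallel, spin
 * at the exit barrier, then run the teardown.  prepare(), do_work() and
 * cleanup() are hypothetical reentrant callbacks taking the shared void *arg.
 *
 *	mp_rendezvous(prepare, do_work, cleanup, &my_state);
 */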
void
mp_rendezvous_break_lock(void)
{
	simple_lock_init(&mp_rv_lock, 0);
}
static void
setup_disable_intrs(__unused void * param_not_used)
{
	/* disable interrupts before the first barrier */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);

	current_cpu_datap()->cpu_iflag = intr;
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}

static void
teardown_restore_intrs(__unused void * param_not_used)
{
	/* restore interrupt flag following MTRR changes */
	ml_set_interrupts_enabled(current_cpu_datap()->cpu_iflag);
	DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__);
}
/*
 * A wrapper to mp_rendezvous() to call action_func() with interrupts disabled.
 * This is exported for use by kexts.
 */
void
mp_rendezvous_no_intrs(
	      void (*action_func)(void *),
	      void *arg)
{
	mp_rendezvous(setup_disable_intrs,
		      action_func,
		      teardown_restore_intrs,
		      arg);
}
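/*
 * Illustrative sketch, not from the original sources: the kext-facing wrapper
 * runs a callback on every cpu with interrupts disabled, which is the usage
 * pattern the MTRR update code relies on.  update_regs() and its argument are
 * hypothetical.
 *
 *	mp_rendezvous_no_intrs(update_regs, &new_settings);
 */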
void
handle_pending_TLB_flushes(void)
{
	volatile int	*my_word = &current_cpu_datap()->cpu_signals;

	if (i_bit(MP_TLB_FLUSH, my_word)) {
		DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
		i_bit_clear(MP_TLB_FLUSH, my_word);
		pmap_update_interrupt();
	}
}
/*
 * This is called from cpu_signal_handler() to process an MP_CALL signal.
 */
static void
mp_cpus_call_action(void)
{
	if (mp_rv_action_func != NULL)
		mp_rv_action_func(mp_rv_func_arg);
	atomic_incl(&mp_rv_complete, 1);
}
/*
 * mp_cpus_call() runs a given function on cpus specified in a given cpu mask.
 * If the mode is SYNC, the function is called serially on the target cpus
 * in logical cpu order. If the mode is ASYNC, the function is called in
 * parallel over the specified cpus.
 * The action function may be NULL.
 * The cpu mask may include the local cpu. Offline cpus are ignored.
 * Return does not occur until the function has completed on all cpus.
 * The return value is the number of cpus on which the function was called.
 */
cpu_t
mp_cpus_call(
	cpumask_t	cpus,
	mp_sync_t	mode,
	void		(*action_func)(void *),
	void		*arg)
{
	cpu_t		cpu;
	boolean_t	intrs_enabled = ml_get_interrupts_enabled();
	boolean_t	call_self = FALSE;

	if (!smp_initialized) {
		if ((cpus & CPUMASK_SELF) == 0)
			return 0;
		if (action_func != NULL) {
			(void) ml_set_interrupts_enabled(FALSE);
			action_func(arg);
			ml_set_interrupts_enabled(intrs_enabled);
		}
		return 1;
	}

	/* obtain rendezvous lock */
	simple_lock(&mp_rv_lock);

	/* Use the rendezvous data structures for this call */
	mp_rv_action_func = action_func;
	mp_rv_func_arg = arg;
	mp_rv_ncpus = 0;
	mp_rv_complete = 0;

	simple_lock(&x86_topo_lock);
	for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) {
		if (((cpu_to_cpumask(cpu) & cpus) == 0) ||
		    !cpu_datap(cpu)->cpu_running)
			continue;
		if (cpu == (cpu_t) cpu_number()) {
			/*
			 * We don't IPI ourself and if calling asynchronously,
			 * we defer our call until we have signalled all others.
			 */
			call_self = TRUE;
			if (mode == SYNC && action_func != NULL) {
				(void) ml_set_interrupts_enabled(FALSE);
				action_func(arg);
				ml_set_interrupts_enabled(intrs_enabled);
			}
		} else {
			/*
			 * Bump count of other cpus called and signal this cpu.
			 * Note: we signal asynchronously regardless of mode
			 * because we wait on mp_rv_complete either here
			 * (if mode == SYNC) or later (if mode == ASYNC).
			 * While spinning, poll for TLB flushes if interrupts
			 * are disabled.
			 */
			mp_rv_ncpus++;
			i386_signal_cpu(cpu, MP_CALL, ASYNC);
			if (mode == SYNC) {
				simple_unlock(&x86_topo_lock);
				while (mp_rv_complete < mp_rv_ncpus) {
					if (!intrs_enabled)
						handle_pending_TLB_flushes();
					cpu_pause();
				}
				simple_lock(&x86_topo_lock);
			}
		}
	}
	simple_unlock(&x86_topo_lock);

	/*
	 * If calls are being made asynchronously,
	 * make the local call now if needed, and then
	 * wait for all other cpus to finish their calls.
	 */
	if (mode == ASYNC) {
		if (call_self && action_func != NULL) {
			(void) ml_set_interrupts_enabled(FALSE);
			action_func(arg);
			ml_set_interrupts_enabled(intrs_enabled);
		}
		while (mp_rv_complete < mp_rv_ncpus) {
			if (!intrs_enabled)
				handle_pending_TLB_flushes();
			cpu_pause();
		}
	}

	/* Determine the number of cpus called */
	cpu = mp_rv_ncpus + (call_self ? 1 : 0);

	simple_unlock(&mp_rv_lock);

	return cpu;
}
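/*
 * Illustrative sketch, not from the original sources: running a hypothetical
 * flush_caches() callback on this cpu plus cpu 2, in parallel.  CPUMASK_SELF
 * and cpu_to_cpumask() are used above; the callback and its argument are
 * assumptions.
 *
 *	cpu_t n = mp_cpus_call(CPUMASK_SELF | cpu_to_cpumask(2),
 *			       ASYNC, flush_caches, NULL);
 *	// n is the number of cpus that actually ran flush_caches()
 */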
static void
mp_broadcast_action(void)
{
	/* call action function */
	if (mp_bc_action_func != NULL)
		mp_bc_action_func(mp_bc_func_arg);

	/* if we're the last one through, wake up the instigator */
	if (atomic_decl_and_test((volatile long *)&mp_bc_count, 1))
		thread_wakeup(((event_t)(unsigned int *) &mp_bc_count));
}
/*
 * mp_broadcast() runs a given function on all active cpus.
 * The caller blocks until the function has run on all cpus.
 * The caller will also block if there is another pending broadcast.
 */
void
mp_broadcast(
	 void (*action_func)(void *),
	 void *arg)
{
	if (!smp_initialized) {
		if (action_func != NULL)
			action_func(arg);
		return;
	}

	/* obtain broadcast lock */
	mutex_lock(&mp_bc_lock);

	/* set static function pointers */
	mp_bc_action_func = action_func;
	mp_bc_func_arg = arg;

	assert_wait(&mp_bc_count, THREAD_UNINT);

	/*
	 * signal other processors, which will call mp_broadcast_action()
	 */
	simple_lock(&x86_topo_lock);
	mp_bc_ncpus = i386_active_cpus();	/* total including this cpu */
	mp_bc_count = mp_bc_ncpus;
	i386_signal_cpus(MP_BROADCAST, ASYNC);

	/* call executor function on this cpu */
	mp_broadcast_action();
	simple_unlock(&x86_topo_lock);

	/* block for all cpus to have run action_func */
	if (mp_bc_ncpus > 1)
		thread_block(THREAD_CONTINUE_NULL);
	else
		clear_wait(current_thread(), THREAD_AWAKENED);

	/* release lock */
	mutex_unlock(&mp_bc_lock);
}
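/*
 * Illustrative sketch, not from the original sources: running a hypothetical
 * sync_counters() callback once on every active cpu and blocking until all
 * of them have finished.
 *
 *	mp_broadcast(sync_counters, NULL);
 */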
void
i386_activate_cpu(void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	assert(!ml_get_interrupts_enabled());

	if (!smp_initialized) {
		cdp->cpu_running = TRUE;
		return;
	}

	simple_lock(&x86_topo_lock);
	cdp->cpu_running = TRUE;
	simple_unlock(&x86_topo_lock);
}
void
i386_deactivate_cpu(void)
{
	cpu_data_t	*cdp = current_cpu_datap();

	assert(!ml_get_interrupts_enabled());

	simple_lock(&x86_topo_lock);
	cdp->cpu_running = FALSE;
	simple_unlock(&x86_topo_lock);

	/*
	 * In case a rendezvous/broadcast/call was initiated to this cpu
	 * before we cleared cpu_running, we must perform any actions due.
	 */
	if (i_bit(MP_RENDEZVOUS, &cdp->cpu_signals))
		mp_rendezvous_action();
	if (i_bit(MP_BROADCAST, &cdp->cpu_signals))
		mp_broadcast_action();
	if (i_bit(MP_CALL, &cdp->cpu_signals))
		mp_cpus_call_action();
	cdp->cpu_signals = 0;			/* all clear */
}
int	pmsafe_debug = 1;

#if	MACH_KDP
volatile boolean_t	mp_kdp_trap = FALSE;
volatile unsigned long	mp_kdp_ncpus;
boolean_t		mp_kdp_state;
void
mp_kdp_enter(void)
{
	unsigned int	cpu;
	unsigned int	ncpus;
	unsigned int	my_cpu = cpu_number();
	uint64_t	tsc_timeout;

	DBG("mp_kdp_enter()\n");

	/*
	 * Here to enter the debugger.
	 * In case of races, only one cpu is allowed to enter kdp after
	 * stopping others.
	 */
	mp_kdp_state = ml_set_interrupts_enabled(FALSE);
	simple_lock(&mp_kdp_lock);

	if (pmsafe_debug)
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);

	while (mp_kdp_trap) {
		simple_unlock(&mp_kdp_lock);
		DBG("mp_kdp_enter() race lost\n");
		mp_kdp_wait(TRUE);
		simple_lock(&mp_kdp_lock);
	}
	mp_kdp_trap = TRUE;
	mp_kdp_ncpus = 1;	/* self */
	simple_unlock(&mp_kdp_lock);

	/*
	 * Deliver a nudge to other cpus, counting how many
	 */
	DBG("mp_kdp_enter() signaling other processors\n");
	if (force_immediate_debugger_NMI == FALSE) {
		for (ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
			if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
				continue;
			ncpus++;
			i386_signal_cpu(cpu, MP_KDP, ASYNC);
		}
		/*
		 * Wait other processors to synchronize
		 */
		DBG("mp_kdp_enter() waiting for (%d) processors to suspend\n", ncpus);

		/*
		 * This timeout is rather arbitrary; we don't want to NMI
		 * processors that are executing at potentially
		 * "unsafe-to-interrupt" points such as the trampolines,
		 * but neither do we want to lose state by waiting too long.
		 */
		tsc_timeout = rdtsc64() + (ncpus * 1000 * 1000);

		while (mp_kdp_ncpus != ncpus && rdtsc64() < tsc_timeout) {
			/*
			 * A TLB shootdown request may be pending--this would
			 * result in the requesting processor waiting in
			 * PMAP_UPDATE_TLBS() until this processor deals with it.
			 * Process it, so it can now enter mp_kdp_wait()
			 */
			handle_pending_TLB_flushes();
			cpu_pause();
		}
		/* If we've timed out, and some processor(s) are still unresponsive,
		 * interrupt them with an NMI via the local APIC.
		 */
		if (mp_kdp_ncpus != ncpus) {
			for (cpu = 0; cpu < real_ncpus; cpu++) {
				if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
					continue;
				if (cpu_signal_pending(cpu, MP_KDP))
					cpu_NMI_interrupt(cpu);
			}
		}
	}
	else
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
				continue;
			cpu_NMI_interrupt(cpu);
		}

	DBG("mp_kdp_enter() %u processors done %s\n",
		mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out");

	postcode(MP_KDP_ENTER);
}
static boolean_t
cpu_signal_pending(int cpu, mp_event_t event)
{
	volatile int	*signals = &cpu_datap(cpu)->cpu_signals;
	boolean_t	retval = FALSE;

	if (i_bit(event, signals))
		retval = TRUE;
	return retval;
}
static void
mp_kdp_wait(boolean_t flush)
{
	DBG("mp_kdp_wait()\n");
	/* If an I/O port has been specified as a debugging aid, issue a read */
	panic_io_port_read();

	/* If we've trapped due to a machine-check, save MCA registers */
	mca_check_save();

	if (pmsafe_debug)
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE);

	atomic_incl((volatile long *)&mp_kdp_ncpus, 1);
	while (mp_kdp_trap) {
		/*
		 * A TLB shootdown request may be pending--this would result
		 * in the requesting processor waiting in PMAP_UPDATE_TLBS()
		 * until this processor handles it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		if (flush)
			handle_pending_TLB_flushes();
		cpu_pause();
	}

	if (pmsafe_debug)
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);

	atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
	DBG("mp_kdp_wait() done\n");
}
void
mp_kdp_exit(void)
{
	DBG("mp_kdp_exit()\n");
	atomic_decl((volatile long *)&mp_kdp_ncpus, 1);
	mp_kdp_trap = FALSE;
	__asm__ volatile("mfence");

	/* Wait other processors to stop spinning. XXX needs timeout */
	DBG("mp_kdp_exit() waiting for processors to resume\n");
	while (mp_kdp_ncpus > 0) {
		/*
		 * a TLB shootdown request may be pending... this would result in the requesting
		 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}

	if (pmsafe_debug)
		pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL);

	DBG("mp_kdp_exit() done\n");
	(void) ml_set_interrupts_enabled(mp_kdp_state);
}
#endif	/* MACH_KDP */
void
init_ast_check(
	__unused processor_t	processor)
{
}

void
cause_ast_check(
	processor_t	processor)
{
	int	cpu = PROCESSOR_DATA(processor, slot_num);

	if (cpu != cpu_number()) {
		i386_signal_cpu(cpu, MP_AST, ASYNC);
	}
}
#if	MACH_KDB
/*
 * invoke kdb on slave processors
 */
void
remote_kdb(void)
{
	unsigned int	my_cpu = cpu_number();
	unsigned int	cpu;
	int		kdb_ncpus;
	uint64_t	tsc_timeout = 0;

	mp_kdb_trap = TRUE;
	mp_kdb_ncpus = 1;
	for (kdb_ncpus = 1, cpu = 0; cpu < real_ncpus; cpu++) {
		if (cpu == my_cpu || !cpu_datap(cpu)->cpu_running)
			continue;
		kdb_ncpus++;
		i386_signal_cpu(cpu, MP_KDB, ASYNC);
	}
	DBG("remote_kdb() waiting for (%d) processors to suspend\n",kdb_ncpus);

	tsc_timeout = rdtsc64() + (kdb_ncpus * 100 * 1000 * 1000);

	while (mp_kdb_ncpus != kdb_ncpus && rdtsc64() < tsc_timeout) {
		/*
		 * a TLB shootdown request may be pending... this would result in the requesting
		 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}
	DBG("remote_kdb() %d processors done %s\n",
		mp_kdb_ncpus, (mp_kdb_ncpus == kdb_ncpus) ? "OK" : "timed out");
}
static void
mp_kdb_wait(void)
{
	DBG("mp_kdb_wait()\n");

	/* If an I/O port has been specified as a debugging aid, issue a read */
	panic_io_port_read();

	atomic_incl(&mp_kdb_ncpus, 1);
	while (mp_kdb_trap) {
		/*
		 * a TLB shootdown request may be pending... this would result in the requesting
		 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}
	atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
	DBG("mp_kdb_wait() done\n");
}
/*
 * Clear kdb interrupt
 */
void
clear_kdb_intr(void)
{
	mp_disable_preemption();
	i_bit_clear(MP_KDB, &current_cpu_datap()->cpu_signals);
	mp_enable_preemption();
}
void
mp_kdb_exit(void)
{
	DBG("mp_kdb_exit()\n");
	atomic_decl((volatile long *)&mp_kdb_ncpus, 1);
	mp_kdb_trap = FALSE;
	__asm__ volatile("mfence");

	while (mp_kdb_ncpus > 0) {
		/*
		 * a TLB shootdown request may be pending... this would result in the requesting
		 * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it.
		 * Process it, so it can now enter mp_kdp_wait()
		 */
		handle_pending_TLB_flushes();
		cpu_pause();
	}

	DBG("mp_kdb_exit() done\n");
}

#endif	/* MACH_KDB */
/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	postcode(I386_INIT_SLAVE);

	/* Ensure that caching and write-through are enabled */
	set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

	DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		get_cpu_number(), get_cpu_phys_number());

	assert(!ml_get_interrupts_enabled());

	cpu_mode_init(current_cpu_datap());

	LAPIC_CPU_MAP_DUMP();

	/* resume VT operation */
	vmx_resume();

	cpu_thread_init();	/* not strictly necessary */

	cpu_init();	/* Sets cpu_running which starter cpu waits for */

	slave_main();

	panic("i386_init_slave() returned from slave_main()");
}
void
slave_machine_init(void)
{
	/*
	 * Here in process context, but with interrupts disabled.
	 */
	DBG("slave_machine_init() CPU%d\n", get_cpu_number());

	cpu_machine_init();	/* Interrupts enabled hereafter */
}

int cpu_number(void)
{
	return get_cpu_number();
}
#if	MACH_KDB
#include <ddb/db_output.h>

#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */

#if	TRAP_DEBUG
#define MTRAPS 100
struct mp_trap_hist_struct {
	unsigned char	type;
	unsigned char	data[5];
} trap_hist[MTRAPS], *cur_trap_hist = trap_hist,
  *max_trap_hist = &trap_hist[MTRAPS];

void db_trap_hist(void);

void
db_trap_hist(void)
{
	int i, j;

	for (i = 0; i < MTRAPS; i++)
		if (trap_hist[i].type == 1 || trap_hist[i].type == 2) {
			db_printf("%s%s",
				  (&trap_hist[i]>=cur_trap_hist)?"*":" ",
				  (trap_hist[i].type == 1)?"SPL":"INT");
			for (j = 0; j < 5; j++)
				db_printf(" %02x", trap_hist[i].data[j]);
			db_printf("\n");
		}
}
#endif	/* TRAP_DEBUG */
#endif	/* MACH_KDB */