2 * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
56 #include <i386/machine_check.h>
59 #include <sys/kdebug.h>
62 #define PAUSE delay(1000000)
63 #define DBG(x...) kprintf(x)
69 lapic_ops_table_t
*lapic_ops
; /* Lapic operations switch */
71 static vm_map_offset_t lapic_pbase
; /* Physical base memory-mapped regs */
72 static vm_offset_t lapic_vbase
; /* Virtual base memory-mapped regs */
74 static i386_intr_func_t lapic_intr_func
[LAPIC_FUNC_TABLE_SIZE
];
76 /* TRUE if local APIC was enabled by the OS not by the BIOS */
77 static boolean_t lapic_os_enabled
= FALSE
;
79 static boolean_t lapic_errors_masked
= FALSE
;
80 static uint64_t lapic_last_master_error
= 0;
81 static uint64_t lapic_error_time_threshold
= 0;
82 static unsigned lapic_master_error_count
= 0;
83 static unsigned lapic_error_count_threshold
= 5;
84 static boolean_t lapic_dont_panic
= FALSE
;
85 int lapic_max_interrupt_cpunum
= 0;
88 APIC_MODE_UNKNOWN
= 0,
93 static apic_mode_t apic_mode_before_sleep
= APIC_MODE_UNKNOWN
;
97 lapic_cpu_map_dump(void)
101 for (i
= 0; i
< MAX_CPUS
; i
++) {
102 if (cpu_to_lapic
[i
] == -1) {
105 kprintf("cpu_to_lapic[%d]: %d\n",
108 for (i
= 0; i
< MAX_LAPICIDS
; i
++) {
109 if (lapic_to_cpu
[i
] == -1) {
112 kprintf("lapic_to_cpu[%d]: %d\n",
116 #endif /* MP_DEBUG */
121 vm_map_offset_t lapic_vbase64
;
124 vm_map_entry_t entry
;
126 if (lapic_vbase
== 0) {
127 lapic_vbase64
= (vm_offset_t
)vm_map_min(kernel_map
);
128 result
= vm_map_find_space(kernel_map
,
130 round_page(LAPIC_SIZE
), 0,
132 VM_MAP_KERNEL_FLAGS_NONE
,
133 VM_KERN_MEMORY_IOKIT
,
135 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
137 lapic_vbase
= (vm_offset_t
) lapic_vbase64
;
138 if (result
!= KERN_SUCCESS
) {
139 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result
);
141 vm_map_unlock(kernel_map
);
144 * Map in the local APIC non-cacheable, as recommended by Intel
145 * in section 8.4.1 of the "System Programming Guide".
146 * In fact, this is redundant because EFI will have assigned an
147 * MTRR physical range containing the local APIC's MMIO space as
148 * UC and this will override the default PAT setting.
150 kr
= pmap_enter(pmap_kernel(),
152 (ppnum_t
) i386_btop(lapic_pbase
),
153 VM_PROT_READ
| VM_PROT_WRITE
,
158 assert(kr
== KERN_SUCCESS
);
167 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
168 if ((lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0) {
170 * If we're already in x2APIC mode, we MUST disable the local APIC
171 * before transitioning back to legacy APIC mode.
173 lo
&= ~(MSR_IA32_APIC_BASE_ENABLE
| MSR_IA32_APIC_BASE_EXTENDED
);
174 wrmsr64(MSR_IA32_APIC_BASE
, ((uint64_t)hi
) << 32 | lo
);
175 wrmsr64(MSR_IA32_APIC_BASE
, ((uint64_t)hi
) << 32 | lo
| MSR_IA32_APIC_BASE_ENABLE
);
178 * Set flat delivery model, logical processor id
179 * This should already be the default set.
181 LAPIC_WRITE(DFR
, LAPIC_DFR_FLAT
);
182 LAPIC_WRITE(LDR
, (get_cpu_number()) << LAPIC_LDR_SHIFT
);
187 legacy_read(lapic_register_t reg
)
189 return *LAPIC_MMIO(reg
);
193 legacy_write(lapic_register_t reg
, uint32_t value
)
195 *LAPIC_MMIO(reg
) = value
;
199 legacy_read_icr(void)
201 return (((uint64_t)*LAPIC_MMIO(ICRD
)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR
));
205 legacy_write_icr(uint32_t dst
, uint32_t cmd
)
207 *LAPIC_MMIO(ICRD
) = dst
<< LAPIC_ICRD_DEST_SHIFT
;
208 *LAPIC_MMIO(ICR
) = cmd
;
211 static lapic_ops_table_t legacy_ops
= {
219 boolean_t is_x2apic
= FALSE
;
227 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
228 if ((lo
& MSR_IA32_APIC_BASE_EXTENDED
) == 0) {
229 lo
|= MSR_IA32_APIC_BASE_EXTENDED
;
230 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
231 kprintf("x2APIC mode enabled\n");
236 x2apic_read(lapic_register_t reg
)
241 rdmsr(LAPIC_MSR(reg
), lo
, hi
);
246 x2apic_write(lapic_register_t reg
, uint32_t value
)
248 wrmsr(LAPIC_MSR(reg
), value
, 0);
252 x2apic_read_icr(void)
254 return rdmsr64(LAPIC_MSR(ICR
));;
258 x2apic_write_icr(uint32_t dst
, uint32_t cmd
)
260 wrmsr(LAPIC_MSR(ICR
), cmd
, dst
);
263 static lapic_ops_table_t x2apic_ops
= {
272 * Used by APs to determine their APIC IDs; assumes master CPU has initialized
273 * the local APIC interfaces.
276 lapic_safe_apicid(void)
280 boolean_t is_lapic_enabled
, is_local_x2apic
;
282 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
283 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
284 is_local_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
286 if (is_lapic_enabled
&& is_local_x2apic
) {
287 return x2apic_read(ID
);
288 } else if (is_lapic_enabled
) {
289 return (*LAPIC_MMIO(ID
) >> LAPIC_ID_SHIFT
) & LAPIC_ID_MASK
;
291 panic("Unknown Local APIC state!");
297 lapic_reinit(bool for_wake
)
301 boolean_t is_boot_processor
;
302 boolean_t is_lapic_enabled
;
303 boolean_t is_local_x2apic
;
305 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
306 is_boot_processor
= (lo
& MSR_IA32_APIC_BASE_BSP
) != 0;
307 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
308 is_local_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
311 * If we're configured for x2apic mode and we're being asked to transition
312 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
313 * asked to transition to x2apic mode, call LAPIC_INIT().
315 if ((!is_local_x2apic
&& is_x2apic
) || (is_local_x2apic
&& !is_x2apic
)) {
317 /* Now re-read after LAPIC_INIT() */
318 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
319 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
320 is_local_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
323 if ((!is_lapic_enabled
&& !is_local_x2apic
)) {
324 panic("Unexpected local APIC state\n");
328 * If we did not select the same APIC mode as we had before sleep, flag
329 * that as an error (and panic on debug/development kernels). Note that
330 * we might get here with for_wake == true for the first boot case. In
331 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
332 * slept yet), so we do not need to do any APIC checks.
335 ((apic_mode_before_sleep
== APIC_MODE_XAPIC
&& !is_lapic_enabled
) ||
336 (apic_mode_before_sleep
== APIC_MODE_X2APIC
&& !is_local_x2apic
))) {
337 kprintf("Inconsistent APIC state after wake (was %d before sleep, "
338 "now is %d)", apic_mode_before_sleep
,
339 is_lapic_enabled
? APIC_MODE_XAPIC
: APIC_MODE_X2APIC
);
340 #if DEBUG || DEVELOPMENT
341 kprintf("HALTING.\n");
343 * Unfortunately, we cannot safely panic here because the
344 * executing CPU might not be fully initialized. The best
345 * we can do is just print a message to the console and
348 asm volatile ("cli; hlt;" ::: "memory");
354 lapic_init_slave(void)
357 #if DEBUG || DEVELOPMENT
358 if (rdmsr64(MSR_IA32_APIC_BASE
) & MSR_IA32_APIC_BASE_BSP
) {
359 panic("Calling lapic_init_slave() on the boot processor\n");
369 boolean_t is_boot_processor
;
370 boolean_t is_lapic_enabled
;
372 /* Examine the local APIC state */
373 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
374 is_boot_processor
= (lo
& MSR_IA32_APIC_BASE_BSP
) != 0;
375 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
376 is_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
377 lapic_pbase
= (lo
& MSR_IA32_APIC_BASE_BASE
);
378 kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase
,
379 is_lapic_enabled
? "enabled" : "disabled",
380 is_x2apic
? "extended" : "legacy",
381 is_boot_processor
? "BSP" : "AP");
382 if (!is_boot_processor
|| !is_lapic_enabled
) {
383 panic("Unexpected local APIC state\n");
387 * If x2APIC is available and not already enabled, enable it.
388 * Unless overridden by boot-arg.
390 if (!is_x2apic
&& (cpuid_features() & CPUID_FEATURE_x2APIC
)) {
392 * If no x2apic boot-arg was set and if we're running under a VMM,
393 * autoenable x2APIC mode.
395 if (PE_parse_boot_argn("x2apic", &is_x2apic
, sizeof(is_x2apic
)) == FALSE
&&
396 cpuid_vmm_info()->cpuid_vmm_family
!= CPUID_VMM_FAMILY_NONE
) {
399 kprintf("x2APIC supported %s be enabled\n",
400 is_x2apic
? "and will" : "but will not");
403 lapic_ops
= is_x2apic
? &x2apic_ops
: &legacy_ops
;
405 if (lapic_pbase
!= 0) {
407 * APs might need to consult the local APIC via the MMIO interface
408 * to get their APIC IDs.
411 } else if (!is_x2apic
) {
412 panic("Local APIC physical address was not set.");
417 kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID
), LAPIC_READ(LDR
));
418 if ((LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
) < 0x14) {
419 panic("Local APIC version 0x%x, 0x14 or more expected\n",
420 (LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
));
423 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
424 lapic_cpu_map_init();
425 lapic_cpu_map(lapic_safe_apicid(), 0);
426 current_cpu_datap()->cpu_phys_number
= cpu_to_lapic
[0];
427 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic
[0]);
434 /* write-read register */
435 LAPIC_WRITE(ERROR_STATUS
, 0);
436 return LAPIC_READ(ERROR_STATUS
);
440 lapic_esr_clear(void)
442 LAPIC_WRITE(ERROR_STATUS
, 0);
443 LAPIC_WRITE(ERROR_STATUS
, 0);
446 static const char *DM_str
[8] = {
457 static const char *TMR_str
[] = {
469 #define BOOL(a) ((a)?' ':'!')
471 LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
473 (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
475 DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
477 BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
479 (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
481 (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"
483 kprintf("LAPIC %d at %p version 0x%x\n",
485 (void *) lapic_vbase
,
486 LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
);
487 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
488 LAPIC_READ(TPR
) & LAPIC_TPR_MASK
,
489 LAPIC_READ(APR
) & LAPIC_APR_MASK
,
490 LAPIC_READ(PPR
) & LAPIC_PPR_MASK
);
491 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
492 is_x2apic
? 0 : LAPIC_READ(DFR
) >> LAPIC_DFR_SHIFT
,
493 LAPIC_READ(LDR
) >> LAPIC_LDR_SHIFT
);
494 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
495 BOOL(LAPIC_READ(SVR
) & LAPIC_SVR_ENABLE
),
496 BOOL(!(LAPIC_READ(SVR
) & LAPIC_SVR_FOCUS_OFF
)),
497 LAPIC_READ(SVR
) & LAPIC_SVR_MASK
);
499 if (mca_is_cmci_present()) {
500 kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
507 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
511 TMR_str
[(LAPIC_READ(LVT_TIMER
) >> LAPIC_LVT_TMR_SHIFT
)
512 & LAPIC_LVT_TMR_MASK
]);
513 kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT
));
514 kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT
));
515 kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG
));
516 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
521 kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
526 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
533 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
540 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
544 kprintf("ESR: %08x \n", lapic_esr_read());
546 for (i
= 0xf; i
>= 0; i
--) {
547 kprintf("%x%x%x%x", i
, i
, i
, i
);
551 for (i
= 7; i
>= 0; i
--) {
552 kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE
, i
));
556 for (i
= 7; i
>= 0; i
--) {
557 kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE
, i
));
561 for (i
= 7; i
>= 0; i
--) {
562 kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE
, i
));
573 if (cpuid_features() & CPUID_FEATURE_APIC
) {
577 if (cpuid_family() == 6 || cpuid_family() == 15) {
580 * There may be a local APIC which wasn't enabled by BIOS.
581 * So we try to enable it explicitly.
583 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
584 lo
&= ~MSR_IA32_APIC_BASE_BASE
;
585 lo
|= MSR_IA32_APIC_BASE_ENABLE
| LAPIC_START
;
586 lo
|= MSR_IA32_APIC_BASE_ENABLE
;
587 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
590 * Re-initialize cpu features info and re-check.
593 /* We expect this codepath will never be traversed
594 * due to EFI enabling the APIC. Reducing the APIC
595 * interrupt base dynamically is not supported.
597 if (cpuid_features() & CPUID_FEATURE_APIC
) {
598 printf("Local APIC discovered and enabled\n");
599 lapic_os_enabled
= TRUE
;
600 lapic_interrupt_base
= LAPIC_REDUCED_INTERRUPT_BASE
;
609 lapic_shutdown(bool for_sleep
)
615 if (for_sleep
== true) {
616 apic_mode_before_sleep
= (is_x2apic
? APIC_MODE_X2APIC
: APIC_MODE_XAPIC
);
619 /* Shutdown if local APIC was enabled by OS */
620 if (lapic_os_enabled
== FALSE
) {
624 mp_disable_preemption();
627 if (get_cpu_number() <= lapic_max_interrupt_cpunum
) {
628 value
= LAPIC_READ(LVT_LINT0
);
629 value
|= LAPIC_LVT_MASKED
;
630 LAPIC_WRITE(LVT_LINT0
, value
);
634 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
637 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) | LAPIC_LVT_MASKED
);
639 /* Perfmon: masked */
640 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_READ(LVT_PERFCNT
) | LAPIC_LVT_MASKED
);
642 /* APIC software disabled */
643 LAPIC_WRITE(SVR
, LAPIC_READ(SVR
) & ~LAPIC_SVR_ENABLE
);
645 /* Bypass the APIC completely and update cpu features */
646 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
647 lo
&= ~MSR_IA32_APIC_BASE_ENABLE
;
648 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
651 mp_enable_preemption();
655 cpu_can_exit(int cpu
)
657 return cpu
> lapic_max_interrupt_cpunum
;
661 lapic_configure(bool for_wake
)
665 if (lapic_error_time_threshold
== 0 && cpu_number() == 0) {
666 nanoseconds_to_absolutetime(NSEC_PER_SEC
>> 2, &lapic_error_time_threshold
);
667 if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic
, sizeof(lapic_dont_panic
))) {
668 lapic_dont_panic
= FALSE
;
672 if (cpu_number() == 0) {
673 if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum
, sizeof(lapic_max_interrupt_cpunum
))) {
674 lapic_max_interrupt_cpunum
= ((cpuid_features() & CPUID_FEATURE_HTT
) ? 1 : 0);
679 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
680 * but firmware configured the Legacy APIC):
682 lapic_reinit(for_wake
);
687 LAPIC_WRITE(SVR
, LAPIC_VECTOR(SPURIOUS
) | LAPIC_SVR_ENABLE
);
690 if (get_cpu_number() <= lapic_max_interrupt_cpunum
) {
691 value
= LAPIC_READ(LVT_LINT0
);
692 value
&= ~LAPIC_LVT_MASKED
;
693 value
|= LAPIC_LVT_DM_EXTINT
;
694 LAPIC_WRITE(LVT_LINT0
, value
);
697 /* Timer: unmasked, one-shot */
698 LAPIC_WRITE(LVT_TIMER
, LAPIC_VECTOR(TIMER
));
700 /* Perfmon: unmasked */
701 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
703 /* Thermal: unmasked */
704 LAPIC_WRITE(LVT_THERMAL
, LAPIC_VECTOR(THERMAL
));
707 /* CMCI, if available */
708 if (mca_is_cmci_present()) {
709 LAPIC_WRITE(LVT_CMCI
, LAPIC_VECTOR(CMCI
));
713 if (((cpu_number() == master_cpu
) && lapic_errors_masked
== FALSE
) ||
714 (cpu_number() != master_cpu
)) {
716 LAPIC_WRITE(LVT_ERROR
, LAPIC_VECTOR(ERROR
));
722 boolean_t interrupt_unmasked
,
723 lapic_timer_mode_t mode
,
724 lapic_timer_divide_t divisor
,
725 lapic_timer_count_t initial_count
)
727 uint32_t timer_vector
;
729 mp_disable_preemption();
730 timer_vector
= LAPIC_READ(LVT_TIMER
);
731 timer_vector
&= ~(LAPIC_LVT_MASKED
| LAPIC_LVT_PERIODIC
);;
732 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
733 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
734 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
735 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
736 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
737 mp_enable_preemption();
742 boolean_t interrupt_unmasked
,
743 lapic_timer_mode_t mode
,
744 lapic_timer_divide_t divisor
)
746 uint32_t timer_vector
;
748 mp_disable_preemption();
749 timer_vector
= LAPIC_READ(LVT_TIMER
);
750 timer_vector
&= ~(LAPIC_LVT_MASKED
|
752 LAPIC_LVT_TSC_DEADLINE
);
753 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
754 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
755 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
756 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
757 mp_enable_preemption();
761 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
764 lapic_config_tsc_deadline_timer(void)
766 uint32_t timer_vector
;
768 DBG("lapic_config_tsc_deadline_timer()\n");
769 mp_disable_preemption();
770 timer_vector
= LAPIC_READ(LVT_TIMER
);
771 timer_vector
&= ~(LAPIC_LVT_MASKED
|
773 timer_vector
|= LAPIC_LVT_TSC_DEADLINE
;
774 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
776 /* Serialize writes per Intel OSWG */
778 lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
779 } while (lapic_get_tsc_deadline_timer() == 0);
780 lapic_set_tsc_deadline_timer(0);
782 mp_enable_preemption();
783 DBG("lapic_config_tsc_deadline_timer() done\n");
787 lapic_set_timer_fast(
788 lapic_timer_count_t initial_count
)
790 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) & ~LAPIC_LVT_MASKED
);
791 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
795 lapic_set_tsc_deadline_timer(uint64_t deadline
)
797 /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
798 wrmsr64(MSR_IA32_TSC_DEADLINE
, deadline
);
802 lapic_get_tsc_deadline_timer(void)
804 return rdmsr64(MSR_IA32_TSC_DEADLINE
);
809 lapic_timer_mode_t
*mode
,
810 lapic_timer_divide_t
*divisor
,
811 lapic_timer_count_t
*initial_count
,
812 lapic_timer_count_t
*current_count
)
814 mp_disable_preemption();
816 *mode
= (LAPIC_READ(LVT_TIMER
) & LAPIC_LVT_PERIODIC
) ?
820 *divisor
= LAPIC_READ(TIMER_DIVIDE_CONFIG
) & LAPIC_TIMER_DIVIDE_MASK
;
823 *initial_count
= LAPIC_READ(TIMER_INITIAL_COUNT
);
826 *current_count
= LAPIC_READ(TIMER_CURRENT_COUNT
);
828 mp_enable_preemption();
832 _lapic_end_of_interrupt(void)
838 lapic_end_of_interrupt(void)
840 _lapic_end_of_interrupt();
844 lapic_unmask_perfcnt_interrupt(void)
846 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
850 lapic_set_perfcnt_interrupt_mask(boolean_t mask
)
852 uint32_t m
= (mask
? LAPIC_LVT_MASKED
: 0);
853 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
) | m
);
857 lapic_set_intr_func(int vector
, i386_intr_func_t func
)
859 if (vector
> lapic_interrupt_base
) {
860 vector
-= lapic_interrupt_base
;
864 case LAPIC_NMI_INTERRUPT
:
865 case LAPIC_INTERPROCESSOR_INTERRUPT
:
866 case LAPIC_TIMER_INTERRUPT
:
867 case LAPIC_THERMAL_INTERRUPT
:
868 case LAPIC_PERFCNT_INTERRUPT
:
869 case LAPIC_CMCI_INTERRUPT
:
870 case LAPIC_PM_INTERRUPT
:
871 lapic_intr_func
[vector
] = func
;
874 panic("lapic_set_intr_func(%d,%p) invalid vector\n",
880 lapic_set_pmi_func(i386_intr_func_t func
)
882 lapic_set_intr_func(LAPIC_VECTOR(PERFCNT
), func
);
886 lapic_interrupt(int interrupt_num
, x86_saved_state_t
*state
)
891 interrupt_num
-= lapic_interrupt_base
;
892 if (interrupt_num
< 0) {
893 if (interrupt_num
== (LAPIC_NMI_INTERRUPT
- lapic_interrupt_base
) &&
894 lapic_intr_func
[LAPIC_NMI_INTERRUPT
] != NULL
) {
895 retval
= (*lapic_intr_func
[LAPIC_NMI_INTERRUPT
])(state
);
902 switch (interrupt_num
) {
903 case LAPIC_TIMER_INTERRUPT
:
904 case LAPIC_THERMAL_INTERRUPT
:
905 case LAPIC_INTERPROCESSOR_INTERRUPT
:
906 case LAPIC_PM_INTERRUPT
:
907 if (lapic_intr_func
[interrupt_num
] != NULL
) {
908 (void) (*lapic_intr_func
[interrupt_num
])(state
);
910 _lapic_end_of_interrupt();
913 case LAPIC_PERFCNT_INTERRUPT
:
914 /* If a function has been registered, invoke it. Otherwise,
917 if (lapic_intr_func
[interrupt_num
] != NULL
) {
918 (void) (*lapic_intr_func
[interrupt_num
])(state
);
919 /* Unmask the interrupt since we don't expect legacy users
920 * to be responsible for it.
922 lapic_unmask_perfcnt_interrupt();
923 _lapic_end_of_interrupt();
927 case LAPIC_CMCI_INTERRUPT
:
928 if (lapic_intr_func
[interrupt_num
] != NULL
) {
929 (void) (*lapic_intr_func
[interrupt_num
])(state
);
931 /* return 0 for platform expert to handle */
933 case LAPIC_ERROR_INTERRUPT
:
934 /* We treat error interrupts on APs as fatal.
935 * The current interrupt steering scheme directs most
936 * external interrupts to the BSP (HPET interrupts being
937 * a notable exception); hence, such an error
938 * on an AP may signify LVT corruption (with "may" being
939 * the operative word). On the BSP, we adopt a more
940 * lenient approach, in the interests of enhancing
941 * debuggability and reducing fragility.
942 * If "lapic_error_count_threshold" error interrupts
943 * occur within "lapic_error_time_threshold" absolute
944 * time units, we mask the error vector and log. The
945 * error interrupts themselves are likely
946 * side effects of issues which are beyond the purview of
947 * the local APIC interrupt handler, however. The Error
948 * Status Register value (the illegal destination
949 * vector code is one observed in practice) indicates
950 * the immediate cause of the error.
952 esr
= lapic_esr_read();
955 if ((debug_boot_arg
&& (lapic_dont_panic
== FALSE
)) ||
956 cpu_number() != master_cpu
) {
957 panic("Local APIC error, ESR: %d\n", esr
);
960 if (cpu_number() == master_cpu
) {
961 uint64_t abstime
= mach_absolute_time();
962 if ((abstime
- lapic_last_master_error
) < lapic_error_time_threshold
) {
963 if (lapic_master_error_count
++ > lapic_error_count_threshold
) {
964 lapic_errors_masked
= TRUE
;
965 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
966 printf("Local APIC: errors masked\n");
969 lapic_last_master_error
= abstime
;
970 lapic_master_error_count
= 0;
972 printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr
, lapic_master_error_count
);
975 _lapic_end_of_interrupt();
978 case LAPIC_SPURIOUS_INTERRUPT
:
980 /* No EOI required here */
983 case LAPIC_PMC_SW_INTERRUPT
:
987 case LAPIC_KICK_INTERRUPT
:
988 _lapic_end_of_interrupt();
997 lapic_smm_restore(void)
1001 if (lapic_os_enabled
== FALSE
) {
1005 state
= ml_set_interrupts_enabled(FALSE
);
1007 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE
, TIMER
)) {
1009 * Bogus SMI handler enables interrupts but does not know about
1010 * local APIC interrupt sources. When APIC timer counts down to
1011 * zero while in SMM, local APIC will end up waiting for an EOI
1012 * but no interrupt was delivered to the OS.
1014 _lapic_end_of_interrupt();
1017 * timer is one-shot, trigger another quick countdown to trigger
1018 * another timer interrupt.
1020 if (LAPIC_READ(TIMER_CURRENT_COUNT
) == 0) {
1021 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 1);
1024 kprintf("lapic_smm_restore\n");
1027 ml_set_interrupts_enabled(state
);
1031 lapic_send_ipi(int cpu
, int vector
)
1035 if (vector
< lapic_interrupt_base
) {
1036 vector
+= lapic_interrupt_base
;
1039 state
= ml_set_interrupts_enabled(FALSE
);
1041 /* X2APIC's ICR doesn't have a pending bit. */
1043 /* Wait for pending outgoing send to complete */
1044 while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING
) {
1049 LAPIC_WRITE_ICR(cpu_to_lapic
[cpu
], vector
| LAPIC_ICR_DM_FIXED
);
1051 (void) ml_set_interrupts_enabled(state
);
1055 * The following interfaces are privately exported to AICPM.
1059 lapic_is_interrupt_pending(void)
1063 for (i
= 0; i
< 8; i
+= 1) {
1064 if ((LAPIC_READ_OFFSET(IRR_BASE
, i
) != 0) ||
1065 (LAPIC_READ_OFFSET(ISR_BASE
, i
) != 0)) {
1074 lapic_is_interrupting(uint8_t vector
)
1082 bit
= 1 << (vector
% 32);
1084 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
1085 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
1087 if ((irr
| isr
) & bit
) {
1095 lapic_interrupt_counts(uint64_t intrs
[256])
1103 if (intrs
== NULL
) {
1107 for (i
= 0; i
< 8; i
+= 1) {
1108 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
1109 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
1111 if ((isr
| irr
) == 0) {
1115 for (j
= (i
== 0) ? 16 : 0; j
< 32; j
+= 1) {
1117 if ((isr
| irr
) & (1 << j
)) {
1125 lapic_disable_timer(void)
1130 * If we're in deadline timer mode,
1131 * simply clear the deadline timer, otherwise
1132 * mask the timer interrupt and clear the countdown.
1134 lvt_timer
= LAPIC_READ(LVT_TIMER
);
1135 if (lvt_timer
& LAPIC_LVT_TSC_DEADLINE
) {
1136 wrmsr64(MSR_IA32_TSC_DEADLINE
, 0);
1138 LAPIC_WRITE(LVT_TIMER
, lvt_timer
| LAPIC_LVT_MASKED
);
1139 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 0);
1140 lvt_timer
= LAPIC_READ(LVT_TIMER
);
1144 /* SPI returning the CMCI vector */
1146 lapic_get_cmci_vector(void)
1148 uint8_t cmci_vector
= 0;
1150 /* CMCI, if available */
1151 if (mca_is_cmci_present()) {
1152 cmci_vector
= LAPIC_VECTOR(CMCI
);
1158 #if DEVELOPMENT || DEBUG
1159 extern void lapic_trigger_MC(void);
1161 lapic_trigger_MC(void)
1163 /* A 64-bit access to any register will do it. */
1164 volatile uint64_t dummy
= *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID
);