2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
56 #include <i386/machine_check.h>
63 #include <sys/kdebug.h>
66 #define PAUSE delay(1000000)
67 #define DBG(x...) kprintf(x)
/*
 * File-scope local APIC driver state.
 * NOTE(review): this text is garbled — original source line numbers are
 * fused into the code (e.g. "73", "75") and declarations are split across
 * lines. Restore from pristine source before compiling.
 */
73 lapic_ops_table_t
*lapic_ops
; /* Lapic operations switch */
/* Physical/virtual addresses of the memory-mapped APIC register page */
75 static vm_map_offset_t lapic_pbase
; /* Physical base memory-mapped regs */
76 static vm_offset_t lapic_vbase
; /* Virtual base memory-mapped regs */
/* Per-vector handler table consulted by lapic_interrupt() */
78 static i386_intr_func_t lapic_intr_func
[LAPIC_FUNC_TABLE_SIZE
];
80 /* TRUE if local APIC was enabled by the OS not by the BIOS */
81 static boolean_t lapic_os_enabled
= FALSE
;
/*
 * Error-interrupt throttling state used by the LAPIC_ERROR_INTERRUPT
 * handler: if more than lapic_error_count_threshold errors arrive within
 * lapic_error_time_threshold absolute-time units, the error LVT is masked.
 */
83 static boolean_t lapic_errors_masked
= FALSE
;
84 static uint64_t lapic_last_master_error
= 0;
85 static uint64_t lapic_error_time_threshold
= 0;
86 static unsigned lapic_master_error_count
= 0;
87 static unsigned lapic_error_count_threshold
= 5;
/* Boot-arg "lapic_dont_panic": demote APIC errors from panic to log */
88 static boolean_t lapic_dont_panic
= FALSE
;
/*
 * Debug helper (MP_DEBUG builds): print both directions of the
 * lapic-id <-> cpu-number map, skipping unused (-1) entries.
 * NOTE(review): fragment is garbled/incomplete — the return type line,
 * loop bodies' kprintf arguments and closing braces are missing here.
 */
92 lapic_cpu_map_dump(void)
96 for (i
= 0; i
< MAX_CPUS
; i
++) {
97 if (cpu_to_lapic
[i
] == -1)
99 kprintf("cpu_to_lapic[%d]: %d\n",
102 for (i
= 0; i
< MAX_LAPICIDS
; i
++) {
103 if (lapic_to_cpu
[i
] == -1)
105 kprintf("lapic_to_cpu[%d]: %d\n",
109 #endif /* MP_DEBUG */
/*
 * Interior of legacy_init(): establish a kernel virtual mapping for the
 * memory-mapped (xAPIC) register page, then set flat logical destination
 * mode. The function header and several body lines are missing from this
 * garbled fragment.
 */
115 vm_map_entry_t entry
;
116 vm_map_offset_t lapic_vbase64
;
117 /* Establish a map to the local apic */
/* Map only once; lapic_vbase != 0 means a prior call already mapped it */
119 if (lapic_vbase
== 0) {
120 lapic_vbase64
= (vm_offset_t
)vm_map_min(kernel_map
);
121 result
= vm_map_find_space(kernel_map
,
123 round_page(LAPIC_SIZE
), 0,
124 VM_MAKE_TAG(VM_MEMORY_IOKIT
), &entry
);
125 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
127 lapic_vbase
= (vm_offset_t
) lapic_vbase64
;
128 if (result
!= KERN_SUCCESS
) {
129 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result
);
/* vm_map_find_space returns with the map locked */
131 vm_map_unlock(kernel_map
);
134 * Map in the local APIC non-cacheable, as recommended by Intel
135 * in section 8.4.1 of the "System Programming Guide".
136 * In fact, this is redundant because EFI will have assigned an
137 * MTRR physical range containing the local APIC's MMIO space as
138 * UC and this will override the default PAT setting.
140 pmap_enter(pmap_kernel(),
142 (ppnum_t
) i386_btop(lapic_pbase
),
143 VM_PROT_READ
|VM_PROT_WRITE
,
150 * Set flat delivery model, logical processor id
151 * This should already be the default set.
153 LAPIC_WRITE(DFR
, LAPIC_DFR_FLAT
);
154 LAPIC_WRITE(LDR
, (get_cpu_number()) << LAPIC_LDR_SHIFT
);
159 legacy_read(lapic_register_t reg
)
161 return *LAPIC_MMIO(reg
);
165 legacy_write(lapic_register_t reg
, uint32_t value
)
167 *LAPIC_MMIO(reg
) = value
;
171 legacy_read_icr(void)
173 return (((uint64_t)*LAPIC_MMIO(ICRD
)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR
));
177 legacy_write_icr(uint32_t dst
, uint32_t cmd
)
179 *LAPIC_MMIO(ICRD
) = dst
<< LAPIC_ICRD_DEST_SHIFT
;
180 *LAPIC_MMIO(ICR
) = cmd
;
/*
 * Operations vector for memory-mapped (xAPIC) access.
 * NOTE(review): the initializer members are missing from this garbled
 * fragment — presumably legacy_init/read/write/read_icr/write_icr; confirm
 * against pristine source.
 */
183 static lapic_ops_table_t legacy_ops
= {
/* TRUE when the processor is operating in x2APIC (MSR-based) mode */
191 static boolean_t is_x2apic
= FALSE
;
/*
 * Interior of the x2APIC-enable routine: if the EXTD bit of
 * IA32_APIC_BASE is clear, set it to switch the processor into
 * x2APIC (MSR-based) mode. Function header/closing are missing from
 * this garbled fragment.
 */
199 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
200 if ((lo
& MSR_IA32_APIC_BASE_EXTENDED
) == 0) {
201 lo
|= MSR_IA32_APIC_BASE_EXTENDED
;
202 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
203 kprintf("x2APIC mode enabled\n");
208 x2apic_read(lapic_register_t reg
)
213 rdmsr(LAPIC_MSR(reg
), lo
, hi
);
218 x2apic_write(lapic_register_t reg
, uint32_t value
)
220 wrmsr(LAPIC_MSR(reg
), value
, 0);
224 x2apic_read_icr(void)
226 return rdmsr64(LAPIC_MSR(ICR
));;
230 x2apic_write_icr(uint32_t dst
, uint32_t cmd
)
232 wrmsr(LAPIC_MSR(ICR
), cmd
, dst
);
/*
 * Operations vector for MSR-based (x2APIC) access.
 * NOTE(review): initializer members missing from this garbled fragment.
 */
235 static lapic_ops_table_t x2apic_ops
= {
/*
 * Interior of lapic_init(): decode IA32_APIC_BASE, sanity-check that we
 * are the enabled boot processor, optionally enable x2APIC, select the
 * ops table, validate the APIC version, and seed the lapic<->cpu map
 * with the boot CPU. Function header and several lines are missing.
 */
248 boolean_t is_boot_processor
;
249 boolean_t is_lapic_enabled
;
251 /* Examine the local APIC state */
252 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
253 is_boot_processor
= (lo
& MSR_IA32_APIC_BASE_BSP
) != 0;
254 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
255 is_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
256 lapic_pbase
= (lo
& MSR_IA32_APIC_BASE_BASE
);
257 kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase
,
258 is_lapic_enabled
? "enabled" : "disabled",
259 is_x2apic
? "extended" : "legacy",
260 is_boot_processor
? "BSP" : "AP");
261 if (!is_boot_processor
|| !is_lapic_enabled
)
262 panic("Unexpected local APIC state\n");
265 * If x2APIC is available and not already enabled, enable it.
266 * Unless overriden by boot-arg.
268 if (!is_x2apic
&& (cpuid_features() & CPUID_FEATURE_x2APIC
)) {
269 PE_parse_boot_argn("-x2apic", &is_x2apic
, sizeof(is_x2apic
));
270 kprintf("x2APIC supported %s be enabled\n",
271 is_x2apic
? "and will" : "but will not");
/* Select the access method for all subsequent LAPIC_READ/WRITE */
274 lapic_ops
= is_x2apic
? &x2apic_ops
: &legacy_ops
;
278 kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID
), LAPIC_READ(LDR
));
/* Require an integrated APIC (version >= 0x14) */
279 if ((LAPIC_READ(VERSION
)&LAPIC_VERSION_MASK
) < 0x14) {
280 panic("Local APIC version 0x%x, 0x14 or more expected\n",
281 (LAPIC_READ(VERSION
)&LAPIC_VERSION_MASK
));
284 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
285 lapic_cpu_map_init();
286 lapic_cpu_map((LAPIC_READ(ID
)>>LAPIC_ID_SHIFT
)&LAPIC_ID_MASK
, 0);
287 current_cpu_datap()->cpu_phys_number
= cpu_to_lapic
[0];
288 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic
[0]);
295 /* write-read register */
296 LAPIC_WRITE(ERROR_STATUS
, 0);
297 return LAPIC_READ(ERROR_STATUS
);
301 lapic_esr_clear(void)
303 LAPIC_WRITE(ERROR_STATUS
, 0);
304 LAPIC_WRITE(ERROR_STATUS
, 0);
/*
 * Human-readable decode tables and field-extraction macros used by
 * lapic_dump(), followed by the interior of lapic_dump() itself.
 * NOTE(review): this fragment is garbled — the string-table initializers,
 * macro names (VEC/DS/DM/MASK/TM/IP presumably), several kprintf argument
 * lists and all braces are missing; confirm against pristine source.
 */
307 static const char *DM_str
[8] = {
317 static const char *TMR_str
[] = {
329 #define BOOL(a) ((a)?' ':'!')
331 LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
333 (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
335 DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
337 BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
339 (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
341 (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"
/* Dump identification, priority, destination and SVR state */
343 kprintf("LAPIC %d at %p version 0x%x\n",
344 (LAPIC_READ(ID
)>>LAPIC_ID_SHIFT
)&LAPIC_ID_MASK
,
345 (void *) lapic_vbase
,
346 LAPIC_READ(VERSION
)&LAPIC_VERSION_MASK
);
347 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
348 LAPIC_READ(TPR
)&LAPIC_TPR_MASK
,
349 LAPIC_READ(APR
)&LAPIC_APR_MASK
,
350 LAPIC_READ(PPR
)&LAPIC_PPR_MASK
);
/* DFR does not exist in x2APIC mode, hence the is_x2apic special case */
351 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
352 is_x2apic
? 0 : LAPIC_READ(DFR
)>>LAPIC_DFR_SHIFT
,
353 LAPIC_READ(LDR
)>>LAPIC_LDR_SHIFT
);
354 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
355 BOOL(LAPIC_READ(SVR
)&LAPIC_SVR_ENABLE
),
356 BOOL(!(LAPIC_READ(SVR
)&LAPIC_SVR_FOCUS_OFF
)),
357 LAPIC_READ(SVR
) & LAPIC_SVR_MASK
);
/* Per-LVT entries: CMCI only when machine-check CMCI is present */
359 if (mca_is_cmci_present())
360 kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
366 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
370 TMR_str
[(LAPIC_READ(LVT_TIMER
) >> LAPIC_LVT_TMR_SHIFT
)
371 & LAPIC_LVT_TMR_MASK
]);
372 kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT
));
373 kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT
));
374 kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG
));
375 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
380 kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
385 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
392 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
399 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
403 kprintf("ESR: %08x \n", lapic_esr_read());
/* Dump the 256-bit TMR/IRR/ISR register arrays, highest word first */
405 for(i
=0xf; i
>=0; i
--)
406 kprintf("%x%x%x%x",i
,i
,i
,i
);
410 kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE
, i
));
414 kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE
, i
));
417 for(i
=7; i
>= 0; i
--)
418 kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE
, i
));
/*
 * Interior of lapic_probe(): report whether a local APIC is present,
 * attempting to enable a BIOS-disabled APIC on family 6/15 parts.
 * Function header, returns and some lines are missing from this fragment.
 */
428 if (cpuid_features() & CPUID_FEATURE_APIC
)
431 if (cpuid_family() == 6 || cpuid_family() == 15) {
434 * There may be a local APIC which wasn't enabled by BIOS.
435 * So we try to enable it explicitly.
437 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
438 lo
&= ~MSR_IA32_APIC_BASE_BASE
;
439 lo
|= MSR_IA32_APIC_BASE_ENABLE
| LAPIC_START
;
/*
 * NOTE(review): this second OR of MSR_IA32_APIC_BASE_ENABLE is
 * redundant — the bit was already set on the previous line.
 */
440 lo
|= MSR_IA32_APIC_BASE_ENABLE
;
441 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
444 * Re-initialize cpu features info and re-check.
447 /* We expect this codepath will never be traversed
448 * due to EFI enabling the APIC. Reducing the APIC
449 * interrupt base dynamically is not supported.
451 if (cpuid_features() & CPUID_FEATURE_APIC
) {
452 printf("Local APIC discovered and enabled\n");
453 lapic_os_enabled
= TRUE
;
454 lapic_interrupt_base
= LAPIC_REDUCED_INTERRUPT_BASE
;
/*
 * Interior of lapic_shutdown(): undo OS enablement — mask LINT0 (master
 * CPU only), error, timer and perfmon LVTs, software-disable via SVR,
 * then clear the hardware enable bit in IA32_APIC_BASE. Only runs when
 * the OS (not the BIOS) enabled the APIC. Header/braces missing from
 * this garbled fragment.
 */
469 /* Shutdown if local APIC was enabled by OS */
470 if (lapic_os_enabled
== FALSE
)
473 mp_disable_preemption();
/* ExtINT: masked — only the master CPU carries the ExtINT line */
476 if (get_cpu_number() == master_cpu
) {
477 value
= LAPIC_READ(LVT_LINT0
);
478 value
|= LAPIC_LVT_MASKED
;
479 LAPIC_WRITE(LVT_LINT0
, value
);
/* Error: masked */
483 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
/* Timer: masked */
486 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) | LAPIC_LVT_MASKED
);
488 /* Perfmon: masked */
489 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_READ(LVT_PERFCNT
) | LAPIC_LVT_MASKED
);
491 /* APIC software disabled */
492 LAPIC_WRITE(SVR
, LAPIC_READ(SVR
) & ~LAPIC_SVR_ENABLE
);
494 /* Bypass the APIC completely and update cpu features */
495 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
496 lo
&= ~MSR_IA32_APIC_BASE_ENABLE
;
497 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
500 mp_enable_preemption();
/*
 * lapic_configure(): program the per-CPU LVT entries for normal
 * operation — enable via SVR with the spurious vector, route ExtINT on
 * the master CPU, and unmask timer/perfmon/thermal/CMCI/error vectors.
 * Also (once, on cpu 0) derive the error-throttling time threshold and
 * the lapic_dont_panic boot-arg. Braces and some lines are missing from
 * this garbled fragment.
 */
504 lapic_configure(void)
/* One-time setup: threshold = 1/4 second in absolute-time units */
508 if (lapic_error_time_threshold
== 0 && cpu_number() == 0) {
509 nanoseconds_to_absolutetime(NSEC_PER_SEC
>> 2, &lapic_error_time_threshold
);
510 if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic
, sizeof(lapic_dont_panic
))) {
511 lapic_dont_panic
= FALSE
;
/* Accept all; software enable via SVR */
518 LAPIC_WRITE(SVR
, LAPIC_VECTOR(SPURIOUS
) | LAPIC_SVR_ENABLE
);
/* ExtINT delivery: unmasked on the master CPU only */
521 if (get_cpu_number() == master_cpu
) {
522 value
= LAPIC_READ(LVT_LINT0
);
523 value
&= ~LAPIC_LVT_MASKED
;
524 value
|= LAPIC_LVT_DM_EXTINT
;
525 LAPIC_WRITE(LVT_LINT0
, value
);
528 /* Timer: unmasked, one-shot */
529 LAPIC_WRITE(LVT_TIMER
, LAPIC_VECTOR(TIMER
));
531 /* Perfmon: unmasked */
532 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
534 /* Thermal: unmasked */
535 LAPIC_WRITE(LVT_THERMAL
, LAPIC_VECTOR(THERMAL
));
538 /* CMCI, if available */
539 if (mca_is_cmci_present())
540 LAPIC_WRITE(LVT_CMCI
, LAPIC_VECTOR(CMCI
));
/* Error: unmasked unless errors were previously masked on the master */
543 if (((cpu_number() == master_cpu
) && lapic_errors_masked
== FALSE
) ||
544 (cpu_number() != master_cpu
)) {
546 LAPIC_WRITE(LVT_ERROR
, LAPIC_VECTOR(ERROR
));
552 boolean_t interrupt_unmasked
,
553 lapic_timer_mode_t mode
,
554 lapic_timer_divide_t divisor
,
555 lapic_timer_count_t initial_count
)
557 uint32_t timer_vector
;
559 mp_disable_preemption();
560 timer_vector
= LAPIC_READ(LVT_TIMER
);
561 timer_vector
&= ~(LAPIC_LVT_MASKED
|LAPIC_LVT_PERIODIC
);;
562 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
563 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
564 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
565 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
566 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
567 mp_enable_preemption();
/*
 * Interior of lapic_config_timer(): configure mask/mode/divisor of the
 * LVT timer without arming a count. Preemption is disabled around the
 * read-modify-write of LVT_TIMER.
 * NOTE(review): garbled fragment — the function name line is missing and
 * the mask-clearing expression has a gap between LAPIC_LVT_MASKED and
 * LAPIC_LVT_TSC_DEADLINE (a LAPIC_LVT_PERIODIC term presumably belongs
 * there; confirm against pristine source).
 */
572 boolean_t interrupt_unmasked
,
573 lapic_timer_mode_t mode
,
574 lapic_timer_divide_t divisor
)
576 uint32_t timer_vector
;
578 mp_disable_preemption();
579 timer_vector
= LAPIC_READ(LVT_TIMER
);
580 timer_vector
&= ~(LAPIC_LVT_MASKED
|
582 LAPIC_LVT_TSC_DEADLINE
);
583 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
584 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
585 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
586 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
587 mp_enable_preemption();
/*
 * lapic_config_tsc_deadline_timer(): switch the LVT timer into
 * TSC-deadline mode, then perform a write/read/clear cycle of
 * IA32_TSC_DEADLINE to serialize the mode change as the Intel OSWG
 * recommends. Garbled fragment: the do-loop opener, mask contents and
 * braces are missing.
 */
591 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
594 lapic_config_tsc_deadline_timer(void)
596 uint32_t timer_vector
;
598 DBG("lapic_config_tsc_deadline_timer()\n");
599 mp_disable_preemption();
600 timer_vector
= LAPIC_READ(LVT_TIMER
);
601 timer_vector
&= ~(LAPIC_LVT_MASKED
|
603 timer_vector
|= LAPIC_LVT_TSC_DEADLINE
;
604 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
606 /* Serialize writes per Intel OSWG */
/* Arm a far-future deadline until the MSR reads back non-zero */
608 lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
609 } while (lapic_get_tsc_deadline_timer() == 0);
610 lapic_set_tsc_deadline_timer(0);
612 mp_enable_preemption();
613 DBG("lapic_config_tsc_deadline_timer() done\n");
617 lapic_set_timer_fast(
618 lapic_timer_count_t initial_count
)
620 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) & ~LAPIC_LVT_MASKED
);
621 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
625 lapic_set_tsc_deadline_timer(uint64_t deadline
)
627 /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
628 wrmsr64(MSR_IA32_TSC_DEADLINE
, deadline
);
632 lapic_get_tsc_deadline_timer(void)
634 return rdmsr64(MSR_IA32_TSC_DEADLINE
);
/*
 * Interior of lapic_get_timer(): report current timer mode, divisor,
 * initial and current counts through the supplied out-pointers, under
 * disabled preemption. Garbled fragment: the function name line, the
 * ternary result values after the LAPIC_LVT_PERIODIC test, and
 * presumably per-pointer NULL guards are missing — confirm against
 * pristine source.
 */
639 lapic_timer_mode_t
*mode
,
640 lapic_timer_divide_t
*divisor
,
641 lapic_timer_count_t
*initial_count
,
642 lapic_timer_count_t
*current_count
)
644 mp_disable_preemption();
646 *mode
= (LAPIC_READ(LVT_TIMER
) & LAPIC_LVT_PERIODIC
) ?
649 *divisor
= LAPIC_READ(TIMER_DIVIDE_CONFIG
) & LAPIC_TIMER_DIVIDE_MASK
;
651 *initial_count
= LAPIC_READ(TIMER_INITIAL_COUNT
);
653 *current_count
= LAPIC_READ(TIMER_CURRENT_COUNT
);
654 mp_enable_preemption();
/*
 * _lapic_end_of_interrupt(): internal EOI primitive (body missing from
 * this garbled fragment — presumably a write to the EOI register).
 * lapic_end_of_interrupt(): public wrapper that issues the EOI.
 */
658 _lapic_end_of_interrupt(void)
664 lapic_end_of_interrupt(void)
666 _lapic_end_of_interrupt();
669 void lapic_unmask_perfcnt_interrupt(void) {
670 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
673 void lapic_set_perfcnt_interrupt_mask(boolean_t mask
) {
674 uint32_t m
= (mask
? LAPIC_LVT_MASKED
: 0);
675 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
) | m
);
/*
 * lapic_set_intr_func(): register `func` as the handler for a local
 * APIC interrupt. Accepts either an absolute vector (normalized by
 * subtracting lapic_interrupt_base) or an already-relative one, and
 * panics on vectors outside the known set. Garbled fragment: return
 * type, switch statement line, break statements, panic arguments and
 * braces are missing.
 */
679 lapic_set_intr_func(int vector
, i386_intr_func_t func
)
681 if (vector
> lapic_interrupt_base
)
682 vector
-= lapic_interrupt_base
;
685 case LAPIC_NMI_INTERRUPT
:
686 case LAPIC_INTERPROCESSOR_INTERRUPT
:
687 case LAPIC_TIMER_INTERRUPT
:
688 case LAPIC_THERMAL_INTERRUPT
:
689 case LAPIC_PERFCNT_INTERRUPT
:
690 case LAPIC_CMCI_INTERRUPT
:
691 case LAPIC_PM_INTERRUPT
:
692 lapic_intr_func
[vector
] = func
;
695 panic("lapic_set_intr_func(%d,%p) invalid vector\n",
700 void lapic_set_pmi_func(i386_intr_func_t func
) {
701 lapic_set_intr_func(LAPIC_VECTOR(PERFCNT
), func
);
/*
 * lapic_interrupt(): central dispatch for local APIC interrupt sources.
 * Normalizes the vector to be relative to lapic_interrupt_base, invokes
 * any registered handler from lapic_intr_func[], issues EOIs where
 * required, and implements throttled handling of APIC error interrupts.
 * Garbled fragment: return type, locals, braces, breaks, returns and
 * several lines are missing.
 */
705 lapic_interrupt(int interrupt_num
, x86_saved_state_t
*state
)
/* Negative after normalization: possibly the NMI pseudo-vector */
710 interrupt_num
-= lapic_interrupt_base
;
711 if (interrupt_num
< 0) {
712 if (interrupt_num
== (LAPIC_NMI_INTERRUPT
- lapic_interrupt_base
) &&
713 lapic_intr_func
[LAPIC_NMI_INTERRUPT
] != NULL
) {
714 retval
= (*lapic_intr_func
[LAPIC_NMI_INTERRUPT
])(state
);
721 switch(interrupt_num
) {
722 case LAPIC_TIMER_INTERRUPT
:
723 case LAPIC_THERMAL_INTERRUPT
:
724 case LAPIC_INTERPROCESSOR_INTERRUPT
:
725 case LAPIC_PM_INTERRUPT
:
/* Invoke registered handler (if any), then EOI */
726 if (lapic_intr_func
[interrupt_num
] != NULL
)
727 (void) (*lapic_intr_func
[interrupt_num
])(state
);
728 _lapic_end_of_interrupt();
731 case LAPIC_PERFCNT_INTERRUPT
:
732 /* If a function has been registered, invoke it. Otherwise,
735 if (lapic_intr_func
[interrupt_num
] != NULL
) {
736 (void) (*lapic_intr_func
[interrupt_num
])(state
);
737 /* Unmask the interrupt since we don't expect legacy users
738 * to be responsible for it.
740 lapic_unmask_perfcnt_interrupt();
741 _lapic_end_of_interrupt();
745 case LAPIC_CMCI_INTERRUPT
:
746 if (lapic_intr_func
[interrupt_num
] != NULL
)
747 (void) (*lapic_intr_func
[interrupt_num
])(state
);
748 /* return 0 for platform expert to handle */
750 case LAPIC_ERROR_INTERRUPT
:
751 /* We treat error interrupts on APs as fatal.
752 * The current interrupt steering scheme directs most
753 * external interrupts to the BSP (HPET interrupts being
754 * a notable exception); hence, such an error
755 * on an AP may signify LVT corruption (with "may" being
756 * the operative word). On the BSP, we adopt a more
757 * lenient approach, in the interests of enhancing
758 * debuggability and reducing fragility.
759 * If "lapic_error_count_threshold" error interrupts
760 * occur within "lapic_error_time_threshold" absolute
761 * time units, we mask the error vector and log. The
762 * error interrupts themselves are likely
763 * side effects of issues which are beyond the purview of
764 * the local APIC interrupt handler, however. The Error
765 * Status Register value (the illegal destination
766 * vector code is one observed in practice) indicates
767 * the immediate cause of the error.
769 esr
= lapic_esr_read();
772 if ((debug_boot_arg
&& (lapic_dont_panic
== FALSE
)) ||
773 cpu_number() != master_cpu
) {
774 panic("Local APIC error, ESR: %d\n", esr
);
777 if (cpu_number() == master_cpu
) {
778 uint64_t abstime
= mach_absolute_time();
/* Within the throttling window: count, and mask past threshold */
779 if ((abstime
- lapic_last_master_error
) < lapic_error_time_threshold
) {
780 if (lapic_master_error_count
++ > lapic_error_count_threshold
) {
781 lapic_errors_masked
= TRUE
;
782 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
783 printf("Local APIC: errors masked\n");
/* Outside the window: restart the error-rate accounting */
787 lapic_last_master_error
= abstime
;
788 lapic_master_error_count
= 0;
790 printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr
, lapic_master_error_count
);
793 _lapic_end_of_interrupt();
796 case LAPIC_SPURIOUS_INTERRUPT
:
798 /* No EOI required here */
801 case LAPIC_PMC_SW_INTERRUPT
:
/* CONFIG_COUNTERS: notify the PMC subsystem of a context switch */
805 ml_get_csw_threads(&old
, &new);
807 if (pmc_context_switch(old
, new) == TRUE
) {
809 /* No EOI required for SWI */
811 #endif /* CONFIG_COUNTERS */
/*
 * lapic_smm_restore(): clean up after a bogus SMI handler. If a timer
 * interrupt was left in-service while in SMM, issue the missing EOI and
 * re-arm a minimal countdown so the OS still observes a timer tick.
 * Only relevant when the OS enabled the APIC. Garbled fragment: return
 * type, locals and braces are missing.
 */
820 lapic_smm_restore(void)
824 if (lapic_os_enabled
== FALSE
)
827 state
= ml_set_interrupts_enabled(FALSE
);
829 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE
, TIMER
)) {
831 * Bogus SMI handler enables interrupts but does not know about
832 * local APIC interrupt sources. When APIC timer counts down to
833 * zero while in SMM, local APIC will end up waiting for an EOI
834 * but no interrupt was delivered to the OS.
836 _lapic_end_of_interrupt();
839 * timer is one-shot, trigger another quick countdown to trigger
840 * another timer interrupt.
842 if (LAPIC_READ(TIMER_CURRENT_COUNT
) == 0) {
843 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 1);
846 kprintf("lapic_smm_restore\n");
849 ml_set_interrupts_enabled(state
);
/*
 * lapic_send_ipi(): send a fixed-delivery IPI carrying `vector` to the
 * local APIC of logical `cpu`, with interrupts disabled around the ICR
 * access. Spins until any previously initiated send completes. Garbled
 * fragment: return type, the `state` declaration, the spin-loop body
 * and braces are missing.
 */
853 lapic_send_ipi(int cpu
, int vector
)
/* Normalize a relative vector to its absolute value */
857 if (vector
< lapic_interrupt_base
)
858 vector
+= lapic_interrupt_base
;
860 state
= ml_set_interrupts_enabled(FALSE
);
862 /* Wait for pending outgoing send to complete */
863 while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING
) {
867 LAPIC_WRITE_ICR(cpu_to_lapic
[cpu
], vector
| LAPIC_ICR_DM_FIXED
);
869 (void) ml_set_interrupts_enabled(state
);
/*
 * Interrupt-state query interfaces (privately exported to AICPM).
 * Each scans the 256-bit IRR/ISR register arrays as eight 32-bit words.
 * Garbled fragment: return types, locals, returns and braces missing.
 */
873 * The following interfaces are privately exported to AICPM.
/* TRUE if any interrupt is pending (IRR) or in service (ISR) */
877 lapic_is_interrupt_pending(void)
881 for (i
= 0; i
< 8; i
+= 1) {
882 if ((LAPIC_READ_OFFSET(IRR_BASE
, i
) != 0) ||
883 (LAPIC_READ_OFFSET(ISR_BASE
, i
) != 0))
/* TRUE if the specific vector is pending or in service */
891 lapic_is_interrupting(uint8_t vector
)
899 bit
= 1 << (vector
% 32);
901 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
902 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
904 if ((irr
| isr
) & bit
)
/*
 * Accumulate per-vector activity into intrs[256]; vectors below 16 in
 * word 0 are skipped (architecturally reserved).
 */
911 lapic_interrupt_counts(uint64_t intrs
[256])
922 for (i
= 0; i
< 8; i
+= 1) {
923 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
924 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
926 if ((isr
| irr
) == 0)
929 for (j
= (i
== 0) ? 16 : 0; j
< 32; j
+= 1) {
931 if ((isr
| irr
) & (1 << j
))
/*
 * lapic_disable_timer(): stop the local APIC timer. In TSC-deadline
 * mode it suffices to clear IA32_TSC_DEADLINE; otherwise mask the LVT
 * timer entry and zero the countdown. Garbled fragment: return type,
 * the lvt_timer declaration, the else clause and braces are missing.
 */
938 lapic_disable_timer(void)
943 * If we're in deadline timer mode,
944 * simply clear the deadline timer, otherwise
945 * mask the timer interrupt and clear the countdown.
947 lvt_timer
= LAPIC_READ(LVT_TIMER
);
948 if (lvt_timer
& LAPIC_LVT_TSC_DEADLINE
) {
949 wrmsr64(MSR_IA32_TSC_DEADLINE
, 0);
951 LAPIC_WRITE(LVT_TIMER
, lvt_timer
| LAPIC_LVT_MASKED
);
952 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 0);
/* Read-back, presumably to serialize the masking write — confirm */
953 lvt_timer
= LAPIC_READ(LVT_TIMER
);
957 /* SPI returning the CMCI vector */
959 lapic_get_cmci_vector(void)
961 uint8_t cmci_vector
= 0;
963 /* CMCI, if available */
964 if (mca_is_cmci_present())
965 cmci_vector
= LAPIC_VECTOR(CMCI
);
/*
 * lapic_trigger_MC(): deliberately provoke a machine check by making a
 * 64-bit access to the (32-bit) memory-mapped APIC ID register.
 * NOTE(review): the definition continues past the end of this fragment;
 * remainder not visible here.
 */
971 extern void lapic_trigger_MC(void);
973 lapic_trigger_MC(void)
975 /* A 64-bit access to any register will do it. */
976 volatile uint64_t dummy
= *(uint64_t *) (void *) LAPIC_MMIO(ID
);