2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
56 #include <i386/machine_check.h>
59 #include <sys/kdebug.h>
62 #define PAUSE delay(1000000)
63 #define DBG(x...) kprintf(x)
69 lapic_ops_table_t
*lapic_ops
; /* Lapic operations switch */
71 static vm_map_offset_t lapic_pbase
; /* Physical base memory-mapped regs */
72 static vm_offset_t lapic_vbase
; /* Virtual base memory-mapped regs */
74 static i386_intr_func_t lapic_intr_func
[LAPIC_FUNC_TABLE_SIZE
];
76 /* TRUE if local APIC was enabled by the OS not by the BIOS */
77 static boolean_t lapic_os_enabled
= FALSE
;
79 static boolean_t lapic_errors_masked
= FALSE
;
80 static uint64_t lapic_last_master_error
= 0;
81 static uint64_t lapic_error_time_threshold
= 0;
82 static unsigned lapic_master_error_count
= 0;
83 static unsigned lapic_error_count_threshold
= 5;
84 static boolean_t lapic_dont_panic
= FALSE
;
85 int lapic_max_interrupt_cpunum
= 0;
89 lapic_cpu_map_dump(void)
93 for (i
= 0; i
< MAX_CPUS
; i
++) {
94 if (cpu_to_lapic
[i
] == -1) {
97 kprintf("cpu_to_lapic[%d]: %d\n",
100 for (i
= 0; i
< MAX_LAPICIDS
; i
++) {
101 if (lapic_to_cpu
[i
] == -1) {
104 kprintf("lapic_to_cpu[%d]: %d\n",
108 #endif /* MP_DEBUG */
115 vm_map_entry_t entry
;
116 vm_map_offset_t lapic_vbase64
;
117 /* Establish a map to the local apic */
119 if (lapic_vbase
== 0) {
120 lapic_vbase64
= (vm_offset_t
)vm_map_min(kernel_map
);
121 result
= vm_map_find_space(kernel_map
,
123 round_page(LAPIC_SIZE
), 0,
125 VM_MAP_KERNEL_FLAGS_NONE
,
126 VM_KERN_MEMORY_IOKIT
,
128 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
130 lapic_vbase
= (vm_offset_t
) lapic_vbase64
;
131 if (result
!= KERN_SUCCESS
) {
132 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result
);
134 vm_map_unlock(kernel_map
);
137 * Map in the local APIC non-cacheable, as recommended by Intel
138 * in section 8.4.1 of the "System Programming Guide".
139 * In fact, this is redundant because EFI will have assigned an
140 * MTRR physical range containing the local APIC's MMIO space as
141 * UC and this will override the default PAT setting.
143 kr
= pmap_enter(pmap_kernel(),
145 (ppnum_t
) i386_btop(lapic_pbase
),
146 VM_PROT_READ
| VM_PROT_WRITE
,
151 assert(kr
== KERN_SUCCESS
);
155 * Set flat delivery model, logical processor id
156 * This should already be the default set.
158 LAPIC_WRITE(DFR
, LAPIC_DFR_FLAT
);
159 LAPIC_WRITE(LDR
, (get_cpu_number()) << LAPIC_LDR_SHIFT
);
164 legacy_read(lapic_register_t reg
)
166 return *LAPIC_MMIO(reg
);
170 legacy_write(lapic_register_t reg
, uint32_t value
)
172 *LAPIC_MMIO(reg
) = value
;
176 legacy_read_icr(void)
178 return (((uint64_t)*LAPIC_MMIO(ICRD
)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR
));
182 legacy_write_icr(uint32_t dst
, uint32_t cmd
)
184 *LAPIC_MMIO(ICRD
) = dst
<< LAPIC_ICRD_DEST_SHIFT
;
185 *LAPIC_MMIO(ICR
) = cmd
;
188 static lapic_ops_table_t legacy_ops
= {
196 static boolean_t is_x2apic
= FALSE
;
204 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
205 if ((lo
& MSR_IA32_APIC_BASE_EXTENDED
) == 0) {
206 lo
|= MSR_IA32_APIC_BASE_EXTENDED
;
207 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
208 kprintf("x2APIC mode enabled\n");
213 x2apic_read(lapic_register_t reg
)
218 rdmsr(LAPIC_MSR(reg
), lo
, hi
);
223 x2apic_write(lapic_register_t reg
, uint32_t value
)
225 wrmsr(LAPIC_MSR(reg
), value
, 0);
229 x2apic_read_icr(void)
231 return rdmsr64(LAPIC_MSR(ICR
));;
235 x2apic_write_icr(uint32_t dst
, uint32_t cmd
)
237 wrmsr(LAPIC_MSR(ICR
), cmd
, dst
);
240 static lapic_ops_table_t x2apic_ops
= {
253 boolean_t is_boot_processor
;
254 boolean_t is_lapic_enabled
;
256 /* Examine the local APIC state */
257 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
258 is_boot_processor
= (lo
& MSR_IA32_APIC_BASE_BSP
) != 0;
259 is_lapic_enabled
= (lo
& MSR_IA32_APIC_BASE_ENABLE
) != 0;
260 is_x2apic
= (lo
& MSR_IA32_APIC_BASE_EXTENDED
) != 0;
261 lapic_pbase
= (lo
& MSR_IA32_APIC_BASE_BASE
);
262 kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase
,
263 is_lapic_enabled
? "enabled" : "disabled",
264 is_x2apic
? "extended" : "legacy",
265 is_boot_processor
? "BSP" : "AP");
266 if (!is_boot_processor
|| !is_lapic_enabled
) {
267 panic("Unexpected local APIC state\n");
271 * If x2APIC is available and not already enabled, enable it.
272 * Unless overriden by boot-arg.
274 if (!is_x2apic
&& (cpuid_features() & CPUID_FEATURE_x2APIC
)) {
275 PE_parse_boot_argn("-x2apic", &is_x2apic
, sizeof(is_x2apic
));
276 kprintf("x2APIC supported %s be enabled\n",
277 is_x2apic
? "and will" : "but will not");
280 lapic_ops
= is_x2apic
? &x2apic_ops
: &legacy_ops
;
284 kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID
), LAPIC_READ(LDR
));
285 if ((LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
) < 0x14) {
286 panic("Local APIC version 0x%x, 0x14 or more expected\n",
287 (LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
));
290 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
291 lapic_cpu_map_init();
292 lapic_cpu_map((LAPIC_READ(ID
) >> LAPIC_ID_SHIFT
) & LAPIC_ID_MASK
, 0);
293 current_cpu_datap()->cpu_phys_number
= cpu_to_lapic
[0];
294 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic
[0]);
301 /* write-read register */
302 LAPIC_WRITE(ERROR_STATUS
, 0);
303 return LAPIC_READ(ERROR_STATUS
);
307 lapic_esr_clear(void)
309 LAPIC_WRITE(ERROR_STATUS
, 0);
310 LAPIC_WRITE(ERROR_STATUS
, 0);
313 static const char *DM_str
[8] = {
324 static const char *TMR_str
[] = {
336 #define BOOL(a) ((a)?' ':'!')
338 LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
340 (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
342 DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
344 BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
346 (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
348 (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"
350 kprintf("LAPIC %d at %p version 0x%x\n",
351 (LAPIC_READ(ID
) >> LAPIC_ID_SHIFT
) & LAPIC_ID_MASK
,
352 (void *) lapic_vbase
,
353 LAPIC_READ(VERSION
) & LAPIC_VERSION_MASK
);
354 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
355 LAPIC_READ(TPR
) & LAPIC_TPR_MASK
,
356 LAPIC_READ(APR
) & LAPIC_APR_MASK
,
357 LAPIC_READ(PPR
) & LAPIC_PPR_MASK
);
358 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
359 is_x2apic
? 0 : LAPIC_READ(DFR
) >> LAPIC_DFR_SHIFT
,
360 LAPIC_READ(LDR
) >> LAPIC_LDR_SHIFT
);
361 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
362 BOOL(LAPIC_READ(SVR
) & LAPIC_SVR_ENABLE
),
363 BOOL(!(LAPIC_READ(SVR
) & LAPIC_SVR_FOCUS_OFF
)),
364 LAPIC_READ(SVR
) & LAPIC_SVR_MASK
);
366 if (mca_is_cmci_present()) {
367 kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
374 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
378 TMR_str
[(LAPIC_READ(LVT_TIMER
) >> LAPIC_LVT_TMR_SHIFT
)
379 & LAPIC_LVT_TMR_MASK
]);
380 kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT
));
381 kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT
));
382 kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG
));
383 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
388 kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
393 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
400 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
407 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
411 kprintf("ESR: %08x \n", lapic_esr_read());
413 for (i
= 0xf; i
>= 0; i
--) {
414 kprintf("%x%x%x%x", i
, i
, i
, i
);
418 for (i
= 7; i
>= 0; i
--) {
419 kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE
, i
));
423 for (i
= 7; i
>= 0; i
--) {
424 kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE
, i
));
428 for (i
= 7; i
>= 0; i
--) {
429 kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE
, i
));
440 if (cpuid_features() & CPUID_FEATURE_APIC
) {
444 if (cpuid_family() == 6 || cpuid_family() == 15) {
447 * There may be a local APIC which wasn't enabled by BIOS.
448 * So we try to enable it explicitly.
450 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
451 lo
&= ~MSR_IA32_APIC_BASE_BASE
;
452 lo
|= MSR_IA32_APIC_BASE_ENABLE
| LAPIC_START
;
453 lo
|= MSR_IA32_APIC_BASE_ENABLE
;
454 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
457 * Re-initialize cpu features info and re-check.
460 /* We expect this codepath will never be traversed
461 * due to EFI enabling the APIC. Reducing the APIC
462 * interrupt base dynamically is not supported.
464 if (cpuid_features() & CPUID_FEATURE_APIC
) {
465 printf("Local APIC discovered and enabled\n");
466 lapic_os_enabled
= TRUE
;
467 lapic_interrupt_base
= LAPIC_REDUCED_INTERRUPT_BASE
;
482 /* Shutdown if local APIC was enabled by OS */
483 if (lapic_os_enabled
== FALSE
) {
487 mp_disable_preemption();
490 if (get_cpu_number() <= lapic_max_interrupt_cpunum
) {
491 value
= LAPIC_READ(LVT_LINT0
);
492 value
|= LAPIC_LVT_MASKED
;
493 LAPIC_WRITE(LVT_LINT0
, value
);
497 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
500 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) | LAPIC_LVT_MASKED
);
502 /* Perfmon: masked */
503 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_READ(LVT_PERFCNT
) | LAPIC_LVT_MASKED
);
505 /* APIC software disabled */
506 LAPIC_WRITE(SVR
, LAPIC_READ(SVR
) & ~LAPIC_SVR_ENABLE
);
508 /* Bypass the APIC completely and update cpu features */
509 rdmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
510 lo
&= ~MSR_IA32_APIC_BASE_ENABLE
;
511 wrmsr(MSR_IA32_APIC_BASE
, lo
, hi
);
514 mp_enable_preemption();
518 cpu_can_exit(int cpu
)
520 return cpu
> lapic_max_interrupt_cpunum
;
524 lapic_configure(void)
528 if (lapic_error_time_threshold
== 0 && cpu_number() == 0) {
529 nanoseconds_to_absolutetime(NSEC_PER_SEC
>> 2, &lapic_error_time_threshold
);
530 if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic
, sizeof(lapic_dont_panic
))) {
531 lapic_dont_panic
= FALSE
;
535 if (cpu_number() == 0) {
536 if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum
, sizeof(lapic_max_interrupt_cpunum
))) {
537 lapic_max_interrupt_cpunum
= ((cpuid_features() & CPUID_FEATURE_HTT
) ? 1 : 0);
544 LAPIC_WRITE(SVR
, LAPIC_VECTOR(SPURIOUS
) | LAPIC_SVR_ENABLE
);
547 if (get_cpu_number() <= lapic_max_interrupt_cpunum
) {
548 value
= LAPIC_READ(LVT_LINT0
);
549 value
&= ~LAPIC_LVT_MASKED
;
550 value
|= LAPIC_LVT_DM_EXTINT
;
551 LAPIC_WRITE(LVT_LINT0
, value
);
554 /* Timer: unmasked, one-shot */
555 LAPIC_WRITE(LVT_TIMER
, LAPIC_VECTOR(TIMER
));
557 /* Perfmon: unmasked */
558 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
560 /* Thermal: unmasked */
561 LAPIC_WRITE(LVT_THERMAL
, LAPIC_VECTOR(THERMAL
));
564 /* CMCI, if available */
565 if (mca_is_cmci_present()) {
566 LAPIC_WRITE(LVT_CMCI
, LAPIC_VECTOR(CMCI
));
570 if (((cpu_number() == master_cpu
) && lapic_errors_masked
== FALSE
) ||
571 (cpu_number() != master_cpu
)) {
573 LAPIC_WRITE(LVT_ERROR
, LAPIC_VECTOR(ERROR
));
579 boolean_t interrupt_unmasked
,
580 lapic_timer_mode_t mode
,
581 lapic_timer_divide_t divisor
,
582 lapic_timer_count_t initial_count
)
584 uint32_t timer_vector
;
586 mp_disable_preemption();
587 timer_vector
= LAPIC_READ(LVT_TIMER
);
588 timer_vector
&= ~(LAPIC_LVT_MASKED
| LAPIC_LVT_PERIODIC
);;
589 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
590 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
591 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
592 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
593 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
594 mp_enable_preemption();
599 boolean_t interrupt_unmasked
,
600 lapic_timer_mode_t mode
,
601 lapic_timer_divide_t divisor
)
603 uint32_t timer_vector
;
605 mp_disable_preemption();
606 timer_vector
= LAPIC_READ(LVT_TIMER
);
607 timer_vector
&= ~(LAPIC_LVT_MASKED
|
609 LAPIC_LVT_TSC_DEADLINE
);
610 timer_vector
|= interrupt_unmasked
? 0 : LAPIC_LVT_MASKED
;
611 timer_vector
|= (mode
== periodic
) ? LAPIC_LVT_PERIODIC
: 0;
612 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
613 LAPIC_WRITE(TIMER_DIVIDE_CONFIG
, divisor
);
614 mp_enable_preemption();
618 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
621 lapic_config_tsc_deadline_timer(void)
623 uint32_t timer_vector
;
625 DBG("lapic_config_tsc_deadline_timer()\n");
626 mp_disable_preemption();
627 timer_vector
= LAPIC_READ(LVT_TIMER
);
628 timer_vector
&= ~(LAPIC_LVT_MASKED
|
630 timer_vector
|= LAPIC_LVT_TSC_DEADLINE
;
631 LAPIC_WRITE(LVT_TIMER
, timer_vector
);
633 /* Serialize writes per Intel OSWG */
635 lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
636 } while (lapic_get_tsc_deadline_timer() == 0);
637 lapic_set_tsc_deadline_timer(0);
639 mp_enable_preemption();
640 DBG("lapic_config_tsc_deadline_timer() done\n");
644 lapic_set_timer_fast(
645 lapic_timer_count_t initial_count
)
647 LAPIC_WRITE(LVT_TIMER
, LAPIC_READ(LVT_TIMER
) & ~LAPIC_LVT_MASKED
);
648 LAPIC_WRITE(TIMER_INITIAL_COUNT
, initial_count
);
652 lapic_set_tsc_deadline_timer(uint64_t deadline
)
654 /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
655 wrmsr64(MSR_IA32_TSC_DEADLINE
, deadline
);
659 lapic_get_tsc_deadline_timer(void)
661 return rdmsr64(MSR_IA32_TSC_DEADLINE
);
666 lapic_timer_mode_t
*mode
,
667 lapic_timer_divide_t
*divisor
,
668 lapic_timer_count_t
*initial_count
,
669 lapic_timer_count_t
*current_count
)
671 mp_disable_preemption();
673 *mode
= (LAPIC_READ(LVT_TIMER
) & LAPIC_LVT_PERIODIC
) ?
677 *divisor
= LAPIC_READ(TIMER_DIVIDE_CONFIG
) & LAPIC_TIMER_DIVIDE_MASK
;
680 *initial_count
= LAPIC_READ(TIMER_INITIAL_COUNT
);
683 *current_count
= LAPIC_READ(TIMER_CURRENT_COUNT
);
685 mp_enable_preemption();
689 _lapic_end_of_interrupt(void)
/*
 * Public EOI entry point; thin wrapper around _lapic_end_of_interrupt().
 */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
701 lapic_unmask_perfcnt_interrupt(void)
703 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
));
707 lapic_set_perfcnt_interrupt_mask(boolean_t mask
)
709 uint32_t m
= (mask
? LAPIC_LVT_MASKED
: 0);
710 LAPIC_WRITE(LVT_PERFCNT
, LAPIC_VECTOR(PERFCNT
) | m
);
714 lapic_set_intr_func(int vector
, i386_intr_func_t func
)
716 if (vector
> lapic_interrupt_base
) {
717 vector
-= lapic_interrupt_base
;
721 case LAPIC_NMI_INTERRUPT
:
722 case LAPIC_INTERPROCESSOR_INTERRUPT
:
723 case LAPIC_TIMER_INTERRUPT
:
724 case LAPIC_THERMAL_INTERRUPT
:
725 case LAPIC_PERFCNT_INTERRUPT
:
726 case LAPIC_CMCI_INTERRUPT
:
727 case LAPIC_PM_INTERRUPT
:
728 lapic_intr_func
[vector
] = func
;
731 panic("lapic_set_intr_func(%d,%p) invalid vector\n",
737 lapic_set_pmi_func(i386_intr_func_t func
)
739 lapic_set_intr_func(LAPIC_VECTOR(PERFCNT
), func
);
743 lapic_interrupt(int interrupt_num
, x86_saved_state_t
*state
)
748 interrupt_num
-= lapic_interrupt_base
;
749 if (interrupt_num
< 0) {
750 if (interrupt_num
== (LAPIC_NMI_INTERRUPT
- lapic_interrupt_base
) &&
751 lapic_intr_func
[LAPIC_NMI_INTERRUPT
] != NULL
) {
752 retval
= (*lapic_intr_func
[LAPIC_NMI_INTERRUPT
])(state
);
759 switch (interrupt_num
) {
760 case LAPIC_TIMER_INTERRUPT
:
761 case LAPIC_THERMAL_INTERRUPT
:
762 case LAPIC_INTERPROCESSOR_INTERRUPT
:
763 case LAPIC_PM_INTERRUPT
:
764 if (lapic_intr_func
[interrupt_num
] != NULL
) {
765 (void) (*lapic_intr_func
[interrupt_num
])(state
);
767 _lapic_end_of_interrupt();
770 case LAPIC_PERFCNT_INTERRUPT
:
771 /* If a function has been registered, invoke it. Otherwise,
774 if (lapic_intr_func
[interrupt_num
] != NULL
) {
775 (void) (*lapic_intr_func
[interrupt_num
])(state
);
776 /* Unmask the interrupt since we don't expect legacy users
777 * to be responsible for it.
779 lapic_unmask_perfcnt_interrupt();
780 _lapic_end_of_interrupt();
784 case LAPIC_CMCI_INTERRUPT
:
785 if (lapic_intr_func
[interrupt_num
] != NULL
) {
786 (void) (*lapic_intr_func
[interrupt_num
])(state
);
788 /* return 0 for plaform expert to handle */
790 case LAPIC_ERROR_INTERRUPT
:
791 /* We treat error interrupts on APs as fatal.
792 * The current interrupt steering scheme directs most
793 * external interrupts to the BSP (HPET interrupts being
794 * a notable exception); hence, such an error
795 * on an AP may signify LVT corruption (with "may" being
796 * the operative word). On the BSP, we adopt a more
797 * lenient approach, in the interests of enhancing
798 * debuggability and reducing fragility.
799 * If "lapic_error_count_threshold" error interrupts
800 * occur within "lapic_error_time_threshold" absolute
801 * time units, we mask the error vector and log. The
802 * error interrupts themselves are likely
803 * side effects of issues which are beyond the purview of
804 * the local APIC interrupt handler, however. The Error
805 * Status Register value (the illegal destination
806 * vector code is one observed in practice) indicates
807 * the immediate cause of the error.
809 esr
= lapic_esr_read();
812 if ((debug_boot_arg
&& (lapic_dont_panic
== FALSE
)) ||
813 cpu_number() != master_cpu
) {
814 panic("Local APIC error, ESR: %d\n", esr
);
817 if (cpu_number() == master_cpu
) {
818 uint64_t abstime
= mach_absolute_time();
819 if ((abstime
- lapic_last_master_error
) < lapic_error_time_threshold
) {
820 if (lapic_master_error_count
++ > lapic_error_count_threshold
) {
821 lapic_errors_masked
= TRUE
;
822 LAPIC_WRITE(LVT_ERROR
, LAPIC_READ(LVT_ERROR
) | LAPIC_LVT_MASKED
);
823 printf("Local APIC: errors masked\n");
826 lapic_last_master_error
= abstime
;
827 lapic_master_error_count
= 0;
829 printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr
, lapic_master_error_count
);
832 _lapic_end_of_interrupt();
835 case LAPIC_SPURIOUS_INTERRUPT
:
837 /* No EOI required here */
840 case LAPIC_PMC_SW_INTERRUPT
:
844 case LAPIC_KICK_INTERRUPT
:
845 _lapic_end_of_interrupt();
854 lapic_smm_restore(void)
858 if (lapic_os_enabled
== FALSE
) {
862 state
= ml_set_interrupts_enabled(FALSE
);
864 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE
, TIMER
)) {
866 * Bogus SMI handler enables interrupts but does not know about
867 * local APIC interrupt sources. When APIC timer counts down to
868 * zero while in SMM, local APIC will end up waiting for an EOI
869 * but no interrupt was delivered to the OS.
871 _lapic_end_of_interrupt();
874 * timer is one-shot, trigger another quick countdown to trigger
875 * another timer interrupt.
877 if (LAPIC_READ(TIMER_CURRENT_COUNT
) == 0) {
878 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 1);
881 kprintf("lapic_smm_restore\n");
884 ml_set_interrupts_enabled(state
);
888 lapic_send_ipi(int cpu
, int vector
)
892 if (vector
< lapic_interrupt_base
) {
893 vector
+= lapic_interrupt_base
;
896 state
= ml_set_interrupts_enabled(FALSE
);
898 /* Wait for pending outgoing send to complete */
899 while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING
) {
903 LAPIC_WRITE_ICR(cpu_to_lapic
[cpu
], vector
| LAPIC_ICR_DM_FIXED
);
905 (void) ml_set_interrupts_enabled(state
);
909 * The following interfaces are privately exported to AICPM.
913 lapic_is_interrupt_pending(void)
917 for (i
= 0; i
< 8; i
+= 1) {
918 if ((LAPIC_READ_OFFSET(IRR_BASE
, i
) != 0) ||
919 (LAPIC_READ_OFFSET(ISR_BASE
, i
) != 0)) {
928 lapic_is_interrupting(uint8_t vector
)
936 bit
= 1 << (vector
% 32);
938 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
939 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
941 if ((irr
| isr
) & bit
) {
949 lapic_interrupt_counts(uint64_t intrs
[256])
961 for (i
= 0; i
< 8; i
+= 1) {
962 irr
= LAPIC_READ_OFFSET(IRR_BASE
, i
);
963 isr
= LAPIC_READ_OFFSET(ISR_BASE
, i
);
965 if ((isr
| irr
) == 0) {
969 for (j
= (i
== 0) ? 16 : 0; j
< 32; j
+= 1) {
971 if ((isr
| irr
) & (1 << j
)) {
979 lapic_disable_timer(void)
984 * If we're in deadline timer mode,
985 * simply clear the deadline timer, otherwise
986 * mask the timer interrupt and clear the countdown.
988 lvt_timer
= LAPIC_READ(LVT_TIMER
);
989 if (lvt_timer
& LAPIC_LVT_TSC_DEADLINE
) {
990 wrmsr64(MSR_IA32_TSC_DEADLINE
, 0);
992 LAPIC_WRITE(LVT_TIMER
, lvt_timer
| LAPIC_LVT_MASKED
);
993 LAPIC_WRITE(TIMER_INITIAL_COUNT
, 0);
994 lvt_timer
= LAPIC_READ(LVT_TIMER
);
998 /* SPI returning the CMCI vector */
1000 lapic_get_cmci_vector(void)
1002 uint8_t cmci_vector
= 0;
1004 /* CMCI, if available */
1005 if (mca_is_cmci_present()) {
1006 cmci_vector
= LAPIC_VECTOR(CMCI
);
1012 #if DEVELOPMENT || DEBUG
1013 extern void lapic_trigger_MC(void);
1015 lapic_trigger_MC(void)
1017 /* A 64-bit access to any register will do it. */
1018 volatile uint64_t dummy
= *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID
);