]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/i386/lapic_native.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / i386 / lapic_native.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2008-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31
32#include <mach/mach_types.h>
33#include <mach/kern_return.h>
34
35#include <kern/kern_types.h>
36#include <kern/cpu_number.h>
37#include <kern/cpu_data.h>
38#include <kern/assert.h>
39#include <kern/machine.h>
40#include <kern/debug.h>
41
42#include <vm/vm_map.h>
43#include <vm/vm_kern.h>
44
45#include <i386/lapic.h>
46#include <i386/cpuid.h>
47#include <i386/proc_reg.h>
48#include <i386/machine_cpu.h>
49#include <i386/misc_protos.h>
50#include <i386/mp.h>
51#include <i386/postcode.h>
52#include <i386/cpu_threads.h>
53#include <i386/machine_routines.h>
54#include <i386/tsc.h>
55#if CONFIG_MCA
56#include <i386/machine_check.h>
57#endif
58
59#include <sys/kdebug.h>
60
61#if MP_DEBUG
62#define PAUSE delay(1000000)
63#define DBG(x...) kprintf(x)
64#else
65#define DBG(x...)
66#define PAUSE
67#endif /* MP_DEBUG */
68
lapic_ops_table_t *lapic_ops;           /* Lapic operations switch */

static vm_map_offset_t lapic_pbase;     /* Physical base memory-mapped regs */
static vm_offset_t lapic_vbase;         /* Virtual base memory-mapped regs */

/* Registered handlers for locally-delivered interrupts, indexed by vector. */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/*
 * Error-interrupt throttling state: if more than
 * lapic_error_count_threshold error interrupts arrive on the master CPU
 * within lapic_error_time_threshold absolute-time units, the error LVT
 * is masked (see LAPIC_ERROR_INTERRUPT handling in lapic_interrupt()).
 */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;
/* Highest cpu number eligible for device interrupts ("intcpumax" boot-arg). */
int lapic_max_interrupt_cpunum = 0;

/* APIC operating mode, recorded at sleep and re-checked at wake. */
typedef enum {
	APIC_MODE_UNKNOWN = 0,
	APIC_MODE_XAPIC = 1,
	APIC_MODE_X2APIC = 2
} apic_mode_t;

static apic_mode_t apic_mode_before_sleep = APIC_MODE_UNKNOWN;
94
#ifdef MP_DEBUG
/*
 * Debug-only: print both directions of the cpu number <-> local APIC id
 * map, skipping unassigned (-1) entries.
 */
void
lapic_cpu_map_dump(void)
{
	int i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1) {
			continue;
		}
		kprintf("cpu_to_lapic[%d]: %d\n",
		    i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1) {
			continue;
		}
		kprintf("lapic_to_cpu[%d]: %d\n",
		    i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */
117
118static void
119map_local_apic(void)
120{
121 vm_map_offset_t lapic_vbase64;
122 int result;
123 kern_return_t kr;
124 vm_map_entry_t entry;
125
126 if (lapic_vbase == 0) {
127 lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
128 result = vm_map_find_space(kernel_map,
129 &lapic_vbase64,
130 round_page(LAPIC_SIZE), 0,
131 0,
132 VM_MAP_KERNEL_FLAGS_NONE,
133 VM_KERN_MEMORY_IOKIT,
134 &entry);
135 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
136 */
137 lapic_vbase = (vm_offset_t) lapic_vbase64;
138 if (result != KERN_SUCCESS) {
139 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
140 }
141 vm_map_unlock(kernel_map);
142
143 /*
144 * Map in the local APIC non-cacheable, as recommended by Intel
145 * in section 8.4.1 of the "System Programming Guide".
146 * In fact, this is redundant because EFI will have assigned an
147 * MTRR physical range containing the local APIC's MMIO space as
148 * UC and this will override the default PAT setting.
149 */
150 kr = pmap_enter(pmap_kernel(),
151 lapic_vbase,
152 (ppnum_t) i386_btop(lapic_pbase),
153 VM_PROT_READ | VM_PROT_WRITE,
154 VM_PROT_NONE,
155 VM_WIMG_IO,
156 TRUE);
157
158 assert(kr == KERN_SUCCESS);
159 }
160}
161
/*
 * Put this CPU's local APIC into legacy (xAPIC) mode and set the
 * flat logical destination model.
 */
static void
legacy_init(void)
{
	uint32_t lo, hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) != 0) {
		/*
		 * If we're already in x2APIC mode, we MUST disable the local APIC
		 * before transitioning back to legacy APIC mode.
		 */
		lo &= ~(MSR_IA32_APIC_BASE_ENABLE | MSR_IA32_APIC_BASE_EXTENDED);
		/* Two-step: fully disable first, then re-enable in xAPIC mode. */
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo);
		wrmsr64(MSR_IA32_APIC_BASE, ((uint64_t)hi) << 32 | lo | MSR_IA32_APIC_BASE_ENABLE);
	}
	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
184
185
186static uint32_t
187legacy_read(lapic_register_t reg)
188{
189 return *LAPIC_MMIO(reg);
190}
191
/* Write a 32-bit local APIC register through the MMIO window. */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
197
198static uint64_t
199legacy_read_icr(void)
200{
201 return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
202}
203
/*
 * Write the interrupt command register via MMIO. The destination
 * register (ICRD) must be written first; writing the low ICR word
 * is what initiates the IPI, so the order here is significant.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
210
/* Operations vector used when running in legacy (xAPIC, MMIO) mode. */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
218
219boolean_t is_x2apic = FALSE;
220
/*
 * Enable x2APIC mode on this CPU by setting the EXTENDED bit in
 * IA32_APIC_BASE, if it is not already set.
 */
static void
x2apic_init(void)
{
	uint32_t lo;
	uint32_t hi;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
		lo |= MSR_IA32_APIC_BASE_EXTENDED;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);
		kprintf("x2APIC mode enabled\n");
	}
}
234
235static uint32_t
236x2apic_read(lapic_register_t reg)
237{
238 uint32_t lo;
239 uint32_t hi;
240
241 rdmsr(LAPIC_MSR(reg), lo, hi);
242 return lo;
243}
244
/* Write a local APIC register via its x2APIC MSR (high word zero). */
static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}
250
251static uint64_t
252x2apic_read_icr(void)
253{
254 return rdmsr64(LAPIC_MSR(ICR));;
255}
256
/*
 * Write the 64-bit ICR in one MSR write: command in the low half,
 * destination APIC id in the high half.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
262
/* Operations vector used when running in x2APIC (MSR) mode. */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
270
271/*
272 * Used by APs to determine their APIC IDs; assumes master CPU has initialized
273 * the local APIC interfaces.
274 */
/*
 * Used by APs to determine their APIC IDs; assumes master CPU has initialized
 * the local APIC interfaces.
 *
 * Reads IA32_APIC_BASE to decide which interface (MSR or MMIO) is
 * currently valid, rather than trusting the global is_x2apic flag,
 * since this CPU may not have transitioned modes yet.
 */
uint32_t
lapic_safe_apicid(void)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_lapic_enabled, is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	if (is_lapic_enabled && is_local_x2apic) {
		return x2apic_read(ID);
	} else if (is_lapic_enabled) {
		/* xAPIC mode: the id occupies a bit-field of the MMIO ID register. */
		return (*LAPIC_MMIO(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK;
	} else {
		panic("Unknown Local APIC state!");
		/*NORETURN*/
	}
}
295
/*
 * Bring this CPU's local APIC mode into agreement with the globally
 * selected mode (is_x2apic), transitioning via LAPIC_INIT() if needed.
 * On wake from sleep (for_wake == true), additionally verify that the
 * mode matches what was in effect before sleep.
 */
static void
lapic_reinit(bool for_wake)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_boot_processor;	/* NOTE(review): read but not otherwise used here */
	boolean_t is_lapic_enabled;
	boolean_t is_local_x2apic;

	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;

	/*
	 * If we're configured for x2apic mode and we're being asked to transition
	 * to legacy APIC mode, OR if we're in legacy APIC mode and we're being
	 * asked to transition to x2apic mode, call LAPIC_INIT().
	 */
	if ((!is_local_x2apic && is_x2apic) || (is_local_x2apic && !is_x2apic)) {
		LAPIC_INIT();
		/* Now re-read after LAPIC_INIT() */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
		is_local_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	}

	if ((!is_lapic_enabled && !is_local_x2apic)) {
		panic("Unexpected local APIC state\n");
	}

	/*
	 * If we did not select the same APIC mode as we had before sleep, flag
	 * that as an error (and panic on debug/development kernels). Note that
	 * we might get here with for_wake == true for the first boot case. In
	 * that case, apic_mode_before_sleep will be UNKNOWN (since we haven't
	 * slept yet), so we do not need to do any APIC checks.
	 */
	if (for_wake &&
	    ((apic_mode_before_sleep == APIC_MODE_XAPIC && !is_lapic_enabled) ||
	    (apic_mode_before_sleep == APIC_MODE_X2APIC && !is_local_x2apic))) {
		kprintf("Inconsistent APIC state after wake (was %d before sleep, "
		    "now is %d)", apic_mode_before_sleep,
		    is_lapic_enabled ? APIC_MODE_XAPIC : APIC_MODE_X2APIC);
#if DEBUG || DEVELOPMENT
		kprintf("HALTING.\n");
		/*
		 * Unfortunately, we cannot safely panic here because the
		 * executing CPU might not be fully initialized. The best
		 * we can do is just print a message to the console and
		 * halt.
		 */
		asm volatile ("cli; hlt;" ::: "memory");
#endif
	}
}
352
/*
 * Per-AP local APIC initialization: align this CPU's APIC mode with
 * the global selection. Must never run on the boot processor (checked
 * on DEBUG/DEVELOPMENT kernels via the BSP bit in IA32_APIC_BASE).
 */
void
lapic_init_slave(void)
{
	lapic_reinit(false);
#if DEBUG || DEVELOPMENT
	if (rdmsr64(MSR_IA32_APIC_BASE) & MSR_IA32_APIC_BASE_BSP) {
		panic("Calling lapic_init_slave() on the boot processor\n");
	}
#endif
}
363
/*
 * One-time local APIC initialization on the boot processor: examine the
 * firmware-provided APIC state, choose xAPIC vs x2APIC mode (boot-arg
 * "x2apic" overrides; auto-enabled under a VMM), install the matching
 * operations vector, map the MMIO window, and seed the lapic<->cpu map.
 */
void
lapic_init(void)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_boot_processor;
	boolean_t is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state\n");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overriden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/*
		 * If no x2apic boot-arg was set and if we're running under a VMM,
		 * autoenable x2APIC mode.
		 */
		if (PE_parse_boot_argn("x2apic", &is_x2apic, sizeof(is_x2apic)) == FALSE &&
		    cpuid_vmm_info()->cpuid_vmm_family != CPUID_VMM_FAMILY_NONE) {
			is_x2apic = TRUE;
		}
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	/* Install the interface matching the selected mode. */
	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	if (lapic_pbase != 0) {
		/*
		 * APs might need to consult the local APIC via the MMIO interface
		 * to get their APIC IDs.
		 */
		map_local_apic();
	} else if (!is_x2apic) {
		panic("Local APIC physical address was not set.");
	}

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map(lapic_safe_apicid(), 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
429
430
/*
 * Return the current Error Status Register value. ESR latches its
 * contents; a write is required before the read to refresh it.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
438
/* Clear the Error Status Register (two back-to-back writes). */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
445
/* Delivery-mode names, indexed by the LVT delivery-mode field (see DM()). */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* Timer-mode names, indexed by the LVT_TIMER mode field. */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
463
/*
 * Dump the state of this CPU's local APIC to the kernel log:
 * priorities, LVT entries, timer state, ESR, and the TMR/IRR/ISR
 * bitmaps. Diagnostic only; has no side effects other than the
 * write-read cycle performed by lapic_esr_read().
 */
void
lapic_dump(void)
{
	int i;

/* Helper macros to decode an LVT register for printing: */
#define BOOL(a) ((a)?' ':'!')                   /* '!' prefix when clear */
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK   /* vector number */
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    lapic_safe_apicid(),
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; print 0 in that case. */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    & LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf("       ");
	/* Column header: vector nibble labels for the 256-bit bitmaps below. */
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
566
567boolean_t
568lapic_probe(void)
569{
570 uint32_t lo;
571 uint32_t hi;
572
573 if (cpuid_features() & CPUID_FEATURE_APIC) {
574 return TRUE;
575 }
576
577 if (cpuid_family() == 6 || cpuid_family() == 15) {
578 /*
579 * Mobile Pentiums:
580 * There may be a local APIC which wasn't enabled by BIOS.
581 * So we try to enable it explicitly.
582 */
583 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
584 lo &= ~MSR_IA32_APIC_BASE_BASE;
585 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
586 lo |= MSR_IA32_APIC_BASE_ENABLE;
587 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
588
589 /*
590 * Re-initialize cpu features info and re-check.
591 */
592 cpuid_set_info();
593 /* We expect this codepath will never be traversed
594 * due to EFI enabling the APIC. Reducing the APIC
595 * interrupt base dynamically is not supported.
596 */
597 if (cpuid_features() & CPUID_FEATURE_APIC) {
598 printf("Local APIC discovered and enabled\n");
599 lapic_os_enabled = TRUE;
600 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
601 return TRUE;
602 }
603 }
604
605 return FALSE;
606}
607
/*
 * Shut down the local APIC, but only if it was the OS (not firmware)
 * that enabled it: mask the LVT entries we programmed, software-disable
 * via SVR, then hardware-disable via IA32_APIC_BASE and refresh cpuid
 * feature info. When called for sleep, record the current APIC mode so
 * lapic_reinit() can verify consistency on wake.
 */
void
lapic_shutdown(bool for_sleep)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	if (for_sleep == true) {
		apic_mode_before_sleep = (is_x2apic ? APIC_MODE_X2APIC : APIC_MODE_XAPIC);
	}

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked (only relevant on CPUs that take device interrupts) */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
653
654boolean_t
655cpu_can_exit(int cpu)
656{
657 return cpu > lapic_max_interrupt_cpunum;
658}
659
/*
 * Program this CPU's local APIC for normal operation: select the mode,
 * accept all priorities, enable via SVR, and set up the LVT entries
 * (ExtINT on interrupt-eligible CPUs, timer, perfmon, thermal, CMCI,
 * error). Also parses the one-time boot-args on the first CPU.
 */
void
lapic_configure(bool for_wake)
{
	int value;

	/* One-time setup of the error-throttling parameters (cpu 0 only). */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	if (cpu_number() == 0) {
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/*
	 * Reinitialize the APIC (handles the case where we're configured to use the X2APIC
	 * but firmware configured the Legacy APIC):
	 */
	lapic_reinit(for_wake);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/* Error LVT: skip on the master if errors were deliberately masked. */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
719
720void
721lapic_set_timer(
722 boolean_t interrupt_unmasked,
723 lapic_timer_mode_t mode,
724 lapic_timer_divide_t divisor,
725 lapic_timer_count_t initial_count)
726{
727 uint32_t timer_vector;
728
729 mp_disable_preemption();
730 timer_vector = LAPIC_READ(LVT_TIMER);
731 timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);;
732 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
733 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
734 LAPIC_WRITE(LVT_TIMER, timer_vector);
735 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
736 LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
737 mp_enable_preemption();
738}
739
/*
 * Configure the local APIC timer's mode, mask state and divisor without
 * starting it (no initial count is written; use lapic_set_timer_fast()
 * to arm it). Clears TSC-deadline mode if it was set.
 */
void
lapic_config_timer(
	boolean_t interrupt_unmasked,
	lapic_timer_mode_t mode,
	lapic_timer_divide_t divisor)
{
	uint32_t timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC |
	    LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}
759
760/*
761 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
762 */
/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG: spin until a deadline write
	 * is observed to stick, then disarm. */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
785
/*
 * Arm the (pre-configured) local APIC timer: unmask it and write the
 * initial count, which starts the countdown.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
793
/* Arm (or with 0, disarm) the TSC-deadline timer MSR. */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
800
801uint64_t
802lapic_get_tsc_deadline_timer(void)
803{
804 return rdmsr64(MSR_IA32_TSC_DEADLINE);
805}
806
807void
808lapic_get_timer(
809 lapic_timer_mode_t *mode,
810 lapic_timer_divide_t *divisor,
811 lapic_timer_count_t *initial_count,
812 lapic_timer_count_t *current_count)
813{
814 mp_disable_preemption();
815 if (mode) {
816 *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
817 periodic : one_shot;
818 }
819 if (divisor) {
820 *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
821 }
822 if (initial_count) {
823 *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
824 }
825 if (current_count) {
826 *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
827 }
828 mp_enable_preemption();
829}
830
/* Signal end-of-interrupt to the local APIC (EOI register write). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
836
/* Exported wrapper around the inline EOI write. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
842
/* Re-arm the perfmon LVT entry with its vector, unmasked. */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
848
849void
850lapic_set_perfcnt_interrupt_mask(boolean_t mask)
851{
852 uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
853 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
854}
855
/*
 * Register a handler for one of the locally-delivered interrupt
 * vectors. Accepts either an absolute vector (above the interrupt
 * base) or a relative one; panics on vectors without a handler slot.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	/* Normalize an absolute vector to its base-relative index. */
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
		    vector, func);
	}
}
878
/* Convenience: register the performance-monitor interrupt handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
884
/*
 * Dispatch a locally-delivered interrupt to its registered handler.
 * Returns 1 if the interrupt was handled here (EOI issued as needed),
 * 0 if it should be passed up to the platform expert.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int retval = 0;
	int esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the base: only NMI is dispatched from here. */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				/* Outside the window: restart the error-burst accounting. */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
	}
	break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
995
/*
 * Recover from an SMI handler having swallowed a timer interrupt:
 * if a timer interrupt is latched in-service, EOI it and, if the
 * one-shot countdown already reached zero, re-arm a minimal count so
 * the OS still gets its timer interrupt.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE) {
		return;
	}

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
1029
/*
 * Send a fixed-delivery IPI with the given vector to the given cpu
 * number (translated to its APIC id via cpu_to_lapic[]). Interrupts
 * are disabled around the ICR access.
 */
void
lapic_send_ipi(int cpu, int vector)
{
	boolean_t state;

	/* Normalize a base-relative vector to its absolute value. */
	if (vector < lapic_interrupt_base) {
		vector += lapic_interrupt_base;
	}

	state = ml_set_interrupts_enabled(FALSE);

	/* X2APIC's ICR doesn't have a pending bit. */
	if (!is_x2apic) {
		/* Wait for pending outgoing send to complete */
		while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
			cpu_pause();
		}
	}

	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);

	(void) ml_set_interrupts_enabled(state);
}
1053
1054/*
1055 * The following interfaces are privately exported to AICPM.
1056 */
1057
1058boolean_t
1059lapic_is_interrupt_pending(void)
1060{
1061 int i;
1062
1063 for (i = 0; i < 8; i += 1) {
1064 if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
1065 (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
1066 return TRUE;
1067 }
1068 }
1069
1070 return FALSE;
1071}
1072
1073boolean_t
1074lapic_is_interrupting(uint8_t vector)
1075{
1076 int i;
1077 int bit;
1078 uint32_t irr;
1079 uint32_t isr;
1080
1081 i = vector / 32;
1082 bit = 1 << (vector % 32);
1083
1084 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1085 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1086
1087 if ((irr | isr) & bit) {
1088 return TRUE;
1089 }
1090
1091 return FALSE;
1092}
1093
1094void
1095lapic_interrupt_counts(uint64_t intrs[256])
1096{
1097 int i;
1098 int j;
1099 int bit;
1100 uint32_t irr;
1101 uint32_t isr;
1102
1103 if (intrs == NULL) {
1104 return;
1105 }
1106
1107 for (i = 0; i < 8; i += 1) {
1108 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
1109 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
1110
1111 if ((isr | irr) == 0) {
1112 continue;
1113 }
1114
1115 for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
1116 bit = (32 * i) + j;
1117 if ((isr | irr) & (1 << j)) {
1118 intrs[bit] += 1;
1119 }
1120 }
1121 }
1122}
1123
/*
 * Stop the local APIC timer regardless of which mode it is in.
 */
void
lapic_disable_timer(void)
{
	uint32_t lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/* Re-read LVT_TIMER; presumably to serialize the preceding
		 * writes — NOTE(review): result is intentionally unused. */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
1143
1144/* SPI returning the CMCI vector */
1145uint8_t
1146lapic_get_cmci_vector(void)
1147{
1148 uint8_t cmci_vector = 0;
1149#if CONFIG_MCA
1150 /* CMCI, if available */
1151 if (mca_is_cmci_present()) {
1152 cmci_vector = LAPIC_VECTOR(CMCI);
1153 }
1154#endif
1155 return cmci_vector;
1156}
1157
#if DEVELOPMENT || DEBUG
/*
 * Debug-only: deliberately provoke a machine check by performing an
 * illegal-width access to the local APIC's MMIO space.
 */
extern void lapic_trigger_MC(void);
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	dummy++;
}
#endif
1167#endif