/*
 * osfmk/i386/lapic_native.c
 * (source corresponds to the xnu-2782.40.9 release)
 */
/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
31
#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/debug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/postcode.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif

#include <sys/kdebug.h>
64
65#if MP_DEBUG
66#define PAUSE delay(1000000)
67#define DBG(x...) kprintf(x)
68#else
69#define DBG(x...)
70#define PAUSE
71#endif /* MP_DEBUG */
72
73lapic_ops_table_t *lapic_ops; /* Lapic operations switch */
74
75static vm_map_offset_t lapic_pbase; /* Physical base memory-mapped regs */
76static vm_offset_t lapic_vbase; /* Virtual base memory-mapped regs */
77
78static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];
79
80/* TRUE if local APIC was enabled by the OS not by the BIOS */
81static boolean_t lapic_os_enabled = FALSE;
82
83static boolean_t lapic_errors_masked = FALSE;
84static uint64_t lapic_last_master_error = 0;
85static uint64_t lapic_error_time_threshold = 0;
86static unsigned lapic_master_error_count = 0;
87static unsigned lapic_error_count_threshold = 5;
88static boolean_t lapic_dont_panic = FALSE;
89
#ifdef MP_DEBUG
/*
 * Debug-only: print the non-empty entries of the cpu <-> lapic id maps.
 */
void
lapic_cpu_map_dump(void)
{
	int i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n", i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n", i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */
110
111static void
112legacy_init(void)
113{
114 int result;
115 vm_map_entry_t entry;
116 vm_map_offset_t lapic_vbase64;
117 /* Establish a map to the local apic */
118
bd504ef0
A
119 if (lapic_vbase == 0) {
120 lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
121 result = vm_map_find_space(kernel_map,
122 &lapic_vbase64,
123 round_page(LAPIC_SIZE), 0,
124 VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
125 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
126 */
127 lapic_vbase = (vm_offset_t) lapic_vbase64;
128 if (result != KERN_SUCCESS) {
129 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
130 }
131 vm_map_unlock(kernel_map);
132
133 /*
134 * Map in the local APIC non-cacheable, as recommended by Intel
135 * in section 8.4.1 of the "System Programming Guide".
136 * In fact, this is redundant because EFI will have assigned an
137 * MTRR physical range containing the local APIC's MMIO space as
138 * UC and this will override the default PAT setting.
139 */
140 pmap_enter(pmap_kernel(),
141 lapic_vbase,
142 (ppnum_t) i386_btop(lapic_pbase),
143 VM_PROT_READ|VM_PROT_WRITE,
144 VM_PROT_NONE,
145 VM_WIMG_IO,
146 TRUE);
6d2010ae 147 }
316670eb
A
148
149 /*
bd504ef0
A
150 * Set flat delivery model, logical processor id
151 * This should already be the default set.
316670eb 152 */
bd504ef0
A
153 LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
154 LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
6d2010ae
A
155}
156
157
158static uint32_t
159legacy_read(lapic_register_t reg)
160{
161 return *LAPIC_MMIO(reg);
162}
163
164static void
165legacy_write(lapic_register_t reg, uint32_t value)
166{
167 *LAPIC_MMIO(reg) = value;
168}
169
bd504ef0
A
170static uint64_t
171legacy_read_icr(void)
172{
173 return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
174}
175
176static void
177legacy_write_icr(uint32_t dst, uint32_t cmd)
178{
179 *LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
180 *LAPIC_MMIO(ICR) = cmd;
181}
182
6d2010ae
A
183static lapic_ops_table_t legacy_ops = {
184 legacy_init,
185 legacy_read,
bd504ef0
A
186 legacy_write,
187 legacy_read_icr,
188 legacy_write_icr
6d2010ae
A
189};
190
bd504ef0
A
191static boolean_t is_x2apic = FALSE;
192
6d2010ae
A
193static void
194x2apic_init(void)
195{
bd504ef0
A
196 uint32_t lo;
197 uint32_t hi;
198
199 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
200 if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
201 lo |= MSR_IA32_APIC_BASE_EXTENDED;
202 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
203 kprintf("x2APIC mode enabled\n");
204 }
6d2010ae
A
205}
206
207static uint32_t
208x2apic_read(lapic_register_t reg)
209{
210 uint32_t lo;
211 uint32_t hi;
212
213 rdmsr(LAPIC_MSR(reg), lo, hi);
214 return lo;
215}
216
217static void
218x2apic_write(lapic_register_t reg, uint32_t value)
219{
220 wrmsr(LAPIC_MSR(reg), value, 0);
221}
222
bd504ef0
A
223static uint64_t
224x2apic_read_icr(void)
225{
226 return rdmsr64(LAPIC_MSR(ICR));;
227}
228
229static void
230x2apic_write_icr(uint32_t dst, uint32_t cmd)
231{
232 wrmsr(LAPIC_MSR(ICR), cmd, dst);
233}
234
6d2010ae
A
235static lapic_ops_table_t x2apic_ops = {
236 x2apic_init,
237 x2apic_read,
bd504ef0
A
238 x2apic_write,
239 x2apic_read_icr,
240 x2apic_write_icr
6d2010ae
A
241};
242
6d2010ae
A
243void
244lapic_init(void)
245{
246 uint32_t lo;
247 uint32_t hi;
248 boolean_t is_boot_processor;
249 boolean_t is_lapic_enabled;
6d2010ae
A
250
251 /* Examine the local APIC state */
252 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
253 is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
254 is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
255 is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
256 lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
257 kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
258 is_lapic_enabled ? "enabled" : "disabled",
259 is_x2apic ? "extended" : "legacy",
260 is_boot_processor ? "BSP" : "AP");
261 if (!is_boot_processor || !is_lapic_enabled)
262 panic("Unexpected local APIC state\n");
263
bd504ef0
A
264 /*
265 * If x2APIC is available and not already enabled, enable it.
266 * Unless overriden by boot-arg.
267 */
268 if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
269 PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic));
270 kprintf("x2APIC supported %s be enabled\n",
271 is_x2apic ? "and will" : "but will not");
272 }
273
6d2010ae
A
274 lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;
275
bd504ef0 276 LAPIC_INIT();
6d2010ae 277
bd504ef0 278 kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
6d2010ae
A
279 if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
280 panic("Local APIC version 0x%x, 0x14 or more expected\n",
281 (LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
282 }
283
284 /* Set up the lapic_id <-> cpu_number map and add this boot processor */
285 lapic_cpu_map_init();
286 lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
39236c6e 287 current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
6d2010ae
A
288 kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
289}
290
291
292static int
293lapic_esr_read(void)
294{
295 /* write-read register */
296 LAPIC_WRITE(ERROR_STATUS, 0);
297 return LAPIC_READ(ERROR_STATUS);
298}
299
300static void
301lapic_esr_clear(void)
302{
303 LAPIC_WRITE(ERROR_STATUS, 0);
304 LAPIC_WRITE(ERROR_STATUS, 0);
305}
306
/* Delivery-mode names, indexed by the LVT delivery-mode field. */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};

/* Timer-mode names, indexed by the LVT timer-mode field. */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
323
324void
325lapic_dump(void)
326{
327 int i;
328
329#define BOOL(a) ((a)?' ':'!')
330#define VEC(lvt) \
331 LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
332#define DS(lvt) \
333 (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
334#define DM(lvt) \
335 DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
336#define MASK(lvt) \
337 BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
338#define TM(lvt) \
339 (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
340#define IP(lvt) \
341 (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"
342
343 kprintf("LAPIC %d at %p version 0x%x\n",
344 (LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
345 (void *) lapic_vbase,
346 LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
347 kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
348 LAPIC_READ(TPR)&LAPIC_TPR_MASK,
349 LAPIC_READ(APR)&LAPIC_APR_MASK,
350 LAPIC_READ(PPR)&LAPIC_PPR_MASK);
351 kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
bd504ef0 352 is_x2apic ? 0 : LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
6d2010ae
A
353 LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
354 kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
355 BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
356 BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
357 LAPIC_READ(SVR) & LAPIC_SVR_MASK);
358#if CONFIG_MCA
359 if (mca_is_cmci_present())
360 kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
361 VEC(LVT_CMCI),
362 DM(LVT_CMCI),
363 DS(LVT_CMCI),
364 MASK(LVT_CMCI));
365#endif
366 kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
367 VEC(LVT_TIMER),
368 DS(LVT_TIMER),
369 MASK(LVT_TIMER),
370 TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
371 & LAPIC_LVT_TMR_MASK]);
372 kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
373 kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
374 kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
375 kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
376 VEC(LVT_PERFCNT),
377 DM(LVT_PERFCNT),
378 DS(LVT_PERFCNT),
379 MASK(LVT_PERFCNT));
380 kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
381 VEC(LVT_THERMAL),
382 DM(LVT_THERMAL),
383 DS(LVT_THERMAL),
384 MASK(LVT_THERMAL));
385 kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
386 VEC(LVT_LINT0),
387 DM(LVT_LINT0),
388 TM(LVT_LINT0),
389 IP(LVT_LINT0),
390 DS(LVT_LINT0),
391 MASK(LVT_LINT0));
392 kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
393 VEC(LVT_LINT1),
394 DM(LVT_LINT1),
395 TM(LVT_LINT1),
396 IP(LVT_LINT1),
397 DS(LVT_LINT1),
398 MASK(LVT_LINT1));
399 kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
400 VEC(LVT_ERROR),
401 DS(LVT_ERROR),
402 MASK(LVT_ERROR));
403 kprintf("ESR: %08x \n", lapic_esr_read());
404 kprintf(" ");
405 for(i=0xf; i>=0; i--)
406 kprintf("%x%x%x%x",i,i,i,i);
407 kprintf("\n");
408 kprintf("TMR: 0x");
409 for(i=7; i>=0; i--)
410 kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE, i));
411 kprintf("\n");
412 kprintf("IRR: 0x");
413 for(i=7; i>=0; i--)
414 kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE, i));
415 kprintf("\n");
416 kprintf("ISR: 0x");
417 for(i=7; i >= 0; i--)
418 kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE, i));
419 kprintf("\n");
420}
421
6d2010ae
A
422boolean_t
423lapic_probe(void)
424{
425 uint32_t lo;
426 uint32_t hi;
427
428 if (cpuid_features() & CPUID_FEATURE_APIC)
429 return TRUE;
430
431 if (cpuid_family() == 6 || cpuid_family() == 15) {
432 /*
433 * Mobile Pentiums:
434 * There may be a local APIC which wasn't enabled by BIOS.
435 * So we try to enable it explicitly.
436 */
437 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
438 lo &= ~MSR_IA32_APIC_BASE_BASE;
439 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
440 lo |= MSR_IA32_APIC_BASE_ENABLE;
441 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
442
443 /*
444 * Re-initialize cpu features info and re-check.
445 */
446 cpuid_set_info();
39236c6e
A
447 /* We expect this codepath will never be traversed
448 * due to EFI enabling the APIC. Reducing the APIC
449 * interrupt base dynamically is not supported.
450 */
6d2010ae
A
451 if (cpuid_features() & CPUID_FEATURE_APIC) {
452 printf("Local APIC discovered and enabled\n");
453 lapic_os_enabled = TRUE;
454 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
455 return TRUE;
456 }
457 }
458
459 return FALSE;
460}
461
462void
463lapic_shutdown(void)
464{
465 uint32_t lo;
466 uint32_t hi;
467 uint32_t value;
468
469 /* Shutdown if local APIC was enabled by OS */
470 if (lapic_os_enabled == FALSE)
471 return;
472
473 mp_disable_preemption();
474
475 /* ExtINT: masked */
476 if (get_cpu_number() == master_cpu) {
477 value = LAPIC_READ(LVT_LINT0);
478 value |= LAPIC_LVT_MASKED;
479 LAPIC_WRITE(LVT_LINT0, value);
480 }
481
482 /* Error: masked */
483 LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
484
485 /* Timer: masked */
486 LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);
487
488 /* Perfmon: masked */
489 LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);
490
491 /* APIC software disabled */
492 LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);
493
494 /* Bypass the APIC completely and update cpu features */
495 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
496 lo &= ~MSR_IA32_APIC_BASE_ENABLE;
497 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
498 cpuid_set_info();
499
500 mp_enable_preemption();
501}
502
503void
504lapic_configure(void)
505{
506 int value;
507
508 if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
509 nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
510 if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
511 lapic_dont_panic = FALSE;
512 }
513 }
514
6d2010ae
A
515 /* Accept all */
516 LAPIC_WRITE(TPR, 0);
517
518 LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);
519
520 /* ExtINT */
521 if (get_cpu_number() == master_cpu) {
522 value = LAPIC_READ(LVT_LINT0);
523 value &= ~LAPIC_LVT_MASKED;
524 value |= LAPIC_LVT_DM_EXTINT;
525 LAPIC_WRITE(LVT_LINT0, value);
526 }
527
528 /* Timer: unmasked, one-shot */
529 LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));
530
531 /* Perfmon: unmasked */
532 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
533
534 /* Thermal: unmasked */
535 LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));
536
537#if CONFIG_MCA
538 /* CMCI, if available */
539 if (mca_is_cmci_present())
540 LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
541#endif
542
543 if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
544 (cpu_number() != master_cpu)) {
545 lapic_esr_clear();
546 LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
547 }
548}
549
550void
551lapic_set_timer(
552 boolean_t interrupt_unmasked,
553 lapic_timer_mode_t mode,
554 lapic_timer_divide_t divisor,
555 lapic_timer_count_t initial_count)
556{
557 uint32_t timer_vector;
558
559 mp_disable_preemption();
560 timer_vector = LAPIC_READ(LVT_TIMER);
561 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);;
562 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
563 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
564 LAPIC_WRITE(LVT_TIMER, timer_vector);
565 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
566 LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
567 mp_enable_preemption();
568}
569
570void
571lapic_config_timer(
572 boolean_t interrupt_unmasked,
573 lapic_timer_mode_t mode,
574 lapic_timer_divide_t divisor)
575{
576 uint32_t timer_vector;
577
578 mp_disable_preemption();
579 timer_vector = LAPIC_READ(LVT_TIMER);
580 timer_vector &= ~(LAPIC_LVT_MASKED |
581 LAPIC_LVT_PERIODIC |
582 LAPIC_LVT_TSC_DEADLINE);
583 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
584 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
585 LAPIC_WRITE(LVT_TIMER, timer_vector);
586 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
587 mp_enable_preemption();
588}
589
590/*
591 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
592 */
6d2010ae
A
593void
594lapic_config_tsc_deadline_timer(void)
595{
596 uint32_t timer_vector;
597
598 DBG("lapic_config_tsc_deadline_timer()\n");
599 mp_disable_preemption();
600 timer_vector = LAPIC_READ(LVT_TIMER);
601 timer_vector &= ~(LAPIC_LVT_MASKED |
602 LAPIC_LVT_PERIODIC);
603 timer_vector |= LAPIC_LVT_TSC_DEADLINE;
604 LAPIC_WRITE(LVT_TIMER, timer_vector);
605
606 /* Serialize writes per Intel OSWG */
607 do {
608 lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
609 } while (lapic_get_tsc_deadline_timer() == 0);
610 lapic_set_tsc_deadline_timer(0);
611
612 mp_enable_preemption();
613 DBG("lapic_config_tsc_deadline_timer() done\n");
614}
615
616void
617lapic_set_timer_fast(
618 lapic_timer_count_t initial_count)
619{
620 LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
621 LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
622}
623
6d2010ae
A
624void
625lapic_set_tsc_deadline_timer(uint64_t deadline)
626{
627 /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
628 wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
629}
630
6d2010ae
A
631uint64_t
632lapic_get_tsc_deadline_timer(void)
633{
634 return rdmsr64(MSR_IA32_TSC_DEADLINE);
635}
636
637void
638lapic_get_timer(
639 lapic_timer_mode_t *mode,
640 lapic_timer_divide_t *divisor,
641 lapic_timer_count_t *initial_count,
642 lapic_timer_count_t *current_count)
643{
644 mp_disable_preemption();
645 if (mode)
646 *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
647 periodic : one_shot;
648 if (divisor)
649 *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
650 if (initial_count)
651 *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
652 if (current_count)
653 *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
654 mp_enable_preemption();
655}
656
657static inline void
658_lapic_end_of_interrupt(void)
659{
660 LAPIC_WRITE(EOI, 0);
661}
662
663void
664lapic_end_of_interrupt(void)
665{
666 _lapic_end_of_interrupt();
667}
668
669void lapic_unmask_perfcnt_interrupt(void) {
670 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
671}
672
673void lapic_set_perfcnt_interrupt_mask(boolean_t mask) {
674 uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
675 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
676}
677
678void
679lapic_set_intr_func(int vector, i386_intr_func_t func)
680{
681 if (vector > lapic_interrupt_base)
682 vector -= lapic_interrupt_base;
683
684 switch (vector) {
685 case LAPIC_NMI_INTERRUPT:
686 case LAPIC_INTERPROCESSOR_INTERRUPT:
687 case LAPIC_TIMER_INTERRUPT:
688 case LAPIC_THERMAL_INTERRUPT:
689 case LAPIC_PERFCNT_INTERRUPT:
690 case LAPIC_CMCI_INTERRUPT:
691 case LAPIC_PM_INTERRUPT:
692 lapic_intr_func[vector] = func;
693 break;
694 default:
695 panic("lapic_set_intr_func(%d,%p) invalid vector\n",
696 vector, func);
697 }
698}
699
700void lapic_set_pmi_func(i386_intr_func_t func) {
701 lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
702}
703
704int
705lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
706{
707 int retval = 0;
708 int esr = -1;
709
710 interrupt_num -= lapic_interrupt_base;
711 if (interrupt_num < 0) {
712 if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
713 lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
714 retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
715 return retval;
716 }
717 else
718 return 0;
719 }
720
721 switch(interrupt_num) {
722 case LAPIC_TIMER_INTERRUPT:
723 case LAPIC_THERMAL_INTERRUPT:
724 case LAPIC_INTERPROCESSOR_INTERRUPT:
725 case LAPIC_PM_INTERRUPT:
726 if (lapic_intr_func[interrupt_num] != NULL)
727 (void) (*lapic_intr_func[interrupt_num])(state);
728 _lapic_end_of_interrupt();
729 retval = 1;
730 break;
731 case LAPIC_PERFCNT_INTERRUPT:
732 /* If a function has been registered, invoke it. Otherwise,
733 * pass up to IOKit.
734 */
735 if (lapic_intr_func[interrupt_num] != NULL) {
736 (void) (*lapic_intr_func[interrupt_num])(state);
737 /* Unmask the interrupt since we don't expect legacy users
738 * to be responsible for it.
739 */
740 lapic_unmask_perfcnt_interrupt();
741 _lapic_end_of_interrupt();
742 retval = 1;
743 }
744 break;
745 case LAPIC_CMCI_INTERRUPT:
746 if (lapic_intr_func[interrupt_num] != NULL)
747 (void) (*lapic_intr_func[interrupt_num])(state);
748 /* return 0 for plaform expert to handle */
749 break;
750 case LAPIC_ERROR_INTERRUPT:
751 /* We treat error interrupts on APs as fatal.
752 * The current interrupt steering scheme directs most
753 * external interrupts to the BSP (HPET interrupts being
754 * a notable exception); hence, such an error
755 * on an AP may signify LVT corruption (with "may" being
756 * the operative word). On the BSP, we adopt a more
757 * lenient approach, in the interests of enhancing
758 * debuggability and reducing fragility.
759 * If "lapic_error_count_threshold" error interrupts
760 * occur within "lapic_error_time_threshold" absolute
761 * time units, we mask the error vector and log. The
762 * error interrupts themselves are likely
763 * side effects of issues which are beyond the purview of
764 * the local APIC interrupt handler, however. The Error
765 * Status Register value (the illegal destination
766 * vector code is one observed in practice) indicates
767 * the immediate cause of the error.
768 */
769 esr = lapic_esr_read();
770 lapic_dump();
771
772 if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
773 cpu_number() != master_cpu) {
774 panic("Local APIC error, ESR: %d\n", esr);
775 }
776
777 if (cpu_number() == master_cpu) {
778 uint64_t abstime = mach_absolute_time();
779 if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
780 if (lapic_master_error_count++ > lapic_error_count_threshold) {
781 lapic_errors_masked = TRUE;
782 LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
783 printf("Local APIC: errors masked\n");
784 }
785 }
786 else {
787 lapic_last_master_error = abstime;
788 lapic_master_error_count = 0;
789 }
790 printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
791 }
792
793 _lapic_end_of_interrupt();
794 retval = 1;
795 break;
796 case LAPIC_SPURIOUS_INTERRUPT:
797 kprintf("SPIV\n");
798 /* No EOI required here */
799 retval = 1;
800 break;
801 case LAPIC_PMC_SW_INTERRUPT:
802 {
803#if CONFIG_COUNTERS
804 thread_t old, new;
805 ml_get_csw_threads(&old, &new);
806
807 if (pmc_context_switch(old, new) == TRUE) {
808 retval = 1;
809 /* No EOI required for SWI */
810 }
811#endif /* CONFIG_COUNTERS */
812 }
813 break;
fe8ab488
A
814 case LAPIC_KICK_INTERRUPT:
815 _lapic_end_of_interrupt();
816 retval = 1;
817 break;
6d2010ae
A
818 }
819
820 return retval;
821}
822
823void
824lapic_smm_restore(void)
825{
826 boolean_t state;
827
828 if (lapic_os_enabled == FALSE)
829 return;
830
831 state = ml_set_interrupts_enabled(FALSE);
832
833 if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
834 /*
835 * Bogus SMI handler enables interrupts but does not know about
836 * local APIC interrupt sources. When APIC timer counts down to
837 * zero while in SMM, local APIC will end up waiting for an EOI
838 * but no interrupt was delivered to the OS.
839 */
840 _lapic_end_of_interrupt();
841
842 /*
843 * timer is one-shot, trigger another quick countdown to trigger
844 * another timer interrupt.
845 */
846 if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
847 LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
848 }
849
850 kprintf("lapic_smm_restore\n");
851 }
852
853 ml_set_interrupts_enabled(state);
854}
855
856void
857lapic_send_ipi(int cpu, int vector)
858{
859 boolean_t state;
860
861 if (vector < lapic_interrupt_base)
862 vector += lapic_interrupt_base;
863
864 state = ml_set_interrupts_enabled(FALSE);
865
866 /* Wait for pending outgoing send to complete */
bd504ef0 867 while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
6d2010ae
A
868 cpu_pause();
869 }
870
bd504ef0 871 LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
6d2010ae
A
872
873 (void) ml_set_interrupts_enabled(state);
874}
875
876/*
877 * The following interfaces are privately exported to AICPM.
878 */
879
880boolean_t
881lapic_is_interrupt_pending(void)
882{
883 int i;
884
885 for (i = 0; i < 8; i += 1) {
886 if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
887 (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
888 return (TRUE);
889 }
890
891 return (FALSE);
892}
893
894boolean_t
895lapic_is_interrupting(uint8_t vector)
896{
897 int i;
898 int bit;
899 uint32_t irr;
900 uint32_t isr;
901
902 i = vector / 32;
903 bit = 1 << (vector % 32);
904
905 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
906 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
907
908 if ((irr | isr) & bit)
909 return (TRUE);
910
911 return (FALSE);
912}
913
914void
915lapic_interrupt_counts(uint64_t intrs[256])
916{
917 int i;
918 int j;
919 int bit;
920 uint32_t irr;
921 uint32_t isr;
922
923 if (intrs == NULL)
924 return;
925
926 for (i = 0; i < 8; i += 1) {
927 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
928 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
929
930 if ((isr | irr) == 0)
931 continue;
932
933 for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
934 bit = (32 * i) + j;
935 if ((isr | irr) & (1 << j))
936 intrs[bit] += 1;
937 }
938 }
939}
940
941void
942lapic_disable_timer(void)
943{
944 uint32_t lvt_timer;
945
946 /*
947 * If we're in deadline timer mode,
948 * simply clear the deadline timer, otherwise
949 * mask the timer interrupt and clear the countdown.
950 */
951 lvt_timer = LAPIC_READ(LVT_TIMER);
952 if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
953 wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
954 } else {
955 LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
956 LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
957 lvt_timer = LAPIC_READ(LVT_TIMER);
958 }
959}
316670eb 960
15129b1c
A
961/* SPI returning the CMCI vector */
962uint8_t
963lapic_get_cmci_vector(void)
964{
965 uint8_t cmci_vector = 0;
966#if CONFIG_MCA
967 /* CMCI, if available */
968 if (mca_is_cmci_present())
969 cmci_vector = LAPIC_VECTOR(CMCI);
970#endif
971 return cmci_vector;
972}
973
#if DEBUG
extern void lapic_trigger_MC(void);
/* Debug-only: deliberately provoke a machine check. */
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(uint64_t *) (void *) LAPIC_MMIO(ID);
	dummy++;
}
#endif