/*
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/debug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/postcode.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif

#if MACH_KDB
#include <machine/db_machdep.h>
#endif

#include <sys/kdebug.h>

#if MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif	/* MP_DEBUG */

lapic_ops_table_t *lapic_ops;	/* Lapic operations switch */

static vm_map_offset_t lapic_pbase;	/* Physical base memory-mapped regs */
static vm_offset_t lapic_vbase;		/* Virtual base memory-mapped regs */

static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;

#ifdef MP_DEBUG
void
lapic_cpu_map_dump(void)
{
	int i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */

static void
legacy_init(void)
{
	int result;
	vm_map_entry_t entry;
	vm_map_offset_t lapic_vbase64;

	/* Establish a map to the local apic */
	lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map,
				   &lapic_vbase64,
				   round_page(LAPIC_SIZE), 0,
				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
	/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t */
	lapic_vbase = (vm_offset_t) lapic_vbase64;
	if (result != KERN_SUCCESS) {
		panic("legacy_init: vm_map_find_space FAILED (err=%d)", result);
	}
	vm_map_unlock(kernel_map);

	/*
	 * Map in the local APIC non-cacheable, as recommended by Intel
	 * in section 8.4.1 of the "System Programming Guide".
	 */
	pmap_enter(pmap_kernel(),
		   lapic_vbase,
		   (ppnum_t) i386_btop(lapic_pbase),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_IO,
		   TRUE);
}


static uint32_t
legacy_read(lapic_register_t reg)
{
	return *LAPIC_MMIO(reg);
}

static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}

static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write
};

static void
x2apic_init(void)
{
}

static uint32_t
x2apic_read(lapic_register_t reg)
{
	uint32_t lo;
	uint32_t hi;

	rdmsr(LAPIC_MSR(reg), lo, hi);
	return lo;
}

static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}

static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write
};

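/*
 * The LAPIC_READ()/LAPIC_WRITE() macros used throughout this file are
 * assumed to dispatch through the lapic_ops switch selected at init,
 * roughly:
 *
 *	value = lapic_ops->read(reg);		(MMIO load or MSR read)
 *	lapic_ops->write(reg, value);		(MMIO store or MSR write)
 *
 * so callers do not need to know whether the APIC is in legacy xAPIC
 * (memory-mapped) or x2APIC (MSR-based) mode.  This is an illustrative
 * sketch, not code from the original file.
 */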

void
lapic_init(void)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_boot_processor;
	boolean_t is_lapic_enabled;
	boolean_t is_x2apic;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled  = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic         = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
		is_lapic_enabled ? "enabled" : "disabled",
		is_x2apic ? "extended" : "legacy",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	lapic_ops->init();

	if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
		      (LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}

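/*
 * For reference (standard Intel SDM layout, not taken from this file),
 * the MSR_IA32_APIC_BASE bits examined in lapic_init() are assumed to be:
 *
 *	bit  8        BSP     - set only on the bootstrap processor
 *	bit 10        EXTD    - x2APIC mode enable
 *	bit 11        ENABLE  - APIC global enable
 *	bits 12..MAX  BASE    - physical base of the memory-mapped registers
 *
 * which is what the MSR_IA32_APIC_BASE_* masks extract.
 */
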
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}

static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};

static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal",
	"Illegal"
};

void
lapic_dump(void)
{
	int i;

#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
		(LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		(void *) lapic_vbase,
		LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_READ(TPR)&LAPIC_TPR_MASK,
		LAPIC_READ(APR)&LAPIC_APR_MASK,
		LAPIC_READ(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present())
		kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
			VEC(LVT_CMCI),
			DM(LVT_CMCI),
			DS(LVT_CMCI),
			MASK(LVT_CMCI));
#endif
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		VEC(LVT_TIMER),
		DS(LVT_TIMER),
		MASK(LVT_TIMER),
		TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
			& LAPIC_LVT_TMR_MASK]);
	kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_PERFCNT),
		DM(LVT_PERFCNT),
		DS(LVT_PERFCNT),
		MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_THERMAL),
		DM(LVT_THERMAL),
		DS(LVT_THERMAL),
		MASK(LVT_THERMAL));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT0),
		DM(LVT_LINT0),
		TM(LVT_LINT0),
		IP(LVT_LINT0),
		DS(LVT_LINT0),
		MASK(LVT_LINT0));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT1),
		DM(LVT_LINT1),
		TM(LVT_LINT1),
		IP(LVT_LINT1),
		DS(LVT_LINT1),
		MASK(LVT_LINT1));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		VEC(LVT_ERROR),
		DS(LVT_ERROR),
		MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf(" ");
	for (i = 0xf; i >= 0; i--)
		kprintf("%x%x%x%x", i, i, i, i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	kprintf("\n");
}

#if MACH_KDB
/*
 *	Displays apic junk
 *
 *	da
 */
void
db_apic(__unused db_expr_t addr,
	__unused int have_addr,
	__unused db_expr_t count,
	__unused char *modif)
{

	lapic_dump();

	return;
}

#endif

boolean_t
lapic_probe(void)
{
	uint32_t lo;
	uint32_t hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * Mobile Pentiums:
		 * There may be a local APIC which wasn't enabled by BIOS.
		 * So we try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		lo |= MSR_IA32_APIC_BASE_ENABLE;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}

void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}

void
lapic_configure(void)
{
	int value;

	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Set flat delivery model, logical processor id */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present())
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
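
/*
 * Summary of the per-CPU state programmed above: timer, perfmon and
 * thermal LVTs are left unmasked with their fixed vectors, LINT0 is
 * configured as ExtINT on the master CPU only, and the error LVT is
 * (re)armed unless a prior error storm caused lapic_interrupt() to mask
 * it.
 */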

void
lapic_set_timer(
	boolean_t		interrupt_unmasked,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	uint32_t timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
	mp_enable_preemption();
}

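/*
 * A minimal usage sketch (hypothetical, not from the original source):
 * arm an unmasked one-shot interrupt after 'count' timer ticks with
 *
 *	lapic_set_timer(TRUE, one_shot, divisor, count);
 *
 * where 'divisor' is whichever lapic_timer_divide_t encoding from
 * i386/lapic.h the caller wants applied to the timer clock.
 */
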
void
lapic_config_timer(
	boolean_t		interrupt_unmasked,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor)
{
	uint32_t timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
			  LAPIC_LVT_PERIODIC |
			  LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}

/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
__private_extern__
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
			  LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}

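/*
 * Once TSC-deadline mode is configured, the countdown registers are not
 * used; a caller is expected to arm each deadline directly (illustrative
 * sketch, assuming a caller-chosen 'delta' in TSC ticks):
 *
 *	lapic_config_tsc_deadline_timer();			(once, at init)
 *	lapic_set_tsc_deadline_timer(rdtsc64() + delta);	(per deadline)
 *
 * Writing 0 to MSR_IA32_TSC_DEADLINE disarms the timer, as done in
 * lapic_disable_timer() below.
 */
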
void
lapic_set_timer_fast(
	lapic_timer_count_t	initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}

__private_extern__
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}

__private_extern__
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}

void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	mp_disable_preemption();
	if (mode)
		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
			periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
	mp_enable_preemption();
}

static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}

void lapic_unmask_perfcnt_interrupt(void) {
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}

void lapic_set_perfcnt_interrupt_mask(boolean_t mask) {
	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
}

void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base)
		vector -= lapic_interrupt_base;

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
			vector, func);
	}
}

void lapic_set_pmi_func(i386_intr_func_t func) {
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}

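/*
 * Hypothetical registration example (not from the original source): a
 * subsystem owning the performance-counter interrupt would typically do
 *
 *	static int my_pmi_handler(x86_saved_state_t *state) { ... }
 *	...
 *	lapic_set_pmi_func(&my_pmi_handler);
 *
 * after which lapic_interrupt() below invokes the handler whenever
 * LAPIC_PERFCNT_INTERRUPT is taken.
 */
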
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int retval = 0;
	int esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		}
		else
			return 0;
	}

	switch(interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			}
			else {
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
#if CONFIG_COUNTERS
		thread_t old, new;
		ml_get_csw_threads(&old, &new);

		if (pmc_context_switch(old, new) == TRUE) {
			retval = 1;
			/* No EOI required for SWI */
		}
#endif /* CONFIG_COUNTERS */
	}
		break;
	}

	return retval;
}

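/*
 * Note on the return value: lapic_interrupt() returns 1 when the vector
 * was handled here (with an EOI issued as needed) and 0 when it should be
 * passed on; the CMCI case and an unclaimed PERFCNT interrupt are the
 * cases deliberately left for the platform expert / IOKit.
 */
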
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}

void
lapic_send_ipi(int cpu, int vector)
{
	boolean_t state;

	if (vector < lapic_interrupt_base)
		vector += lapic_interrupt_base;

	state = ml_set_interrupts_enabled(FALSE);

	/* Wait for pending outgoing send to complete */
	while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
		cpu_pause();
	}

	LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
	LAPIC_WRITE(ICR, vector | LAPIC_ICR_DM_FIXED);

	(void) ml_set_interrupts_enabled(state);
}

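/*
 * Illustrative use of the IPI primitive above (assumption, not from this
 * file):
 *
 *	lapic_send_ipi(cpu, LAPIC_VECTOR(INTERPROCESSOR));
 *
 * Vectors given as offsets below lapic_interrupt_base are rebased first;
 * the destination APIC id is then programmed via ICRD before the
 * fixed-delivery command is written to ICR.
 */
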
/*
 * The following interfaces are privately exported to AICPM.
 */

boolean_t
lapic_is_interrupt_pending(void)
{
	int i;

	for (i = 0; i < 8; i += 1) {
		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
lapic_is_interrupting(uint8_t vector)
{
	int		i;
	int		bit;
	uint32_t	irr;
	uint32_t	isr;

	i = vector / 32;
	bit = 1 << (vector % 32);

	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
	isr = LAPIC_READ_OFFSET(ISR_BASE, i);

	if ((irr | isr) & bit)
		return (TRUE);

	return (FALSE);
}

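/*
 * Worked example of the indexing above: for vector 0x7f, i = 0x7f / 32 = 3
 * and bit = 1 << (0x7f % 32) = 1 << 31, so the top bit of the fourth
 * 32-bit IRR/ISR word is tested.
 */
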
void
lapic_interrupt_counts(uint64_t intrs[256])
{
	int		i;
	int		j;
	int		bit;
	uint32_t	irr;
	uint32_t	isr;

	if (intrs == NULL)
		return;

	for (i = 0; i < 8; i += 1) {
		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
		isr = LAPIC_READ_OFFSET(ISR_BASE, i);

		if ((isr | irr) == 0)
			continue;

		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
			bit = (32 * i) + j;
			if ((isr | irr) & (1 << j))
				intrs[bit] += 1;
		}
	}
}

void
lapic_disable_timer(void)
{
	uint32_t lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}