]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/lapic_native.c
xnu-6153.61.1.tar.gz
[apple/xnu.git] / osfmk / i386 / lapic_native.c
1 /*
2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
44
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #if CONFIG_MCA
56 #include <i386/machine_check.h>
57 #endif
58
59 #include <sys/kdebug.h>
60
61 #if MP_DEBUG
62 #define PAUSE delay(1000000)
63 #define DBG(x...) kprintf(x)
64 #else
65 #define DBG(x...)
66 #define PAUSE
67 #endif /* MP_DEBUG */
68
lapic_ops_table_t *lapic_ops; /* Lapic operations switch: legacy MMIO or x2APIC MSR */

static vm_map_offset_t lapic_pbase; /* Physical base memory-mapped regs */
static vm_offset_t lapic_vbase; /* Virtual base memory-mapped regs */

/* Per-vector handlers registered via lapic_set_intr_func() */
static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/*
 * Error-interrupt throttling state (see lapic_interrupt()):
 * if more than lapic_error_count_threshold error interrupts arrive on the
 * master CPU within lapic_error_time_threshold absolute-time units, the
 * error LVT is masked instead of panicking.
 */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE; /* boot-arg: log instead of panic */
int lapic_max_interrupt_cpunum = 0; /* highest cpu number eligible for ExtINT */
86
87 #ifdef MP_DEBUG
88 void
89 lapic_cpu_map_dump(void)
90 {
91 int i;
92
93 for (i = 0; i < MAX_CPUS; i++) {
94 if (cpu_to_lapic[i] == -1) {
95 continue;
96 }
97 kprintf("cpu_to_lapic[%d]: %d\n",
98 i, cpu_to_lapic[i]);
99 }
100 for (i = 0; i < MAX_LAPICIDS; i++) {
101 if (lapic_to_cpu[i] == -1) {
102 continue;
103 }
104 kprintf("lapic_to_cpu[%d]: %d\n",
105 i, lapic_to_cpu[i]);
106 }
107 }
108 #endif /* MP_DEBUG */
109
110 static void
111 legacy_init(void)
112 {
113 int result;
114 kern_return_t kr;
115 vm_map_entry_t entry;
116 vm_map_offset_t lapic_vbase64;
117 /* Establish a map to the local apic */
118
119 if (lapic_vbase == 0) {
120 lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
121 result = vm_map_find_space(kernel_map,
122 &lapic_vbase64,
123 round_page(LAPIC_SIZE), 0,
124 0,
125 VM_MAP_KERNEL_FLAGS_NONE,
126 VM_KERN_MEMORY_IOKIT,
127 &entry);
128 /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
129 */
130 lapic_vbase = (vm_offset_t) lapic_vbase64;
131 if (result != KERN_SUCCESS) {
132 panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
133 }
134 vm_map_unlock(kernel_map);
135
136 /*
137 * Map in the local APIC non-cacheable, as recommended by Intel
138 * in section 8.4.1 of the "System Programming Guide".
139 * In fact, this is redundant because EFI will have assigned an
140 * MTRR physical range containing the local APIC's MMIO space as
141 * UC and this will override the default PAT setting.
142 */
143 kr = pmap_enter(pmap_kernel(),
144 lapic_vbase,
145 (ppnum_t) i386_btop(lapic_pbase),
146 VM_PROT_READ | VM_PROT_WRITE,
147 VM_PROT_NONE,
148 VM_WIMG_IO,
149 TRUE);
150
151 assert(kr == KERN_SUCCESS);
152 }
153
154 /*
155 * Set flat delivery model, logical processor id
156 * This should already be the default set.
157 */
158 LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
159 LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
160 }
161
162
163 static uint32_t
164 legacy_read(lapic_register_t reg)
165 {
166 return *LAPIC_MMIO(reg);
167 }
168
169 static void
170 legacy_write(lapic_register_t reg, uint32_t value)
171 {
172 *LAPIC_MMIO(reg) = value;
173 }
174
175 static uint64_t
176 legacy_read_icr(void)
177 {
178 return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
179 }
180
181 static void
182 legacy_write_icr(uint32_t dst, uint32_t cmd)
183 {
184 *LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
185 *LAPIC_MMIO(ICR) = cmd;
186 }
187
/* Operations vector used when the local APIC is driven via legacy MMIO. */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
195
/* TRUE when operating in x2APIC (MSR-based) mode rather than legacy MMIO */
static boolean_t is_x2apic = FALSE;
197
198 static void
199 x2apic_init(void)
200 {
201 uint32_t lo;
202 uint32_t hi;
203
204 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
205 if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
206 lo |= MSR_IA32_APIC_BASE_EXTENDED;
207 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
208 kprintf("x2APIC mode enabled\n");
209 }
210 }
211
212 static uint32_t
213 x2apic_read(lapic_register_t reg)
214 {
215 uint32_t lo;
216 uint32_t hi;
217
218 rdmsr(LAPIC_MSR(reg), lo, hi);
219 return lo;
220 }
221
/* Write a local APIC register via its x2APIC MSR (high word zero). */
static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}
227
228 static uint64_t
229 x2apic_read_icr(void)
230 {
231 return rdmsr64(LAPIC_MSR(ICR));;
232 }
233
/*
 * Write the Interrupt Command Register: one 64-bit MSR write with the
 * destination id in the high word and the command in the low word.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
239
/* Operations vector used when the local APIC is driven via x2APIC MSRs. */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
247
/*
 * Boot-time local APIC initialization (boot processor only).
 * Examines IA32_APIC_BASE, selects legacy-MMIO vs x2APIC operations,
 * initializes the chosen mode, sanity-checks the APIC version, and seeds
 * the lapic_id <-> cpu_number map with the BSP.
 * Panics if not running on an enabled BSP-mode APIC.
 */
void
lapic_init(void)
{
	uint32_t lo;
	uint32_t hi;
	boolean_t is_boot_processor;
	boolean_t is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
	    is_lapic_enabled ? "enabled" : "disabled",
	    is_x2apic ? "extended" : "legacy",
	    is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled) {
		panic("Unexpected local APIC state\n");
	}

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overridden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		/* "-x2apic" boot-arg opts in; is_x2apic stays FALSE otherwise */
		PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic));
		kprintf("x2APIC supported %s be enabled\n",
		    is_x2apic ? "and will" : "but will not");
	}

	/* Select the access method; all LAPIC_READ/WRITE go through this. */
	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	/* Require an integrated APIC (version >= 0x14) */
	if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
		    (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK, 0);
	current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
296
297
/*
 * Read the Error Status Register. The ESR latches errors; a write is
 * required to update it with the errors accumulated since the last write,
 * so this is a write-then-read sequence.
 */
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
305
/*
 * Clear the Error Status Register. Two back-to-back writes: the first
 * latches (and thereby discards) pending errors, the second clears the
 * freshly-latched value.
 */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
312
/* Names for the 3-bit LVT delivery-mode field, indexed by field value. */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"
};
323
/* Names for the 2-bit LVT timer-mode field, indexed by field value. */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
330
/*
 * Dump the full local APIC state to the kernel log: id/version, priority
 * registers, logical destination setup, every LVT entry, timer counts,
 * the ESR, and the raw 256-bit TMR/IRR/ISR bitmaps.
 * Debug aid only; reads hardware registers but changes nothing except the
 * ESR (which is write-to-latch, see lapic_esr_read()).
 */
void
lapic_dump(void)
{
	int i;

/* Helper macros to decode an LVT register's fields for printing */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
	    (LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK,
	    (void *) lapic_vbase,
	    LAPIC_READ(VERSION) & LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
	    LAPIC_READ(TPR) & LAPIC_TPR_MASK,
	    LAPIC_READ(APR) & LAPIC_APR_MASK,
	    LAPIC_READ(PPR) & LAPIC_PPR_MASK);
	/* DFR does not exist in x2APIC mode; print 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
	    is_x2apic ? 0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT,
	    LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
	    BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE),
	    BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)),
	    LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present()) {
		kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
		    VEC(LVT_CMCI),
		    DM(LVT_CMCI),
		    DS(LVT_CMCI),
		    MASK(LVT_CMCI));
	}
#endif
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
	    VEC(LVT_TIMER),
	    DS(LVT_TIMER),
	    MASK(LVT_TIMER),
	    TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
	    & LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_PERFCNT),
	    DM(LVT_PERFCNT),
	    DS(LVT_PERFCNT),
	    MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
	    VEC(LVT_THERMAL),
	    DM(LVT_THERMAL),
	    DS(LVT_THERMAL),
	    MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT0),
	    DM(LVT_LINT0),
	    TM(LVT_LINT0),
	    IP(LVT_LINT0),
	    DS(LVT_LINT0),
	    MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
	    VEC(LVT_LINT1),
	    DM(LVT_LINT1),
	    TM(LVT_LINT1),
	    IP(LVT_LINT1),
	    DS(LVT_LINT1),
	    MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
	    VEC(LVT_ERROR),
	    DS(LVT_ERROR),
	    MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	/* Column ruler for the 256-bit maps below */
	kprintf("       ");
	for (i = 0xf; i >= 0; i--) {
		kprintf("%x%x%x%x", i, i, i, i);
	}
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	}
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	}
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--) {
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	}
	kprintf("\n");
}
433
434 boolean_t
435 lapic_probe(void)
436 {
437 uint32_t lo;
438 uint32_t hi;
439
440 if (cpuid_features() & CPUID_FEATURE_APIC) {
441 return TRUE;
442 }
443
444 if (cpuid_family() == 6 || cpuid_family() == 15) {
445 /*
446 * Mobile Pentiums:
447 * There may be a local APIC which wasn't enabled by BIOS.
448 * So we try to enable it explicitly.
449 */
450 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
451 lo &= ~MSR_IA32_APIC_BASE_BASE;
452 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
453 lo |= MSR_IA32_APIC_BASE_ENABLE;
454 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
455
456 /*
457 * Re-initialize cpu features info and re-check.
458 */
459 cpuid_set_info();
460 /* We expect this codepath will never be traversed
461 * due to EFI enabling the APIC. Reducing the APIC
462 * interrupt base dynamically is not supported.
463 */
464 if (cpuid_features() & CPUID_FEATURE_APIC) {
465 printf("Local APIC discovered and enabled\n");
466 lapic_os_enabled = TRUE;
467 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
468 return TRUE;
469 }
470 }
471
472 return FALSE;
473 }
474
/*
 * Shut the local APIC back down, but only if it was the OS (lapic_probe())
 * rather than the BIOS that enabled it. Masks all LVT sources, software-
 * disables the APIC via the SVR, then hardware-disables it via
 * IA32_APIC_BASE and refreshes the cached CPUID feature info.
 * Runs with preemption disabled so all accesses hit this CPU's APIC.
 */
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE) {
		return;
	}

	mp_disable_preemption();

	/* ExtINT: masked (only configured on interrupt-servicing CPUs) */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
516
517 boolean_t
518 cpu_can_exit(int cpu)
519 {
520 return cpu > lapic_max_interrupt_cpunum;
521 }
522
/*
 * Per-CPU local APIC configuration, run on every processor at startup.
 * CPU 0 additionally initializes the error-throttling threshold and parses
 * the "lapic_dont_panic" and "intcpumax" boot-args. Programs TPR, SVR and
 * all LVT entries for this CPU's APIC.
 */
void
lapic_configure(void)
{
	int value;

	/* One-time (boot cpu) init of the error-interrupt throttle window */
	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Default: route ExtINT to cpu 1 too when hyperthreading is present */
	if (cpu_number() == 0) {
		if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) {
			lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0);
		}
	}

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	/* Software-enable the APIC and set the spurious vector */
	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT: unmasked on the interrupt-servicing CPUs only */
	if (get_cpu_number() <= lapic_max_interrupt_cpunum) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
	}
#endif

	/* Error LVT: skip on the master cpu once errors have been masked */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
576
577 void
578 lapic_set_timer(
579 boolean_t interrupt_unmasked,
580 lapic_timer_mode_t mode,
581 lapic_timer_divide_t divisor,
582 lapic_timer_count_t initial_count)
583 {
584 uint32_t timer_vector;
585
586 mp_disable_preemption();
587 timer_vector = LAPIC_READ(LVT_TIMER);
588 timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);;
589 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
590 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
591 LAPIC_WRITE(LVT_TIMER, timer_vector);
592 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
593 LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
594 mp_enable_preemption();
595 }
596
/*
 * Configure the local APIC timer's mode, mask state and divider without
 * starting it (no initial-count write; use lapic_set_timer_fast() to arm).
 * Clears any TSC-deadline mode selection.
 */
void
lapic_config_timer(
	boolean_t interrupt_unmasked,
	lapic_timer_mode_t mode,
	lapic_timer_divide_t divisor)
{
	uint32_t timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
	    LAPIC_LVT_PERIODIC |
	    LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}
616
617 /*
618 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
619 */
620 void
621 lapic_config_tsc_deadline_timer(void)
622 {
623 uint32_t timer_vector;
624
625 DBG("lapic_config_tsc_deadline_timer()\n");
626 mp_disable_preemption();
627 timer_vector = LAPIC_READ(LVT_TIMER);
628 timer_vector &= ~(LAPIC_LVT_MASKED |
629 LAPIC_LVT_PERIODIC);
630 timer_vector |= LAPIC_LVT_TSC_DEADLINE;
631 LAPIC_WRITE(LVT_TIMER, timer_vector);
632
633 /* Serialize writes per Intel OSWG */
634 do {
635 lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32));
636 } while (lapic_get_tsc_deadline_timer() == 0);
637 lapic_set_tsc_deadline_timer(0);
638
639 mp_enable_preemption();
640 DBG("lapic_config_tsc_deadline_timer() done\n");
641 }
642
/*
 * Fast-path arm of a previously configured (lapic_config_timer) countdown
 * timer: unmask the timer LVT and write the initial count, which starts it.
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
650
/*
 * Arm (or, with deadline 0, disarm) the TSC-deadline timer.
 * Requires the timer LVT to be in TSC-deadline mode
 * (see lapic_config_tsc_deadline_timer()).
 */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
657
/* Return the currently armed TSC deadline (0 when disarmed/fired). */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
663
664 void
665 lapic_get_timer(
666 lapic_timer_mode_t *mode,
667 lapic_timer_divide_t *divisor,
668 lapic_timer_count_t *initial_count,
669 lapic_timer_count_t *current_count)
670 {
671 mp_disable_preemption();
672 if (mode) {
673 *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
674 periodic : one_shot;
675 }
676 if (divisor) {
677 *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
678 }
679 if (initial_count) {
680 *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
681 }
682 if (current_count) {
683 *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
684 }
685 mp_enable_preemption();
686 }
687
/* Signal end-of-interrupt to the local APIC (any write value acks). */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
693
/* Exported wrapper around the inline EOI helper. */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
699
/* Re-arm the performance-counter LVT: vector set, mask bit clear. */
void
lapic_unmask_perfcnt_interrupt(void)
{
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}
705
706 void
707 lapic_set_perfcnt_interrupt_mask(boolean_t mask)
708 {
709 uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
710 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
711 }
712
/*
 * Register a handler for a local APIC interrupt source. 'vector' may be
 * given either as an absolute IDT vector (> lapic_interrupt_base, which is
 * normalized here) or as a LAPIC-relative index. Panics on any vector that
 * is not a recognized LAPIC source.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base) {
		vector -= lapic_interrupt_base;
	}

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
		    vector, func);
	}
}
735
/* Convenience wrapper: register the performance-monitor interrupt handler. */
void
lapic_set_pmi_func(i386_intr_func_t func)
{
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}
741
/*
 * Dispatch a local APIC interrupt to its registered handler.
 * 'interrupt_num' is an absolute vector; it is normalized by subtracting
 * lapic_interrupt_base. Returns 1 if the interrupt was handled here
 * (EOI issued as needed), 0 to let the platform expert handle it.
 * NMIs arrive below the base and are special-cased up front.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int retval = 0;
	int esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* Below the LAPIC range: only the NMI slot is dispatched */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		} else {
			return 0;
		}
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
		}
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			/* Throttle: mask the vector on an error burst */
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			} else {
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
		/* Software PMC interrupt: intentionally no action */
		{
		}
		break;
	case LAPIC_KICK_INTERRUPT:
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	}

	return retval;
}
852
/*
 * Recover local APIC timer state after returning from SMM: if a buggy SMI
 * handler consumed a timer interrupt without issuing an EOI, ack it and,
 * if the one-shot countdown already expired, re-trigger a minimal countdown
 * so the OS still sees a timer interrupt. No-op unless the OS enabled the
 * APIC itself. Runs with interrupts disabled.
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE) {
		return;
	}

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
886
/*
 * Send a fixed-delivery inter-processor interrupt to 'cpu' on vector
 * 'vector' (normalized to an absolute vector if given LAPIC-relative).
 * Spins until any previously issued IPI has been delivered before writing
 * the ICR. Runs with interrupts disabled around the ICR access.
 */
void
lapic_send_ipi(int cpu, int vector)
{
	boolean_t state;

	if (vector < lapic_interrupt_base) {
		vector += lapic_interrupt_base;
	}

	state = ml_set_interrupts_enabled(FALSE);

	/* Wait for pending outgoing send to complete */
	while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
		cpu_pause();
	}

	LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);

	(void) ml_set_interrupts_enabled(state);
}
907
908 /*
909 * The following interfaces are privately exported to AICPM.
910 */
911
912 boolean_t
913 lapic_is_interrupt_pending(void)
914 {
915 int i;
916
917 for (i = 0; i < 8; i += 1) {
918 if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
919 (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) {
920 return TRUE;
921 }
922 }
923
924 return FALSE;
925 }
926
927 boolean_t
928 lapic_is_interrupting(uint8_t vector)
929 {
930 int i;
931 int bit;
932 uint32_t irr;
933 uint32_t isr;
934
935 i = vector / 32;
936 bit = 1 << (vector % 32);
937
938 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
939 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
940
941 if ((irr | isr) & bit) {
942 return TRUE;
943 }
944
945 return FALSE;
946 }
947
/*
 * Accumulate, into intrs[0..255], a count for every vector currently
 * pending (IRR) or in service (ISR) on this CPU's local APIC. Vectors
 * 0-15 are architecturally reserved and skipped. No-op if intrs is NULL.
 */
void
lapic_interrupt_counts(uint64_t intrs[256])
{
	int i;
	int j;
	int bit;
	uint32_t irr;
	uint32_t isr;

	if (intrs == NULL) {
		return;
	}

	for (i = 0; i < 8; i += 1) {
		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
		isr = LAPIC_READ_OFFSET(ISR_BASE, i);

		if ((isr | irr) == 0) {
			continue;
		}

		/* First word: start at bit 16 to skip reserved vectors 0-15 */
		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
			bit = (32 * i) + j;
			if ((isr | irr) & (1 << j)) {
				intrs[bit] += 1;
			}
		}
	}
}
977
/*
 * Stop the local APIC timer on this CPU, whichever mode it is in.
 */
void
lapic_disable_timer(void)
{
	uint32_t lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		/* Writing 0 disarms the TSC-deadline timer */
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/* Read back to serialize the register writes */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
997
/* SPI returning the CMCI vector, or 0 when CMCI is absent/not configured */
uint8_t
lapic_get_cmci_vector(void)
{
	uint8_t cmci_vector = 0;
#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present()) {
		cmci_vector = LAPIC_VECTOR(CMCI);
	}
#endif
	return cmci_vector;
}
1011
1012 #if DEVELOPMENT || DEBUG
extern void lapic_trigger_MC(void);
/*
 * Debug-only: deliberately provoke a machine check by performing an
 * illegal 64-bit access to the (32-bit) memory-mapped APIC ID register.
 */
void
lapic_trigger_MC(void)
{
	/* A 64-bit access to any register will do it. */
	volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
	dummy++;
}
1021 #endif