/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/debug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/postcode.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#include <sys/kdebug.h>

#if MP_DEBUG
#define PAUSE delay(1000000)
#define DBG(x...) kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif /* MP_DEBUG */

lapic_ops_table_t *lapic_ops; /* Lapic operations switch */

static vm_map_offset_t lapic_pbase; /* Physical base memory-mapped regs */
static vm_offset_t lapic_vbase; /* Virtual base memory-mapped regs */

static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;

#ifdef MP_DEBUG
void
lapic_cpu_map_dump(void)
{
        int i;

        for (i = 0; i < MAX_CPUS; i++) {
                if (cpu_to_lapic[i] == -1)
                        continue;
                kprintf("cpu_to_lapic[%d]: %d\n",
                        i, cpu_to_lapic[i]);
        }
        for (i = 0; i < MAX_LAPICIDS; i++) {
                if (lapic_to_cpu[i] == -1)
                        continue;
                kprintf("lapic_to_cpu[%d]: %d\n",
                        i, lapic_to_cpu[i]);
        }
}
#endif /* MP_DEBUG */

static void
legacy_init(void)
{
        int result;
        vm_map_entry_t entry;
        vm_map_offset_t lapic_vbase64;
        /* Establish a map to the local apic */

        if (lapic_vbase == 0) {
                lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
                result = vm_map_find_space(kernel_map,
                                &lapic_vbase64,
                                round_page(LAPIC_SIZE), 0,
                                VM_MAKE_TAG(VM_KERN_MEMORY_IOKIT), &entry);
                /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
                 */
                lapic_vbase = (vm_offset_t) lapic_vbase64;
                if (result != KERN_SUCCESS) {
                        panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
                }
                vm_map_unlock(kernel_map);

                /*
                 * Map in the local APIC non-cacheable, as recommended by Intel
                 * in section 8.4.1 of the "System Programming Guide".
                 * In fact, this is redundant because EFI will have assigned an
                 * MTRR physical range containing the local APIC's MMIO space as
                 * UC and this will override the default PAT setting.
                 */
                pmap_enter(pmap_kernel(),
                                lapic_vbase,
                                (ppnum_t) i386_btop(lapic_pbase),
                                VM_PROT_READ|VM_PROT_WRITE,
                                VM_PROT_NONE,
                                VM_WIMG_IO,
                                TRUE);
        }

        /*
         * Set flat delivery model, logical processor id
         * This should already be the default set.
         */
        LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
        LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}


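/*
 * Legacy (xAPIC) mode: registers are read and written through the
 * memory-mapped window established above by legacy_init().
 */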
static uint32_t
legacy_read(lapic_register_t reg)
{
        return *LAPIC_MMIO(reg);
}

static void
legacy_write(lapic_register_t reg, uint32_t value)
{
        *LAPIC_MMIO(reg) = value;
}

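/*
 * In xAPIC mode the 64-bit Interrupt Command Register is split across two
 * 32-bit MMIO registers: ICRD holds the destination (upper half) and ICR
 * holds the command (lower half). Writing the low half sends the IPI, so
 * the destination is written first.
 */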
static uint64_t
legacy_read_icr(void)
{
        return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
}

static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
        *LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
        *LAPIC_MMIO(ICR) = cmd;
}

static lapic_ops_table_t legacy_ops = {
        legacy_init,
        legacy_read,
        legacy_write,
        legacy_read_icr,
        legacy_write_icr
};

static boolean_t is_x2apic = FALSE;

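/*
 * x2APIC mode: registers are accessed through MSRs (the LAPIC_MSR()
 * mapping) rather than the memory-mapped window, and the APIC ID is
 * extended to 32 bits.
 */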
static void
x2apic_init(void)
{
        uint32_t lo;
        uint32_t hi;

        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
                lo |= MSR_IA32_APIC_BASE_EXTENDED;
                wrmsr(MSR_IA32_APIC_BASE, lo, hi);
                kprintf("x2APIC mode enabled\n");
        }
}

static uint32_t
x2apic_read(lapic_register_t reg)
{
        uint32_t lo;
        uint32_t hi;

        rdmsr(LAPIC_MSR(reg), lo, hi);
        return lo;
}

static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
        wrmsr(LAPIC_MSR(reg), value, 0);
}

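/*
 * In x2APIC mode the ICR is a single 64-bit MSR; the destination lives in
 * the upper 32 bits, so one wrmsr both selects the target and issues the
 * command.
 */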
static uint64_t
x2apic_read_icr(void)
{
        return rdmsr64(LAPIC_MSR(ICR));
}

static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
        wrmsr(LAPIC_MSR(ICR), cmd, dst);
}

static lapic_ops_table_t x2apic_ops = {
        x2apic_init,
        x2apic_read,
        x2apic_write,
        x2apic_read_icr,
        x2apic_write_icr
};

void
lapic_init(void)
{
        uint32_t lo;
        uint32_t hi;
        boolean_t is_boot_processor;
        boolean_t is_lapic_enabled;

        /* Examine the local APIC state */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
        is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
        is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
        lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
        kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
                is_lapic_enabled ? "enabled" : "disabled",
                is_x2apic ? "extended" : "legacy",
                is_boot_processor ? "BSP" : "AP");
        if (!is_boot_processor || !is_lapic_enabled)
                panic("Unexpected local APIC state\n");

        /*
         * If x2APIC is available and not already enabled, enable it.
         * Unless overridden by boot-arg.
         */
        if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
                PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic));
                kprintf("x2APIC supported %s be enabled\n",
                        is_x2apic ? "and will" : "but will not");
        }

        lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

        LAPIC_INIT();

        kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
        if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
                panic("Local APIC version 0x%x, 0x14 or more expected\n",
                        (LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
        }

        /* Set up the lapic_id <-> cpu_number map and add this boot processor */
        lapic_cpu_map_init();
        lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
        current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0];
        kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}


static int
lapic_esr_read(void)
{
        /* write-read register */
        LAPIC_WRITE(ERROR_STATUS, 0);
        return LAPIC_READ(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
        LAPIC_WRITE(ERROR_STATUS, 0);
        LAPIC_WRITE(ERROR_STATUS, 0);
}

static const char *DM_str[8] = {
        "Fixed",
        "Lowest Priority",
        "Invalid",
        "Invalid",
        "NMI",
        "Reset",
        "Invalid",
        "ExtINT"};

static const char *TMR_str[] = {
        "OneShot",
        "Periodic",
        "TSC-Deadline",
        "Illegal"
};

void
lapic_dump(void)
{
        int i;

#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
        LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
        (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
        DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
        BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
        (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
        (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

        kprintf("LAPIC %d at %p version 0x%x\n",
                (LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
                (void *) lapic_vbase,
                LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
        kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
                LAPIC_READ(TPR)&LAPIC_TPR_MASK,
                LAPIC_READ(APR)&LAPIC_APR_MASK,
                LAPIC_READ(PPR)&LAPIC_PPR_MASK);
        kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
                is_x2apic ? 0 : LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
                LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
        kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
                BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
                BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
                LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
        if (mca_is_cmci_present())
                kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
                        VEC(LVT_CMCI),
                        DM(LVT_CMCI),
                        DS(LVT_CMCI),
                        MASK(LVT_CMCI));
#endif
        kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
                VEC(LVT_TIMER),
                DS(LVT_TIMER),
                MASK(LVT_TIMER),
                TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
                        & LAPIC_LVT_TMR_MASK]);
        kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
        kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
        kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
        kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
                VEC(LVT_PERFCNT),
                DM(LVT_PERFCNT),
                DS(LVT_PERFCNT),
                MASK(LVT_PERFCNT));
        kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
                VEC(LVT_THERMAL),
                DM(LVT_THERMAL),
                DS(LVT_THERMAL),
                MASK(LVT_THERMAL));
        kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
                VEC(LVT_LINT0),
                DM(LVT_LINT0),
                TM(LVT_LINT0),
                IP(LVT_LINT0),
                DS(LVT_LINT0),
                MASK(LVT_LINT0));
        kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
                VEC(LVT_LINT1),
                DM(LVT_LINT1),
                TM(LVT_LINT1),
                IP(LVT_LINT1),
                DS(LVT_LINT1),
                MASK(LVT_LINT1));
        kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
                VEC(LVT_ERROR),
                DS(LVT_ERROR),
                MASK(LVT_ERROR));
        kprintf("ESR: %08x \n", lapic_esr_read());
        kprintf(" ");
        for(i=0xf; i>=0; i--)
                kprintf("%x%x%x%x",i,i,i,i);
        kprintf("\n");
        kprintf("TMR: 0x");
        for(i=7; i>=0; i--)
                kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE, i));
        kprintf("\n");
        kprintf("IRR: 0x");
        for(i=7; i>=0; i--)
                kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE, i));
        kprintf("\n");
        kprintf("ISR: 0x");
        for(i=7; i >= 0; i--)
                kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE, i));
        kprintf("\n");
}

boolean_t
lapic_probe(void)
{
        uint32_t lo;
        uint32_t hi;

        if (cpuid_features() & CPUID_FEATURE_APIC)
                return TRUE;

        if (cpuid_family() == 6 || cpuid_family() == 15) {
                /*
                 * Mobile Pentiums:
                 * There may be a local APIC which wasn't enabled by BIOS.
                 * So we try to enable it explicitly.
                 */
                rdmsr(MSR_IA32_APIC_BASE, lo, hi);
                lo &= ~MSR_IA32_APIC_BASE_BASE;
                lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
                lo |= MSR_IA32_APIC_BASE_ENABLE;
                wrmsr(MSR_IA32_APIC_BASE, lo, hi);

                /*
                 * Re-initialize cpu features info and re-check.
                 */
                cpuid_set_info();
                /* We expect this codepath will never be traversed
                 * due to EFI enabling the APIC. Reducing the APIC
                 * interrupt base dynamically is not supported.
                 */
                if (cpuid_features() & CPUID_FEATURE_APIC) {
                        printf("Local APIC discovered and enabled\n");
                        lapic_os_enabled = TRUE;
                        lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
                        return TRUE;
                }
        }

        return FALSE;
}

void
lapic_shutdown(void)
{
        uint32_t lo;
        uint32_t hi;
        uint32_t value;

        /* Shutdown if local APIC was enabled by OS */
        if (lapic_os_enabled == FALSE)
                return;

        mp_disable_preemption();

        /* ExtINT: masked */
        if (get_cpu_number() == master_cpu) {
                value = LAPIC_READ(LVT_LINT0);
                value |= LAPIC_LVT_MASKED;
                LAPIC_WRITE(LVT_LINT0, value);
        }

        /* Error: masked */
        LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

        /* Timer: masked */
        LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

        /* Perfmon: masked */
        LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

        /* APIC software disabled */
        LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

        /* Bypass the APIC completely and update cpu features */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        lo &= ~MSR_IA32_APIC_BASE_ENABLE;
        wrmsr(MSR_IA32_APIC_BASE, lo, hi);
        cpuid_set_info();

        mp_enable_preemption();
}

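/*
 * Program this CPU's local APIC: accept all priorities, software-enable
 * the APIC via the spurious vector register, and set up the LVT entries
 * (ExtINT on the master only, timer, perfmon, thermal, CMCI and error).
 */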
void
lapic_configure(void)
{
        int value;

        if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
                nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
                if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
                        lapic_dont_panic = FALSE;
                }
        }

        /* Accept all */
        LAPIC_WRITE(TPR, 0);

        LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

        /* ExtINT */
        if (get_cpu_number() == master_cpu) {
                value = LAPIC_READ(LVT_LINT0);
                value &= ~LAPIC_LVT_MASKED;
                value |= LAPIC_LVT_DM_EXTINT;
                LAPIC_WRITE(LVT_LINT0, value);
        }

        /* Timer: unmasked, one-shot */
        LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

        /* Perfmon: unmasked */
        LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

        /* Thermal: unmasked */
        LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
        /* CMCI, if available */
        if (mca_is_cmci_present())
                LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

        if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
                (cpu_number() != master_cpu)) {
                lapic_esr_clear();
                LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
        }
}

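/*
 * Program the local APIC timer: the LVT timer entry selects the vector,
 * mask state and mode (one-shot vs periodic), the divide-configuration
 * register scales the clock, and the initial-count register arms the
 * countdown.
 */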
void
lapic_set_timer(
        boolean_t interrupt_unmasked,
        lapic_timer_mode_t mode,
        lapic_timer_divide_t divisor,
        lapic_timer_count_t initial_count)
{
        uint32_t timer_vector;

        mp_disable_preemption();
        timer_vector = LAPIC_READ(LVT_TIMER);
        timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
        timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
        timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
        LAPIC_WRITE(LVT_TIMER, timer_vector);
        LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
        LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
        mp_enable_preemption();
}

void
lapic_config_timer(
        boolean_t interrupt_unmasked,
        lapic_timer_mode_t mode,
        lapic_timer_divide_t divisor)
{
        uint32_t timer_vector;

        mp_disable_preemption();
        timer_vector = LAPIC_READ(LVT_TIMER);
        timer_vector &= ~(LAPIC_LVT_MASKED |
                        LAPIC_LVT_PERIODIC |
                        LAPIC_LVT_TSC_DEADLINE);
        timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
        timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
        LAPIC_WRITE(LVT_TIMER, timer_vector);
        LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
        mp_enable_preemption();
}

/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
        uint32_t timer_vector;

        DBG("lapic_config_tsc_deadline_timer()\n");
        mp_disable_preemption();
        timer_vector = LAPIC_READ(LVT_TIMER);
        timer_vector &= ~(LAPIC_LVT_MASKED |
                        LAPIC_LVT_PERIODIC);
        timer_vector |= LAPIC_LVT_TSC_DEADLINE;
        LAPIC_WRITE(LVT_TIMER, timer_vector);

        /* Serialize writes per Intel OSWG */
        do {
                lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
        } while (lapic_get_tsc_deadline_timer() == 0);
        lapic_set_tsc_deadline_timer(0);

        mp_enable_preemption();
        DBG("lapic_config_tsc_deadline_timer() done\n");
}

void
lapic_set_timer_fast(
        lapic_timer_count_t initial_count)
{
        LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
        LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}

void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
        /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
        wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}

uint64_t
lapic_get_tsc_deadline_timer(void)
{
        return rdmsr64(MSR_IA32_TSC_DEADLINE);
}

void
lapic_get_timer(
        lapic_timer_mode_t *mode,
        lapic_timer_divide_t *divisor,
        lapic_timer_count_t *initial_count,
        lapic_timer_count_t *current_count)
{
        mp_disable_preemption();
        if (mode)
                *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
                        periodic : one_shot;
        if (divisor)
                *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
        if (initial_count)
                *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
        if (current_count)
                *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
        mp_enable_preemption();
}

static inline void
_lapic_end_of_interrupt(void)
{
        LAPIC_WRITE(EOI, 0);
}

void
lapic_end_of_interrupt(void)
{
        _lapic_end_of_interrupt();
}

void lapic_unmask_perfcnt_interrupt(void) {
        LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}

void lapic_set_perfcnt_interrupt_mask(boolean_t mask) {
        uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
        LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
}

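/*
 * Register a handler for one of the local-APIC-owned interrupt vectors.
 * The vector may be passed either as an absolute vector number or as an
 * offset relative to lapic_interrupt_base.
 */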
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
        if (vector > lapic_interrupt_base)
                vector -= lapic_interrupt_base;

        switch (vector) {
        case LAPIC_NMI_INTERRUPT:
        case LAPIC_INTERPROCESSOR_INTERRUPT:
        case LAPIC_TIMER_INTERRUPT:
        case LAPIC_THERMAL_INTERRUPT:
        case LAPIC_PERFCNT_INTERRUPT:
        case LAPIC_CMCI_INTERRUPT:
        case LAPIC_PM_INTERRUPT:
                lapic_intr_func[vector] = func;
                break;
        default:
                panic("lapic_set_intr_func(%d,%p) invalid vector\n",
                        vector, func);
        }
}

void lapic_set_pmi_func(i386_intr_func_t func) {
        lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}

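/*
 * Dispatch a local APIC interrupt to its registered handler, issuing the
 * EOI here where required. Returns 1 if the interrupt was handled, 0 if
 * it should be passed up to the platform expert.
 */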
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
        int retval = 0;
        int esr = -1;

        interrupt_num -= lapic_interrupt_base;
        if (interrupt_num < 0) {
                if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
                        lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
                        retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
                        return retval;
                }
                else
                        return 0;
        }

        switch(interrupt_num) {
        case LAPIC_TIMER_INTERRUPT:
        case LAPIC_THERMAL_INTERRUPT:
        case LAPIC_INTERPROCESSOR_INTERRUPT:
        case LAPIC_PM_INTERRUPT:
                if (lapic_intr_func[interrupt_num] != NULL)
                        (void) (*lapic_intr_func[interrupt_num])(state);
                _lapic_end_of_interrupt();
                retval = 1;
                break;
        case LAPIC_PERFCNT_INTERRUPT:
                /* If a function has been registered, invoke it. Otherwise,
                 * pass up to IOKit.
                 */
                if (lapic_intr_func[interrupt_num] != NULL) {
                        (void) (*lapic_intr_func[interrupt_num])(state);
                        /* Unmask the interrupt since we don't expect legacy users
                         * to be responsible for it.
                         */
                        lapic_unmask_perfcnt_interrupt();
                        _lapic_end_of_interrupt();
                        retval = 1;
                }
                break;
        case LAPIC_CMCI_INTERRUPT:
                if (lapic_intr_func[interrupt_num] != NULL)
                        (void) (*lapic_intr_func[interrupt_num])(state);
                /* return 0 for platform expert to handle */
                break;
        case LAPIC_ERROR_INTERRUPT:
                /* We treat error interrupts on APs as fatal.
                 * The current interrupt steering scheme directs most
                 * external interrupts to the BSP (HPET interrupts being
                 * a notable exception); hence, such an error
                 * on an AP may signify LVT corruption (with "may" being
                 * the operative word). On the BSP, we adopt a more
                 * lenient approach, in the interests of enhancing
                 * debuggability and reducing fragility.
                 * If "lapic_error_count_threshold" error interrupts
                 * occur within "lapic_error_time_threshold" absolute
                 * time units, we mask the error vector and log. The
                 * error interrupts themselves are likely
                 * side effects of issues which are beyond the purview of
                 * the local APIC interrupt handler, however. The Error
                 * Status Register value (the illegal destination
                 * vector code is one observed in practice) indicates
                 * the immediate cause of the error.
                 */
                esr = lapic_esr_read();
                lapic_dump();

                if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
                        cpu_number() != master_cpu) {
                        panic("Local APIC error, ESR: %d\n", esr);
                }

                if (cpu_number() == master_cpu) {
                        uint64_t abstime = mach_absolute_time();
                        if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
                                if (lapic_master_error_count++ > lapic_error_count_threshold) {
                                        lapic_errors_masked = TRUE;
                                        LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
                                        printf("Local APIC: errors masked\n");
                                }
                        }
                        else {
                                lapic_last_master_error = abstime;
                                lapic_master_error_count = 0;
                        }
                        printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
                }

                _lapic_end_of_interrupt();
                retval = 1;
                break;
        case LAPIC_SPURIOUS_INTERRUPT:
                kprintf("SPIV\n");
                /* No EOI required here */
                retval = 1;
                break;
        case LAPIC_PMC_SW_INTERRUPT:
                {
                }
                break;
        case LAPIC_KICK_INTERRUPT:
                _lapic_end_of_interrupt();
                retval = 1;
                break;
        }

        return retval;
}

void
lapic_smm_restore(void)
{
        boolean_t state;

        if (lapic_os_enabled == FALSE)
                return;

        state = ml_set_interrupts_enabled(FALSE);

        if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
                /*
                 * Bogus SMI handler enables interrupts but does not know about
                 * local APIC interrupt sources. When APIC timer counts down to
                 * zero while in SMM, local APIC will end up waiting for an EOI
                 * but no interrupt was delivered to the OS.
                 */
                _lapic_end_of_interrupt();

                /*
                 * timer is one-shot, trigger another quick countdown to trigger
                 * another timer interrupt.
                 */
                if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
                        LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
                }

                kprintf("lapic_smm_restore\n");
        }

        ml_set_interrupts_enabled(state);
}

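/*
 * Send a fixed-delivery IPI to the given cpu. We spin until any previous
 * ICR send has been accepted (delivery status idle) before writing the
 * new command.
 */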
void
lapic_send_ipi(int cpu, int vector)
{
        boolean_t state;

        if (vector < lapic_interrupt_base)
                vector += lapic_interrupt_base;

        state = ml_set_interrupts_enabled(FALSE);

        /* Wait for pending outgoing send to complete */
        while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
                cpu_pause();
        }

        LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);

        (void) ml_set_interrupts_enabled(state);
}

/*
 * The following interfaces are privately exported to AICPM.
 */

boolean_t
lapic_is_interrupt_pending(void)
{
        int i;

        for (i = 0; i < 8; i += 1) {
                if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
                        (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
                        return (TRUE);
        }

        return (FALSE);
}

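/*
 * The IRR and ISR are each 256-bit vectors presented as eight 32-bit
 * registers, one bit per interrupt vector; the helpers below index into
 * them to test or count individual vectors.
 */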
boolean_t
lapic_is_interrupting(uint8_t vector)
{
        int i;
        int bit;
        uint32_t irr;
        uint32_t isr;

        i = vector / 32;
        bit = 1 << (vector % 32);

        irr = LAPIC_READ_OFFSET(IRR_BASE, i);
        isr = LAPIC_READ_OFFSET(ISR_BASE, i);

        if ((irr | isr) & bit)
                return (TRUE);

        return (FALSE);
}

void
lapic_interrupt_counts(uint64_t intrs[256])
{
        int i;
        int j;
        int bit;
        uint32_t irr;
        uint32_t isr;

        if (intrs == NULL)
                return;

        for (i = 0; i < 8; i += 1) {
                irr = LAPIC_READ_OFFSET(IRR_BASE, i);
                isr = LAPIC_READ_OFFSET(ISR_BASE, i);

                if ((isr | irr) == 0)
                        continue;

                for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
                        bit = (32 * i) + j;
                        if ((isr | irr) & (1 << j))
                                intrs[bit] += 1;
                }
        }
}

void
lapic_disable_timer(void)
{
        uint32_t lvt_timer;

        /*
         * If we're in deadline timer mode,
         * simply clear the deadline timer, otherwise
         * mask the timer interrupt and clear the countdown.
         */
        lvt_timer = LAPIC_READ(LVT_TIMER);
        if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
                wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
        } else {
                LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
                LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
                lvt_timer = LAPIC_READ(LVT_TIMER);
        }
}

/* SPI returning the CMCI vector */
uint8_t
lapic_get_cmci_vector(void)
{
        uint8_t cmci_vector = 0;
#if CONFIG_MCA
        /* CMCI, if available */
        if (mca_is_cmci_present())
                cmci_vector = LAPIC_VECTOR(CMCI);
#endif
        return cmci_vector;
}

#if DEVELOPMENT || DEBUG
extern void lapic_trigger_MC(void);
void
lapic_trigger_MC(void)
{
        /* A 64-bit access to any register will do it. */
        volatile uint64_t dummy = *(volatile uint64_t *) (volatile void *) LAPIC_MMIO(ID);
        dummy++;
}
#endif