osfmk/i386/lapic_native.c (xnu-2050.22.13)
/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/debug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/postcode.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif

#include <sys/kdebug.h>

#if MP_DEBUG
#define PAUSE       delay(1000000)
#define DBG(x...)   kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif /* MP_DEBUG */

lapic_ops_table_t *lapic_ops;   /* Lapic operations switch */

static vm_map_offset_t lapic_pbase;  /* Physical base memory-mapped regs */
static vm_offset_t     lapic_vbase;  /* Virtual base memory-mapped regs */

static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

static boolean_t lapic_errors_masked = FALSE;
static uint64_t  lapic_last_master_error = 0;
static uint64_t  lapic_error_time_threshold = 0;
static unsigned  lapic_master_error_count = 0;
static unsigned  lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;

#ifdef MP_DEBUG
void
lapic_cpu_map_dump(void)
{
    int i;

    for (i = 0; i < MAX_CPUS; i++) {
        if (cpu_to_lapic[i] == -1)
            continue;
        kprintf("cpu_to_lapic[%d]: %d\n",
            i, cpu_to_lapic[i]);
    }
    for (i = 0; i < MAX_LAPICIDS; i++) {
        if (lapic_to_cpu[i] == -1)
            continue;
        kprintf("lapic_to_cpu[%d]: %d\n",
            i, lapic_to_cpu[i]);
    }
}
#endif /* MP_DEBUG */

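/*
 * Legacy (xAPIC) access: the local APIC's registers are memory-mapped
 * at lapic_pbase. legacy_init() wires a kernel virtual mapping to that
 * physical range, and legacy_read()/legacy_write() go through the
 * resulting MMIO window.
 */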
static void
legacy_init(void)
{
    int             result;
    vm_map_entry_t  entry;
    vm_map_offset_t lapic_vbase64;
    /* Establish a map to the local apic */

    lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
    result = vm_map_find_space(kernel_map,
                               &lapic_vbase64,
                               round_page(LAPIC_SIZE), 0,
                               VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
    /* Convert 64-bit vm_map_offset_t to "pointer-sized" vm_offset_t */
    lapic_vbase = (vm_offset_t) lapic_vbase64;
    if (result != KERN_SUCCESS) {
        panic("legacy_init: vm_map_find_space FAILED (err=%d)", result);
    }
    vm_map_unlock(kernel_map);

    /*
     * Map in the local APIC non-cacheable, as recommended by Intel
     * in section 8.4.1 of the "System Programming Guide".
     * In fact, this is redundant because EFI will have assigned an
     * MTRR physical range containing the local APIC's MMIO space as
     * UC and this will override the default PAT setting.
     */
    pmap_enter(pmap_kernel(),
               lapic_vbase,
               (ppnum_t) i386_btop(lapic_pbase),
               VM_PROT_READ|VM_PROT_WRITE,
               VM_PROT_NONE,
               VM_WIMG_IO,
               TRUE);
}


static uint32_t
legacy_read(lapic_register_t reg)
{
    return *LAPIC_MMIO(reg);
}

static void
legacy_write(lapic_register_t reg, uint32_t value)
{
    *LAPIC_MMIO(reg) = value;
}

static lapic_ops_table_t legacy_ops = {
    legacy_init,
    legacy_read,
    legacy_write
};

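/*
 * x2APIC access: registers are read and written through MSRs
 * (rdmsr/wrmsr of LAPIC_MSR(reg)) rather than MMIO, so no mapping
 * is required and x2apic_init() has nothing to do.
 */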
static void
x2apic_init(void)
{
}

static uint32_t
x2apic_read(lapic_register_t reg)
{
    uint32_t lo;
    uint32_t hi;

    rdmsr(LAPIC_MSR(reg), lo, hi);
    return lo;
}

static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
    wrmsr(LAPIC_MSR(reg), value, 0);
}

static lapic_ops_table_t x2apic_ops = {
    x2apic_init,
    x2apic_read,
    x2apic_write
};


void
lapic_init(void)
{
    uint32_t  lo;
    uint32_t  hi;
    boolean_t is_boot_processor;
    boolean_t is_lapic_enabled;
    boolean_t is_x2apic;

    /* Examine the local APIC state */
    rdmsr(MSR_IA32_APIC_BASE, lo, hi);
    is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
    is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
    is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
    lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
    kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
            is_lapic_enabled ? "enabled" : "disabled",
            is_x2apic ? "extended" : "legacy",
            is_boot_processor ? "BSP" : "AP");
    if (!is_boot_processor || !is_lapic_enabled)
        panic("Unexpected local APIC state\n");

    lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

    lapic_ops->init();

    if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
        panic("Local APIC version 0x%x, 0x14 or more expected\n",
              (LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
    }

    /* Set up the lapic_id <-> cpu_number map and add this boot processor */
    lapic_cpu_map_init();
    lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
    kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
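
/*
 * From here on, local APIC registers are accessed through the ops table
 * selected above (legacy MMIO or x2APIC MSRs) via the LAPIC_READ()/
 * LAPIC_WRITE() macros from <i386/lapic.h>.
 */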

static int
lapic_esr_read(void)
{
    /* write-read register */
    LAPIC_WRITE(ERROR_STATUS, 0);
    return LAPIC_READ(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
    LAPIC_WRITE(ERROR_STATUS, 0);
    LAPIC_WRITE(ERROR_STATUS, 0);
}

static const char *DM_str[8] = {
    "Fixed",
    "Lowest Priority",
    "Invalid",
    "Invalid",
    "NMI",
    "Reset",
    "Invalid",
    "ExtINT"
};

static const char *TMR_str[] = {
    "OneShot",
    "Periodic",
    "TSC-Deadline",
    "Illegal"
};

void
lapic_dump(void)
{
    int i;

#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
    LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
    (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
    DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
    BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
    (LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
    (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

    kprintf("LAPIC %d at %p version 0x%x\n",
            (LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
            (void *) lapic_vbase,
            LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
    kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
            LAPIC_READ(TPR)&LAPIC_TPR_MASK,
            LAPIC_READ(APR)&LAPIC_APR_MASK,
            LAPIC_READ(PPR)&LAPIC_PPR_MASK);
    kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
            LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
            LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
    kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
            BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
            BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
            LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
    if (mca_is_cmci_present())
        kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
                VEC(LVT_CMCI),
                DM(LVT_CMCI),
                DS(LVT_CMCI),
                MASK(LVT_CMCI));
#endif
    kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
            VEC(LVT_TIMER),
            DS(LVT_TIMER),
            MASK(LVT_TIMER),
            TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
                    & LAPIC_LVT_TMR_MASK]);
    kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
    kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
    kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
    kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
            VEC(LVT_PERFCNT),
            DM(LVT_PERFCNT),
            DS(LVT_PERFCNT),
            MASK(LVT_PERFCNT));
    kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
            VEC(LVT_THERMAL),
            DM(LVT_THERMAL),
            DS(LVT_THERMAL),
            MASK(LVT_THERMAL));
    kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
            VEC(LVT_LINT0),
            DM(LVT_LINT0),
            TM(LVT_LINT0),
            IP(LVT_LINT0),
            DS(LVT_LINT0),
            MASK(LVT_LINT0));
    kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
            VEC(LVT_LINT1),
            DM(LVT_LINT1),
            TM(LVT_LINT1),
            IP(LVT_LINT1),
            DS(LVT_LINT1),
            MASK(LVT_LINT1));
    kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
            VEC(LVT_ERROR),
            DS(LVT_ERROR),
            MASK(LVT_ERROR));
    kprintf("ESR: %08x \n", lapic_esr_read());
    kprintf(" ");
    for(i=0xf; i>=0; i--)
        kprintf("%x%x%x%x", i, i, i, i);
    kprintf("\n");
    kprintf("TMR: 0x");
    for(i=7; i>=0; i--)
        kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
    kprintf("\n");
    kprintf("IRR: 0x");
    for(i=7; i>=0; i--)
        kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
    kprintf("\n");
    kprintf("ISR: 0x");
    for(i=7; i >= 0; i--)
        kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
    kprintf("\n");
}

boolean_t
lapic_probe(void)
{
    uint32_t lo;
    uint32_t hi;

    if (cpuid_features() & CPUID_FEATURE_APIC)
        return TRUE;

    if (cpuid_family() == 6 || cpuid_family() == 15) {
        /*
         * Mobile Pentiums:
         * There may be a local APIC which wasn't enabled by BIOS.
         * So we try to enable it explicitly.
         */
        rdmsr(MSR_IA32_APIC_BASE, lo, hi);
        lo &= ~MSR_IA32_APIC_BASE_BASE;
        lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
        lo |= MSR_IA32_APIC_BASE_ENABLE;
        wrmsr(MSR_IA32_APIC_BASE, lo, hi);

        /*
         * Re-initialize cpu features info and re-check.
         */
        cpuid_set_info();
        if (cpuid_features() & CPUID_FEATURE_APIC) {
            printf("Local APIC discovered and enabled\n");
            lapic_os_enabled = TRUE;
            lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
            return TRUE;
        }
    }

    return FALSE;
}

void
lapic_shutdown(void)
{
    uint32_t lo;
    uint32_t hi;
    uint32_t value;

    /* Shutdown if local APIC was enabled by OS */
    if (lapic_os_enabled == FALSE)
        return;

    mp_disable_preemption();

    /* ExtINT: masked */
    if (get_cpu_number() == master_cpu) {
        value = LAPIC_READ(LVT_LINT0);
        value |= LAPIC_LVT_MASKED;
        LAPIC_WRITE(LVT_LINT0, value);
    }

    /* Error: masked */
    LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

    /* Timer: masked */
    LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

    /* Perfmon: masked */
    LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

    /* APIC software disabled */
    LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

    /* Bypass the APIC completely and update cpu features */
    rdmsr(MSR_IA32_APIC_BASE, lo, hi);
    lo &= ~MSR_IA32_APIC_BASE_ENABLE;
    wrmsr(MSR_IA32_APIC_BASE, lo, hi);
    cpuid_set_info();

    mp_enable_preemption();
}

void
lapic_configure(void)
{
    int value;

    if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
        nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
        if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
            lapic_dont_panic = FALSE;
        }
    }

    /* Set flat delivery model, logical processor id */
    LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
    LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);

    /* Accept all */
    LAPIC_WRITE(TPR, 0);

    LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

    /* ExtINT */
    if (get_cpu_number() == master_cpu) {
        value = LAPIC_READ(LVT_LINT0);
        value &= ~LAPIC_LVT_MASKED;
        value |= LAPIC_LVT_DM_EXTINT;
        LAPIC_WRITE(LVT_LINT0, value);
    }

    /* Timer: unmasked, one-shot */
    LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

    /* Perfmon: unmasked */
    LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

    /* Thermal: unmasked */
    LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
    /* CMCI, if available */
    if (mca_is_cmci_present())
        LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

    if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
        (cpu_number() != master_cpu)) {
        lapic_esr_clear();
        LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
    }
}

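/*
 * Illustrative call (enumerator names other than 'one_shot' are assumed
 * to come from lapic_timer_divide_t in <i386/lapic.h>):
 *
 *     lapic_set_timer(TRUE, one_shot, divide_by_1, count);
 *
 * arms an unmasked one-shot countdown of 'count' timer ticks.
 */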
void
lapic_set_timer(
    boolean_t            interrupt_unmasked,
    lapic_timer_mode_t   mode,
    lapic_timer_divide_t divisor,
    lapic_timer_count_t  initial_count)
{
    uint32_t timer_vector;

    mp_disable_preemption();
    timer_vector = LAPIC_READ(LVT_TIMER);
    timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
    timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
    timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
    LAPIC_WRITE(LVT_TIMER, timer_vector);
    LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
    LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
    mp_enable_preemption();
}

void
lapic_config_timer(
    boolean_t            interrupt_unmasked,
    lapic_timer_mode_t   mode,
    lapic_timer_divide_t divisor)
{
    uint32_t timer_vector;

    mp_disable_preemption();
    timer_vector = LAPIC_READ(LVT_TIMER);
    timer_vector &= ~(LAPIC_LVT_MASKED |
                      LAPIC_LVT_PERIODIC |
                      LAPIC_LVT_TSC_DEADLINE);
    timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
    timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
    LAPIC_WRITE(LVT_TIMER, timer_vector);
    LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
    mp_enable_preemption();
}

/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
    uint32_t timer_vector;

    DBG("lapic_config_tsc_deadline_timer()\n");
    mp_disable_preemption();
    timer_vector = LAPIC_READ(LVT_TIMER);
    timer_vector &= ~(LAPIC_LVT_MASKED |
                      LAPIC_LVT_PERIODIC);
    timer_vector |= LAPIC_LVT_TSC_DEADLINE;
    LAPIC_WRITE(LVT_TIMER, timer_vector);

    /* Serialize writes per Intel OSWG */
    do {
        lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
    } while (lapic_get_tsc_deadline_timer() == 0);
    lapic_set_tsc_deadline_timer(0);

    mp_enable_preemption();
    DBG("lapic_config_tsc_deadline_timer() done\n");
}

void
lapic_set_timer_fast(
    lapic_timer_count_t initial_count)
{
    LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
    LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}

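/*
 * TSC-deadline mode: writing an absolute TSC value arms a one-shot
 * interrupt for when the TSC reaches that value; writing 0 disarms the
 * timer (see lapic_disable_timer below).
 */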
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
    /* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
    wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}

uint64_t
lapic_get_tsc_deadline_timer(void)
{
    return rdmsr64(MSR_IA32_TSC_DEADLINE);
}

void
lapic_get_timer(
    lapic_timer_mode_t   *mode,
    lapic_timer_divide_t *divisor,
    lapic_timer_count_t  *initial_count,
    lapic_timer_count_t  *current_count)
{
    mp_disable_preemption();
    if (mode)
        *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
                    periodic : one_shot;
    if (divisor)
        *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
    if (initial_count)
        *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
    if (current_count)
        *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
    mp_enable_preemption();
}

static inline void
_lapic_end_of_interrupt(void)
{
    LAPIC_WRITE(EOI, 0);
}

void
lapic_end_of_interrupt(void)
{
    _lapic_end_of_interrupt();
}

void
lapic_unmask_perfcnt_interrupt(void)
{
    LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}

void
lapic_set_perfcnt_interrupt_mask(boolean_t mask)
{
    uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
    LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
}

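/*
 * Register a handler for one of the local APIC interrupt sources.
 * The vector may be passed either as the raw LAPIC_*_INTERRUPT index
 * or offset by lapic_interrupt_base; values above the base are
 * normalized before the function table is indexed.
 */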
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
    if (vector > lapic_interrupt_base)
        vector -= lapic_interrupt_base;

    switch (vector) {
    case LAPIC_NMI_INTERRUPT:
    case LAPIC_INTERPROCESSOR_INTERRUPT:
    case LAPIC_TIMER_INTERRUPT:
    case LAPIC_THERMAL_INTERRUPT:
    case LAPIC_PERFCNT_INTERRUPT:
    case LAPIC_CMCI_INTERRUPT:
    case LAPIC_PM_INTERRUPT:
        lapic_intr_func[vector] = func;
        break;
    default:
        panic("lapic_set_intr_func(%d,%p) invalid vector\n",
              vector, func);
    }
}

void
lapic_set_pmi_func(i386_intr_func_t func)
{
    lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}

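/*
 * Top-level local APIC interrupt dispatcher. Returns 1 if the interrupt
 * was handled (and EOI'd) here, 0 if it should be passed up to the
 * platform expert (e.g. CMCI, or a PMI with no registered handler).
 */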
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
    int retval = 0;
    int esr = -1;

    interrupt_num -= lapic_interrupt_base;
    if (interrupt_num < 0) {
        if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
            lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
            retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
            return retval;
        }
        else
            return 0;
    }

    switch (interrupt_num) {
    case LAPIC_TIMER_INTERRUPT:
    case LAPIC_THERMAL_INTERRUPT:
    case LAPIC_INTERPROCESSOR_INTERRUPT:
    case LAPIC_PM_INTERRUPT:
        if (lapic_intr_func[interrupt_num] != NULL)
            (void) (*lapic_intr_func[interrupt_num])(state);
        _lapic_end_of_interrupt();
        retval = 1;
        break;
    case LAPIC_PERFCNT_INTERRUPT:
        /* If a function has been registered, invoke it. Otherwise,
         * pass up to IOKit.
         */
        if (lapic_intr_func[interrupt_num] != NULL) {
            (void) (*lapic_intr_func[interrupt_num])(state);
            /* Unmask the interrupt since we don't expect legacy users
             * to be responsible for it.
             */
            lapic_unmask_perfcnt_interrupt();
            _lapic_end_of_interrupt();
            retval = 1;
        }
        break;
    case LAPIC_CMCI_INTERRUPT:
        if (lapic_intr_func[interrupt_num] != NULL)
            (void) (*lapic_intr_func[interrupt_num])(state);
        /* return 0 for the platform expert to handle */
        break;
    case LAPIC_ERROR_INTERRUPT:
        /* We treat error interrupts on APs as fatal.
         * The current interrupt steering scheme directs most
         * external interrupts to the BSP (HPET interrupts being
         * a notable exception); hence, such an error
         * on an AP may signify LVT corruption (with "may" being
         * the operative word). On the BSP, we adopt a more
         * lenient approach, in the interests of enhancing
         * debuggability and reducing fragility.
         * If "lapic_error_count_threshold" error interrupts
         * occur within "lapic_error_time_threshold" absolute
         * time units, we mask the error vector and log. The
         * error interrupts themselves are likely
         * side effects of issues which are beyond the purview of
         * the local APIC interrupt handler, however. The Error
         * Status Register value (the illegal destination
         * vector code is one observed in practice) indicates
         * the immediate cause of the error.
         */
        esr = lapic_esr_read();
        lapic_dump();

        if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
            cpu_number() != master_cpu) {
            panic("Local APIC error, ESR: %d\n", esr);
        }

        if (cpu_number() == master_cpu) {
            uint64_t abstime = mach_absolute_time();
            if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
                if (lapic_master_error_count++ > lapic_error_count_threshold) {
                    lapic_errors_masked = TRUE;
                    LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
                    printf("Local APIC: errors masked\n");
                }
            }
            else {
                lapic_last_master_error = abstime;
                lapic_master_error_count = 0;
            }
            printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
        }

        _lapic_end_of_interrupt();
        retval = 1;
        break;
    case LAPIC_SPURIOUS_INTERRUPT:
        kprintf("SPIV\n");
        /* No EOI required here */
        retval = 1;
        break;
    case LAPIC_PMC_SW_INTERRUPT:
    {
#if CONFIG_COUNTERS
        thread_t old, new;
        ml_get_csw_threads(&old, &new);

        if (pmc_context_switch(old, new) == TRUE) {
            retval = 1;
            /* No EOI required for SWI */
        }
#endif /* CONFIG_COUNTERS */
    }
        break;
    }

    return retval;
}

void
lapic_smm_restore(void)
{
    boolean_t state;

    if (lapic_os_enabled == FALSE)
        return;

    state = ml_set_interrupts_enabled(FALSE);

    if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
        /*
         * A bogus SMI handler enables interrupts but does not know
         * about local APIC interrupt sources. When the APIC timer
         * counts down to zero while in SMM, the local APIC ends up
         * waiting for an EOI, but no interrupt was delivered to the OS.
         */
        _lapic_end_of_interrupt();

        /*
         * The timer is one-shot; trigger another quick countdown so
         * that another timer interrupt is generated.
         */
        if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
            LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
        }

        kprintf("lapic_smm_restore\n");
    }

    ml_set_interrupts_enabled(state);
}

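/*
 * Send a fixed-delivery IPI: wait for any previous send to drain
 * (ICR delivery status), program the destination APIC id in ICRD,
 * then write the vector to ICR, which triggers the send.
 */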
void
lapic_send_ipi(int cpu, int vector)
{
    boolean_t state;

    if (vector < lapic_interrupt_base)
        vector += lapic_interrupt_base;

    state = ml_set_interrupts_enabled(FALSE);

    /* Wait for pending outgoing send to complete */
    while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
        cpu_pause();
    }

    LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
    LAPIC_WRITE(ICR, vector | LAPIC_ICR_DM_FIXED);

    (void) ml_set_interrupts_enabled(state);
}

/*
 * The following interfaces are privately exported to AICPM.
 */

boolean_t
lapic_is_interrupt_pending(void)
{
    int i;

    for (i = 0; i < 8; i += 1) {
        if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
            (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
            return (TRUE);
    }

    return (FALSE);
}

boolean_t
lapic_is_interrupting(uint8_t vector)
{
    int      i;
    int      bit;
    uint32_t irr;
    uint32_t isr;

    i = vector / 32;
    bit = 1 << (vector % 32);

    irr = LAPIC_READ_OFFSET(IRR_BASE, i);
    isr = LAPIC_READ_OFFSET(ISR_BASE, i);

    if ((irr | isr) & bit)
        return (TRUE);

    return (FALSE);
}

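/*
 * Tally, per vector, interrupts currently pending (IRR) or in service
 * (ISR). Vectors 0-15 are skipped in the first 32-bit register since
 * they are not valid APIC vectors.
 */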
void
lapic_interrupt_counts(uint64_t intrs[256])
{
    int      i;
    int      j;
    int      bit;
    uint32_t irr;
    uint32_t isr;

    if (intrs == NULL)
        return;

    for (i = 0; i < 8; i += 1) {
        irr = LAPIC_READ_OFFSET(IRR_BASE, i);
        isr = LAPIC_READ_OFFSET(ISR_BASE, i);

        if ((isr | irr) == 0)
            continue;

        for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
            bit = (32 * i) + j;
            if ((isr | irr) & (1 << j))
                intrs[bit] += 1;
        }
    }
}

void
lapic_disable_timer(void)
{
    uint32_t lvt_timer;

    /*
     * If we're in deadline timer mode,
     * simply clear the deadline timer, otherwise
     * mask the timer interrupt and clear the countdown.
     */
    lvt_timer = LAPIC_READ(LVT_TIMER);
    if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
        wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
    } else {
        LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
        LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
        lvt_timer = LAPIC_READ(LVT_TIMER);
    }
}