/* osfmk/i386/lapic_native.c — from the xnu-2050.48.11 source distribution */
/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
31
32 #include <mach/mach_types.h>
33 #include <mach/kern_return.h>
34
35 #include <kern/kern_types.h>
36 #include <kern/cpu_number.h>
37 #include <kern/cpu_data.h>
38 #include <kern/assert.h>
39 #include <kern/machine.h>
40 #include <kern/debug.h>
41
42 #include <vm/vm_map.h>
43 #include <vm/vm_kern.h>
44
45 #include <i386/lapic.h>
46 #include <i386/cpuid.h>
47 #include <i386/proc_reg.h>
48 #include <i386/machine_cpu.h>
49 #include <i386/misc_protos.h>
50 #include <i386/mp.h>
51 #include <i386/postcode.h>
52 #include <i386/cpu_threads.h>
53 #include <i386/machine_routines.h>
54 #include <i386/tsc.h>
55 #if CONFIG_MCA
56 #include <i386/machine_check.h>
57 #endif
58
59 #if CONFIG_COUNTERS
60 #include <pmc/pmc.h>
61 #endif
62
63 #include <sys/kdebug.h>
64
65 #if MP_DEBUG
66 #define PAUSE delay(1000000)
67 #define DBG(x...) kprintf(x)
68 #else
69 #define DBG(x...)
70 #define PAUSE
71 #endif /* MP_DEBUG */
72
lapic_ops_table_t *lapic_ops;		/* Lapic operations switch */

static vm_map_offset_t	lapic_pbase;	/* Physical base memory-mapped regs */
static vm_offset_t	lapic_vbase;	/* Virtual base memory-mapped regs */

/* Registered handlers, indexed by local APIC interrupt slot number */
static i386_intr_func_t	lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

/*
 * Error-interrupt throttling state used by lapic_interrupt(): if more
 * than lapic_error_count_threshold errors arrive on the master CPU
 * within lapic_error_time_threshold absolute-time units, the error
 * LVT is masked (lapic_errors_masked) instead of panicking.
 */
static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;	/* set by boot-arg */
89
90 #ifdef MP_DEBUG
91 void
92 lapic_cpu_map_dump(void)
93 {
94 int i;
95
96 for (i = 0; i < MAX_CPUS; i++) {
97 if (cpu_to_lapic[i] == -1)
98 continue;
99 kprintf("cpu_to_lapic[%d]: %d\n",
100 i, cpu_to_lapic[i]);
101 }
102 for (i = 0; i < MAX_LAPICIDS; i++) {
103 if (lapic_to_cpu[i] == -1)
104 continue;
105 kprintf("lapic_to_cpu[%d]: %d\n",
106 i, lapic_to_cpu[i]);
107 }
108 }
109 #endif /* MP_DEBUG */
110
/*
 * Per-CPU init for the legacy (xAPIC, memory-mapped) local APIC.
 * On first call, carves out kernel VA space and maps the APIC's MMIO
 * page uncached; on every call, programs flat logical delivery with
 * this CPU's logical ID.
 */
static void
legacy_init(void)
{
	int		result;
	vm_map_entry_t	entry;
	vm_map_offset_t lapic_vbase64;
	/* Establish a map to the local apic */

	if (lapic_vbase == 0) {
		lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map);
		result = vm_map_find_space(kernel_map,
					   &lapic_vbase64,
					   round_page(LAPIC_SIZE), 0,
					   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
		/* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t
		 */
		lapic_vbase = (vm_offset_t) lapic_vbase64;
		if (result != KERN_SUCCESS) {
			panic("legacy_init: vm_map_find_entry FAILED (err=%d)", result);
		}
		/* vm_map_find_space() returns with the map locked; release it */
		vm_map_unlock(kernel_map);

		/*
		 * Map in the local APIC non-cacheable, as recommended by Intel
		 * in section 8.4.1 of the "System Programming Guide".
		 * In fact, this is redundant because EFI will have assigned an
		 * MTRR physical range containing the local APIC's MMIO space as
		 * UC and this will override the default PAT setting.
		 */
		pmap_enter(pmap_kernel(),
			   lapic_vbase,
			   (ppnum_t) i386_btop(lapic_pbase),
			   VM_PROT_READ|VM_PROT_WRITE,
			   VM_PROT_NONE,
			   VM_WIMG_IO,
			   TRUE);
	}

	/*
	 * Set flat delivery model, logical processor id
	 * This should already be the default set.
	 */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);
}
156
157
158 static uint32_t
159 legacy_read(lapic_register_t reg)
160 {
161 return *LAPIC_MMIO(reg);
162 }
163
/*
 * Write a 32-bit local APIC register through the memory-mapped window.
 */
static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}
169
170 static uint64_t
171 legacy_read_icr(void)
172 {
173 return (((uint64_t)*LAPIC_MMIO(ICRD)) << 32) | ((uint64_t)*LAPIC_MMIO(ICR));
174 }
175
/*
 * Write the interrupt command register. The destination word (ICRD)
 * must be set up first: writing the low (command) word is what issues
 * the IPI in xAPIC mode.
 */
static void
legacy_write_icr(uint32_t dst, uint32_t cmd)
{
	*LAPIC_MMIO(ICRD) = dst << LAPIC_ICRD_DEST_SHIFT;
	*LAPIC_MMIO(ICR) = cmd;
}
182
/*
 * Operations vector used when the local APIC is driven through its
 * legacy memory-mapped (xAPIC) interface, in the order declared by
 * lapic_ops_table_t.
 */
static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write,
	legacy_read_icr,
	legacy_write_icr
};
190
/* TRUE when the local APIC is to be operated in x2APIC (MSR-based) mode */
static boolean_t is_x2apic = FALSE;
192
193 static void
194 x2apic_init(void)
195 {
196 uint32_t lo;
197 uint32_t hi;
198
199 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
200 if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) {
201 lo |= MSR_IA32_APIC_BASE_EXTENDED;
202 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
203 kprintf("x2APIC mode enabled\n");
204 }
205 }
206
207 static uint32_t
208 x2apic_read(lapic_register_t reg)
209 {
210 uint32_t lo;
211 uint32_t hi;
212
213 rdmsr(LAPIC_MSR(reg), lo, hi);
214 return lo;
215 }
216
/*
 * Write a local APIC register through its x2APIC MSR; the high 32 bits
 * are written as zero.
 */
static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}
222
223 static uint64_t
224 x2apic_read_icr(void)
225 {
226 return rdmsr64(LAPIC_MSR(ICR));;
227 }
228
/*
 * Write the interrupt command register in a single 64-bit MSR write:
 * command in the low half, destination in the high half.
 */
static void
x2apic_write_icr(uint32_t dst, uint32_t cmd)
{
	wrmsr(LAPIC_MSR(ICR), cmd, dst);
}
234
/*
 * Operations vector used when the local APIC is driven through its
 * x2APIC MSR interface, in the order declared by lapic_ops_table_t.
 */
static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write,
	x2apic_read_icr,
	x2apic_write_icr
};
242
/*
 * Boot-processor local APIC initialization.
 * Reads IA32_APIC_BASE to find the APIC's physical base and mode,
 * optionally switches to x2APIC (boot-arg overridable), selects the
 * matching ops table, sanity-checks the APIC version, and records the
 * boot CPU in the lapic_id <-> cpu_number map.
 * Panics if run on a non-BSP or with the APIC globally disabled.
 */
void
lapic_init(void)
{
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase,
		is_lapic_enabled ? "enabled" : "disabled",
		is_x2apic ? "extended" : "legacy",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	/*
	 * If x2APIC is available and not already enabled, enable it.
	 * Unless overriden by boot-arg.
	 */
	if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) {
		PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic));
		kprintf("x2APIC supported %s be enabled\n",
			is_x2apic ? "and will" : "but will not");
	}

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	/* Dispatches to legacy_init() or x2apic_init() via lapic_ops */
	LAPIC_INIT();

	kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR));
	if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
			(LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}
289
290
/*
 * Return the current Error Status Register contents.
 */
static int
lapic_esr_read(void)
{
	/* write-read register: the write latches the errors accumulated
	 * since the previous write, which the read then returns */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}
298
/*
 * Clear the Error Status Register. The ESR is write-latched, so it is
 * written twice: once to latch (and clear) pending errors, once more
 * to clear the freshly latched value.
 */
static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}
305
/* Delivery-mode names, indexed by the 3-bit DM field of an LVT entry
 * (used by lapic_dump()) */
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};

/* Timer-mode names, indexed by the timer-mode bits of LVT_TIMER */
static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};
322
/*
 * Dump the local APIC state to the kernel log: ID/version, priority
 * registers, SVR, every LVT entry, timer state, ESR, and the TMR/IRR/ISR
 * bit arrays. Debug aid; intended for use from panic/debug paths.
 */
void
lapic_dump(void)
{
	int	i;

/* Decoding helpers for LVT entries; BOOL() prints '!' when the flag is clear */
#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define	DS(lvt)	\
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
		(LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		(void *) lapic_vbase,
		LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_READ(TPR)&LAPIC_TPR_MASK,
		LAPIC_READ(APR)&LAPIC_APR_MASK,
		LAPIC_READ(PPR)&LAPIC_PPR_MASK);
	/* DFR doesn't exist in x2APIC mode; report 0 there */
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		is_x2apic ? 0 : LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present())
		kprintf("LVT_CMCI:    Vector 0x%02x [%s] %s %cmasked\n",
			VEC(LVT_CMCI),
			DM(LVT_CMCI),
			DS(LVT_CMCI),
			MASK(LVT_CMCI));
#endif
	kprintf("LVT_TIMER:   Vector 0x%02x %s %cmasked %s\n",
		VEC(LVT_TIMER),
		DS(LVT_TIMER),
		MASK(LVT_TIMER),
		TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
			& LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_PERFCNT),
		DM(LVT_PERFCNT),
		DS(LVT_PERFCNT),
		MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_THERMAL),
		DM(LVT_THERMAL),
		DS(LVT_THERMAL),
		MASK(LVT_THERMAL));
	kprintf("LVT_LINT0:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT0),
		DM(LVT_LINT0),
		TM(LVT_LINT0),
		IP(LVT_LINT0),
		DS(LVT_LINT0),
		MASK(LVT_LINT0));
	kprintf("LVT_LINT1:   Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT1),
		DM(LVT_LINT1),
		TM(LVT_LINT1),
		IP(LVT_LINT1),
		DS(LVT_LINT1),
		MASK(LVT_LINT1));
	kprintf("LVT_ERROR:   Vector 0x%02x %s %cmasked\n",
		VEC(LVT_ERROR),
		DS(LVT_ERROR),
		MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	/* Header row of hex digits for the 256-bit register dumps below */
	kprintf("       ");
	for(i=0xf; i>=0; i--)
		kprintf("%x%x%x%x",i,i,i,i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE, i));
	kprintf("\n");
	kprintf("IRR: 0x");
	for(i=7; i>=0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE, i));
	kprintf("\n");
	kprintf("ISR: 0x");
	for(i=7; i >= 0; i--)
		kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE, i));
	kprintf("\n");
}
420
421 boolean_t
422 lapic_probe(void)
423 {
424 uint32_t lo;
425 uint32_t hi;
426
427 if (cpuid_features() & CPUID_FEATURE_APIC)
428 return TRUE;
429
430 if (cpuid_family() == 6 || cpuid_family() == 15) {
431 /*
432 * Mobile Pentiums:
433 * There may be a local APIC which wasn't enabled by BIOS.
434 * So we try to enable it explicitly.
435 */
436 rdmsr(MSR_IA32_APIC_BASE, lo, hi);
437 lo &= ~MSR_IA32_APIC_BASE_BASE;
438 lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
439 lo |= MSR_IA32_APIC_BASE_ENABLE;
440 wrmsr(MSR_IA32_APIC_BASE, lo, hi);
441
442 /*
443 * Re-initialize cpu features info and re-check.
444 */
445 cpuid_set_info();
446 if (cpuid_features() & CPUID_FEATURE_APIC) {
447 printf("Local APIC discovered and enabled\n");
448 lapic_os_enabled = TRUE;
449 lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
450 return TRUE;
451 }
452 }
453
454 return FALSE;
455 }
456
/*
 * Undo an OS-performed APIC enable (see lapic_probe()): mask the LVT
 * entries this CPU owns, software-disable the APIC via SVR, clear the
 * global enable bit in IA32_APIC_BASE, and refresh cpuid info.
 * No-op unless lapic_os_enabled is set.
 */
void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}
497
/*
 * Program this CPU's local APIC for normal operation: accept all
 * priorities (TPR = 0), software-enable via SVR with the spurious
 * vector, and set up the LVT entries (ExtINT on the master CPU only,
 * timer, perfmon, thermal, CMCI when present, and error).
 * On the first call on cpu 0, initializes the error-throttling
 * threshold and the lapic_dont_panic boot-arg.
 */
void
lapic_configure(void)
{
	int	value;

	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present())
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

	/* Error LVT: leave masked on the master if throttling masked it */
	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
		(cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}
544
545 void
546 lapic_set_timer(
547 boolean_t interrupt_unmasked,
548 lapic_timer_mode_t mode,
549 lapic_timer_divide_t divisor,
550 lapic_timer_count_t initial_count)
551 {
552 uint32_t timer_vector;
553
554 mp_disable_preemption();
555 timer_vector = LAPIC_READ(LVT_TIMER);
556 timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);;
557 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
558 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
559 LAPIC_WRITE(LVT_TIMER, timer_vector);
560 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
561 LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
562 mp_enable_preemption();
563 }
564
565 void
566 lapic_config_timer(
567 boolean_t interrupt_unmasked,
568 lapic_timer_mode_t mode,
569 lapic_timer_divide_t divisor)
570 {
571 uint32_t timer_vector;
572
573 mp_disable_preemption();
574 timer_vector = LAPIC_READ(LVT_TIMER);
575 timer_vector &= ~(LAPIC_LVT_MASKED |
576 LAPIC_LVT_PERIODIC |
577 LAPIC_LVT_TSC_DEADLINE);
578 timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
579 timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
580 LAPIC_WRITE(LVT_TIMER, timer_vector);
581 LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
582 mp_enable_preemption();
583 }
584
/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 * The set/poll/clear sequence below exercises IA32_TSC_DEADLINE once
 * before it is relied upon ("serialize writes", per the comment from
 * the Intel OSWG).
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t	lvt;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	lvt = LAPIC_READ(LVT_TIMER);
	lvt &= ~(LAPIC_LVT_MASKED |
		 LAPIC_LVT_PERIODIC);
	lvt |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, lvt);

	/* Serialize writes per Intel OSWG */
	do {
		/* Arm far in the future, then spin until the MSR reads back non-zero */
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}
610
/*
 * Fast path to (re)arm the timer: unmask LVT_TIMER and write the
 * initial count, leaving mode and divider untouched and skipping the
 * preemption disable done by lapic_set_timer().
 */
void
lapic_set_timer_fast(
	lapic_timer_count_t	initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}
618
/*
 * Arm the TSC-deadline timer with an absolute TSC value (0 disarms).
 */
void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	/* Don't bother disarming: wrmsr64(MSR_IA32_TSC_DEADLINE, 0); */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}
625
/*
 * Return the currently armed TSC-deadline value (0 when disarmed).
 */
uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}
631
632 void
633 lapic_get_timer(
634 lapic_timer_mode_t *mode,
635 lapic_timer_divide_t *divisor,
636 lapic_timer_count_t *initial_count,
637 lapic_timer_count_t *current_count)
638 {
639 mp_disable_preemption();
640 if (mode)
641 *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
642 periodic : one_shot;
643 if (divisor)
644 *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
645 if (initial_count)
646 *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
647 if (current_count)
648 *current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
649 mp_enable_preemption();
650 }
651
/*
 * Signal completion of the current interrupt by writing the EOI register.
 */
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}
657
/*
 * Exported (non-inline) wrapper around _lapic_end_of_interrupt().
 */
void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}
663
664 void lapic_unmask_perfcnt_interrupt(void) {
665 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
666 }
667
668 void lapic_set_perfcnt_interrupt_mask(boolean_t mask) {
669 uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
670 LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
671 }
672
/*
 * Register a handler for one of the local APIC interrupt slots.
 * Accepts either the raw slot number or a full vector
 * (> lapic_interrupt_base), which is rebased before use.
 * Panics for any slot not listed in the switch.
 */
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base)
		vector -= lapic_interrupt_base;

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
			vector, func);
	}
}
694
695 void lapic_set_pmi_func(i386_intr_func_t func) {
696 lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
697 }
698
/*
 * Common local APIC interrupt dispatcher.
 * interrupt_num is rebased by lapic_interrupt_base; a negative result
 * can still match the NMI slot. Returns 1 if handled here (with EOI
 * issued where required), 0 to let the platform expert handle it.
 */
int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int	retval = 0;
	int 	esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		/* NMI is the one slot that can sit below the rebased range */
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		}
		else
			return 0;
	}

	switch(interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it.  Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		/* return 0 for platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
			cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			}
			else {
				/* outside the window: restart the error-burst accounting */
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
#if CONFIG_COUNTERS
		thread_t old, new;
		ml_get_csw_threads(&old, &new);

		if (pmc_context_switch(old, new) == TRUE) {
			retval = 1;
			/* No EOI required for SWI */
		}
#endif /* CONFIG_COUNTERS */
	}
		break;
	}

	return retval;
}
813
/*
 * Recover local APIC timer state after return from SMM.
 * A buggy SMI handler can consume a pending timer interrupt without
 * issuing an EOI, leaving the APIC in-service and the OS missing a
 * tick; issue the EOI on its behalf and, if the one-shot countdown has
 * expired, trigger a quick re-fire. No-op unless the OS enabled the
 * APIC (lapic_os_enabled).
 */
void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * Bogus SMI handler enables interrupts but does not know about
		 * local APIC interrupt sources. When APIC timer counts down to
		 * zero while in SMM, local APIC will end up waiting for an EOI
		 * but no interrupt was delivered to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * timer is one-shot, trigger another quick countdown to trigger
		 * another timer interrupt.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}
846
847 void
848 lapic_send_ipi(int cpu, int vector)
849 {
850 boolean_t state;
851
852 if (vector < lapic_interrupt_base)
853 vector += lapic_interrupt_base;
854
855 state = ml_set_interrupts_enabled(FALSE);
856
857 /* Wait for pending outgoing send to complete */
858 while (LAPIC_READ_ICR() & LAPIC_ICR_DS_PENDING) {
859 cpu_pause();
860 }
861
862 LAPIC_WRITE_ICR(cpu_to_lapic[cpu], vector | LAPIC_ICR_DM_FIXED);
863
864 (void) ml_set_interrupts_enabled(state);
865 }
866
867 /*
868 * The following interfaces are privately exported to AICPM.
869 */
870
871 boolean_t
872 lapic_is_interrupt_pending(void)
873 {
874 int i;
875
876 for (i = 0; i < 8; i += 1) {
877 if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
878 (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
879 return (TRUE);
880 }
881
882 return (FALSE);
883 }
884
885 boolean_t
886 lapic_is_interrupting(uint8_t vector)
887 {
888 int i;
889 int bit;
890 uint32_t irr;
891 uint32_t isr;
892
893 i = vector / 32;
894 bit = 1 << (vector % 32);
895
896 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
897 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
898
899 if ((irr | isr) & bit)
900 return (TRUE);
901
902 return (FALSE);
903 }
904
905 void
906 lapic_interrupt_counts(uint64_t intrs[256])
907 {
908 int i;
909 int j;
910 int bit;
911 uint32_t irr;
912 uint32_t isr;
913
914 if (intrs == NULL)
915 return;
916
917 for (i = 0; i < 8; i += 1) {
918 irr = LAPIC_READ_OFFSET(IRR_BASE, i);
919 isr = LAPIC_READ_OFFSET(ISR_BASE, i);
920
921 if ((isr | irr) == 0)
922 continue;
923
924 for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
925 bit = (32 * i) + j;
926 if ((isr | irr) & (1 << j))
927 intrs[bit] += 1;
928 }
929 }
930 }
931
/*
 * Stop the local APIC timer, whichever mode it is in.
 */
void
lapic_disable_timer(void)
{
	uint32_t	lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		/* read back — presumably to force the preceding writes to
		 * post before returning; TODO confirm intent */
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}
951