/*
 * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/assert.h>
#include <kern/machine.h>
#include <kern/debug.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <i386/lapic.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/postcode.h>
#include <i386/cpu_threads.h>
#include <i386/machine_routines.h>
#include <i386/tsc.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif

#if MACH_KDB
#include <machine/db_machdep.h>
#endif

#include <sys/kdebug.h>

#if MP_DEBUG
#define PAUSE		delay(1000000)
#define DBG(x...)	kprintf(x)
#else
#define DBG(x...)
#define PAUSE
#endif	/* MP_DEBUG */

/* Base vector for local APIC interrupt sources */
int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE;

lapic_ops_table_t *lapic_ops;	/* Lapic operations switch */

#define MAX_LAPICIDS	(LAPIC_ID_MAX+1)
int	lapic_to_cpu[MAX_LAPICIDS];
int	cpu_to_lapic[MAX_CPUS];

static vm_offset_t	lapic_pbase;	/* Physical base memory-mapped regs */
static vm_offset_t	lapic_vbase;	/* Virtual base memory-mapped regs */

static i386_intr_func_t	lapic_intr_func[LAPIC_FUNC_TABLE_SIZE];

/* TRUE if local APIC was enabled by the OS, not by the BIOS */
static boolean_t lapic_os_enabled = FALSE;

static boolean_t lapic_errors_masked = FALSE;
static uint64_t lapic_last_master_error = 0;
static uint64_t lapic_error_time_threshold = 0;
static unsigned lapic_master_error_count = 0;
static unsigned lapic_error_count_threshold = 5;
static boolean_t lapic_dont_panic = FALSE;

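/*
 * lapic_to_cpu[] and cpu_to_lapic[] hold the two directions of the
 * local-APIC-id <-> cpu-number mapping; -1 marks an unassigned slot.
 */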
static void
lapic_cpu_map_init(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++)
		cpu_to_lapic[i] = -1;
	for (i = 0; i < MAX_LAPICIDS; i++)
		lapic_to_cpu[i] = -1;
}

void
lapic_cpu_map(int apic_id, int cpu)
{
	assert(apic_id < MAX_LAPICIDS);
	assert(cpu < MAX_CPUS);
	cpu_to_lapic[cpu] = apic_id;
	lapic_to_cpu[apic_id] = cpu;
}

/*
 * Retrieve the local apic ID of a cpu.
 *
 * Returns the local apic ID for the given processor.
 * If the processor does not exist or the apic is not configured, returns -1.
 */

uint32_t
ml_get_apicid(uint32_t cpu)
{
	if (cpu >= (uint32_t)MAX_CPUS)
		return 0xFFFFFFFF;	/* Return -1 if cpu too big */

	/* Return the apic ID (or -1 if not configured) */
	return (uint32_t)cpu_to_lapic[cpu];

}

uint32_t
ml_get_cpuid(uint32_t lapic_index)
{
	if (lapic_index >= (uint32_t)MAX_LAPICIDS)
		return 0xFFFFFFFF;	/* Return -1 if lapic_index too big */

	/* Return the cpu ID (or -1 if not configured) */
	return (uint32_t)lapic_to_cpu[lapic_index];

}

#ifdef MP_DEBUG
void
lapic_cpu_map_dump(void)
{
	int	i;

	for (i = 0; i < MAX_CPUS; i++) {
		if (cpu_to_lapic[i] == -1)
			continue;
		kprintf("cpu_to_lapic[%d]: %d\n",
			i, cpu_to_lapic[i]);
	}
	for (i = 0; i < MAX_LAPICIDS; i++) {
		if (lapic_to_cpu[i] == -1)
			continue;
		kprintf("lapic_to_cpu[%d]: %d\n",
			i, lapic_to_cpu[i]);
	}
}
#endif /* MP_DEBUG */

static void
legacy_init(void)
{
	int		result;
	vm_map_entry_t	entry;

	/* Establish a map to the local apic */
	lapic_vbase = (vm_offset_t)vm_map_min(kernel_map);
	result = vm_map_find_space(kernel_map,
				   (vm_map_address_t *) &lapic_vbase,
				   round_page(LAPIC_SIZE), 0,
				   VM_MAKE_TAG(VM_MEMORY_IOKIT), &entry);
	if (result != KERN_SUCCESS) {
		panic("legacy_init: vm_map_find_space FAILED (err=%d)", result);
	}
	/* vm_map_find_space() returns with the map locked */
	vm_map_unlock(kernel_map);
	/* Map in the local APIC non-cacheable, as recommended by Intel
	 * in section 8.4.1 of the "System Programming Guide".
	 */
	pmap_enter(pmap_kernel(),
		   lapic_vbase,
		   (ppnum_t) i386_btop(lapic_pbase),
		   VM_PROT_READ|VM_PROT_WRITE,
		   VM_WIMG_IO,
		   TRUE);
}


static uint32_t
legacy_read(lapic_register_t reg)
{
	return *LAPIC_MMIO(reg);
}

static void
legacy_write(lapic_register_t reg, uint32_t value)
{
	*LAPIC_MMIO(reg) = value;
}

static lapic_ops_table_t legacy_ops = {
	legacy_init,
	legacy_read,
	legacy_write
};

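/*
 * In x2APIC mode the APIC registers are not memory-mapped: they are accessed
 * as MSRs in the 0x800-0x8FF range via rdmsr/wrmsr, with LAPIC_MSR() mapping
 * a lapic_register_t to its MSR address. Only the low 32 bits are used for
 * the registers accessed through these helpers.
 */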
static void
x2apic_init(void)
{
}

static uint32_t
x2apic_read(lapic_register_t reg)
{
	uint32_t lo;
	uint32_t hi;

	rdmsr(LAPIC_MSR(reg), lo, hi);
	return lo;
}

static void
x2apic_write(lapic_register_t reg, uint32_t value)
{
	wrmsr(LAPIC_MSR(reg), value, 0);
}

static lapic_ops_table_t x2apic_ops = {
	x2apic_init,
	x2apic_read,
	x2apic_write
};


void
lapic_init(void)
{
	uint32_t	lo;
	uint32_t	hi;
	boolean_t	is_boot_processor;
	boolean_t	is_lapic_enabled;
	boolean_t	is_x2apic;

	/* Examine the local APIC state */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	is_boot_processor = (lo & MSR_IA32_APIC_BASE_BSP) != 0;
	is_lapic_enabled = (lo & MSR_IA32_APIC_BASE_ENABLE) != 0;
	is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0;
	lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE);
	kprintf("MSR_IA32_APIC_BASE %p %s %s mode %s\n", (void *) lapic_pbase,
		is_lapic_enabled ? "enabled" : "disabled",
		is_x2apic ? "extended" : "legacy",
		is_boot_processor ? "BSP" : "AP");
	if (!is_boot_processor || !is_lapic_enabled)
		panic("Unexpected local APIC state\n");

	lapic_ops = is_x2apic ? &x2apic_ops : &legacy_ops;

	lapic_ops->init();

	if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) {
		panic("Local APIC version 0x%x, 0x14 or more expected\n",
			(LAPIC_READ(VERSION)&LAPIC_VERSION_MASK));
	}

	/* Set up the lapic_id <-> cpu_number map and add this boot processor */
	lapic_cpu_map_init();
	lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0);
	kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]);
}


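/*
 * The Error Status Register is a write/read register: software writes it
 * first (which updates it with errors detected since the last write and
 * discards previously logged ones) and then reads back the updated value.
 * Writing it twice, as lapic_esr_clear() does, therefore leaves it clear.
 */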
static int
lapic_esr_read(void)
{
	/* write-read register */
	LAPIC_WRITE(ERROR_STATUS, 0);
	return LAPIC_READ(ERROR_STATUS);
}

static void
lapic_esr_clear(void)
{
	LAPIC_WRITE(ERROR_STATUS, 0);
	LAPIC_WRITE(ERROR_STATUS, 0);
}

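/* Human-readable names for the LVT delivery-mode and timer-mode field
 * encodings, used by lapic_dump().
 */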
static const char *DM_str[8] = {
	"Fixed",
	"Lowest Priority",
	"Invalid",
	"Invalid",
	"NMI",
	"Reset",
	"Invalid",
	"ExtINT"};

static const char *TMR_str[] = {
	"OneShot",
	"Periodic",
	"TSC-Deadline",
	"Illegal"
};

void
lapic_dump(void)
{
	int	i;

#define BOOL(a) ((a)?' ':'!')
#define VEC(lvt) \
	LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK
#define DS(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle"
#define DM(lvt) \
	DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK]
#define MASK(lvt) \
	BOOL(LAPIC_READ(lvt)&LAPIC_LVT_MASKED)
#define TM(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_TM_LEVEL)? "Level" : "Edge"
#define IP(lvt) \
	(LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High"

	kprintf("LAPIC %d at %p version 0x%x\n",
		(LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK,
		(void *) lapic_vbase,
		LAPIC_READ(VERSION)&LAPIC_VERSION_MASK);
	kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n",
		LAPIC_READ(TPR)&LAPIC_TPR_MASK,
		LAPIC_READ(APR)&LAPIC_APR_MASK,
		LAPIC_READ(PPR)&LAPIC_PPR_MASK);
	kprintf("Destination Format 0x%x Logical Destination 0x%x\n",
		LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT,
		LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT);
	kprintf("%cEnabled %cFocusChecking SV 0x%x\n",
		BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE),
		BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)),
		LAPIC_READ(SVR) & LAPIC_SVR_MASK);
#if CONFIG_MCA
	if (mca_is_cmci_present())
		kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n",
			VEC(LVT_CMCI),
			DM(LVT_CMCI),
			DS(LVT_CMCI),
			MASK(LVT_CMCI));
#endif
	kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n",
		VEC(LVT_TIMER),
		DS(LVT_TIMER),
		MASK(LVT_TIMER),
		TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT)
			& LAPIC_LVT_TMR_MASK]);
	kprintf("  Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT));
	kprintf("  Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT));
	kprintf("  Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG));
	kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_PERFCNT),
		DM(LVT_PERFCNT),
		DS(LVT_PERFCNT),
		MASK(LVT_PERFCNT));
	kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n",
		VEC(LVT_THERMAL),
		DM(LVT_THERMAL),
		DS(LVT_THERMAL),
		MASK(LVT_THERMAL));
	kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT0),
		DM(LVT_LINT0),
		TM(LVT_LINT0),
		IP(LVT_LINT0),
		DS(LVT_LINT0),
		MASK(LVT_LINT0));
	kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n",
		VEC(LVT_LINT1),
		DM(LVT_LINT1),
		TM(LVT_LINT1),
		IP(LVT_LINT1),
		DS(LVT_LINT1),
		MASK(LVT_LINT1));
	kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n",
		VEC(LVT_ERROR),
		DS(LVT_ERROR),
		MASK(LVT_ERROR));
	kprintf("ESR: %08x \n", lapic_esr_read());
	kprintf(" ");
	for (i = 0xf; i >= 0; i--)
		kprintf("%x%x%x%x", i, i, i, i);
	kprintf("\n");
	kprintf("TMR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i));
	kprintf("\n");
	kprintf("IRR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i));
	kprintf("\n");
	kprintf("ISR: 0x");
	for (i = 7; i >= 0; i--)
		kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i));
	kprintf("\n");
}


#if MACH_KDB
/*
 *	Display local APIC state.
 *
 *	ddb command: da
 */
void
db_apic(__unused db_expr_t addr,
	__unused int have_addr,
	__unused db_expr_t count,
	__unused char *modif)
{

	lapic_dump();

	return;
}

#endif

boolean_t
lapic_probe(void)
{
	uint32_t	lo;
	uint32_t	hi;

	if (cpuid_features() & CPUID_FEATURE_APIC)
		return TRUE;

	if (cpuid_family() == 6 || cpuid_family() == 15) {
		/*
		 * Mobile Pentiums:
		 * There may be a local APIC which wasn't enabled by the BIOS,
		 * so try to enable it explicitly.
		 */
		rdmsr(MSR_IA32_APIC_BASE, lo, hi);
		lo &= ~MSR_IA32_APIC_BASE_BASE;
		lo |= MSR_IA32_APIC_BASE_ENABLE | LAPIC_START;
		lo |= MSR_IA32_APIC_BASE_ENABLE;
		wrmsr(MSR_IA32_APIC_BASE, lo, hi);

		/*
		 * Re-initialize cpu features info and re-check.
		 */
		cpuid_set_info();
		if (cpuid_features() & CPUID_FEATURE_APIC) {
			printf("Local APIC discovered and enabled\n");
			lapic_os_enabled = TRUE;
			lapic_interrupt_base = LAPIC_REDUCED_INTERRUPT_BASE;
			return TRUE;
		}
	}

	return FALSE;
}

void
lapic_shutdown(void)
{
	uint32_t lo;
	uint32_t hi;
	uint32_t value;

	/* Shutdown if local APIC was enabled by OS */
	if (lapic_os_enabled == FALSE)
		return;

	mp_disable_preemption();

	/* ExtINT: masked */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value |= LAPIC_LVT_MASKED;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Error: masked */
	LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);

	/* Timer: masked */
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) | LAPIC_LVT_MASKED);

	/* Perfmon: masked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_READ(LVT_PERFCNT) | LAPIC_LVT_MASKED);

	/* APIC software disabled */
	LAPIC_WRITE(SVR, LAPIC_READ(SVR) & ~LAPIC_SVR_ENABLE);

	/* Bypass the APIC completely and update cpu features */
	rdmsr(MSR_IA32_APIC_BASE, lo, hi);
	lo &= ~MSR_IA32_APIC_BASE_ENABLE;
	wrmsr(MSR_IA32_APIC_BASE, lo, hi);
	cpuid_set_info();

	mp_enable_preemption();
}

void
lapic_configure(void)
{
	int	value;

	if (lapic_error_time_threshold == 0 && cpu_number() == 0) {
		nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold);
		if (!PE_parse_boot_argn("lapic_dont_panic", &lapic_dont_panic, sizeof(lapic_dont_panic))) {
			lapic_dont_panic = FALSE;
		}
	}

	/* Set flat delivery model, logical processor id */
	LAPIC_WRITE(DFR, LAPIC_DFR_FLAT);
	LAPIC_WRITE(LDR, (get_cpu_number()) << LAPIC_LDR_SHIFT);

	/* Accept all */
	LAPIC_WRITE(TPR, 0);

	LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE);

	/* ExtINT */
	if (get_cpu_number() == master_cpu) {
		value = LAPIC_READ(LVT_LINT0);
		value &= ~LAPIC_LVT_MASKED;
		value |= LAPIC_LVT_DM_EXTINT;
		LAPIC_WRITE(LVT_LINT0, value);
	}

	/* Timer: unmasked, one-shot */
	LAPIC_WRITE(LVT_TIMER, LAPIC_VECTOR(TIMER));

	/* Perfmon: unmasked */
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));

	/* Thermal: unmasked */
	LAPIC_WRITE(LVT_THERMAL, LAPIC_VECTOR(THERMAL));

#if CONFIG_MCA
	/* CMCI, if available */
	if (mca_is_cmci_present())
		LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI));
#endif

	if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) ||
	    (cpu_number() != master_cpu)) {
		lapic_esr_clear();
		LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR));
	}
}

void
lapic_set_timer(
	boolean_t		interrupt_unmasked,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor,
	lapic_timer_count_t	initial_count)
{
	uint32_t	timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
	mp_enable_preemption();
}

void
lapic_config_timer(
	boolean_t		interrupt_unmasked,
	lapic_timer_mode_t	mode,
	lapic_timer_divide_t	divisor)
{
	uint32_t	timer_vector;

	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
			  LAPIC_LVT_PERIODIC |
			  LAPIC_LVT_TSC_DEADLINE);
	timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED;
	timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0;
	LAPIC_WRITE(LVT_TIMER, timer_vector);
	LAPIC_WRITE(TIMER_DIVIDE_CONFIG, divisor);
	mp_enable_preemption();
}

/*
 * Configure TSC-deadline timer mode. The lapic interrupt is always unmasked.
 */
void
lapic_config_tsc_deadline_timer(void)
{
	uint32_t	timer_vector;

	DBG("lapic_config_tsc_deadline_timer()\n");
	mp_disable_preemption();
	timer_vector = LAPIC_READ(LVT_TIMER);
	timer_vector &= ~(LAPIC_LVT_MASKED |
			  LAPIC_LVT_PERIODIC);
	timer_vector |= LAPIC_LVT_TSC_DEADLINE;
	LAPIC_WRITE(LVT_TIMER, timer_vector);

	/* Serialize writes per Intel OSWG */
	do {
		lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32));
	} while (lapic_get_tsc_deadline_timer() == 0);
	lapic_set_tsc_deadline_timer(0);

	mp_enable_preemption();
	DBG("lapic_config_tsc_deadline_timer() done\n");
}

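/*
 * Fast path: re-arm a timer whose LVT mode and divide configuration were set
 * up earlier (e.g. by lapic_config_timer()); just unmask the LVT entry and
 * load the initial count.
 */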
void
lapic_set_timer_fast(
	lapic_timer_count_t	initial_count)
{
	LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED);
	LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count);
}

void
lapic_set_tsc_deadline_timer(uint64_t deadline)
{
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}

uint64_t
lapic_get_tsc_deadline_timer(void)
{
	return rdmsr64(MSR_IA32_TSC_DEADLINE);
}

void
lapic_get_timer(
	lapic_timer_mode_t	*mode,
	lapic_timer_divide_t	*divisor,
	lapic_timer_count_t	*initial_count,
	lapic_timer_count_t	*current_count)
{
	mp_disable_preemption();
	if (mode)
		*mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ?
			periodic : one_shot;
	if (divisor)
		*divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK;
	if (initial_count)
		*initial_count = LAPIC_READ(TIMER_INITIAL_COUNT);
	if (current_count)
		*current_count = LAPIC_READ(TIMER_CURRENT_COUNT);
	mp_enable_preemption();
}

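/*
 * A write of any value to the EOI register signals completion of the current
 * interrupt, letting the local APIC retire the highest-priority in-service
 * vector and deliver the next pending one.
 */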
static inline void
_lapic_end_of_interrupt(void)
{
	LAPIC_WRITE(EOI, 0);
}

void
lapic_end_of_interrupt(void)
{
	_lapic_end_of_interrupt();
}

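/*
 * Delivery of a performance-monitoring interrupt causes the hardware to set
 * the mask bit in LVT_PERFCNT, so the entry must be rewritten to re-enable
 * further PMIs; these helpers do that.
 */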
void lapic_unmask_perfcnt_interrupt(void) {
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT));
}

void lapic_set_perfcnt_interrupt_mask(boolean_t mask) {
	uint32_t m = (mask ? LAPIC_LVT_MASKED : 0);
	LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m);
}

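/*
 * Register a handler for a local APIC interrupt source. The vector may be
 * passed either as an absolute vector or as an offset relative to
 * lapic_interrupt_base; absolute vectors are normalized here.
 */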
void
lapic_set_intr_func(int vector, i386_intr_func_t func)
{
	if (vector > lapic_interrupt_base)
		vector -= lapic_interrupt_base;

	switch (vector) {
	case LAPIC_NMI_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_PERFCNT_INTERRUPT:
	case LAPIC_CMCI_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		lapic_intr_func[vector] = func;
		break;
	default:
		panic("lapic_set_intr_func(%d,%p) invalid vector\n",
			vector, func);
	}
}

void lapic_set_pmi_func(i386_intr_func_t func) {
	lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func);
}

int
lapic_interrupt(int interrupt_num, x86_saved_state_t *state)
{
	int	retval = 0;
	int	esr = -1;

	interrupt_num -= lapic_interrupt_base;
	if (interrupt_num < 0) {
		if (interrupt_num == (LAPIC_NMI_INTERRUPT - lapic_interrupt_base) &&
		    lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) {
			retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state);
			return retval;
		}
		else
			return 0;
	}

	switch (interrupt_num) {
	case LAPIC_TIMER_INTERRUPT:
	case LAPIC_THERMAL_INTERRUPT:
	case LAPIC_INTERPROCESSOR_INTERRUPT:
	case LAPIC_PM_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_PERFCNT_INTERRUPT:
		/* If a function has been registered, invoke it. Otherwise,
		 * pass up to IOKit.
		 */
		if (lapic_intr_func[interrupt_num] != NULL) {
			(void) (*lapic_intr_func[interrupt_num])(state);
			/* Unmask the interrupt since we don't expect legacy users
			 * to be responsible for it.
			 */
			lapic_unmask_perfcnt_interrupt();
			_lapic_end_of_interrupt();
			retval = 1;
		}
		break;
	case LAPIC_CMCI_INTERRUPT:
		if (lapic_intr_func[interrupt_num] != NULL)
			(void) (*lapic_intr_func[interrupt_num])(state);
		/* return 0 for the platform expert to handle */
		break;
	case LAPIC_ERROR_INTERRUPT:
		/* We treat error interrupts on APs as fatal.
		 * The current interrupt steering scheme directs most
		 * external interrupts to the BSP (HPET interrupts being
		 * a notable exception); hence, such an error
		 * on an AP may signify LVT corruption (with "may" being
		 * the operative word). On the BSP, we adopt a more
		 * lenient approach, in the interests of enhancing
		 * debuggability and reducing fragility.
		 * If "lapic_error_count_threshold" error interrupts
		 * occur within "lapic_error_time_threshold" absolute
		 * time units, we mask the error vector and log. The
		 * error interrupts themselves are likely
		 * side effects of issues which are beyond the purview of
		 * the local APIC interrupt handler, however. The Error
		 * Status Register value (the illegal destination
		 * vector code is one observed in practice) indicates
		 * the immediate cause of the error.
		 */
		esr = lapic_esr_read();
		lapic_dump();

		if ((debug_boot_arg && (lapic_dont_panic == FALSE)) ||
		    cpu_number() != master_cpu) {
			panic("Local APIC error, ESR: %d\n", esr);
		}

		if (cpu_number() == master_cpu) {
			uint64_t abstime = mach_absolute_time();
			if ((abstime - lapic_last_master_error) < lapic_error_time_threshold) {
				if (lapic_master_error_count++ > lapic_error_count_threshold) {
					lapic_errors_masked = TRUE;
					LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED);
					printf("Local APIC: errors masked\n");
				}
			}
			else {
				lapic_last_master_error = abstime;
				lapic_master_error_count = 0;
			}
			printf("Local APIC error on master CPU, ESR: %d, error count this run: %d\n", esr, lapic_master_error_count);
		}

		_lapic_end_of_interrupt();
		retval = 1;
		break;
	case LAPIC_SPURIOUS_INTERRUPT:
		kprintf("SPIV\n");
		/* No EOI required here */
		retval = 1;
		break;
	case LAPIC_PMC_SW_INTERRUPT:
	{
#if CONFIG_COUNTERS
		thread_t old, new;
		ml_get_csw_threads(&old, &new);

		if (pmc_context_switch(old, new) == TRUE) {
			retval = 1;
			/* No EOI required for SWI */
		}
#endif /* CONFIG_COUNTERS */
	}
		break;
	}

	return retval;
}

void
lapic_smm_restore(void)
{
	boolean_t state;

	if (lapic_os_enabled == FALSE)
		return;

	state = ml_set_interrupts_enabled(FALSE);

	if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) {
		/*
		 * A bogus SMI handler enables interrupts but does not know
		 * about local APIC interrupt sources. When the APIC timer
		 * counts down to zero while in SMM, the local APIC ends up
		 * waiting for an EOI even though no interrupt was delivered
		 * to the OS.
		 */
		_lapic_end_of_interrupt();

		/*
		 * The timer is one-shot; if it has already expired, start
		 * another quick countdown so a fresh timer interrupt is
		 * generated.
		 */
		if (LAPIC_READ(TIMER_CURRENT_COUNT) == 0) {
			LAPIC_WRITE(TIMER_INITIAL_COUNT, 1);
		}

		kprintf("lapic_smm_restore\n");
	}

	ml_set_interrupts_enabled(state);
}

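/*
 * Send a fixed-delivery IPI to the given cpu. The destination APIC ID goes
 * into the high ICR word first; the subsequent write of the low ICR word
 * (vector and delivery mode) is what actually triggers the send.
 */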
void
lapic_send_ipi(int cpu, int vector)
{
	boolean_t	state;

	if (vector < lapic_interrupt_base)
		vector += lapic_interrupt_base;

	state = ml_set_interrupts_enabled(FALSE);

	/* Wait for pending outgoing send to complete */
	while (LAPIC_READ(ICR) & LAPIC_ICR_DS_PENDING) {
		cpu_pause();
	}

	LAPIC_WRITE(ICRD, cpu_to_lapic[cpu] << LAPIC_ICRD_DEST_SHIFT);
	LAPIC_WRITE(ICR, vector | LAPIC_ICR_DM_FIXED);

	(void) ml_set_interrupts_enabled(state);
}

/*
 * The following interfaces are privately exported to AICPM.
 */

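/*
 * The IRR and ISR are 256-bit bitmaps exposed as eight consecutive 32-bit
 * registers (one per group of 32 vectors); LAPIC_READ_OFFSET(base, i) reads
 * the i'th 32-bit chunk.
 */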
boolean_t
lapic_is_interrupt_pending(void)
{
	int		i;

	for (i = 0; i < 8; i += 1) {
		if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) ||
		    (LAPIC_READ_OFFSET(ISR_BASE, i) != 0))
			return (TRUE);
	}

	return (FALSE);
}

boolean_t
lapic_is_interrupting(uint8_t vector)
{
	int		i;
	int		bit;
	uint32_t	irr;
	uint32_t	isr;

	i = vector / 32;
	bit = 1 << (vector % 32);

	irr = LAPIC_READ_OFFSET(IRR_BASE, i);
	isr = LAPIC_READ_OFFSET(ISR_BASE, i);

	if ((irr | isr) & bit)
		return (TRUE);

	return (FALSE);
}

void
lapic_interrupt_counts(uint64_t intrs[256])
{
	int		i;
	int		j;
	int		bit;
	uint32_t	irr;
	uint32_t	isr;

	if (intrs == NULL)
		return;

	for (i = 0; i < 8; i += 1) {
		irr = LAPIC_READ_OFFSET(IRR_BASE, i);
		isr = LAPIC_READ_OFFSET(ISR_BASE, i);

		if ((isr | irr) == 0)
			continue;

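		/* Vectors 0-15 are reserved/illegal for the local APIC, so
		 * skip them in the first 32-bit chunk.
		 */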
		for (j = (i == 0) ? 16 : 0; j < 32; j += 1) {
			bit = (32 * i) + j;
			if ((isr | irr) & (1 << j))
				intrs[bit] += 1;
		}
	}
}

void
lapic_disable_timer(void)
{
	uint32_t	lvt_timer;

	/*
	 * If we're in deadline timer mode,
	 * simply clear the deadline timer, otherwise
	 * mask the timer interrupt and clear the countdown.
	 */
	lvt_timer = LAPIC_READ(LVT_TIMER);
	if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) {
		wrmsr64(MSR_IA32_TSC_DEADLINE, 0);
	} else {
		LAPIC_WRITE(LVT_TIMER, lvt_timer | LAPIC_LVT_MASKED);
		LAPIC_WRITE(TIMER_INITIAL_COUNT, 0);
		lvt_timer = LAPIC_READ(LVT_TIMER);
	}
}