/* osfmk/i386/acpi.c — Apple XNU (xnu-7195.101.1) */
/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
b0d623f7
A
29#include <i386/pmap.h>
30#include <i386/proc_reg.h>
31#include <i386/mp_desc.h>
91447636 32#include <i386/misc_protos.h>
b0d623f7 33#include <i386/mp.h>
2d21ac55 34#include <i386/cpu_data.h>
6d2010ae 35#if CONFIG_MTRR
91447636 36#include <i386/mtrr.h>
6d2010ae 37#endif
04b8595b
A
38#if HYPERVISOR
39#include <kern/hv_support.h>
40#endif
b0d623f7 41#if CONFIG_VMX
2d21ac55 42#include <i386/vmx/vmx_cpu.h>
b0d623f7 43#endif
6d2010ae 44#include <i386/ucode.h>
91447636 45#include <i386/acpi.h>
0c530ab8 46#include <i386/fpu.h>
593a1d5f 47#include <i386/lapic.h>
91447636 48#include <i386/mp.h>
0c530ab8 49#include <i386/mp_desc.h>
2d21ac55 50#include <i386/serial_io.h>
b0d623f7 51#if CONFIG_MCA
0c530ab8 52#include <i386/machine_check.h>
b0d623f7 53#endif
593a1d5f 54#include <i386/pmCPU.h>
91447636 55
0b4c1975
A
56#include <i386/tsc.h>
57
f427ee49
A
58#define UINT64 uint64_t
59#define UINT32 uint32_t
60#define UINT16 uint16_t
61#define UINT8 uint8_t
62#define RSDP_VERSION_ACPI10 0
63#define RSDP_VERSION_ACPI20 2
64#include <acpi/Acpi.h>
65#include <acpi/Acpi_v1.h>
66#include <pexpert/i386/efi.h>
67
91447636 68#include <kern/cpu_data.h>
bd504ef0
A
69#include <kern/machine.h>
70#include <kern/timer_queue.h>
2d21ac55 71#include <console/serial_protos.h>
6d2010ae 72#include <machine/pal_routines.h>
0b4c1975 73#include <vm/vm_page.h>
3a60a9f5 74
2d21ac55 75#if HIBERNATION
3a60a9f5 76#include <IOKit/IOHibernatePrivate.h>
2d21ac55 77#endif
91447636 78#include <IOKit/IOPlatformExpert.h>
0b4c1975
A
79#include <sys/kdebug.h>
80
5ba3f43e
A
81#if MONOTONIC
82#include <kern/monotonic.h>
83#endif /* MONOTONIC */
84
b0d623f7 85#if CONFIG_SLEEP
0a7de745
A
86extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
87extern void acpi_wake_prot(void);
b0d623f7 88#endif
99c3a104
A
89extern kern_return_t IOCPURunPlatformQuiesceActions(void);
90extern kern_return_t IOCPURunPlatformActiveActions(void);
fe8ab488 91extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);
91447636 92
0a7de745 93extern void fpinit(void);
0c530ab8 94
f427ee49
A
/*
 * Debug logging: DBG() forwards to kprintf() on DEVELOPMENT/DEBUG kernels
 * and compiles to nothing on release kernels.
 */
#if DEVELOPMENT || DEBUG
#define DBG(x...) kprintf(x)
#else
#define DBG(x...)
#endif
100
91447636
A
101vm_offset_t
102acpi_install_wake_handler(void)
103{
b0d623f7
A
104#if CONFIG_SLEEP
105 install_real_mode_bootstrap(acpi_wake_prot);
106 return REAL_MODE_BOOTSTRAP_OFFSET;
107#else
108 return 0;
109#endif
91447636
A
110}
111
3e170ce0 112#if CONFIG_SLEEP
91447636 113
0a7de745
A
114unsigned int save_kdebug_enable = 0;
115static uint64_t acpi_sleep_abstime;
116static uint64_t acpi_idle_abstime;
117static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
118boolean_t deep_idle_rebase = TRUE;
0b4c1975 119
3e170ce0
A
#if HIBERNATION
/*
 * Context handed from acpi_sleep_kernel() to acpi_hibernate(): the
 * platform sleep callback and its opaque argument.
 */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;       /* platform routine that puts the system to sleep */
	void *refcon;                   /* opaque argument passed through to func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

/*
 * Runs on the boot CPU just before platform sleep when hibernation is
 * compiled in. If this CPU is armed for hibernation (cpu_hibernate set),
 * the hibernate image is written first; depending on the image writer's
 * verdict the machine is powered off, restarted, or allowed to continue
 * into regular sleep. Tracing and (if configured) VMX are then quiesced,
 * the pre-sleep timestamp is recorded, and control passes to the platform
 * sleep callback, which does not return — wake resumes elsewhere.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		/* Write the hibernate image; mode tells us what to do next. */
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	vmx_suspend();
#endif
	/* Stop tracing before handing control to firmware. */
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	/* Timestamp used after wake to rebase the TSC->abstime conversion. */
	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
b0d623f7 174#endif /* HIBERNATION */
3e170ce0 175#endif /* CONFIG_SLEEP */
91447636 176
0a7de745 177extern void slave_pstart(void);
0c530ab8 178
91447636
A
/*
 * System sleep (and, when armed, hibernate) entry point for the boot CPU.
 *
 * func/refcon: platform callback that performs the actual firmware sleep;
 * it is invoked (possibly via acpi_hibernate()) once kernel state is saved.
 * The first half of this function quiesces the machine (offline other CPUs,
 * shut down the APIC, save PM/timer state, stop tracing); the call into
 * acpi_sleep_cpu() does not return until the platform wakes (or sleep
 * fails), after which the second half restores CPU state, APIC, timers
 * and tracing in careful order.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown(true);

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

#if MONOTONIC
	mt_cpu_down(cdp);
#endif /* MONOTONIC */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Tracing is disabled across sleep and restored on the wake path. */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	/* Pre-sleep timestamp, used below to rebase nanotime after wake. */
	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* --- Wake path resumes here --- */
	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */
	/* NOTE(review): when CONFIG_SLEEP is not configured, `start` is never
	 * initialized before the `elapsed += ... - start` below — confirm this
	 * function is only built with CONFIG_SLEEP enabled. */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output) {
		pal_serial_init();
	}

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode and apply CPU workarounds */
	ucode_update_wake_and_apply_cpu_was();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure(true);
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* If tracing stayed off, honor a trace_wake boot-arg style restart. */
	if (kdebug_enable == 0) {
		elapsed_trace_start += kdebug_wake();
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();


#if HYPERVISOR
	/* Notify hypervisor that we are about to resume */
	hv_resume();
#endif

	IOCPURunPlatformActiveActions();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}
b0d623f7 385
f427ee49
A
/*
 * Pre-hibernate-handoff hook: rebuild VM structures before the hibernate
 * restore path runs. No-op when hibernation is compiled out.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
393
/*
 * Post-hibernate-restore hook: if this CPU came back from a hibernate
 * image, finish machine-level hibernate init (bracketed by KDBG trace
 * events) and clear the hibernate flag. No-op otherwise, and when
 * hibernation is compiled out.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (!current_cpu_datap()->cpu_hibernate) {
		return;
	}

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
	hibernate_machine_init();
	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
	current_cpu_datap()->cpu_hibernate = 0;
#endif /* HIBERNATION */
}
406
bd504ef0
A
/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	/* Must run on the boot processor only. */
	assert(cpu_number() == master_cpu);

#if MONOTONIC
	mt_cpu_down(cpu_datap(0));
#endif /* MONOTONIC */

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Drain any already-latched timer (and, with MONOTONIC, perf-counter)
	 * interrupts by briefly re-enabling interrupts, per the contract above.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if MONOTONIC
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* MONOTONIC */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	/* Timestamp of entering deep idle; used for nanotime rebase on wake. */
	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overridden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

#if MONOTONIC
	mt_cpu_up(cpu_datap(0));
#endif /* MONOTONIC */

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		kdebug_wake();
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
512
b0d623f7
A
513extern char real_mode_bootstrap_end[];
514extern char real_mode_bootstrap_base[];
515
/*
 * Install the real-mode bootstrap trampoline used both for waking from
 * sleep and for starting slave processors. prot_entry is the
 * protected-mode entry point the trampoline will eventually jump to.
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches — presumably so a CPU fetching the trampoline in real
	 * mode sees it in memory before its caches are configured; confirm. */
	__asm__("wbinvd");
}
541
143464d5 542boolean_t
0a7de745
A
543ml_recent_wake(void)
544{
143464d5
A
545 uint64_t ctime = mach_absolute_time();
546 assert(ctime > acpi_wake_postrebase_abstime);
0a7de745 547 return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
143464d5 548}
f427ee49
A
549
/*
 * 8-bit additive checksum: the mod-256 sum of size bytes starting at ptr.
 * ACPI tables are considered valid when all their bytes sum to zero.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t total = 0;
	uint8_t *cur = ptr;
	uint8_t *end = ptr + size;

	while (cur < end) {
		total = (uint8_t)(total + *cur);
		cur++;
	}

	return total;
}
562
/*
 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
 * physical.
 *
 * Expands to a full function body: scans the TableOffsetEntry array
 * (entry_type is the pointer width — UINT32 for RSDT, UINT64 for XSDT),
 * and for the first entry whose 4-byte Signature matches, returns the
 * table if its checksum is valid, or NULL (with a debug message) if not.
 * Returns NULL when no entry matches.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
 \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
 \
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
 \
	return NULL; \
}
597
/* Search the XSDT (64-bit table pointers) for a table with the given signature. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64);
}
603
/* Search the RSDT (32-bit table pointers) for a table with the given signature. */
static ACPI_TABLE_HEADER *
acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32);
}
609
610/*
611 * Returns a pointer to an ACPI table header corresponding to the table
612 * whose signature is passed in, or NULL if no such table could be found.
613 */
614static ACPI_TABLE_HEADER *
615acpi_find_table(uintptr_t rsdp_physaddr, const char *signature)
616{
617 static RSDP_DESCRIPTOR *rsdp = NULL;
618 static XSDT_DESCRIPTOR *xsdtp = NULL;
619 static RSDT_DESCRIPTOR *rsdtp = NULL;
620
621 if (signature == NULL) {
622 DBG("Invalid NULL signature passed to acpi_find_table\n");
623 return NULL;
624 }
625
626 /*
627 * RSDT or XSDT is required; without it, we cannot locate other tables.
628 */
629 if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) {
630 rsdp = PHYSMAP_PTOV(rsdp_physaddr);
631
632 /* Verify RSDP signature */
633 if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) {
634 DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
635 rsdp = NULL;
636 return NULL;
637 }
638
639 /* Verify RSDP checksum */
640 if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) {
641 DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
642 (unsigned long)rsdp_physaddr);
643 rsdp = NULL;
644 return NULL;
645 }
646
647 /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
648 if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) {
649 DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
650 rsdp = NULL;
651 return NULL;
652 } else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) {
653 /* XSDT (with 64-bit pointers to tables) */
654 rsdtp = NULL;
655 xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress);
656 if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) {
657 DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
658 (unsigned long)rsdp->XsdtPhysicalAddress);
659 xsdtp = NULL;
660 return NULL;
661 }
662 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) {
663 DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
664 rsdp = NULL;
665 return NULL;
666 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) {
667 /* RSDT (with 32-bit pointers to tables) */
668 xsdtp = NULL;
669 rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress);
670 if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) {
671 DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
672 (unsigned long)rsdp->RsdtPhysicalAddress);
673 rsdtp = NULL;
674 return NULL;
675 }
676 } else {
677 DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
678 rsdp->Revision);
679 rsdp = NULL;
680 return NULL;
681 }
682 }
683
684 assert(xsdtp != NULL || rsdtp != NULL);
685
686 if (__probable(xsdtp != NULL)) {
687 return acpi_find_table_via_xsdt(xsdtp, signature);
688 } else if (rsdtp != NULL) {
689 return acpi_find_table_via_rsdt(rsdtp, signature);
690 }
691
692 return NULL;
693}
694
695/*
696 * Returns the count of enabled logical processors present in the ACPI
697 * MADT, or 0 if the MADT could not be located.
698 */
699uint32_t
700acpi_count_enabled_logical_processors(void)
701{
702 MULTIPLE_APIC_TABLE *madtp;
703 void *end_ptr;
704 APIC_HEADER *next_apic_entryp;
705 uint32_t enabled_cpu_count = 0;
706 uint64_t rsdp_physaddr;
707
708 rsdp_physaddr = efi_get_rsdp_physaddr();
709 if (__improbable(rsdp_physaddr == 0)) {
710 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
711 return 0;
712 }
713
714 madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT);
715
716 if (__improbable(madtp == NULL)) {
717 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
718 return 0;
719 }
720
721 end_ptr = (void *)((uintptr_t)madtp + madtp->Length);
722 next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE));
723
724 while ((void *)next_apic_entryp < end_ptr) {
725 switch (next_apic_entryp->Type) {
726 case APIC_PROCESSOR:
727 {
728 MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp;
729 if (madt_procp->ProcessorEnabled) {
730 enabled_cpu_count++;
731 }
732
733 break;
734 }
735
736 default:
737 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type,
738 next_apic_entryp->Length);
739 break;
740 }
741
742 next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length);
743 }
744
745 return enabled_cpu_count;
746}