]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/acpi.c
d4e14d5118bbe15657d89d271aa42062c15324f7
[apple/xnu.git] / osfmk / i386 / acpi.c
1 /*
2 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
33 #include <i386/mp.h>
34 #include <i386/cpu_data.h>
35 #if CONFIG_MTRR
36 #include <i386/mtrr.h>
37 #endif
38 #if HYPERVISOR
39 #include <kern/hv_support.h>
40 #endif
41 #if CONFIG_VMX
42 #include <i386/vmx/vmx_cpu.h>
43 #endif
44 #include <i386/ucode.h>
45 #include <i386/acpi.h>
46 #include <i386/fpu.h>
47 #include <i386/lapic.h>
48 #include <i386/mp.h>
49 #include <i386/mp_desc.h>
50 #include <i386/serial_io.h>
51 #if CONFIG_MCA
52 #include <i386/machine_check.h>
53 #endif
54 #include <i386/pmCPU.h>
55
56 #include <i386/tsc.h>
57
58 #define UINT64 uint64_t
59 #define UINT32 uint32_t
60 #define UINT16 uint16_t
61 #define UINT8 uint8_t
62 #define RSDP_VERSION_ACPI10 0
63 #define RSDP_VERSION_ACPI20 2
64 #include <acpi/Acpi.h>
65 #include <acpi/Acpi_v1.h>
66 #include <pexpert/i386/efi.h>
67
68 #include <kern/cpu_data.h>
69 #include <kern/machine.h>
70 #include <kern/timer_queue.h>
71 #include <console/serial_protos.h>
72 #include <machine/pal_routines.h>
73 #include <vm/vm_page.h>
74
75 #if HIBERNATION
76 #include <IOKit/IOHibernatePrivate.h>
77 #endif
78 #include <IOKit/IOPlatformExpert.h>
79 #include <sys/kdebug.h>
80
81 #if MONOTONIC
82 #include <kern/monotonic.h>
83 #endif /* MONOTONIC */
84
85 #if CONFIG_SLEEP
86 extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
87 extern void acpi_wake_prot(void);
88 #endif
89 extern kern_return_t IOCPURunPlatformQuiesceActions(void);
90 extern kern_return_t IOCPURunPlatformActiveActions(void);
91 extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);
92
93 extern void fpinit(void);
94
95 #if DEVELOPMENT || DEBUG
96 #define DBG(x...) kprintf(x)
97 #else
98 #define DBG(x...)
99 #endif
100
/*
 * Install the protected-mode wake entry point (acpi_wake_prot) into the
 * low-memory real-mode bootstrap page and return the physical offset the
 * firmware should resume at on wake from sleep.
 * Returns 0 when sleep support is compiled out.
 */
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}
111
112 #if CONFIG_SLEEP
113
/* kdebug enable state saved across sleep; tracing is disabled while the
 * platform is asleep and restored on wake. */
unsigned int save_kdebug_enable = 0;
/* Timestamps bracketing sleep/idle transitions, used to rebase the
 * TSC -> absolute-time conversion across periods when the clock was
 * effectively frozen. */
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
/* When TRUE (default), nanotime is rebased over deep idle so absolute
 * time does not advance while asleep; can be overridden via
 * sysctl tsc.deep_idle_rebase (see acpi_idle_kernel). */
boolean_t deep_idle_rebase = TRUE;

#if HIBERNATION
/* Bundles the platform sleep callback and its argument so both can be
 * passed through acpi_sleep_cpu()'s single refcon to acpi_hibernate(). */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
126
/*
 * Callback invoked via acpi_sleep_cpu() with CPU state already saved,
 * immediately before the platform sleeps.  If a hibernate image is
 * pending, it is written out here; depending on the requested
 * post-write action the machine is halted, restarted, or allowed to
 * proceed into regular sleep.  This function does not return normally
 * (see trailing comment) — on wake, execution resumes at the installed
 * wake vector instead.
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
	    (acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate) {
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt) {
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPEHaltCPU);
			}
		} else if (mode == kIOHibernatePostWriteRestart) {
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) {
				(*PE_halt_restart)(kPERestartCPU);
			}
		} else {
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}
	}

#if CONFIG_VMX
	/* Save VMX state before the platform powers down */
	vmx_suspend();
#endif
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	/* Record the pre-sleep timestamp used to rebase nanotime on wake */
	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
174 #endif /* HIBERNATION */
175 #endif /* CONFIG_SLEEP */
176
177 extern void slave_pstart(void);
178
/*
 * Put the system to sleep (S3) or hibernate, then restore machine state
 * on wake.  Called on the boot processor with all other CPUs expected to
 * be haltable; `func`/`refcon` is the platform callback that performs the
 * final firmware sleep entry.  Execution resumes after acpi_sleep_cpu()
 * returns on wake, at which point this routine re-initializes the APIC,
 * timers, FPU, microcode, MTRR/PAT and power-management state.
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	cpu_data_t *cdp = current_cpu_datap();
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	my_cpu = cpu_number();
	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate,
	    my_cpu);

	/* Get all CPUs to be in the "off" state */
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu) {
			continue;
		}
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS) {
			panic("Error %d trying to transition CPU %d to OFF",
			    rc, cpu);
		}
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown(true);

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

#if MONOTONIC
	mt_cpu_down(cdp);
#endif /* MONOTONIC */

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START);

	/* Disable tracing while asleep; restored after wake below */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
	vmx_suspend();
#endif
	acpi_sleep_cpu(func, refcon);
#endif

	/* --- execution resumes here on wake --- */
	acpi_wake_abstime = mach_absolute_time();
	/* Rebase TSC->absolute time conversion, using timestamp
	 * recorded before sleep.
	 */
	rtc_nanotime_init(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = start = mach_absolute_time();
	assert(start >= acpi_sleep_abstime);

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output) {
		pal_serial_init();
	}

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;
	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable fast syscall */
	cpu_syscall_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode and apply CPU workarounds */
	ucode_update_wake_and_apply_cpu_was();

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume(did_hibernate);
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe()) {
		lapic_configure(true);
	}

#if KASAN
	/*
	 * The sleep implementation uses indirect noreturn calls, so we miss stack
	 * unpoisoning. Do it explicitly.
	 */
	kasan_unpoison_curstack(true);
#endif

	elapsed += mach_absolute_time() - start;

	rtc_decrementer_configure();
	kdebug_enable = save_kdebug_enable;

	/* Turn tracing on if the trace_wake boot-arg requests it */
	if (kdebug_enable == 0) {
		elapsed_trace_start += kdebug_wake();
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed,
	    elapsed_trace_start, acpi_wake_abstime);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();


#if MONOTONIC
	mt_cpu_up(cdp);
#endif /* MONOTONIC */

#if HIBERNATION
	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif /* HIBERNATION */

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif /* CONFIG_SLEEP */
}
380
/*
 * Platform hook invoked before hibernate resume becomes active:
 * rebuilds VM structures from the hibernate image.  No-op when
 * hibernation support is compiled out.
 */
void
ml_hibernate_active_pre(void)
{
#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif /* HIBERNATION */
}
388
/*
 * Platform hook invoked after hibernate resume: performs machine-level
 * hibernate initialization and clears the boot CPU's hibernate flag.
 * No-op when hibernation support is compiled out or no image was in use.
 */
void
ml_hibernate_active_post(void)
{
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START);
		hibernate_machine_init();
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END);
		current_cpu_datap()->cpu_hibernate = 0;
	}
#endif /* HIBERNATION */
}
401
402 /*
403 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
404 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
405 * processors are expected already to have been offlined in the deepest C-state.
406 *
407 * The contract with ACPI is that although the kernel is called with interrupts
408 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
409 * interrupt. However, the callback function will be called once this has
410 * occurred and interrupts are guaranteed to be disabled at that time,
411 * and to remain disabled during C-state entry, exit (wake) and return
412 * from acpi_idle_kernel.
413 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
	    cpu_number(), istate ? "enabled" : "disabled");

	/* Must run on the boot processor (slaves are already offline) */
	assert(cpu_number() == master_cpu);

#if MONOTONIC
	mt_cpu_down(cpu_datap(0));
#endif /* MONOTONIC */

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Dismiss any timer (or PMI) interrupt still latched in the local
	 * APIC: briefly re-enable interrupts so it can be taken, then
	 * disable again, repeating until none remain pending.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)
#if MONOTONIC
	    || lapic_is_interrupting(LAPIC_VECTOR(PERFCNT))
#endif /* MONOTONIC */
	    ) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	if (current_cpu_datap()->cpu_hibernate) {
		/* Call hibernate_write_image() to put disk to low power state */
		hibernate_write_image();
		cpu_datap(0)->cpu_hibernate = 0;
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	IOCPURunPlatformQuiesceActions();

	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overriden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

#if MONOTONIC
	mt_cpu_up(cpu_datap(0));
#endif /* MONOTONIC */

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		kdebug_wake();
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
507
508 extern char real_mode_bootstrap_end[];
509 extern char real_mode_bootstrap_base[];
510
/*
 * Copy the real-mode bootstrap trampoline into the reserved low-memory
 * page and patch it so that, after switching to protected mode, the CPU
 * jumps to prot_entry.  Used both for waking from sleep and for starting
 * slave processors.
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
	    (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
	    real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches so the trampoline is visible before any CPU fetches it */
	__asm__("wbinvd");
}
536
537 boolean_t
538 ml_recent_wake(void)
539 {
540 uint64_t ctime = mach_absolute_time();
541 assert(ctime > acpi_wake_postrebase_abstime);
542 return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC;
543 }
544
/*
 * Compute the 8-bit additive checksum (sum mod 256) of a byte buffer.
 * An ACPI structure is considered valid when this sum is zero.
 */
static uint8_t
cksum8(uint8_t *ptr, uint32_t size)
{
	uint8_t total = 0;
	uint8_t *cur = ptr;
	uint8_t *end = ptr + size;

	while (cur < end) {
		total += *cur++;
	}

	return total;
}
557
558 /*
559 * Parameterized search for a specified table given an sdtp (either RSDT or XSDT).
560 * Note that efiboot does not modify the addresses of tables in the RSDT or XSDT
561 * TableOffsetEntry array, so we do not need to "convert" from efiboot virtual to
562 * physical.
563 */
/*
 * SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type)
 *
 * Expands to the body of a table-lookup function: walks the pointer
 * array at the end of the given RSDT/XSDT (entry_type selects 32-bit
 * vs 64-bit entries), returning the first table whose 4-byte signature
 * matches and whose checksum is valid.  Note that on a signature match
 * with a BAD checksum the search stops and NULL is returned — later
 * duplicate tables are not considered.
 */
#define SEARCH_FOR_ACPI_TABLE(sdtp, signature, entry_type) \
{ \
	uint32_t i, pointer_count; \
 \
	/* Walk the list of tables in the *SDT, looking for the signature passed in */ \
	pointer_count = ((sdtp)->Length - sizeof(ACPI_TABLE_HEADER)) / sizeof(entry_type); \
 \
	for (i = 0; i < pointer_count; i++) { \
		ACPI_TABLE_HEADER *next_table = \
		    (ACPI_TABLE_HEADER *)PHYSMAP_PTOV( \
			(uintptr_t)(sdtp)->TableOffsetEntry[i]); \
		if (strncmp(&next_table->Signature[0], (signature), 4) == 0) { \
			/* \
			 * Checksum the table first, then return it if the checksum \
			 * is valid. \
			 */ \
			if (cksum8((uint8_t *)next_table, next_table->Length) == 0) { \
				return next_table; \
			} else { \
				DBG("Invalid checksum for table [%s]@0x%lx!\n", (signature), \
				    (unsigned long)(sdtp)->TableOffsetEntry[i]); \
				return NULL; \
			} \
		} \
	} \
 \
	return NULL; \
}
592
/*
 * Locate a table by signature via the XSDT (64-bit table pointers).
 * Returns NULL if not found or if the matching table's checksum fails.
 */
static ACPI_TABLE_HEADER *
acpi_find_table_via_xsdt(XSDT_DESCRIPTOR *xsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(xsdtp, signature, UINT64);
}
598
/*
 * Locate a table by signature via the RSDT (32-bit table pointers).
 * Returns NULL if not found or if the matching table's checksum fails.
 */
static ACPI_TABLE_HEADER *
acpi_find_table_via_rsdt(RSDT_DESCRIPTOR *rsdtp, const char *signature)
{
	SEARCH_FOR_ACPI_TABLE(rsdtp, signature, UINT32);
}
604
605 /*
606 * Returns a pointer to an ACPI table header corresponding to the table
607 * whose signature is passed in, or NULL if no such table could be found.
608 */
609 static ACPI_TABLE_HEADER *
610 acpi_find_table(uintptr_t rsdp_physaddr, const char *signature)
611 {
612 static RSDP_DESCRIPTOR *rsdp = NULL;
613 static XSDT_DESCRIPTOR *xsdtp = NULL;
614 static RSDT_DESCRIPTOR *rsdtp = NULL;
615
616 if (signature == NULL) {
617 DBG("Invalid NULL signature passed to acpi_find_table\n");
618 return NULL;
619 }
620
621 /*
622 * RSDT or XSDT is required; without it, we cannot locate other tables.
623 */
624 if (__improbable(rsdp == NULL || (rsdtp == NULL && xsdtp == NULL))) {
625 rsdp = PHYSMAP_PTOV(rsdp_physaddr);
626
627 /* Verify RSDP signature */
628 if (__improbable(strncmp((void *)rsdp, "RSD PTR ", 8) != 0)) {
629 DBG("RSDP signature mismatch: Aborting acpi_find_table\n");
630 rsdp = NULL;
631 return NULL;
632 }
633
634 /* Verify RSDP checksum */
635 if (__improbable(cksum8((uint8_t *)rsdp, sizeof(RSDP_DESCRIPTOR)) != 0)) {
636 DBG("RSDP@0x%lx signature mismatch: Aborting acpi_find_table\n",
637 (unsigned long)rsdp_physaddr);
638 rsdp = NULL;
639 return NULL;
640 }
641
642 /* Ensure the revision of the RSDP indicates the presence of an RSDT or XSDT */
643 if (__improbable(rsdp->Revision >= RSDP_VERSION_ACPI20 && rsdp->XsdtPhysicalAddress == 0ULL)) {
644 DBG("RSDP XSDT Physical Address is 0!: Aborting acpi_find_table\n");
645 rsdp = NULL;
646 return NULL;
647 } else if (__probable(rsdp->Revision >= RSDP_VERSION_ACPI20)) {
648 /* XSDT (with 64-bit pointers to tables) */
649 rsdtp = NULL;
650 xsdtp = PHYSMAP_PTOV(rsdp->XsdtPhysicalAddress);
651 if (cksum8((uint8_t *)xsdtp, xsdtp->Length) != 0) {
652 DBG("ERROR: XSDT@0x%lx checksum is non-zero; not using this XSDT\n",
653 (unsigned long)rsdp->XsdtPhysicalAddress);
654 xsdtp = NULL;
655 return NULL;
656 }
657 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10 && rsdp->RsdtPhysicalAddress == 0)) {
658 DBG("RSDP RSDT Physical Address is 0!: Aborting acpi_find_table\n");
659 rsdp = NULL;
660 return NULL;
661 } else if (__improbable(rsdp->Revision == RSDP_VERSION_ACPI10)) {
662 /* RSDT (with 32-bit pointers to tables) */
663 xsdtp = NULL;
664 rsdtp = PHYSMAP_PTOV((uintptr_t)rsdp->RsdtPhysicalAddress);
665 if (cksum8((uint8_t *)rsdtp, rsdtp->Length) != 0) {
666 DBG("ERROR: RSDT@0x%lx checksum is non-zero; not using this RSDT\n",
667 (unsigned long)rsdp->RsdtPhysicalAddress);
668 rsdtp = NULL;
669 return NULL;
670 }
671 } else {
672 DBG("Unrecognized RSDP Revision (0x%x): Aborting acpi_find_table\n",
673 rsdp->Revision);
674 rsdp = NULL;
675 return NULL;
676 }
677 }
678
679 assert(xsdtp != NULL || rsdtp != NULL);
680
681 if (__probable(xsdtp != NULL)) {
682 return acpi_find_table_via_xsdt(xsdtp, signature);
683 } else if (rsdtp != NULL) {
684 return acpi_find_table_via_rsdt(rsdtp, signature);
685 }
686
687 return NULL;
688 }
689
690 /*
691 * Returns the count of enabled logical processors present in the ACPI
692 * MADT, or 0 if the MADT could not be located.
693 */
694 uint32_t
695 acpi_count_enabled_logical_processors(void)
696 {
697 MULTIPLE_APIC_TABLE *madtp;
698 void *end_ptr;
699 APIC_HEADER *next_apic_entryp;
700 uint32_t enabled_cpu_count = 0;
701 uint64_t rsdp_physaddr;
702
703 rsdp_physaddr = efi_get_rsdp_physaddr();
704 if (__improbable(rsdp_physaddr == 0)) {
705 DBG("acpi_count_enabled_logical_processors: Could not get RSDP physaddr from EFI.\n");
706 return 0;
707 }
708
709 madtp = (MULTIPLE_APIC_TABLE *)acpi_find_table(rsdp_physaddr, ACPI_SIG_MADT);
710
711 if (__improbable(madtp == NULL)) {
712 DBG("acpi_count_enabled_logical_processors: Could not find the MADT.\n");
713 return 0;
714 }
715
716 end_ptr = (void *)((uintptr_t)madtp + madtp->Length);
717 next_apic_entryp = (APIC_HEADER *)((uintptr_t)madtp + sizeof(MULTIPLE_APIC_TABLE));
718
719 while ((void *)next_apic_entryp < end_ptr) {
720 switch (next_apic_entryp->Type) {
721 case APIC_PROCESSOR:
722 {
723 MADT_PROCESSOR_APIC *madt_procp = (MADT_PROCESSOR_APIC *)next_apic_entryp;
724 if (madt_procp->ProcessorEnabled) {
725 enabled_cpu_count++;
726 }
727
728 break;
729 }
730
731 default:
732 DBG("Ignoring MADT entry type 0x%x length 0x%x\n", next_apic_entryp->Type,
733 next_apic_entryp->Length);
734 break;
735 }
736
737 next_apic_entryp = (APIC_HEADER *)((uintptr_t)next_apic_entryp + next_apic_entryp->Length);
738 }
739
740 return enabled_cpu_count;
741 }