/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if HYPERVISOR
#include <kern/hv_support.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void *refcon);
extern void acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);
extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);

extern void fpinit(void);

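/*
 * Install the real-mode wake trampoline and return its physical offset.
 * The caller (presumably the ACPI platform driver) is expected to use this
 * address as the firmware waking vector, so that on resume the firmware
 * re-enters the kernel through acpi_wake_prot().
 */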
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
    install_real_mode_bootstrap(acpi_wake_prot);
    return REAL_MODE_BOOTSTRAP_OFFSET;
#else
    return 0;
#endif
}

#if HIBERNATION
struct acpi_hibernate_callback_data {
    acpi_sleep_callback func;
    void                *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

unsigned int    save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t       deep_idle_rebase = TRUE;

#if CONFIG_SLEEP
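/*
 * Callback handed to acpi_sleep_cpu() on the way down to sleep.
 * If hibernation is armed for this CPU, the hibernate image is written
 * first; depending on the result we power off, restart, or fall through
 * to a normal platform sleep. Tracing is then disabled, platform quiesce
 * actions run, and control passes to the platform sleep callback, which
 * does not return here.
 */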
static void
acpi_hibernate(void *refcon)
{
    uint32_t mode;

    acpi_hibernate_callback_data_t *data =
        (acpi_hibernate_callback_data_t *)refcon;

    if (current_cpu_datap()->cpu_hibernate) {
        mode = hibernate_write_image();

        if (mode == kIOHibernatePostWriteHalt) {
            // off
            HIBLOG("power off\n");
            IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
            if (PE_halt_restart)
                (*PE_halt_restart)(kPEHaltCPU);
        } else if (mode == kIOHibernatePostWriteRestart) {
            // restart
            HIBLOG("restart\n");
            IOCPURunPlatformHaltRestartActions(kPERestartCPU);
            if (PE_halt_restart)
                (*PE_halt_restart)(kPERestartCPU);
        } else {
            // sleep
            HIBLOG("sleep\n");

            // In case we come back via a regular wake rather than a
            // hibernate restore, clear the hibernate flag in memory now.
            cpu_datap(0)->cpu_hibernate = 0;
        }
    }
    kdebug_enable = 0;

    IOCPURunPlatformQuiesceActions();

    acpi_sleep_abstime = mach_absolute_time();

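    /*
     * Hand control to the platform sleep callback passed in from
     * acpi_sleep_kernel(). It does not return; on wake, execution
     * resumes via the handler installed by acpi_install_wake_handler().
     */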
    (data->func)(data->refcon);

    /* should never get here! */
}
#endif /* CONFIG_SLEEP */
#endif /* HIBERNATION */

extern void slave_pstart(void);
extern void hibernate_rebuild_vm_structs(void);

extern unsigned int wake_nkdbufs;

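/*
 * Entry point for system sleep (and hibernation when armed). Takes the
 * remaining CPUs offline, shuts down the local APIC, saves power-management,
 * VMX and timer state, and calls through to the platform sleep callback.
 * On wake it restores machine-check, MTRR/PAT, microcode, LAPIC and
 * timekeeping state, optionally restarts tracing, and re-installs the
 * real-mode bootstrap for slave processor startup.
 */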
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
    acpi_hibernate_callback_data_t data;
#endif
    boolean_t did_hibernate;
    unsigned int cpu;
    kern_return_t rc;
    unsigned int my_cpu;
    uint64_t start;
    uint64_t elapsed = 0;
    uint64_t elapsed_trace_start = 0;

    kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
            current_cpu_datap()->cpu_hibernate, cpu_number());

    /* Get all CPUs to be in the "off" state */
    my_cpu = cpu_number();
    for (cpu = 0; cpu < real_ncpus; cpu += 1) {
        if (cpu == my_cpu)
            continue;
        rc = pmCPUExitHaltToOff(cpu);
        if (rc != KERN_SUCCESS)
            panic("Error %d trying to transition CPU %d to OFF",
                  rc, cpu);
    }

    /* Shut down the local APIC before passing control to firmware. */
    lapic_shutdown();

#if HIBERNATION
    data.func = func;
    data.refcon = refcon;
#endif

    /* Save power management timer state */
    pmTimerSave();

#if HYPERVISOR
    /* Notify hypervisor that we are about to sleep */
    hv_suspend();
#endif

#if CONFIG_VMX
    /*
     * Turn off VT, otherwise switching to legacy mode will fail
     */
    vmx_suspend();
#endif

    /*
     * Enable FPU/SIMD unit for potential hibernate acceleration
     */
    clear_ts();

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

    save_kdebug_enable = kdebug_enable;
    kdebug_enable = 0;

    acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
    /*
     * Save master CPU state and sleep platform.
     * Does not return until the platform is woken up,
     * or the sleep attempt fails.
     */
    uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
    acpi_sleep_cpu(acpi_hibernate, &data);
#else
    acpi_sleep_cpu(func, refcon);
#endif

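    /*
     * Execution resumes here after the platform wakes (or immediately
     * if the sleep attempt failed); start timing the wake path.
     */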
    start = mach_absolute_time();

    x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

    /* Reset UART if kprintf is enabled.
     * However kprintf should not be used before rtc_sleep_wakeup()
     * for compatibility with firewire kprintf.
     */
    if (FALSE == disable_serial_output)
        pal_serial_init();

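    /*
     * cpu_hibernate remains set only when we resumed from a hibernate
     * image; the normal-wake path cleared it in acpi_hibernate() before
     * sleeping, so it distinguishes a hibernate resume from a plain wake.
     */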
#if HIBERNATION
    if (current_cpu_datap()->cpu_hibernate) {
        did_hibernate = TRUE;
    } else
#endif
    {
        did_hibernate = FALSE;
    }

    /* Re-enable mode (including 64-bit if applicable) */
    cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
    /* Re-enable machine check handling */
    mca_cpu_init();
#endif

#if CONFIG_MTRR
    /* restore MTRR settings */
    mtrr_update_cpu();
#endif

    /* update CPU microcode */
    ucode_update_wake();

#if CONFIG_VMX
    /*
     * Restore VT mode
     */
    vmx_resume();
#endif

#if CONFIG_MTRR
    /* set up PAT following boot processor power up */
    pat_init();
#endif

    /*
     * Go through all of the CPUs and mark them as requiring
     * a full restart.
     */
    pmMarkAllCPUsOff();

    /* re-enable and re-init local apic (prior to starting timers) */
    if (lapic_probe())
        lapic_configure();

    hibernate_rebuild_vm_structs();

    elapsed += mach_absolute_time() - start;
    acpi_wake_abstime = mach_absolute_time();

    /* let the realtime clock reset */
    rtc_sleep_wakeup(acpi_sleep_abstime);
    acpi_wake_postrebase_abstime = mach_absolute_time();
    assert(mach_absolute_time() >= acpi_sleep_abstime);

    kdebug_enable = save_kdebug_enable;

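    /*
     * If tracing was not running before sleep, wake_nkdbufs (set from the
     * trace_wake boot-arg; see the note in acpi_idle_kernel below) requests
     * that kernel tracing be started now so the wake path itself can be
     * traced; the time spent starting it is accounted separately.
     */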
    if (kdebug_enable == 0) {
        if (wake_nkdbufs) {
            start = mach_absolute_time();
            start_kern_tracing(wake_nkdbufs, TRUE);
            elapsed_trace_start += mach_absolute_time() - start;
        }
    }
    start = mach_absolute_time();

    /* Reconfigure FP/SIMD unit */
    init_fpu();
    clear_ts();

    IOCPURunPlatformActiveActions();

    if (did_hibernate) {
        elapsed += mach_absolute_time() - start;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
        hibernate_machine_init();
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

        current_cpu_datap()->cpu_hibernate = 0;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    } else {
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    }

    /* Restore power management register state */
    pmCPUMarkRunning(current_cpu_datap());

    /* Restore power management timer state */
    pmTimerRestore();

    /* Restart timer interrupts */
    rtc_timer_start();

#if HIBERNATION
    kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
    /* Because we don't save the bootstrap page, and we share it
     * between sleep and MP slave init, we need to recreate it
     * after coming back from sleep or hibernate. */
    install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected to have already been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
    boolean_t istate = ml_get_interrupts_enabled();

    kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
            cpu_number(), istate ? "enabled" : "disabled");

    assert(cpu_number() == master_cpu);

    /*
     * Effectively set the boot cpu offline.
     * This will stop further deadlines being set.
     */
    cpu_datap(master_cpu)->cpu_running = FALSE;

    /* Cancel any pending deadline */
    setPop(0);
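    /*
     * Drain any timer interrupt already pending in the local APIC: per the
     * contract above, interrupts are re-enabled briefly so the interrupt
     * can be dismissed, then the deadline is cancelled again.
     */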
    while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
        (void) ml_set_interrupts_enabled(TRUE);
        setPop(0);
        ml_set_interrupts_enabled(FALSE);
    }

    /*
     * Call back to caller to indicate that interrupts will remain
     * disabled while we deep idle, wake and return.
     */
    func(refcon);

    acpi_idle_abstime = mach_absolute_time();

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
        acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

    /*
     * Disable tracing during S0-sleep
     * unless overridden by sysctl -w tsc.deep_idle_rebase=0
     */
    if (deep_idle_rebase) {
        save_kdebug_enable = kdebug_enable;
        kdebug_enable = 0;
    }

    /*
     * Call into power-management to enter the lowest C-state.
     * Note that when called on the boot processor this routine
     * returns directly when awoken.
     */
    pmCPUHalt(PM_HALT_SLEEP);

    /*
     * Get the wakeup time relative to the TSC, which has kept running.
     * Then rebase nanotime so that time does not appear to have progressed
     * over the sleep - unless overridden so that tracing can occur during
     * deep idle.
     */
    acpi_wake_abstime = mach_absolute_time();
    if (deep_idle_rebase) {
        rtc_sleep_wakeup(acpi_idle_abstime);
        kdebug_enable = save_kdebug_enable;
    }
    acpi_wake_postrebase_abstime = mach_absolute_time();
    assert(mach_absolute_time() >= acpi_idle_abstime);
    cpu_datap(master_cpu)->cpu_running = TRUE;

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
        acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

    /* Like S3 sleep, turn on tracing if the trace_wake boot-arg is present */
    if (kdebug_enable == 0) {
        if (wake_nkdbufs)
            start_kern_tracing(wake_nkdbufs, TRUE);
    }

    IOCPURunPlatformActiveActions();

    /* Restart timer interrupts */
    rtc_timer_start();
}

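/*
 * Start and end of the real-mode bootstrap code that
 * install_real_mode_bootstrap() copies into low memory; the symbols are
 * presumably defined in the low-level startup assembly.
 */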
extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

void
install_real_mode_bootstrap(void *prot_entry)
{
    /*
     * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
     * This is in page 1, which has been reserved for this purpose by
     * machine_startup() from the boot processor.
     * The slave boot code is responsible for switching to protected
     * mode and then jumping to the common startup, _start().
     */
    bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
               (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
               real_mode_bootstrap_end - real_mode_bootstrap_base);

    /*
     * Set the location at the base of the stack to point to the
     * common startup entry.
     */
    ml_phys_write_word(
        PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
        (unsigned int) kvtophys((vm_offset_t) prot_entry));

    /* Flush caches */
    __asm__("wbinvd");
}

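/*
 * Report whether the system woke from sleep or deep idle within roughly
 * the last five seconds, based on the post-rebase wake timestamp
 * (absolute-time units correspond to nanoseconds on x86).
 */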
boolean_t
ml_recent_wake(void)
{
    uint64_t ctime = mach_absolute_time();
    assert(ctime > acpi_wake_postrebase_abstime);
    return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
}