]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/acpi.c
xnu-2422.1.72.tar.gz
[apple/xnu.git] / osfmk / i386 / acpi.c
1 /*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <i386/pmap.h>
30 #include <i386/proc_reg.h>
31 #include <i386/mp_desc.h>
32 #include <i386/misc_protos.h>
33 #include <i386/mp.h>
34 #include <i386/cpu_data.h>
35 #if CONFIG_MTRR
36 #include <i386/mtrr.h>
37 #endif
38 #if CONFIG_VMX
39 #include <i386/vmx/vmx_cpu.h>
40 #endif
41 #include <i386/ucode.h>
42 #include <i386/acpi.h>
43 #include <i386/fpu.h>
44 #include <i386/lapic.h>
45 #include <i386/mp.h>
46 #include <i386/mp_desc.h>
47 #include <i386/serial_io.h>
48 #if CONFIG_MCA
49 #include <i386/machine_check.h>
50 #endif
51 #include <i386/pmCPU.h>
52
53 #include <i386/tsc.h>
54
55 #include <kern/cpu_data.h>
56 #include <kern/machine.h>
57 #include <kern/timer_queue.h>
58 #include <console/serial_protos.h>
59 #include <machine/pal_routines.h>
60 #include <vm/vm_page.h>
61
62 #if HIBERNATION
63 #include <IOKit/IOHibernatePrivate.h>
64 #endif
65 #include <IOKit/IOPlatformExpert.h>
66 #include <sys/kdebug.h>
67
68 #if CONFIG_SLEEP
69 extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
70 extern void acpi_wake_prot(void);
71 #endif
72 extern kern_return_t IOCPURunPlatformQuiesceActions(void);
73 extern kern_return_t IOCPURunPlatformActiveActions(void);
74
75 extern void fpinit(void);
76
77 vm_offset_t
78 acpi_install_wake_handler(void)
79 {
80 #if CONFIG_SLEEP
81 install_real_mode_bootstrap(acpi_wake_prot);
82 return REAL_MODE_BOOTSTRAP_OFFSET;
83 #else
84 return 0;
85 #endif
86 }
87
88 #if HIBERNATION
/*
 * Carries the platform sleep callback and its argument through
 * acpi_sleep_cpu() into acpi_hibernate(), which receives only a
 * single opaque refcon pointer.
 */
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;	/* platform routine that actually enters sleep */
	void *refcon;			/* opaque argument passed through to func */
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

/* kdebug_enable saved across sleep/idle so tracing can be restored on wake */
unsigned int save_kdebug_enable = 0;
/* mach_absolute_time() snapshots taken at sleep entry / idle entry / wake */
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime;
/*
 * When TRUE (default), nanotime is rebased after deep idle so time appears
 * not to progress over the idle period; can be overridden via sysctl
 * tsc.deep_idle_rebase (see comment in acpi_idle_kernel).
 */
boolean_t deep_idle_rebase = TRUE;
100
101 #if CONFIG_SLEEP
/*
 * Callback handed to acpi_sleep_cpu() when hibernation is configured.
 * Runs on the boot CPU after master CPU state has been saved.  If a
 * hibernate image is pending, writes it out and then either halts,
 * restarts, or falls through to a normal platform sleep depending on
 * the post-write policy returned by hibernate_write_image().  Finally
 * invokes the real platform sleep function; control does not return
 * here (wake re-enters via the wake vector).
 */
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
		(acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate)
	{
		/* Write the hibernate image to disk; mode selects what to do next. */
		mode = hibernate_write_image();

		if( mode == kIOHibernatePostWriteHalt )
		{
			// off
			HIBLOG("power off\n");
			if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
		}
		else if( mode == kIOHibernatePostWriteRestart )
		{
			// restart
			HIBLOG("restart\n");
			if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
		}
		else
		{
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}

	}
	/* Tracing must be off while the platform quiesces and sleeps. */
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	/* Timestamp sleep entry; used on wake to resynchronize the RTC. */
	acpi_sleep_abstime = mach_absolute_time();

	/* Enter platform sleep via the caller-supplied function. */
	(data->func)(data->refcon);

	/* should never get here! */
}
146 #endif /* CONFIG_SLEEP */
147 #endif /* HIBERNATION */
148
149 extern void slave_pstart(void);
150 extern void hibernate_rebuild_vm_structs(void);
151
152 extern unsigned int wake_nkdbufs;
153
/*
 * Put the system to sleep (S3) or hibernate (S4 via the hibernate image).
 *
 * Called with the platform sleep function `func` and its argument `refcon`.
 * Tears down per-CPU machine state, sleeps via acpi_sleep_cpu(), and on
 * wake re-initializes the hardware in the required order: CPU mode, MCA,
 * MTRR, microcode, VMX, PAT, local APIC, RTC, FPU, timers.  The ordering
 * of the restore steps below is deliberate — do not reorder.
 *
 * Does not return until the system has woken (or sleep failed).
 */
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
		current_cpu_datap()->cpu_hibernate, cpu_number());

	/* Get all CPUs to be in the "off" state */
	my_cpu = cpu_number();
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu)
			continue;
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS)
			panic("Error %d trying to transition CPU %d to OFF",
			      rc, cpu);
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	/* Package the callback so acpi_hibernate() can unwrap it from refcon. */
	data.func = func;
	data.refcon = refcon;
#endif

	/* Save power management timer state */
	pmTimerSave();

#if CONFIG_VMX
	/*
	 * Turn off VT, otherwise switching to legacy mode will fail
	 */
	vmx_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/* Suspend tracing across sleep; restored from this saved copy on wake. */
	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
	acpi_sleep_cpu(func, refcon);
#endif

	/* --- execution resumes here on wake --- */
	start = mach_absolute_time();

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */
	/*
	 * NOTE(review): when CONFIG_SLEEP is not configured, `start` is read
	 * below without having been assigned — presumably this file is never
	 * built with HIBERNATION but without CONFIG_SLEEP; confirm config.
	 */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output)
		pal_serial_init();

#if HIBERNATION
	/* Nonzero cpu_hibernate here means we came back by restoring the image. */
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;

	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable mode (including 64-bit if applicable) */
	cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume();
#endif

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe())
		lapic_configure();

	hibernate_rebuild_vm_structs();

	/* Accumulate wake time spent before tracing could be restarted. */
	elapsed += mach_absolute_time() - start;
	acpi_wake_abstime = mach_absolute_time();

	/* let the realtime clock reset */
	rtc_sleep_wakeup(acpi_sleep_abstime);

	kdebug_enable = save_kdebug_enable;

	/* If tracing was off at sleep, optionally start it per trace_wake boot-arg. */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			start = mach_absolute_time();
			start_kern_tracing(wake_nkdbufs, TRUE);
			elapsed_trace_start += mach_absolute_time() - start;
		}
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

	if (did_hibernate) {
		elapsed += mach_absolute_time() - start;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
		hibernate_machine_init();
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

		current_cpu_datap()->cpu_hibernate = 0;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
	} else
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if HIBERNATION

	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
	/* Becase we don't save the bootstrap page, and we share it
	 * between sleep and mp slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif
}
347
348 /*
349 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
350 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
351 * processors are expected already to have been offlined in the deepest C-state.
352 *
353 * The contract with ACPI is that although the kernel is called with interrupts
354 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
355 * interrupt. However, the callback function will be called once this has
356 * occurred and interrupts are guaranteed to be disabled at that time,
357 * and to remain disabled during C-state entry, exit (wake) and return
358 * from acpi_idle_kernel.
359 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
		cpu_number(), istate ? "enabled" : "disabled");

	/* Only the boot processor may deep-idle the system. */
	assert(cpu_number() == master_cpu);

	/*
	 * Effectively set the boot cpu offline.
	 * This will stop further deadlines being set.
	 */
	cpu_datap(master_cpu)->cpu_running = FALSE;

	/* Cancel any pending deadline */
	setPop(0);
	/*
	 * Drain any already-latched LAPIC timer interrupt: briefly open the
	 * interrupt window so it can be dismissed, then re-cancel in case
	 * servicing it armed a new deadline.
	 */
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overriden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}

	/* Boot CPU is back; allow deadlines to be set again. */
	cpu_datap(master_cpu)->cpu_running = TRUE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs)
			start_kern_tracing(wake_nkdbufs, TRUE);
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}
440
441 extern char real_mode_bootstrap_end[];
442 extern char real_mode_bootstrap_base[];
443
/*
 * Install the real-mode bootstrap trampoline in low physical memory,
 * with `prot_entry` as the protected-mode entry point it will jump to.
 * Used both for waking from sleep (acpi_wake_prot) and for starting
 * slave processors (slave_pstart).
 */
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
		   (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
		   real_mode_bootstrap_end-real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches so the trampoline is visible to a CPU starting in real mode. */
	__asm__("wbinvd");
}
469