/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if HYPERVISOR
#include <kern/hv_support.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void *refcon);
extern void acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);
extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);

extern void fpinit(void);

vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}

#if CONFIG_SLEEP

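/*
 * Kdebug tracing is suspended across sleep/deep idle; save_kdebug_enable
 * holds the state to restore on wake.  The abstime globals record when the
 * platform went to sleep or deep idle, when it woke, and when the timebase
 * had been rebased after wake (see ml_recent_wake() below).
 * deep_idle_rebase controls whether the timebase is rebased across S0
 * deep idle.
 */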
unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t deep_idle_rebase = TRUE;

#if HIBERNATION
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

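/*
 * Sleep callback used when hibernation is configured.  It runs on the boot
 * processor with interrupts disabled: if a hibernate image is pending it is
 * written out here, after which the machine powers off, restarts, or
 * continues into regular sleep depending on the write result.  In all cases
 * control finally passes to the ACPI sleep function supplied via refcon,
 * which does not return.
 */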
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
		(acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate)
	{
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt)
		{
			// off
			HIBLOG("power off\n");
			IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
		}
		else if (mode == kIOHibernatePostWriteRestart)
		{
			// restart
			HIBLOG("restart\n");
			IOCPURunPlatformHaltRestartActions(kPERestartCPU);
			if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
		}
		else
		{
			// sleep
			HIBLOG("sleep\n");

			// Should we come back via regular wake, clear the hibernate state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}

	}
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
#endif /* HIBERNATION */
#endif /* CONFIG_SLEEP */

extern void slave_pstart(void);
extern void hibernate_rebuild_vm_structs(void);

extern unsigned int wake_nkdbufs;

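/*
 * acpi_sleep_kernel is called by the ACPI platform kext to put the system
 * to sleep (or to write the hibernate image first when hibernation is
 * armed).  It forces all slave processors into the off state, shuts down
 * the local APIC, saves boot-processor state and calls through to the
 * platform sleep function; on wake it re-initializes the CPU, APIC, timers
 * and tracing state before returning to the caller.
 */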
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t did_hibernate;
	unsigned int cpu;
	kern_return_t rc;
	unsigned int my_cpu;
	uint64_t start;
	uint64_t elapsed = 0;
	uint64_t elapsed_trace_start = 0;

	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
		current_cpu_datap()->cpu_hibernate, cpu_number());

	/* Get all CPUs to be in the "off" state */
	my_cpu = cpu_number();
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu)
			continue;
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS)
			panic("Error %d trying to transition CPU %d to OFF",
			      rc, cpu);
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

	/* Save power management timer state */
	pmTimerSave();

#if HYPERVISOR
	/* Notify hypervisor that we are about to sleep */
	hv_suspend();
#endif

#if CONFIG_VMX
	/*
	 * Turn off VT, otherwise switching to legacy mode will fail
	 */
	vmx_suspend();
#endif

	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * This will not return until the platform is woken up
	 * (or it returns immediately if the sleep attempt fails).
	 */
	uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
	acpi_sleep_cpu(func, refcon);
#endif

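	/* Execution resumes here on wake (or immediately if the sleep attempt failed). */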
	start = mach_absolute_time();

	x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output)
		pal_serial_init();

#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
		did_hibernate = TRUE;

	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable mode (including 64-bit if applicable) */
	cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume();
#endif

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();


	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe())
		lapic_configure();

#if HIBERNATION
	hibernate_rebuild_vm_structs();
#endif

	elapsed += mach_absolute_time() - start;
	acpi_wake_abstime = mach_absolute_time();

	/* let the realtime clock reset */
	rtc_sleep_wakeup(acpi_sleep_abstime);
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_sleep_abstime);

	kdebug_enable = save_kdebug_enable;

	if (kdebug_enable == 0) {
		if (wake_nkdbufs) {
			start = mach_absolute_time();
			start_kern_tracing(wake_nkdbufs, TRUE);
			elapsed_trace_start += mach_absolute_time() - start;
		}
	}
	start = mach_absolute_time();

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();

#if HIBERNATION
	if (did_hibernate) {
		elapsed += mach_absolute_time() - start;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
		hibernate_machine_init();
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

		current_cpu_datap()->cpu_hibernate = 0;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
	} else
#endif /* HIBERNATION */
	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if HIBERNATION

	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and MP slave init, we need to recreate it
	 * after coming back from sleep or hibernate */
	install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
		cpu_number(), istate ? "enabled" : "disabled");

	assert(cpu_number() == master_cpu);

	/*
	 * Effectively set the boot cpu offline.
	 * This will stop further deadlines being set.
	 */
	cpu_datap(master_cpu)->cpu_running = FALSE;

	/* Cancel any pending deadline */
	setPop(0);
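	/*
	 * Per the contract above, briefly re-enable interrupts so that any
	 * timer interrupt already pending in the local APIC can be taken
	 * and dismissed before entering the deepest C-state.
	 */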
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC, which has kept advancing.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overridden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}
	acpi_wake_postrebase_abstime = mach_absolute_time();
	assert(mach_absolute_time() >= acpi_idle_abstime);
	cpu_datap(master_cpu)->cpu_running = TRUE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

	/* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs)
			start_kern_tracing(wake_nkdbufs, TRUE);
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}

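/*
 * Bounds of the real-mode bootstrap code image linked into the kernel;
 * install_real_mode_bootstrap() copies it to the low-memory vector area
 * at REAL_MODE_BOOTSTRAP_OFFSET.
 */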
extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
		   (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
		   real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}

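/*
 * Returns TRUE if the system woke from sleep or deep idle (i.e. rebased
 * its timebase) within the last 5 seconds.
 */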
boolean_t
ml_recent_wake(void)
{
	uint64_t ctime = mach_absolute_time();
	assert(ctime > acpi_wake_postrebase_abstime);
	return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
}