/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if HYPERVISOR
#include <kern/hv_support.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
extern void acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);
extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message);

extern void fpinit(void);

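/*
 * Install the real-mode wake bootstrap used to resume from S3 and return
 * the physical offset the platform should pass to the firmware as the
 * wake vector; returns 0 when sleep support is not configured.
 */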
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
    install_real_mode_bootstrap(acpi_wake_prot);
    return REAL_MODE_BOOTSTRAP_OFFSET;
#else
    return 0;
#endif
}

#if CONFIG_SLEEP

unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime;
boolean_t deep_idle_rebase = TRUE;

#if HIBERNATION
struct acpi_hibernate_callback_data {
    acpi_sleep_callback func;
    void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

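/*
 * Hibernate callback, passed to acpi_sleep_cpu() with processor state
 * already saved: write the hibernation image, then either power off,
 * restart, or fall through to the platform sleep callback. Does not
 * return to its caller.
 */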
static void
acpi_hibernate(void *refcon)
{
    uint32_t mode;

    acpi_hibernate_callback_data_t *data =
        (acpi_hibernate_callback_data_t *)refcon;

    if (current_cpu_datap()->cpu_hibernate) {
        mode = hibernate_write_image();

        if (mode == kIOHibernatePostWriteHalt) {
            // off
            HIBLOG("power off\n");
            IOCPURunPlatformHaltRestartActions(kPEHaltCPU);
            if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
        } else if (mode == kIOHibernatePostWriteRestart) {
            // restart
            HIBLOG("restart\n");
            IOCPURunPlatformHaltRestartActions(kPERestartCPU);
            if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
        } else {
            // sleep
            HIBLOG("sleep\n");

            // should we come back via regular wake, set the state in memory.
            cpu_datap(0)->cpu_hibernate = 0;
        }
    }

#if CONFIG_VMX
    vmx_suspend();
#endif
    kdebug_enable = 0;

    IOCPURunPlatformQuiesceActions();

    acpi_sleep_abstime = mach_absolute_time();

    (data->func)(data->refcon);

    /* should never get here! */
}
#endif /* HIBERNATION */
#endif /* CONFIG_SLEEP */

extern void slave_pstart(void);
extern void hibernate_rebuild_vm_structs(void);

extern unsigned int wake_nkdbufs;

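/*
 * acpi_sleep_kernel is called by the ACPI platform kext to put the system
 * to sleep (S3) or to hibernate. All other CPUs are taken offline, the
 * local APIC and PM timer state are saved, and control passes to the
 * platform sleep/hibernate callback. On wake, the timebase, APIC, MTRRs,
 * PAT, microcode, VMX and FPU state are restored before returning.
 */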
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
    acpi_hibernate_callback_data_t data;
#endif
    boolean_t did_hibernate;
    unsigned int cpu;
    kern_return_t rc;
    unsigned int my_cpu;
    uint64_t start;
    uint64_t elapsed = 0;
    uint64_t elapsed_trace_start = 0;

    kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
            current_cpu_datap()->cpu_hibernate, cpu_number());

    /* Get all CPUs to be in the "off" state */
    my_cpu = cpu_number();
    for (cpu = 0; cpu < real_ncpus; cpu += 1) {
        if (cpu == my_cpu)
            continue;
        rc = pmCPUExitHaltToOff(cpu);
        if (rc != KERN_SUCCESS)
            panic("Error %d trying to transition CPU %d to OFF",
                  rc, cpu);
    }

    /* shutdown local APIC before passing control to firmware */
    lapic_shutdown();

#if HIBERNATION
    data.func = func;
    data.refcon = refcon;
#endif

    /* Save power management timer state */
    pmTimerSave();

#if HYPERVISOR
    /* Notify hypervisor that we are about to sleep */
    hv_suspend();
#endif

    /*
     * Enable FPU/SIMD unit for potential hibernate acceleration
     */
    clear_ts();

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

    save_kdebug_enable = kdebug_enable;
    kdebug_enable = 0;

    acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
    /*
     * Save master CPU state and sleep platform.
     * Will not return until the platform is woken up or sleep fails.
     */
    uint64_t old_cr3 = x86_64_pre_sleep();
#if HIBERNATION
    acpi_sleep_cpu(acpi_hibernate, &data);
#else
#if CONFIG_VMX
    vmx_suspend();
#endif
    acpi_sleep_cpu(func, refcon);
#endif

    acpi_wake_abstime = mach_absolute_time();
    /* Rebase TSC->absolute time conversion, using timestamp
     * recorded before sleep.
     */
    rtc_nanotime_init(acpi_sleep_abstime);
    acpi_wake_postrebase_abstime = start = mach_absolute_time();
    assert(start >= acpi_sleep_abstime);

    x86_64_post_sleep(old_cr3);

#endif /* CONFIG_SLEEP */

    /* Reset UART if kprintf is enabled.
     * However kprintf should not be used before rtc_sleep_wakeup()
     * for compatibility with firewire kprintf.
     */

    if (FALSE == disable_serial_output)
        pal_serial_init();

#if HIBERNATION
    if (current_cpu_datap()->cpu_hibernate) {
        did_hibernate = TRUE;
    } else
#endif
    {
        did_hibernate = FALSE;
    }

    /* Re-enable mode (including 64-bit if applicable) */
    cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
    /* Re-enable machine check handling */
    mca_cpu_init();
#endif

#if CONFIG_MTRR
    /* restore MTRR settings */
    mtrr_update_cpu();
#endif

    /* update CPU microcode */
    ucode_update_wake();

#if CONFIG_MTRR
    /* set up PAT following boot processor power up */
    pat_init();
#endif

#if CONFIG_VMX
    /*
     * Restore VT mode
     */
    vmx_resume(did_hibernate);
#endif

    /*
     * Go through all of the CPUs and mark them as requiring
     * a full restart.
     */
    pmMarkAllCPUsOff();

    /* re-enable and re-init local apic (prior to starting timers) */
    if (lapic_probe())
        lapic_configure();

#if HIBERNATION
    hibernate_rebuild_vm_structs();
#endif

    elapsed += mach_absolute_time() - start;

    rtc_decrementer_configure();
    kdebug_enable = save_kdebug_enable;

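    /*
     * If tracing was not re-enabled from the saved state, start a fresh
     * trace when the wake_nkdbufs boot-arg (trace_wake) requested buffers,
     * and account for how long starting the trace itself takes.
     */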
    if (kdebug_enable == 0) {
        if (wake_nkdbufs) {
            start = mach_absolute_time();
            kdebug_trace_start(wake_nkdbufs, NULL, TRUE);
            elapsed_trace_start += mach_absolute_time() - start;
        }
    }
    start = mach_absolute_time();

    /* Reconfigure FP/SIMD unit */
    init_fpu();
    clear_ts();

    IOCPURunPlatformActiveActions();

#if HIBERNATION
    if (did_hibernate) {
        elapsed += mach_absolute_time() - start;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START, elapsed, elapsed_trace_start, 0, 0, 0);
        hibernate_machine_init();
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

        current_cpu_datap()->cpu_hibernate = 0;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    } else
#endif /* HIBERNATION */
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

    /* Restore power management register state */
    pmCPUMarkRunning(current_cpu_datap());

    /* Restore power management timer state */
    pmTimerRestore();

    /* Restart timer interrupts */
    rtc_timer_start();

#if HIBERNATION
    kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
    /* Because we don't save the bootstrap page, and we share it
     * between sleep and mp slave init, we need to recreate it
     * after coming back from sleep or hibernate */
    install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request the kernel
 * to idle the boot processor in the deepest C-state for S0 sleep. All slave
 * processors are expected already to have been offlined in the deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
    boolean_t istate = ml_get_interrupts_enabled();

    kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
            cpu_number(), istate ? "enabled" : "disabled");

    assert(cpu_number() == master_cpu);

    /*
     * Effectively set the boot cpu offline.
     * This will stop further deadlines being set.
     */
    cpu_datap(master_cpu)->cpu_running = FALSE;

    /* Cancel any pending deadline */
    setPop(0);
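    /*
     * Drain any timer interrupt already latched in the local APIC:
     * briefly re-enable interrupts so it can be dismissed, then cancel
     * the deadline again with interrupts masked.
     */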
    while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
        (void) ml_set_interrupts_enabled(TRUE);
        setPop(0);
        ml_set_interrupts_enabled(FALSE);
    }

    /*
     * Call back to caller to indicate that interrupts will remain
     * disabled while we deep idle, wake and return.
     */
    func(refcon);

    acpi_idle_abstime = mach_absolute_time();

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
        acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

    /*
     * Disable tracing during S0-sleep
     * unless overridden by sysctl -w tsc.deep_idle_rebase=0
     */
    if (deep_idle_rebase) {
        save_kdebug_enable = kdebug_enable;
        kdebug_enable = 0;
    }

    /*
     * Call into power-management to enter the lowest C-state.
     * Note when called on the boot processor this routine will
     * return directly when awoken.
     */
    pmCPUHalt(PM_HALT_SLEEP);

    /*
     * Get wakeup time relative to the TSC which has progressed.
     * Then rebase nanotime to reflect time not progressing over sleep
     * - unless overridden so that tracing can occur during deep_idle.
     */
    acpi_wake_abstime = mach_absolute_time();
    if (deep_idle_rebase) {
        rtc_sleep_wakeup(acpi_idle_abstime);
        kdebug_enable = save_kdebug_enable;
    }
    acpi_wake_postrebase_abstime = mach_absolute_time();
    assert(mach_absolute_time() >= acpi_idle_abstime);
    cpu_datap(master_cpu)->cpu_running = TRUE;

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
        acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

    /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
    if (kdebug_enable == 0) {
        if (wake_nkdbufs)
            kdebug_trace_start(wake_nkdbufs, NULL, TRUE);
    }

    IOCPURunPlatformActiveActions();

    /* Restart timer interrupts */
    rtc_timer_start();
}

extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

void
install_real_mode_bootstrap(void *prot_entry)
{
    /*
     * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
     * This is in page 1 which has been reserved for this purpose by
     * machine_startup() from the boot processor.
     * The slave boot code is responsible for switching to protected
     * mode and then jumping to the common startup, _start().
     */
    bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
               (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
               real_mode_bootstrap_end - real_mode_bootstrap_base);

    /*
     * Set the location at the base of the stack to point to the
     * common startup entry.
     */
    ml_phys_write_word(
        PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
        (unsigned int)kvtophys((vm_offset_t)prot_entry));

    /* Flush caches */
    __asm__("wbinvd");
}

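/*
 * Returns TRUE if the system woke from sleep or deep idle within the
 * last 5 seconds, measured from the post-rebase wake timestamp.
 */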
boolean_t
ml_recent_wake(void)
{
    uint64_t ctime = mach_absolute_time();
    assert(ctime > acpi_wake_postrebase_abstime);
    return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC);
}