/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/etimer.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void acpi_sleep_cpu(acpi_sleep_callback, void *refcon);
extern void acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);

extern void fpinit(void);

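/*
 * Install the protected-mode wake entry point in the real-mode bootstrap
 * page and return its offset (0 if sleep support is compiled out);
 * presumably the caller passes this to the firmware as the wake vector.
 */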
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
    install_real_mode_bootstrap(acpi_wake_prot);
    return REAL_MODE_BOOTSTRAP_OFFSET;
#else
    return 0;
#endif
}

#if HIBERNATION
struct acpi_hibernate_callback_data {
    acpi_sleep_callback func;
    void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;

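/*
 * kdebug enable state saved across sleep/deep idle, absolute-time stamps
 * taken at sleep entry, deep-idle entry and wake, and the tunable that
 * controls whether nanotime is rebased over deep idle
 * (sysctl tsc.deep_idle_rebase, see below).
 */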
unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
static uint64_t acpi_idle_abstime;
static uint64_t acpi_wake_abstime;
boolean_t deep_idle_rebase = TRUE;

#if CONFIG_SLEEP
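/*
 * Sleep callback used when a hibernation image is to be written: write the
 * image, then either power off, restart, or fall through to the platform
 * sleep callback depending on the post-write action requested.
 */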
static void
acpi_hibernate(void *refcon)
{
    uint32_t mode;

    acpi_hibernate_callback_data_t *data =
        (acpi_hibernate_callback_data_t *)refcon;

    if (current_cpu_datap()->cpu_hibernate)
    {
#if defined(__i386__)
        cpu_IA32e_enable(current_cpu_datap());
#endif
        mode = hibernate_write_image();

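        /*
         * hibernate_write_image() reports what should happen once the image
         * is on disk: halt, restart, or proceed with a normal sleep.
         */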
        if (mode == kIOHibernatePostWriteHalt)
        {
            // off
            HIBLOG("power off\n");
            if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
        }
        else if (mode == kIOHibernatePostWriteRestart)
        {
            // restart
            HIBLOG("restart\n");
            if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
        }
        else
        {
            // sleep
            HIBLOG("sleep\n");

            // Should we come back via regular wake, clear the hibernate state in memory.
            cpu_datap(0)->cpu_hibernate = 0;
        }

#if defined(__i386__)
        /*
         * If we're in 64-bit mode, drop back into legacy mode during sleep.
         */
        cpu_IA32e_disable(current_cpu_datap());
#endif
    }
    kdebug_enable = 0;

    IOCPURunPlatformQuiesceActions();

    acpi_sleep_abstime = mach_absolute_time();

    (data->func)(data->refcon);

    /* should never get here! */
}
#endif /* CONFIG_SLEEP */
#endif /* HIBERNATION */

extern void slave_pstart(void);

extern unsigned int wake_nkdbufs;

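/*
 * Full sleep (S3) entry point, called by the ACPI Platform kext with the
 * callback that actually puts the platform to sleep: take the other CPUs
 * offline, save machine state, sleep (optionally writing a hibernation
 * image first), and restore state when the platform wakes.
 */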
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
    acpi_hibernate_callback_data_t data;
#endif
    boolean_t did_hibernate;
    unsigned int cpu;
    kern_return_t rc;
    unsigned int my_cpu;
    uint64_t now;
    uint64_t my_tsc;
    uint64_t my_abs;

    kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
            current_cpu_datap()->cpu_hibernate, cpu_number());

    /* Get all CPUs to be in the "off" state */
    my_cpu = cpu_number();
    for (cpu = 0; cpu < real_ncpus; cpu += 1) {
        if (cpu == my_cpu)
            continue;
        rc = pmCPUExitHaltToOff(cpu);
        if (rc != KERN_SUCCESS)
            panic("Error %d trying to transition CPU %d to OFF",
                  rc, cpu);
    }

    /* shutdown local APIC before passing control to firmware */
    lapic_shutdown();

#if HIBERNATION
    data.func = func;
    data.refcon = refcon;
#endif

    /* Save power management timer state */
    pmTimerSave();

#if CONFIG_VMX
    /*
     * Turn off VT, otherwise switching to legacy mode will fail
     */
    vmx_suspend();
#endif

#if defined(__i386__)
    /*
     * If we're in 64-bit mode, drop back into legacy mode during sleep.
     */
    cpu_IA32e_disable(current_cpu_datap());
#endif
    /*
     * Enable FPU/SIMD unit for potential hibernate acceleration
     */
    clear_ts();

    KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

    save_kdebug_enable = kdebug_enable;
    kdebug_enable = 0;

    acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
    /*
     * Save the master CPU state and sleep the platform.
     * This does not return until the platform is woken up,
     * or unless sleep fails.
     */
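    /*
     * Note (assumption): x86_64_pre_sleep() appears to save the current CR3
     * so that x86_64_post_sleep() can restore it after wake; it may also
     * switch to page tables usable by the low-memory wake code.
     */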
#ifdef __x86_64__
    uint64_t old_cr3 = x86_64_pre_sleep();
#endif
#if HIBERNATION
    acpi_sleep_cpu(acpi_hibernate, &data);
#else
    acpi_sleep_cpu(func, refcon);
#endif

#ifdef __x86_64__
    x86_64_post_sleep(old_cr3);
#endif

#endif /* CONFIG_SLEEP */

    /* Reset UART if kprintf is enabled.
     * However kprintf should not be used before rtc_sleep_wakeup()
     * for compatibility with firewire kprintf.
     */

    if (FALSE == disable_serial_output)
        pal_serial_init();

#if HIBERNATION
    if (current_cpu_datap()->cpu_hibernate) {
#if defined(__i386__)
        int i;
        for (i = 0; i < PMAP_NWINDOWS; i++)
            *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
#endif
        did_hibernate = TRUE;

    } else
#endif
    {
        did_hibernate = FALSE;
    }

    /* Re-enable mode (including 64-bit if applicable) */
    cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
    /* Re-enable machine check handling */
    mca_cpu_init();
#endif

#if CONFIG_MTRR
    /* restore MTRR settings */
    mtrr_update_cpu();
#endif

    /* update CPU microcode */
    ucode_update_wake();

#if CONFIG_VMX
    /*
     * Restore VT mode
     */
    vmx_resume();
#endif

#if CONFIG_MTRR
    /* set up PAT following boot processor power up */
    pat_init();
#endif

    /*
     * Go through all of the CPUs and mark them as requiring
     * a full restart.
     */
    pmMarkAllCPUsOff();

    ml_get_timebase(&now);

    /* re-enable and re-init local apic (prior to starting timers) */
    if (lapic_probe())
        lapic_configure();

    acpi_wake_abstime = mach_absolute_time();

    /* let the realtime clock reset */
    rtc_sleep_wakeup(acpi_sleep_abstime);

    kdebug_enable = save_kdebug_enable;

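    /*
     * If tracing was not already enabled before sleep, optionally start it
     * now to capture the wake path (wake_nkdbufs is set by the trace_wake
     * boot-arg; see the matching logic in acpi_idle_kernel below).
     */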
    if (kdebug_enable == 0) {
        if (wake_nkdbufs)
            start_kern_tracing(wake_nkdbufs, TRUE);
    }

    /* Reconfigure FP/SIMD unit */
    init_fpu();
    clear_ts();

    IOCPURunPlatformActiveActions();

    if (did_hibernate) {

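        /*
         * Convert the timebase sample taken above (before the LAPIC and RTC
         * were reconfigured) from TSC ticks to nanoseconds so the tracepoints
         * bracketing hibernate_machine_init() carry a meaningful timestamp;
         * the 32-bit halves of the sample are swapped first (assumed to undo
         * the ordering ml_get_timebase() returns them in).
         */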
        my_tsc = (now >> 32) | (now << 32);
        my_abs = tmrCvt(my_tsc, tscFCvtt2n);

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START,
                              (uint32_t)(my_abs >> 32), (uint32_t)my_abs, 0, 0, 0);
        hibernate_machine_init();
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

        current_cpu_datap()->cpu_hibernate = 0;

        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
    } else
        KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

    /* Restore power management register state */
    pmCPUMarkRunning(current_cpu_datap());

    /* Restore power management timer state */
    pmTimerRestore();

    /* Restart timer interrupts */
    rtc_timer_start();

#if HIBERNATION
#ifdef __i386__
    /* The image is written out using the copy engine, which disables
     * preemption. Since the copy engine writes out the page which contains
     * the preemption variable when it is disabled, we need to explicitly
     * enable it here. */
    if (did_hibernate)
        enable_preemption();
#endif

    kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
    /* Because we don't save the bootstrap page, and it is shared between
     * sleep and MP slave init, we need to recreate it after coming back
     * from sleep or hibernate. */
    install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request that the
 * kernel idle the boot processor in the deepest C-state for S0 sleep.
 * All slave processors are expected to have already been offlined in the
 * deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with
 * interrupts disabled, interrupts may need to be re-enabled to dismiss any
 * pending timer interrupt. However, the callback function is invoked only
 * once this has occurred, and interrupts are guaranteed to be disabled at
 * that time and to remain disabled through C-state entry, exit (wake) and
 * the return from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
    boolean_t istate = ml_get_interrupts_enabled();

    kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
            cpu_number(), istate ? "enabled" : "disabled");

    assert(cpu_number() == master_cpu);

    /*
     * Effectively set the boot cpu offline.
     * This will stop further deadlines being set.
     */
    cpu_datap(master_cpu)->cpu_running = FALSE;

    /* Cancel any pending deadline */
    setPop(0);
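    /*
     * If the LAPIC timer is still asserting an interrupt, briefly re-enable
     * interrupts so it can be dismissed, then cancel the deadline again with
     * interrupts masked (see the contract described above).
     */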
    while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
        (void) ml_set_interrupts_enabled(TRUE);
        setPop(0);
        ml_set_interrupts_enabled(FALSE);
    }

    /*
     * Call back to the caller to indicate that interrupts will remain
     * disabled while we deep idle, wake and return.
     */
    func(refcon);

    acpi_idle_abstime = mach_absolute_time();

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
        acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

    /*
     * Disable tracing during S0-sleep
     * unless overridden by sysctl -w tsc.deep_idle_rebase=0
     */
    if (deep_idle_rebase) {
        save_kdebug_enable = kdebug_enable;
        kdebug_enable = 0;
    }

    /*
     * Call into power-management to enter the lowest C-state.
     * Note when called on the boot processor this routine will
     * return directly when awoken.
     */
    pmCPUHalt(PM_HALT_SLEEP);

    /*
     * Get the wakeup time relative to the TSC, which has kept running.
     * Then rebase nanotime so that time appears not to have progressed
     * over the sleep - unless overridden so that tracing can occur
     * during deep idle.
     */
    acpi_wake_abstime = mach_absolute_time();
    if (deep_idle_rebase) {
        rtc_sleep_wakeup(acpi_idle_abstime);
        kdebug_enable = save_kdebug_enable;
    }

    cpu_datap(master_cpu)->cpu_running = TRUE;

    KERNEL_DEBUG_CONSTANT(
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
        acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

    /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */
    if (kdebug_enable == 0) {
        if (wake_nkdbufs)
            start_kern_tracing(wake_nkdbufs, TRUE);
    }

    IOCPURunPlatformActiveActions();

    /* Restart timer interrupts */
    rtc_timer_start();
}

extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];

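/*
 * prot_entry is the protected-mode entry point the bootstrap code will jump
 * to: acpi_wake_prot for wake from sleep, or slave_pstart for MP slave start.
 */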
void
install_real_mode_bootstrap(void *prot_entry)
{
    /*
     * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
     * This is in page 1 which has been reserved for this purpose by
     * machine_startup() from the boot processor.
     * The slave boot code is responsible for switching to protected
     * mode and then jumping to the common startup, _start().
     */
    bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
               (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
               real_mode_bootstrap_end - real_mode_bootstrap_base);

    /*
     * Set the location at the base of the stack to point to the
     * common startup entry.
     */
    ml_phys_write_word(
        PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
        (unsigned int)kvtophys((vm_offset_t)prot_entry));

    /* Flush caches */
    __asm__("wbinvd");
}