/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/pmap.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/mp.h>
#include <i386/cpu_data.h>
#if CONFIG_MTRR
#include <i386/mtrr.h>
#endif
#if CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif
#include <i386/ucode.h>
#include <i386/acpi.h>
#include <i386/fpu.h>
#include <i386/lapic.h>
#include <i386/mp.h>
#include <i386/mp_desc.h>
#include <i386/serial_io.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <i386/pmCPU.h>

#include <i386/tsc.h>

#include <kern/cpu_data.h>
#include <kern/etimer.h>
#include <kern/machine.h>
#include <kern/timer_queue.h>
#include <console/serial_protos.h>
#include <machine/pal_routines.h>
#include <vm/vm_page.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>
#include <sys/kdebug.h>

#if CONFIG_SLEEP
extern void	acpi_sleep_cpu(acpi_sleep_callback, void *refcon);
extern void	acpi_wake_prot(void);
#endif
extern kern_return_t IOCPURunPlatformQuiesceActions(void);
extern kern_return_t IOCPURunPlatformActiveActions(void);

extern void	fpinit(void);
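
/*
 * Install the protected-mode wake routine in the low-memory real-mode
 * bootstrap page and return the offset of the bootstrap code, or 0 if
 * sleep support is not configured.
 */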
vm_offset_t
acpi_install_wake_handler(void)
{
#if CONFIG_SLEEP
	install_real_mode_bootstrap(acpi_wake_prot);
	return REAL_MODE_BOOTSTRAP_OFFSET;
#else
	return 0;
#endif
}

#if HIBERNATION
struct acpi_hibernate_callback_data {
	acpi_sleep_callback func;
	void *refcon;
};
typedef struct acpi_hibernate_callback_data acpi_hibernate_callback_data_t;
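
/*
 * kdebug state and timestamps preserved across sleep and deep idle;
 * deep_idle_rebase controls whether nanotime is rebased over deep idle
 * (see acpi_idle_kernel and the tsc.deep_idle_rebase sysctl).
 */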
unsigned int		save_kdebug_enable = 0;
static uint64_t		acpi_sleep_abstime;
static uint64_t		acpi_idle_abstime;
static uint64_t		acpi_wake_abstime;
boolean_t		deep_idle_rebase = TRUE;

#if CONFIG_SLEEP
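/*
 * Callback run on the sleep path when hibernation is enabled: write the
 * hibernate image and then power off, restart, or fall through to normal
 * sleep depending on the image-write result, then quiesce the platform
 * and call through to the ACPI sleep routine.  Does not return.
 */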
static void
acpi_hibernate(void *refcon)
{
	uint32_t mode;

	acpi_hibernate_callback_data_t *data =
		(acpi_hibernate_callback_data_t *)refcon;

	if (current_cpu_datap()->cpu_hibernate)
	{
#if defined(__i386__)
		cpu_IA32e_enable(current_cpu_datap());
#endif
		mode = hibernate_write_image();

		if (mode == kIOHibernatePostWriteHalt)
		{
			// off
			HIBLOG("power off\n");
			if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU);
		}
		else if (mode == kIOHibernatePostWriteRestart)
		{
			// restart
			HIBLOG("restart\n");
			if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU);
		}
		else
		{
			// sleep
			HIBLOG("sleep\n");

			// should we come back via regular wake, set the state in memory.
			cpu_datap(0)->cpu_hibernate = 0;
		}

#if defined(__i386__)
		/*
		 * If we're in 64-bit mode, drop back into legacy mode during sleep.
		 */
		cpu_IA32e_disable(current_cpu_datap());
#endif
	}
	kdebug_enable = 0;

	IOCPURunPlatformQuiesceActions();

	acpi_sleep_abstime = mach_absolute_time();

	(data->func)(data->refcon);

	/* should never get here! */
}
#endif /* CONFIG_SLEEP */
#endif /* HIBERNATION */

extern void		slave_pstart(void);

extern unsigned int	wake_nkdbufs;
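
/*
 * System sleep entry point: take all other CPUs offline, save processor,
 * timer and power-management state, and call through to the platform
 * sleep (or hibernate) code via acpi_sleep_cpu().  Execution resumes here
 * on wake, where CPU, APIC, timer and tracing state are restored.
 */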
void
acpi_sleep_kernel(acpi_sleep_callback func, void *refcon)
{
#if HIBERNATION
	acpi_hibernate_callback_data_t data;
#endif
	boolean_t	did_hibernate;
	unsigned int	cpu;
	kern_return_t	rc;
	unsigned int	my_cpu;
	uint64_t	now;
	uint64_t	my_tsc;
	uint64_t	my_abs;

	kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n",
			current_cpu_datap()->cpu_hibernate, cpu_number());

	/* Get all CPUs to be in the "off" state */
	my_cpu = cpu_number();
	for (cpu = 0; cpu < real_ncpus; cpu += 1) {
		if (cpu == my_cpu)
			continue;
		rc = pmCPUExitHaltToOff(cpu);
		if (rc != KERN_SUCCESS)
			panic("Error %d trying to transition CPU %d to OFF",
			      rc, cpu);
	}

	/* shutdown local APIC before passing control to firmware */
	lapic_shutdown();

#if HIBERNATION
	data.func = func;
	data.refcon = refcon;
#endif

	/* Save power management timer state */
	pmTimerSave();

#if CONFIG_VMX
	/*
	 * Turn off VT, otherwise switching to legacy mode will fail
	 */
	vmx_suspend();
#endif

#if defined(__i386__)
	/*
	 * If we're in 64-bit mode, drop back into legacy mode during sleep.
	 */
	cpu_IA32e_disable(current_cpu_datap());
#endif
	/*
	 * Enable FPU/SIMD unit for potential hibernate acceleration
	 */
	clear_ts();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START, 0, 0, 0, 0, 0);

	save_kdebug_enable = kdebug_enable;
	kdebug_enable = 0;

	acpi_sleep_abstime = mach_absolute_time();

#if CONFIG_SLEEP
	/*
	 * Save master CPU state and sleep platform.
	 * Will not return until platform is woken up,
	 * or if sleep failed.
	 */
#ifdef __x86_64__
	uint64_t old_cr3 = x86_64_pre_sleep();
#endif
#if HIBERNATION
	acpi_sleep_cpu(acpi_hibernate, &data);
#else
	acpi_sleep_cpu(func, refcon);
#endif

#ifdef __x86_64__
	x86_64_post_sleep(old_cr3);
#endif

#endif /* CONFIG_SLEEP */

	/* Reset UART if kprintf is enabled.
	 * However kprintf should not be used before rtc_sleep_wakeup()
	 * for compatibility with firewire kprintf.
	 */

	if (FALSE == disable_serial_output)
		pal_serial_init();
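
	/* Note whether we returned here via a hibernate wake so that the
	 * hibernate-specific restore steps below can be applied. */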
#if HIBERNATION
	if (current_cpu_datap()->cpu_hibernate) {
#if defined(__i386__)
		int i;
		for (i = 0; i < PMAP_NWINDOWS; i++)
			*current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0;
#endif
		did_hibernate = TRUE;

	} else
#endif
	{
		did_hibernate = FALSE;
	}

	/* Re-enable mode (including 64-bit if applicable) */
	cpu_mode_init(current_cpu_datap());

#if CONFIG_MCA
	/* Re-enable machine check handling */
	mca_cpu_init();
#endif

#if CONFIG_MTRR
	/* restore MTRR settings */
	mtrr_update_cpu();
#endif

	/* update CPU microcode */
	ucode_update_wake();

#if CONFIG_VMX
	/*
	 * Restore VT mode
	 */
	vmx_resume();
#endif

#if CONFIG_MTRR
	/* set up PAT following boot processor power up */
	pat_init();
#endif

	/*
	 * Go through all of the CPUs and mark them as requiring
	 * a full restart.
	 */
	pmMarkAllCPUsOff();

	ml_get_timebase(&now);

	/* re-enable and re-init local apic (prior to starting timers) */
	if (lapic_probe())
		lapic_configure();

	acpi_wake_abstime = mach_absolute_time();

	/* let the realtime clock reset */
	rtc_sleep_wakeup(acpi_sleep_abstime);

	kdebug_enable = save_kdebug_enable;

	if (kdebug_enable == 0) {
		if (wake_nkdbufs)
			start_kern_tracing(wake_nkdbufs, TRUE);
	}

	/* Reconfigure FP/SIMD unit */
	init_fpu();
	clear_ts();

	IOCPURunPlatformActiveActions();
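
	/*
	 * If we wrote a hibernate image, finish hibernate-specific restore:
	 * swap the 32-bit halves of the timebase snapshot taken above, convert
	 * the TSC value to nanoseconds with tmrCvt(), and use it to timestamp
	 * the trace records bracketing hibernate_machine_init().
	 */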
	if (did_hibernate) {

		my_tsc = (now >> 32) | (now << 32);
		my_abs = tmrCvt(my_tsc, tscFCvtt2n);

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_START,
				      (uint32_t)(my_abs >> 32), (uint32_t)my_abs, 0, 0, 0);
		hibernate_machine_init();
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 2) | DBG_FUNC_END, 0, 0, 0, 0, 0);

		current_cpu_datap()->cpu_hibernate = 0;

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);
	} else
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, 0, 0, 0, 0, 0);

	/* Restore power management register state */
	pmCPUMarkRunning(current_cpu_datap());

	/* Restore power management timer state */
	pmTimerRestore();

	/* Restart timer interrupts */
	rtc_timer_start();

#if HIBERNATION
#ifdef __i386__
	/* The image is written out using the copy engine, which disables
	 * preemption. Since the copy engine writes out the page which contains
	 * the preemption variable when it is disabled, we need to explicitly
	 * enable it here. */
	if (did_hibernate)
		enable_preemption();
#endif

	kprintf("ret from acpi_sleep_cpu hib=%d\n", did_hibernate);
#endif

#if CONFIG_SLEEP
	/* Because we don't save the bootstrap page, and we share it
	 * between sleep and MP slave init, we need to recreate it
	 * after coming back from sleep or hibernate. */
	install_real_mode_bootstrap(slave_pstart);
#endif
}

/*
 * acpi_idle_kernel is called by the ACPI Platform kext to request that the
 * kernel idle the boot processor in the deepest C-state for S0 sleep. All
 * slave processors are expected to have already been offlined in the
 * deepest C-state.
 *
 * The contract with ACPI is that although the kernel is called with interrupts
 * disabled, interrupts may need to be re-enabled to dismiss any pending timer
 * interrupt. However, the callback function will be called once this has
 * occurred and interrupts are guaranteed to be disabled at that time,
 * and to remain disabled during C-state entry, exit (wake) and return
 * from acpi_idle_kernel.
 */
void
acpi_idle_kernel(acpi_sleep_callback func, void *refcon)
{
	boolean_t	istate = ml_get_interrupts_enabled();

	kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n",
		cpu_number(), istate ? "enabled" : "disabled");

	assert(cpu_number() == master_cpu);

	/*
	 * Effectively set the boot cpu offline.
	 * This will stop further deadlines from being set.
	 */
	cpu_datap(master_cpu)->cpu_running = FALSE;

	/* Cancel any pending deadline */
	setPop(0);
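	/*
	 * If a timer interrupt is already pending in the local APIC, briefly
	 * re-enable interrupts so it can be taken and dismissed, then cancel
	 * the deadline again (see the interrupt contract described above).
	 */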
	while (lapic_is_interrupting(LAPIC_TIMER_VECTOR)) {
		(void) ml_set_interrupts_enabled(TRUE);
		setPop(0);
		ml_set_interrupts_enabled(FALSE);
	}

	/*
	 * Call back to caller to indicate that interrupts will remain
	 * disabled while we deep idle, wake and return.
	 */
	func(refcon);

	acpi_idle_abstime = mach_absolute_time();

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_START,
		acpi_idle_abstime, deep_idle_rebase, 0, 0, 0);

	/*
	 * Disable tracing during S0-sleep
	 * unless overridden by sysctl -w tsc.deep_idle_rebase=0
	 */
	if (deep_idle_rebase) {
		save_kdebug_enable = kdebug_enable;
		kdebug_enable = 0;
	}

	/*
	 * Call into power-management to enter the lowest C-state.
	 * Note when called on the boot processor this routine will
	 * return directly when awoken.
	 */
	pmCPUHalt(PM_HALT_SLEEP);

	/*
	 * Get wakeup time relative to the TSC which has progressed.
	 * Then rebase nanotime to reflect time not progressing over sleep
	 * - unless overridden so that tracing can occur during deep_idle.
	 */
	acpi_wake_abstime = mach_absolute_time();
	if (deep_idle_rebase) {
		rtc_sleep_wakeup(acpi_idle_abstime);
		kdebug_enable = save_kdebug_enable;
	}

	cpu_datap(master_cpu)->cpu_running = TRUE;

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END,
		acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0);

	/* Like S3 sleep, turn on tracing if the trace_wake boot-arg is present */
	if (kdebug_enable == 0) {
		if (wake_nkdbufs)
			start_kern_tracing(wake_nkdbufs, TRUE);
	}

	IOCPURunPlatformActiveActions();

	/* Restart timer interrupts */
	rtc_timer_start();
}

extern char real_mode_bootstrap_end[];
extern char real_mode_bootstrap_base[];
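
/*
 * Copy the real-mode bootstrap code into low memory and patch in the
 * protected-mode entry point to jump to.  Used both when starting slave
 * processors (slave_pstart) and when installing the wake handler for
 * resume from sleep (acpi_wake_prot).
 */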
void
install_real_mode_bootstrap(void *prot_entry)
{
	/*
	 * Copy the boot entry code to the real-mode vector area REAL_MODE_BOOTSTRAP_OFFSET.
	 * This is in page 1 which has been reserved for this purpose by
	 * machine_startup() from the boot processor.
	 * The slave boot code is responsible for switching to protected
	 * mode and then jumping to the common startup, _start().
	 */
	bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base),
		   (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET,
		   real_mode_bootstrap_end - real_mode_bootstrap_base);

	/*
	 * Set the location at the base of the stack to point to the
	 * common startup entry.
	 */
	ml_phys_write_word(
		PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET,
		(unsigned int)kvtophys((vm_offset_t)prot_entry));

	/* Flush caches */
	__asm__("wbinvd");
}