/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <i386/acpi.h>
#include <assym.s>

        .file   "acpi_wakeup.s"

        .text
        .align  12      /* Page align for single bcopy_phys() */

#define LJMP(segment, address)                          \
        .byte   0xea                                    ;\
        .long   address - EXT(acpi_wake_start)          ;\
        .word   segment
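
/*
 * LJMP hand-assembles a far jump: opcode 0xea followed by a 32-bit offset
 * and a 16-bit segment selector (the data16 prefix emitted at the call
 * site selects the ptr16:32 form). The offset is taken relative to
 * acpi_wake_start because this code runs from its copy below 1MB rather
 * than from its link-time address.
 */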

#define PA(addr)        (addr)

/*
 * acpi_wake_start
 *
 * The code from acpi_wake_start to acpi_wake_end is copied to
 * memory below 1MB. The firmware waking vector is updated to
 * point at acpi_wake_start in low memory before sleeping.
 */

ENTRY(acpi_wake_start)
        /*
         * CPU woke up from sleep, and is back in real mode.
         * Initialize it just enough to get back to protected mode.
         */
        cli

        POSTCODE(ACPI_WAKE_START_ENTRY)

        /* set up DS to match CS */
        movw    %cs, %ax
        movw    %ax, %ds

        /*
         * Must initialize GDTR before entering protected mode.
         * Use a temporary GDT with flat (base 0, 4GB limit) code and
         * data segments. The actual GDT is restored later.
         */
        addr16
        data16
        lgdt    EXT(acpi_gdtr) - EXT(acpi_wake_start)

        /* set CR0.PE to enter protected mode */
        mov     %cr0, %eax
        data16
        or      $(CR0_PE), %eax
        mov     %eax, %cr0

        /*
         * Make an inter-segment (far) jump to flush the prefetch queue
         * and reload the CS register.
         * If the GDT is bogus, it will blow up here.
         */
        data16
        LJMP(0x8, acpi_wake_prot + ACPI_WAKE_ADDR)

acpi_wake_prot:

        /* protected mode, paging disabled */

        /* set up the protected mode segment registers */
        mov     $0x10, %eax
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %ss
        movw    %ax, %fs
        movw    %ax, %gs
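
        /*
         * Selector 0x8 (used by the far jump above) and selector 0x10
         * (loaded into the data segment registers above) index the code
         * and data descriptors of the temporary acpi_gdt below; each
         * descriptor is 8 bytes, so the selector value equals the
         * descriptor's byte offset into the GDT.
         */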

        /* jump back to the sleep function in the kernel */
        movl    PA(saved_eip), %eax
        jmp     *%eax

/*  Segment Descriptor
 *
 *  31          24         19   16                 7           0
 * ------------------------------------------------------------
 * |             | |B| |A|       | |   |1|0|E|W|A|            |
 * | BASE 31..24 |G|/|0|V| LIMIT |P|DPL|  TYPE   | BASE 23:16 |
 * |             | |D| |L| 19..16| |   |1|1|C|R|A|            |
 * ------------------------------------------------------------
 * |                             |                            |
 * |        BASE 15..0           |       LIMIT 15..0          |
 * |                             |                            |
 * ------------------------------------------------------------
 */
ENTRY(acpi_gdt)
        .word   0, 0            /* 0x0  : null */
        .byte   0, 0, 0, 0

        .word   0xffff, 0x0000  /* 0x8  : code */
        .byte   0, 0x9e, 0xcf, 0

        .word   0xffff, 0x0000  /* 0x10 : data */
        .byte   0, 0x92, 0xcf, 0
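
        /*
         * Both non-null descriptors describe a flat segment: base 0,
         * limit 0xfffff with 4KB granularity (4GB), 32-bit operand size,
         * DPL 0. Access byte 0x9e is a present, readable code segment;
         * 0x92 is a present, writable data segment.
         */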

ENTRY(acpi_gdtr)
        .word   24              /* limit (8*3 segs) */
        .long   EXT(acpi_gdt) - EXT(acpi_wake_start) + ACPI_WAKE_ADDR

ENTRY(acpi_wake_end)


/*
 * acpi_sleep_cpu(acpi_sleep_callback func, void * refcon)
 *
 * Save CPU state before platform sleep. Restore CPU state
 * following wake up.
 */
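
/*
 * Call contract: func is invoked with refcon as its argument. If the
 * platform sleep succeeds, that call never returns and execution resumes
 * at wake_prot via the low-memory trampoline above; if it fails, the call
 * returns and control falls through to wake_restore with no CPU context
 * lost.
 */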

ENTRY(acpi_sleep_cpu)
        pushl   %ebp
        movl    %esp, %ebp

        /* save flags */
        pushfl

        /* save general purpose registers */
        pushal
        movl    %esp, saved_esp

        /* make sure the TLB is flushed */
        movl    %cr3, %eax
        movl    %eax, %cr3
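
        /*
         * Writing CR3 back to itself invalidates all non-global TLB
         * entries; this is the conventional way to force a TLB flush
         * on i386.
         */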

        /* save control registers */
        movl    %cr0, %eax
        movl    %eax, saved_cr0
        movl    %cr2, %eax
        movl    %eax, saved_cr2
        movl    %cr3, %eax
        movl    %eax, saved_cr3
        movl    %cr4, %eax
        movl    %eax, saved_cr4

        /* save segment registers */
        movw    %es, saved_es
        movw    %fs, saved_fs
        movw    %gs, saved_gs
        movw    %ss, saved_ss

        /* save descriptor table registers */
        sgdt    saved_gdt
        sldt    saved_ldt
        sidt    saved_idt
        str     saved_tr

        /*
         * When the system wakes up, the real-mode wake handler will
         * re-enter protected mode, then jump to the address stored at
         * saved_eip.
         */
        movl    $(PA(wake_prot)), saved_eip

        /*
         * Call the ACPI function provided by the caller to sleep the
         * platform. This call will not return on success.
         */
        pushl   B_ARG1
        movl    B_ARG0, %edi
        call    *%edi
        popl    %edi

        /* sleep failed, no CPU context lost */
        jmp     wake_restore

wake_prot:
        /* protected mode, paging disabled */
        POSTCODE(ACPI_WAKE_PROT_ENTRY)

        movl    PA(saved_cr3), %ebx
        movl    PA(saved_cr4), %ecx
        /*
         * restore CR3, PAE and NXE states in an orderly fashion
         */
        movl    %ebx, %cr3
        movl    %ecx, %cr4

        movl    $(MSR_IA32_EFER), %ecx          /* MSR number in ecx */
        rdmsr                                   /* MSR value returned in edx:eax */
        orl     $(MSR_IA32_EFER_NXE), %eax      /* Set NXE bit in low 32 bits */
        wrmsr                                   /* Update Extended Feature Enable reg */
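
        /*
         * Ordering note: CR4 (PAE) is restored and EFER.NXE is set while
         * paging is still disabled. If paging were re-enabled with NXE
         * clear, page-table entries carrying the NX bit would be treated
         * as having a reserved bit set and would fault.
         */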

        /* restore kernel GDT */
        lgdt    PA(saved_gdt)

        movl    PA(saved_cr2), %eax
        movl    %eax, %cr2

        /* restore CR0, paging enabled */
        movl    PA(saved_cr0), %eax
        movl    %eax, %cr0

        /* switch to kernel code segment */
        ljmpl   $(KERNEL_CS), $wake_paged

wake_paged:

        /* protected mode, paging enabled */
        POSTCODE(ACPI_WAKE_PAGED_ENTRY)

        /* switch to kernel data segment */
        movw    $(KERNEL_DS), %ax
        movw    %ax, %ds

        /* restore local and interrupt descriptor tables */
        lldt    saved_ldt
        lidt    saved_idt

        /* restore segment registers */
        movw    saved_es, %es
        movw    saved_fs, %fs
        movw    saved_gs, %gs
        movw    saved_ss, %ss

        /*
         * Restore task register. Before doing this, clear the busy flag
         * in the TSS descriptor set by the CPU.
         */
        movl    $saved_gdt, %eax
        movl    2(%eax), %edx           /* GDT base, skip limit word */
        movl    $(KERNEL_TSS), %eax     /* TSS segment selector */
        movb    $(K_TSS), 5(%edx, %eax) /* clear busy flag */
        ltr     saved_tr                /* restore TR */
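
        /*
         * Why this works: a TSS selector's value is also its byte offset
         * into the GDT (descriptors are 8 bytes), and byte 5 of a
         * descriptor is the access/type byte. The CPU marked the TSS
         * descriptor busy when it was first loaded, and ltr faults on a
         * busy descriptor, so the type is rewritten to K_TSS (available)
         * before reloading TR.
         */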

wake_restore:

        /* restore general purpose registers */
        movl    saved_esp, %esp
        popal

        /* restore flags */
        popfl

        leave
        ret


        .section __HIB, __text
        .align  2

        .globl  EXT(acpi_wake_prot_entry)
ENTRY(acpi_wake_prot_entry)
        /* protected mode, paging enabled */

        POSTCODE(ACPI_WAKE_PAGED_ENTRY)

        /* restore kernel GDT */
        lgdt    saved_gdt

        POSTCODE(0x40)

        /* restore control registers */

        movl    saved_cr0, %eax
        movl    %eax, %cr0

        movl    saved_cr2, %eax
        movl    %eax, %cr2

        POSTCODE(0x3E)

        /* restore real PDE base */
        movl    saved_cr3, %eax
        movl    saved_cr4, %edx
        movl    %eax, %cr3
        movl    %edx, %cr4
        movl    %eax, %cr3
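
        /*
         * CR3 is loaded, CR4 is restored, and CR3 is then written again;
         * the second write flushes non-global TLB entries so translations
         * are re-established under the restored CR4 settings.
         */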

        /* switch to kernel data segment */
        movw    $(KERNEL_DS), %ax
        movw    %ax, %ds

        POSTCODE(0x3C)
        /* restore local and interrupt descriptor tables */
        lldt    saved_ldt
        lidt    saved_idt

        POSTCODE(0x3B)
        /* restore segment registers */
        movw    saved_es, %es
        movw    saved_fs, %fs
        movw    saved_gs, %gs
        movw    saved_ss, %ss

        POSTCODE(0x3A)
        /*
         * Restore task register. Before doing this, clear the busy flag
         * in the TSS descriptor set by the CPU.
         */
        movl    $saved_gdt, %eax
        movl    2(%eax), %edx           /* GDT base, skip limit word */
        movl    $(KERNEL_TSS), %eax     /* TSS segment selector */
        movb    $(K_TSS), 5(%edx, %eax) /* clear busy flag */
        ltr     saved_tr                /* restore TR */

        /* restore general purpose registers */
        movl    saved_esp, %esp
        popal

        /* restore flags */
        popfl

        /* make sure interrupts are disabled */
        cli

        movl    $2, %eax
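
        /*
         * NOTE: the return value of 2 presumably lets the C caller
         * distinguish this wake entry path from the normal return of
         * acpi_sleep_cpu (assumption; the convention is not documented
         * in this file).
         */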

        leave
        ret


        .data
        .section __HIB, __data
        .align  2


/*
 * CPU registers saved across sleep/wake.
 */
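
/*
 * saved_gdt and saved_idt hold the 6-byte pseudo-descriptors stored by
 * sgdt/sidt: a 16-bit limit followed by a 32-bit linear base address.
 */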

saved_esp:      .long   0
saved_es:       .word   0
saved_fs:       .word   0
saved_gs:       .word   0
saved_ss:       .word   0
saved_cr0:      .long   0
saved_cr2:      .long   0
saved_cr3:      .long   0
saved_cr4:      .long   0
saved_gdt:      .word   0
                .long   0
saved_idt:      .word   0
                .long   0
saved_ldt:      .word   0
saved_tr:       .word   0
saved_eip:      .long   0