/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <i386/asm.h>
#include <i386/proc_reg.h>

#include <i386/postcode.h>
#include <assym.s>

/*
This code is linked into the kernel but is part of the "__HIB" section, which means
it is used by code running in the special context of restoring the kernel text and data
from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
it calls or references (i.e. hibernate_restore_phys_page())
needs to be careful to touch only memory that is also in the "__HIB" section.
*/

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment
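
/*
 * LJMP hand-assembles a far jump: opcode 0xEA followed by a 32-bit offset
 * and a 16-bit segment selector. It is used below to reach
 * KERNEL_CS:hstart once paging has been turned on.
 */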

/* Location of temporary page tables */
#define HPTD		(0x13000)
#define HPDPT		(0x17000)

#define LAST_PAGE	(0xFFE00000)
#define LAST_PAGE_PDE	(0x7ff)
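
/*
 * Layout sketch (derived from the constants above): HPTD is the base of four
 * contiguous page-directory pages (0x13000-0x16fff) that identity-map memory
 * from 0 up to LAST_PAGE with 2MB pages; HPDPT at 0x17000 is the PAE
 * page-directory-pointer table. PDE index LAST_PAGE_PDE (0x7ff) is reserved
 * for the temporary 2MB window at 4GB - 2MB.
 */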

/*
 * fillpse
 *	eax = physical page address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillpse(base, prot)			  \
	shll	$3,%ebx				; \
	addl	base,%ebx			; \
	orl	$(PTE_V|PTE_PS|0x60), %eax	; \
	orl	prot,%eax			; \
	xorl	%edx, %edx			; \
1:	movl	%eax,(%ebx)			; /* low 32b */ \
	addl	$4,%ebx				; \
	movl	%edx,(%ebx)			; /* high 32b */ \
	addl	$(1 << PDESHIFT),%eax		; /* increment physical address 2Mb */ \
	addl	$4,%ebx				; /* next entry */ \
	loop	1b
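
/*
 * Example (assuming PTE_V=0x1, PTE_W=0x2, PTE_PS=0x80): with %eax = 0,
 * base = $(HPTD) and prot = $(PTE_W), the first 64-bit PDE written is
 * 0x00000000_000000E3, i.e. a present, writable 2MB page mapping physical 0
 * with the accessed/dirty bits (0x60) preset; each following entry advances
 * the physical address by 2MB.
 */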


/* Segment Descriptor
 *
 *  31          24         19   16                 7           0
 * ------------------------------------------------------------
 * |             | |B| |A|       | |   |1|0|E|W|A|            |
 * | BASE 31..24 |G|/|0|V| LIMIT |P|DPL|  TYPE   | BASE 23:16 |
 * |             | |D| |L| 19..16| |   |1|1|C|R|A|            |
 * ------------------------------------------------------------
 * |                             |                            |
 * |        BASE 15..0           |       LIMIT 15..0          |
 * |                             |                            |
 * ------------------------------------------------------------
 */

	.align	ALIGN
ENTRY(hib_gdt)
	.word	0, 0		/* 0x0  : null */
	.byte	0, 0, 0, 0

	.word	0xffff, 0x0000	/* 0x8  : code */
	.byte	0, 0x9e, 0xcf, 0

	.word	0xffff, 0x0000	/* 0x10 : data */
	.byte	0, 0x92, 0xcf, 0

ENTRY(hib_gdtr)
	.word	24		/* limit (8*3 segs) */
	.long	EXT(hib_gdt)
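
/*
 * Both non-null descriptors are flat 4GB segments: base 0, limit 0xfffff with
 * 4K granularity and 32-bit default operand size (flags 0xcf). Access byte
 * 0x9e = present, DPL 0, readable code; 0x92 = present, DPL 0, writable data.
 */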

/*
 * Hibernation code restarts here.  Steal some pages from 0x10000
 * to 0x90000 for page tables and directories, etc., to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked to 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute.  It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and if PAE, also IdlePDPT)
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
 */

	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	cli

	mov	%eax, %edi

	POSTCODE(0x1)

	/* Map physical memory from zero to LAST_PAGE */
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	movl	$(LAST_PAGE_PDE), %ecx
	fillpse( $(HPTD), $(PTE_W) )
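
	/*
	 * PDE indexes 0 .. 0x7fe at HPTD now identity-map physical
	 * 0 .. LAST_PAGE with 2MB pages; index 0x7ff is left free for the
	 * temporary window used by hibernate_restore_phys_page().
	 */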

	movl	$(HPDPT), %ebx
	movl	$(HPTD), %eax
	orl	$(PTE_V), %eax

	xorl	%edx, %edx

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next page directory page (next 1GB) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next page directory page (next 1GB) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next page directory page (next 1GB) */

	movl	%eax,(%ebx)		/* low 32b */
	addl	$4,%ebx
	movl	%edx,(%ebx)		/* high 32b */
	addl	$4,%ebx
	addl	$(1 << 12),%eax		/* next page directory page (next 1GB) */
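
	/*
	 * The four PDPT entries just written point at the four consecutive
	 * page-directory pages starting at HPTD, so the whole 32-bit address
	 * space (4 x 1GB) is covered by the temporary identity map.
	 */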

	/* set page dir ptr table addr */
	movl	$(HPDPT), %eax
	movl	%eax, %cr3

	POSTCODE(0x3)

	movl	%cr4,%eax
	orl	$(CR4_PAE|CR4_PGE|CR4_MCE),%eax
	movl	%eax,%cr4			/* enable PAE, global pages, machine check */

	movl	$(MSR_IA32_EFER), %ecx		/* MSR number in ecx */
	rdmsr					/* MSR value returned in edx:eax */
	orl	$(MSR_IA32_EFER_NXE), %eax	/* Set NXE bit in low 32-bits */
	wrmsr					/* Update Extended Feature Enable reg */

	movl	%cr0, %eax
	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0			/* ready paging */
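
	/*
	 * With CR0.PG set, the temporary PAE mappings take effect; the far
	 * jump below then reloads CS and lands in hstart at its linked address.
	 */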

	POSTCODE(0x4)

	lgdt	EXT(gdtptr)			/* load GDT */
	lidt	EXT(idtptr)			/* load IDT */

	POSTCODE(0x5)

	LJMP	(KERNEL_CS,EXT(hstart))		/* paging on and go to correct vaddr */

/* Hib restart code now running with correct addresses */
LEXT(hstart)
	POSTCODE(0x6)

	mov	$(KERNEL_DS),%ax		/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	mov	$0,%ax				/* fs must be zeroed; */
	mov	%ax,%fs				/* some bootstrappers don`t do this */
	mov	%ax,%gs

	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */

	POSTCODE(0x7)

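	/*
	 * Build the C argument list for hibernate_kernel_entrypoint(): the
	 * header pointer saved in %edi at entry plus three zeroed arguments
	 * (video parameters, unused here), pushed right to left per the
	 * IA-32 calling convention.
	 */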
	xorl	%eax, %eax			/* Video memory - N/A */
	pushl	%eax
	pushl	%eax
	pushl	%eax
	mov	%edi, %eax			/* Pointer to hibernate header */
	pushl	%eax
	call	EXT(hibernate_kernel_entrypoint)
	/* NOTREACHED */
	hlt

/*
void
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
*/

	.align	5
	.globl	EXT(hibernate_restore_phys_page)

/* XXX can only deal with exactly one page */
LEXT(hibernate_restore_phys_page)
	pushl	%edi
	pushl	%esi
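
	/*
	 * Stack layout after the two pushes above (offsets from %esp):
	 *   8+ 4  src low 32 bits      8+ 8  src high 32 bits (unused)
	 *   8+12  dst low 32 bits      8+16  dst high 32 bits
	 *   8+20  len                  8+24  procFlags (unused)
	 */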

	movl	8+ 4(%esp),%esi		/* source virtual address */
	addl	$0, %esi
	jz	3f			/* If source == 0, nothing to do */

	movl	8+ 16(%esp),%eax	/* destination physical address, high 32 bits */
	movl	8+ 12(%esp),%edi	/* destination physical address, low 32 bits */
	addl	$0, %eax
	jne	1f			/* need to map, above LAST_PAGE */

	cmpl	$(LAST_PAGE), %edi
	jb	2f			/* no need to map, below LAST_PAGE */
1:
	/* Map physical address %eax:%edi to virt. address LAST_PAGE (4GB - 2MB) */
	movl	%eax, (HPTD + (LAST_PAGE_PDE * 8) + 4)
	movl	%edi, %eax		/* destination physical address */
	andl	$(LAST_PAGE), %eax
	orl	$(PTE_V | PTE_PS | PTE_W), %eax
	movl	%eax, (HPTD + (LAST_PAGE_PDE * 8))
	orl	$(LAST_PAGE), %edi
	invlpg	(%edi)
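
	/*
	 * PDE index LAST_PAGE_PDE is reused as a sliding 2MB window for
	 * destinations at or above LAST_PAGE (including those above 4GB);
	 * invlpg discards any stale TLB entry left from a previous call.
	 */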

2:
	movl	8+ 20(%esp),%edx	/* number of bytes */
	cld
	/* move longs */
	movl	%edx,%ecx
	sarl	$2,%ecx
	rep
	movsl
	/* move bytes */
	movl	%edx,%ecx
	andl	$3,%ecx
	rep
	movsb
3:
	popl	%esi
	popl	%edi
	ret