/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <i386/asm.h>
#include <i386/proc_reg.h>

#include <i386/postcode.h>
#include <assym.s>

/*
 * This code is linked into the kernel but placed in the "__HIB" section, which
 * means it is used by code running in the special context of restoring the
 * kernel text and data from the hibernation image read by the booter.
 * hibernate_kernel_entrypoint() and everything it calls or references
 * (i.e. hibernate_restore_phys_page()) must be careful to touch only memory
 * that is also in the "__HIB" section.
 */

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment

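/*
 * LJMP(KERNEL_CS, EXT(hstart)) below hand-assembles what "ljmpl $segment, $address"
 * would emit for an absolute far jump: opcode 0xEA, a 32-bit offset, then a
 * 16-bit segment selector.
 */
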
#define	KVTOPHYS	(-KERNELBASE)
#define	KVTOLINEAR	LINEAR_KERNELBASE

#define	PA(addr)	((addr)+KVTOPHYS)
#define	VA(addr)	((addr)-KVTOPHYS)

/* Location of temporary page tables */
#define	HPTD		0x80000

#define	KERNEL_MAP_SIZE	( 4 * 1024 * 1024)

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$(PTE_V), %eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(PAGE_SIZE),%eax	; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b

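/*
 * Roughly what fillkpt does, sketched in C (simplified, non-PAE; the name
 * fillkpt_sketch and the pte_t typedef are illustrative only):
 *
 *	typedef uint32_t pte_t;
 *
 *	static void
 *	fillkpt_sketch(pte_t *base, uint32_t prot, uint32_t phys, uint32_t index, uint32_t count)
 *	{
 *		pte_t entry = phys | PTE_V | prot;
 *		while (count--) {
 *			base[index++] = entry;
 *			entry += PAGE_SIZE;		// next 4K physical page
 *		}
 *	}
 */
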
/*
 * fillpse
 *	eax = physical page address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillpse(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$(PTE_V|PTE_PS), %eax	; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(1 << PDESHIFT),%eax	; /* increment physical address 4Mb */ \
	addl	$4,%ebx			; /* next entry */ \
	loop	1b

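/*
 * fillpse is the same idea as fillkpt, sketched in C below (illustrative only),
 * except that each entry is a 4MB superpage entry (PTE_PS set) and the physical
 * address advances by one superpage, i.e. 1 << PDESHIFT, per entry:
 *
 *	static void
 *	fillpse_sketch(uint32_t *base, uint32_t prot, uint32_t phys, uint32_t index, uint32_t count)
 *	{
 *		uint32_t entry = phys | PTE_V | PTE_PS | prot;
 *		while (count--) {
 *			base[index++] = entry;
 *			entry += (1 << PDESHIFT);	// next 4MB physical region
 *		}
 *	}
 */
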
/*
 * fillkptphys(base, prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	base = base of page table
 *	prot = protection bits
 */
#define	fillkptphys(base, prot)		  \
	movl	%eax, %ebx		; \
	shrl	$(PAGE_SHIFT), %ebx	; \
	fillkpt(base, prot)

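/*
 * In other words (illustrative only), fillkptphys identity-maps by deriving the
 * starting table index from the physical address itself:
 *
 *	fillkpt_sketch(base, prot, phys, phys >> PAGE_SHIFT, count);
 */
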
/*
 * Hibernation code restarts here. Steal some pages from 0x10000
 * to 0x90000 for page tables, directories, etc., to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked at 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute. It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and, if PAE, also IdlePDPT),
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
 */

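/*
 * The entrypoint below builds that temporary mapping roughly as follows,
 * sketched in C (non-PAE, 4MB pages; set_crN()/get_crN() stand in for the
 * mov-to/from-control-register instructions and are illustrative only):
 *
 *	uint32_t *pde = (uint32_t *) HPTD;	// temporary page directory
 *	uint32_t i;
 *
 *	// Identity-map physical 0..KERNELBASE with 4MB pages (indices 0..KPTDI-1).
 *	for (i = 0; i < KPTDI; i++)
 *		pde[i] = (i << PDESHIFT) | PTE_V | PTE_PS | PTE_W;
 *
 *	// Alias physical 0 at 0xC0000000 so the kernel's linked addresses resolve.
 *	for (i = 0; i < (KERNEL_MAP_SIZE >> PDESHIFT); i++)
 *		pde[KPTDI + i] = (i << PDESHIFT) | PTE_V | PTE_PS | PTE_W;
 *
 *	set_cr3(HPTD);					// use the temporary directory
 *	set_cr4(get_cr4() | CR4_PSE);			// allow 4MB pages
 *	set_cr0(get_cr0() | CR0_PG | CR0_WP | CR0_PE);	// enable paging
 */
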
	.align	ALIGN
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	cli

	mov	%eax, %edi		/* save pointer to hibernate header passed in %eax */

	POSTCODE(0x1)

	/* Map physical memory from zero to 0xC0000000 */
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	movl	$(KPTDI), %ecx
	fillpse( $(HPTD), $(PTE_W) )

	/* Map 0 again at 0xC0000000 */
	xorl	%eax, %eax
	movl	$(KPTDI), %ebx
	movl	$(KERNEL_MAP_SIZE >> PDESHIFT), %ecx
	fillpse( $(HPTD), $(PTE_W) )

	movl	$(HPTD), %eax
	movl	%eax, %cr3

	POSTCODE(0x3)

	movl	%cr4,%eax
	orl	$(CR4_PSE),%eax
	movl	%eax,%cr4		/* enable page size extensions */
	movl	%cr0, %eax
	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0		/* ready paging */

	POSTCODE(0x4)

	lgdt	PA(EXT(gdtptr))		/* load GDT */
	lidt	PA(EXT(idtptr))		/* load IDT */

	POSTCODE(0x5)

	LJMP	(KERNEL_CS,EXT(hstart))	/* paging on and go to correct vaddr */

/* Hib restart code now running with correct addresses */
LEXT(hstart)
	POSTCODE(0x6)

	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */
	mov	%ax,%gs

	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */

	POSTCODE(0x7)

	xorl	%eax, %eax		/* Video memory - N/A */
	pushl	%eax
	mov	%edi, %eax		/* Pointer to hibernate header */
	pushl	%eax
	call	EXT(hibernate_kernel_entrypoint)
	/* NOTREACHED */
	hlt


/*
void
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
*/

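/*
 * Sketch of the routine below in C (32-bit, non-PAE; the helper names and the
 * _sketch suffix are illustrative only). When the destination lies at or above
 * LINEAR_KERNELBASE it is not covered by the temporary mappings, so the last
 * page directory entry is pointed at the destination's 4MB region, giving a
 * scratch window at 0xFFC00000 (4GB - 4MB) to copy through:
 *
 *	void
 *	hibernate_restore_phys_page_sketch(uint32_t src, uint32_t dst, uint32_t len)
 *	{
 *		uint32_t *pde = (uint32_t *) HPTD;
 *
 *		if (src == 0)
 *			return;
 *		if (dst >= LINEAR_KERNELBASE) {
 *			pde[0x3FF] = (dst & 0xFFC00000) | PTE_V | PTE_PS | PTE_W;
 *			dst = 0xFFC00000 | (dst & 0x003FFFFF);
 *			invlpg(dst);		// flush the stale TLB entry for the window
 *		}
 *		memcpy((void *) dst, (const void *) src, len);
 *	}
 */
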
	.align	5
	.globl	EXT(hibernate_restore_phys_page)

/* XXX doesn't handle 64-bit addresses yet */
/* XXX can only deal with exactly one page */
LEXT(hibernate_restore_phys_page)
	pushl	%edi
	pushl	%esi

	movl	8+ 4(%esp),%esi		/* source virtual address */
	addl	$0, %esi
	jz	2f			/* If source == 0, nothing to do */

	movl	8+ 12(%esp),%edi	/* destination physical address */
	cmpl	$(LINEAR_KERNELBASE), %edi
	jl	1f			/* no need to map, below 0xC0000000 */

	movl	%edi, %eax		/* destination physical address */
	/* Map physical address to virt. address 0xffc00000 (4GB - 4MB) */
	andl	$0xFFC00000, %eax
	orl	$(PTE_V | PTE_PS | PTE_W), %eax
	movl	%eax, (HPTD + (0x3FF * 4))
	orl	$0xFFC00000, %edi
	invlpg	(%edi)

1:
	movl	8+ 20(%esp),%edx	/* number of bytes */
	cld
	/* move longs */
	movl	%edx,%ecx
	sarl	$2,%ecx
	rep
	movsl
	/* move bytes */
	movl	%edx,%ecx
	andl	$3,%ecx
	rep
	movsb
2:
	popl	%esi
	popl	%edi
	ret