 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
#include <i386/proc_reg.h>
#include <i386/postcode.h>
This code is linked into the kernel but is part of the "__HIB" section, which means
it's used by code running in the special context of restoring the kernel text and data
from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
it calls or references (e.g. hibernate_restore_phys_page())
needs to be careful to touch only memory that is also in the "__HIB" section.
 * GAS won't handle an intersegment jump with a relocatable offset.
#define LJMP(segment,address) \
/* Location of temporary page tables */
#define HPTD		(0x13000)
#define HPDPT		(0x17000)

#define LAST_PAGE	(0xFFE00000)
#define LAST_PAGE_PDE	(0x7ff)
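/*
 * For reference: LAST_PAGE = 0xFFE00000 = 4GB - 2MB, and LAST_PAGE_PDE =
 * 0xFFE00000 >> 21 = 0x7ff, the index of the very last 2MB PDE across the
 * four PAE page directories (4 * 512 entries). The directories at HPTD
 * occupy 0x13000..0x16fff and are immediately followed by the page
 * directory pointer table at HPDPT (0x17000).
 */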
 * eax = physical page address
 * ebx = index into page table
 * ecx = how many pages to map
 * base = base address of page dir/table
 * prot = protection bits
#define fillpse(base, prot) \
	orl	$(PTE_V|PTE_PS|0x60), %eax	; \
1:	movl	%eax,(%ebx)		; /* low 32b */ \
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << PDESHIFT),%eax	; /* increment physical address by 2MB */ \
	addl	$4,%ebx			; /* next entry */ \
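/*
 * Worked example (a sketch, assuming the usual IA-32 encodings: PTE_V = 0x1,
 * PTE_W = 0x2, PTE_PS = 0x80, 0x60 = accessed|dirty, and PDESHIFT = 21 for
 * 2MB PAE pages): fillpse( $(HPTD), $(PTE_W) ) starting with %eax = 0 writes
 * 64-bit PDEs
 *     0x00000000000000e3, 0x00000000002000e3, 0x00000000004000e3, ...
 * i.e. one writable 2MB identity mapping per entry.
 */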
 * ------------------------------------------------------------
 * |             | |B| |A|        | |   |1|0|E|W|A|           |
 * | BASE 31..24 |G|/|0|V| LIMIT  |P|DPL|  TYPE   |BASE 23..16|
 * |             | |D| |L| 19..16 | |   |1|1|C|R|A|           |
 * ------------------------------------------------------------
 * |                              |                           |
 * |         BASE 15..0           |       LIMIT 15..0         |
 * |                              |                           |
 * ------------------------------------------------------------
	.word	0, 0		/* 0x0 : null */

	.word	0xffff, 0x0000	/* 0x8 : code */
	.byte	0, 0x9e, 0xcf, 0

	.word	0xffff, 0x0000	/* 0x10 : data */
	.byte	0, 0x92, 0xcf, 0
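/*
 * Decoding the two descriptors above (flat 4GB segments): both have base 0
 * and limit 0xfffff with G=1 (page granularity) and D=1 (32-bit), i.e. the
 * full 4GB. The code access byte 0x9e = present, DPL 0, code, conforming,
 * readable; the data access byte 0x92 = present, DPL 0, data, writable.
 */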
	.word	24		/* limit (3 segs * 8 bytes) */
 * Hibernation code restarts here. Steal some pages from 0x10000
 * to 0x90000 for page tables, directories, etc., to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked to 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute. It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and, if PAE, also IdlePDPT),
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
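/*
 * Sketch of the temporary PAE hierarchy built below (addresses taken from
 * the HPTD/HPDPT defines above): %cr3 points at the PDPT at 0x17000 (loaded
 * at the "set page dir ptr table addr" step), whose four entries point at
 * the page directories at 0x13000..0x16fff, whose 2MB PTE_PS entries
 * identity-map physical memory up to LAST_PAGE.
 */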
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	/* Map physical memory from zero to LAST_PAGE */
	movl	$(LAST_PAGE_PDE), %ecx
	fillpse( $(HPTD), $(PTE_W) )
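/*
 * With %ecx = LAST_PAGE_PDE (0x7ff), the fillpse above writes 2047 PDEs, an
 * identity map of physical [0, LAST_PAGE), assuming the elided setup starts
 * %ebx at index 0. The final PDE slot (index 0x7ff, covering virtual
 * LAST_PAGE..4GB) is left for hibernate_restore_phys_page below to use as a
 * temporary 2MB mapping window (HPTD + LAST_PAGE_PDE * 8).
 */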
	movl	%eax,(%ebx)		; /* low 32b */ \
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << 12),%eax		; /* next page directory page (next 1GB of VA) */ \
	movl	%eax,(%ebx)		; /* low 32b */ \
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << 12),%eax		; /* next page directory page (next 1GB of VA) */ \
	movl	%eax,(%ebx)		; /* low 32b */ \
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << 12),%eax		; /* next page directory page (next 1GB of VA) */ \
	movl	%eax,(%ebx)		; /* low 32b */
	movl	%edx,(%ebx)		; /* high 32b */ \
	addl	$(1 << 12),%eax		; /* next page directory page (next 1GB of VA) */ \
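/*
 * Result (assuming the elided setup above leaves %eax = HPTD | PTE_V and
 * %edx = 0): the four 64-bit PDPT entries at HPDPT point at the page
 * directories at HPTD, HPTD + 0x1000, HPTD + 0x2000 and HPTD + 0x3000,
 * one per 1GB of virtual address space.
 */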
	/* set page dir ptr table addr */

	orl	$(CR4_PAE|CR4_PGE|CR4_MCE),%eax
	movl	%eax,%cr4		/* enable PAE (2MB pages via PTE_PS), global pages, MCE */

	movl	$(MSR_IA32_EFER), %ecx	/* MSR number in ecx */
	rdmsr				/* MSR value returned in edx:eax */
	orl	$(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32 bits */
	wrmsr				/* Update Extended Feature Enable reg */
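/*
 * For reference: IA32_EFER is architecturally MSR 0xc0000080 and NXE is
 * bit 11; with it set, bit 63 (XD) of the 64-bit PAE entries can mark
 * pages non-executable.
 */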
	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0		/* turn on paging (PG), write protect (WP), protected mode (PE) */

	lgdt	EXT(gdtptr)		/* load GDT */
	lidt	EXT(idtptr)		/* load IDT */

	LJMP	(KERNEL_CS,EXT(hstart))	/* paging on and go to correct vaddr */

/* Hib restart code now running with correct addresses */
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */

	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */

	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */

	xorl	%eax, %eax		/* Video memory - N/A */

	mov	%edi, %eax		/* Pointer to hibernate header */

	call	EXT(hibernate_kernel_entrypoint)
hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
	.globl	EXT(hibernate_restore_phys_page)

/* XXX can only deal with exactly one page */
LEXT(hibernate_restore_phys_page)
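/*
 * Argument offsets used below (cdecl; the "8+" accounts for two registers
 * assumed to be pushed in the elided prologue):
 *   8+ 4(%esp)  src, low 32 bits      8+ 8(%esp)  src, high 32 bits
 *   8+12(%esp)  dst, low 32 bits      8+16(%esp)  dst, high 32 bits
 *   8+20(%esp)  len (bytes)           8+24(%esp)  procFlags
 */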
	movl	8+ 4(%esp),%esi		/* source virtual address */
	jz	3f			/* If source == 0, nothing to do */

	movl	8+ 16(%esp),%eax	/* destination physical address, high 32 bits */
	movl	8+ 12(%esp),%edi	/* destination physical address, low 32 bits */
	jne	1f			/* need to map, above LAST_PAGE */

	cmpl	$(LAST_PAGE), %edi
	jb	2f			/* no need to map, below LAST_PAGE */

	/* Map physical address %eax:%edi to virt. address LAST_PAGE (4GB - 2MB) */
	movl	%eax, (HPTD + (LAST_PAGE_PDE * 8) + 4)
	movl	%edi, %eax		/* destination physical address */
	andl	$(LAST_PAGE), %eax
	orl	$(PTE_V | PTE_PS | PTE_W), %eax
	movl	%eax, (HPTD + (LAST_PAGE_PDE * 8))
	orl	$(LAST_PAGE), %edi
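/*
 * The two stores above install a 2MB PTE_PS mapping of the destination's
 * physical region in the last PDE slot (the andl 2MB-aligns the low word,
 * since LAST_PAGE = 0xFFE00000 doubles as the alignment mask). The orl then
 * turns %edi into dst's virtual address inside that window: the top 11 bits
 * become LAST_PAGE while the 21-bit offset within the 2MB page is preserved.
 */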
	movl	8+ 20(%esp),%edx	/* number of bytes */