/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <i386/asm.h>		/* EXT()/LEXT() macros used below */
#include <i386/proc_reg.h>
#include <i386/postcode.h>
/*
This code is linked into the kernel but is part of the "__HIB" section, which means
it is used by code running in the special context of restoring the kernel text and data
from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
it calls or references (i.e. hibernate_restore_phys_page())
needs to be careful to only touch memory that is also in the "__HIB" section.
*/
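/*
 * A C routine intended to run in this restore context must likewise be placed
 * in the __HIB segment; one way to ask for that from C is a Mach-O section
 * attribute, e.g. __attribute__((section("__HIB, __text"))).
 */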
/* GAS won't handle an intersegment jump with a relocatable offset. */
#define	LJMP(segment,address)	\
	.byte	0xea		; .long	address		; .word	segment
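/*
 * 0xEA is the opcode of a direct far jmp; the macro emits it by hand, followed
 * by the 32-bit offset and the 16-bit segment selector, since GAS cannot
 * relocate the offset of an ljmp instruction on its own.
 */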
#define	KVTOPHYS	(-KERNELBASE)		/* kernel virtual -> physical offset */
#define	KVTOLINEAR	LINEAR_KERNELBASE

#define	PA(addr)	((addr)+KVTOPHYS)	/* kernel virtual -> physical */
#define	VA(addr)	((addr)-KVTOPHYS)	/* physical -> kernel virtual */
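/*
 * Example: with KERNELBASE at 0xC0000000, PA(0xC0100000) yields the physical
 * address 0x00100000, which is where the booter loads this hibernation code.
 */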
/* Location of temporary page tables */
#define	HPTD		0x80000

#define	KERNEL_MAP_SIZE	( 4 * 1024 * 1024)
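/*
 * In the non-PAE case (4MB pages, PDESHIFT == 22), KERNEL_MAP_SIZE >> PDESHIFT
 * equals 1, i.e. a single page-directory entry covers the 4MB kernel map that
 * is re-created at 0xC0000000 below.
 */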
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; /* 4 bytes per pte (non-PAE) */ \
	addl	base,%ebx		; \
	orl	$(PTE_V), %eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(PAGE_SIZE),%eax	; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
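/*
 * Illustrative use (phys, idx, n are placeholders): to map n 4KB pages starting
 * at physical address phys into the table at base, beginning at entry idx:
 *
 *	movl	$(phys), %eax
 *	movl	$(idx), %ebx
 *	movl	$(n), %ecx
 *	fillkpt( $(base), $(PTE_W) )
 */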
/*
 * fillpse
 *	eax = physical page address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillpse(base, prot)		  \
	shll	$2,%ebx			; /* 4 bytes per directory entry */ \
	addl	base,%ebx		; \
	orl	$(PTE_V|PTE_PS), %eax	; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(1 << PDESHIFT),%eax	; /* increment physical address 4MB */ \
	addl	$4,%ebx			; /* next entry */ \
	loop	1b
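/*
 * fillpse writes page-directory entries with PTE_PS set, so each entry maps a
 * 4MB superpage directly; no second-level page tables are needed, which keeps
 * the temporary mapping small enough to fit in the stolen low-memory pages.
 */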
/*
 * fillkptphys(base, prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	base = base of page table
 *	prot = protection bits
 */
#define	fillkptphys(base, prot)		  \
	movl	%eax, %ebx		; \
	shrl	$(PAGE_SHIFT), %ebx	; \
	fillkpt(base, prot)
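/*
 * fillkptphys derives the starting table index from the physical address itself
 * (address >> PAGE_SHIFT), so callers only load %eax and %ecx; this matches an
 * identity mapping where the pte index equals the physical page number.
 */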
/*
 * Hibernation code restarts here.  Steal some pages from 0x10000
 * to 0x90000 for page tables and directories etc. to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked to 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute.  It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and if PAE also IdlePDPT),
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
 */
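/*
 * Conceptually, the hand-off back to the normal kernel is just "reload %cr3
 * with the saved IdlePTD (or IdlePDPT) value": the MMU then translates through
 * the kernel's original page tables again instead of the temporary ones at HPTD.
 */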
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	/* Map physical memory from zero to 0xC0000000 */
	fillpse( $(HPTD), $(PTE_W) )

	/* Map 0 again at 0xC0000000 */
	movl	$(KERNEL_MAP_SIZE >> PDESHIFT), %ecx
	fillpse( $(HPTD), $(PTE_W) )
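	/*
	 * Two views of the same physical memory now live in the directory at HPTD:
	 * an identity mapping (execution continues at a physical-address PC until
	 * the far jump below) and an alias at 0xC0000000 (so the code can run at
	 * the virtual address it was linked for once paging is on).  %cr3 must be
	 * pointed at HPTD before CR0_PG is set below.
	 */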
	movl	%cr4, %eax
	orl	$(CR4_PSE), %eax
	movl	%eax,%cr4		/* enable page size extensions */
	movl	%cr0, %eax
	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0		/* ready paging */
	lgdt	PA(EXT(gdtptr))		/* load GDT */
	lidt	PA(EXT(idtptr))		/* load IDT */

	LJMP	(KERNEL_CS,EXT(hstart))	/* paging on and go to correct vaddr */
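/*
 * The far jump reloads %cs with KERNEL_CS from the freshly loaded GDT and, at
 * the same time, moves the instruction pointer from the identity-mapped low
 * address to hstart's linked virtual address above 0xC0000000.
 */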
/* Hib restart code now running with correct addresses */
LEXT(hstart)
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss
	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */
	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */
	xorl	%eax, %eax		/* Video memory - N/A */
	mov	%edi, %eax		/* Pointer to hibernate header */

	call	EXT(hibernate_kernel_entrypoint)
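/*
 * The values prepared in %eax above are passed as cdecl arguments to the C
 * routine hibernate_kernel_entrypoint(), which restores the saved kernel pages
 * from the hibernation image, taking care (per the note at the top of this
 * file) to touch only memory that is itself in the __HIB section.
 */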
/*
 * hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
 */
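/*
 * With the 32-bit cdecl layout used here, each uint64_t argument occupies two
 * stack slots, so after the two register saves at entry the low word of src is
 * at 8+4(%esp), the low word of dst at 8+12(%esp), and len at 8+20(%esp),
 * matching the offsets used below.
 */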
	.globl	EXT(hibernate_restore_phys_page)

/* XXX doesn't handle 64-bit addresses yet */
/* XXX can only deal with exactly one page */
LEXT(hibernate_restore_phys_page)
	pushl	%edi			/* save callee-saved registers used below */
	pushl	%esi
	movl	8+ 4(%esp),%esi		/* source virtual address */
	testl	%esi,%esi
	jz	2f			/* If source == 0, nothing to do */
	movl	8+ 12(%esp),%edi	/* destination physical address */
	cmpl	$(LINEAR_KERNELBASE), %edi
	jl	1f			/* no need to map, below 0xC0000000 */
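	/*
	 * The destination lies at or above LINEAR_KERNELBASE, so it is not covered
	 * by the temporary linear mapping; borrow the last page-directory slot to
	 * map its 4MB-aligned physical region at 0xFFC00000 and redirect %edi into
	 * that window.
	 */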
	movl	%edi, %eax		/* destination physical address */
	/* Map physical address to virt. address 0xffc00000 (4GB - 4MB) */
	andl	$0xFFC00000, %eax
	orl	$(PTE_V | PTE_PS | PTE_W), %eax
	movl	%eax, (HPTD + (0x3FF * 4))
	orl	$0xFFC00000, %edi

	/* flush any stale TLB entry for the 4MB window before using it */
	movl	%edi, %eax
	invlpg	(%eax)
	movl	8+ 20(%esp),%edx	/* number of bytes */