/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <i386/proc_reg.h>
#include <i386/postcode.h>
/*
This code is linked into the kernel but is part of the "__HIB" section, which means
it is used by code running in the special context of restoring the kernel text and data
from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything
it calls or references (e.g. hibernate_restore_phys_page())
needs to be careful to touch only memory that is also in the "__HIB" section.
*/
/*
 * GAS won't handle an intersegment jump with a relocatable offset,
 * so the far jump is hand-assembled: the 0xEA opcode followed by a
 * 32-bit offset and a 16-bit segment selector.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		; \
	.long	address		; \
	.word	segment
#define	KVTOPHYS	(-KERNELBASE)
#define	KVTOLINEAR	LINEAR_KERNELBASE

#define	PA(addr)	((addr)+KVTOPHYS)
#define	VA(addr)	((addr)-KVTOPHYS)
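/*
 * Illustrative arithmetic (assuming KERNELBASE is 0xC0000000, as the
 * mappings below imply): PA() subtracts the kernel's virtual base to
 * yield a physical address and VA() adds it back, e.g.
 *	PA(0xC0100000) = 0xC0100000 - 0xC0000000 = 0x00100000
 *	VA(0x00100000) = 0x00100000 + 0xC0000000 = 0xC0100000
 */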
/* Location of temporary page tables */

#define	KERNEL_MAP_SIZE	( 4 * 1024 * 1024)
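/*
 * Sizing note (with PDESHIFT = 22, i.e. 4MB PSE pages, as used by
 * fillpse below): KERNEL_MAP_SIZE >> PDESHIFT = 4MB / 4MB = 1, so the
 * temporary kernel mapping set up later consumes exactly one page
 * directory entry.
 */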
/*
 * fillkpt(base, prot)
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$2,%ebx			; /* index -> byte offset (4-byte entries) */ \
	addl	base,%ebx		; /* point at the first pte to fill */ \
	orl	$(PTE_V), %eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(PAGE_SIZE),%eax	; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
/*
 * fillpse(base, prot)
 *	eax = physical page address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillpse(base, prot)		  \
	shll	$2,%ebx			; /* index -> byte offset (4-byte entries) */ \
	addl	base,%ebx		; /* point at the first pde to fill */ \
	orl	$(PTE_V|PTE_PS), %eax	; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(1 << PDESHIFT),%eax	; /* increment physical address by 4MB */ \
	addl	$4,%ebx			; /* next entry */ \
	loop	1b
/*
 * fillkptphys(base, prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	base = base of page table
 *	prot = protection bits
 */
#define	fillkptphys(base, prot)		  \
	movl	%eax, %ebx		; /* derive the page index from the physical address */ \
	shrl	$(PAGE_SHIFT), %ebx	; \
	fillkpt(base, prot)
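/*
 * Illustrative use of the fill macros (a sketch of the pattern driven
 * below, not an additional mapping): load %eax with the starting
 * physical address, %ebx with the starting table index, and %ecx with
 * the number of entries, then invoke the macro with the table base and
 * protection bits, e.g.
 *	movl	$0, %eax			    map starting at physical 0
 *	movl	$0, %ebx			    from directory entry 0
 *	movl	$(0xC0000000 >> PDESHIFT), %ecx	    enough 4MB entries to reach 0xC0000000
 *	fillpse( $(HPTD), $(PTE_W) )		    writable 4MB mappings
 */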
/*
 * Hibernation code restarts here. Steal some pages from 0x10000
 * to 0x90000 for page tables and directories etc. to temporarily
 * map the hibernation code (put at 0x100000 (phys) by the booter
 * and linked to 0xC0100000 by the linker) to 0xC0100000 so it can
 * execute. It's self-contained and won't make any references outside
 * of itself.
 *
 * On the way down it has to save IdlePTD (and if PAE, also IdlePDPT),
 * and after it runs it has to restore those and load IdlePTD (or
 * IdlePDPT if PAE) into %cr3 to re-establish the original mappings.
 */
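/*
 * Sketch only (the save/restore itself is not shown here): switching
 * back to the original mappings amounts to reloading %cr3 with the
 * saved page-directory base, along the lines of
 *	movl	saved_page_dir_base, %eax	    hypothetical saved value
 *	movl	%eax, %cr3			    original page tables take effect again
 */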
	.globl	EXT(hibernate_machine_entrypoint)
LEXT(hibernate_machine_entrypoint)
	/* Map physical memory from zero to 0xC0000000 */
	fillpse( $(HPTD), $(PTE_W) )
	/* Map the first KERNEL_MAP_SIZE bytes of physical memory again, this time at 0xC0000000 */
	movl	$(KERNEL_MAP_SIZE >> PDESHIFT), %ecx
	fillpse( $(HPTD), $(PTE_W) )
	movl	%eax,%cr4		/* enable page size extensions */

	orl	$(CR0_PG|CR0_WP|CR0_PE), %eax
	movl	%eax, %cr0		/* ready paging */
	lgdt	PA(EXT(gdtptr))		/* load GDT */
	lidt	PA(EXT(idtptr))		/* load IDT */

	LJMP	(KERNEL_CS,EXT(hstart))	/* paging on and go to correct vaddr */
/* Hib restart code now running with correct addresses */

	mov	$(KERNEL_DS),%ax	/* set kernel data segment */

	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */

	lea	EXT(gIOHibernateRestoreStackEnd),%esp	/* switch to the bootup stack */
	xorl	%eax, %eax		/* Video memory - N/A */

	mov	%edi, %eax		/* Pointer to hibernate header */

	call	EXT(hibernate_kernel_entrypoint)
/*
 * hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags);
 */

	.globl	EXT(hibernate_restore_phys_page)

/* XXX doesn't handle 64-bit addresses yet */
/* XXX can only deal with exactly one page */
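/*
 * Stack layout note (assuming two registers are saved on entry, which
 * is what the "8+" bias in the operands below accounts for): each
 * uint64_t argument occupies two 32-bit slots, so src starts at
 * 8+4(%esp), dst at 8+12(%esp), and len sits at 8+20(%esp).
 */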
LEXT(hibernate_restore_phys_page)

	movl	8+ 4(%esp),%esi		/* source virtual address */
	jz	2f			/* If source == 0, nothing to do */

	movl	8+ 12(%esp),%edi	/* destination physical address */
	cmpl	$(LINEAR_KERNELBASE), %edi
	jl	1f			/* no need to map, below 0xC0000000 */
	movl	%edi, %eax		/* destination physical address */
	/* Map physical address to virtual address 0xffc00000 (4GB - 4MB) */
	andl	$0xFFC00000, %eax
	orl	$(PTE_V | PTE_PS | PTE_W), %eax
	movl	%eax, (HPTD + (0x3FF * 4))
	orl	$0xFFC00000, %edi
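/*
 * Worked example (illustrative): if the destination physical page were
 * 0x12345000, the andl leaves 0x12000000, the PDE written at
 * HPTD + 0x3FF*4 maps the 4MB superpage 0x12000000..0x123FFFFF at
 * virtual 0xFFC00000, and the orl turns %edi into 0xFFF45000, the
 * destination's address within that window.
 */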
	movl	8+ 20(%esp),%edx	/* number of bytes */