apple/xnu.git: osfmk/i386/hibernate_i386.c
blob f164b8ec9cb1673aa29c27da771cab7a991d6c17
/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <i386/pmap.h>
#include <kern/cpu_data.h>
#include <IOKit/IOPlatformExpert.h>

#include <pexpert/i386/efi.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <i386/i386_lowmem.h>

#define MAX_BANKS 32

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

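/*
 * Build the hibernation page list from the EFI memory map passed in
 * boot_args: DRAM-backed ranges are coalesced into at most MAX_BANKS
 * sorted, non-overlapping banks, and a hibernate_page_list_t is allocated
 * with one bitmap (one bit per physical page) per bank. Returns NULL if
 * more than MAX_BANKS banks would be needed or the allocation fails.
 */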
hibernate_page_list_t *
hibernate_page_list_allocate(void)
{
    ppnum_t                 base, num;
    vm_size_t               size;
    uint32_t                bank, num_banks;
    uint32_t                pages, page_count;
    hibernate_page_list_t * list;
    hibernate_bitmap_t *    bitmap;

    EfiMemoryRange *        mptr;
    uint32_t                mcount, msize, i;
    hibernate_bitmap_t      dram_ranges[MAX_BANKS];
    boot_args *             args = (boot_args *) PE_state.bootArgs;

    mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
    if (args->MemoryMapDescriptorSize == 0)
        panic("Invalid memory map descriptor size");
    msize = args->MemoryMapDescriptorSize;
    mcount = args->MemoryMapSize / msize;

    num_banks = 0;
    for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize))
    {
        base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
        num  = (ppnum_t) mptr->NumberOfPages;
        if (!num)
            continue;

        switch (mptr->Type)
        {
            // any kind of dram
            case kEfiLoaderCode:
            case kEfiLoaderData:
            case kEfiBootServicesCode:
            case kEfiBootServicesData:
            case kEfiConventionalMemory:
            case kEfiACPIMemoryNVS:
            case kEfiPalCode:

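                /*
                 * Insert this range into dram_ranges[], kept sorted by
                 * first_page: extend an existing bank downward if this range
                 * ends exactly where that bank starts, extend the previous
                 * bank upward if this range begins right after it, otherwise
                 * shift the later banks up and open a new bank here.
                 */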
                for (bank = 0; bank < num_banks; bank++)
                {
                    if (dram_ranges[bank].first_page <= base)
                        continue;
                    if ((base + num) == dram_ranges[bank].first_page)
                    {
                        dram_ranges[bank].first_page = base;
                        num = 0;
                    }
                    break;
                }
                if (!num) break;

                if (bank && (base == (1 + dram_ranges[bank - 1].last_page)))
                    bank--;
                else
                {
                    num_banks++;
                    if (num_banks >= MAX_BANKS) break;
                    bcopy(&dram_ranges[bank],
                          &dram_ranges[bank + 1],
                          (num_banks - bank - 1) * sizeof(hibernate_bitmap_t));
                    dram_ranges[bank].first_page = base;
                }
                dram_ranges[bank].last_page = base + num - 1;
                break;

            // runtime services will be restarted, so no save
            case kEfiRuntimeServicesCode:
            case kEfiRuntimeServicesData:
            // contents are volatile once the platform expert starts
            case kEfiACPIReclaimMemory:
            // non dram
            case kEfiReservedMemoryType:
            case kEfiUnusableMemory:
            case kEfiMemoryMappedIO:
            case kEfiMemoryMappedIOPortSpace:
            default:
                break;
        }
    }

    if (num_banks >= MAX_BANKS)
        return (NULL);

    // size the hibernation bitmap

    size = sizeof(hibernate_page_list_t);
    page_count = 0;
    for (bank = 0; bank < num_banks; bank++) {
        pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
        page_count += pages;
        size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
    }

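    /*
     * The list is one variable-length allocation: the hibernate_page_list_t
     * header followed, for each bank, by a hibernate_bitmap_t header and its
     * bitmap words (32 pages per uint32_t).
     */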
    list = (hibernate_page_list_t *)kalloc(size);
    if (!list)
        return (list);

    list->list_size  = (uint32_t)size;
    list->page_count = page_count;
    list->bank_count = num_banks;

    // convert to hibernation bitmap.

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < num_banks; bank++)
    {
        bitmap->first_page = dram_ranges[bank].first_page;
        bitmap->last_page  = dram_ranges[bank].last_page;
        bitmap->bitmapwords = (bitmap->last_page + 1
                               - bitmap->first_page + 31) >> 5;
        kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", bank,
                bitmap->first_page,
                bitmap->last_page);
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }

    return (list);
}

// mark pages not to be saved, but available for scratch usage during restore

void
hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
                                    __unused hibernate_page_list_t * page_list_wired,
                                    __unused uint32_t * pagesOut)
{
}

// mark pages not to be saved and not for scratch usage during restore
void
hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
                                  hibernate_page_list_t * page_list_wired,
                                  uint32_t * pagesOut)
{
    boot_args * args = (boot_args *) PE_state.bootArgs;

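    /*
     * The hibernation page-table pages and the EFI runtime services range
     * are excluded from the image: they are marked free and deducted from
     * *pagesOut. (Runtime services are restarted at wake rather than
     * restored.)
     */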
#if !defined(x86_64)
    hibernate_set_page_state(page_list, page_list_wired,
                             I386_HIB_PAGETABLE, I386_HIB_PAGETABLE_COUNT,
                             kIOHibernatePageStateFree);
    *pagesOut -= I386_HIB_PAGETABLE_COUNT;
#endif

    if (args->efiRuntimeServicesPageStart)
    {
        hibernate_set_page_state(page_list, page_list_wired,
                                 args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
                                 kIOHibernatePageStateFree);
        *pagesOut -= args->efiRuntimeServicesPageCount;
    }
}

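/*
 * Record the EFI runtime services ranges (and, when the boot_args revision
 * supplies them, the performance data region) in the image header, and set
 * cpu_hibernate on the boot processor's cpu data.
 */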
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header)
{
    boot_args * args = (boot_args *) PE_state.bootArgs;

    cpu_datap(0)->cpu_hibernate = 1;
    header->processorFlags = 0;

    header->runtimePages        = args->efiRuntimeServicesPageStart;
    header->runtimePageCount    = args->efiRuntimeServicesPageCount;
    header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
    if (args->Version == kBootArgsVersion1 && args->Revision >= kBootArgsRevision1_6) {
        header->performanceDataStart = args->performanceDataStart;
        header->performanceDataSize  = args->performanceDataSize;
    } else {
        header->performanceDataStart = 0;
        header->performanceDataSize  = 0;
    }

    return (KERN_SUCCESS);
}

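/*
 * Take (and below, release) the VM page queues lock, the free-page queue
 * lock, and every per-CPU local page queue lock so that the page lists
 * cannot change while the hibernation image is written; both routines are
 * no-ops unless this CPU is hibernating.
 */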
void
hibernate_vm_lock(void)
{
    if (current_cpu_datap()->cpu_hibernate)
    {
        vm_page_lock_queues();
        lck_mtx_lock(&vm_page_queue_free_lock);

        if (vm_page_local_q) {
            uint32_t i;

            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl *lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                VPL_LOCK(&lq->vpl_lock);
            }
        }
    }
}

void
hibernate_vm_unlock(void)
{
    if (current_cpu_datap()->cpu_hibernate)
    {
        if (vm_page_local_q) {
            uint32_t i;

            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl *lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                VPL_UNLOCK(&lq->vpl_lock);
            }
        }
        lck_mtx_unlock(&vm_page_queue_free_lock);
        vm_page_unlock_queues();
    }
}