]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/hibernate_i386.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / osfmk / i386 / hibernate_i386.c
1 /*
2 * Copyright (c) 2004-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/machine.h>
30 #include <kern/misc_protos.h>
31 #include <kern/thread.h>
32 #include <kern/processor.h>
33 #include <kern/kalloc.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <i386/pmap.h>
38 #include <kern/cpu_data.h>
39 #include <IOKit/IOPlatformExpert.h>
40
41 #include <pexpert/i386/efi.h>
42
43 #include <IOKit/IOHibernatePrivate.h>
44 #include <vm/vm_page.h>
45 #include <i386/i386_lowmem.h>
46 #include <san/kasan.h>
47
48 extern ppnum_t max_ppnum;
49
50 #define MAX_BANKS 32
51
52 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
53
/*
 * Build and return a hibernate_page_list_t describing all DRAM in the
 * system, derived from the EFI memory map passed in by the booter.
 *
 * The returned structure is a header followed by one hibernate_bitmap_t
 * per contiguous DRAM bank; each bank carries one bit per physical page.
 * The caller owns the kalloc'ed result.  Returns NULL if the map needs
 * more than MAX_BANKS banks or the allocation fails.
 *
 * 'log' enables kprintf/printf diagnostics of the discovered banks.
 */
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log)
{
	ppnum_t base, num;
	vm_size_t size;
	uint32_t bank, num_banks;
	uint32_t pages, page_count;
	hibernate_page_list_t * list;
	hibernate_bitmap_t * bitmap;

	EfiMemoryRange * mptr;
	uint32_t mcount, msize, i;
	/* banks kept sorted by first_page, coalesced when adjacent */
	hibernate_bitmap_t dram_ranges[MAX_BANKS];
	boot_args * args = (boot_args *) PE_state.bootArgs;
	/* pages of DRAM the OS does not manage (ACPI NVS, PAL code) */
	uint32_t non_os_pagecount;
	ppnum_t pnmax = max_ppnum;

	mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
	if (args->MemoryMapDescriptorSize == 0) {
		panic("Invalid memory map descriptor size");
	}
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

#if KASAN
	/* adjust max page number to include stolen memory */
	if (atop(shadow_ptop) > pnmax) {
		pnmax = (ppnum_t)atop(shadow_ptop);
	}
#endif

	num_banks = 0;
	non_os_pagecount = 0;
	/* descriptors are msize bytes apart, which may exceed sizeof(EfiMemoryRange) */
	for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
		base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
		num = (ppnum_t) mptr->NumberOfPages;

#if KASAN
		if (i == shadow_stolen_idx) {
			/*
			 * Add all stolen pages to the bitmap. Later we will prune the unused
			 * pages.
			 */
			num += shadow_pages_total;
		}
#endif

		/* clip the range to [0, pnmax]; drop it if nothing remains */
		if (base > pnmax) {
			continue;
		}
		if ((base + num - 1) > pnmax) {
			num = pnmax - base + 1;
		}
		if (!num) {
			continue;
		}

		switch (mptr->Type) {
		// any kind of dram
		case kEfiACPIMemoryNVS:
		case kEfiPalCode:
			non_os_pagecount += num;

		// OS used dram — intentional fallthrough: NVS/PalCode is DRAM too
		case kEfiLoaderCode:
		case kEfiLoaderData:
		case kEfiBootServicesCode:
		case kEfiBootServicesData:
		case kEfiConventionalMemory:

			/* find insertion point: first bank starting above this range */
			for (bank = 0; bank < num_banks; bank++) {
				if (dram_ranges[bank].first_page <= base) {
					continue;
				}
				/* range abuts the next bank's start: extend it downward */
				if ((base + num) == dram_ranges[bank].first_page) {
					dram_ranges[bank].first_page = base;
					num = 0;
				}
				break;
			}
			if (!num) {
				break;
			}

			/* range abuts the previous bank's end: extend that bank upward */
			if (bank && (base == (1 + dram_ranges[bank - 1].last_page))) {
				bank--;
			} else {
				/* otherwise insert a new bank at position 'bank' */
				num_banks++;
				if (num_banks >= MAX_BANKS) {
					/* overflow detected after the loop; NULL returned below */
					break;
				}
				/* shift the later banks up to keep the array sorted */
				bcopy(&dram_ranges[bank],
				    &dram_ranges[bank + 1],
				    (num_banks - bank - 1) * sizeof(hibernate_bitmap_t));
				dram_ranges[bank].first_page = base;
			}
			dram_ranges[bank].last_page = base + num - 1;
			break;

		// runtime services will be restarted, so no save
		case kEfiRuntimeServicesCode:
		case kEfiRuntimeServicesData:
		// contents are volatile once the platform expert starts
		case kEfiACPIReclaimMemory:
		// non dram
		case kEfiReservedMemoryType:
		case kEfiUnusableMemory:
		case kEfiMemoryMappedIO:
		case kEfiMemoryMappedIOPortSpace:
		default:
			break;
		}
	}

	if (num_banks >= MAX_BANKS) {
		return NULL;
	}

	// size the hibernation bitmap

	size = sizeof(hibernate_page_list_t);
	page_count = 0;
	for (bank = 0; bank < num_banks; bank++) {
		pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
		page_count += pages;
		/* one uint32_t bitmap word per 32 pages, rounded up */
		size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
	}

	list = (hibernate_page_list_t *)kalloc(size);
	if (!list) {
		return list;
	}

	list->list_size = (uint32_t)size;
	list->page_count = page_count;
	list->bank_count = num_banks;

	// convert to hibernation bitmap.

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < num_banks; bank++) {
		bitmap->first_page = dram_ranges[bank].first_page;
		bitmap->last_page = dram_ranges[bank].last_page;
		bitmap->bitmapwords = (bitmap->last_page + 1
		    - bitmap->first_page + 31) >> 5;
		if (log) {
			kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n",
			    bank, bitmap->first_page, bitmap->last_page);
		}
		/* banks are variable-length; step past this bank's bitmap words */
		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}
	if (log) {
		printf("efi pagecount %d\n", non_os_pagecount);
	}

	return list;
}
211
212 // mark pages not to be saved, but available for scratch usage during restore
213
/*
 * Machine-specific hook to mark pages not to be saved but available for
 * scratch usage during restore.  Intentionally empty on i386/x86_64 —
 * no machine-specific pages need this treatment here.
 */
void
hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
    __unused hibernate_page_list_t * page_list_wired,
    __unused boolean_t preflight,
    __unused uint32_t * pagesOut)
{
}
221
222 // mark pages not to be saved and not for scratch usage during restore
223 void
224 hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
225 hibernate_page_list_t * page_list_wired,
226 uint32_t * pagesOut)
227 {
228 boot_args * args = (boot_args *) PE_state.bootArgs;
229
230 if (args->efiRuntimeServicesPageStart) {
231 hibernate_set_page_state(page_list, page_list_wired,
232 args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
233 kIOHibernatePageStateFree);
234 *pagesOut -= args->efiRuntimeServicesPageCount;
235 }
236 }
237
238 kern_return_t
239 hibernate_processor_setup(IOHibernateImageHeader * header)
240 {
241 boot_args * args = (boot_args *) PE_state.bootArgs;
242
243 cpu_datap(0)->cpu_hibernate = 1;
244 header->processorFlags = 0;
245
246 header->runtimePages = args->efiRuntimeServicesPageStart;
247 header->runtimePageCount = args->efiRuntimeServicesPageCount;
248 header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
249 header->performanceDataStart = args->performanceDataStart;
250 header->performanceDataSize = args->performanceDataSize;
251
252 return KERN_SUCCESS;
253 }
254
255 static boolean_t hibernate_vm_locks_safe;
256
257 void
258 hibernate_vm_lock(void)
259 {
260 if (current_cpu_datap()->cpu_hibernate) {
261 hibernate_vm_lock_queues();
262 hibernate_vm_locks_safe = TRUE;
263 }
264 }
265
266 void
267 hibernate_vm_unlock(void)
268 {
269 assert(FALSE == ml_get_interrupts_enabled());
270 if (current_cpu_datap()->cpu_hibernate) {
271 hibernate_vm_unlock_queues();
272 }
273 ml_set_is_quiescing(TRUE);
274 }
275
276 // ACPI calls hibernate_vm_lock(), interrupt disable, hibernate_vm_unlock() on sleep,
277 // hibernate_vm_lock_end() and interrupt enable on wake.
278 // VM locks are safely single threaded between hibernate_vm_lock() and hibernate_vm_lock_end().
279
280 void
281 hibernate_vm_lock_end(void)
282 {
283 assert(FALSE == ml_get_interrupts_enabled());
284 hibernate_vm_locks_safe = FALSE;
285 ml_set_is_quiescing(FALSE);
286 }
287
288 boolean_t
289 hibernate_vm_locks_are_safe(void)
290 {
291 assert(FALSE == ml_get_interrupts_enabled());
292 return hibernate_vm_locks_safe;
293 }