/* Source: Apple xnu-1504.9.17 — osfmk/i386/hibernate_i386.c */
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/machine.h>
30 #include <kern/misc_protos.h>
31 #include <kern/thread.h>
32 #include <kern/processor.h>
33 #include <kern/kalloc.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <i386/pmap.h>
38 #include <kern/cpu_data.h>
39 #include <IOKit/IOPlatformExpert.h>
40
41 #include <pexpert/i386/efi.h>
42
43 #include <IOKit/IOHibernatePrivate.h>
44 #include <vm/vm_page.h>
45 #include <i386/i386_lowmem.h>
46
47 extern ppnum_t max_ppnum;
48
49 #define MAX_BANKS 32
50
51 int hibernate_page_list_allocate_avoided;
52
53 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
54
hibernate_page_list_t *
hibernate_page_list_allocate(void)
{
    /*
     * Build the container for the hibernation page bitmaps.
     *
     * Walks the EFI memory map handed over by the booter, gathers every
     * range that is DRAM (and therefore a candidate for saving) into a
     * sorted, coalesced list of at most MAX_BANKS banks, then kallocs a
     * single hibernate_page_list_t holding one hibernate_bitmap_t
     * (header plus bitmap words) per bank.  The bitmap words themselves
     * are NOT initialized here — presumably the caller sets/clears them;
     * verify against callers.
     *
     * Returns NULL if the map needs MAX_BANKS or more banks, or if the
     * allocation fails.  Side effect: recomputes the global
     * hibernate_page_list_allocate_avoided count of pages deliberately
     * excluded from the image (EFI runtime services / ACPI reclaim).
     */
    ppnum_t base, num;
    vm_size_t size;
    uint32_t bank, num_banks;
    uint32_t pages, page_count;
    hibernate_page_list_t * list;
    hibernate_bitmap_t * bitmap;

    EfiMemoryRange * mptr;
    uint32_t mcount, msize, i;
    hibernate_bitmap_t dram_ranges[MAX_BANKS];
    boot_args * args = (boot_args *) PE_state.bootArgs;

    /* EFI descriptors are msize bytes apart in the map — the booter's
     * descriptor size, not sizeof(EfiMemoryRange); hence the explicit
     * byte-offset stepping of mptr in the loop below. */
    mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
    if (args->MemoryMapDescriptorSize == 0)
        panic("Invalid memory map descriptor size");
    msize = args->MemoryMapDescriptorSize;
    mcount = args->MemoryMapSize / msize;

    hibernate_page_list_allocate_avoided = 0;

    num_banks = 0;
    for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize))
    {
        base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT);
        num = (ppnum_t) mptr->NumberOfPages;

        /* Clip each range against max_ppnum: pages beyond it are not
         * managed by the VM system and need no bitmap coverage. */
        if (base > max_ppnum)
            continue;
        if ((base + num - 1) > max_ppnum)
            num = max_ppnum - base + 1;
        if (!num)
            continue;

        switch (mptr->Type)
        {
            // any kind of dram
            case kEfiLoaderCode:
            case kEfiLoaderData:
            case kEfiBootServicesCode:
            case kEfiBootServicesData:
            case kEfiConventionalMemory:
            case kEfiACPIMemoryNVS:
            case kEfiPalCode:

                /* Insert into dram_ranges[], kept sorted by first_page.
                 * First find the insertion slot; if the new range ends
                 * exactly where an existing bank begins, just extend that
                 * bank downward and mark the range consumed (num = 0). */
                for (bank = 0; bank < num_banks; bank++)
                {
                    if (dram_ranges[bank].first_page <= base)
                        continue;
                    if ((base + num) == dram_ranges[bank].first_page)
                    {
                        dram_ranges[bank].first_page = base;
                        num = 0;
                    }
                    break;
                }
                if (!num) break;

                /* If the new range begins right after the preceding bank,
                 * extend that bank upward; otherwise open a new bank at
                 * the insertion slot, shifting the later entries up by
                 * one.  bcopy on xnu handles the overlapping move. */
                if (bank && (base == (1 + dram_ranges[bank - 1].last_page)))
                    bank--;
                else
                {
                    num_banks++;
                    /* Too many discontiguous banks: stop filling in this
                     * entry — the num_banks >= MAX_BANKS check after the
                     * scan makes the function return NULL. */
                    if (num_banks >= MAX_BANKS) break;
                    bcopy(&dram_ranges[bank],
                          &dram_ranges[bank + 1],
                          (num_banks - bank - 1) * sizeof(hibernate_bitmap_t));
                    dram_ranges[bank].first_page = base;
                }
                dram_ranges[bank].last_page = base + num - 1;
                break;

            // runtime services will be restarted, so no save
            case kEfiRuntimeServicesCode:
            case kEfiRuntimeServicesData:
            // contents are volatile once the platform expert starts
            case kEfiACPIReclaimMemory:
                /* Counted so callers can account for pages deliberately
                 * left out of the image. */
                hibernate_page_list_allocate_avoided += num;
                break;

            // non dram
            case kEfiReservedMemoryType:
            case kEfiUnusableMemory:
            case kEfiMemoryMappedIO:
            case kEfiMemoryMappedIOPortSpace:
            default:
                break;
        }
    }

    if (num_banks >= MAX_BANKS)
        return (NULL);

    // size the hibernation bitmap

    /* One list header, plus per bank: a bitmap header and one 32-bit
     * word per 32 pages, rounded up ((pages + 31) >> 5). */
    size = sizeof(hibernate_page_list_t);
    page_count = 0;
    for (bank = 0; bank < num_banks; bank++) {
        pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page;
        page_count += pages;
        size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t);
    }

    list = (hibernate_page_list_t *)kalloc(size);
    if (!list)
        return (list);

    list->list_size = (uint32_t)size;
    list->page_count = page_count;
    list->bank_count = num_banks;

    // convert to hibernation bitmap.

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < num_banks; bank++)
    {
        bitmap->first_page = dram_ranges[bank].first_page;
        bitmap->last_page = dram_ranges[bank].last_page;
        bitmap->bitmapwords = (bitmap->last_page + 1
                               - bitmap->first_page + 31) >> 5;
        kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", bank,
                bitmap->first_page,
                bitmap->last_page);
        /* Banks are packed back to back: step past this bank's bitmap
         * words to reach the next hibernate_bitmap_t header. */
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }

    return (list);
}
185
// mark pages not to be saved, but available for scratch usage during restore

/*
 * Machine-specific hook for marking pages that need not be saved but may
 * be reused as scratch space during restore.  Intentionally empty on
 * this platform — all parameters are unused.
 */
void
hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list,
                                    __unused hibernate_page_list_t * page_list_wired,
                                    __unused uint32_t * pagesOut)
{
}
194
// mark pages not to be saved and not for scratch usage during restore

/*
 * Exclude volatile machine ranges from the hibernation image and deduct
 * them from *pagesOut (the outbound count of pages to be saved):
 *  - on 32-bit builds, the low-memory hibernate page tables
 *    (I386_HIB_PAGETABLE..+COUNT), which are rebuilt for resume;
 *  - the EFI runtime-services pages recorded in boot_args, since EFI
 *    runtime services are restarted after wake.
 * Both ranges are marked kIOHibernatePageStateFree in page_list and
 * page_list_wired via hibernate_set_page_state().
 */
void
hibernate_page_list_set_volatile( hibernate_page_list_t * page_list,
                                  hibernate_page_list_t * page_list_wired,
                                  uint32_t * pagesOut)
{
    boot_args * args = (boot_args *) PE_state.bootArgs;

/* NOTE(review): this guard tests bare `x86_64`, not the compiler-defined
 * `__x86_64__` — presumably `x86_64` is defined by the kernel build
 * system for 64-bit configs; confirm, else this block is compiled into
 * 64-bit kernels too. */
#if !defined(x86_64)
    hibernate_set_page_state(page_list, page_list_wired,
                I386_HIB_PAGETABLE, I386_HIB_PAGETABLE_COUNT,
                kIOHibernatePageStateFree);
    *pagesOut -= I386_HIB_PAGETABLE_COUNT;
#endif

    if (args->efiRuntimeServicesPageStart)
    {
        hibernate_set_page_state(page_list, page_list_wired,
                args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount,
                kIOHibernatePageStateFree);
        *pagesOut -= args->efiRuntimeServicesPageCount;
    }
}
218
219 kern_return_t
220 hibernate_processor_setup(IOHibernateImageHeader * header)
221 {
222 boot_args * args = (boot_args *) PE_state.bootArgs;
223
224 cpu_datap(0)->cpu_hibernate = 1;
225 header->processorFlags = 0;
226
227 header->runtimePages = args->efiRuntimeServicesPageStart;
228 header->runtimePageCount = args->efiRuntimeServicesPageCount;
229 header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart;
230 if (args->Version == kBootArgsVersion1 && args->Revision >= kBootArgsRevision1_6) {
231 header->performanceDataStart = args->performanceDataStart;
232 header->performanceDataSize = args->performanceDataSize;
233 } else {
234 header->performanceDataStart = 0;
235 header->performanceDataSize = 0;
236 }
237
238 return (KERN_SUCCESS);
239 }
240
241 void
242 hibernate_vm_lock(void)
243 {
244 if (current_cpu_datap()->cpu_hibernate)
245 {
246 vm_page_lock_queues();
247 lck_mtx_lock(&vm_page_queue_free_lock);
248
249 if (vm_page_local_q) {
250 uint32_t i;
251
252 for (i = 0; i < vm_page_local_q_count; i++) {
253 struct vpl *lq;
254
255 lq = &vm_page_local_q[i].vpl_un.vpl;
256
257 VPL_LOCK(&lq->vpl_lock);
258 }
259 }
260 }
261 }
262
263 void
264 hibernate_vm_unlock(void)
265 {
266 if (current_cpu_datap()->cpu_hibernate)
267 {
268 if (vm_page_local_q) {
269 uint32_t i;
270
271 for (i = 0; i < vm_page_local_q_count; i++) {
272 struct vpl *lq;
273
274 lq = &vm_page_local_q[i].vpl_un.vpl;
275
276 VPL_UNLOCK(&lq->vpl_lock);
277 }
278 }
279 lck_mtx_unlock(&vm_page_queue_free_lock);
280 vm_page_unlock_queues();
281 }
282 }