/*
 * osfmk/kern/hibernate.c
 * (apple/xnu.git, blob a3bf15f1c29e99eab66ad8bfbf04a405df72b5a9)
 */
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <kern/kalloc.h>
25 #include <kern/machine.h>
26 #include <kern/misc_protos.h>
27 #include <kern/thread.h>
28 #include <kern/processor.h>
29 #include <mach/machine.h>
30 #include <mach/processor_info.h>
31 #include <mach/mach_types.h>
32 #include <default_pager/default_pager_internal.h>
33 #include <IOKit/IOPlatformExpert.h>
34 #define KERNEL
35
36 #include <IOKit/IOHibernatePrivate.h>
37 #include <vm/vm_page.h>
38 #include <vm/vm_pageout.h>
39
40 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
41
/* Singly-linked list (chained through pageq.next) of pages "gobbled" from
 * the free list by hibernate_setup() to shrink the hibernate image.
 * Subtracted from the save sets in hibernate_page_list_setall() and
 * returned to the VM in hibernate_teardown(). */
static vm_page_t hibernate_gobble_queue;
43
44 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
45
46 static void
47 hibernate_page_list_zero(hibernate_page_list_t *list)
48 {
49 uint32_t bank;
50 hibernate_bitmap_t * bitmap;
51
52 bitmap = &list->bank_bitmap[0];
53 for (bank = 0; bank < list->bank_count; bank++)
54 {
55 uint32_t bit, last_bit;
56 uint32_t *bitmap_word;
57
58 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
59
60 // Set out-of-bound bits at end of bitmap.
61 bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1];
62 last_bit = ((bitmap->last_page - bitmap->first_page) & 31);
63 for (bit = 31; bit > last_bit; bit--) {
64 *bitmap_word |= (0x80000000 >> bit);
65 }
66
67 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
68 }
69 }
70
71
/*
 * Decide whether page m may be discarded (left out of the hibernate
 * image) instead of saved; a discarded page is simply refaulted after
 * wakeup.  Only a clean, unwired, non-precious, otherwise-unremarkable
 * page qualifies.
 *
 * The object lock is only tried, never blocked on: if it cannot be
 * taken the page is conservatively kept.  As a side effect the pmap
 * referenced/modified state is folded into m->reference / m->dirty so
 * that hardware-level dirtiness is not lost.
 *
 * Returns TRUE if the page can safely be dropped on wakeup.
 */
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if(m->private)
            panic("consider_discard: private");

        /* Try-only: never block for the object lock on this path. */
        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
           /*
            *	Somebody is playing with this page.
            */
            break;

        if (m->absent || m->unusual || m->error)
           /*
            *	If it's unusual in anyway, ignore it
            */
            break;

        if (m->cleaning)
            break;

        if (!m->dirty)
        {
            /* Page looks clean at the VM level; consult the pmap in
               case the hardware has referenced or modified it. */
            refmod_state = pmap_get_refmod(m->phys_page);
        
            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
131
132
133 static void
134 discard_page(vm_page_t m)
135 {
136 if (m->absent || m->unusual || m->error)
137 /*
138 * If it's unusual in anyway, ignore
139 */
140 return;
141
142 if (!m->no_isync)
143 {
144 int refmod_state = pmap_disconnect(m->phys_page);
145
146 if (refmod_state & VM_MEM_REFERENCED)
147 m->reference = TRUE;
148 if (refmod_state & VM_MEM_MODIFIED)
149 m->dirty = TRUE;
150 }
151
152 if (m->dirty)
153 panic("discard_page(%p) dirty", m);
154 if (m->laundry)
155 panic("discard_page(%p) laundry", m);
156 if (m->private)
157 panic("discard_page(%p) private", m);
158 if (m->fictitious)
159 panic("discard_page(%p) fictitious", m);
160
161 vm_page_free(m);
162 }
163
164 /*
165 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
166 pages known to VM to not need saving are subtracted.
167 Wired pages to be saved are present in page_list_wired, pageable in page_list.
168 */
169
/*
 * Build the bitmaps deciding which physical pages must be written to
 * the hibernate image.
 *
 * Bits ZERO in the bitmaps mean "needs to be saved": all pages start as
 * save, and pages the VM knows need no saving have their bits set.
 * page_list ends up describing pageable pages, page_list_wired the
 * wired ones.  *pagesOut returns the count of pages to be saved.
 */
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
			   hibernate_page_list_t * page_list_wired,
			   uint32_t * pagesOut)
{
    uint64_t start, end, nsec;
    vm_page_t m;
    uint32_t pages = page_list->page_count;
    uint32_t count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t count_wire = pages;
    uint32_t count_discard_active = 0, count_discard_inactive = 0;
    uint32_t i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    /* Pages gobbled by hibernate_setup() need no saving at all:
       subtract them from both lists and from the wired count. */
    m = (vm_page_t) hibernate_gobble_queue;
    while(m)
    {
	pages--;
	count_wire--;
	hibernate_page_bitset(page_list, TRUE, m->phys_page);
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
	m = (vm_page_t) m->pageq.next;
    }

    /* Free pages likewise need no saving. */
    m = (vm_page_t) vm_page_queue_free;
    while(m)
    {
	pages--;
	count_wire--;
	hibernate_page_bitset(page_list, TRUE, m->phys_page);
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
	m = (vm_page_t) m->pageq.next;
    }

    /* Zero-fill queue: if the mode permits it and the page is judged
       discardable, mark it skipped in the pageable list; either way it
       is not wired, so mark it skipped in the wired list. */
    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
	if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_inactive++;
	}
	else
	    count_zf++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    /* Inactive queue: same policy as the zero-fill queue. */
    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
	if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_inactive++;
	}
	else
	    count_inactive++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    /* Active queue: discarding here is gated by a separate mode bit. */
    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
	if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_active++;
	}
	else
	    count_active++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap

    uint32_t bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    /* A page still zero (save) in the wired list is wired: set its bit
       in the pageable list so it is saved only via the wired list. */
    bitmap = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
	for (i = 0; i < bitmap->bitmapwords; i++)
	    bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
	bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
	bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
		pages, count_wire, count_active, count_inactive, count_zf,
		count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}
290
291 void
292 hibernate_page_list_discard(hibernate_page_list_t * page_list)
293 {
294 uint64_t start, end, nsec;
295 vm_page_t m;
296 vm_page_t next;
297 uint32_t count_discard_active = 0, count_discard_inactive = 0;
298
299 clock_get_uptime(&start);
300
301 m = (vm_page_t) queue_first(&vm_page_queue_zf);
302 while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
303 {
304 next = (vm_page_t) m->pageq.next;
305 if (hibernate_page_bittst(page_list, m->phys_page))
306 {
307 discard_page(m);
308 count_discard_inactive++;
309 }
310 m = next;
311 }
312
313 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
314 while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
315 {
316 next = (vm_page_t) m->pageq.next;
317 if (hibernate_page_bittst(page_list, m->phys_page))
318 {
319 discard_page(m);
320 count_discard_inactive++;
321 }
322 m = next;
323 }
324
325 m = (vm_page_t) queue_first(&vm_page_queue_active);
326 while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
327 {
328 next = (vm_page_t) m->pageq.next;
329 if (hibernate_page_bittst(page_list, m->phys_page))
330 {
331 discard_page(m);
332 count_discard_active++;
333 }
334 m = next;
335 }
336
337 clock_get_uptime(&end);
338 absolutetime_to_nanoseconds(end - start, &nsec);
339 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
340 nsec / 1000000ULL,
341 count_discard_active, count_discard_inactive);
342 }
343
344 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
345
/*
 * Prepare for hibernation.
 *
 * Allocates the two page-list bitmaps, reports whether the default
 * pager encrypts swap, and "gobbles" up to free_page_ratio percent of
 * physical pages off the free list (waiting up to free_page_time
 * milliseconds for pages to appear) to shrink the eventual image.
 *
 * Returns KERN_RESOURCE_SHORTAGE if either page list cannot be
 * allocated; KERN_SUCCESS otherwise.  On success the caller owns
 * *page_list_ret / *page_list_wired_ret, which are released by
 * hibernate_teardown().
 */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                        uint32_t free_page_ratio,
                        uint32_t free_page_time,
                        hibernate_page_list_t ** page_list_ret,
                        hibernate_page_list_t ** page_list_wired_ret,
                        boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t m;
    uint32_t i, gobble_count;

    *page_list_ret = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        /* Undo the first allocation before failing. */
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    /* Tell the caller whether the default pager encrypts swap. */
    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
            header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        /* free_page_time is in milliseconds (1000*1000 ns scale). */
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            /* Grab a free page, blocking in VM_PAGE_WAIT() until one
               appears or the deadline passes; m is VM_PAGE_NULL when
               the deadline is the reason we stopped. */
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            /* Chain the page onto the gobble list via pageq.next;
               hibernate_teardown() frees the list. */
            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
419
420 kern_return_t
421 hibernate_teardown(hibernate_page_list_t * page_list,
422 hibernate_page_list_t * page_list_wired)
423 {
424 vm_page_t m, next;
425 uint32_t count = 0;
426
427 m = (vm_page_t) hibernate_gobble_queue;
428 while(m)
429 {
430 next = (vm_page_t) m->pageq.next;
431 vm_page_free(m);
432 count++;
433 m = next;
434 }
435 hibernate_gobble_queue = VM_PAGE_NULL;
436
437 if (count)
438 HIBLOG("Freed %d pages\n", count);
439
440 if (page_list)
441 kfree(page_list, page_list->list_size);
442 if (page_list_wired)
443 kfree(page_list_wired, page_list_wired->list_size);
444
445 return (KERN_SUCCESS);
446 }
447