/*
 * Source: apple/xnu — osfmk/kern/hibernate.c
 * (blob c7c67463b21cf82f953373dec7f0cfb5a8c8bc15, via git.saurik.com mirror)
 */
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <kern/kalloc.h>
32 #include <kern/machine.h>
33 #include <kern/misc_protos.h>
34 #include <kern/thread.h>
35 #include <kern/processor.h>
36 #include <mach/machine.h>
37 #include <mach/processor_info.h>
38 #include <mach/mach_types.h>
39 #include <default_pager/default_pager_internal.h>
40 #include <IOKit/IOPlatformExpert.h>
41 #define KERNEL
42
43 #include <IOKit/IOHibernatePrivate.h>
44 #include <vm/vm_page.h>
45 #include <vm/vm_pageout.h>
46
47 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
48
/*
 * Head of a singly-linked list (threaded through pageq.next) of pages
 * "gobbled" off the free list by hibernate_setup() to shrink the
 * hibernation image.  hibernate_page_list_setall() marks these pages as
 * not needing to be saved; hibernate_teardown() frees them back to the VM.
 */
static vm_page_t hibernate_gobble_queue;
50
51 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
52
53 static void
54 hibernate_page_list_zero(hibernate_page_list_t *list)
55 {
56 uint32_t bank;
57 hibernate_bitmap_t * bitmap;
58
59 bitmap = &list->bank_bitmap[0];
60 for (bank = 0; bank < list->bank_count; bank++)
61 {
62 uint32_t bit, last_bit;
63 uint32_t *bitmap_word;
64
65 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
66
67 // Set out-of-bound bits at end of bitmap.
68 bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1];
69 last_bit = ((bitmap->last_page - bitmap->first_page) & 31);
70 for (bit = 31; bit > last_bit; bit--) {
71 *bitmap_word |= (0x80000000 >> bit);
72 }
73
74 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
75 }
76 }
77
78
79 static boolean_t
80 consider_discard(vm_page_t m)
81 {
82 register vm_object_t object = 0;
83 int refmod_state;
84 boolean_t discard = FALSE;
85
86 do
87 {
88 if(m->private)
89 panic("consider_discard: private");
90
91 if (!vm_object_lock_try(m->object))
92 break;
93
94 object = m->object;
95
96 if (m->wire_count != 0)
97 break;
98 if (m->precious)
99 break;
100
101 if (m->busy || !object->alive)
102 /*
103 * Somebody is playing with this page.
104 */
105 break;
106
107 if (m->absent || m->unusual || m->error)
108 /*
109 * If it's unusual in anyway, ignore it
110 */
111 break;
112
113 if (m->cleaning)
114 break;
115
116 if (!m->dirty)
117 {
118 refmod_state = pmap_get_refmod(m->phys_page);
119
120 if (refmod_state & VM_MEM_REFERENCED)
121 m->reference = TRUE;
122 if (refmod_state & VM_MEM_MODIFIED)
123 m->dirty = TRUE;
124 }
125
126 /*
127 * If it's clean we can discard the page on wakeup.
128 */
129 discard = !m->dirty;
130 }
131 while (FALSE);
132
133 if (object)
134 vm_object_unlock(object);
135
136 return (discard);
137 }
138
139
140 static void
141 discard_page(vm_page_t m)
142 {
143 if (m->absent || m->unusual || m->error)
144 /*
145 * If it's unusual in anyway, ignore
146 */
147 return;
148
149 if (!m->no_isync)
150 {
151 int refmod_state = pmap_disconnect(m->phys_page);
152
153 if (refmod_state & VM_MEM_REFERENCED)
154 m->reference = TRUE;
155 if (refmod_state & VM_MEM_MODIFIED)
156 m->dirty = TRUE;
157 }
158
159 if (m->dirty)
160 panic("discard_page(%p) dirty", m);
161 if (m->laundry)
162 panic("discard_page(%p) laundry", m);
163 if (m->private)
164 panic("discard_page(%p) private", m);
165 if (m->fictitious)
166 panic("discard_page(%p) fictitious", m);
167
168 vm_page_free(m);
169 }
170
/*
 * Build the hibernation save bitmaps.
 *
 * Bits zero in the bitmaps => page needs to be saved.  All pages default
 * to be saved; pages known to VM to not need saving are subtracted by
 * setting their bits.  Wired pages to be saved are represented in
 * page_list_wired, pageable pages in page_list.
 *
 *   page_list       - bitmap of pageable pages to save
 *   page_list_wired - bitmap of wired pages to save
 *   pagesOut        - returns page_count less the free/gobbled pages
 */

void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
			   hibernate_page_list_t * page_list_wired,
			   uint32_t * pagesOut)
{
    uint64_t start, end, nsec;
    vm_page_t m;
    uint32_t pages = page_list->page_count;
    uint32_t count_zf = 0, count_inactive = 0, count_active = 0;
    /* Every page starts out assumed wired; queue walks below subtract. */
    uint32_t count_wire = pages;
    uint32_t count_discard_active = 0, count_discard_inactive = 0;
    uint32_t i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    /* All bits clear: every page assumed saved (and wired) until shown otherwise. */
    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    /* Pages gobbled by hibernate_setup() need no saving at all. */
    m = (vm_page_t) hibernate_gobble_queue;
    while(m)
    {
	pages--;
	count_wire--;
	hibernate_page_bitset(page_list, TRUE, m->phys_page);
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
	m = (vm_page_t) m->pageq.next;
    }

    /* Free pages need no saving either. */
    m = (vm_page_t) vm_page_queue_free;
    while(m)
    {
	pages--;
	count_wire--;
	hibernate_page_bitset(page_list, TRUE, m->phys_page);
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
	m = (vm_page_t) m->pageq.next;
    }

    /* Zero-fill queue: pageable, so clear from the wired bitmap; clean
     * pages may optionally be discarded instead of saved. */
    queue_iterate( &vm_page_queue_zf,
		   m,
		   vm_page_t,
		   pageq )
    {
	if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    /* Marked set in page_list: not saved in the pageable image. */
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_inactive++;
	}
	else
	    count_zf++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    /* Inactive queue: same treatment as the zero-fill queue. */
    queue_iterate( &vm_page_queue_inactive,
		   m,
		   vm_page_t,
		   pageq )
    {
	if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_inactive++;
	}
	else
	    count_inactive++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    /* Active queue: discard only under the (separate) DiscardCleanActive mode. */
    queue_iterate( &vm_page_queue_active,
		   m,
		   vm_page_t,
		   pageq )
    {
	if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
	 && consider_discard(m))
	{
	    hibernate_page_bitset(page_list, TRUE, m->phys_page);
	    count_discard_active++;
	}
	else
	    count_active++;
	count_wire--;
	hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap
    // Any page still zero (to be saved) in the wired bitmap is marked set
    // (not saved) in the pageable bitmap, so no page is saved twice.

    uint32_t bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
	for (i = 0; i < bitmap->bitmapwords; i++)
	    bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
	bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
	bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
	       pages, count_wire, count_active, count_inactive, count_zf,
	       count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}
297
298 void
299 hibernate_page_list_discard(hibernate_page_list_t * page_list)
300 {
301 uint64_t start, end, nsec;
302 vm_page_t m;
303 vm_page_t next;
304 uint32_t count_discard_active = 0, count_discard_inactive = 0;
305
306 clock_get_uptime(&start);
307
308 m = (vm_page_t) queue_first(&vm_page_queue_zf);
309 while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
310 {
311 next = (vm_page_t) m->pageq.next;
312 if (hibernate_page_bittst(page_list, m->phys_page))
313 {
314 discard_page(m);
315 count_discard_inactive++;
316 }
317 m = next;
318 }
319
320 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
321 while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
322 {
323 next = (vm_page_t) m->pageq.next;
324 if (hibernate_page_bittst(page_list, m->phys_page))
325 {
326 discard_page(m);
327 count_discard_inactive++;
328 }
329 m = next;
330 }
331
332 m = (vm_page_t) queue_first(&vm_page_queue_active);
333 while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
334 {
335 next = (vm_page_t) m->pageq.next;
336 if (hibernate_page_bittst(page_list, m->phys_page))
337 {
338 discard_page(m);
339 count_discard_active++;
340 }
341 m = next;
342 }
343
344 clock_get_uptime(&end);
345 absolutetime_to_nanoseconds(end - start, &nsec);
346 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
347 nsec / 1000000ULL,
348 count_discard_active, count_discard_inactive);
349 }
350
351 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
352
/*
 * Prepare for hibernation: allocate the two save bitmaps and optionally
 * "gobble" free pages to shrink the eventual hibernation image.
 *
 *   header              - image header; processor flags filled in via
 *                         hibernate_processor_setup()
 *   free_page_ratio     - percentage of all pages to try to force free
 *   free_page_time      - deadline for gobbling, in milliseconds
 *   page_list_ret       - returns bitmap for pageable pages
 *   page_list_wired_ret - returns bitmap for wired pages
 *   encryptedswap       - returns the default pager's dp_encryption flag
 *
 * Returns KERN_RESOURCE_SHORTAGE if a bitmap allocation fails, else
 * KERN_SUCCESS.  On failure nothing is left allocated.
 */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                        uint32_t free_page_ratio,
                        uint32_t free_page_time,
                        hibernate_page_list_t ** page_list_ret,
                        hibernate_page_list_t ** page_list_wired_ret,
                        boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t m;
    uint32_t i, gobble_count;

    *page_list_ret = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
	return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
	/* Undo the first allocation before reporting the shortage. */
	kfree(page_list, page_list->list_size);
	return (KERN_RESOURCE_SHORTAGE);
    }

    // NOTE(review): dp_encryption comes from the default pager; presumably
    // it indicates swap encryption is enabled -- confirm in default_pager.
    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
	    header->processorFlags, gobble_count);

    if (gobble_count)
    {
	uint64_t start, end, timeout, nsec;
	/* free_page_time is scaled by 1 ms (1e6 ns) per unit. */
	clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
	clock_get_uptime(&start);

	for (i = 0; i < gobble_count; i++)
	{
	    /* Grab a free page; if none, wait for one until the deadline. */
	    while (VM_PAGE_NULL == (m = vm_page_grab()))
	    {
		clock_get_uptime(&end);
		if (end >= timeout)
		    break;
		VM_PAGE_WAIT();
	    }
	    if (!m)
		break;	/* deadline passed with no page available */
	    m->busy = FALSE;
	    vm_page_gobble(m);

	    /* Thread the page onto the private gobble list via pageq.next. */
	    m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
	    hibernate_gobble_queue = m;
	}

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
426
427 kern_return_t
428 hibernate_teardown(hibernate_page_list_t * page_list,
429 hibernate_page_list_t * page_list_wired)
430 {
431 vm_page_t m, next;
432 uint32_t count = 0;
433
434 m = (vm_page_t) hibernate_gobble_queue;
435 while(m)
436 {
437 next = (vm_page_t) m->pageq.next;
438 vm_page_free(m);
439 count++;
440 m = next;
441 }
442 hibernate_gobble_queue = VM_PAGE_NULL;
443
444 if (count)
445 HIBLOG("Freed %d pages\n", count);
446
447 if (page_list)
448 kfree(page_list, page_list->list_size);
449 if (page_list_wired)
450 kfree(page_list_wired, page_list_wired->list_size);
451
452 return (KERN_SUCCESS);
453 }
454