]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/hibernate.c
be4cc1564c2b9141eeeeb87e633acd305eb61128
[apple/xnu.git] / osfmk / kern / hibernate.c
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <kern/kalloc.h>
32 #include <kern/machine.h>
33 #include <kern/misc_protos.h>
34 #include <kern/thread.h>
35 #include <kern/processor.h>
36 #include <mach/machine.h>
37 #include <mach/processor_info.h>
38 #include <mach/mach_types.h>
39 #include <default_pager/default_pager_internal.h>
40 #include <IOKit/IOPlatformExpert.h>
41 #define KERNEL
42
43 #include <IOKit/IOHibernatePrivate.h>
44 #include <vm/vm_page.h>
45 #include <vm/vm_pageout.h>
46
47 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
48
49 static vm_page_t hibernate_gobble_queue;
50
51 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
52
53 static void
54 hibernate_page_list_zero(hibernate_page_list_t *list)
55 {
56 uint32_t bank;
57 hibernate_bitmap_t * bitmap;
58
59 bitmap = &list->bank_bitmap[0];
60 for (bank = 0; bank < list->bank_count; bank++)
61 {
62 uint32_t last_bit;
63
64 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
65 // set out-of-bound bits at end of bitmap.
66 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
67 if (last_bit)
68 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
69
70 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
71 }
72 }
73
74
75 static boolean_t
76 consider_discard(vm_page_t m)
77 {
78 register vm_object_t object = 0;
79 int refmod_state;
80 boolean_t discard = FALSE;
81
82 do
83 {
84 if(m->private)
85 panic("consider_discard: private");
86
87 if (!vm_object_lock_try(m->object))
88 break;
89
90 object = m->object;
91
92 if (m->wire_count != 0)
93 break;
94 if (m->precious)
95 break;
96
97 if (m->busy || !object->alive)
98 /*
99 * Somebody is playing with this page.
100 */
101 break;
102
103 if (m->absent || m->unusual || m->error)
104 /*
105 * If it's unusual in anyway, ignore it
106 */
107 break;
108
109 if (m->cleaning)
110 break;
111
112 if (!m->dirty)
113 {
114 refmod_state = pmap_get_refmod(m->phys_page);
115
116 if (refmod_state & VM_MEM_REFERENCED)
117 m->reference = TRUE;
118 if (refmod_state & VM_MEM_MODIFIED)
119 m->dirty = TRUE;
120 }
121
122 /*
123 * If it's clean we can discard the page on wakeup.
124 */
125 discard = !m->dirty;
126 }
127 while (FALSE);
128
129 if (object)
130 vm_object_unlock(object);
131
132 return (discard);
133 }
134
135
136 static void
137 discard_page(vm_page_t m)
138 {
139 if (m->absent || m->unusual || m->error)
140 /*
141 * If it's unusual in anyway, ignore
142 */
143 return;
144
145 if (!m->no_isync)
146 {
147 int refmod_state = pmap_disconnect(m->phys_page);
148
149 if (refmod_state & VM_MEM_REFERENCED)
150 m->reference = TRUE;
151 if (refmod_state & VM_MEM_MODIFIED)
152 m->dirty = TRUE;
153 }
154
155 if (m->dirty)
156 panic("discard_page(%p) dirty", m);
157 if (m->laundry)
158 panic("discard_page(%p) laundry", m);
159 if (m->private)
160 panic("discard_page(%p) private", m);
161 if (m->fictitious)
162 panic("discard_page(%p) fictitious", m);
163
164 vm_page_free(m);
165 }
166
167 /*
168 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
169 pages known to VM to not need saving are subtracted.
170 Wired pages to be saved are present in page_list_wired, pageable in page_list.
171 */
172 extern vm_page_t vm_lopage_queue_free;
173
174 void
175 hibernate_page_list_setall(hibernate_page_list_t * page_list,
176 hibernate_page_list_t * page_list_wired,
177 uint32_t * pagesOut)
178 {
179 uint64_t start, end, nsec;
180 vm_page_t m;
181 uint32_t pages = page_list->page_count;
182 uint32_t count_zf = 0, count_inactive = 0, count_active = 0;
183 uint32_t count_wire = pages;
184 uint32_t count_discard_active = 0, count_discard_inactive = 0;
185 uint32_t i;
186
187 HIBLOG("hibernate_page_list_setall start\n");
188
189 clock_get_uptime(&start);
190
191 hibernate_page_list_zero(page_list);
192 hibernate_page_list_zero(page_list_wired);
193
194 m = (vm_page_t) hibernate_gobble_queue;
195 while(m)
196 {
197 pages--;
198 count_wire--;
199 hibernate_page_bitset(page_list, TRUE, m->phys_page);
200 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
201 m = (vm_page_t) m->pageq.next;
202 }
203
204 m = (vm_page_t) vm_page_queue_free;
205 while(m)
206 {
207 pages--;
208 count_wire--;
209 hibernate_page_bitset(page_list, TRUE, m->phys_page);
210 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
211 m = (vm_page_t) m->pageq.next;
212 }
213
214 m = (vm_page_t) vm_lopage_queue_free;
215 while(m)
216 {
217 pages--;
218 count_wire--;
219 hibernate_page_bitset(page_list, TRUE, m->phys_page);
220 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
221 m = (vm_page_t) m->pageq.next;
222 }
223
224 queue_iterate( &vm_page_queue_zf,
225 m,
226 vm_page_t,
227 pageq )
228 {
229 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
230 && consider_discard(m))
231 {
232 hibernate_page_bitset(page_list, TRUE, m->phys_page);
233 count_discard_inactive++;
234 }
235 else
236 count_zf++;
237 count_wire--;
238 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
239 }
240
241 queue_iterate( &vm_page_queue_inactive,
242 m,
243 vm_page_t,
244 pageq )
245 {
246 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
247 && consider_discard(m))
248 {
249 hibernate_page_bitset(page_list, TRUE, m->phys_page);
250 count_discard_inactive++;
251 }
252 else
253 count_inactive++;
254 count_wire--;
255 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
256 }
257
258 queue_iterate( &vm_page_queue_active,
259 m,
260 vm_page_t,
261 pageq )
262 {
263 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
264 && consider_discard(m))
265 {
266 hibernate_page_bitset(page_list, TRUE, m->phys_page);
267 count_discard_active++;
268 }
269 else
270 count_active++;
271 count_wire--;
272 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
273 }
274
275 // pull wired from hibernate_bitmap
276
277 uint32_t bank;
278 hibernate_bitmap_t * bitmap;
279 hibernate_bitmap_t * bitmap_wired;
280
281 bitmap = &page_list->bank_bitmap[0];
282 bitmap_wired = &page_list_wired->bank_bitmap[0];
283 for (bank = 0; bank < page_list->bank_count; bank++)
284 {
285 for (i = 0; i < bitmap->bitmapwords; i++)
286 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
287 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
288 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
289 }
290
291 // machine dependent adjustments
292 hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
293
294 clock_get_uptime(&end);
295 absolutetime_to_nanoseconds(end - start, &nsec);
296 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
297
298 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
299 pages, count_wire, count_active, count_inactive, count_zf,
300 count_discard_active, count_discard_inactive);
301
302 *pagesOut = pages;
303 }
304
305 void
306 hibernate_page_list_discard(hibernate_page_list_t * page_list)
307 {
308 uint64_t start, end, nsec;
309 vm_page_t m;
310 vm_page_t next;
311 uint32_t count_discard_active = 0, count_discard_inactive = 0;
312
313 clock_get_uptime(&start);
314
315 m = (vm_page_t) queue_first(&vm_page_queue_zf);
316 while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
317 {
318 next = (vm_page_t) m->pageq.next;
319 if (hibernate_page_bittst(page_list, m->phys_page))
320 {
321 discard_page(m);
322 count_discard_inactive++;
323 }
324 m = next;
325 }
326
327 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
328 while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
329 {
330 next = (vm_page_t) m->pageq.next;
331 if (hibernate_page_bittst(page_list, m->phys_page))
332 {
333 discard_page(m);
334 count_discard_inactive++;
335 }
336 m = next;
337 }
338
339 m = (vm_page_t) queue_first(&vm_page_queue_active);
340 while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
341 {
342 next = (vm_page_t) m->pageq.next;
343 if (hibernate_page_bittst(page_list, m->phys_page))
344 {
345 discard_page(m);
346 count_discard_active++;
347 }
348 m = next;
349 }
350
351 clock_get_uptime(&end);
352 absolutetime_to_nanoseconds(end - start, &nsec);
353 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
354 nsec / 1000000ULL,
355 count_discard_active, count_discard_inactive);
356 }
357
358 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
359
/*
 * Prepare for hibernation: allocate the two page bitmaps, report
 * whether the default pager encrypts swap, and optionally "gobble"
 * free pages onto hibernate_gobble_queue to shrink the image.
 *
 * free_page_ratio: percent of page_count to try to grab up front.
 * free_page_time:  budget passed to clock_interval_to_deadline
 *                  (scaled by 1000*1000, i.e. interpreted as ms).
 * page_list_ret / page_list_wired_ret: receive the allocated bitmaps
 *                  on success; the caller owns them (hibernate_teardown
 *                  frees them). Both are NULL on failure.
 * encryptedswap:   set from dp_encryption.
 *
 * Returns KERN_RESOURCE_SHORTAGE if either bitmap allocation fails;
 * past that point there are no failure paths and KERN_SUCCESS is
 * returned (gobbling stops early on timeout rather than failing).
 */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t free_page_ratio,
                uint32_t free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t m;
    uint32_t i, gobble_count;

    *page_list_ret = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        // release the first list before bailing out
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n", 
            header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            // Wait for a free page until the deadline expires; a
            // timeout leaves m == VM_PAGE_NULL and ends the gobble.
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            // grabbed pages come back busy; clear that before parking
            m->busy = FALSE;
            vm_page_gobble(m);

            // push onto the singly-linked gobble queue via pageq.next
            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
433
434 kern_return_t
435 hibernate_teardown(hibernate_page_list_t * page_list,
436 hibernate_page_list_t * page_list_wired)
437 {
438 vm_page_t m, next;
439 uint32_t count = 0;
440
441 m = (vm_page_t) hibernate_gobble_queue;
442 while(m)
443 {
444 next = (vm_page_t) m->pageq.next;
445 vm_page_free(m);
446 count++;
447 m = next;
448 }
449 hibernate_gobble_queue = VM_PAGE_NULL;
450
451 if (count)
452 HIBLOG("Freed %d pages\n", count);
453
454 if (page_list)
455 kfree(page_list, page_list->list_size);
456 if (page_list_wired)
457 kfree(page_list_wired, page_list_wired->list_size);
458
459 return (KERN_SUCCESS);
460 }
461