]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/hibernate.c
27a089239338367b714b15b768cdf2619264958b
[apple/xnu.git] / osfmk / kern / hibernate.c
1 /*
2 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
39
40 #include <IOKit/IOHibernatePrivate.h>
41 #include <vm/vm_page.h>
42 #include <vm/vm_pageout.h>
43 #include <vm/vm_purgeable_internal.h>
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46
47 static vm_page_t hibernate_gobble_queue;
48
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
50
51 static void
52 hibernate_page_list_zero(hibernate_page_list_t *list)
53 {
54 uint32_t bank;
55 hibernate_bitmap_t * bitmap;
56
57 bitmap = &list->bank_bitmap[0];
58 for (bank = 0; bank < list->bank_count; bank++)
59 {
60 uint32_t last_bit;
61
62 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
63 // set out-of-bound bits at end of bitmap.
64 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
65 if (last_bit)
66 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
67
68 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
69 }
70 }
71
72
/*
 * Decide whether a page may be dropped from the hibernate image and
 * refaulted/recreated after wakeup instead of being written out.
 *
 * Returns TRUE only when every safety check passes: the owning object
 * lock is acquired without blocking (try-lock only, since this runs in
 * the hibernate path), the page is unwired, not precious, not busy, its
 * object is alive, it is not absent/unusual/in-error, not being cleaned
 * or laundered, and it is either clean (after refreshing the dirty bit
 * from the hardware pmap state) or belongs to a volatile/empty
 * purgeable object.  Any failed check means "keep the page".
 */
static boolean_t
consider_discard(vm_page_t m)
{
    vm_object_t object = NULL;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if(m->private)
            panic("consider_discard: private");

        /* Try-lock only: must not block on VM object locks here. */
        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
           /*
            *	Somebody is playing with this page.
            */
            break;

        if (m->absent || m->unusual || m->error)
           /*
            * If it's unusual in anyway, ignore it
            */
            break;

        if (m->cleaning)
            break;

        if (m->laundry || m->list_req_pending)
            break;

        if (!m->dirty)
        {
            /* The software dirty bit may be stale; fold in the hardware
               referenced/modified state before concluding "clean". */
            refmod_state = pmap_get_refmod(m->phys_page);
        
            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }
   
        /*
         * If it's clean or purgeable we can discard the page on wakeup.
         * JMM - consider purgeable (volatile or empty) objects here as well.
         */
        discard = (!m->dirty)
                    || (VM_PURGABLE_VOLATILE == object->purgable)
                    || (VM_PURGABLE_EMPTY == m->object->purgable);
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
138
139
/*
 * Free a page previously approved by consider_discard().  Severs all
 * pmap mappings, transitions a volatile purgeable owner to the EMPTY
 * state (removing it from its purgeable queue and deleting a token),
 * detaches the page from its object, and frees it.  Laundry, private,
 * and fictitious pages indicate a caller bug and panic.
 */
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
       /*
        * If it's unusual in anyway, ignore
        */
        return;

    if (m->pmapped == TRUE)
    {
        /* Disconnect every mapping so no stale translation survives
           past the free; the returned refmod state is not needed. */
        __unused int refmod_state = pmap_disconnect(m->phys_page);
    }

    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    if (VM_PURGABLE_VOLATILE == m->object->purgable)
    {
        assert(m->object->objq.next != NULL && m->object->objq.prev != NULL); /* object should be on a queue */
        purgeable_q_t old_queue=vm_purgeable_object_remove(m->object);
        assert(old_queue);
        /* No need to lock page queue for token delete, hibernate_vm_unlock() 
           makes sure these locks are uncontended before sleep */
        vm_purgeable_token_delete_first(old_queue);
        m->object->purgable = VM_PURGABLE_EMPTY;
    }
	
    if (m->tabled)
        vm_page_remove(m);

    vm_page_free(m);
}
177
178 /*
179 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
180 pages known to VM to not need saving are subtracted.
181 Wired pages to be saved are present in page_list_wired, pageable in page_list.
182 */
183
184 void
185 hibernate_page_list_setall(hibernate_page_list_t * page_list,
186 hibernate_page_list_t * page_list_wired,
187 uint32_t * pagesOut)
188 {
189 uint64_t start, end, nsec;
190 vm_page_t m;
191 uint32_t pages = page_list->page_count;
192 uint32_t count_zf = 0, count_throttled = 0;
193 uint32_t count_inactive = 0, count_active = 0, count_speculative = 0;
194 uint32_t count_wire = pages;
195 uint32_t count_discard_active = 0;
196 uint32_t count_discard_inactive = 0;
197 uint32_t count_discard_purgeable = 0;
198 uint32_t count_discard_speculative = 0;
199 uint32_t i;
200 uint32_t bank;
201 hibernate_bitmap_t * bitmap;
202 hibernate_bitmap_t * bitmap_wired;
203
204
205 HIBLOG("hibernate_page_list_setall start\n");
206
207 clock_get_uptime(&start);
208
209 hibernate_page_list_zero(page_list);
210 hibernate_page_list_zero(page_list_wired);
211
212 m = (vm_page_t) hibernate_gobble_queue;
213 while(m)
214 {
215 pages--;
216 count_wire--;
217 hibernate_page_bitset(page_list, TRUE, m->phys_page);
218 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
219 m = (vm_page_t) m->pageq.next;
220 }
221
222 for( i = 0; i < vm_colors; i++ )
223 {
224 queue_iterate(&vm_page_queue_free[i],
225 m,
226 vm_page_t,
227 pageq)
228 {
229 pages--;
230 count_wire--;
231 hibernate_page_bitset(page_list, TRUE, m->phys_page);
232 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
233 }
234 }
235
236 queue_iterate(&vm_lopage_queue_free,
237 m,
238 vm_page_t,
239 pageq)
240 {
241 pages--;
242 count_wire--;
243 hibernate_page_bitset(page_list, TRUE, m->phys_page);
244 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
245 }
246
247 queue_iterate( &vm_page_queue_throttled,
248 m,
249 vm_page_t,
250 pageq )
251 {
252 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
253 && consider_discard(m))
254 {
255 hibernate_page_bitset(page_list, TRUE, m->phys_page);
256 count_discard_inactive++;
257 }
258 else
259 count_throttled++;
260 count_wire--;
261 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
262 }
263
264 queue_iterate( &vm_page_queue_zf,
265 m,
266 vm_page_t,
267 pageq )
268 {
269 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
270 && consider_discard(m))
271 {
272 hibernate_page_bitset(page_list, TRUE, m->phys_page);
273 if (m->dirty)
274 count_discard_purgeable++;
275 else
276 count_discard_inactive++;
277 }
278 else
279 count_zf++;
280 count_wire--;
281 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
282 }
283
284 queue_iterate( &vm_page_queue_inactive,
285 m,
286 vm_page_t,
287 pageq )
288 {
289 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
290 && consider_discard(m))
291 {
292 hibernate_page_bitset(page_list, TRUE, m->phys_page);
293 if (m->dirty)
294 count_discard_purgeable++;
295 else
296 count_discard_inactive++;
297 }
298 else
299 count_inactive++;
300 count_wire--;
301 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
302 }
303
304 for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
305 {
306 queue_iterate(&vm_page_queue_speculative[i].age_q,
307 m,
308 vm_page_t,
309 pageq)
310 {
311 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
312 && consider_discard(m))
313 {
314 hibernate_page_bitset(page_list, TRUE, m->phys_page);
315 count_discard_speculative++;
316 }
317 else
318 count_speculative++;
319 count_wire--;
320 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
321 }
322 }
323
324 queue_iterate( &vm_page_queue_active,
325 m,
326 vm_page_t,
327 pageq )
328 {
329 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
330 && consider_discard(m))
331 {
332 hibernate_page_bitset(page_list, TRUE, m->phys_page);
333 if (m->dirty)
334 count_discard_purgeable++;
335 else
336 count_discard_active++;
337 }
338 else
339 count_active++;
340 count_wire--;
341 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
342 }
343
344 // pull wired from hibernate_bitmap
345
346 bitmap = &page_list->bank_bitmap[0];
347 bitmap_wired = &page_list_wired->bank_bitmap[0];
348 for (bank = 0; bank < page_list->bank_count; bank++)
349 {
350 for (i = 0; i < bitmap->bitmapwords; i++)
351 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
352 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
353 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
354 }
355
356 // machine dependent adjustments
357 hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
358
359 clock_get_uptime(&end);
360 absolutetime_to_nanoseconds(end - start, &nsec);
361 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
362
363 HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n",
364 pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled,
365 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
366
367 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative;
368 }
369
/*
 * After the image has been written, walk the pageable queues and free
 * every page whose bit is set in page_list — i.e. the pages
 * hibernate_page_list_setall() decided could be discarded instead of
 * saved.  Uses the same dirty => purgeable classification as setall
 * for the summary log.  Must use queue_first/next by hand (not
 * queue_iterate) because discard_page() unlinks the current page.
 *
 * NOTE(review): setall can also mark pages on vm_page_queue_throttled
 * as discardable, but that queue is not walked here — confirm whether
 * throttled pages can still carry discard bits at this point.
 */
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  i;
    uint32_t  count_discard_active    = 0;
    uint32_t  count_discard_inactive  = 0;
    uint32_t  count_discard_purgeable = 0;
    uint32_t  count_discard_speculative = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard_page(m);
        }
        m = next;
    }

    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
    {
        m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
        while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
        {
            next = (vm_page_t) m->pageq.next;
            if (hibernate_page_bittst(page_list, m->phys_page))
            {
                count_discard_speculative++;
                discard_page(m);
            }
            m = next;
        }
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard_page(m);
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_active++;
            discard_page(m);
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n",
                nsec / 1000000ULL,
                count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
}
450
451 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
452
/*
 * Prepare for hibernation.
 *
 * Allocates the two page-list structures (returned via page_list_ret /
 * page_list_wired_ret; caller owns them until hibernate_teardown()),
 * reports via *encryptedswap whether the default pager encrypts swap,
 * and "gobbles" up to free_page_ratio percent of physical pages —
 * spending at most free_page_time milliseconds — to force pageout and
 * shrink the eventual image.  Gobbled pages are chained on
 * hibernate_gobble_queue and released by hibernate_teardown().
 *
 * Returns KERN_RESOURCE_SHORTAGE if either page list cannot be
 * allocated, otherwise KERN_SUCCESS.
 */
kern_return_t 
hibernate_setup(IOHibernateImageHeader * header,
                        uint32_t  free_page_ratio,
                        uint32_t  free_page_time,
                        hibernate_page_list_t ** page_list_ret,
                        hibernate_page_list_t ** page_list_wired_ret,
                        boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t 		    m;
    uint32_t		    i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n", 
            header->processorFlags, gobble_count);

    if (gobble_count)
    {
	uint64_t start, end, timeout, nsec;
	clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
	clock_get_uptime(&start);

	for (i = 0; i < gobble_count; i++)
	{
	    /* Wait for a free page, but give up once the deadline passes
	       so setup cannot stall indefinitely. */
	    while (VM_PAGE_NULL == (m = vm_page_grab()))
	    {
		clock_get_uptime(&end);
		if (end >= timeout)
		    break;
		VM_PAGE_WAIT();
	    }
	    if (!m)
		break;
	    m->busy = FALSE;
	    vm_page_gobble(m);

	    /* Push onto the singly-linked gobble queue via pageq.next. */
	    m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
	    hibernate_gobble_queue = m;
	}

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
526
527 kern_return_t
528 hibernate_teardown(hibernate_page_list_t * page_list,
529 hibernate_page_list_t * page_list_wired)
530 {
531 vm_page_t m, next;
532 uint32_t count = 0;
533
534 m = (vm_page_t) hibernate_gobble_queue;
535 while(m)
536 {
537 next = (vm_page_t) m->pageq.next;
538 vm_page_free(m);
539 count++;
540 m = next;
541 }
542 hibernate_gobble_queue = VM_PAGE_NULL;
543
544 if (count)
545 HIBLOG("Freed %d pages\n", count);
546
547 if (page_list)
548 kfree(page_list, page_list->list_size);
549 if (page_list_wired)
550 kfree(page_list_wired, page_list_wired->list_size);
551
552 return (KERN_SUCCESS);
553 }
554