]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/hibernate.c
xnu-1228.3.13.tar.gz
[apple/xnu.git] / osfmk / kern / hibernate.c
1 /*
2 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
39
40 #include <IOKit/IOHibernatePrivate.h>
41 #include <vm/vm_page.h>
42 #include <vm/vm_pageout.h>
43 #include <vm/vm_purgeable_internal.h>
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46
/* Singly-linked list (threaded through pageq.next) of pages grabbed by
   hibernate_setup() to shrink the hibernation image; drained and freed
   by hibernate_teardown(). */
static vm_page_t hibernate_gobble_queue;
48
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
50
51 static void
52 hibernate_page_list_zero(hibernate_page_list_t *list)
53 {
54 uint32_t bank;
55 hibernate_bitmap_t * bitmap;
56
57 bitmap = &list->bank_bitmap[0];
58 for (bank = 0; bank < list->bank_count; bank++)
59 {
60 uint32_t last_bit;
61
62 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
63 // set out-of-bound bits at end of bitmap.
64 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
65 if (last_bit)
66 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
67
68 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
69 }
70 }
71
72
/*
 * Decide whether a page may be discarded instead of being written to the
 * hibernation image (to be refaulted or dropped after wakeup).
 *
 * Returns TRUE only when the page's object lock can be taken without
 * blocking and the page is not wired, precious, busy, unusual, being
 * cleaned or laundered — and is either clean, or belongs to a purgeable
 * (volatile/empty) object whose contents may legitimately be thrown away.
 *
 * Side effect: for a page not yet marked dirty, the pmap ref/mod bits
 * are folded into the vm_page's reference/dirty flags before deciding.
 */
static boolean_t
consider_discard(vm_page_t m)
{
    vm_object_t object = NULL;
    int refmod_state;
    boolean_t discard = FALSE;

    do
    {
        if(m->private)
            panic("consider_discard: private");

        /* try-lock only: hibernation must not block on VM locks here */
        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
           /*
            *	Somebody is playing with this page.
            */
            break;

        if (m->absent || m->unusual || m->error)
           /*
            *	If it's unusual in anyway, ignore it
            */
            break;

        if (m->cleaning)
            break;

        if (m->laundry || m->list_req_pending)
            break;

        if (!m->dirty)
        {
            /* pick up hardware ref/mod state not yet reflected in the page */
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean or purgeable we can discard the page on wakeup.
         * JMM - consider purgeable (volatile or empty) objects here as well.
         */
        discard = (!m->dirty)
                    || (VM_PURGABLE_VOLATILE == object->purgable)
                    || (VM_PURGABLE_EMPTY == m->object->purgable);
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
138
139
/*
 * Free a page that consider_discard() approved for omission from the
 * hibernation image.  If the page belongs to a volatile purgeable
 * object, the whole object is transitioned to VM_PURGABLE_EMPTY and its
 * purgeable-queue token released, so VM accounting stays consistent
 * after wakeup.
 */
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
       /*
        *	If it's unusual in anyway, ignore
        */
        return;

    if (m->pmapped == TRUE)
    {
        /* sever all pmap mappings before the page is freed */
        __unused int refmod_state = pmap_disconnect(m->phys_page);
    }

    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    if (VM_PURGABLE_VOLATILE == m->object->purgable)
    {
        assert(m->object->objq.next != NULL && m->object->objq.prev != NULL); /* object should be on a queue */
        purgeable_q_t old_queue=vm_purgeable_object_remove(m->object);
        assert(old_queue);
        /* No need to lock page queue for token delete, hibernate_vm_unlock()
           makes sure these locks are uncontended before sleep */
        vm_purgeable_token_delete_first(old_queue);
        m->object->purgable = VM_PURGABLE_EMPTY;
    }

    if (m->tabled)
        vm_page_remove(m);

    vm_page_free(m);
}
177
178 /*
179 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
180 pages known to VM to not need saving are subtracted.
181 Wired pages to be saved are present in page_list_wired, pageable in page_list.
182 */
183
184 void
185 hibernate_page_list_setall(hibernate_page_list_t * page_list,
186 hibernate_page_list_t * page_list_wired,
187 uint32_t * pagesOut)
188 {
189 uint64_t start, end, nsec;
190 vm_page_t m;
191 uint32_t pages = page_list->page_count;
192 uint32_t count_zf = 0, count_throttled = 0, count_inactive = 0, count_active = 0;
193 uint32_t count_wire = pages;
194 uint32_t count_discard_active = 0;
195 uint32_t count_discard_inactive = 0;
196 uint32_t count_discard_purgeable = 0;
197 uint32_t i;
198 uint32_t bank;
199 hibernate_bitmap_t * bitmap;
200 hibernate_bitmap_t * bitmap_wired;
201
202
203 HIBLOG("hibernate_page_list_setall start\n");
204
205 clock_get_uptime(&start);
206
207 hibernate_page_list_zero(page_list);
208 hibernate_page_list_zero(page_list_wired);
209
210 m = (vm_page_t) hibernate_gobble_queue;
211 while(m)
212 {
213 pages--;
214 count_wire--;
215 hibernate_page_bitset(page_list, TRUE, m->phys_page);
216 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
217 m = (vm_page_t) m->pageq.next;
218 }
219
220 for( i = 0; i < vm_colors; i++ )
221 {
222 queue_iterate(&vm_page_queue_free[i],
223 m,
224 vm_page_t,
225 pageq)
226 {
227 pages--;
228 count_wire--;
229 hibernate_page_bitset(page_list, TRUE, m->phys_page);
230 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
231 }
232 }
233
234 queue_iterate(&vm_lopage_queue_free,
235 m,
236 vm_page_t,
237 pageq)
238 {
239 pages--;
240 count_wire--;
241 hibernate_page_bitset(page_list, TRUE, m->phys_page);
242 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
243 }
244
245 queue_iterate( &vm_page_queue_throttled,
246 m,
247 vm_page_t,
248 pageq )
249 {
250 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
251 && consider_discard(m))
252 {
253 hibernate_page_bitset(page_list, TRUE, m->phys_page);
254 count_discard_inactive++;
255 }
256 else
257 count_throttled++;
258 count_wire--;
259 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
260 }
261
262 queue_iterate( &vm_page_queue_zf,
263 m,
264 vm_page_t,
265 pageq )
266 {
267 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
268 && consider_discard(m))
269 {
270 hibernate_page_bitset(page_list, TRUE, m->phys_page);
271 if (m->dirty)
272 count_discard_purgeable++;
273 else
274 count_discard_inactive++;
275 }
276 else
277 count_zf++;
278 count_wire--;
279 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
280 }
281
282 queue_iterate( &vm_page_queue_inactive,
283 m,
284 vm_page_t,
285 pageq )
286 {
287 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
288 && consider_discard(m))
289 {
290 hibernate_page_bitset(page_list, TRUE, m->phys_page);
291 if (m->dirty)
292 count_discard_purgeable++;
293 else
294 count_discard_inactive++;
295 }
296 else
297 count_inactive++;
298 count_wire--;
299 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
300 }
301
302 queue_iterate( &vm_page_queue_active,
303 m,
304 vm_page_t,
305 pageq )
306 {
307 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
308 && consider_discard(m))
309 {
310 hibernate_page_bitset(page_list, TRUE, m->phys_page);
311 if (m->dirty)
312 count_discard_purgeable++;
313 else
314 count_discard_active++;
315 }
316 else
317 count_active++;
318 count_wire--;
319 hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
320 }
321
322 // pull wired from hibernate_bitmap
323
324 bitmap = &page_list->bank_bitmap[0];
325 bitmap_wired = &page_list_wired->bank_bitmap[0];
326 for (bank = 0; bank < page_list->bank_count; bank++)
327 {
328 for (i = 0; i < bitmap->bitmapwords; i++)
329 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
330 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
331 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
332 }
333
334 // machine dependent adjustments
335 hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);
336
337 clock_get_uptime(&end);
338 absolutetime_to_nanoseconds(end - start, &nsec);
339 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
340
341 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, throt %d, could discard act %d inact %d purgeable %d\n",
342 pages, count_wire, count_active, count_inactive, count_zf, count_throttled,
343 count_discard_active, count_discard_inactive, count_discard_purgeable);
344
345 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable;
346 }
347
348 void
349 hibernate_page_list_discard(hibernate_page_list_t * page_list)
350 {
351 uint64_t start, end, nsec;
352 vm_page_t m;
353 vm_page_t next;
354 uint32_t count_discard_active = 0;
355 uint32_t count_discard_inactive = 0;
356 uint32_t count_discard_purgeable = 0;
357
358 clock_get_uptime(&start);
359
360 m = (vm_page_t) queue_first(&vm_page_queue_zf);
361 while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
362 {
363 next = (vm_page_t) m->pageq.next;
364 if (hibernate_page_bittst(page_list, m->phys_page))
365 {
366 if (m->dirty)
367 count_discard_purgeable++;
368 else
369 count_discard_inactive++;
370 discard_page(m);
371 }
372 m = next;
373 }
374
375 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
376 while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
377 {
378 next = (vm_page_t) m->pageq.next;
379 if (hibernate_page_bittst(page_list, m->phys_page))
380 {
381 if (m->dirty)
382 count_discard_purgeable++;
383 else
384 count_discard_inactive++;
385 discard_page(m);
386 }
387 m = next;
388 }
389
390 m = (vm_page_t) queue_first(&vm_page_queue_active);
391 while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
392 {
393 next = (vm_page_t) m->pageq.next;
394 if (hibernate_page_bittst(page_list, m->phys_page))
395 {
396 if (m->dirty)
397 count_discard_purgeable++;
398 else
399 count_discard_active++;
400 discard_page(m);
401 }
402 m = next;
403 }
404
405 clock_get_uptime(&end);
406 absolutetime_to_nanoseconds(end - start, &nsec);
407 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d\n",
408 nsec / 1000000ULL,
409 count_discard_active, count_discard_inactive, count_discard_purgeable);
410 }
411
412 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
413
/*
 * Prepare for writing a hibernation image.
 *
 * Allocates the two page bitmaps, reports whether the default pager is
 * encrypting swap, and "gobbles" (grabs and holds) up to free_page_ratio
 * percent of memory — waiting at most free_page_time seconds for pages
 * to become free — to shrink the image.  Gobbled pages are chained on
 * hibernate_gobble_queue and released later by hibernate_teardown().
 *
 * Returns KERN_RESOURCE_SHORTAGE if either bitmap allocation fails
 * (nothing leaked), otherwise KERN_SUCCESS with *page_list_ret and
 * *page_list_wired_ret set.
 */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                        uint32_t free_page_ratio,
                        uint32_t free_page_time,
                        hibernate_page_list_t ** page_list_ret,
                        hibernate_page_list_t ** page_list_wired_ret,
                        boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t m;
    uint32_t i, gobble_count;

    *page_list_ret = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
	return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
	kfree(page_list, page_list->list_size);
	return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n",
	    header->processorFlags, gobble_count);

    if (gobble_count)
    {
	uint64_t start, end, timeout, nsec;
	/* overall deadline: free_page_time is in seconds (interval scaled by 1e6 ms units below) */
	clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
	clock_get_uptime(&start);

	for (i = 0; i < gobble_count; i++)
	{
	    /* wait (bounded by the deadline) for a free page to appear */
	    while (VM_PAGE_NULL == (m = vm_page_grab()))
	    {
		clock_get_uptime(&end);
		if (end >= timeout)
		    break;
		VM_PAGE_WAIT();
	    }
	    /* deadline expired with no page: stop gobbling early */
	    if (!m)
		break;
	    m->busy = FALSE;
	    vm_page_gobble(m);

	    /* push onto the singly-linked gobble queue (via pageq.next) */
	    m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
	    hibernate_gobble_queue = m;
	}

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
487
488 kern_return_t
489 hibernate_teardown(hibernate_page_list_t * page_list,
490 hibernate_page_list_t * page_list_wired)
491 {
492 vm_page_t m, next;
493 uint32_t count = 0;
494
495 m = (vm_page_t) hibernate_gobble_queue;
496 while(m)
497 {
498 next = (vm_page_t) m->pageq.next;
499 vm_page_free(m);
500 count++;
501 m = next;
502 }
503 hibernate_gobble_queue = VM_PAGE_NULL;
504
505 if (count)
506 HIBLOG("Freed %d pages\n", count);
507
508 if (page_list)
509 kfree(page_list, page_list->list_size);
510 if (page_list_wired)
511 kfree(page_list_wired, page_list_wired->list_size);
512
513 return (KERN_SUCCESS);
514 }
515