]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/hibernate.c
xnu-792.22.5.tar.gz
[apple/xnu.git] / osfmk / kern / hibernate.c
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
39 #define KERNEL
40
41 #include <IOKit/IOHibernatePrivate.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
44
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46
47 static vm_page_t hibernate_gobble_queue;
48
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
50
51 static void
52 hibernate_page_list_zero(hibernate_page_list_t *list)
53 {
54 uint32_t bank;
55 hibernate_bitmap_t * bitmap;
56
57 bitmap = &list->bank_bitmap[0];
58 for (bank = 0; bank < list->bank_count; bank++)
59 {
60 uint32_t last_bit;
61
62 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
63 // set out-of-bound bits at end of bitmap.
64 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
65 if (last_bit)
66 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
67
68 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
69 }
70 }
71
72
/*
 * Decide whether a pageable page may be dropped from the hibernation image
 * and refaulted after wakeup.  Returns TRUE only for clean, unwired,
 * quiescent pages whose object lock could be taken without blocking.
 * Side effect: refreshes m->reference / m->dirty from the pmap ref/mod
 * state before deciding, so a page dirtied via a mapping is kept.
 */
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if(m->private)
            panic("consider_discard: private");

        // Try-lock only: must not block while scanning for hibernation.
        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in anyway, ignore it
             */
            break;

        if (m->cleaning)
            break;

        if (!m->dirty)
        {
            // Fold the pmap's hardware ref/mod bits into the page state.
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
132
133
134 static void
135 discard_page(vm_page_t m)
136 {
137 if (m->absent || m->unusual || m->error)
138 /*
139 * If it's unusual in anyway, ignore
140 */
141 return;
142
143 if (!m->no_isync)
144 {
145 int refmod_state = pmap_disconnect(m->phys_page);
146
147 if (refmod_state & VM_MEM_REFERENCED)
148 m->reference = TRUE;
149 if (refmod_state & VM_MEM_MODIFIED)
150 m->dirty = TRUE;
151 }
152
153 if (m->dirty)
154 panic("discard_page(%p) dirty", m);
155 if (m->laundry)
156 panic("discard_page(%p) laundry", m);
157 if (m->private)
158 panic("discard_page(%p) private", m);
159 if (m->fictitious)
160 panic("discard_page(%p) fictitious", m);
161
162 vm_page_free(m);
163 }
164
165 /*
166 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
167 pages known to VM to not need saving are subtracted.
168 Wired pages to be saved are present in page_list_wired, pageable in page_list.
169 */
170 extern vm_page_t vm_lopage_queue_free;
171
/*
 * Build the hibernation bitmaps.  A zero bit means "save this page"; every
 * page starts as to-be-saved and the bits of pages VM knows need no saving
 * (gobbled, free, and optionally clean discardable pages) are set.  On
 * return, page_list covers all pages to save, page_list_wired only the
 * wired ones, and *pagesOut is the number of pages the image must contain.
 * NOTE(review): assumes the VM page queues are stable while it walks them
 * (machine quiesced by the caller) — confirm against the hibernate driver.
 */
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t start, end, nsec;
    vm_page_t m;
    uint32_t pages = page_list->page_count;
    uint32_t count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t count_wire = pages;       // pages not found on any queue below are assumed wired
    uint32_t count_discard_active = 0, count_discard_inactive = 0;
    uint32_t i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    // Start from "save everything" (all bits zero).
    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    // Pages gobbled by hibernate_setup() carry no data worth saving.
    m = (vm_page_t) hibernate_gobble_queue;
    while(m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    // Free pages need no saving.
    m = (vm_page_t) vm_page_queue_free;
    while(m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    // Low-memory free pages likewise need no saving.
    m = (vm_page_t) vm_lopage_queue_free;
    while(m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    // Zero-fill queue: clean pages may be discarded if the mode allows it;
    // either way they are not wired, so clear them from the wired image.
    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // Inactive queue: same policy as the zero-fill queue.
    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // Active queue: discardable only under the more aggressive mode flag.
    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap: a page is wired iff it appeared on
    // no queue above, i.e. its bit is still clear in page_list_wired.
    uint32_t bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        // Leave only wired pages as "to save" in the wired image's view of
        // page_list: OR in the complement of the wired bitmap.
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
                pages, count_wire, count_active, count_inactive, count_zf,
                count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}
302
303 void
304 hibernate_page_list_discard(hibernate_page_list_t * page_list)
305 {
306 uint64_t start, end, nsec;
307 vm_page_t m;
308 vm_page_t next;
309 uint32_t count_discard_active = 0, count_discard_inactive = 0;
310
311 clock_get_uptime(&start);
312
313 m = (vm_page_t) queue_first(&vm_page_queue_zf);
314 while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
315 {
316 next = (vm_page_t) m->pageq.next;
317 if (hibernate_page_bittst(page_list, m->phys_page))
318 {
319 discard_page(m);
320 count_discard_inactive++;
321 }
322 m = next;
323 }
324
325 m = (vm_page_t) queue_first(&vm_page_queue_inactive);
326 while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
327 {
328 next = (vm_page_t) m->pageq.next;
329 if (hibernate_page_bittst(page_list, m->phys_page))
330 {
331 discard_page(m);
332 count_discard_inactive++;
333 }
334 m = next;
335 }
336
337 m = (vm_page_t) queue_first(&vm_page_queue_active);
338 while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
339 {
340 next = (vm_page_t) m->pageq.next;
341 if (hibernate_page_bittst(page_list, m->phys_page))
342 {
343 discard_page(m);
344 count_discard_active++;
345 }
346 m = next;
347 }
348
349 clock_get_uptime(&end);
350 absolutetime_to_nanoseconds(end - start, &nsec);
351 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
352 nsec / 1000000ULL,
353 count_discard_active, count_discard_inactive);
354 }
355
356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
357
/*
 * Prepare for writing a hibernation image.
 *
 * header           - image header to be filled in by the processor layer.
 * free_page_ratio  - percentage of physical pages to try to force free
 *                    ("gobble") before the image is written, shrinking it.
 * free_page_time   - time budget (in ms-scale units via
 *                    clock_interval_to_deadline) for the gobbling loop.
 * page_list_ret / page_list_wired_ret - receive newly allocated page lists;
 *                    owned by the caller, released by hibernate_teardown().
 * encryptedswap    - set from the default pager's dp_encryption flag.
 *
 * Returns KERN_RESOURCE_SHORTAGE if either page list cannot be allocated,
 * otherwise KERN_SUCCESS.  After the "no failures hereafter" point, a
 * partial gobble (timeout or no pages available) is not an error.
 */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t free_page_ratio,
                uint32_t free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t m;
    uint32_t i, gobble_count;

    *page_list_ret = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
            header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        // Budget the whole gobble pass; give up gracefully on timeout.
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            // Wait for a free page until one appears or the deadline hits.
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            // Thread gobbled pages onto a private list via their pageq
            // link; hibernate_teardown() frees them after wakeup.
            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
431
432 kern_return_t
433 hibernate_teardown(hibernate_page_list_t * page_list,
434 hibernate_page_list_t * page_list_wired)
435 {
436 vm_page_t m, next;
437 uint32_t count = 0;
438
439 m = (vm_page_t) hibernate_gobble_queue;
440 while(m)
441 {
442 next = (vm_page_t) m->pageq.next;
443 vm_page_free(m);
444 count++;
445 m = next;
446 }
447 hibernate_gobble_queue = VM_PAGE_NULL;
448
449 if (count)
450 HIBLOG("Freed %d pages\n", count);
451
452 if (page_list)
453 kfree(page_list, page_list->list_size);
454 if (page_list_wired)
455 kfree(page_list_wired, page_list_wired->list_size);
456
457 return (KERN_SUCCESS);
458 }
459