/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

29 | #include <kern/kalloc.h> | |
30 | #include <kern/machine.h> | |
31 | #include <kern/misc_protos.h> | |
32 | #include <kern/thread.h> | |
33 | #include <kern/processor.h> | |
34 | #include <mach/machine.h> | |
35 | #include <mach/processor_info.h> | |
36 | #include <mach/mach_types.h> | |
37 | #include <default_pager/default_pager_internal.h> | |
38 | #include <IOKit/IOPlatformExpert.h> | |
39 | #define KERNEL | |
40 | ||
41 | #include <IOKit/IOHibernatePrivate.h> | |
42 | #include <vm/vm_page.h> | |
43 | #include <vm/vm_pageout.h> | |
44 | ||
45 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
46 | ||
47 | static vm_page_t hibernate_gobble_queue; | |
48 | ||
49 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
50 | ||
51 | static void | |
52 | hibernate_page_list_zero(hibernate_page_list_t *list) | |
53 | { | |
54 | uint32_t bank; | |
55 | hibernate_bitmap_t * bitmap; | |
56 | ||
57 | bitmap = &list->bank_bitmap[0]; | |
58 | for (bank = 0; bank < list->bank_count; bank++) | |
59 | { | |
21362eb3 A |
60 | uint32_t bit, last_bit; |
61 | uint32_t *bitmap_word; | |
3a60a9f5 A |
62 | |
63 | bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); | |
21362eb3 A |
64 | |
65 | // Set out-of-bound bits at end of bitmap. | |
66 | bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1]; | |
67 | last_bit = ((bitmap->last_page - bitmap->first_page) & 31); | |
68 | for (bit = 31; bit > last_bit; bit--) { | |
69 | *bitmap_word |= (0x80000000 >> bit); | |
70 | } | |
3a60a9f5 A |
71 | |
72 | bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; | |
73 | } | |
74 | } | |
75 | ||
76 | ||
77 | static boolean_t | |
78 | consider_discard(vm_page_t m) | |
79 | { | |
80 | register vm_object_t object = 0; | |
81 | int refmod_state; | |
82 | boolean_t discard = FALSE; | |
83 | ||
84 | do | |
85 | { | |
86 | if(m->private) | |
87 | panic("consider_discard: private"); | |
88 | ||
89 | if (!vm_object_lock_try(m->object)) | |
90 | break; | |
91 | ||
92 | object = m->object; | |
93 | ||
94 | if (m->wire_count != 0) | |
95 | break; | |
96 | if (m->precious) | |
97 | break; | |
98 | ||
99 | if (m->busy || !object->alive) | |
100 | /* | |
101 | * Somebody is playing with this page. | |
102 | */ | |
103 | break; | |
104 | ||
105 | if (m->absent || m->unusual || m->error) | |
106 | /* | |
107 | * If it's unusual in anyway, ignore it | |
108 | */ | |
109 | break; | |
110 | ||
111 | if (m->cleaning) | |
112 | break; | |
113 | ||
114 | if (!m->dirty) | |
115 | { | |
116 | refmod_state = pmap_get_refmod(m->phys_page); | |
117 | ||
118 | if (refmod_state & VM_MEM_REFERENCED) | |
119 | m->reference = TRUE; | |
120 | if (refmod_state & VM_MEM_MODIFIED) | |
121 | m->dirty = TRUE; | |
122 | } | |
123 | ||
124 | /* | |
125 | * If it's clean we can discard the page on wakeup. | |
126 | */ | |
127 | discard = !m->dirty; | |
128 | } | |
129 | while (FALSE); | |
130 | ||
131 | if (object) | |
132 | vm_object_unlock(object); | |
133 | ||
134 | return (discard); | |
135 | } | |
136 | ||
137 | ||
138 | static void | |
139 | discard_page(vm_page_t m) | |
140 | { | |
141 | if (m->absent || m->unusual || m->error) | |
142 | /* | |
143 | * If it's unusual in anyway, ignore | |
144 | */ | |
145 | return; | |
146 | ||
147 | if (!m->no_isync) | |
148 | { | |
149 | int refmod_state = pmap_disconnect(m->phys_page); | |
150 | ||
151 | if (refmod_state & VM_MEM_REFERENCED) | |
152 | m->reference = TRUE; | |
153 | if (refmod_state & VM_MEM_MODIFIED) | |
154 | m->dirty = TRUE; | |
155 | } | |
156 | ||
157 | if (m->dirty) | |
158 | panic("discard_page(%p) dirty", m); | |
159 | if (m->laundry) | |
160 | panic("discard_page(%p) laundry", m); | |
161 | if (m->private) | |
162 | panic("discard_page(%p) private", m); | |
163 | if (m->fictitious) | |
164 | panic("discard_page(%p) fictitious", m); | |
165 | ||
166 | vm_page_free(m); | |
167 | } | |
168 | ||
169 | /* | |
170 | Bits zero in the bitmaps => needs to be saved. All pages default to be saved, | |
171 | pages known to VM to not need saving are subtracted. | |
172 | Wired pages to be saved are present in page_list_wired, pageable in page_list. | |
173 | */ | |
174 | ||
175 | void | |
176 | hibernate_page_list_setall(hibernate_page_list_t * page_list, | |
177 | hibernate_page_list_t * page_list_wired, | |
178 | uint32_t * pagesOut) | |
179 | { | |
180 | uint64_t start, end, nsec; | |
181 | vm_page_t m; | |
182 | uint32_t pages = page_list->page_count; | |
183 | uint32_t count_zf = 0, count_inactive = 0, count_active = 0; | |
184 | uint32_t count_wire = pages; | |
185 | uint32_t count_discard_active = 0, count_discard_inactive = 0; | |
186 | uint32_t i; | |
187 | ||
188 | HIBLOG("hibernate_page_list_setall start\n"); | |
189 | ||
190 | clock_get_uptime(&start); | |
191 | ||
192 | hibernate_page_list_zero(page_list); | |
193 | hibernate_page_list_zero(page_list_wired); | |
194 | ||
195 | m = (vm_page_t) hibernate_gobble_queue; | |
196 | while(m) | |
197 | { | |
198 | pages--; | |
199 | count_wire--; | |
200 | hibernate_page_bitset(page_list, TRUE, m->phys_page); | |
201 | hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); | |
202 | m = (vm_page_t) m->pageq.next; | |
203 | } | |
204 | ||
205 | m = (vm_page_t) vm_page_queue_free; | |
206 | while(m) | |
207 | { | |
208 | pages--; | |
209 | count_wire--; | |
210 | hibernate_page_bitset(page_list, TRUE, m->phys_page); | |
211 | hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); | |
212 | m = (vm_page_t) m->pageq.next; | |
213 | } | |
214 | ||
215 | queue_iterate( &vm_page_queue_zf, | |
216 | m, | |
217 | vm_page_t, | |
218 | pageq ) | |
219 | { | |
220 | if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) | |
221 | && consider_discard(m)) | |
222 | { | |
223 | hibernate_page_bitset(page_list, TRUE, m->phys_page); | |
224 | count_discard_inactive++; | |
225 | } | |
226 | else | |
227 | count_zf++; | |
228 | count_wire--; | |
229 | hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); | |
230 | } | |
231 | ||
232 | queue_iterate( &vm_page_queue_inactive, | |
233 | m, | |
234 | vm_page_t, | |
235 | pageq ) | |
236 | { | |
237 | if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) | |
238 | && consider_discard(m)) | |
239 | { | |
240 | hibernate_page_bitset(page_list, TRUE, m->phys_page); | |
241 | count_discard_inactive++; | |
242 | } | |
243 | else | |
244 | count_inactive++; | |
245 | count_wire--; | |
246 | hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); | |
247 | } | |
248 | ||
249 | queue_iterate( &vm_page_queue_active, | |
250 | m, | |
251 | vm_page_t, | |
252 | pageq ) | |
253 | { | |
254 | if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) | |
255 | && consider_discard(m)) | |
256 | { | |
257 | hibernate_page_bitset(page_list, TRUE, m->phys_page); | |
258 | count_discard_active++; | |
259 | } | |
260 | else | |
261 | count_active++; | |
262 | count_wire--; | |
263 | hibernate_page_bitset(page_list_wired, TRUE, m->phys_page); | |
264 | } | |
265 | ||
266 | // pull wired from hibernate_bitmap | |
267 | ||
268 | uint32_t bank; | |
269 | hibernate_bitmap_t * bitmap; | |
270 | hibernate_bitmap_t * bitmap_wired; | |
271 | ||
272 | bitmap = &page_list->bank_bitmap[0]; | |
273 | bitmap_wired = &page_list_wired->bank_bitmap[0]; | |
274 | for (bank = 0; bank < page_list->bank_count; bank++) | |
275 | { | |
276 | for (i = 0; i < bitmap->bitmapwords; i++) | |
277 | bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i]; | |
278 | bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords]; | |
279 | bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords]; | |
280 | } | |
281 | ||
282 | // machine dependent adjustments | |
283 | hibernate_page_list_setall_machine(page_list, page_list_wired, &pages); | |
284 | ||
285 | clock_get_uptime(&end); | |
286 | absolutetime_to_nanoseconds(end - start, &nsec); | |
287 | HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL); | |
288 | ||
289 | HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n", | |
290 | pages, count_wire, count_active, count_inactive, count_zf, | |
291 | count_discard_active, count_discard_inactive); | |
292 | ||
293 | *pagesOut = pages; | |
294 | } | |
295 | ||
296 | void | |
297 | hibernate_page_list_discard(hibernate_page_list_t * page_list) | |
298 | { | |
299 | uint64_t start, end, nsec; | |
300 | vm_page_t m; | |
301 | vm_page_t next; | |
302 | uint32_t count_discard_active = 0, count_discard_inactive = 0; | |
303 | ||
304 | clock_get_uptime(&start); | |
305 | ||
306 | m = (vm_page_t) queue_first(&vm_page_queue_zf); | |
307 | while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m)) | |
308 | { | |
309 | next = (vm_page_t) m->pageq.next; | |
310 | if (hibernate_page_bittst(page_list, m->phys_page)) | |
311 | { | |
312 | discard_page(m); | |
313 | count_discard_inactive++; | |
314 | } | |
315 | m = next; | |
316 | } | |
317 | ||
318 | m = (vm_page_t) queue_first(&vm_page_queue_inactive); | |
319 | while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m)) | |
320 | { | |
321 | next = (vm_page_t) m->pageq.next; | |
322 | if (hibernate_page_bittst(page_list, m->phys_page)) | |
323 | { | |
324 | discard_page(m); | |
325 | count_discard_inactive++; | |
326 | } | |
327 | m = next; | |
328 | } | |
329 | ||
330 | m = (vm_page_t) queue_first(&vm_page_queue_active); | |
331 | while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m)) | |
332 | { | |
333 | next = (vm_page_t) m->pageq.next; | |
334 | if (hibernate_page_bittst(page_list, m->phys_page)) | |
335 | { | |
336 | discard_page(m); | |
337 | count_discard_active++; | |
338 | } | |
339 | m = next; | |
340 | } | |
341 | ||
342 | clock_get_uptime(&end); | |
343 | absolutetime_to_nanoseconds(end - start, &nsec); | |
344 | HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n", | |
345 | nsec / 1000000ULL, | |
346 | count_discard_active, count_discard_inactive); | |
347 | } | |
348 | ||
349 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
350 | ||
351 | kern_return_t | |
352 | hibernate_setup(IOHibernateImageHeader * header, | |
353 | uint32_t free_page_ratio, | |
354 | uint32_t free_page_time, | |
355 | hibernate_page_list_t ** page_list_ret, | |
356 | hibernate_page_list_t ** page_list_wired_ret, | |
357 | boolean_t * encryptedswap) | |
358 | { | |
359 | hibernate_page_list_t * page_list = NULL; | |
360 | hibernate_page_list_t * page_list_wired = NULL; | |
361 | vm_page_t m; | |
362 | uint32_t i, gobble_count; | |
363 | ||
364 | *page_list_ret = NULL; | |
365 | *page_list_wired_ret = NULL; | |
366 | ||
367 | ||
368 | page_list = hibernate_page_list_allocate(); | |
369 | if (!page_list) | |
370 | return (KERN_RESOURCE_SHORTAGE); | |
371 | page_list_wired = hibernate_page_list_allocate(); | |
372 | if (!page_list_wired) | |
373 | { | |
374 | kfree(page_list, page_list->list_size); | |
375 | return (KERN_RESOURCE_SHORTAGE); | |
376 | } | |
377 | ||
378 | *encryptedswap = dp_encryption; | |
379 | ||
380 | // pages we could force out to reduce hibernate image size | |
381 | gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100; | |
382 | ||
383 | // no failures hereafter | |
384 | ||
385 | hibernate_processor_setup(header); | |
386 | ||
387 | HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n", | |
388 | header->processorFlags, gobble_count); | |
389 | ||
390 | if (gobble_count) | |
391 | { | |
392 | uint64_t start, end, timeout, nsec; | |
393 | clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout); | |
394 | clock_get_uptime(&start); | |
395 | ||
396 | for (i = 0; i < gobble_count; i++) | |
397 | { | |
398 | while (VM_PAGE_NULL == (m = vm_page_grab())) | |
399 | { | |
400 | clock_get_uptime(&end); | |
401 | if (end >= timeout) | |
402 | break; | |
403 | VM_PAGE_WAIT(); | |
404 | } | |
405 | if (!m) | |
406 | break; | |
407 | m->busy = FALSE; | |
408 | vm_page_gobble(m); | |
409 | ||
410 | m->pageq.next = (queue_entry_t) hibernate_gobble_queue; | |
411 | hibernate_gobble_queue = m; | |
412 | } | |
413 | ||
414 | clock_get_uptime(&end); | |
415 | absolutetime_to_nanoseconds(end - start, &nsec); | |
416 | HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL); | |
417 | } | |
418 | ||
419 | *page_list_ret = page_list; | |
420 | *page_list_wired_ret = page_list_wired; | |
421 | ||
422 | return (KERN_SUCCESS); | |
423 | } | |
424 | ||
425 | kern_return_t | |
426 | hibernate_teardown(hibernate_page_list_t * page_list, | |
427 | hibernate_page_list_t * page_list_wired) | |
428 | { | |
429 | vm_page_t m, next; | |
430 | uint32_t count = 0; | |
431 | ||
432 | m = (vm_page_t) hibernate_gobble_queue; | |
433 | while(m) | |
434 | { | |
435 | next = (vm_page_t) m->pageq.next; | |
436 | vm_page_free(m); | |
437 | count++; | |
438 | m = next; | |
439 | } | |
440 | hibernate_gobble_queue = VM_PAGE_NULL; | |
441 | ||
442 | if (count) | |
443 | HIBLOG("Freed %d pages\n", count); | |
444 | ||
445 | if (page_list) | |
446 | kfree(page_list, page_list->list_size); | |
447 | if (page_list_wired) | |
448 | kfree(page_list_wired, page_list_wired->list_size); | |
449 | ||
450 | return (KERN_SUCCESS); | |
451 | } | |
452 |