/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>
#define KERNEL

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

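/*
 * Queue of pages "gobbled" from the free list before hibernation to shrink
 * the image, linked through pageq.next; they are returned to the free list
 * by hibernate_teardown().
 */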
static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

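/*
 * Reset a page list so every page defaults to "needs save" (bit clear).
 * The trailing bits of each bank's last bitmap word, past last_page, are
 * set so out-of-range slots are never treated as pages to save.
 */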
static void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        uint32_t   bit, last_bit;
        uint32_t * bitmap_word;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);

        // Set out-of-bound bits at end of bitmap.
        bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1];
        last_bit    = ((bitmap->last_page - bitmap->first_page) & 31);
        for (bit = 31; bit > last_bit; bit--) {
            *bitmap_word |= (0x80000000 >> bit);
        }

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}

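/*
 * Decide whether a pageable page can be discarded at wakeup instead of
 * being written to the image. The page must be unwired, not precious, not
 * busy, not being cleaned, and clean; pmap referenced/modified state is
 * folded into m->reference and m->dirty before the final check. The object
 * lock is taken with a try so this never blocks.
 */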
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it.
             */
            break;

        if (m->cleaning)
            break;

        if (!m->dirty)
        {
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}

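/*
 * Free a page previously judged discardable. Referenced/modified state is
 * flushed from the pmap first; a page that turns out to be dirty, in the
 * laundry, private, or fictitious indicates a bookkeeping error, so panic.
 */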
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore it.
         */
        return;

    if (!m->no_isync)
    {
        int refmod_state = pmap_disconnect(m->phys_page);

        if (refmod_state & VM_MEM_REFERENCED)
            m->reference = TRUE;
        if (refmod_state & VM_MEM_MODIFIED)
            m->dirty = TRUE;
    }

    if (m->dirty)
        panic("discard_page(%p) dirty", m);
    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    vm_page_free(m);
}

/*
 * A zero bit in the bitmaps means the page needs to be saved. All pages
 * default to being saved; pages known to the VM not to need saving are
 * subtracted. Wired pages to be saved are present in page_list_wired,
 * pageable ones in page_list.
 */

void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;
    uint32_t  i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    m = (vm_page_t) vm_page_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    queue_iterate(&vm_page_queue_zf, m, vm_page_t, pageq)
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
            && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate(&vm_page_queue_inactive, m, vm_page_t, pageq)
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
            && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate(&vm_page_queue_active, m, vm_page_t, pageq)
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
            && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // Pull wired pages out of the pageable bitmap: a clear bit in
    // page_list_wired marks a wired page the wired pass will save, so set
    // the corresponding bit in page_list to keep the pageable pass from
    // saving it again.

    uint32_t             bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap       = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
           pages, count_wire, count_active, count_inactive, count_zf,
           count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}

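/*
 * Walk the zero-fill, inactive and active queues and free every page whose
 * bit is set in page_list; these are the clean pages consider_discard()
 * approved, which were left out of the image and so must be dropped rather
 * than kept with stale contents.
 */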
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_active++;
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

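/*
 * Allocate the two page lists and "gobble" up to free_page_ratio percent
 * of all pages from the free list, waiting up to free_page_time ms for
 * pages to become available, so the image has fewer pages to save. Also
 * reports via encryptedswap whether the default pager encrypts swap.
 */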
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t  free_page_ratio,
                uint32_t  free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;

    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
           header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}

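/*
 * Undo hibernate_setup(): return the gobbled pages to the free list and
 * release both page lists.
 */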
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}