/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>
#define KERNEL

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

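/*
 * Pages grabbed ("gobbled") by hibernate_setup() to shrink the hibernate
 * image; they are chained through their pageq.next links and released
 * again by hibernate_teardown().
 */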
static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        uint32_t last_bit;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
        // set out-of-bound bits at end of bitmap.
        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
        if (last_bit)
            bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}


static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it
             */
            break;

        if (m->cleaning)
            break;

        if (!m->dirty)
        {
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}


static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore
         */
        return;

    if (!m->no_isync)
    {
        int refmod_state = pmap_disconnect(m->phys_page);

        if (refmod_state & VM_MEM_REFERENCED)
            m->reference = TRUE;
        if (refmod_state & VM_MEM_MODIFIED)
            m->dirty = TRUE;
    }

    if (m->dirty)
        panic("discard_page(%p) dirty", m);
    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    vm_page_free(m);
}

/*
 * A zero bit in the bitmaps means the page needs to be saved.  All pages
 * default to being saved; pages known to the VM not to need saving are
 * subtracted.  Wired pages to be saved are tracked in page_list_wired,
 * pageable pages in page_list.
 */
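/*
 * Layout note (inferred from the code in this file): the page lists are
 * split into banks; each bank's bitmap covers physical pages
 * first_page..last_page with one bit per page, 32 bits per word, and the
 * unused tail bits of the last word pre-set to 1 by
 * hibernate_page_list_zero().  hibernate_page_bitset(list, TRUE, phys_page)
 * marks a page as not needing to be saved, and
 * hibernate_page_bittst(list, phys_page) tests that mark.
 */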
extern vm_page_t vm_lopage_queue_free;

void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;
    uint32_t  i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    m = (vm_page_t) vm_page_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    m = (vm_page_t) vm_lopage_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list, TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap

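    /*
     * page_list_wired has a bit set for every page that is NOT being
     * treated as wired (free, gobbled, or on a pageable queue).  ORing
     * page_list with its complement therefore marks every wired page as
     * "no save" in page_list, leaving wired pages to be saved only via
     * page_list_wired and pageable pages only via page_list.
     */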
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap       = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
           pages, count_wire, count_active, count_inactive, count_zf,
           count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}

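/*
 * Free the pages that consider_discard() deemed discardable during
 * hibernate_page_list_setall(): any page on the zero-fill, inactive or
 * active queues whose bit is set in page_list was left out of the image,
 * so its stale contents are simply released on wakeup.
 */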
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_active++;
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t free_page_ratio,
                uint32_t free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;


    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
           header->processorFlags, gobble_count);

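    /*
     * Grab up to gobble_count free pages and park them on the gobble
     * queue so they need not be written into the image; vm_page_grab()
     * is retried until an overall deadline of free_page_time ms expires.
     */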
    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}

kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}