/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
hibernate_page_list_zero(hibernate_page_list_t * list)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        uint32_t   bit, last_bit;
        uint32_t * bitmap_word;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);

        // Set out-of-bound bits at the end of the bitmap, so pages past
        // last_page are never treated as needing to be saved.
        bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1];
        last_bit    = ((bitmap->last_page - bitmap->first_page) & 31);
        for (bit = 31; bit > last_bit; bit--)
        {
            *bitmap_word |= (0x80000000 >> bit);
        }

        // banks are packed back to back; step over this bank's bitmap words
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}
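/*
 * Illustrative sketch only, not part of this file's implementation:
 * hibernate_page_bitset() and hibernate_page_bittst(), used below, are declared in
 * IOHibernatePrivate.h and defined elsewhere. The hypothetical helper here merely
 * shows how a physical page number would map onto the banked bitmap layout zeroed
 * above, assuming 32 pages per word with bit 31 corresponding to the first page of
 * each word (the same convention as the out-of-bound masking above).
 */
#if 0
static void
example_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap = &list->bank_bitmap[0];

    for (bank = 0; bank < list->bank_count; bank++)
    {
        if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
        {
            uint32_t word = (page - bitmap->first_page) >> 5;
            uint32_t mask = 0x80000000 >> ((page - bitmap->first_page) & 31);

            if (set)
                bitmap->bitmap[word] |= mask;
            else
                bitmap->bitmap[word] &= ~mask;
            break;
        }
        // banks are packed back to back; step over this bank's bitmap words
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}
#endif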
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it.
             */
            break;

        refmod_state = pmap_get_refmod(m->phys_page);

        if (refmod_state & VM_MEM_REFERENCED)
            m->reference = TRUE;
        if (refmod_state & VM_MEM_MODIFIED)
            m->dirty = TRUE;

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore it.
         */
        return;

    int refmod_state = pmap_disconnect(m->phys_page);

    if (refmod_state & VM_MEM_REFERENCED)
        m->reference = TRUE;
    if (refmod_state & VM_MEM_MODIFIED)
        m->dirty = TRUE;

    if (m->dirty)
        panic("discard_page(%p) dirty", m);
    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    vm_page_free(m);
}
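/*
 * Taken together: consider_discard() decides, while hibernate_page_list_setall()
 * below builds the page lists, whether a clean pageable page can be dropped instead
 * of saved; hibernate_page_list_discard() later walks the same paging queues and
 * calls discard_page() on exactly the pages that were marked that way.
 */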
/*
 Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
 pages known to VM to not need saving are subtracted.
 Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;
    uint32_t  i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    // Gobbled pages are not part of the image and not wired.
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    // Free pages likewise do not need to be saved.
    m = (vm_page_t) vm_page_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap
    {
        uint32_t             bank;
        hibernate_bitmap_t * bitmap;
        hibernate_bitmap_t * bitmap_wired;

        bitmap       = &page_list->bank_bitmap[0];
        bitmap_wired = &page_list_wired->bank_bitmap[0];
        for (bank = 0; bank < page_list->bank_count; bank++)
        {
            // Mark every non-wired page as "no save" in page_list, leaving its zero
            // bits only on pageable pages that must be written to the image.
            for (i = 0; i < bitmap->bitmapwords; i++)
                bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
            bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
            bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
        }
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
           pages, count_wire, count_active, count_inactive, count_zf,
           count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}
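/*
 * Illustrative example (not from the original source) of the "pull wired" merge in
 * hibernate_page_list_setall() above, for four pages A..D sharing one bitmap word,
 * where a 1 bit means "does not need saving via this list":
 *
 *   A free, B wired, C pageable and dirty, D pageable clean and discardable
 *
 *     page_list                  1 0 0 1
 *     page_list_wired            1 0 1 1
 *     ~page_list_wired           0 1 0 0
 *     page_list |= ~wired   ->   1 1 0 1
 *
 * Afterwards page_list keeps a zero bit only for C (pageable data to save), while
 * page_list_wired keeps its zero bit only for B (wired data to save), matching the
 * comment above hibernate_page_list_setall().
 */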
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_active++;
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t  free_page_ratio,
                uint32_t  free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;

    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
           header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            // Wait for a free page, but not past the free_page_time deadline.
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            // Keep the gobbled pages on a private list until hibernate_teardown().
            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
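/*
 * Illustrative arithmetic (example figures, not from the original source) for the
 * gobble_count computed in hibernate_setup() above: with 4 KB pages, a 512 MB machine
 * has page_count = 131072, so free_page_ratio = 30 gives
 * gobble_count = 131072 * 30 / 100 = 39321 pages (about 153 MB) that hibernate_setup()
 * tries to pull off the free list, waiting at most free_page_time ms, so those pages
 * never have to be written to the hibernation image.
 */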
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    // Return the gobbled pages to the VM.
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}