2 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
40 #include <IOKit/IOHibernatePrivate.h>
41 #include <vm/vm_page.h>
42 #include <vm/vm_pageout.h>
43 #include <vm/vm_purgeable_internal.h>
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
47 static vm_page_t hibernate_gobble_queue
;
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
52 hibernate_page_list_zero(hibernate_page_list_t
*list
)
55 hibernate_bitmap_t
* bitmap
;
57 bitmap
= &list
->bank_bitmap
[0];
58 for (bank
= 0; bank
< list
->bank_count
; bank
++)
62 bzero((void *) &bitmap
->bitmap
[0], bitmap
->bitmapwords
<< 2);
63 // set out-of-bound bits at end of bitmap.
64 last_bit
= ((bitmap
->last_page
- bitmap
->first_page
+ 1) & 31);
66 bitmap
->bitmap
[bitmap
->bitmapwords
- 1] = (0xFFFFFFFF >> last_bit
);
68 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
74 consider_discard(vm_page_t m
)
76 vm_object_t object
= NULL
;
78 boolean_t discard
= FALSE
;
83 panic("consider_discard: private");
85 if (!vm_object_lock_try(m
->object
))
90 if (m
->wire_count
!= 0)
95 if (m
->busy
|| !object
->alive
)
97 * Somebody is playing with this page.
101 if (m
->absent
|| m
->unusual
|| m
->error
)
103 * If it's unusual in anyway, ignore it
110 if (m
->laundry
|| m
->list_req_pending
)
115 refmod_state
= pmap_get_refmod(m
->phys_page
);
117 if (refmod_state
& VM_MEM_REFERENCED
)
119 if (refmod_state
& VM_MEM_MODIFIED
)
124 * If it's clean or purgeable we can discard the page on wakeup.
125 * JMM - consider purgeable (volatile or empty) objects here as well.
127 discard
= (!m
->dirty
)
128 || (VM_PURGABLE_VOLATILE
== object
->purgable
)
129 || (VM_PURGABLE_EMPTY
== m
->object
->purgable
);
134 vm_object_unlock(object
);
141 discard_page(vm_page_t m
)
143 if (m
->absent
|| m
->unusual
|| m
->error
)
145 * If it's unusual in anyway, ignore
149 if (m
->pmapped
== TRUE
)
151 __unused
int refmod_state
= pmap_disconnect(m
->phys_page
);
155 panic("discard_page(%p) laundry", m
);
157 panic("discard_page(%p) private", m
);
159 panic("discard_page(%p) fictitious", m
);
161 if (VM_PURGABLE_VOLATILE
== m
->object
->purgable
)
163 assert(m
->object
->objq
.next
!= NULL
&& m
->object
->objq
.prev
!= NULL
); /* object should be on a queue */
164 purgeable_q_t old_queue
=vm_purgeable_object_remove(m
->object
);
166 /* No need to lock page queue for token delete, hibernate_vm_unlock()
167 makes sure these locks are uncontended before sleep */
168 vm_purgeable_token_delete_first(old_queue
);
169 m
->object
->purgable
= VM_PURGABLE_EMPTY
;
/*
   Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
   pages known to VM to not need saving are subtracted.
   Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/
185 hibernate_page_list_setall(hibernate_page_list_t
* page_list
,
186 hibernate_page_list_t
* page_list_wired
,
189 uint64_t start
, end
, nsec
;
191 uint32_t pages
= page_list
->page_count
;
192 uint32_t count_zf
= 0, count_throttled
= 0, count_inactive
= 0, count_active
= 0;
193 uint32_t count_wire
= pages
;
194 uint32_t count_discard_active
= 0;
195 uint32_t count_discard_inactive
= 0;
196 uint32_t count_discard_purgeable
= 0;
199 hibernate_bitmap_t
* bitmap
;
200 hibernate_bitmap_t
* bitmap_wired
;
203 HIBLOG("hibernate_page_list_setall start\n");
205 clock_get_uptime(&start
);
207 hibernate_page_list_zero(page_list
);
208 hibernate_page_list_zero(page_list_wired
);
210 m
= (vm_page_t
) hibernate_gobble_queue
;
215 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
216 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
217 m
= (vm_page_t
) m
->pageq
.next
;
220 for( i
= 0; i
< vm_colors
; i
++ )
222 queue_iterate(&vm_page_queue_free
[i
],
229 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
230 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
234 queue_iterate(&vm_lopage_queue_free
,
241 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
242 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
245 queue_iterate( &vm_page_queue_throttled
,
250 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
251 && consider_discard(m
))
253 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
254 count_discard_inactive
++;
259 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
262 queue_iterate( &vm_page_queue_zf
,
267 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
268 && consider_discard(m
))
270 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
272 count_discard_purgeable
++;
274 count_discard_inactive
++;
279 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
282 queue_iterate( &vm_page_queue_inactive
,
287 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
288 && consider_discard(m
))
290 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
292 count_discard_purgeable
++;
294 count_discard_inactive
++;
299 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
302 queue_iterate( &vm_page_queue_active
,
307 if ((kIOHibernateModeDiscardCleanActive
& gIOHibernateMode
)
308 && consider_discard(m
))
310 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
312 count_discard_purgeable
++;
314 count_discard_active
++;
319 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
322 // pull wired from hibernate_bitmap
324 bitmap
= &page_list
->bank_bitmap
[0];
325 bitmap_wired
= &page_list_wired
->bank_bitmap
[0];
326 for (bank
= 0; bank
< page_list
->bank_count
; bank
++)
328 for (i
= 0; i
< bitmap
->bitmapwords
; i
++)
329 bitmap
->bitmap
[i
] = bitmap
->bitmap
[i
] | ~bitmap_wired
->bitmap
[i
];
330 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
331 bitmap_wired
= (hibernate_bitmap_t
*) &bitmap_wired
->bitmap
[bitmap_wired
->bitmapwords
];
334 // machine dependent adjustments
335 hibernate_page_list_setall_machine(page_list
, page_list_wired
, &pages
);
337 clock_get_uptime(&end
);
338 absolutetime_to_nanoseconds(end
- start
, &nsec
);
339 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec
/ 1000000ULL);
341 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, throt %d, could discard act %d inact %d purgeable %d\n",
342 pages
, count_wire
, count_active
, count_inactive
, count_zf
, count_throttled
,
343 count_discard_active
, count_discard_inactive
, count_discard_purgeable
);
345 *pagesOut
= pages
- count_discard_active
- count_discard_inactive
- count_discard_purgeable
;
349 hibernate_page_list_discard(hibernate_page_list_t
* page_list
)
351 uint64_t start
, end
, nsec
;
354 uint32_t count_discard_active
= 0;
355 uint32_t count_discard_inactive
= 0;
356 uint32_t count_discard_purgeable
= 0;
358 clock_get_uptime(&start
);
360 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
361 while (m
&& !queue_end(&vm_page_queue_zf
, (queue_entry_t
)m
))
363 next
= (vm_page_t
) m
->pageq
.next
;
364 if (hibernate_page_bittst(page_list
, m
->phys_page
))
367 count_discard_purgeable
++;
369 count_discard_inactive
++;
375 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
376 while (m
&& !queue_end(&vm_page_queue_inactive
, (queue_entry_t
)m
))
378 next
= (vm_page_t
) m
->pageq
.next
;
379 if (hibernate_page_bittst(page_list
, m
->phys_page
))
382 count_discard_purgeable
++;
384 count_discard_inactive
++;
390 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
391 while (m
&& !queue_end(&vm_page_queue_active
, (queue_entry_t
)m
))
393 next
= (vm_page_t
) m
->pageq
.next
;
394 if (hibernate_page_bittst(page_list
, m
->phys_page
))
397 count_discard_purgeable
++;
399 count_discard_active
++;
405 clock_get_uptime(&end
);
406 absolutetime_to_nanoseconds(end
- start
, &nsec
);
407 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d\n",
409 count_discard_active
, count_discard_inactive
, count_discard_purgeable
);
412 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
415 hibernate_setup(IOHibernateImageHeader
* header
,
416 uint32_t free_page_ratio
,
417 uint32_t free_page_time
,
418 hibernate_page_list_t
** page_list_ret
,
419 hibernate_page_list_t
** page_list_wired_ret
,
420 boolean_t
* encryptedswap
)
422 hibernate_page_list_t
* page_list
= NULL
;
423 hibernate_page_list_t
* page_list_wired
= NULL
;
425 uint32_t i
, gobble_count
;
427 *page_list_ret
= NULL
;
428 *page_list_wired_ret
= NULL
;
431 page_list
= hibernate_page_list_allocate();
433 return (KERN_RESOURCE_SHORTAGE
);
434 page_list_wired
= hibernate_page_list_allocate();
435 if (!page_list_wired
)
437 kfree(page_list
, page_list
->list_size
);
438 return (KERN_RESOURCE_SHORTAGE
);
441 *encryptedswap
= dp_encryption
;
443 // pages we could force out to reduce hibernate image size
444 gobble_count
= (((uint64_t) page_list
->page_count
) * ((uint64_t) free_page_ratio
)) / 100;
446 // no failures hereafter
448 hibernate_processor_setup(header
);
450 HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n",
451 header
->processorFlags
, gobble_count
);
455 uint64_t start
, end
, timeout
, nsec
;
456 clock_interval_to_deadline(free_page_time
, 1000 * 1000 /*ms*/, &timeout
);
457 clock_get_uptime(&start
);
459 for (i
= 0; i
< gobble_count
; i
++)
461 while (VM_PAGE_NULL
== (m
= vm_page_grab()))
463 clock_get_uptime(&end
);
473 m
->pageq
.next
= (queue_entry_t
) hibernate_gobble_queue
;
474 hibernate_gobble_queue
= m
;
477 clock_get_uptime(&end
);
478 absolutetime_to_nanoseconds(end
- start
, &nsec
);
479 HIBLOG("Gobbled %d pages, time: %qd ms\n", i
, nsec
/ 1000000ULL);
482 *page_list_ret
= page_list
;
483 *page_list_wired_ret
= page_list_wired
;
485 return (KERN_SUCCESS
);
489 hibernate_teardown(hibernate_page_list_t
* page_list
,
490 hibernate_page_list_t
* page_list_wired
)
495 m
= (vm_page_t
) hibernate_gobble_queue
;
498 next
= (vm_page_t
) m
->pageq
.next
;
503 hibernate_gobble_queue
= VM_PAGE_NULL
;
506 HIBLOG("Freed %d pages\n", count
);
509 kfree(page_list
, page_list
->list_size
);
511 kfree(page_list_wired
, page_list_wired
->list_size
);
513 return (KERN_SUCCESS
);