/*
 * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_purgeable_internal.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
    uint32_t             bank;
    uint32_t             last_bit;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);

        // set out-of-bound bits at end of bitmap: pages past last_page can
        // never need saving, so mark them as already handled.
        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
        if (last_bit)
            bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}
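/*
 * Worked example of the out-of-bound masking above (illustrative numbers,
 * not from the original source): suppose a bank covers 69 pages, so
 * last_bit = 69 & 31 = 5. The final word then holds 5 valid page bits at
 * the top and 27 padding bits below them, and 0xFFFFFFFF >> 5 == 0x07FFFFFF
 * sets exactly those 27 low bits, assuming the MSB-first bit layout used
 * throughout. Since a zero bit means "needs to be saved", the padding bits
 * can never be mistaken for savable pages.
 */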
static boolean_t
consider_discard(vm_page_t m)
{
    vm_object_t object = NULL;
    int         refmod_state;
    boolean_t   discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it.
             */
            break;

        if (m->laundry || m->list_req_pending)
            break;

        if (!m->dirty)
        {
            // pull the hardware reference/modify state into the page flags
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean or purgeable we can discard the page on wakeup.
         * JMM - consider purgeable (volatile or empty) objects here as well.
         */
        discard = (!m->dirty)
               || (VM_PURGABLE_VOLATILE == object->purgable)
               || (VM_PURGABLE_EMPTY == m->object->purgable);
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore.
         */
        return;

    if (m->pmapped == TRUE)
    {
        __unused int refmod_state = pmap_disconnect(m->phys_page);
    }

    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    if (VM_PURGABLE_VOLATILE == m->object->purgable)
    {
        assert(m->object->objq.next != NULL && m->object->objq.prev != NULL); /* object should be on a queue */
        purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
        assert(old_queue);
        /* No need to lock the page queue for the token delete:
           hibernate_vm_unlock() makes sure these locks are uncontended
           before sleep. */
        vm_purgeable_token_delete_first(old_queue);
        m->object->purgable = VM_PURGABLE_EMPTY;
    }

    vm_page_free(m);
}
/*
 Bits zero in the bitmaps => the page needs to be saved. All pages default to
 be saved; pages known to the VM not to need saving are subtracted out.
 Wired pages to be saved are present in page_list_wired, pageable pages in
 page_list.
*/
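/*
 * For orientation, a minimal sketch of the bit convention the calls below
 * rely on. This is an assumption based on the MSB-first masking in
 * hibernate_page_list_zero() above: the real hibernate_page_bitset() is
 * defined elsewhere in the tree, and sketch_page_bitset() here is a
 * hypothetical stand-in, not this file's code.
 */
__unused static void
sketch_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap = &list->bank_bitmap[0];

    for (bank = 0; bank < list->bank_count; bank++)
    {
        /* find the bank whose physical page range covers this page */
        if ((page >= bitmap->first_page) && (page <= bitmap->last_page))
        {
            page -= bitmap->first_page;
            if (set)        /* bit set: the page does NOT need saving */
                bitmap->bitmap[page >> 5] |= (0x80000000 >> (page & 31));
            else            /* bit clear: the page must go into the image */
                bitmap->bitmap[page >> 5] &= ~(0x80000000 >> (page & 31));
            break;
        }
        /* banks are laid out back to back after their bitmap words */
        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}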
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_throttled = 0;
    uint32_t  count_inactive = 0, count_active = 0, count_speculative = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active      = 0;
    uint32_t  count_discard_inactive    = 0;
    uint32_t  count_discard_purgeable   = 0;
    uint32_t  count_discard_speculative = 0;
    uint32_t  i;
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    HIBLOG("hibernate_page_list_setall start\n");
    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    // previously gobbled pages never need to be saved
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }
    for( i = 0; i < vm_colors; i++ )
    {
        queue_iterate(&vm_page_queue_free[i],
                      m,
                      vm_page_t,
                      pageq)
        {
            pages--;
            count_wire--;
            hibernate_page_bitset(page_list,       TRUE, m->phys_page);
            hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        }
    }

    queue_iterate(&vm_lopage_queue_free,
                  m,
                  vm_page_t,
                  pageq)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }
    queue_iterate( &vm_page_queue_throttled,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_throttled++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }
    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }
    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }
    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
    {
        queue_iterate(&vm_page_queue_speculative[i].age_q,
                      m,
                      vm_page_t,
                      pageq)
        {
            if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
             && consider_discard(m))
            {
                hibernate_page_bitset(page_list, TRUE, m->phys_page);
                count_discard_speculative++;
            }
            else
                count_speculative++;
            count_wire--;
            hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        }
    }
    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }
    // pull wired from hibernate_bitmap: a zero bit in page_list_wired marks
    // a wired page to be saved via that list, so set the corresponding bit
    // here and page_list keeps only the pageable pages to be saved.
    bitmap = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }
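    /*
     * Illustrative example of the merge above (hypothetical values, not
     * from the source): for one 32-bit word, suppose
     * bitmap->bitmap[i] == 0x80000000 (the first page is discardable) and
     * bitmap_wired->bitmap[i] == 0xFFFFFFFB (only the page at offset 29 in
     * this word is wired). Then ~bitmap_wired->bitmap[i] == 0x00000004, and
     * the OR yields 0x80000004: the wired page is now skipped by page_list
     * and gets saved through page_list_wired instead.
     */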
    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
    HIBLOG("pages %d, wire %d, act %d, inact %d, spec %d, zf %d, throt %d, could discard act %d inact %d purgeable %d spec %d\n",
           pages, count_wire, count_active, count_inactive, count_speculative, count_zf, count_throttled,
           count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);

    *pagesOut = pages - count_discard_active - count_discard_inactive
                      - count_discard_purgeable - count_discard_speculative;
}
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  i;
    uint32_t  count_discard_active      = 0;
    uint32_t  count_discard_inactive    = 0;
    uint32_t  count_discard_purgeable   = 0;
    uint32_t  count_discard_speculative = 0;

    clock_get_uptime(&start);
    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard_page(m);
        }
        m = next;
    }
    for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
    {
        m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
        while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
        {
            next = (vm_page_t) m->pageq.next;
            if (hibernate_page_bittst(page_list, m->phys_page))
            {
                count_discard_speculative++;
                discard_page(m);
            }
            m = next;
        }
    }
    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_inactive++;
            discard_page(m);
        }
        m = next;
    }
    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            if (m->dirty)
                count_discard_purgeable++;
            else
                count_discard_active++;
            discard_page(m);
        }
        m = next;
    }
    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t  free_page_ratio,
                uint32_t  free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;

    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;
    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;
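    // Illustrative arithmetic (hypothetical numbers, not from the source):
    // with page_count = 262144 (1 GiB of 4 KiB pages) and free_page_ratio
    // of 30, gobble_count = 262144 * 30 / 100 = 78643 pages, roughly
    // 307 MiB that the loop below will try to pull out of the free pool.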
    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n",
           header->processorFlags, gobble_count);
    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            // retry the grab until a page appears or the deadline passes
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }
    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    // return the pages gobbled during setup to the VM
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}