/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
41 #include <IOKit/IOHibernatePrivate.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
47 static vm_page_t hibernate_gobble_queue
;
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
52 hibernate_page_list_zero(hibernate_page_list_t
*list
)
55 hibernate_bitmap_t
* bitmap
;
57 bitmap
= &list
->bank_bitmap
[0];
58 for (bank
= 0; bank
< list
->bank_count
; bank
++)
62 bzero((void *) &bitmap
->bitmap
[0], bitmap
->bitmapwords
<< 2);
63 // set out-of-bound bits at end of bitmap.
64 last_bit
= ((bitmap
->last_page
- bitmap
->first_page
+ 1) & 31);
66 bitmap
->bitmap
[bitmap
->bitmapwords
- 1] = (0xFFFFFFFF >> last_bit
);
68 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
74 consider_discard(vm_page_t m
)
76 register vm_object_t object
= 0;
78 boolean_t discard
= FALSE
;
83 panic("consider_discard: private");
85 if (!vm_object_lock_try(m
->object
))
90 if (m
->wire_count
!= 0)
95 if (m
->busy
|| !object
->alive
)
97 * Somebody is playing with this page.
101 if (m
->absent
|| m
->unusual
|| m
->error
)
103 * If it's unusual in anyway, ignore it
112 refmod_state
= pmap_get_refmod(m
->phys_page
);
114 if (refmod_state
& VM_MEM_REFERENCED
)
116 if (refmod_state
& VM_MEM_MODIFIED
)
121 * If it's clean we can discard the page on wakeup.
128 vm_object_unlock(object
);
135 discard_page(vm_page_t m
)
137 if (m
->absent
|| m
->unusual
|| m
->error
)
139 * If it's unusual in anyway, ignore
145 int refmod_state
= pmap_disconnect(m
->phys_page
);
147 if (refmod_state
& VM_MEM_REFERENCED
)
149 if (refmod_state
& VM_MEM_MODIFIED
)
154 panic("discard_page(%p) dirty", m
);
156 panic("discard_page(%p) laundry", m
);
158 panic("discard_page(%p) private", m
);
160 panic("discard_page(%p) fictitious", m
);
/*
   Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
   pages known to VM to not need saving are subtracted.
   Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/
170 extern vm_page_t vm_lopage_queue_free
;
173 hibernate_page_list_setall(hibernate_page_list_t
* page_list
,
174 hibernate_page_list_t
* page_list_wired
,
177 uint64_t start
, end
, nsec
;
179 uint32_t pages
= page_list
->page_count
;
180 uint32_t count_zf
= 0, count_inactive
= 0, count_active
= 0;
181 uint32_t count_wire
= pages
;
182 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
185 HIBLOG("hibernate_page_list_setall start\n");
187 clock_get_uptime(&start
);
189 hibernate_page_list_zero(page_list
);
190 hibernate_page_list_zero(page_list_wired
);
192 m
= (vm_page_t
) hibernate_gobble_queue
;
197 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
198 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
199 m
= (vm_page_t
) m
->pageq
.next
;
202 m
= (vm_page_t
) vm_page_queue_free
;
207 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
208 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
209 m
= (vm_page_t
) m
->pageq
.next
;
212 m
= (vm_page_t
) vm_lopage_queue_free
;
217 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
218 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
219 m
= (vm_page_t
) m
->pageq
.next
;
222 queue_iterate( &vm_page_queue_zf
,
227 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
228 && consider_discard(m
))
230 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
231 count_discard_inactive
++;
236 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
239 queue_iterate( &vm_page_queue_inactive
,
244 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
245 && consider_discard(m
))
247 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
248 count_discard_inactive
++;
253 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
256 queue_iterate( &vm_page_queue_active
,
261 if ((kIOHibernateModeDiscardCleanActive
& gIOHibernateMode
)
262 && consider_discard(m
))
264 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
265 count_discard_active
++;
270 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
273 // pull wired from hibernate_bitmap
276 hibernate_bitmap_t
* bitmap
;
277 hibernate_bitmap_t
* bitmap_wired
;
279 bitmap
= &page_list
->bank_bitmap
[0];
280 bitmap_wired
= &page_list_wired
->bank_bitmap
[0];
281 for (bank
= 0; bank
< page_list
->bank_count
; bank
++)
283 for (i
= 0; i
< bitmap
->bitmapwords
; i
++)
284 bitmap
->bitmap
[i
] = bitmap
->bitmap
[i
] | ~bitmap_wired
->bitmap
[i
];
285 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
286 bitmap_wired
= (hibernate_bitmap_t
*) &bitmap_wired
->bitmap
[bitmap_wired
->bitmapwords
];
289 // machine dependent adjustments
290 hibernate_page_list_setall_machine(page_list
, page_list_wired
, &pages
);
292 clock_get_uptime(&end
);
293 absolutetime_to_nanoseconds(end
- start
, &nsec
);
294 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec
/ 1000000ULL);
296 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
297 pages
, count_wire
, count_active
, count_inactive
, count_zf
,
298 count_discard_active
, count_discard_inactive
);
304 hibernate_page_list_discard(hibernate_page_list_t
* page_list
)
306 uint64_t start
, end
, nsec
;
309 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
311 clock_get_uptime(&start
);
313 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
314 while (m
&& !queue_end(&vm_page_queue_zf
, (queue_entry_t
)m
))
316 next
= (vm_page_t
) m
->pageq
.next
;
317 if (hibernate_page_bittst(page_list
, m
->phys_page
))
320 count_discard_inactive
++;
325 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
326 while (m
&& !queue_end(&vm_page_queue_inactive
, (queue_entry_t
)m
))
328 next
= (vm_page_t
) m
->pageq
.next
;
329 if (hibernate_page_bittst(page_list
, m
->phys_page
))
332 count_discard_inactive
++;
337 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
338 while (m
&& !queue_end(&vm_page_queue_active
, (queue_entry_t
)m
))
340 next
= (vm_page_t
) m
->pageq
.next
;
341 if (hibernate_page_bittst(page_list
, m
->phys_page
))
344 count_discard_active
++;
349 clock_get_uptime(&end
);
350 absolutetime_to_nanoseconds(end
- start
, &nsec
);
351 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
353 count_discard_active
, count_discard_inactive
);
356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
359 hibernate_setup(IOHibernateImageHeader
* header
,
360 uint32_t free_page_ratio
,
361 uint32_t free_page_time
,
362 hibernate_page_list_t
** page_list_ret
,
363 hibernate_page_list_t
** page_list_wired_ret
,
364 boolean_t
* encryptedswap
)
366 hibernate_page_list_t
* page_list
= NULL
;
367 hibernate_page_list_t
* page_list_wired
= NULL
;
369 uint32_t i
, gobble_count
;
371 *page_list_ret
= NULL
;
372 *page_list_wired_ret
= NULL
;
375 page_list
= hibernate_page_list_allocate();
377 return (KERN_RESOURCE_SHORTAGE
);
378 page_list_wired
= hibernate_page_list_allocate();
379 if (!page_list_wired
)
381 kfree(page_list
, page_list
->list_size
);
382 return (KERN_RESOURCE_SHORTAGE
);
385 *encryptedswap
= dp_encryption
;
387 // pages we could force out to reduce hibernate image size
388 gobble_count
= (((uint64_t) page_list
->page_count
) * ((uint64_t) free_page_ratio
)) / 100;
390 // no failures hereafter
392 hibernate_processor_setup(header
);
394 HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
395 header
->processorFlags
, gobble_count
);
399 uint64_t start
, end
, timeout
, nsec
;
400 clock_interval_to_deadline(free_page_time
, 1000 * 1000 /*ms*/, &timeout
);
401 clock_get_uptime(&start
);
403 for (i
= 0; i
< gobble_count
; i
++)
405 while (VM_PAGE_NULL
== (m
= vm_page_grab()))
407 clock_get_uptime(&end
);
417 m
->pageq
.next
= (queue_entry_t
) hibernate_gobble_queue
;
418 hibernate_gobble_queue
= m
;
421 clock_get_uptime(&end
);
422 absolutetime_to_nanoseconds(end
- start
, &nsec
);
423 HIBLOG("Gobbled %d pages, time: %qd ms\n", i
, nsec
/ 1000000ULL);
426 *page_list_ret
= page_list
;
427 *page_list_wired_ret
= page_list_wired
;
429 return (KERN_SUCCESS
);
433 hibernate_teardown(hibernate_page_list_t
* page_list
,
434 hibernate_page_list_t
* page_list_wired
)
439 m
= (vm_page_t
) hibernate_gobble_queue
;
442 next
= (vm_page_t
) m
->pageq
.next
;
447 hibernate_gobble_queue
= VM_PAGE_NULL
;
450 HIBLOG("Freed %d pages\n", count
);
453 kfree(page_list
, page_list
->list_size
);
455 kfree(page_list_wired
, page_list_wired
->list_size
);
457 return (KERN_SUCCESS
);