/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

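// Pages temporarily taken off the free list by hibernate_setup() to shrink the
// hibernate image, chained through pageq.next and released in hibernate_teardown().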
static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static void
hibernate_page_list_zero(hibernate_page_list_t * list)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        uint32_t last_bit;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
        // set out-of-bound bits at end of bitmap.
        last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
        if (last_bit)
            bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}

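/*
 * Decide whether a pageable page can be dropped from the hibernate image:
 * only unwired, unreferenced, clean pages whose object can be locked
 * without blocking qualify.
 */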
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it.
             */
            break;

        if (!m->dirty)
        {
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}

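/*
 * Free a page chosen by consider_discard(). The page must still be clean
 * and ordinary; anything else is a fatal inconsistency.
 */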
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
        /*
         * If it's unusual in any way, ignore it.
         */
        return;

    {
        // drop any pmap mappings and pick up pending reference/modify state
        int refmod_state = pmap_disconnect(m->phys_page);

        if (refmod_state & VM_MEM_REFERENCED)
            m->reference = TRUE;
        if (refmod_state & VM_MEM_MODIFIED)
            m->dirty = TRUE;
    }

    if (m->dirty)
        panic("discard_page(%p) dirty", m);
    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    vm_page_free(m);
}

/*
 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
 pages known to VM to not need saving are subtracted.
 Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/

extern vm_page_t vm_lopage_queue_free;

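/*
 * Build the save bitmaps: mark free, gobbled and (optionally) discardable
 * pageable pages so that they are excluded from the hibernate image.
 */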
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;
    uint32_t  i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    // gobbled pages are left untouched across sleep and never need saving
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    m = (vm_page_t) vm_page_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    m = (vm_page_t) vm_lopage_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

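    // walk the zero-fill, inactive and active queues; when the corresponding
    // gIOHibernateMode bit is set, clean pages that pass consider_discard()
    // are marked in page_list so they can be dropped instead of saved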
    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap

    uint32_t             bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap       = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap       = (hibernate_bitmap_t *) &bitmap->bitmap      [bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
           pages, count_wire, count_active, count_inactive, count_zf,
           count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}

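/*
 * Free every page on the zero-fill, inactive and active queues whose bit is
 * set in page_list, i.e. the pages consider_discard() judged safe to drop.
 */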
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_active++;
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

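/*
 * Allocate the save bitmaps and, if requested, grab ("gobble") up to
 * free_page_ratio percent of the pages from the free list, waiting at most
 * free_page_time ms, so that the hibernate image is smaller.
 */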
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t free_page_ratio,
                uint32_t free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;

    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
           header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}

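/*
 * Undo hibernate_setup(): return the gobbled pages to the VM system and
 * release both page lists.
 */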
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}