/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <kern/kalloc.h>
30 #include <kern/machine.h>
31 #include <kern/misc_protos.h>
32 #include <kern/thread.h>
33 #include <kern/processor.h>
34 #include <mach/machine.h>
35 #include <mach/processor_info.h>
36 #include <mach/mach_types.h>
37 #include <default_pager/default_pager_internal.h>
38 #include <IOKit/IOPlatformExpert.h>
41 #include <IOKit/IOHibernatePrivate.h>
42 #include <vm/vm_page.h>
43 #include <vm/vm_pageout.h>
45 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
47 static vm_page_t hibernate_gobble_queue
;
49 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
52 hibernate_page_list_zero(hibernate_page_list_t
*list
)
55 hibernate_bitmap_t
* bitmap
;
57 bitmap
= &list
->bank_bitmap
[0];
58 for (bank
= 0; bank
< list
->bank_count
; bank
++)
60 uint32_t bit
, last_bit
;
61 uint32_t *bitmap_word
;
63 bzero((void *) &bitmap
->bitmap
[0], bitmap
->bitmapwords
<< 2);
65 // Set out-of-bound bits at end of bitmap.
66 bitmap_word
= &bitmap
->bitmap
[bitmap
->bitmapwords
- 1];
67 last_bit
= ((bitmap
->last_page
- bitmap
->first_page
) & 31);
68 for (bit
= 31; bit
> last_bit
; bit
--) {
69 *bitmap_word
|= (0x80000000 >> bit
);
72 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
78 consider_discard(vm_page_t m
)
80 register vm_object_t object
= 0;
82 boolean_t discard
= FALSE
;
87 panic("consider_discard: private");
89 if (!vm_object_lock_try(m
->object
))
94 if (m
->wire_count
!= 0)
99 if (m
->busy
|| !object
->alive
)
101 * Somebody is playing with this page.
105 if (m
->absent
|| m
->unusual
|| m
->error
)
107 * If it's unusual in anyway, ignore it
116 refmod_state
= pmap_get_refmod(m
->phys_page
);
118 if (refmod_state
& VM_MEM_REFERENCED
)
120 if (refmod_state
& VM_MEM_MODIFIED
)
125 * If it's clean we can discard the page on wakeup.
132 vm_object_unlock(object
);
139 discard_page(vm_page_t m
)
141 if (m
->absent
|| m
->unusual
|| m
->error
)
143 * If it's unusual in anyway, ignore
149 int refmod_state
= pmap_disconnect(m
->phys_page
);
151 if (refmod_state
& VM_MEM_REFERENCED
)
153 if (refmod_state
& VM_MEM_MODIFIED
)
158 panic("discard_page(%p) dirty", m
);
160 panic("discard_page(%p) laundry", m
);
162 panic("discard_page(%p) private", m
);
164 panic("discard_page(%p) fictitious", m
);
/*
 * Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
 * pages known to VM to not need saving are subtracted.
 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
 */
176 hibernate_page_list_setall(hibernate_page_list_t
* page_list
,
177 hibernate_page_list_t
* page_list_wired
,
180 uint64_t start
, end
, nsec
;
182 uint32_t pages
= page_list
->page_count
;
183 uint32_t count_zf
= 0, count_inactive
= 0, count_active
= 0;
184 uint32_t count_wire
= pages
;
185 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
188 HIBLOG("hibernate_page_list_setall start\n");
190 clock_get_uptime(&start
);
192 hibernate_page_list_zero(page_list
);
193 hibernate_page_list_zero(page_list_wired
);
195 m
= (vm_page_t
) hibernate_gobble_queue
;
200 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
201 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
202 m
= (vm_page_t
) m
->pageq
.next
;
205 m
= (vm_page_t
) vm_page_queue_free
;
210 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
211 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
212 m
= (vm_page_t
) m
->pageq
.next
;
215 queue_iterate( &vm_page_queue_zf
,
220 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
221 && consider_discard(m
))
223 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
224 count_discard_inactive
++;
229 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
232 queue_iterate( &vm_page_queue_inactive
,
237 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
238 && consider_discard(m
))
240 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
241 count_discard_inactive
++;
246 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
249 queue_iterate( &vm_page_queue_active
,
254 if ((kIOHibernateModeDiscardCleanActive
& gIOHibernateMode
)
255 && consider_discard(m
))
257 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
258 count_discard_active
++;
263 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
266 // pull wired from hibernate_bitmap
269 hibernate_bitmap_t
* bitmap
;
270 hibernate_bitmap_t
* bitmap_wired
;
272 bitmap
= &page_list
->bank_bitmap
[0];
273 bitmap_wired
= &page_list_wired
->bank_bitmap
[0];
274 for (bank
= 0; bank
< page_list
->bank_count
; bank
++)
276 for (i
= 0; i
< bitmap
->bitmapwords
; i
++)
277 bitmap
->bitmap
[i
] = bitmap
->bitmap
[i
] | ~bitmap_wired
->bitmap
[i
];
278 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
279 bitmap_wired
= (hibernate_bitmap_t
*) &bitmap_wired
->bitmap
[bitmap_wired
->bitmapwords
];
282 // machine dependent adjustments
283 hibernate_page_list_setall_machine(page_list
, page_list_wired
, &pages
);
285 clock_get_uptime(&end
);
286 absolutetime_to_nanoseconds(end
- start
, &nsec
);
287 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec
/ 1000000ULL);
289 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
290 pages
, count_wire
, count_active
, count_inactive
, count_zf
,
291 count_discard_active
, count_discard_inactive
);
297 hibernate_page_list_discard(hibernate_page_list_t
* page_list
)
299 uint64_t start
, end
, nsec
;
302 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
304 clock_get_uptime(&start
);
306 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
307 while (m
&& !queue_end(&vm_page_queue_zf
, (queue_entry_t
)m
))
309 next
= (vm_page_t
) m
->pageq
.next
;
310 if (hibernate_page_bittst(page_list
, m
->phys_page
))
313 count_discard_inactive
++;
318 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
319 while (m
&& !queue_end(&vm_page_queue_inactive
, (queue_entry_t
)m
))
321 next
= (vm_page_t
) m
->pageq
.next
;
322 if (hibernate_page_bittst(page_list
, m
->phys_page
))
325 count_discard_inactive
++;
330 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
331 while (m
&& !queue_end(&vm_page_queue_active
, (queue_entry_t
)m
))
333 next
= (vm_page_t
) m
->pageq
.next
;
334 if (hibernate_page_bittst(page_list
, m
->phys_page
))
337 count_discard_active
++;
342 clock_get_uptime(&end
);
343 absolutetime_to_nanoseconds(end
- start
, &nsec
);
344 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
346 count_discard_active
, count_discard_inactive
);
349 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
352 hibernate_setup(IOHibernateImageHeader
* header
,
353 uint32_t free_page_ratio
,
354 uint32_t free_page_time
,
355 hibernate_page_list_t
** page_list_ret
,
356 hibernate_page_list_t
** page_list_wired_ret
,
357 boolean_t
* encryptedswap
)
359 hibernate_page_list_t
* page_list
= NULL
;
360 hibernate_page_list_t
* page_list_wired
= NULL
;
362 uint32_t i
, gobble_count
;
364 *page_list_ret
= NULL
;
365 *page_list_wired_ret
= NULL
;
368 page_list
= hibernate_page_list_allocate();
370 return (KERN_RESOURCE_SHORTAGE
);
371 page_list_wired
= hibernate_page_list_allocate();
372 if (!page_list_wired
)
374 kfree(page_list
, page_list
->list_size
);
375 return (KERN_RESOURCE_SHORTAGE
);
378 *encryptedswap
= dp_encryption
;
380 // pages we could force out to reduce hibernate image size
381 gobble_count
= (((uint64_t) page_list
->page_count
) * ((uint64_t) free_page_ratio
)) / 100;
383 // no failures hereafter
385 hibernate_processor_setup(header
);
387 HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
388 header
->processorFlags
, gobble_count
);
392 uint64_t start
, end
, timeout
, nsec
;
393 clock_interval_to_deadline(free_page_time
, 1000 * 1000 /*ms*/, &timeout
);
394 clock_get_uptime(&start
);
396 for (i
= 0; i
< gobble_count
; i
++)
398 while (VM_PAGE_NULL
== (m
= vm_page_grab()))
400 clock_get_uptime(&end
);
410 m
->pageq
.next
= (queue_entry_t
) hibernate_gobble_queue
;
411 hibernate_gobble_queue
= m
;
414 clock_get_uptime(&end
);
415 absolutetime_to_nanoseconds(end
- start
, &nsec
);
416 HIBLOG("Gobbled %d pages, time: %qd ms\n", i
, nsec
/ 1000000ULL);
419 *page_list_ret
= page_list
;
420 *page_list_wired_ret
= page_list_wired
;
422 return (KERN_SUCCESS
);
426 hibernate_teardown(hibernate_page_list_t
* page_list
,
427 hibernate_page_list_t
* page_list_wired
)
432 m
= (vm_page_t
) hibernate_gobble_queue
;
435 next
= (vm_page_t
) m
->pageq
.next
;
440 hibernate_gobble_queue
= VM_PAGE_NULL
;
443 HIBLOG("Freed %d pages\n", count
);
446 kfree(page_list
, page_list
->list_size
);
448 kfree(page_list_wired
, page_list_wired
->list_size
);
450 return (KERN_SUCCESS
);