/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>

#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static vm_page_t hibernate_gobble_queue;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

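/*
 * For reference, a sketch of the page-list layout the code below walks.  The
 * authoritative declarations live in IOKit/IOHibernatePrivate.h; the field
 * names are those accessed in this file, the exact layout is assumed:
 *
 *   hibernate_page_list_t:  list_size, page_count, bank_count, followed by
 *                           bank_count variable-length hibernate_bitmap_t
 *                           banks packed back to back in bank_bitmap[].
 *   hibernate_bitmap_t:     first_page, last_page, bitmapwords, followed by
 *                           bitmapwords 32-bit words of bitmap[], one bit per
 *                           physical page in [first_page, last_page].
 *
 * Because the banks are variable length, stepping to the next bank is done
 * with:  bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
 */
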
static void
hibernate_page_list_zero(hibernate_page_list_t * list)
{
    uint32_t             bank;
    hibernate_bitmap_t * bitmap;

    bitmap = &list->bank_bitmap[0];
    for (bank = 0; bank < list->bank_count; bank++)
    {
        uint32_t bit, last_bit;
        uint32_t *bitmap_word;

        bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);

        // Set out-of-bound bits at end of bitmap.
        bitmap_word = &bitmap->bitmap[bitmap->bitmapwords - 1];
        last_bit = ((bitmap->last_page - bitmap->first_page) & 31);
        for (bit = 31; bit > last_bit; bit--) {
            *bitmap_word |= (0x80000000 >> bit);
        }

        bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
    }
}

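/*
 * Worked example of the out-of-bound masking above (illustrative numbers):
 * if a bank holds 38 pages, (last_page - first_page) & 31 == 5, so offsets
 * 6..31 of the final bitmap word do not correspond to real pages.  The loop
 * ORs in (0x80000000 >> bit) for bit = 31 down to 6, pre-setting those slots
 * so they read as "does not need to be saved".
 */
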
static boolean_t
consider_discard(vm_page_t m)
{
    register vm_object_t object = 0;
    int                  refmod_state;
    boolean_t            discard = FALSE;

    do
    {
        if (m->private)
            panic("consider_discard: private");

        if (!vm_object_lock_try(m->object))
            break;

        object = m->object;

        if (m->wire_count != 0)
            break;
        if (m->precious)
            break;

        if (m->busy || !object->alive)
            /*
             * Somebody is playing with this page.
             */
            break;

        if (m->absent || m->unusual || m->error)
            /*
             * If it's unusual in any way, ignore it.
             */
            break;

        if (m->cleaning)
            break;

        if (!m->dirty)
        {
            refmod_state = pmap_get_refmod(m->phys_page);

            if (refmod_state & VM_MEM_REFERENCED)
                m->reference = TRUE;
            if (refmod_state & VM_MEM_MODIFIED)
                m->dirty = TRUE;
        }

        /*
         * If it's clean we can discard the page on wakeup.
         */
        discard = !m->dirty;
    }
    while (FALSE);

    if (object)
        vm_object_unlock(object);

    return (discard);
}

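/*
 * consider_discard() only nominates a page; the actual free happens later in
 * discard_page() below, reached from hibernate_page_list_discard().  That
 * path disconnects the page's mappings and panics if the page turns out to
 * have been dirtied since it was nominated.
 */
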
static void
discard_page(vm_page_t m)
{
    if (m->absent || m->unusual || m->error)
       /*
        * If it's unusual in any way, ignore.
        */
        return;

    int refmod_state = pmap_disconnect(m->phys_page);

    if (refmod_state & VM_MEM_REFERENCED)
        m->reference = TRUE;
    if (refmod_state & VM_MEM_MODIFIED)
        m->dirty = TRUE;

    // A page picked for discard must still be clean and ordinary here.
    if (m->dirty)
        panic("discard_page(%p) dirty", m);
    if (m->laundry)
        panic("discard_page(%p) laundry", m);
    if (m->private)
        panic("discard_page(%p) private", m);
    if (m->fictitious)
        panic("discard_page(%p) fictitious", m);

    vm_page_free(m);
}

/*
 Bits zero in the bitmaps => needs to be saved. All pages default to be saved,
 pages known to VM to not need saving are subtracted.
 Wired pages to be saved are present in page_list_wired, pageable in page_list.
*/

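/*
 * hibernate_page_bitset() and hibernate_page_bittst() are defined elsewhere;
 * the per-page mapping assumed here (consistent with the out-of-bound masking
 * in hibernate_page_list_zero above) is, for a physical page p in a bank:
 *
 *     word = (p - bank->first_page) >> 5;
 *     mask = 0x80000000 >> ((p - bank->first_page) & 31);
 *
 * so setting a page's bit marks it "does not need to be saved".
 */
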
void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
                           hibernate_page_list_t * page_list_wired,
                           uint32_t * pagesOut)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    uint32_t  pages = page_list->page_count;
    uint32_t  count_zf = 0, count_inactive = 0, count_active = 0;
    uint32_t  count_wire = pages;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;
    uint32_t  i;

    HIBLOG("hibernate_page_list_setall start\n");

    clock_get_uptime(&start);

    hibernate_page_list_zero(page_list);
    hibernate_page_list_zero(page_list_wired);

    // Gobbled pages are already known not to need saving.
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    // Free pages need not be saved either.
    m = (vm_page_t) vm_page_queue_free;
    while (m)
    {
        pages--;
        count_wire--;
        hibernate_page_bitset(page_list,       TRUE, m->phys_page);
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
        m = (vm_page_t) m->pageq.next;
    }

    queue_iterate( &vm_page_queue_zf,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_zf++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_inactive,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_inactive++;
        }
        else
            count_inactive++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    queue_iterate( &vm_page_queue_active,
                   m,
                   vm_page_t,
                   pageq )
    {
        if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
         && consider_discard(m))
        {
            hibernate_page_bitset(page_list, TRUE, m->phys_page);
            count_discard_active++;
        }
        else
            count_active++;
        count_wire--;
        hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
    }

    // pull wired from hibernate_bitmap:
    // every page not proven pageable above still has a zero bit in
    // page_list_wired, so mark those pages "don't save" in page_list and the
    // pageable and wired lists end up disjoint.
    uint32_t bank;
    hibernate_bitmap_t * bitmap;
    hibernate_bitmap_t * bitmap_wired;

    bitmap       = &page_list->bank_bitmap[0];
    bitmap_wired = &page_list_wired->bank_bitmap[0];
    for (bank = 0; bank < page_list->bank_count; bank++)
    {
        for (i = 0; i < bitmap->bitmapwords; i++)
            bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
        bitmap       = (hibernate_bitmap_t *) &bitmap      ->bitmap[bitmap->bitmapwords];
        bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
    }

    // machine dependent adjustments
    hibernate_page_list_setall_machine(page_list, page_list_wired, &pages);

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

    HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
           pages, count_wire, count_active, count_inactive, count_zf,
           count_discard_active, count_discard_inactive);

    *pagesOut = pages;
}

void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
    uint64_t  start, end, nsec;
    vm_page_t m;
    vm_page_t next;
    uint32_t  count_discard_active = 0, count_discard_inactive = 0;

    clock_get_uptime(&start);

    m = (vm_page_t) queue_first(&vm_page_queue_zf);
    while (m && !queue_end(&vm_page_queue_zf, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_inactive);
    while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_inactive++;
        }
        m = next;
    }

    m = (vm_page_t) queue_first(&vm_page_queue_active);
    while (m && !queue_end(&vm_page_queue_active, (queue_entry_t) m))
    {
        next = (vm_page_t) m->pageq.next;
        if (hibernate_page_bittst(page_list, m->phys_page))
        {
            discard_page(m);
            count_discard_active++;
        }
        m = next;
    }

    clock_get_uptime(&end);
    absolutetime_to_nanoseconds(end - start, &nsec);
    HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
           nsec / 1000000ULL,
           count_discard_active, count_discard_inactive);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

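/*
 * hibernate_setup() below optionally "gobbles" free pages to shrink the image:
 * gobble_count = page_count * free_page_ratio / 100.  As an illustrative
 * example (numbers not from this file): with page_count = 131072 (512 MB of
 * 4 KB pages) and free_page_ratio = 50, up to 65536 pages (256 MB) are pulled
 * from the free list, for roughly free_page_time ms at most, and parked on
 * hibernate_gobble_queue so they need not be written to the image.
 */
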
kern_return_t
hibernate_setup(IOHibernateImageHeader * header,
                uint32_t free_page_ratio,
                uint32_t free_page_time,
                hibernate_page_list_t ** page_list_ret,
                hibernate_page_list_t ** page_list_wired_ret,
                boolean_t * encryptedswap)
{
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    vm_page_t               m;
    uint32_t                i, gobble_count;

    *page_list_ret       = NULL;
    *page_list_wired_ret = NULL;

    page_list = hibernate_page_list_allocate();
    if (!page_list)
        return (KERN_RESOURCE_SHORTAGE);
    page_list_wired = hibernate_page_list_allocate();
    if (!page_list_wired)
    {
        kfree(page_list, page_list->list_size);
        return (KERN_RESOURCE_SHORTAGE);
    }

    *encryptedswap = dp_encryption;

    // pages we could force out to reduce hibernate image size
    gobble_count = (((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100;

    // no failures hereafter

    hibernate_processor_setup(header);

    HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
           header->processorFlags, gobble_count);

    if (gobble_count)
    {
        uint64_t start, end, timeout, nsec;
        clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
        clock_get_uptime(&start);

        for (i = 0; i < gobble_count; i++)
        {
            while (VM_PAGE_NULL == (m = vm_page_grab()))
            {
                clock_get_uptime(&end);
                if (end >= timeout)
                    break;
                VM_PAGE_WAIT();
            }
            if (!m)
                break;
            m->busy = FALSE;
            vm_page_gobble(m);

            m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
            hibernate_gobble_queue = m;
        }

        clock_get_uptime(&end);
        absolutetime_to_nanoseconds(end - start, &nsec);
        HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
    }

    *page_list_ret       = page_list;
    *page_list_wired_ret = page_list_wired;

    return (KERN_SUCCESS);
}

kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
                   hibernate_page_list_t * page_list_wired)
{
    vm_page_t m, next;
    uint32_t  count = 0;

    // Give back the pages gobbled in hibernate_setup().
    m = (vm_page_t) hibernate_gobble_queue;
    while (m)
    {
        next = (vm_page_t) m->pageq.next;
        vm_page_free(m);
        count++;
        m = next;
    }
    hibernate_gobble_queue = VM_PAGE_NULL;

    if (count)
        HIBLOG("Freed %d pages\n", count);

    if (page_list)
        kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);

    return (KERN_SUCCESS);
}