/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23 #include <kern/kalloc.h>
24 #include <kern/machine.h>
25 #include <kern/misc_protos.h>
26 #include <kern/thread.h>
27 #include <kern/processor.h>
28 #include <mach/machine.h>
29 #include <mach/processor_info.h>
30 #include <mach/mach_types.h>
31 #include <default_pager/default_pager_internal.h>
32 #include <IOKit/IOPlatformExpert.h>
35 #include <IOKit/IOHibernatePrivate.h>
36 #include <vm/vm_page.h>
37 #include <vm/vm_pageout.h>
39 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
41 static vm_page_t hibernate_gobble_queue
;
43 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46 hibernate_page_list_zero(hibernate_page_list_t
*list
)
49 hibernate_bitmap_t
* bitmap
;
51 bitmap
= &list
->bank_bitmap
[0];
52 for (bank
= 0; bank
< list
->bank_count
; bank
++)
54 uint32_t bit
, last_bit
;
55 uint32_t *bitmap_word
;
57 bzero((void *) &bitmap
->bitmap
[0], bitmap
->bitmapwords
<< 2);
59 // Set out-of-bound bits at end of bitmap.
60 bitmap_word
= &bitmap
->bitmap
[bitmap
->bitmapwords
- 1];
61 last_bit
= ((bitmap
->last_page
- bitmap
->first_page
) & 31);
62 for (bit
= 31; bit
> last_bit
; bit
--) {
63 *bitmap_word
|= (0x80000000 >> bit
);
66 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
72 consider_discard(vm_page_t m
)
74 register vm_object_t object
= 0;
76 boolean_t discard
= FALSE
;
81 panic("consider_discard: private");
83 if (!vm_object_lock_try(m
->object
))
88 if (m
->wire_count
!= 0)
93 if (m
->busy
|| !object
->alive
)
95 * Somebody is playing with this page.
99 if (m
->absent
|| m
->unusual
|| m
->error
)
101 * If it's unusual in anyway, ignore it
110 refmod_state
= pmap_get_refmod(m
->phys_page
);
112 if (refmod_state
& VM_MEM_REFERENCED
)
114 if (refmod_state
& VM_MEM_MODIFIED
)
119 * If it's clean we can discard the page on wakeup.
126 vm_object_unlock(object
);
133 discard_page(vm_page_t m
)
135 if (m
->absent
|| m
->unusual
|| m
->error
)
137 * If it's unusual in anyway, ignore
143 int refmod_state
= pmap_disconnect(m
->phys_page
);
145 if (refmod_state
& VM_MEM_REFERENCED
)
147 if (refmod_state
& VM_MEM_MODIFIED
)
152 panic("discard_page(%p) dirty", m
);
154 panic("discard_page(%p) laundry", m
);
156 panic("discard_page(%p) private", m
);
158 panic("discard_page(%p) fictitious", m
);
/*
 * Bits zero in the bitmaps => page needs to be saved. All pages default
 * to be saved; pages known to VM to not need saving are subtracted.
 * Wired pages to be saved are present in page_list_wired, pageable ones
 * in page_list.
 */
170 hibernate_page_list_setall(hibernate_page_list_t
* page_list
,
171 hibernate_page_list_t
* page_list_wired
,
174 uint64_t start
, end
, nsec
;
176 uint32_t pages
= page_list
->page_count
;
177 uint32_t count_zf
= 0, count_inactive
= 0, count_active
= 0;
178 uint32_t count_wire
= pages
;
179 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
182 HIBLOG("hibernate_page_list_setall start\n");
184 clock_get_uptime(&start
);
186 hibernate_page_list_zero(page_list
);
187 hibernate_page_list_zero(page_list_wired
);
189 m
= (vm_page_t
) hibernate_gobble_queue
;
194 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
195 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
196 m
= (vm_page_t
) m
->pageq
.next
;
199 m
= (vm_page_t
) vm_page_queue_free
;
204 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
205 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
206 m
= (vm_page_t
) m
->pageq
.next
;
209 queue_iterate( &vm_page_queue_zf
,
214 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
215 && consider_discard(m
))
217 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
218 count_discard_inactive
++;
223 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
226 queue_iterate( &vm_page_queue_inactive
,
231 if ((kIOHibernateModeDiscardCleanInactive
& gIOHibernateMode
)
232 && consider_discard(m
))
234 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
235 count_discard_inactive
++;
240 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
243 queue_iterate( &vm_page_queue_active
,
248 if ((kIOHibernateModeDiscardCleanActive
& gIOHibernateMode
)
249 && consider_discard(m
))
251 hibernate_page_bitset(page_list
, TRUE
, m
->phys_page
);
252 count_discard_active
++;
257 hibernate_page_bitset(page_list_wired
, TRUE
, m
->phys_page
);
260 // pull wired from hibernate_bitmap
263 hibernate_bitmap_t
* bitmap
;
264 hibernate_bitmap_t
* bitmap_wired
;
266 bitmap
= &page_list
->bank_bitmap
[0];
267 bitmap_wired
= &page_list_wired
->bank_bitmap
[0];
268 for (bank
= 0; bank
< page_list
->bank_count
; bank
++)
270 for (i
= 0; i
< bitmap
->bitmapwords
; i
++)
271 bitmap
->bitmap
[i
] = bitmap
->bitmap
[i
] | ~bitmap_wired
->bitmap
[i
];
272 bitmap
= (hibernate_bitmap_t
*) &bitmap
->bitmap
[bitmap
->bitmapwords
];
273 bitmap_wired
= (hibernate_bitmap_t
*) &bitmap_wired
->bitmap
[bitmap_wired
->bitmapwords
];
276 // machine dependent adjustments
277 hibernate_page_list_setall_machine(page_list
, page_list_wired
, &pages
);
279 clock_get_uptime(&end
);
280 absolutetime_to_nanoseconds(end
- start
, &nsec
);
281 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec
/ 1000000ULL);
283 HIBLOG("pages %d, wire %d, act %d, inact %d, zf %d, could discard act %d inact %d\n",
284 pages
, count_wire
, count_active
, count_inactive
, count_zf
,
285 count_discard_active
, count_discard_inactive
);
291 hibernate_page_list_discard(hibernate_page_list_t
* page_list
)
293 uint64_t start
, end
, nsec
;
296 uint32_t count_discard_active
= 0, count_discard_inactive
= 0;
298 clock_get_uptime(&start
);
300 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
301 while (m
&& !queue_end(&vm_page_queue_zf
, (queue_entry_t
)m
))
303 next
= (vm_page_t
) m
->pageq
.next
;
304 if (hibernate_page_bittst(page_list
, m
->phys_page
))
307 count_discard_inactive
++;
312 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
313 while (m
&& !queue_end(&vm_page_queue_inactive
, (queue_entry_t
)m
))
315 next
= (vm_page_t
) m
->pageq
.next
;
316 if (hibernate_page_bittst(page_list
, m
->phys_page
))
319 count_discard_inactive
++;
324 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
325 while (m
&& !queue_end(&vm_page_queue_active
, (queue_entry_t
)m
))
327 next
= (vm_page_t
) m
->pageq
.next
;
328 if (hibernate_page_bittst(page_list
, m
->phys_page
))
331 count_discard_active
++;
336 clock_get_uptime(&end
);
337 absolutetime_to_nanoseconds(end
- start
, &nsec
);
338 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d\n",
340 count_discard_active
, count_discard_inactive
);
343 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
346 hibernate_setup(IOHibernateImageHeader
* header
,
347 uint32_t free_page_ratio
,
348 uint32_t free_page_time
,
349 hibernate_page_list_t
** page_list_ret
,
350 hibernate_page_list_t
** page_list_wired_ret
,
351 boolean_t
* encryptedswap
)
353 hibernate_page_list_t
* page_list
= NULL
;
354 hibernate_page_list_t
* page_list_wired
= NULL
;
356 uint32_t i
, gobble_count
;
358 *page_list_ret
= NULL
;
359 *page_list_wired_ret
= NULL
;
362 page_list
= hibernate_page_list_allocate();
364 return (KERN_RESOURCE_SHORTAGE
);
365 page_list_wired
= hibernate_page_list_allocate();
366 if (!page_list_wired
)
368 kfree(page_list
, page_list
->list_size
);
369 return (KERN_RESOURCE_SHORTAGE
);
372 *encryptedswap
= dp_encryption
;
374 // pages we could force out to reduce hibernate image size
375 gobble_count
= (((uint64_t) page_list
->page_count
) * ((uint64_t) free_page_ratio
)) / 100;
377 // no failures hereafter
379 hibernate_processor_setup(header
);
381 HIBLOG("hibernate_alloc_pages flags %08lx, gobbling %d pages\n",
382 header
->processorFlags
, gobble_count
);
386 uint64_t start
, end
, timeout
, nsec
;
387 clock_interval_to_deadline(free_page_time
, 1000 * 1000 /*ms*/, &timeout
);
388 clock_get_uptime(&start
);
390 for (i
= 0; i
< gobble_count
; i
++)
392 while (VM_PAGE_NULL
== (m
= vm_page_grab()))
394 clock_get_uptime(&end
);
404 m
->pageq
.next
= (queue_entry_t
) hibernate_gobble_queue
;
405 hibernate_gobble_queue
= m
;
408 clock_get_uptime(&end
);
409 absolutetime_to_nanoseconds(end
- start
, &nsec
);
410 HIBLOG("Gobbled %d pages, time: %qd ms\n", i
, nsec
/ 1000000ULL);
413 *page_list_ret
= page_list
;
414 *page_list_wired_ret
= page_list_wired
;
416 return (KERN_SUCCESS
);
420 hibernate_teardown(hibernate_page_list_t
* page_list
,
421 hibernate_page_list_t
* page_list_wired
)
426 m
= (vm_page_t
) hibernate_gobble_queue
;
429 next
= (vm_page_t
) m
->pageq
.next
;
434 hibernate_gobble_queue
= VM_PAGE_NULL
;
437 HIBLOG("Freed %d pages\n", count
);
440 kfree(page_list
, page_list
->list_size
);
442 kfree(page_list_wired
, page_list_wired
->list_size
);
444 return (KERN_SUCCESS
);