#include <mach/machine.h>
#include <mach/processor_info.h>
#include <mach/mach_types.h>
-#include <default_pager/default_pager_internal.h>
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_compressor.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+boolean_t need_to_unlock_decompressor = FALSE;
+
kern_return_t
-hibernate_setup(IOHibernateImageHeader * header,
-                uint32_t free_page_ratio,
-                uint32_t free_page_time,
-                boolean_t vmflush,
-                hibernate_page_list_t ** page_list_ret,
-                hibernate_page_list_t ** page_list_wired_ret,
-                boolean_t * encryptedswap)
+hibernate_alloc_page_lists(
+    hibernate_page_list_t ** page_list_ret,
+    hibernate_page_list_t ** page_list_wired_ret,
+    hibernate_page_list_t ** page_list_pal_ret)
{
+    kern_return_t retval = KERN_SUCCESS;
+
    hibernate_page_list_t * page_list = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
-    uint32_t gobble_count;
+    hibernate_page_list_t * page_list_pal = NULL;
-    *page_list_ret = NULL;
-    *page_list_wired_ret = NULL;
-
-    if (vmflush)
-        hibernate_flush_memory();
+    page_list = hibernate_page_list_allocate(TRUE);
+    if (!page_list) {
-    page_list = hibernate_page_list_allocate();
-    if (!page_list)
-        return (KERN_RESOURCE_SHORTAGE);
-    page_list_wired = hibernate_page_list_allocate();
+        retval = KERN_RESOURCE_SHORTAGE;
+        goto done;
+    }
+    page_list_wired = hibernate_page_list_allocate(FALSE);
    if (!page_list_wired)
    {
-        kfree(page_list, page_list->list_size);
-        return (KERN_RESOURCE_SHORTAGE);
+        kfree(page_list, page_list->list_size);
+
+        retval = KERN_RESOURCE_SHORTAGE;
+        goto done;
    }
+    page_list_pal = hibernate_page_list_allocate(FALSE);
+    if (!page_list_pal)
+    {
+        kfree(page_list, page_list->list_size);
+        kfree(page_list_wired, page_list_wired->list_size);
-    *encryptedswap = dp_encryption;
+        retval = KERN_RESOURCE_SHORTAGE;
+        goto done;
+    }
+    *page_list_ret = page_list;
+    *page_list_wired_ret = page_list_wired;
+    *page_list_pal_ret = page_list_pal;
-    // pages we could force out to reduce hibernate image size
-    gobble_count = (uint32_t)((((uint64_t) page_list->page_count) * ((uint64_t) free_page_ratio)) / 100);
+done:
+    return (retval);
-    // no failures hereafter
+}
-    hibernate_processor_setup(header);
+extern int sync_internal(void);
-    HIBLOG("hibernate_alloc_pages flags %08x, gobbling %d pages\n",
-        header->processorFlags, gobble_count);
+kern_return_t
+hibernate_setup(IOHibernateImageHeader * header,
+                boolean_t vmflush,
+                hibernate_page_list_t * page_list __unused,
+                hibernate_page_list_t * page_list_wired __unused,
+                hibernate_page_list_t * page_list_pal __unused)
+{
+    kern_return_t retval = KERN_SUCCESS;
+
+    hibernate_create_paddr_map();
-    if (gobble_count)
-        hibernate_gobble_pages(gobble_count, free_page_time);
+    hibernate_reset_stats();
+
+    if (vmflush && VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+
+        sync_internal();
-    *page_list_ret = page_list;
-    *page_list_wired_ret = page_list_wired;
+        vm_decompressor_lock();
+        need_to_unlock_decompressor = TRUE;
-    return (KERN_SUCCESS);
+        hibernate_flush_memory();
+    }
+
+    // no failures hereafter
+
+    hibernate_processor_setup(header);
+
+    HIBLOG("hibernate_alloc_pages act %d, inact %d, anon %d, throt %d, spec %d, wire %d, wireinit %d\n",
+        vm_page_active_count, vm_page_inactive_count,
+        vm_page_anonymous_count, vm_page_throttled_count, vm_page_speculative_count,
+        vm_page_wire_count, vm_page_wire_count_initial);
+
+    if (retval != KERN_SUCCESS && need_to_unlock_decompressor == TRUE) {
+        need_to_unlock_decompressor = FALSE;
+        vm_decompressor_unlock();
+    }
+    return (retval);
}
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
-                   hibernate_page_list_t * page_list_wired)
+                   hibernate_page_list_t * page_list_wired,
+                   hibernate_page_list_t * page_list_pal)
{
    hibernate_free_gobble_pages();
    kfree(page_list, page_list->list_size);
    if (page_list_wired)
        kfree(page_list_wired, page_list_wired->list_size);
-
+    if (page_list_pal)
+        kfree(page_list_pal, page_list_pal->list_size);
+
+    if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+        if (need_to_unlock_decompressor == TRUE) {
+            need_to_unlock_decompressor = FALSE;
+            vm_decompressor_unlock();
+        }
+        vm_compressor_delay_trim();
+    }
    return (KERN_SUCCESS);
}
-
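For orientation, here is a minimal caller sketch (not part of the change above) showing how the three split entry points might be sequenced after this refactor. The wrapper name and the placement of the teardown call are illustrative assumptions, not the actual IOKit hibernation driver code; it assumes the kernel headers included at the top of the file.

/* Hypothetical sketch only: hibernate_prepare_sketch() is an invented name. */
static kern_return_t
hibernate_prepare_sketch(IOHibernateImageHeader * header, boolean_t vmflush)
{
    hibernate_page_list_t * page_list       = NULL;
    hibernate_page_list_t * page_list_wired = NULL;
    hibernate_page_list_t * page_list_pal   = NULL;
    kern_return_t           kr;

    /* Allocation can fail, so it is now a separate first step. */
    kr = hibernate_alloc_page_lists(&page_list, &page_list_wired, &page_list_pal);
    if (kr != KERN_SUCCESS)
        return (kr);

    /* Optional VM flush (sync, decompressor lock, hibernate_flush_memory) and processor setup. */
    kr = hibernate_setup(header, vmflush, page_list, page_list_wired, page_list_pal);

    /* ... the hibernation image would be written here, using the page lists ... */

    /* Frees the three lists and, if the decompressor was locked, unlocks it. */
    hibernate_teardown(page_list, page_list_wired, page_list_pal);

    return (kr);
}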