+boolean_t hibernate_paddr_map_inited = FALSE;
+boolean_t hibernate_rebuild_needed = FALSE;
+unsigned int hibernate_teardown_last_valid_compact_indx = (unsigned int) -1;
+vm_page_t hibernate_rebuild_hash_list = NULL;
+
+unsigned int hibernate_teardown_found_tabled_pages = 0;
+unsigned int hibernate_teardown_found_created_pages = 0;
+unsigned int hibernate_teardown_found_free_pages = 0;
+unsigned int hibernate_teardown_vm_page_free_count;
+
+
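+/*
+ * Each ppnum_mapping records a run of physically contiguous pages:
+ * vm_pages[ppnm_sindx .. ppnm_eindx) correspond to physical page
+ * numbers starting at ppnm_base_paddr.  The list rooted at ppnm_head
+ * lets hibernate_lookup_paddr() recover the physical page number for
+ * a vm_pages index after the array has been compacted for hibernation.
+ */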
+struct ppnum_mapping {
+ struct ppnum_mapping *ppnm_next;
+ ppnum_t ppnm_base_paddr;
+ unsigned int ppnm_sindx;
+ unsigned int ppnm_eindx;
+};
+
+struct ppnum_mapping *ppnm_head;
+struct ppnum_mapping *ppnm_last_found = NULL;
+
+
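+/*
+ * Walk the vm_pages array once and record each run of physically
+ * contiguous pages as a ppnum_mapping.  ppnm_eindx is exclusive;
+ * the increment after the loop accounts for the last page of the
+ * final run.
+ */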
+void
+hibernate_create_paddr_map()
+{
+ unsigned int i;
+ ppnum_t next_ppnum_in_run = 0;
+ struct ppnum_mapping *ppnm = NULL;
+
+ if (hibernate_paddr_map_inited == FALSE) {
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ if (ppnm)
+ ppnm->ppnm_eindx = i;
+
+ if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) {
+
+ ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+ ppnm->ppnm_next = ppnm_head;
+ ppnm_head = ppnm;
+
+ ppnm->ppnm_sindx = i;
+ /*
+ * initialize the end index too, in case this run starts
+ * at the final vm_pages entry and never gets updated by
+ * a later iteration
+ */
+ ppnm->ppnm_eindx = i;
+ ppnm->ppnm_base_paddr = vm_pages[i].phys_page;
+ }
+ next_ppnum_in_run = vm_pages[i].phys_page + 1;
+ }
+ ppnm->ppnm_eindx++;
+
+ hibernate_paddr_map_inited = TRUE;
+ }
+}
+
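+/*
+ * Translate a vm_pages index into its physical page number using the
+ * ppnum_mapping list.  The mapping that satisfied the previous lookup
+ * is cached in ppnm_last_found and checked first.
+ */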
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+ struct ppnum_mapping *ppnm = NULL;
+
+ ppnm = ppnm_last_found;
+
+ if (ppnm) {
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+ goto done;
+ }
+ for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ ppnm_last_found = ppnm;
+ break;
+ }
+ }
+ if (ppnm == NULL)
+ panic("hibernate_lookup_paddr of %u failed\n", indx);
+done:
+ return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
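+/*
+ * Set the bits for every page wholly contained in the kernel virtual
+ * range [saddr, eaddr) in both hibernation bitmaps, marking those
+ * pages as unneeded, and return the number of pages marked.
+ */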
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ addr64_t saddr_aligned;
+ addr64_t eaddr_aligned;
+ addr64_t addr;
+ ppnum_t paddr;
+ unsigned int mark_as_unneeded_pages = 0;
+
+ saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+ eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+ for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+ paddr = pmap_find_phys(kernel_pmap, addr);
+
+ assert(paddr);
+
+ hibernate_page_bitset(page_list, TRUE, paddr);
+ hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+ mark_as_unneeded_pages++;
+ }
+ return (mark_as_unneeded_pages);
+}
+
+
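+/*
+ * Re-enter a hashed page into the vm_page bucket for its object/offset,
+ * pushing it onto the front of the bucket's chain.
+ */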
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ int hash_id;
+
+ assert(mem->hashed);
+ assert(mem->object);
+ assert(mem->offset != (vm_object_offset_t) -1);
+
+ /*
+ * Insert it into the object/offset hash table
+ */
+ hash_id = vm_page_hash(mem->object, mem->offset);
+ bucket = &vm_page_buckets[hash_id];
+
+ mem->next = bucket->pages;
+ bucket->pages = mem;
+}
+
+
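+/*
+ * Re-initialize vm_pages[sindx .. eindx) as free pages: each one gets
+ * its physical page number back via hibernate_lookup_paddr() and is
+ * queued at the head of the free queue for its color.
+ */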
+void
+hibernate_free_range(int sindx, int eindx)
+{
+ vm_page_t mem;
+ unsigned int color;
+
+ while (sindx < eindx) {
+ mem = &vm_pages[sindx];
+
+ vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+ mem->lopage = FALSE;
+ mem->free = TRUE;
+
+ color = mem->phys_page & vm_color_mask;
+ queue_enter_first(&vm_page_queue_free[color],
+ mem,
+ vm_page_t,
+ pageq);
+ vm_page_free_count++;
+
+ sindx++;
+ }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
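+/*
+ * Undo hibernate_teardown_vm_structs: move each compacted vm_page_t
+ * back to its original slot (recorded in its "next" field), re-hash
+ * the pages that belong in the object/offset buckets, and rebuild the
+ * free queues for the holes left behind.
+ */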
+void
+hibernate_rebuild_vm_structs(void)
+{
+ int cindx, sindx, eindx;
+ vm_page_t mem, tmem, mem_next;
+ AbsoluteTime startTime, endTime;
+ uint64_t nsec;
+
+ if (hibernate_rebuild_needed == FALSE)
+ return;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ HIBLOG("hibernate_rebuild started\n");
+
+ clock_get_uptime(&startTime);
+
+ hibernate_rebuild_pmap_structs();
+
+ bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+ eindx = vm_pages_count;
+
+ for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+
+ mem = &vm_pages[cindx];
+ /*
+ * hibernate_teardown_vm_structs leaves the address where this
+ * vm_page_t belongs in its "next" field.
+ */
+ tmem = mem->next;
+ mem->next = NULL;
+
+ sindx = (int)(tmem - &vm_pages[0]);
+
+ if (mem != tmem) {
+ /*
+ * this vm_page_t was moved by hibernate_teardown_vm_structs,
+ * so move it back to its real location
+ */
+ *tmem = *mem;
+ mem = tmem;
+ }
+ if (mem->hashed)
+ hibernate_hash_insert_page(mem);
+ /*
+ * the 'hole' between this vm_page_t and the previous
+ * vm_page_t we moved needs to be initialized as
+ * a range of free vm_page_t's
+ */
+ hibernate_free_range(sindx + 1, eindx);
+
+ eindx = sindx;
+ }
+ if (sindx)
+ hibernate_free_range(0, sindx);
+
+ assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+ /*
+ * process the list of vm_page_t's that were entered in the hash,
+ * but were not located in the vm_pages array... these are
+ * vm_page_t's that were created on the fly (i.e. fictitious)
+ */
+ for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+ mem_next = mem->next;
+
+ mem->next = NULL;
+ hibernate_hash_insert_page(mem);
+ }
+ hibernate_rebuild_hash_list = NULL;
+
+ clock_get_uptime(&endTime);
+ SUB_ABSOLUTETIME(&endTime, &startTime);
+ absolutetime_to_nanoseconds(endTime, &nsec);
+
+ HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+ hibernate_rebuild_needed = FALSE;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
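+/*
+ * Shrink the VM data structures ahead of writing the hibernation
+ * image: compact the in-use vm_page_t's to the front of the vm_pages
+ * array, pull the free pages off the free queues, and mark the hash
+ * buckets, the unused tail of vm_pages, and any pmap range reported
+ * by hibernate_teardown_pmap_structs as unneeded.  Returns the number
+ * of pages marked; hibernate_rebuild_vm_structs reverses all of this.
+ */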
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ unsigned int i;
+ unsigned int compact_target_indx;
+ vm_page_t mem, mem_next;
+ vm_page_bucket_t *bucket;
+ unsigned int mark_as_unneeded_pages = 0;
+ unsigned int unneeded_vm_page_bucket_pages = 0;
+ unsigned int unneeded_vm_pages_pages = 0;
+ unsigned int unneeded_pmap_pages = 0;
+ addr64_t start_of_unneeded = 0;
+ addr64_t end_of_unneeded = 0;
+
+
+ if (hibernate_should_abort())
+ return (0);
+
+ HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+ vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+ vm_page_cleaned_count, compressor_object->resident_page_count);
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+
+ bucket = &vm_page_buckets[i];
+
+ for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
+ assert(mem->hashed);
+
+ mem_next = mem->next;
+
+ if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+ mem->next = hibernate_rebuild_hash_list;
+ hibernate_rebuild_hash_list = mem;
+ }
+ }
+ }
+ unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+ hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+ compact_target_indx = 0;
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ mem = &vm_pages[i];
+
+ if (mem->free) {
+ unsigned int color;
+
+ assert(mem->busy);
+ assert(!mem->lopage);
+
+ color = mem->phys_page & vm_color_mask;
+
+ queue_remove(&vm_page_queue_free[color],
+ mem,
+ vm_page_t,
+ pageq);
+ mem->pageq.next = NULL;
+ mem->pageq.prev = NULL;
+
+ vm_page_free_count--;
+
+ hibernate_teardown_found_free_pages++;
+
+ if ( !vm_pages[compact_target_indx].free)
+ compact_target_indx = i;
+ } else {
+ /*
+ * record this vm_page_t's original location; we need this
+ * even if it doesn't get moved, so the rebuild function
+ * knows it doesn't have to move it back
+ */
+ mem->next = mem;
+
+ if (vm_pages[compact_target_indx].free) {
+ /*
+ * we've got a hole to fill, so
+ * move this vm_page_t to its new home
+ */
+ vm_pages[compact_target_indx] = *mem;
+ mem->free = TRUE;
+
+ hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+ compact_target_indx++;
+ } else
+ hibernate_teardown_last_valid_compact_indx = i;
+ }
+ }
+ unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+ (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+ hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+ if (start_of_unneeded) {
+ unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_pmap_pages;
+ }
+ HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+ hibernate_rebuild_needed = TRUE;
+
+ return (mark_as_unneeded_pages);
+}
+
+