+ *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+ if (preflight && will_discard) {
+ *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+ /*
+ * We try to keep at most HIBERNATE_XPMAPPED_LIMIT xpmapped pages around in the hibernation
+ * image even if they are clean, so we need to size the hibernation image accordingly.
+ *
+ * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up, because 'dirty'
+ * xpmapped pages aren't distinguishable from other 'dirty' pages during preflight. So we
+ * might only see part of the xpmapped pages if we look at 'cd_found_xpmapped', which solely
+ * tracks clean xpmapped pages.
+ *
+ * Since these pages are all cleaned by the time we reach the post-preflight phase, we might
+ * see a much larger number in 'cd_found_xpmapped' than we did during preflight.
+ */
+ *pagesOut += HIBERNATE_XPMAPPED_LIMIT;
+ }
+
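+ /* done inspecting the VM queues */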
+ hibernation_vmqueues_inspection = FALSE;
+
+#if MACH_ASSERT || DEBUG
+ if (!preflight) {
+ if (vm_page_local_q) {
+ zpercpu_foreach(lq, vm_page_local_q) {
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
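+ /* drop the locks taken for the preflight inspection */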
+ if (preflight) {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+ }
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
+}
+
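+/*
+ * Walk the anonymous, speculative, inactive, active, and cleaned page
+ * queues and discard every page whose bit is set in the given bitmap,
+ * tallying the discards per queue for the timing log at the end.
+ */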
+void
+hibernate_page_list_discard(hibernate_page_list_t * page_list)
+{
+ uint64_t start, end, nsec;
+ vm_page_t m;
+ vm_page_t next;
+ uint32_t i;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_cleaned = 0;
+ uint32_t count_discard_speculative = 0;
+
+
+#if MACH_ASSERT || DEBUG
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ zpercpu_foreach(lq, vm_page_local_q) {
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
+ clock_get_uptime(&start);
+
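+ /*
+ * In each queue walk below, the next pointer is captured before
+ * hibernate_discard_page() unlinks the page from its queue.
+ */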
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
+ if (m->vmp_dirty) {
+ count_discard_purgeable++;
+ } else {
+ count_discard_inactive++;
+ }
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
+ assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
+ count_discard_speculative++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
+ if (m->vmp_dirty) {
+ count_discard_purgeable++;
+ } else {
+ count_discard_inactive++;
+ }
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+ /* XXX FBDP TODO: secluded queue */
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
+ assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
+ if (m->vmp_dirty) {
+ count_discard_purgeable++;
+ } else {
+ count_discard_active++;
+ }
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
+ if (m->vmp_dirty) {
+ count_discard_purgeable++;
+ } else {
+ count_discard_cleaned++;
+ }
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+#if MACH_ASSERT || DEBUG
+ if (vm_page_local_q) {
+ zpercpu_foreach(lq, vm_page_local_q) {
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+#endif /* MACH_ASSERT || DEBUG */
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
+ nsec / 1000000ULL,
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
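+/*
+ * Bookkeeping for tearing down and rebuilding the vm_pages[] array
+ * across hibernation; the compact index starts at (unsigned int)-1,
+ * i.e. "no valid index yet".
+ */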
+boolean_t hibernate_paddr_map_inited = FALSE;
+unsigned int hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t hibernate_rebuild_hash_list = NULL;
+
+unsigned int hibernate_teardown_found_tabled_pages = 0;
+unsigned int hibernate_teardown_found_created_pages = 0;
+unsigned int hibernate_teardown_found_free_pages = 0;
+unsigned int hibernate_teardown_vm_page_free_count;
+
+
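+/*
+ * Each ppnum_mapping records one physically contiguous run of pages:
+ * vm_pages[ppnm_sindx .. ppnm_eindx) have physical page numbers
+ * ppnm_base_paddr, ppnm_base_paddr + 1, and so on.
+ */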
+struct ppnum_mapping {
+ struct ppnum_mapping *ppnm_next;
+ ppnum_t ppnm_base_paddr;
+ unsigned int ppnm_sindx;
+ unsigned int ppnm_eindx;
+};
+
+struct ppnum_mapping *ppnm_head;
+struct ppnum_mapping *ppnm_last_found = NULL;
+
+
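+/*
+ * Build the ppnum_mapping list in a single pass over vm_pages[],
+ * starting a new mapping whenever the physical page number breaks the
+ * current contiguous run. The trailing ppnm_eindx++ makes the end
+ * index of the final run exclusive, like the others.
+ */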
+void
+hibernate_create_paddr_map(void)
+{
+ unsigned int i;
+ ppnum_t next_ppnum_in_run = 0;
+ struct ppnum_mapping *ppnm = NULL;
+
+ if (hibernate_paddr_map_inited == FALSE) {
+ for (i = 0; i < vm_pages_count; i++) {
+ if (ppnm) {
+ ppnm->ppnm_eindx = i;
+ }
+
+ if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
+ ppnm = zalloc_permanent_type(struct ppnum_mapping);
+
+ ppnm->ppnm_next = ppnm_head;
+ ppnm_head = ppnm;
+
+ ppnm->ppnm_sindx = i;
+ ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
+ }
+ next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
+ }
+ ppnm->ppnm_eindx++;
+
+ hibernate_paddr_map_inited = TRUE;
+ }
+}
+
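+/*
+ * Translate a vm_pages[] index back to a physical page number, checking
+ * the cached ppnm_last_found mapping before scanning the list. Given the
+ * layout above, a match resolves to ppnm_base_paddr + (indx - ppnm_sindx).
+ */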
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+ struct ppnum_mapping *ppnm = NULL;
+
+ ppnm = ppnm_last_found;
+
+ if (ppnm) {
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ goto done;
+ }
+ }
+ for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {