+/*
+ Wait for the laundry on pageout queue q to drain, sleeping up to 5 seconds
+ per pass.  A timeout on the external queue is treated as non-fatal (returns 0),
+ since external pages can be skipped in the image; any other timeout returns 1.
+*/
+static int
+hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
+{
+ wait_result_t wait_result;
+
+ vm_page_lock_queues();
+
+ while ( !queue_empty(&q->pgo_pending) ) {
+
+ q->pgo_draining = TRUE;
+
+ assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC);
+
+ vm_page_unlock_queues();
+
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+ if (wait_result == THREAD_TIMED_OUT && !queue_empty(&q->pgo_pending)) {
+ hibernate_stats.hibernate_drain_timeout++;
+
+ if (q == &vm_pageout_queue_external)
+ return (0);
+
+ return (1);
+ }
+ vm_page_lock_queues();
+
+ hibernate_stats.hibernate_drained++;
+ }
+ vm_page_unlock_queues();
+
+ return (0);
+}
+
+
+boolean_t hibernate_skip_external = FALSE;
+
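+/*
+ Walk up to qcount pages from the head of queue q, pushing each dirty page
+ to its pageout queue via vm_pageout_cluster() and re-entering pages that
+ can't be cleaned at the tail of q.  Throttles against the internal and
+ external pageout queues; returns nonzero if the flush had to be aborted.
+*/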
+static int
+hibernate_flush_queue(queue_head_t *q, int qcount)
+{
+ vm_page_t m;
+ vm_object_t l_object = NULL;
+ vm_object_t m_object = NULL;
+ int refmod_state = 0;
+ int try_failed_count = 0;
+ int retval = 0;
+ int current_run = 0;
+ struct vm_pageout_queue *iq;
+ struct vm_pageout_queue *eq;
+ struct vm_pageout_queue *tq;
+
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, q, qcount, 0, 0, 0);
+
+ iq = &vm_pageout_queue_internal;
+ eq = &vm_pageout_queue_external;
+
+ vm_page_lock_queues();
+
+ while (qcount && !queue_empty(q)) {
+
+ if (current_run++ == 1000) {
+ if (hibernate_should_abort()) {
+ retval = 1;
+ break;
+ }
+ current_run = 0;
+ }
+
+ m = (vm_page_t) queue_first(q);
+ m_object = m->object;
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != l_object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ /*
+ * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * If the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run...
+ */
+ if ( !vm_object_lock_try_scan(m_object)) {
+
+ if (try_failed_count > 20) {
+ hibernate_stats.hibernate_queue_nolock++;
+
+ goto reenter_pg_on_q;
+ }
+ vm_pageout_scan_wants_object = m_object;
+
+ vm_page_unlock_queues();
+ mutex_pause(try_failed_count++);
+ vm_page_lock_queues();
+
+ hibernate_stats.hibernate_queue_paused++;
+ continue;
+ } else {
+ l_object = m_object;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+ }
+ if ( !m_object->alive || m->encrypted_cleaning || m->cleaning || m->laundry || m->busy || m->absent || m->error) {
+ /*
+ * page is not to be cleaned
+ * put it back on the tail of its queue
+ */
+ if (m->cleaning)
+ hibernate_stats.hibernate_skipped_cleaning++;
+ else
+ hibernate_stats.hibernate_skipped_transient++;
+
+ goto reenter_pg_on_q;
+ }
+ if (m_object->copy == VM_OBJECT_NULL) {
+ if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
+ /*
+ * let the normal hibernate image path
+ * deal with these
+ */
+ goto reenter_pg_on_q;
+ }
+ }
+ if ( !m->dirty && m->pmapped) {
+ refmod_state = pmap_get_refmod(m->phys_page);
+
+ if ((refmod_state & VM_MEM_MODIFIED)) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ } else
+ refmod_state = 0;
+
+ if ( !m->dirty) {
+ /*
+ * page is not to be cleaned
+ * put it back on the tail of its queue
+ */
+ if (m->precious)
+ hibernate_stats.hibernate_skipped_precious++;
+
+ goto reenter_pg_on_q;
+ }
+
+ if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+ hibernate_stats.hibernate_skipped_external++;
+
+ goto reenter_pg_on_q;
+ }
+ tq = NULL;
+
+ if (m_object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ tq = iq;
+ } else if (VM_PAGE_Q_THROTTLED(eq))
+ tq = eq;
+
+ if (tq != NULL) {
+ wait_result_t wait_result;
+ int wait_count = 5;
+
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ while (retval == 0) {
+
+ tq->pgo_throttled = TRUE;
+
+ assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
+
+ vm_page_unlock_queues();
+
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+ vm_page_lock_queues();
+
+ if (wait_result != THREAD_TIMED_OUT)
+ break;
+ if (!VM_PAGE_Q_THROTTLED(tq))
+ break;
+
+ if (hibernate_should_abort())
+ retval = 1;
+
+ if (--wait_count == 0) {
+
+ hibernate_stats.hibernate_throttle_timeout++;
+
+ if (tq == eq) {
+ hibernate_skip_external = TRUE;
+ break;
+ }
+ retval = 1;
+ }
+ }
+ if (retval)
+ break;
+
+ hibernate_stats.hibernate_throttled++;
+
+ continue;
+ }
+ /*
+ * we've already factored out pages in the laundry which
+ * means this page can't be on the pageout queue so it's
+ * safe to do the VM_PAGE_QUEUES_REMOVE
+ */
+ assert(!m->pageout_queue);
+
+ VM_PAGE_QUEUES_REMOVE(m);
+
+ if (COMPRESSED_PAGER_IS_ACTIVE)
+ pmap_disconnect(m->phys_page);
+
+ vm_pageout_cluster(m, FALSE);
+
+ hibernate_stats.hibernate_found_dirty++;
+
+ goto next_pg;
+
+reenter_pg_on_q:
+ queue_remove(q, m, vm_page_t, pageq);
+ queue_enter(q, m, vm_page_t, pageq);
+
+ hibernate_stats.hibernate_reentered_on_q++;
+next_pg:
+ hibernate_stats.hibernate_considered++;
+
+ qcount--;
+ try_failed_count = 0;
+ }
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
+
+ return (retval);
+}
+
+
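+/*
+ Flush the dirty pages on the speculative, inactive, anonymous, cleaned
+ and active queues in turn, draining the pageout queues along the way.
+ Returns nonzero as soon as any stage gives up.
+*/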
+static int
+hibernate_flush_dirty_pages(int pass)
+{
+ struct vm_speculative_age_q *aq;
+ uint32_t i;
+
+ bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++)
+ vm_page_reactivate_local(i, TRUE, FALSE);
+ }
+
+ for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
+ int qcount;
+ vm_page_t m;
+
+ aq = &vm_page_queue_speculative[i];
+
+ if (queue_empty(&aq->age_q))
+ continue;
+ qcount = 0;
+
+ vm_page_lockspin_queues();
+
+ queue_iterate(&aq->age_q,
+ m,
+ vm_page_t,
+ pageq)
+ {
+ qcount++;
+ }
+ vm_page_unlock_queues();
+
+ if (qcount) {
+ if (hibernate_flush_queue(&aq->age_q, qcount))
+ return (1);
+ }
+ }
+ if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
+ return (1);
+ if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
+ return (1);
+ if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
+ return (1);
+ if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
+ return (1);
+
+ if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+ vm_compressor_record_warmup_start();
+
+ if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+ if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+ if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (COMPRESSED_PAGER_IS_ACTIVE && pass == 1)
+ vm_compressor_record_warmup_end();
+
+ if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+ return (1);
+
+ return (0);
+}
+
+
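+/*
+ Top-level cleaning pass run before the hibernation image is written:
+ flush dirty pages (a second pass plus vm_compressor_flush() when the
+ compressed pager is active), then collect the buffer cache and garbage
+ collect the zone allocator to give back wired pages.
+*/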
+int
+hibernate_flush_memory()
+{
+ int retval;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
+
+ hibernate_cleaning_in_progress = TRUE;
+ hibernate_skip_external = FALSE;
+
+ if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+ if (COMPRESSED_PAGER_IS_ACTIVE) {
+
+ if ((retval = hibernate_flush_dirty_pages(2)) == 0) {
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+
+ vm_compressor_flush();
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+ }
+ }
+ if (retval == 0 && consider_buffer_cache_collect != NULL) {
+ unsigned int orig_wire_count;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ orig_wire_count = vm_page_wire_count;
+
+ (void)(*consider_buffer_cache_collect)(1);
+ consider_zone_gc(TRUE);
+
+ HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
+ }
+ }
+ hibernate_cleaning_in_progress = FALSE;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
+
+ if (retval && COMPRESSED_PAGER_IS_ACTIVE)
+ HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
+ HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
+ hibernate_stats.hibernate_considered,
+ hibernate_stats.hibernate_reentered_on_q,
+ hibernate_stats.hibernate_found_dirty);
+ HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
+ hibernate_stats.hibernate_skipped_cleaning,
+ hibernate_stats.hibernate_skipped_transient,
+ hibernate_stats.hibernate_skipped_precious,
+ hibernate_stats.hibernate_skipped_external,
+ hibernate_stats.hibernate_queue_nolock);
+ HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
+ hibernate_stats.hibernate_queue_paused,
+ hibernate_stats.hibernate_throttled,
+ hibernate_stats.hibernate_throttle_timeout,
+ hibernate_stats.hibernate_drained,
+ hibernate_stats.hibernate_drain_timeout);
+
+ return (retval);
+}
+
+
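+/*
+ Zero every bank bitmap in the list (a zero bit means the page must be
+ saved), then flag the out-of-range bit positions at the end of each bank.
+*/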
+static void
+hibernate_page_list_zero(hibernate_page_list_t *list)
+{
+ uint32_t bank;
+ hibernate_bitmap_t * bitmap;
+
+ bitmap = &list->bank_bitmap[0];
+ for (bank = 0; bank < list->bank_count; bank++)
+ {
+ uint32_t last_bit;
+
+ bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
+ // Set the out-of-bound bits at the end of the bitmap so they are never
+ // treated as pages to save.  Pages are tracked MSB-first within each
+ // 32-bit word: e.g. a bank of 70 pages spans 3 words, last_bit = 70 & 31 = 6,
+ // and the last word becomes 0xFFFFFFFF >> 6, setting the 26 trailing
+ // positions that map to no real page.
+ last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
+ if (last_bit)
+ bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
+
+ bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
+ }
+}
+
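+/*
+ Grab up to gobble_count free pages, waiting up to free_page_time ms for
+ pages to become free, and park them on hibernate_gobble_queue so they are
+ excluded from the image until hibernate_free_gobble_pages() returns them.
+*/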
+void
+hibernate_gobble_pages(uint32_t gobble_count, uint32_t free_page_time)
+{
+ uint32_t i;
+ vm_page_t m;
+ uint64_t start, end, timeout, nsec;
+ clock_interval_to_deadline(free_page_time, 1000 * 1000 /*ms*/, &timeout);
+ clock_get_uptime(&start);
+
+ for (i = 0; i < gobble_count; i++)
+ {
+ while (VM_PAGE_NULL == (m = vm_page_grab()))
+ {
+ clock_get_uptime(&end);
+ if (end >= timeout)
+ break;
+ VM_PAGE_WAIT();
+ }
+ if (!m)
+ break;
+ m->busy = FALSE;
+ vm_page_gobble(m);
+
+ m->pageq.next = (queue_entry_t) hibernate_gobble_queue;
+ hibernate_gobble_queue = m;
+ }
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("Gobbled %d pages, time: %qd ms\n", i, nsec / 1000000ULL);
+}
+
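+/*
+ Return the pages parked on hibernate_gobble_queue to the free list.
+*/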
+void
+hibernate_free_gobble_pages(void)
+{
+ vm_page_t m, next;
+ uint32_t count = 0;
+
+ m = (vm_page_t) hibernate_gobble_queue;
+ while(m)
+ {
+ next = (vm_page_t) m->pageq.next;
+ vm_page_free(m);
+ count++;
+ m = next;
+ }
+ hibernate_gobble_queue = VM_PAGE_NULL;
+
+ if (count)
+ HIBLOG("Freed %d pages\n", count);
+}
+
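+/*
+ Decide whether page m could be discarded rather than saved in the image:
+ TRUE for a clean page, or any page of a volatile/empty purgeable object,
+ unless the page is wired, precious, busy, cleaning, in the laundry,
+ otherwise unusual, or xpmapped and recently referenced.  When preflight
+ is set, no statistics are recorded.
+*/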
+static boolean_t
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
+{
+ vm_object_t object = NULL;
+ int refmod_state;
+ boolean_t discard = FALSE;
+
+ do
+ {
+ if (m->private)
+ panic("hibernate_consider_discard: private");
+
+ if (!vm_object_lock_try(m->object)) {
+ if (!preflight) hibernate_stats.cd_lock_failed++;
+ break;
+ }
+ object = m->object;
+
+ if (VM_PAGE_WIRED(m)) {
+ if (!preflight) hibernate_stats.cd_found_wired++;
+ break;
+ }
+ if (m->precious) {
+ if (!preflight) hibernate_stats.cd_found_precious++;
+ break;
+ }
+ if (m->busy || !object->alive) {
+ /*
+ * Somebody is playing with this page.
+ */
+ if (!preflight) hibernate_stats.cd_found_busy++;
+ break;
+ }
+ if (m->absent || m->unusual || m->error) {
+ /*
+ * If it's unusual in any way, ignore it
+ */
+ if (!preflight) hibernate_stats.cd_found_unusual++;
+ break;
+ }
+ if (m->cleaning) {
+ if (!preflight) hibernate_stats.cd_found_cleaning++;
+ break;
+ }
+ if (m->laundry) {
+ if (!preflight) hibernate_stats.cd_found_laundry++;
+ break;
+ }
+ if (!m->dirty)
+ {
+ refmod_state = pmap_get_refmod(m->phys_page);
+
+ if (refmod_state & VM_MEM_REFERENCED)
+ m->reference = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ /*
+ * If it's clean or purgeable we can discard the page on wakeup.
+ */
+ discard = (!m->dirty)
+ || (VM_PURGABLE_VOLATILE == object->purgable)
+ || (VM_PURGABLE_EMPTY == object->purgable);
+
+
+ if (discard == FALSE) {
+ if (!preflight)
+ hibernate_stats.cd_found_dirty++;
+ } else if (m->xpmapped && m->reference) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ }
+ }
+ while (FALSE);
+
+ if (object)
+ vm_object_unlock(object);
+
+ return (discard);
+}
+
+
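+/*
+ Free a page that hibernate_consider_discard() approved, first emptying
+ its object if the object was volatile purgeable.
+*/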
+static void
+hibernate_discard_page(vm_page_t m)
+{
+ if (m->absent || m->unusual || m->error)
+ /*
+ * If it's unusual in any way, ignore it
+ */
+ return;
+
+#if DEBUG
+ vm_object_t object = m->object;
+ if (!vm_object_lock_try(m->object))
+ panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+ /* No need to lock page queue for token delete, hibernate_vm_unlock()
+ makes sure these locks are uncontended before sleep */
+#endif /* DEBUG */
+
+ if (m->pmapped == TRUE)
+ {
+ __unused int refmod_state = pmap_disconnect(m->phys_page);
+ }
+
+ if (m->laundry)
+ panic("hibernate_discard_page(%p) laundry", m);
+ if (m->private)
+ panic("hibernate_discard_page(%p) private", m);
+ if (m->fictitious)
+ panic("hibernate_discard_page(%p) fictitious", m);
+
+ if (VM_PURGABLE_VOLATILE == m->object->purgable)
+ {
+ /* object should be on a queue */
+ assert((m->object->objq.next != NULL) && (m->object->objq.prev != NULL));
+ purgeable_q_t old_queue = vm_purgeable_object_remove(m->object);
+ assert(old_queue);
+ if (m->object->purgeable_when_ripe) {
+ vm_purgeable_token_delete_first(old_queue);
+ }
+ m->object->purgable = VM_PURGABLE_EMPTY;
+ }
+
+ vm_page_free(m);
+
+#if DEBUG
+ vm_object_unlock(object);
+#endif /* DEBUG */
+}
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+void
+hibernate_vm_lock_queues(void)
+{
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+}
+
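+/*
+ Drop the locks taken by hibernate_vm_lock_queues()
+*/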
+void
+hibernate_vm_unlock_queues(void)
+{
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+}
+
+/*
+ Bits zero in the bitmaps => page needs to be saved.  All pages default to being
+ saved; pages known to the VM not to need saving are then subtracted.
+ Wired pages to be saved are present in page_list_wired, pageable ones in page_list.
+*/
+
+void
+hibernate_page_list_setall(hibernate_page_list_t * page_list,
+ hibernate_page_list_t * page_list_wired,
+ hibernate_page_list_t * page_list_pal,
+ boolean_t preflight,
+ boolean_t will_discard,
+ uint32_t * pagesOut)
+{
+ uint64_t start, end, nsec;
+ vm_page_t m;
+ vm_page_t next;
+ uint32_t pages = page_list->page_count;
+ uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+ uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
+ uint32_t count_wire = pages;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_cleaned = 0;
+ uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_speculative = 0;
+ uint32_t count_discard_vm_struct_pages = 0;
+ uint32_t i;
+ uint32_t bank;
+ hibernate_bitmap_t * bitmap;
+ hibernate_bitmap_t * bitmap_wired;
+ boolean_t discard_all;
+ boolean_t discard;
+
+ HIBLOG("hibernate_page_list_setall(preflight %d) start %p, %p\n", preflight, page_list, page_list_wired);
+
+ if (preflight) {
+ page_list = NULL;
+ page_list_wired = NULL;
+ page_list_pal = NULL;
+ discard_all = FALSE;
+ } else {
+ discard_all = will_discard;
+ }
+
+#if DEBUG
+ if (!preflight)
+ {
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+ }
+#endif /* DEBUG */
+
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
+
+ clock_get_uptime(&start);
+
+ if (!preflight) {
+ hibernate_page_list_zero(page_list);
+ hibernate_page_list_zero(page_list_wired);
+ hibernate_page_list_zero(page_list_pal);
+
+ hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+ hibernate_stats.cd_pages = pages;
+ }
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++)
+ vm_page_reactivate_local(i, TRUE, !preflight);
+ }
+
+ if (preflight) {
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ }
+
+ m = (vm_page_t) hibernate_gobble_queue;
+ while (m)
+ {
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ }
+ m = (vm_page_t) m->pageq.next;
+ }
+
+ if (!preflight) for( i = 0; i < real_ncpus; i++ )
+ {
+ if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
+ {
+ for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = (vm_page_t)m->pageq.next)
+ {
+ pages--;
+ count_wire--;
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+ hibernate_stats.cd_local_free++;
+ hibernate_stats.cd_total_free++;
+ }
+ }
+ }
+
+ for( i = 0; i < vm_colors; i++ )
+ {
+ queue_iterate(&vm_page_queue_free[i],
+ m,
+ vm_page_t,
+ pageq)
+ {
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+ hibernate_stats.cd_total_free++;
+ }
+ }
+ }
+
+ queue_iterate(&vm_lopage_queue_free,
+ m,
+ vm_page_t,
+ pageq)
+ {
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+ hibernate_stats.cd_total_free++;
+ }
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+ while (m && !queue_end(&vm_page_queue_throttled, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_throttled++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+ while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_anonymous++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_inactive++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_cleaned++;
+ discard = discard_all;
+ }
+ else
+ count_cleaned++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ {
+ m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ count_discard_speculative++;
+ discard = discard_all;
+ }
+ else
+ count_speculative++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ discard = discard_all;
+ }
+ else
+ count_active++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
+ {
+ count_compressor++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ }
+
+ if (preflight == FALSE && discard_all == TRUE) {
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ HIBLOG("hibernate_teardown started\n");
+ count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+ HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+ pages -= count_discard_vm_struct_pages;
+ count_wire -= count_discard_vm_struct_pages;
+
+ hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+ }
+
+ if (!preflight) {
+ // Pull the wired pages out of the pageable bitmap: a clear bit in
+ // page_list_wired marks a page that must be saved as wired, so the
+ // corresponding page_list bit is set to keep it out of the pageable image.
+ bitmap = &page_list->bank_bitmap[0];
+ bitmap_wired = &page_list_wired->bank_bitmap[0];
+ for (bank = 0; bank < page_list->bank_count; bank++)
+ {
+ for (i = 0; i < bitmap->bitmapwords; i++)
+ bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+ bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
+ bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+ }
+ }
+
+ // machine dependent adjustments
+ hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
+
+ if (!preflight) {
+ hibernate_stats.cd_count_wire = count_wire;
+ hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+ count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+ }
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
+
+ HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
+ pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+ discard_all ? "did" : "could",
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+ *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+ if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+#if DEBUG
+ if (!preflight)
+ {
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+ }
+#endif /* DEBUG */
+
+ if (preflight) {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+ }
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
+}
+
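+/*
+ Walk the page queues on wakeup and free every page whose bit was set in
+ page_list by hibernate_page_list_setall(), i.e. the pages it reported
+ as discardable.
+*/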
+void
+hibernate_page_list_discard(hibernate_page_list_t * page_list)
+{
+ uint64_t start, end, nsec;
+ vm_page_t m;
+ vm_page_t next;
+ uint32_t i;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_cleaned = 0;
+ uint32_t count_discard_speculative = 0;
+
+
+#if DEBUG
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+#endif /* DEBUG */
+
+ clock_get_uptime(&start);
+
+ m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
+ while (m && !queue_end(&vm_page_queue_anonymous, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ if (hibernate_page_bittst(page_list, m->phys_page))
+ {
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ {
+ m = (vm_page_t) queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !queue_end(&vm_page_queue_speculative[i].age_q, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ if (hibernate_page_bittst(page_list, m->phys_page))
+ {
+ count_discard_speculative++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ if (hibernate_page_bittst(page_list, m->phys_page))
+ {
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ if (hibernate_page_bittst(page_list, m->phys_page))
+ {
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ if (hibernate_page_bittst(page_list, m->phys_page))
+ {
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_cleaned++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+#if DEBUG
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+#endif /* DEBUG */
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
+ nsec / 1000000ULL,
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+boolean_t hibernate_paddr_map_inited = FALSE;
+boolean_t hibernate_rebuild_needed = FALSE;
+unsigned int hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t hibernate_rebuild_hash_list = NULL;
+
+unsigned int hibernate_teardown_found_tabled_pages = 0;
+unsigned int hibernate_teardown_found_created_pages = 0;
+unsigned int hibernate_teardown_found_free_pages = 0;
+unsigned int hibernate_teardown_vm_page_free_count;
+
+
+struct ppnum_mapping {
+ struct ppnum_mapping *ppnm_next;
+ ppnum_t ppnm_base_paddr;
+ unsigned int ppnm_sindx;
+ unsigned int ppnm_eindx;
+};
+
+struct ppnum_mapping *ppnm_head;
+struct ppnum_mapping *ppnm_last_found = NULL;
+
+
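+/*
+ Build a run-length map from indexes in the vm_pages array to physical
+ page numbers; each ppnum_mapping describes one physically contiguous run.
+*/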
+void
+hibernate_create_paddr_map()
+{
+ unsigned int i;
+ ppnum_t next_ppnum_in_run = 0;
+ struct ppnum_mapping *ppnm = NULL;
+
+ if (hibernate_paddr_map_inited == FALSE) {
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ if (ppnm)
+ ppnm->ppnm_eindx = i;
+
+ if (ppnm == NULL || vm_pages[i].phys_page != next_ppnum_in_run) {
+
+ ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+ ppnm->ppnm_next = ppnm_head;
+ ppnm_head = ppnm;
+
+ ppnm->ppnm_sindx = i;
+ ppnm->ppnm_base_paddr = vm_pages[i].phys_page;
+ }
+ next_ppnum_in_run = vm_pages[i].phys_page + 1;
+ }
+ ppnm->ppnm_eindx++;
+
+ hibernate_paddr_map_inited = TRUE;
+ }
+}
+
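+/*
+ Translate a vm_pages index back to a physical page number using the map
+ built by hibernate_create_paddr_map(), caching the last run matched.
+*/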
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+ struct ppnum_mapping *ppnm = NULL;
+
+ ppnm = ppnm_last_found;
+
+ if (ppnm) {
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+ goto done;
+ }
+ for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ ppnm_last_found = ppnm;
+ break;
+ }
+ }
+ if (ppnm == NULL)
+ panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+ return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
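+/*
+ Mark every page fully contained in [saddr, eaddr) as not needing to be
+ saved, in both bitmaps, and return the number of pages marked.
+*/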
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ addr64_t saddr_aligned;
+ addr64_t eaddr_aligned;
+ addr64_t addr;
+ ppnum_t paddr;
+ unsigned int mark_as_unneeded_pages = 0;
+
+ saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+ eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+ for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+ paddr = pmap_find_phys(kernel_pmap, addr);
+
+ assert(paddr);
+
+ hibernate_page_bitset(page_list, TRUE, paddr);
+ hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+ mark_as_unneeded_pages++;
+ }
+ return (mark_as_unneeded_pages);
+}
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ int hash_id;
+
+ assert(mem->hashed);
+ assert(mem->object);
+ assert(mem->offset != (vm_object_offset_t) -1);
+
+ /*
+ * Insert it into the object/offset hash table
+ */
+ hash_id = vm_page_hash(mem->object, mem->offset);
+ bucket = &vm_page_buckets[hash_id];
+
+ mem->next = bucket->pages;
+ bucket->pages = mem;
+}
+
+
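+/*
+ Reinitialize vm_pages[sindx..eindx) as free pages and put them back on
+ the color-hashed free queues.
+*/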
+void
+hibernate_free_range(int sindx, int eindx)
+{
+ vm_page_t mem;
+ unsigned int color;
+
+ while (sindx < eindx) {
+ mem = &vm_pages[sindx];
+
+ vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
+
+ mem->lopage = FALSE;
+ mem->free = TRUE;
+
+ color = mem->phys_page & vm_color_mask;
+ queue_enter_first(&vm_page_queue_free[color],
+ mem,
+ vm_page_t,
+ pageq);
+ vm_page_free_count++;
+
+ sindx++;
+ }
+}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
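+/*
+ Undo hibernate_teardown_vm_structs() on wakeup: move each compacted
+ vm_page_t back to its original slot, recreate the free ranges between
+ them, and rebuild the object/offset hash.
+*/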
+void
+hibernate_rebuild_vm_structs(void)
+{
+ int cindx, sindx, eindx;
+ vm_page_t mem, tmem, mem_next;
+ AbsoluteTime startTime, endTime;
+ uint64_t nsec;
+
+ if (hibernate_rebuild_needed == FALSE)
+ return;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ HIBLOG("hibernate_rebuild started\n");
+
+ clock_get_uptime(&startTime);
+
+ hibernate_rebuild_pmap_structs();
+
+ bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
+ eindx = vm_pages_count;
+
+ for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
+
+ mem = &vm_pages[cindx];
+ /*
+ * hibernate_teardown_vm_structs leaves the location where
+ * this vm_page_t must be located in "next".
+ */
+ tmem = mem->next;
+ mem->next = NULL;
+
+ sindx = (int)(tmem - &vm_pages[0]);
+
+ if (mem != tmem) {
+ /*
+ * this vm_page_t was moved by hibernate_teardown_vm_structs,
+ * so move it back to its real location
+ */
+ *tmem = *mem;
+ mem = tmem;
+ }
+ if (mem->hashed)
+ hibernate_hash_insert_page(mem);
+ /*
+ * the 'hole' between this vm_page_t and the previous
+ * vm_page_t we moved needs to be initialized as
+ * a range of free vm_page_t's
+ */
+ hibernate_free_range(sindx + 1, eindx);
+
+ eindx = sindx;
+ }
+ if (sindx)
+ hibernate_free_range(0, sindx);
+
+ assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
+
+ /*
+ * process the list of vm_page_t's that were entered in the hash,
+ * but were not located in the vm_pages array... these are
+ * vm_page_t's that were created on the fly (i.e. fictitious)
+ */
+ for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
+ mem_next = mem->next;
+
+ mem->next = NULL;
+ hibernate_hash_insert_page(mem);
+ }
+ hibernate_rebuild_hash_list = NULL;
+
+ clock_get_uptime(&endTime);
+ SUB_ABSOLUTETIME(&endTime, &startTime);
+ absolutetime_to_nanoseconds(endTime, &nsec);
+
+ HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
+
+ hibernate_rebuild_needed = FALSE;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
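+/*
+ Compact the vm_pages array by sliding in-use vm_page_t's down over free
+ ones, then mark the page hash buckets, the tail of vm_pages and any
+ unneeded pmap ranges as pages the image need not save.  Returns the
+ number of pages so marked; hibernate_rebuild_vm_structs() must be called
+ on wakeup.
+*/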
+uint32_t
+hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ unsigned int i;
+ unsigned int compact_target_indx;
+ vm_page_t mem, mem_next;
+ vm_page_bucket_t *bucket;
+ unsigned int mark_as_unneeded_pages = 0;
+ unsigned int unneeded_vm_page_bucket_pages = 0;
+ unsigned int unneeded_vm_pages_pages = 0;
+ unsigned int unneeded_pmap_pages = 0;
+ addr64_t start_of_unneeded = 0;
+ addr64_t end_of_unneeded = 0;
+
+
+ if (hibernate_should_abort())
+ return (0);
+
+ HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
+ vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
+ vm_page_cleaned_count, compressor_object->resident_page_count);
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+
+ bucket = &vm_page_buckets[i];
+
+ for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem_next) {
+ assert(mem->hashed);
+
+ mem_next = mem->next;
+
+ if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
+ mem->next = hibernate_rebuild_hash_list;
+ hibernate_rebuild_hash_list = mem;
+ }
+ }
+ }
+ unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
+
+ hibernate_teardown_vm_page_free_count = vm_page_free_count;
+
+ compact_target_indx = 0;
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ mem = &vm_pages[i];
+
+ if (mem->free) {
+ unsigned int color;
+
+ assert(mem->busy);
+ assert(!mem->lopage);
+
+ color = mem->phys_page & vm_color_mask;
+
+ queue_remove(&vm_page_queue_free[color],
+ mem,
+ vm_page_t,
+ pageq);
+ mem->pageq.next = NULL;
+ mem->pageq.prev = NULL;
+
+ vm_page_free_count--;
+
+ hibernate_teardown_found_free_pages++;
+
+ if ( !vm_pages[compact_target_indx].free)
+ compact_target_indx = i;
+ } else {
+ /*
+ * record this vm_page_t's original location;
+ * we need this even if it doesn't get moved,
+ * as an indicator to the rebuild function that
+ * we don't have to move it
+ */
+ mem->next = mem;
+
+ if (vm_pages[compact_target_indx].free) {
+ /*
+ * we've got a hole to fill, so
+ * move this vm_page_t to its new home
+ */
+ vm_pages[compact_target_indx] = *mem;
+ mem->free = TRUE;
+
+ hibernate_teardown_last_valid_compact_indx = compact_target_indx;
+ compact_target_indx++;
+ } else
+ hibernate_teardown_last_valid_compact_indx = i;
+ }
+ }
+ unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
+ (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_vm_pages_pages;
+
+ hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
+
+ if (start_of_unneeded) {
+ unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
+ mark_as_unneeded_pages += unneeded_pmap_pages;
+ }
+ HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
+
+ hibernate_rebuild_needed = TRUE;
+
+ return (mark_as_unneeded_pages);
+}
+
+
+#endif /* HIBERNATION */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <mach_vm_debug.h>
+#if MACH_VM_DEBUG
+
+#include <mach_debug/hash_info.h>
+#include <vm/vm_debug.h>
+
+/*
+ * Routine: vm_page_info
+ * Purpose:
+ * Return information about the global VP table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ unsigned int i;
+ lck_spin_t *bucket_lock;
+
+ if (vm_page_bucket_count < count)
+ count = vm_page_bucket_count;
+
+ for (i = 0; i < count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ unsigned int bucket_count = 0;
+ vm_page_t m;
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+
+ for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
+ bucket_count++;
+
+ lck_spin_unlock(bucket_lock);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return vm_page_bucket_count;
+}
+#endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
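+/*
+ * Routine: vm_page_buckets_check
+ * Purpose:
+ * Sanity check the page hash: every page in a bucket must be
+ * marked hashed and must hash to the bucket that holds it.
+ */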
+void
+vm_page_buckets_check(void)
+{
+ unsigned int i;
+ vm_page_t p;
+ unsigned int p_hash;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+
+ if (!vm_page_buckets_check_ready) {
+ return;
+ }
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed ||
+ hibernate_rebuild_hash_list) {
+ panic("BUCKET_CHECK: hibernation in progress: "
+ "rebuild_needed=%d rebuild_hash_list=%p\n",
+ hibernate_rebuild_needed,
+ hibernate_rebuild_hash_list);
+ }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+ char *cp;
+ for (cp = (char *) vm_page_fake_buckets_start;
+ cp < (char *) vm_page_fake_buckets_end;
+ cp++) {
+ if (*cp != 0x5a) {
+ panic("BUCKET_CHECK: corruption at %p in fake buckets "
+ "[0x%llx:0x%llx]\n",
+ cp,
+ vm_page_fake_buckets_start,
+ vm_page_fake_buckets_end);
+ }
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ bucket = &vm_page_buckets[i];
+ if (bucket->pages == VM_PAGE_NULL) {
+ continue;
+ }
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+ p = bucket->pages;
+ while (p != VM_PAGE_NULL) {
+ /* compute the hash first so the "not hashed" panic
+ * below doesn't print an uninitialized p_hash */
+ p_hash = vm_page_hash(p->object, p->offset);
+ if (!p->hashed) {
+ panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+ "hash %d in bucket %d at %p "
+ "is not hashed\n",
+ p, p->object, p->offset,
+ p_hash, i, bucket);
+ }
+ if (p_hash != i) {
+ panic("BUCKET_CHECK: corruption in bucket %d "
+ "at %p: page %p object %p offset 0x%llx "
+ "hash %d\n",
+ i, bucket, p, p->object, p->offset,
+ p_hash);
+ }
+ p = p->next;
+ }
+ lck_spin_unlock(bucket_lock);
+ }
+
+// printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */