+
+
+/*
+ * When TRUE, dirty pages belonging to external (file-backed) objects are no
+ * longer pushed to the external pageout queue; set by hibernate_flush_queue()
+ * once the external queue repeatedly fails to drain (see the wait_count
+ * timeout there), and cleared at the start of hibernate_flush_memory().
+ */
+boolean_t hibernate_skip_external = FALSE;
+
+/*
+ * hibernate_flush_queue:
+ * Walk up to 'qcount' pages starting at the head of page queue 'q' and hand
+ * each dirty page to the appropriate pageout path (vm_pageout_cluster) so it
+ * is cleaned before the hibernation image is written.  Pages that are clean,
+ * busy, or otherwise not flushable are re-entered on the queue.  Per-case
+ * counters are accumulated in hibernate_stats.
+ *
+ * Returns 0 on normal completion, 1 if the hibernate should be aborted
+ * (user abort detected, or a throttled pageout queue failed to drain).
+ *
+ * Called with no locks held; takes and drops the page-queues lock and
+ * individual object locks internally.
+ */
+static int
+hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
+{
+ vm_page_t m;
+ vm_object_t l_object = NULL;
+ vm_object_t m_object = NULL;
+ int refmod_state = 0;
+ int try_failed_count = 0;
+ int retval = 0;
+ int current_run = 0;
+ struct vm_pageout_queue *iq;
+ struct vm_pageout_queue *eq;
+ struct vm_pageout_queue *tq;
+
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
+ VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
+
+ iq = &vm_pageout_queue_internal;
+ eq = &vm_pageout_queue_external;
+
+ vm_page_lock_queues();
+
+ while (qcount && !vm_page_queue_empty(q)) {
+
+ /* every ~1000 pages, poll for a pending hibernate abort */
+ if (current_run++ == 1000) {
+ if (hibernate_should_abort()) {
+ retval = 1;
+ break;
+ }
+ current_run = 0;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(q);
+ m_object = VM_PAGE_OBJECT(m);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != l_object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+ /*
+ * Try to lock object; since we've alread got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run...
+ */
+ if ( !vm_object_lock_try_scan(m_object)) {
+
+ /* give up on this page after 21 failed lock attempts */
+ if (try_failed_count > 20) {
+ hibernate_stats.hibernate_queue_nolock++;
+
+ goto reenter_pg_on_q;
+ }
+
+ vm_page_unlock_queues();
+ mutex_pause(try_failed_count++);
+ vm_page_lock_queues();
+
+ hibernate_stats.hibernate_queue_paused++;
+ continue;
+ } else {
+ l_object = m_object;
+ }
+ }
+ if ( !m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error) {
+ /*
+ * page is not to be cleaned
+ * put it back on the head of its queue
+ */
+ if (m->vmp_cleaning)
+ hibernate_stats.hibernate_skipped_cleaning++;
+ else
+ hibernate_stats.hibernate_skipped_transient++;
+
+ goto reenter_pg_on_q;
+ }
+ if (m_object->copy == VM_OBJECT_NULL) {
+ if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
+ /*
+ * let the normal hibernate image path
+ * deal with these
+ */
+ goto reenter_pg_on_q;
+ }
+ }
+ /* pick up any modified-bit state the pmap is still holding */
+ if ( !m->vmp_dirty && m->vmp_pmapped) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if ((refmod_state & VM_MEM_MODIFIED)) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ } else
+ refmod_state = 0;
+
+ if ( !m->vmp_dirty) {
+ /*
+ * page is not to be cleaned
+ * put it back on the head of its queue
+ */
+ if (m->vmp_precious)
+ hibernate_stats.hibernate_skipped_precious++;
+
+ goto reenter_pg_on_q;
+ }
+
+ /* external pageout previously timed out - leave these for the image */
+ if (hibernate_skip_external == TRUE && !m_object->internal) {
+
+ hibernate_stats.hibernate_skipped_external++;
+
+ goto reenter_pg_on_q;
+ }
+ tq = NULL;
+
+ /* select the throttled pageout queue (if any) this page would target */
+ if (m_object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ tq = iq;
+ } else if (VM_PAGE_Q_THROTTLED(eq))
+ tq = eq;
+
+ if (tq != NULL) {
+ wait_result_t wait_result;
+ int wait_count = 5;
+
+ /* drop the object lock before sleeping on the laundry event */
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+
+ /* wait (up to 5 x 1000ms) for the target queue to drain */
+ while (retval == 0) {
+
+ tq->pgo_throttled = TRUE;
+
+ assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC);
+
+ vm_page_unlock_queues();
+
+ wait_result = thread_block(THREAD_CONTINUE_NULL);
+
+ vm_page_lock_queues();
+
+ if (wait_result != THREAD_TIMED_OUT)
+ break;
+ if (!VM_PAGE_Q_THROTTLED(tq))
+ break;
+
+ if (hibernate_should_abort())
+ retval = 1;
+
+ if (--wait_count == 0) {
+
+ hibernate_stats.hibernate_throttle_timeout++;
+
+ /*
+ * external queue won't drain: stop flushing
+ * external pages rather than aborting
+ */
+ if (tq == eq) {
+ hibernate_skip_external = TRUE;
+ break;
+ }
+ retval = 1;
+ }
+ }
+ if (retval)
+ break;
+
+ hibernate_stats.hibernate_throttled++;
+
+ /* re-evaluate the same page now that the queue has drained */
+ continue;
+ }
+ /*
+ * we've already factored out pages in the laundry which
+ * means this page can't be on the pageout queue so it's
+ * safe to do the vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
+
+ if (m_object->internal == TRUE)
+ pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
+
+ vm_pageout_cluster(m);
+
+ hibernate_stats.hibernate_found_dirty++;
+
+ goto next_pg;
+
+reenter_pg_on_q:
+ /*
+ * NOTE(review): vm_page_queue_enter appends, so the page lands at
+ * the tail (despite the "head of its queue" comments above), which
+ * is what lets the scan make forward progress — confirm intended.
+ */
+ vm_page_queue_remove(q, m, vm_page_t, vmp_pageq);
+ vm_page_queue_enter(q, m, vm_page_t, vmp_pageq);
+
+ hibernate_stats.hibernate_reentered_on_q++;
+next_pg:
+ hibernate_stats.hibernate_considered++;
+
+ qcount--;
+ try_failed_count = 0;
+ }
+ if (l_object != NULL) {
+ vm_object_unlock(l_object);
+ l_object = NULL;
+ }
+
+ vm_page_unlock_queues();
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
+
+ return (retval);
+}
+
+
+/*
+ * hibernate_flush_dirty_pages:
+ * Flush every pageable queue (speculative, inactive, anonymous, cleaned,
+ * then active) through hibernate_flush_queue(), draining the internal
+ * pageout queue between phases so the compressor catches up.  'pass' == 1
+ * brackets the active-queue flush with compressor warm-up recording.
+ *
+ * Returns 0 on success, 1 if any flush/drain step requested an abort.
+ */
+static int
+hibernate_flush_dirty_pages(int pass)
+{
+ struct vm_speculative_age_q *aq;
+ uint32_t i;
+
+ /* fold the per-cpu local queues back into the global queues first */
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++)
+ vm_page_reactivate_local(i, TRUE, FALSE);
+ }
+
+ for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
+ int qcount;
+ vm_page_t m;
+
+ aq = &vm_page_queue_speculative[i];
+
+ if (vm_page_queue_empty(&aq->age_q))
+ continue;
+ qcount = 0;
+
+ /* count this age queue under the lock; no global counter exists for it */
+ vm_page_lockspin_queues();
+
+ vm_page_queue_iterate(&aq->age_q,
+ m,
+ vm_page_t,
+ vmp_pageq)
+ {
+ qcount++;
+ }
+ vm_page_unlock_queues();
+
+ if (qcount) {
+ if (hibernate_flush_queue(&aq->age_q, qcount))
+ return (1);
+ }
+ }
+ if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count))
+ return (1);
+ /* XXX FBDP TODO: flush secluded queue */
+ if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count))
+ return (1);
+ if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count))
+ return (1);
+ if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal))
+ return (1);
+
+ if (pass == 1)
+ vm_compressor_record_warmup_start();
+
+ if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+ return (1);
+ }
+ if (pass == 1)
+ vm_compressor_record_warmup_end();
+
+ /* external drain is skipped once hibernate_skip_external has been set */
+ if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external))
+ return (1);
+
+ return (0);
+}
+
+
+/*
+ * hibernate_reset_stats:
+ * Zero all hibernation statistics counters before a new hibernate attempt.
+ */
+void
+hibernate_reset_stats()
+{
+ bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
+}
+
+
+/*
+ * hibernate_flush_memory:
+ * Top-level memory flush run before writing the hibernation image:
+ * flush all dirty pageable memory (hibernate_flush_dirty_pages), flush the
+ * compressor, then reclaim wired memory via the buffer-cache collector and
+ * zone GC.  Logs a summary of the hibernate_stats counters.
+ *
+ * Returns 0 on success, non-zero if the flush was aborted.
+ * Requires the compressor to be configured (asserted below).
+ */
+int
+hibernate_flush_memory()
+{
+ int retval;
+
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
+
+ hibernate_cleaning_in_progress = TRUE;
+ hibernate_skip_external = FALSE;
+
+ if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+
+ vm_compressor_flush();
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
+
+ /* optional hook registered by the BSD layer to shrink the buffer cache */
+ if (consider_buffer_cache_collect != NULL) {
+ unsigned int orig_wire_count;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+ orig_wire_count = vm_page_wire_count;
+
+ (void)(*consider_buffer_cache_collect)(1);
+ consider_zone_gc(FALSE);
+
+ HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
+ }
+ }
+ hibernate_cleaning_in_progress = FALSE;
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
+
+ if (retval)
+ HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
+
+
+ HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
+ hibernate_stats.hibernate_considered,
+ hibernate_stats.hibernate_reentered_on_q,
+ hibernate_stats.hibernate_found_dirty);
+ HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
+ hibernate_stats.hibernate_skipped_cleaning,
+ hibernate_stats.hibernate_skipped_transient,
+ hibernate_stats.hibernate_skipped_precious,
+ hibernate_stats.hibernate_skipped_external,
+ hibernate_stats.hibernate_queue_nolock);
+ HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
+ hibernate_stats.hibernate_queue_paused,
+ hibernate_stats.hibernate_throttled,
+ hibernate_stats.hibernate_throttle_timeout,
+ hibernate_stats.hibernate_drained,
+ hibernate_stats.hibernate_drain_timeout);
+
+ return (retval);
+}
+
+
+/*
+ * hibernate_page_list_zero:
+ * Clear every bank bitmap in 'list' (bit clear == page must be saved),
+ * then set the unused bits past each bank's last page so out-of-range
+ * pages are never treated as needing to be saved.  Banks are laid out
+ * back-to-back, each followed immediately by its variable-length bitmap.
+ */
+static void
+hibernate_page_list_zero(hibernate_page_list_t *list)
+{
+ uint32_t bank;
+ hibernate_bitmap_t * bitmap;
+
+ bitmap = &list->bank_bitmap[0];
+ for (bank = 0; bank < list->bank_count; bank++)
+ {
+ uint32_t last_bit;
+
+ bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
+ // set out-of-bound bits at end of bitmap.
+ last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
+ if (last_bit)
+ bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
+
+ /* advance to the next bank header, which follows this bank's bitmap words */
+ bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
+ }
+}
+
+/*
+ * hibernate_free_gobble_pages:
+ * Return all pages on the singly-linked (vmp_snext) hibernate gobble queue
+ * to the free list and empty the queue.  Logs the count freed, if any.
+ */
+void
+hibernate_free_gobble_pages(void)
+{
+ vm_page_t m, next;
+ uint32_t count = 0;
+
+ m = (vm_page_t) hibernate_gobble_queue;
+ while(m)
+ {
+ next = m->vmp_snext;
+ vm_page_free(m);
+ count++;
+ m = next;
+ }
+ hibernate_gobble_queue = VM_PAGE_NULL;
+
+ if (count)
+ HIBLOG("Freed %d pages\n", count);
+}
+
+/*
+ * hibernate_consider_discard:
+ * Decide whether page 'm' can be discarded on wakeup instead of being saved
+ * in the hibernation image.  A page is discardable if it is clean (after
+ * consulting the pmap modified bit) or belongs to a volatile/empty purgeable
+ * object, and is not wired, precious, busy, unusual, being cleaned, or in
+ * the laundry.  Recently-executed mapped code pages (xpmapped + referenced,
+ * external) are kept, up to HIBERNATE_XPMAPPED_LIMIT, to speed wake-up.
+ *
+ * 'preflight' suppresses the per-reason statistics updates.
+ * Returns TRUE if the page may be discarded.
+ * Called with the page-queues lock held; takes the page's object lock
+ * (try-lock only) for the duration of the check.
+ */
+static boolean_t
+hibernate_consider_discard(vm_page_t m, boolean_t preflight)
+{
+ vm_object_t object = NULL;
+ int refmod_state;
+ boolean_t discard = FALSE;
+
+ do
+ {
+ if (m->vmp_private)
+ panic("hibernate_consider_discard: private");
+
+ object = VM_PAGE_OBJECT(m);
+
+ if (!vm_object_lock_try(object)) {
+ /* NULL so the unlock below is skipped */
+ object = NULL;
+ if (!preflight) hibernate_stats.cd_lock_failed++;
+ break;
+ }
+ if (VM_PAGE_WIRED(m)) {
+ if (!preflight) hibernate_stats.cd_found_wired++;
+ break;
+ }
+ if (m->vmp_precious) {
+ if (!preflight) hibernate_stats.cd_found_precious++;
+ break;
+ }
+ if (m->vmp_busy || !object->alive) {
+ /*
+ * Somebody is playing with this page.
+ */
+ if (!preflight) hibernate_stats.cd_found_busy++;
+ break;
+ }
+ if (m->vmp_absent || m->vmp_unusual || m->vmp_error) {
+ /*
+ * If it's unusual in anyway, ignore it
+ */
+ if (!preflight) hibernate_stats.cd_found_unusual++;
+ break;
+ }
+ if (m->vmp_cleaning) {
+ if (!preflight) hibernate_stats.cd_found_cleaning++;
+ break;
+ }
+ if (m->vmp_laundry) {
+ if (!preflight) hibernate_stats.cd_found_laundry++;
+ break;
+ }
+ /* fold in any reference/modified state still held by the pmap */
+ if (!m->vmp_dirty)
+ {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (refmod_state & VM_MEM_REFERENCED)
+ m->vmp_reference = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ /*
+ * If it's clean or purgeable we can discard the page on wakeup.
+ */
+ discard = (!m->vmp_dirty)
+ || (VM_PURGABLE_VOLATILE == object->purgable)
+ || (VM_PURGABLE_EMPTY == object->purgable);
+
+
+ if (discard == FALSE) {
+ if (!preflight)
+ hibernate_stats.cd_found_dirty++;
+ } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
+ /* keep hot executable pages in the image, up to a limit */
+ if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ } else {
+ if (!preflight)
+ hibernate_stats.cd_skipped_xpmapped++;
+ }
+ }
+ }
+ while (FALSE);
+
+ if (object)
+ vm_object_unlock(object);
+
+ return (discard);
+}
+
+
+/*
+ * hibernate_discard_page:
+ * Actually free a page that hibernate_consider_discard() approved.
+ * Disconnects any pmap mappings, transitions a volatile purgeable owner
+ * object to EMPTY (adjusting vm_page_purgeable_count and its token queue),
+ * and frees the page.  Panics on pages that should never reach here
+ * (laundry, private, fictitious).
+ *
+ * Locking: on MACH_ASSERT/DEBUG builds the object lock is taken here; on
+ * release builds the existing comment relies on hibernate_vm_unlock()
+ * having made these locks uncontended before sleep.
+ */
+static void
+hibernate_discard_page(vm_page_t m)
+{
+ vm_object_t m_object;
+
+ if (m->vmp_absent || m->vmp_unusual || m->vmp_error)
+ /*
+ * If it's unusual in anyway, ignore
+ */
+ return;
+
+ m_object = VM_PAGE_OBJECT(m);
+
+#if MACH_ASSERT || DEBUG
+ if (!vm_object_lock_try(m_object))
+ panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
+#else
+ /* No need to lock page queue for token delete, hibernate_vm_unlock()
+ makes sure these locks are uncontended before sleep */
+#endif /* MACH_ASSERT || DEBUG */
+
+ if (m->vmp_pmapped == TRUE)
+ {
+ __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ }
+
+ if (m->vmp_laundry)
+ panic("hibernate_discard_page(%p) laundry", m);
+ if (m->vmp_private)
+ panic("hibernate_discard_page(%p) private", m);
+ if (m->vmp_fictitious)
+ panic("hibernate_discard_page(%p) fictitious", m);
+
+ if (VM_PURGABLE_VOLATILE == m_object->purgable)
+ {
+ /* object should be on a queue */
+ assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
+ purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
+ assert(old_queue);
+ if (m_object->purgeable_when_ripe) {
+ vm_purgeable_token_delete_first(old_queue);
+ }
+ vm_object_lock_assert_exclusive(m_object);
+ m_object->purgable = VM_PURGABLE_EMPTY;
+
+ /*
+ * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
+ * accounted in the "volatile" ledger, so no change here.
+ * We have to update vm_page_purgeable_count, though, since we're
+ * effectively purging this object.
+ */
+ unsigned int delta;
+ assert(m_object->resident_page_count >= m_object->wired_page_count);
+ delta = (m_object->resident_page_count - m_object->wired_page_count);
+ assert(vm_page_purgeable_count >= delta);
+ assert(delta > 0);
+ OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
+ }
+
+ vm_page_free(m);
+
+#if MACH_ASSERT || DEBUG
+ vm_object_unlock(m_object);
+#endif /* MACH_ASSERT || DEBUG */
+}
+
+/*
+ Grab locks for hibernate_page_list_setall()
+*/
+/*
+ * Acquire, in order: compressor object lock, page-queues lock, free-page
+ * lock, purgeable-queue lock, then every per-cpu local page queue lock.
+ * hibernate_vm_unlock_queues() must release in the exact reverse order.
+ */
+void
+hibernate_vm_lock_queues(void)
+{
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ lck_mtx_lock(&vm_purgeable_queue_lock);
+
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+}
+
+/*
+ * Release everything taken by hibernate_vm_lock_queues(), in reverse
+ * order: local page queue locks, purgeable-queue lock, free-page lock,
+ * page-queues lock, compressor object lock.
+ */
+void
+hibernate_vm_unlock_queues(void)
+{
+ if (vm_page_local_q) {
+ uint32_t i;
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ lck_mtx_unlock(&vm_purgeable_queue_lock);
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+}
+
+/*
+ Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
+ pages known to VM to not need saving are subtracted.
+ Wired pages to be saved are present in page_list_wired, pageable in page_list.
+*/
+
+/*
+ * hibernate_page_list_setall:
+ * Build the hibernation bitmaps by walking every page queue and marking
+ * pages that do NOT need to be written to the image (free pages, and pages
+ * eligible for discard-on-wake).  Bits start at zero ("save") via
+ * hibernate_page_list_zero() and are set for pages that can be skipped.
+ *
+ * page_list        - pageable pages that need saving (bit clear == save)
+ * page_list_wired  - wired pages that need saving
+ * page_list_pal    - platform (PAL) page list
+ * preflight        - count only: bitmaps are not touched (pointers NULLed)
+ * will_discard     - when !preflight, actually discard eligible pages now
+ * pagesOut         - returns the number of pages the image must contain
+ *
+ * Locking: callers arrange the VM locks via hibernate_vm_lock_queues();
+ * in the preflight case the needed locks are taken/released locally.
+ */
+void
+hibernate_page_list_setall(hibernate_page_list_t * page_list,
+ hibernate_page_list_t * page_list_wired,
+ hibernate_page_list_t * page_list_pal,
+ boolean_t preflight,
+ boolean_t will_discard,
+ uint32_t * pagesOut)
+{
+ uint64_t start, end, nsec;
+ vm_page_t m;
+ vm_page_t next;
+ uint32_t pages = page_list->page_count;
+ uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
+ uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
+ uint32_t count_wire = pages;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_cleaned = 0;
+ uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_speculative = 0;
+ uint32_t count_discard_vm_struct_pages = 0;
+ uint32_t i;
+ uint32_t bank;
+ hibernate_bitmap_t * bitmap;
+ hibernate_bitmap_t * bitmap_wired;
+ boolean_t discard_all;
+ boolean_t discard;
+
+ HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
+
+ if (preflight) {
+ /* counting only - make sure the bitmaps can't be written */
+ page_list = NULL;
+ page_list_wired = NULL;
+ page_list_pal = NULL;
+ discard_all = FALSE;
+ } else {
+ discard_all = will_discard;
+ }
+
+#if MACH_ASSERT || DEBUG
+ if (!preflight)
+ {
+ assert(hibernate_vm_locks_are_safe());
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
+
+ clock_get_uptime(&start);
+
+ if (!preflight) {
+ hibernate_page_list_zero(page_list);
+ hibernate_page_list_zero(page_list_wired);
+ hibernate_page_list_zero(page_list_pal);
+
+ hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
+ hibernate_stats.cd_pages = pages;
+ }
+
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++)
+ vm_page_reactivate_local(i, TRUE, !preflight);
+ }
+
+ if (preflight) {
+ vm_object_lock(compressor_object);
+ vm_page_lock_queues();
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ }
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ hibernation_vmqueues_inspection = TRUE;
+
+ /* gobbled pages: not saved, not wired */
+ m = (vm_page_t) hibernate_gobble_queue;
+ while (m)
+ {
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ }
+ m = m->vmp_snext;
+ }
+
+ /* per-cpu free page caches: not saved, not wired */
+ if (!preflight) for( i = 0; i < real_ncpus; i++ )
+ {
+ if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor)
+ {
+ for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->vmp_snext)
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
+
+ pages--;
+ count_wire--;
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ hibernate_stats.cd_local_free++;
+ hibernate_stats.cd_total_free++;
+ }
+ }
+ }
+
+ /* global free lists, one queue per color */
+ for( i = 0; i < vm_colors; i++ )
+ {
+ vm_page_queue_iterate(&vm_page_queue_free[i].qhead,
+ m,
+ vm_page_t,
+ vmp_pageq)
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
+
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ hibernate_stats.cd_total_free++;
+ }
+ }
+ }
+
+ vm_page_queue_iterate(&vm_lopage_queue_free,
+ m,
+ vm_page_t,
+ vmp_pageq)
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
+
+ pages--;
+ count_wire--;
+ if (!preflight) {
+ hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ hibernate_stats.cd_total_free++;
+ }
+ }
+
+ /*
+ * The queue walks below all follow the same pattern: grab 'next'
+ * before examining 'm' (hibernate_discard_page may free it), set the
+ * page's bit if discardable, and count it either as a save or a
+ * discard.  Dirty pages of purgeable objects count as 'purgeable'.
+ */
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
+ while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_throttled++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_anonymous++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_cleaned++;
+ discard = discard_all;
+ }
+ else
+ count_cleaned++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ discard = discard_all;
+ }
+ else
+ count_active++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ discard = discard_all;
+ }
+ else
+ count_inactive++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+ /* XXX FBDP TODO: secluded queue */
+
+ for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+ assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
+ "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
+ m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
+
+ next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ count_discard_speculative++;
+ discard = discard_all;
+ }
+ else
+ count_speculative++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+ }
+
+ /* compressor segment pages are pageable content, not wired data */
+ vm_page_queue_iterate(&compressor_object->memq, m, vm_page_t, vmp_listq)
+ {
+ assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
+
+ count_compressor++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
+ }
+
+ if (preflight == FALSE && discard_all == TRUE) {
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
+
+ HIBLOG("hibernate_teardown started\n");
+ count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
+ HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
+
+ pages -= count_discard_vm_struct_pages;
+ count_wire -= count_discard_vm_struct_pages;
+
+ hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
+
+ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
+ }
+
+ if (!preflight) {
+ // pull wired from hibernate_bitmap
+ bitmap = &page_list->bank_bitmap[0];
+ bitmap_wired = &page_list_wired->bank_bitmap[0];
+ for (bank = 0; bank < page_list->bank_count; bank++)
+ {
+ for (i = 0; i < bitmap->bitmapwords; i++)
+ bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
+ bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords];
+ bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
+ }
+ }
+
+ // machine dependent adjustments
+ hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
+
+ if (!preflight) {
+ hibernate_stats.cd_count_wire = count_wire;
+ hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
+ count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
+ }
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
+
+ HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
+ pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
+ discard_all ? "did" : "could",
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+
+ if (hibernate_stats.cd_skipped_xpmapped)
+ HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
+ *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
+
+ /* a discarding run will also drop all pageable content from the image */
+ if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+
+ hibernation_vmqueues_inspection = FALSE;
+
+#if MACH_ASSERT || DEBUG
+ if (!preflight)
+ {
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
+ if (preflight) {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+ vm_object_unlock(compressor_object);
+ }
+
+ KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
+}
+
+/*
+ * hibernate_page_list_discard:
+ * After wake, free every page whose bit is set in 'page_list' (i.e. pages
+ * that hibernate_page_list_setall() determined need not be preserved).
+ * Walks the anonymous, speculative, inactive, active and cleaned queues,
+ * fetching 'next' before each hibernate_discard_page() call since the
+ * discard unlinks and frees the page.  Logs per-category discard counts.
+ */
+void
+hibernate_page_list_discard(hibernate_page_list_t * page_list)
+{
+ uint64_t start, end, nsec;
+ vm_page_t m;
+ vm_page_t next;
+ uint32_t i;
+ uint32_t count_discard_active = 0;
+ uint32_t count_discard_inactive = 0;
+ uint32_t count_discard_purgeable = 0;
+ uint32_t count_discard_cleaned = 0;
+ uint32_t count_discard_speculative = 0;
+
+
+#if MACH_ASSERT || DEBUG
+ vm_page_lock_queues();
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_LOCK(&lq->vpl_lock);
+ }
+ }
+#endif /* MACH_ASSERT || DEBUG */
+
+ clock_get_uptime(&start);
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ )
+ {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
+ while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ count_discard_speculative++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_inactive++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+ /* XXX FBDP TODO: secluded queue */
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+ while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m))
+ {
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
+ if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m)))
+ {
+ if (m->vmp_dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_cleaned++;
+ hibernate_discard_page(m);
+ }
+ m = next;
+ }
+
+#if MACH_ASSERT || DEBUG
+ if (vm_page_local_q) {
+ for (i = 0; i < vm_page_local_q_count; i++) {
+ struct vpl *lq;
+ lq = &vm_page_local_q[i].vpl_un.vpl;
+ VPL_UNLOCK(&lq->vpl_lock);
+ }
+ }
+ vm_page_unlock_queues();
+#endif /* MACH_ASSERT || DEBUG */
+
+ clock_get_uptime(&end);
+ absolutetime_to_nanoseconds(end - start, &nsec);
+ HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
+ nsec / 1000000ULL,
+ count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+}
+
+/* set once hibernate_create_paddr_map() has built the run list below */
+boolean_t hibernate_paddr_map_inited = FALSE;
+/* NOTE(review): assigned -1 to an unsigned int, i.e. UINT_MAX sentinel —
+ * presumably "no valid index yet"; confirm users treat it that way. */
+unsigned int hibernate_teardown_last_valid_compact_indx = -1;
+vm_page_t hibernate_rebuild_hash_list = NULL;
+
+/* counters published by the hibernate teardown/rebuild path */
+unsigned int hibernate_teardown_found_tabled_pages = 0;
+unsigned int hibernate_teardown_found_created_pages = 0;
+unsigned int hibernate_teardown_found_free_pages = 0;
+unsigned int hibernate_teardown_vm_page_free_count;
+
+
+/*
+ * One contiguous run of physical pages in the vm_pages[] array:
+ * vm_pages[ppnm_sindx .. ppnm_eindx) have physical page numbers
+ * ppnm_base_paddr + (index - ppnm_sindx).
+ */
+struct ppnum_mapping {
+ struct ppnum_mapping *ppnm_next;
+ ppnum_t ppnm_base_paddr;
+ unsigned int ppnm_sindx;
+ unsigned int ppnm_eindx;
+};
+
+/* head of the run list, and a one-entry lookup cache for hibernate_lookup_paddr() */
+struct ppnum_mapping *ppnm_head;
+struct ppnum_mapping *ppnm_last_found = NULL;
+
+
+/*
+ * hibernate_create_paddr_map:
+ * Build the ppnum_mapping run list describing which stretches of the
+ * vm_pages[] array cover physically contiguous page ranges.  A new run is
+ * started whenever a page's physical number breaks the current run.
+ * Idempotent: guarded by hibernate_paddr_map_inited.
+ */
+void
+hibernate_create_paddr_map()
+{
+ unsigned int i;
+ ppnum_t next_ppnum_in_run = 0;
+ struct ppnum_mapping *ppnm = NULL;
+
+ if (hibernate_paddr_map_inited == FALSE) {
+
+ for (i = 0; i < vm_pages_count; i++) {
+
+ /* keep the current run's end index up to date as we scan */
+ if (ppnm)
+ ppnm->ppnm_eindx = i;
+
+ if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
+
+ ppnm = kalloc(sizeof(struct ppnum_mapping));
+
+ ppnm->ppnm_next = ppnm_head;
+ ppnm_head = ppnm;
+
+ ppnm->ppnm_sindx = i;
+ ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
+ }
+ next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
+ }
+ /* close the final run (eindx is exclusive)
+ * NOTE(review): dereferences ppnm unconditionally — assumes
+ * vm_pages_count > 0, which holds once the VM is initialized. */
+ ppnm->ppnm_eindx++;
+
+ hibernate_paddr_map_inited = TRUE;
+ }
+}
+
+/*
+ * hibernate_lookup_paddr:
+ * Translate a vm_pages[] index into its physical page number using the
+ * run list built by hibernate_create_paddr_map().  The last matching run
+ * is cached in ppnm_last_found since lookups tend to be sequential.
+ * Panics if the index is in no run.
+ */
+ppnum_t
+hibernate_lookup_paddr(unsigned int indx)
+{
+ struct ppnum_mapping *ppnm = NULL;
+
+ ppnm = ppnm_last_found;
+
+ if (ppnm) {
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx)
+ goto done;
+ }
+ for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
+
+ if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
+ ppnm_last_found = ppnm;
+ break;
+ }
+ }
+ if (ppnm == NULL)
+ panic("hibernate_lookup_paddr of %d failed\n", indx);
+done:
+ return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx));
+}
+
+
+uint32_t
+hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
+{
+ addr64_t saddr_aligned;
+ addr64_t eaddr_aligned;
+ addr64_t addr;
+ ppnum_t paddr;
+ unsigned int mark_as_unneeded_pages = 0;
+
+ saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
+ eaddr_aligned = eaddr & ~PAGE_MASK_64;
+
+ for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
+
+ paddr = pmap_find_phys(kernel_pmap, addr);
+
+ assert(paddr);
+
+ hibernate_page_bitset(page_list, TRUE, paddr);
+ hibernate_page_bitset(page_list_wired, TRUE, paddr);
+
+ mark_as_unneeded_pages++;
+ }
+ return (mark_as_unneeded_pages);
+}
+
+
+void
+hibernate_hash_insert_page(vm_page_t mem)
+{
+ vm_page_bucket_t *bucket;
+ int hash_id;
+ vm_object_t m_object;
+
+ m_object = VM_PAGE_OBJECT(mem);
+
+ assert(mem->vmp_hashed);
+ assert(m_object);
+ assert(mem->vmp_offset != (vm_object_offset_t) -1);
+
+ /*
+ * Insert it into the object_object/offset hash table
+ */
+ hash_id = vm_page_hash(m_object, mem->vmp_offset);
+ bucket = &vm_page_buckets[hash_id];
+
+ mem->vmp_next_m = bucket->page_list;
+ bucket->page_list = VM_PAGE_PACK_PTR(mem);
+}
+
+
/*
 * Re-initialize vm_pages[sindx..eindx) as free pages and return them to
 * the per-color free queues, bumping vm_page_free_count.  Called from
 * hibernate_rebuild_vm_structs() to fill the "holes" left by teardown
 * compaction; physical page numbers are re-derived from the run map.
 */
void
hibernate_free_range(int sindx, int eindx)
{
    vm_page_t mem;
    unsigned int color;

    while (sindx < eindx) {
        mem = &vm_pages[sindx];

        vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);

        mem->vmp_lopage = FALSE;
        mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

        color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
        /* x86_64 free queues are kept in clumps */
        vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead,
                                  mem,
                                  vm_page_t,
                                  vmp_pageq);
#else
        vm_page_queue_enter(&vm_page_queue_free[color].qhead,
                            mem,
                            vm_page_t,
                            vmp_pageq);
#endif
        vm_page_free_count++;

        sindx++;
    }
}
+
+
+extern void hibernate_rebuild_pmap_structs(void);
+
/*
 * Undo hibernate_teardown_vm_structs(): move the compacted vm_pages[]
 * entries back to their original slots, re-insert hashed pages into the
 * (wiped) vm_page hash, and rebuild the free queues for the ranges in
 * between.  No-op unless a teardown actually ran.
 */
void
hibernate_rebuild_vm_structs(void)
{
    int i, cindx, sindx, eindx;
    vm_page_t mem, tmem, mem_next;
    AbsoluteTime startTime, endTime;
    uint64_t nsec;

    if (hibernate_rebuild_needed == FALSE)
        return;

    KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
    HIBLOG("hibernate_rebuild started\n");

    clock_get_uptime(&startTime);

    hibernate_rebuild_pmap_structs();

    /* the hash is rebuilt from scratch below */
    bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
    eindx = vm_pages_count;

    /*
     * Mark all the vm_pages[] that have not been initialized yet as being
     * transient. This is needed to ensure that buddy page search is correct.
     * Without this, random data in these vm_pages[] can trip the buddy search
     */
    for (i = hibernate_teardown_last_valid_compact_indx+1; i < eindx; ++i)
        vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;

    /* walk the compacted entries from highest to lowest */
    for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {

        mem = &vm_pages[cindx];
        assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
        /*
         * hibernate_teardown_vm_structs leaves the location where
         * this vm_page_t must be located in "next".
         */
        tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
        mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);

        sindx = (int)(tmem - &vm_pages[0]);

        if (mem != tmem) {
            /*
             * this vm_page_t was moved by hibernate_teardown_vm_structs,
             * so move it back to its real location
             */
            *tmem = *mem;
            mem = tmem;
        }
        if (mem->vmp_hashed)
            hibernate_hash_insert_page(mem);
        /*
         * the 'hole' between this vm_page_t and the previous
         * vm_page_t we moved needs to be initialized as
         * a range of free vm_page_t's
         */
        hibernate_free_range(sindx + 1, eindx);

        eindx = sindx;
    }
    /* NOTE(review): sindx is only assigned inside the loop above -- this
     * presumes at least one compacted entry exists whenever
     * hibernate_rebuild_needed is TRUE; confirm */
    if (sindx)
        hibernate_free_range(0, sindx);

    assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);

    /*
     * process the list of vm_page_t's that were entered in the hash,
     * but were not located in the vm_pages array... these are
     * vm_page_t's that were created on the fly (i.e. fictitious)
     */
    for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
        mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));

        mem->vmp_next_m = 0;
        hibernate_hash_insert_page(mem);
    }
    hibernate_rebuild_hash_list = NULL;

    clock_get_uptime(&endTime);
    SUB_ABSOLUTETIME(&endTime, &startTime);
    absolutetime_to_nanoseconds(endTime, &nsec);

    HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);

    hibernate_rebuild_needed = FALSE;

    KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
}
+
+
+extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *);
+
/*
 * Compact vm_pages[] so only in-use entries remain at the front, then
 * mark the hash buckets, the freed tail of vm_pages[], and spare pmap
 * structures as not needing to be saved in the hibernation image.
 * Returns the number of pages so excluded.  Undone by
 * hibernate_rebuild_vm_structs().
 */
uint32_t
hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
    unsigned int i;
    unsigned int compact_target_indx;
    vm_page_t mem, mem_next;
    vm_page_bucket_t *bucket;
    unsigned int mark_as_unneeded_pages = 0;
    unsigned int unneeded_vm_page_bucket_pages = 0;
    unsigned int unneeded_vm_pages_pages = 0;
    unsigned int unneeded_pmap_pages = 0;
    addr64_t start_of_unneeded = 0;
    addr64_t end_of_unneeded = 0;


    if (hibernate_should_abort())
        return (0);

    hibernate_rebuild_needed = TRUE;

    HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
           vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
           vm_page_cleaned_count, compressor_object->resident_page_count);

    /*
     * Save hashed pages that live outside vm_pages[] (fictitious pages)
     * on hibernate_rebuild_hash_list; the buckets themselves are about
     * to be marked unneeded and are rebuilt from scratch afterwards.
     */
    for (i = 0; i < vm_page_bucket_count; i++) {

        bucket = &vm_page_buckets[i];

        for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
            assert(mem->vmp_hashed);

            mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));

            if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
                mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
                hibernate_rebuild_hash_list = mem;
            }
        }
    }
    unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
    mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;

    /* snapshot for the rebuild-time consistency assert */
    hibernate_teardown_vm_page_free_count = vm_page_free_count;

    compact_target_indx = 0;

    for (i = 0; i < vm_pages_count; i++) {

        mem = &vm_pages[i];

        if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
            unsigned int color;

            assert(mem->vmp_busy);
            assert(!mem->vmp_lopage);

            color = VM_PAGE_GET_COLOR(mem);

            vm_page_queue_remove(&vm_page_queue_free[color].qhead,
                                 mem,
                                 vm_page_t,
                                 vmp_pageq);

            VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

            vm_page_free_count--;

            hibernate_teardown_found_free_pages++;

            /* remember the lowest free slot as the next compaction target */
            if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q)
                compact_target_indx = i;
        } else {
            /*
             * record this vm_page_t's original location
             * we need this even if it doesn't get moved
             * as an indicator to the rebuild function that
             * we don't have to move it
             */
            mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);

            if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
                /*
                 * we've got a hole to fill, so
                 * move this vm_page_t to it's new home
                 */
                vm_pages[compact_target_indx] = *mem;
                mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

                hibernate_teardown_last_valid_compact_indx = compact_target_indx;
                compact_target_indx++;
            } else
                hibernate_teardown_last_valid_compact_indx = i;
        }
    }
    /* NOTE(review): the end address is &vm_pages[vm_pages_count-1], so the
     * page containing the final entry is conservatively kept -- and
     * hibernate_mark_as_unneeded only marks whole pages anyway */
    unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1],
                                                         (addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired);
    mark_as_unneeded_pages += unneeded_vm_pages_pages;

    hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);

    if (start_of_unneeded) {
        unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
        mark_as_unneeded_pages += unneeded_pmap_pages;
    }
    HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);

    return (mark_as_unneeded_pages);
}
+
+
+#endif /* HIBERNATION */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <mach_vm_debug.h>
+#if MACH_VM_DEBUG
+
+#include <mach_debug/hash_info.h>
+#include <vm/vm_debug.h>
+
+/*
+ * Routine: vm_page_info
+ * Purpose:
+ * Return information about the global VP table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+vm_page_info(
+ hash_info_bucket_t *info,
+ unsigned int count)
+{
+ unsigned int i;
+ lck_spin_t *bucket_lock;
+
+ if (vm_page_bucket_count < count)
+ count = vm_page_bucket_count;
+
+ for (i = 0; i < count; i++) {
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ unsigned int bucket_count = 0;
+ vm_page_t m;
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+
+ for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+ m != VM_PAGE_NULL;
+ m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m)))
+ bucket_count++;
+
+ lck_spin_unlock(bucket_lock);
+
+ /* don't touch pageable memory while holding locks */
+ info[i].hib_count = bucket_count;
+ }
+
+ return vm_page_bucket_count;
+}
+#endif /* MACH_VM_DEBUG */
+
+#if VM_PAGE_BUCKETS_CHECK
+void
+vm_page_buckets_check(void)
+{
+ unsigned int i;
+ vm_page_t p;
+ unsigned int p_hash;
+ vm_page_bucket_t *bucket;
+ lck_spin_t *bucket_lock;
+
+ if (!vm_page_buckets_check_ready) {
+ return;
+ }
+
+#if HIBERNATION
+ if (hibernate_rebuild_needed ||
+ hibernate_rebuild_hash_list) {
+ panic("BUCKET_CHECK: hibernation in progress: "
+ "rebuild_needed=%d rebuild_hash_list=%p\n",
+ hibernate_rebuild_needed,
+ hibernate_rebuild_hash_list);
+ }
+#endif /* HIBERNATION */
+
+#if VM_PAGE_FAKE_BUCKETS
+ char *cp;
+ for (cp = (char *) vm_page_fake_buckets_start;
+ cp < (char *) vm_page_fake_buckets_end;
+ cp++) {
+ if (*cp != 0x5a) {
+ panic("BUCKET_CHECK: corruption at %p in fake buckets "
+ "[0x%llx:0x%llx]\n",
+ cp,
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ }
+ }
+#endif /* VM_PAGE_FAKE_BUCKETS */
+
+ for (i = 0; i < vm_page_bucket_count; i++) {
+ vm_object_t p_object;
+
+ bucket = &vm_page_buckets[i];
+ if (!bucket->page_list) {
+ continue;
+ }
+
+ bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
+ lck_spin_lock(bucket_lock);
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
+
+ while (p != VM_PAGE_NULL) {
+ p_object = VM_PAGE_OBJECT(p);
+
+ if (!p->vmp_hashed) {
+ panic("BUCKET_CHECK: page %p (%p,0x%llx) "
+ "hash %d in bucket %d at %p "
+ "is not hashed\n",
+ p, p_object, p->vmp_offset,
+ p_hash, i, bucket);
+ }
+ p_hash = vm_page_hash(p_object, p->vmp_offset);
+ if (p_hash != i) {
+ panic("BUCKET_CHECK: corruption in bucket %d "
+ "at %p: page %p object %p offset 0x%llx "
+ "hash %d\n",
+ i, bucket, p, p_object, p->vmp_offset,
+ p_hash);
+ }
+ p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
+ }
+ lck_spin_unlock(bucket_lock);
+ }
+
+// printf("BUCKET_CHECK: checked buckets\n");
+}
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... it's the only spot in the system where we add pages
 * to those queues... once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
 * the global vm_page_queue_lock at this point... we still need to take the local lock
 * in case this operation is being run on a different CPU than the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */
+
/*
 * Remove 'mem' from whichever page queue it is on, update the matching
 * counters, and leave it in VM_PAGE_NOT_ON_Q.  Wired, compressor-owned,
 * and already-off-queue pages are handled by the early returns.
 * Optionally also removes the page from the background queue.
 * Caller holds the page-queues lock.  NOT valid for pages on the
 * pageout queue or the free queues (panics in the default case).
 */
#if CONFIG_BACKGROUND_QUEUE
void
vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
#else
void
vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
#endif
{
    boolean_t was_pageable = TRUE;
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(mem);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q)
    {
        /* already off all regular queues; background removal may still apply */
        assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
        if (remove_from_backgroundq == TRUE) {
            vm_page_remove_from_backgroundq(mem);
        }
        if (mem->vmp_on_backgroundq) {
            assert(mem->vmp_backgroundq.next != 0);
            assert(mem->vmp_backgroundq.prev != 0);
        } else {
            assert(mem->vmp_backgroundq.next == 0);
            assert(mem->vmp_backgroundq.prev == 0);
        }
#endif /* CONFIG_BACKGROUND_QUEUE */
        return;
    }

    if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR)
    {
        assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
        assert(mem->vmp_backgroundq.next == 0 &&
               mem->vmp_backgroundq.prev == 0 &&
               mem->vmp_on_backgroundq == FALSE);
#endif
        return;
    }
    if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
        /*
         * might put these guys on a list for debugging purposes
         * if we do, we'll need to remove this assert
         */
        assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
#if CONFIG_BACKGROUND_QUEUE
        assert(mem->vmp_backgroundq.next == 0 &&
               mem->vmp_backgroundq.prev == 0 &&
               mem->vmp_on_backgroundq == FALSE);
#endif
        return;
    }

    assert(m_object != compressor_object);
    assert(m_object != kernel_object);
    assert(m_object != vm_submap_object);
    assert(!mem->vmp_fictitious);

    switch(mem->vmp_q_state) {

    case VM_PAGE_ON_ACTIVE_LOCAL_Q:
    {
        struct vpl *lq;

        /* see the block comment above: local state is stable here,
         * but the local lock must still be taken */
        lq = &vm_page_local_q[mem->vmp_local_id].vpl_un.vpl;
        VPL_LOCK(&lq->vpl_lock);
        vm_page_queue_remove(&lq->vpl_queue,
                             mem, vm_page_t, vmp_pageq);
        mem->vmp_local_id = 0;
        lq->vpl_count--;
        if (m_object->internal) {
            lq->vpl_internal_count--;
        } else {
            lq->vpl_external_count--;
        }
        VPL_UNLOCK(&lq->vpl_lock);
        was_pageable = FALSE;
        break;
    }
    case VM_PAGE_ON_ACTIVE_Q:
    {
        vm_page_queue_remove(&vm_page_queue_active,
                             mem, vm_page_t, vmp_pageq);
        vm_page_active_count--;
        break;
    }

    case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
    {
        assert(m_object->internal == TRUE);

        vm_page_inactive_count--;
        vm_page_queue_remove(&vm_page_queue_anonymous,
                             mem, vm_page_t, vmp_pageq);
        vm_page_anonymous_count--;

        vm_purgeable_q_advance_all();
        vm_page_balance_inactive(3);
        break;
    }

    case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
    {
        assert(m_object->internal == FALSE);

        vm_page_inactive_count--;
        vm_page_queue_remove(&vm_page_queue_inactive,
                             mem, vm_page_t, vmp_pageq);
        vm_purgeable_q_advance_all();
        vm_page_balance_inactive(3);
        break;
    }

    case VM_PAGE_ON_INACTIVE_CLEANED_Q:
    {
        assert(m_object->internal == FALSE);

        vm_page_inactive_count--;
        vm_page_queue_remove(&vm_page_queue_cleaned,
                             mem, vm_page_t, vmp_pageq);
        vm_page_cleaned_count--;
        vm_page_balance_inactive(3);
        break;
    }

    case VM_PAGE_ON_THROTTLED_Q:
    {
        assert(m_object->internal == TRUE);

        vm_page_queue_remove(&vm_page_queue_throttled,
                             mem, vm_page_t, vmp_pageq);
        vm_page_throttled_count--;
        was_pageable = FALSE;
        break;
    }

    case VM_PAGE_ON_SPECULATIVE_Q:
    {
        assert(m_object->internal == FALSE);

        vm_page_remque(&mem->vmp_pageq);
        vm_page_speculative_count--;
        vm_page_balance_inactive(3);
        break;
    }

#if CONFIG_SECLUDED_MEMORY
    case VM_PAGE_ON_SECLUDED_Q:
    {
        vm_page_queue_remove(&vm_page_queue_secluded,
                             mem, vm_page_t, vmp_pageq);
        vm_page_secluded_count--;
        if (m_object == VM_OBJECT_NULL) {
            vm_page_secluded_count_free--;
            was_pageable = FALSE;
        } else {
            assert(!m_object->internal);
            vm_page_secluded_count_inuse--;
            was_pageable = FALSE;
//			was_pageable = TRUE;
        }
        break;
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    default:
    {
        /*
         * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
         * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
         * the caller is responsible for determing if the page is on that queue, and if so, must
         * either first remove it (it needs both the page queues lock and the object lock to do
         * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
         *
         * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
         * or any of the undefined states
         */
        panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vmp_q_state);
        break;
    }

    }
    VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
    mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

#if CONFIG_BACKGROUND_QUEUE
    if (remove_from_backgroundq == TRUE)
        vm_page_remove_from_backgroundq(mem);
#endif
    if (was_pageable) {
        if (m_object->internal) {
            vm_page_pageable_internal_count--;
        } else {
            vm_page_pageable_external_count--;
        }
    }
}
+
+void
+vm_page_remove_internal(vm_page_t page)
+{
+ vm_object_t __object = VM_PAGE_OBJECT(page);
+ if (page == __object->memq_hint) {
+ vm_page_t __new_hint;
+ vm_page_queue_entry_t __qe;
+ __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
+ if (vm_page_queue_end(&__object->memq, __qe)) {
+ __qe = NULL;
+ }
+ }
+ __new_hint = (vm_page_t)((uintptr_t) __qe);
+ __object->memq_hint = __new_hint;
+ }
+ vm_page_queue_remove(&__object->memq, page, vm_page_t, vmp_listq);
+#if CONFIG_SECLUDED_MEMORY
+ if (__object->eligible_for_secluded) {
+ vm_page_secluded.eligible_for_secluded--;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+}
+
/*
 * Place 'mem' on the appropriate inactive queue: the anonymous queue
 * for internal objects, the inactive queue for external ones; at the
 * head when 'first' is TRUE, else at the tail.  Updates the matching
 * counters.  Caller holds the page-queues lock; the page must not be
 * on any queue, and must be safe for a pageable queue.
 */
void
vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
{
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(mem);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(!mem->vmp_fictitious);
    assert(!mem->vmp_laundry);
    assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
    vm_page_check_pageable_safe(mem);

    if (m_object->internal) {
        mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;

        if (first == TRUE)
            vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, vmp_pageq);
        else
            vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, vmp_pageq);

        vm_page_anonymous_count++;
        vm_page_pageable_internal_count++;
    } else {
        mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;

        if (first == TRUE)
            vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, vmp_pageq);
        else
            vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, vmp_pageq);

        vm_page_pageable_external_count++;
    }
    vm_page_inactive_count++;
    token_new_pagecount++;

#if CONFIG_BACKGROUND_QUEUE
    if (mem->vmp_in_background)
        vm_page_add_to_backgroundq(mem, FALSE);
#endif
}
+
/*
 * Place 'mem' on the active queue (head if 'first') and update the
 * active/pageable counters.  Caller holds the page-queues lock; the
 * page must not be on any queue, and must be safe for a pageable queue.
 */
void
vm_page_enqueue_active(vm_page_t mem, boolean_t first)
{
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(mem);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(!mem->vmp_fictitious);
    assert(!mem->vmp_laundry);
    assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
    vm_page_check_pageable_safe(mem);

    mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
    if (first == TRUE)
        vm_page_queue_enter_first(&vm_page_queue_active, mem, vm_page_t, vmp_pageq);
    else
        vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, vmp_pageq);
    vm_page_active_count++;

    if (m_object->internal) {
        vm_page_pageable_internal_count++;
    } else {
        vm_page_pageable_external_count++;
    }

#if CONFIG_BACKGROUND_QUEUE
    if (mem->vmp_in_background)
        vm_page_add_to_backgroundq(mem, FALSE);
#endif
    vm_page_balance_inactive(3);
}
+
+/*
+ * Pages from special kernel objects shouldn't
+ * be placed on pageable queues.
+ */
+void
+vm_page_check_pageable_safe(vm_page_t page)
+{
+ vm_object_t page_object;
+
+ page_object = VM_PAGE_OBJECT(page);
+
+ if (page_object == kernel_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from kernel object (%p) to pageable queue", kernel_object);
+ }
+
+ if (page_object == compressor_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from compressor object (%p) to pageable queue", compressor_object);
+ }
+
+ if (page_object == vm_submap_object) {
+ panic("vm_page_check_pageable_safe: trying to add page" \
+ "from submap object (%p) to pageable queue", vm_submap_object);
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * wired page diagnose
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <libkern/OSKextLibPrivate.h>
+
/*
 * A vm_allocation_site is allocated with its subtotals array and name
 * stored inline: [site struct][subtotals...][name bytes][NUL].
 * (Macro parameters are now fully parenthesized.)
 */
#define KA_SIZE(namelen, subtotalscount)	\
	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))

/* the name begins immediately after the subtotals array */
#define KA_NAME(alloc)	\
	((char *)(&(alloc)->subtotals[(alloc)->subtotalscount]))

/* the name length is packed into the upper bits of 'flags' */
#define KA_NAME_LEN(alloc)	\
	(VM_TAG_NAME_LEN_MAX & ((alloc)->flags >> VM_TAG_NAME_LEN_SHIFT))
+
/*
 * Derive a vm_tag for the current allocation by walking the kernel
 * stack: returns the thread's active allocation-name tag if one is
 * set, otherwise the tag of the first return address that falls
 * outside the core kernel text (i.e. in a kext), or
 * VM_KERN_MEMORY_NONE if no such frame is found.
 */
vm_tag_t
vm_tag_bt(void)
{
    uintptr_t* frameptr;
    uintptr_t* frameptr_next;
    uintptr_t retaddr;
    uintptr_t kstackb, kstackt;
    const vm_allocation_site_t * site;
    thread_t cthread;
    kern_allocation_name_t name;

    cthread = current_thread();
    if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK;

    if ((name = thread_get_kernel_state(cthread)->allocation_name))
    {
        /* thread has an explicit allocation name: use (and lazily tag) it */
        if (!name->tag) vm_tag_alloc(name);
        return name->tag;
    }

    kstackb = cthread->kernel_stack;
    kstackt = kstackb + kernel_stack_size;

    /* Load stack frame pointer (EBP on x86) into frameptr */
    frameptr = __builtin_frame_address(0);
    site = NULL;
    while (frameptr != NULL)
    {
        /* Verify thread stack bounds */
        if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break;

        /* Next frame pointer is pointed to by the previous one */
        frameptr_next = (uintptr_t*) *frameptr;

        /* Pull return address from one spot above the frame pointer */
        retaddr = *(frameptr + 1);


        /* a return address outside the core kernel text identifies the kext */
        if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
            || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top))
        {
            site = OSKextGetAllocationSiteForCaller(retaddr);
            break;
        }
        frameptr = frameptr_next;
    }

    return (site ? site->tag : VM_KERN_MEMORY_NONE);
}
+
/* Bitmap of unused dynamic VM tags, MSB-first: bit 63 of word 0 is tag 0. */
static uint64_t free_tag_bits[VM_MAX_TAG_VALUE/64];
+
/*
 * Assign a dynamic tag to 'site' (no-op if it already has one).
 * Takes the lowest free bit from free_tag_bits; if none remain, it
 * scavenges the tag of an idle named site (refcount 1, zero total),
 * handing that site back through *releasesiteP for the caller to
 * release outside the lock.  Falls back to the shared
 * VM_KERN_MEMORY_ANY tag as a last resort.
 * Caller holds vm_allocation_sites_lock.
 */
void
vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
{
    vm_tag_t tag;
    uint64_t avail;
    uint32_t idx;
    vm_allocation_site_t * prev;

    if (site->tag) return;

    idx = 0;
    while (TRUE)
    {
        avail = free_tag_bits[idx];
        if (avail)
        {
            /* clz finds the highest set bit == lowest-numbered free tag */
            tag = __builtin_clzll(avail);
            avail &= ~(1ULL << (63 - tag));
            free_tag_bits[idx] = avail;
            tag += (idx << 6);
            break;
        }
        idx++;
        if (idx >= ARRAY_COUNT(free_tag_bits))
        {
            /* no free tags: try to reclaim one from an idle named site */
            for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++)
            {
                prev = vm_allocation_sites[idx];
                if (!prev) continue;
                if (!KA_NAME_LEN(prev)) continue;
                if (!prev->tag) continue;
                if (prev->total) continue;
                if (1 != prev->refcount) continue;

                assert(idx == prev->tag);
                tag = idx;
                prev->tag = VM_KERN_MEMORY_NONE;
                *releasesiteP = prev;
                break;
            }
            if (idx >= ARRAY_COUNT(vm_allocation_sites))
            {
                tag = VM_KERN_MEMORY_ANY;
            }
            break;
        }
    }
    site->tag = tag;

    OSAddAtomic16(1, &site->refcount);

    if (VM_KERN_MEMORY_ANY != tag) vm_allocation_sites[tag] = site;

    if (tag > vm_allocation_tag_highest) vm_allocation_tag_highest = tag;
}
+
+static void
+vm_tag_free_locked(vm_tag_t tag)
+{
+ uint64_t avail;
+ uint32_t idx;
+ uint64_t bit;
+
+ if (VM_KERN_MEMORY_ANY == tag) return;
+
+ idx = (tag >> 6);
+ avail = free_tag_bits[idx];
+ tag &= 63;
+ bit = (1ULL << (63 - tag));
+ assert(!(avail & bit));
+ free_tag_bits[idx] = (avail | bit);
+}
+
+static void
+vm_tag_init(void)
+{
+ vm_tag_t tag;
+ for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+
+ for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++)
+ {
+ vm_tag_free_locked(tag);
+ }
+}
+
/*
 * Public entry to assign (or fetch) the tag for an allocation site.
 * Sites flagged VM_TAG_BT are tagged by stack-walking the caller first;
 * otherwise a dynamic tag is allocated under the sites lock.  Any site
 * scavenged to free up a tag is released outside the lock.
 */
vm_tag_t
vm_tag_alloc(vm_allocation_site_t * site)
{
    vm_tag_t tag;
    vm_allocation_site_t * releasesite;

    if (VM_TAG_BT & site->flags)
    {
        tag = vm_tag_bt();
        if (VM_KERN_MEMORY_NONE != tag) return (tag);
    }

    if (!site->tag)
    {
        releasesite = NULL;
        lck_spin_lock(&vm_allocation_sites_lock);
        vm_tag_alloc_locked(site, &releasesite);
        lck_spin_unlock(&vm_allocation_sites_lock);
        /* drop the reference of any site whose tag we scavenged */
        if (releasesite) kern_allocation_name_release(releasesite);
    }

    return (site->tag);
}
+
/*
 * Adjust the running size total for 'tag' by 'delta' bytes, tracking
 * the peak on DEBUG/DEVELOPMENT kernels.  A dynamic site that first
 * grows from zero is lazily assigned its tag.
 */
void
vm_tag_update_size(vm_tag_t tag, int64_t delta)
{
    vm_allocation_site_t * allocation;
    uint64_t prior;

    assert(VM_KERN_MEMORY_NONE != tag);
    assert(tag < VM_MAX_TAG_VALUE);

    allocation = vm_allocation_sites[tag];
    assert(allocation);

    if (delta < 0) {
        assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
    }
    prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT

    /* lock-free peak update: retry until no concurrent writer raced us */
    uint64_t new, peak;
    new = prior + delta;
    do
    {
        peak = allocation->peak;
        if (new <= peak) break;
    }
    while (!OSCompareAndSwap64(peak, new, &allocation->peak));

#endif /* DEBUG || DEVELOPMENT */

    if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) return;

    if (!prior && !allocation->tag) vm_tag_alloc(allocation);
}
+
/*
 * Same accounting as vm_tag_update_size(), but for an explicitly named
 * allocation object rather than a tag-indexed site: adjust its running
 * total by 'delta', track the peak on DEBUG/DEVELOPMENT kernels, and
 * lazily assign a tag the first time it grows from zero.
 */
void
kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
{
    uint64_t prior;

    if (delta < 0) {
        assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
    }
    prior = OSAddAtomic64(delta, &allocation->total);

#if DEBUG || DEVELOPMENT

    /* lock-free peak update: retry until no concurrent writer raced us */
    uint64_t new, peak;
    new = prior + delta;
    do
    {
        peak = allocation->peak;
        if (new <= peak) break;
    }
    while (!OSCompareAndSwap64(peak, new, &allocation->peak));

#endif /* DEBUG || DEVELOPMENT */

    if (!prior && !allocation->tag) vm_tag_alloc(allocation);
}
+
+#if VM_MAX_TAG_ZONES
+
/*
 * Allocate the per-tag zone-totals pointer table, and prepopulate the
 * DIAG and KALLOC entries so that the allocations made inside
 * vm_tag_will_update_zone() cannot recurse into themselves.
 */
void
vm_allocation_zones_init(void)
{
    kern_return_t ret;
    vm_offset_t addr;
    vm_size_t size;

    /* pointer table plus two preallocated per-zone arrays */
    size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **)
         + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);

    ret = kernel_memory_allocate(kernel_map,
                                 &addr, round_page(size), 0,
                                 KMA_ZERO, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == ret);

    vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
    addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **);

    // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations
    // in vm_tag_update_zone_size() won't recurse
    vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr;
    addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t);
    vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
}
+
/*
 * Ensure 'tag' has a per-zone totals array before zone accounting
 * occurs.  The array is installed with a compare-and-swap so that
 * concurrent callers race safely; the loser frees its copy.
 * Silently gives up if the allocation fails (accounting is best-effort).
 */
void
vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx)
{
    vm_allocation_zone_total_t * zone;

    assert(VM_KERN_MEMORY_NONE != tag);
    assert(tag < VM_MAX_TAG_VALUE);

    if (zidx >= VM_MAX_TAG_ZONES) return;

    zone = vm_allocation_zone_totals[tag];
    if (!zone)
    {
        zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG);
        if (!zone) return;
        bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
        if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag]))
        {
            /* another CPU installed an array first; discard ours */
            kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone));
        }
    }
}
+
/*
 * Account 'delta' bytes (and 'dwaste' wasted bytes) against zone 'zidx'
 * for 'tag'.  The caller holds the zone lock, so plain read-modify-
 * writes suffice.  'waste' is a decaying accumulator: once wastediv
 * saturates at 65536, each update sheds 1/65536 of the running value
 * before adding the new waste.
 */
void
vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste)
{
    vm_allocation_zone_total_t * zone;
    uint32_t new;

    assert(VM_KERN_MEMORY_NONE != tag);
    assert(tag < VM_MAX_TAG_VALUE);

    if (zidx >= VM_MAX_TAG_ZONES) return;

    zone = vm_allocation_zone_totals[tag];
    assert(zone);
    zone += zidx;

    /* the zone is locked */
    if (delta < 0)
    {
        assertf(zone->total >= ((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone);
        zone->total += delta;
    }
    else
    {
        zone->total += delta;
        if (zone->total > zone->peak) zone->peak = zone->total;
        if (dwaste)
        {
            new = zone->waste;
            if (zone->wastediv < 65536) zone->wastediv++;
            else new -= (new >> 16);
            __assert_only bool ov = os_add_overflow(new, dwaste, &new);
            assert(!ov);
            zone->waste = new;
        }
    }
}
+
+#endif /* VM_MAX_TAG_ZONES */
+
/*
 * Charge 'delta' bytes to the per-subtag subtotal of 'allocation',
 * lazily claiming a free subtotal slot for 'subtag' on first use, and
 * mirror the change into the global site's 'mapped' counter.
 * Asserts (and on release kernels silently drops the update) if no
 * subtotal slot is available.
 */
void
kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
{
    kern_allocation_name_t other;
    struct vm_allocation_total * total;
    uint32_t subidx;

    subidx = 0;
    assert(VM_KERN_MEMORY_NONE != subtag);
    /* find (or claim) the slot tracking this subtag */
    for (; subidx < allocation->subtotalscount; subidx++)
    {
        if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag)
        {
            allocation->subtotals[subidx].tag = subtag;
            break;
        }
        if (subtag == allocation->subtotals[subidx].tag) break;
    }
    assert(subidx < allocation->subtotalscount);
    if (subidx >= allocation->subtotalscount) return;

    total = &allocation->subtotals[subidx];
    other = vm_allocation_sites[subtag];
    assert(other);

    if (delta < 0)
    {
        assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
        OSAddAtomic64(delta, &total->total);
        assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
        OSAddAtomic64(delta, &other->mapped);
    }
    else
    {
        OSAddAtomic64(delta, &other->mapped);
        OSAddAtomic64(delta, &total->total);
    }
}
+
/* Return the name string stored inline in a named allocation object. */
const char *
kern_allocation_get_name(kern_allocation_name_t allocation)
{
    return (KA_NAME(allocation));
}
+
/*
 * Create a named allocation accounting object with 'subtotalscount'
 * subtotal slots and the (length-capped) name stored inline after them.
 * Initial refcount is 1; release with kern_allocation_name_release().
 * NOTE(review): kalloc() result is used unchecked -- presumes callers
 * only run where allocation failure is fatal anyway; confirm.
 */
kern_allocation_name_t
kern_allocation_name_allocate(const char * name, uint32_t subtotalscount)
{
    uint32_t namelen;

    namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);

    kern_allocation_name_t allocation;
    allocation = kalloc(KA_SIZE(namelen, subtotalscount));
    bzero(allocation, KA_SIZE(namelen, subtotalscount));

    allocation->refcount = 1;
    allocation->subtotalscount = subtotalscount;
    /* the name length is packed into the flags field (see KA_NAME_LEN) */
    allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT);
    strlcpy(KA_NAME(allocation), name, namelen + 1);

    return (allocation);
}
+
/*
 * Drop a reference on a named allocation object, freeing it when the
 * last reference goes (OSAddAtomic16 returns the pre-decrement value).
 */
void
kern_allocation_name_release(kern_allocation_name_t allocation)
{
    assert(allocation->refcount > 0);
    if (1 == OSAddAtomic16(-1, &allocation->refcount))
    {
        kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
    }
}
+
+vm_tag_t
+kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
+{
+ return (vm_tag_alloc(allocation));
+}
+
+#if ! VM_TAG_ACTIVE_UPDATE
/*
 * Accumulate an object's wired-page footprint into the per-tag info
 * array; objects with no wired pages, and kernel_object (accounted
 * elsewhere), are skipped.
 */
static void
vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
{
    if (!object->wired_page_count) return;
    if (object != kernel_object)
    {
        assert(object->wire_tag < num_info);
        info[object->wire_tag].size += ptoa_64(object->wired_page_count);
    }
}

/* Callback signature used by the object-iteration helpers below. */
typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
                                     unsigned int num_info, vm_object_t object);
+
/*
 * Apply 'proc' to every object on one group of a purgeable queue.
 */
static void
vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
                                  vm_page_iterate_proc proc, purgeable_q_t queue,
                                  int group)
{
    vm_object_t object;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
         !queue_end(&queue->objq[group], (queue_entry_t) object);
         object = (vm_object_t) queue_next(&object->objq))
    {
        proc(info, num_info, object);
    }
}
+
/*
 * Apply 'proc' to every object on the global wired-objects list.
 * vm_objects_wired_lock (a spin lock) is held across the whole walk,
 * so 'proc' must not block.
 */
static void
vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
                        vm_page_iterate_proc proc)
{
    vm_object_t object;

    lck_spin_lock(&vm_objects_wired_lock);
    queue_iterate(&vm_objects_wired,
                  object,
                  vm_object_t,
                  wired_objq)
    {
        proc(info, num_info, object);
    }
    lck_spin_unlock(&vm_objects_wired_lock);
}
+#endif /* ! VM_TAG_ACTIVE_UPDATE */
+
+/*
+ * Fold the per-site allocation accounting into the caller-supplied info[]
+ * array (one slot per tag, already zeroed/partially filled by
+ * vm_page_diagnose).  When 'iterated' is set, the caller has already
+ * computed per-tag sizes by walking objects/maps and this pass only
+ * cross-checks them against the sites' own totals; otherwise the sites'
+ * totals are taken as authoritative.  Runs entirely under
+ * vm_allocation_sites_lock.  Always returns 0.
+ */
+static uint64_t
+process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, boolean_t iterated)
+{
+ size_t namelen;
+ unsigned int idx, count, nextinfo;
+ vm_allocation_site_t * site;
+ lck_spin_lock(&vm_allocation_sites_lock);
+
+ /* Pass 1: copy (or verify) per-tag size/mapped counters. */
+ for (idx = 0; idx <= vm_allocation_tag_highest; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ info[idx].mapped = site->mapped;
+ info[idx].tag = site->tag;
+ if (!iterated)
+ {
+ info[idx].size = site->total;
+#if DEBUG || DEVELOPMENT
+ info[idx].peak = site->peak;
+#endif /* DEBUG || DEVELOPMENT */
+ }
+ else
+ {
+ /* Sites with subtotals are reconciled below, not here. */
+ if (!site->subtotalscount && (site->total != info[idx].size))
+ {
+ printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
+ info[idx].size = site->total;
+ }
+ }
+ }
+
+ /*
+  * Pass 2: fill in identification (site address / kmod id / name),
+  * flags, per-zone expansion entries and subtotal redistribution.
+  * nextinfo is where synthesized per-zone entries are appended,
+  * starting just past the highest live tag.
+  */
+ nextinfo = (vm_allocation_tag_highest + 1);
+ count = nextinfo;
+ if (count >= num_info) count = num_info;
+
+ for (idx = 0; idx < count; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ info[idx].flags |= VM_KERN_SITE_WIRED;
+ if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC)
+ {
+ /* Fixed, well-known tags carry their own index as the site id. */
+ info[idx].site = idx;
+ info[idx].flags |= VM_KERN_SITE_TAG;
+ if (VM_KERN_MEMORY_ZONE == idx)
+ {
+ /* Zone total is reported via its own counters; hide the raw tag. */
+ info[idx].flags |= VM_KERN_SITE_HIDE;
+ info[idx].flags &= ~VM_KERN_SITE_WIRED;
+ info[idx].collectable_bytes = zones_collectable_bytes;
+ }
+ }
+ else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT))))
+ {
+ /* Named allocation: copy the inline name (truncated to the field). */
+ info[idx].site = 0;
+ info[idx].flags |= VM_KERN_SITE_NAMED;
+ if (namelen > sizeof(info[idx].name)) namelen = sizeof(info[idx].name);
+ strncpy(&info[idx].name[0], KA_NAME(site), namelen);
+ }
+ else if (VM_TAG_KMOD & site->flags)
+ {
+ info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
+ info[idx].flags |= VM_KERN_SITE_KMOD;
+ }
+ else
+ {
+ info[idx].site = VM_KERNEL_UNSLIDE(site);
+ info[idx].flags |= VM_KERN_SITE_KERNEL;
+ }
+#if VM_MAX_TAG_ZONES
+ vm_allocation_zone_total_t * zone;
+ unsigned int zidx;
+ vm_size_t elem_size;
+
+ /*
+  * Expand per-zone subtotals for this tag into extra entries appended
+  * at nextinfo.  NOTE(review): only the starting position is checked
+  * against num_info; a tag with many active zones could presumably
+  * run nextinfo past num_info — verify against vm_page_diagnose_estimate's
+  * sizing slop.
+  */
+ if (vm_allocation_zone_totals
+ && (zone = vm_allocation_zone_totals[idx])
+ && (nextinfo < num_info))
+ {
+ for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++)
+ {
+ if (!zone[zidx].peak) continue;
+ info[nextinfo] = info[idx];
+ info[nextinfo].zone = zone_index_from_tag_index(zidx, &elem_size);
+ info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
+ info[nextinfo].flags |= VM_KERN_SITE_ZONE;
+ info[nextinfo].size = zone[zidx].total;
+ info[nextinfo].peak = zone[zidx].peak;
+ info[nextinfo].mapped = 0;
+ if (zone[zidx].wastediv)
+ {
+ info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv);
+ }
+ nextinfo++;
+ }
+ }
+#endif /* VM_MAX_TAG_ZONES */
+ if (site->subtotalscount)
+ {
+ /*
+  * This site's memory was actually charged to other tags; move as
+  * much of each subtotal tag's accounting as possible back to this
+  * site so the same pages are not reported twice.  Named subtotal
+  * tags (name[0] set) are left untouched.
+  */
+ uint64_t mapped, mapcost, take;
+ uint32_t sub;
+ vm_tag_t alloctag;
+
+ info[idx].size = site->total;
+ mapped = info[idx].size;
+ info[idx].mapped = mapped;
+ mapcost = 0;
+ for (sub = 0; sub < site->subtotalscount; sub++)
+ {
+ alloctag = site->subtotals[sub].tag;
+ assert(alloctag < num_info);
+ if (info[alloctag].name[0]) continue;
+ take = info[alloctag].mapped;
+ if (take > info[alloctag].size) take = info[alloctag].size;
+ if (take > mapped) take = mapped;
+ info[alloctag].mapped -= take;
+ info[alloctag].size -= take;
+ mapped -= take;
+ mapcost += take;
+ }
+ info[idx].size = mapcost;
+ }
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ return (0);
+}
+
+/*
+ * Estimate how many mach_memory_info_t entries vm_page_diagnose() will
+ * need: one per live allocation site, one per active (peak != 0) zone
+ * subtotal, plus the fixed counters and a small slop for tags created
+ * between this call and the diagnose pass.
+ */
+uint32_t
+vm_page_diagnose_estimate(void)
+{
+ vm_allocation_site_t * site;
+ uint32_t count;
+ uint32_t idx;
+
+ lck_spin_lock(&vm_allocation_sites_lock);
+ for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++)
+ {
+ site = vm_allocation_sites[idx];
+ if (!site) continue;
+ count++;
+#if VM_MAX_TAG_ZONES
+ if (vm_allocation_zone_totals)
+ {
+ vm_allocation_zone_total_t * zone;
+ zone = vm_allocation_zone_totals[idx];
+ if (!zone) continue;
+ for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) if (zone[zidx].peak) count++;
+ }
+#endif
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ /* some slop for new tags created */
+ count += 8;
+ count += VM_KERN_COUNTER_COUNT;
+
+ return (count);
+}
+
+
+/*
+ * Fill info[0..num_info) with a snapshot of kernel memory accounting:
+ * global counters (wired/managed/reserved/stolen/map sizes) in the last
+ * VM_KERN_COUNTER_COUNT slots, and per-tag usage in the leading slots
+ * (via object iteration and a kernel_map walk when tags are not
+ * actively updated, then reconciled by process_account()).
+ * Returns KERN_ABORTED if called before VM bootstrap completes.
+ */
+kern_return_t
+vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
+{
+ uint64_t wired_size;
+ uint64_t wired_managed_size;
+ uint64_t wired_reserved_size;
+ uint64_t booter_size;
+ boolean_t iterate;
+ mach_memory_info_t * counts;
+
+ bzero(info, num_info * sizeof(mach_memory_info_t));
+
+ if (!vm_page_wire_count_initial) return (KERN_ABORTED);
+
+#if CONFIG_EMBEDDED
+ wired_size = ptoa_64(vm_page_wire_count);
+ wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
+#else
+ wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
+ wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
+#endif
+ wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
+
+ /* memory the booter handed off is wired but not in vm_page_wire_count */
+ booter_size = ml_get_booter_memory_size();
+ wired_size += booter_size;
+
+ /* reserve the tail of info[] for the global counters */
+ assert(num_info >= VM_KERN_COUNTER_COUNT);
+ num_info -= VM_KERN_COUNTER_COUNT;
+ counts = &info[num_info];
+
+#define SET_COUNT(xcount, xsize, xflags) \
+ counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].mapped = (xsize); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
+
+ SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
+ SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
+ SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
+ SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
+ SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
+
+#define SET_MAP(xcount, xsize, xfree, xlargest) \
+ counts[xcount].site = (xcount); \
+ counts[xcount].size = (xsize); \
+ counts[xcount].mapped = (xsize); \
+ counts[xcount].free = (xfree); \
+ counts[xcount].largest = (xlargest); \
+ counts[xcount].flags = VM_KERN_SITE_COUNTER;
+
+ vm_map_size_t map_size, map_free, map_largest;
+
+ vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
+
+ vm_map_sizes(zone_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
+
+ vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest);
+ SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest);
+
+ /*
+  * If tags are not maintained incrementally, compute them now by
+  * walking all wired objects and then every entry of kernel_map
+  * (descending one level into submaps via a small explicit stack).
+  */
+ iterate = !VM_TAG_ACTIVE_UPDATE;
+ if (iterate)
+ {
+ enum { kMaxKernelDepth = 1 };
+ vm_map_t maps [kMaxKernelDepth];
+ vm_map_entry_t entries[kMaxKernelDepth];
+ vm_map_t map;
+ vm_map_entry_t entry;
+ vm_object_offset_t offset;
+ vm_page_t page;
+ int stackIdx, count;
+
+#if ! VM_TAG_ACTIVE_UPDATE
+ vm_page_iterate_objects(info, num_info, &vm_page_count_object);
+#endif /* ! VM_TAG_ACTIVE_UPDATE */
+
+ map = kernel_map;
+ stackIdx = 0;
+ while (map)
+ {
+ vm_map_lock(map);
+ /*
+  * NOTE(review): the loop condition tests 'map', not 'entry' —
+  * the inner while below either sets map = NULL (done) or pops
+  * the parent map/entry so the ++ resumes after the submap entry.
+  */
+ for (entry = map->hdr.links.next; map; entry = entry->links.next)
+ {
+ if (entry->is_sub_map)
+ {
+ /* push current position and descend into the submap */
+ assert(stackIdx < kMaxKernelDepth);
+ maps[stackIdx] = map;
+ entries[stackIdx] = entry;
+ stackIdx++;
+ map = VME_SUBMAP(entry);
+ entry = NULL;
+ break;
+ }
+ if (VME_OBJECT(entry) == kernel_object)
+ {
+ /* count wired pages in this range and charge them to the entry's tag */
+ count = 0;
+ vm_object_lock(VME_OBJECT(entry));
+ for (offset = entry->links.start; offset < entry->links.end; offset += page_size)
+ {
+ page = vm_page_lookup(VME_OBJECT(entry), offset);
+ if (page && VM_PAGE_WIRED(page)) count++;
+ }
+ vm_object_unlock(VME_OBJECT(entry));
+
+ if (count)
+ {
+ assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
+ assert(VME_ALIAS(entry) < num_info);
+ info[VME_ALIAS(entry)].size += ptoa_64(count);
+ }
+ }
+ /* finished this map? unlock it and pop back to the parent (if any) */
+ while (map && (entry == vm_map_last_entry(map)))
+ {
+ vm_map_unlock(map);
+ if (!stackIdx) map = NULL;
+ else
+ {
+ --stackIdx;
+ map = maps[stackIdx];
+ entry = entries[stackIdx];
+ }
+ }
+ }
+ }
+ }
+
+ process_account(info, num_info, zones_collectable_bytes, iterate);
+
+ return (KERN_SUCCESS);
+}
+
+#if DEBUG || DEVELOPMENT
+
+/*
+ * Look up the allocation (zone element or kernel_map entry) containing
+ * 'addr'.  On success fills *size and *tag; *zone_size is set to the zone
+ * element size when the address belongs to a zone, else 0.
+ * Returns KERN_INVALID_ADDRESS when no allocation starts at 'addr'.
+ */
+kern_return_t
+vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
+{
+ kern_return_t ret;
+ vm_size_t zsize;
+ vm_map_t map;
+ vm_map_entry_t entry;
+
+ /* fast path: zone allocations are resolved without touching the maps */
+ zsize = zone_element_info((void *) addr, tag);
+ if (zsize)
+ {
+ *zone_size = *size = zsize;
+ return (KERN_SUCCESS);
+ }
+
+ *zone_size = 0;
+ ret = KERN_INVALID_ADDRESS;
+ /*
+  * Walk from kernel_map down at most one submap level.  kernel_map's
+  * lock is held while the submap is locked; both are released below
+  * (vm_map_unlock(map) for the submap, vm_map_unlock(kernel_map)
+  * unconditionally).
+  */
+ for (map = kernel_map; map; )
+ {
+ vm_map_lock(map);
+ if (!vm_map_lookup_entry(map, addr, &entry)) break;
+ if (entry->is_sub_map)
+ {
+ /* only one level of submap nesting is followed */
+ if (map != kernel_map) break;
+ map = VME_SUBMAP(entry);
+ continue;
+ }
+ /* only report allocations whose start matches exactly */
+ if (entry->vme_start != addr) break;
+ *tag = VME_ALIAS(entry);
+ *size = (entry->vme_end - addr);
+ ret = KERN_SUCCESS;
+ break;
+ }
+ if (map != kernel_map) vm_map_unlock(map);
+ vm_map_unlock(kernel_map);
+
+ return (ret);
+}
+
+#endif /* DEBUG || DEVELOPMENT */
+
+/*
+ * Resolve the kmod id (and optionally copy the kext name into 'name')
+ * for the allocation site behind 'tag'.  Returns 0 when the tag has no
+ * site or the site is not kext-owned.
+ */
+uint32_t
+vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
+{
+ vm_allocation_site_t * site;
+ uint32_t kmodId = 0;
+
+ lck_spin_lock(&vm_allocation_sites_lock);
+ site = vm_allocation_sites[tag];
+ if (site && (VM_TAG_KMOD & site->flags))
+ {
+ kmodId = OSKextGetKmodIDForSite(site, name, namelen);
+ }
+ lck_spin_unlock(&vm_allocation_sites_lock);
+
+ return kmodId;
+}
+
+
+#if CONFIG_SECLUDED_MEMORY
+/*
+ * Note that there's no locking around other accesses to vm_page_secluded_target.
+ * That should be OK, since these are the only places where it can be changed after
+ * initialization. Other users (like vm_pageout) may see the wrong value briefly,
+ * but will eventually get the correct value. This brief mismatch is OK as pageout
+ * and page freeing will auto-adjust the vm_page_secluded_count to match the target
+ * over time.
+ */
+unsigned int vm_page_secluded_suppress_cnt = 0;
+unsigned int vm_page_secluded_save_target;
+
+
+lck_grp_attr_t secluded_suppress_slock_grp_attr;
+lck_grp_t secluded_suppress_slock_grp;
+lck_attr_t secluded_suppress_slock_attr;
+lck_spin_t secluded_suppress_slock;
+
+/*
+ * One-time setup of the spinlock serializing secluded-memory suppression
+ * state transitions.  The group/attr objects must be initialized before
+ * lck_spin_init consumes them, hence the ordering below.
+ */
+void
+secluded_suppression_init(void)
+{
+ lck_grp_attr_setdefault(&secluded_suppress_slock_grp_attr);
+ lck_grp_init(&secluded_suppress_slock_grp,
+ "secluded_suppress_slock", &secluded_suppress_slock_grp_attr);
+ lck_attr_setdefault(&secluded_suppress_slock_attr);
+ lck_spin_init(&secluded_suppress_slock,
+ &secluded_suppress_slock_grp, &secluded_suppress_slock_attr);
+}
+
+/*
+ * Mark 'task' as suppressing the secluded pool.  The first suppressor
+ * saves the current target and forces it to 0; later suppressors only
+ * bump the count.  The unlocked pre-check is an optimization — the flag
+ * is re-tested under the spinlock before any state changes.
+ */
+void
+start_secluded_suppression(task_t task)
+{
+ if (task->task_suppressed_secluded)
+ return;
+ lck_spin_lock(&secluded_suppress_slock);
+ if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
+ task->task_suppressed_secluded = TRUE;
+ vm_page_secluded_save_target = vm_page_secluded_target;
+ vm_page_secluded_target = 0;
+ }
+ lck_spin_unlock(&secluded_suppress_slock);
+}
+
+/*
+ * Undo start_secluded_suppression() for 'task'.  When the last
+ * suppressor drops out, the saved secluded target is restored; the
+ * count/flag are only touched if this task actually held a suppression.
+ */
+void
+stop_secluded_suppression(task_t task)
+{
+ lck_spin_lock(&secluded_suppress_slock);
+ if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
+ task->task_suppressed_secluded = FALSE;
+ vm_page_secluded_target = vm_page_secluded_save_target;
+ }
+ lck_spin_unlock(&secluded_suppress_slock);
+}
+
+#endif /* CONFIG_SECLUDED_MEMORY */