+ if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
+ /*
+ * XXX We've scanned all the speculative
+ * queues but still haven't found one
+ * that is not empty, even though
+ * vm_page_speculative_count is not 0.
+ *
+ * report the anomaly...
+ */
+ printf("vm_pageout_scan: "
+ "all speculative queues empty "
+ "but count=%d. Re-adjusting.\n",
+ vm_page_speculative_count);
+ if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
+ vm_page_speculative_count_drift_max = vm_page_speculative_count;
+ vm_page_speculative_count_drifts++;
+#if DEVELOPMENT || DEBUG
+ panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
+#endif /* DEVELOPMENT || DEBUG */
+ /* readjust... */
+ vm_page_speculative_count = 0;
+ /* ... and continue */
+ continue;
+ }
+
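+ /*
+ * decide whether the aged speculative queue can be stolen from:
+ * either we're over the speculative target (or aging is being
+ * forced), or the oldest speculative queue has sat long enough
+ * to be considered fully aged... DELAY_SPECULATIVE_AGE limits
+ * how often we redo the full timestamp comparison
+ */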
+ if (vm_page_speculative_count > vm_page_speculative_target || force_speculative_aging == TRUE)
+ can_steal = TRUE;
+ else {
+ if (!delay_speculative_age) {
+ mach_timespec_t ts_fully_aged;
+
+ ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
+ ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
+ * 1000 * NSEC_PER_USEC;
+
+ ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
+ can_steal = TRUE;
+ else
+ delay_speculative_age++;
+ } else {
+ delay_speculative_age++;
+ if (delay_speculative_age == DELAY_SPECULATIVE_AGE)
+ delay_speculative_age = 0;
+ }
+ }
+ if (can_steal == TRUE)
+ vm_page_speculate_ageit(aq);
+ }
+ force_speculative_aging = FALSE;
+
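+ /*
+ * the speculative queue is empty and cache eviction isn't being
+ * throttled... try to recover pages from the object cache before
+ * falling into the throttle/stall logic below
+ */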
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0 &&
+ ((vm_page_background_mode == VM_PAGE_BG_DISABLED) || (vm_page_background_count <= vm_page_background_target)))
+#else
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0)
+#endif
+ {
+ int pages_evicted;
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ pages_evicted = vm_object_cache_evict(100, 10);
+
+ if (pages_evicted) {
+
+ vm_pageout_cache_evicted += pages_evicted;
+
+ VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
+ vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
+ memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
+
+ /*
+ * we just freed up to 100 pages,
+ * so go back to the top of the main loop
+ * and re-evaluate the memory situation
+ */
+ continue;
+ } else
+ cache_evict_throttle = 1000;
+ }
+ if (cache_evict_throttle)
+ cache_evict_throttle--;
+
+#if CONFIG_JETSAM
+ /*
+ * don't let the filecache_min fall below ~15% (1/7) of available memory
+ * on systems with an active compressor that isn't nearing its
+ * limits w/r to accepting new data
+ *
+ * on systems w/o the compressor/swapper, the filecache is always
+ * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
+ * since most (if not all) of the anonymous pages are in the
+ * throttled queue (which isn't counted as available) which
+ * effectively disables this filter
+ */
+ if (vm_compressor_low_on_space())
+ vm_page_filecache_min = 0;
+ else
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 7);
+#else
+ if (vm_compressor_out_of_space())
+ vm_page_filecache_min = 0;
+ else {
+ /*
+ * don't let the filecache_min fall below 33% of available memory...
+ */
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 3);
+ }
+#endif
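+ /*
+ * if free pages are critically low, drop the filecache floor
+ * entirely so that file-backed pages can be reclaimed as well
+ */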
+ if (vm_page_free_count < (vm_page_free_reserved / 4))
+ vm_page_filecache_min = 0;
+
+ exceeded_burst_throttle = FALSE;
+ /*
+ * Sometimes we have to pause:
+ * 1) No inactive pages - nothing to do.
+ * 2) Loop control - no acceptable pages found on the inactive queue
+ * within the last vm_pageout_burst_inactive_throttle iterations
+ * 3) Flow control - default pageout queue is full
+ */
+ if (vm_page_queue_empty(&vm_page_queue_inactive) &&
+ vm_page_queue_empty(&vm_page_queue_anonymous) &&
+ vm_page_queue_empty(&sq->age_q)) {
+ vm_pageout_scan_empty_throttle++;
+ msecs = vm_pageout_empty_wait;
+ goto vm_pageout_scan_delay;
+
+ } else if (inactive_burst_count >=
+ MIN(vm_pageout_burst_inactive_throttle,
+ (vm_page_inactive_count +
+ vm_page_speculative_count))) {
+ vm_pageout_scan_burst_throttle++;
+ msecs = vm_pageout_burst_wait;
+
+ exceeded_burst_throttle = TRUE;
+ goto vm_pageout_scan_delay;
+
+ } else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
+ VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
+ vm_pageout_scan_swap_throttle++;
+ msecs = vm_pageout_swap_wait;
+ goto vm_pageout_scan_delay;
+
+ } else if (VM_PAGE_Q_THROTTLED(iq) &&
+ VM_DYNAMIC_PAGING_ENABLED()) {
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+
+ switch (flow_control.state) {
+
+ case FCS_IDLE:
+ if ((vm_page_free_count + local_freed) < vm_page_free_target) {
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_THREAD_YIELD);
+ if (!VM_PAGE_Q_THROTTLED(iq)) {
+ vm_pageout_scan_yield_unthrottled++;
+ continue;
+ }
+ if (vm_page_pageable_external_count > vm_page_filecache_min &&
+ !vm_page_queue_empty(&vm_page_queue_inactive)) {
+ anons_grabbed = ANONS_GRABBED_LIMIT;
+ vm_pageout_scan_throttle_deferred++;
+ goto consider_inactive;
+ }
+ if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
+ continue;
+ }
+reset_deadlock_timer:
+ ts.tv_sec = vm_pageout_deadlock_wait / 1000;
+ ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
+ clock_get_system_nanotime(&sec, &nsec);
+ flow_control.ts.tv_sec = (unsigned int) sec;
+ flow_control.ts.tv_nsec = nsec;
+ ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
+
+ flow_control.state = FCS_DELAYED;
+ msecs = vm_pageout_deadlock_wait;
+
+ break;
+
+ case FCS_DELAYED:
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
+ /*
+ * the pageout thread for the default pager is potentially
+ * deadlocked since the
+ * default pager queue has been throttled for more than the
+ * allowable time... we need to move some clean pages or dirty
+ * pages belonging to the external pagers if they aren't throttled
+ * vm_page_free_wanted represents the number of threads currently
+ * blocked waiting for pages... we'll move one page for each of
+ * these plus a fixed amount to break the logjam... once we're done
+ * moving this number of pages, we'll re-enter the FCS_DELAYED state
+ * with a new timeout target since we have no way of knowing
+ * whether we've broken the deadlock except through observation
+ * of the queue associated with the default pager... we need to
+ * stop moving pages and allow the system to run to see what
+ * state it settles into.
+ */
+ vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
+ vm_pageout_scan_deadlock_detected++;
+ flow_control.state = FCS_DEADLOCK_DETECTED;
+ thread_wakeup((event_t) &vm_pageout_garbage_collect);
+ goto consider_inactive;
+ }
+ /*
+ * just resniff instead of trying
+ * to compute a new delay time... we're going to be
+ * awakened immediately upon a laundry completion,
+ * so we won't wait any longer than necessary
+ */
+ msecs = vm_pageout_idle_wait;
+ break;
+
+ case FCS_DEADLOCK_DETECTED:
+ if (vm_pageout_deadlock_target)
+ goto consider_inactive;
+ goto reset_deadlock_timer;
+
+ }
+vm_pageout_scan_delay:
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+
+ if (flow_control.state == FCS_DELAYED &&
+ !VM_PAGE_Q_THROTTLED(iq)) {
+ flow_control.state = FCS_IDLE;
+ goto consider_inactive;
+ }
+
+ if (vm_page_free_count >= vm_page_free_target) {
+ /*
+ * we're here because someone else freed up some
+ * pages while we had the queues unlocked above,
+ * and we've hit one of the 3 conditions that
+ * cause us to pause the pageout scan thread
+ *
+ * since we already have enough free pages,
+ * let's avoid stalling and return normally
+ *
+ * before we return, make sure the pageout I/O threads
+ * are running throttled in case there are still requests
+ * in the laundry... since we have enough free pages
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ *
+ * we don't want to hold vm_page_queue_free_lock when
+ * calling vm_pageout_adjust_eq_iothrottle (since it
+ * may cause other locks to be taken), we do the initial
+ * check outside of the lock. Once we take the lock,
+ * we recheck the condition since it may have changed.
+ * if it has, no problem, we will make the threads
+ * non-throttled before actually blocking
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+ }
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_free_count >= vm_page_free_target &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ goto return_from_scan;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
+ /*
+ * we're most likely about to block due to one of
+ * the 3 conditions that cause vm_pageout_scan to
+ * not be able to make forward progress w/r
+ * to providing new pages to the free queue,
+ * so unthrottle the I/O threads in case we
+ * have laundry to be cleaned... it needs
+ * to be completed ASAP.
+ *
+ * even if we don't block, we want the io threads
+ * running unthrottled since the sum of free +
+ * clean pages is still under our free target
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+ }
+ if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
+ /*
+ * if we get here we're below our free target and
+ * we're stalling due to a full laundry queue or
+ * we don't have any inactive pages other than
+ * those in the clean queue...
+ * however, we have pages on the clean queue that
+ * can be moved to the free queue, so let's not
+ * stall the pageout scan
+ */
+ flow_control.state = FCS_IDLE;
+ goto consider_inactive;
+ }
+ VM_CHECK_MEMORYSTATUS;
+
+ if (flow_control.state != FCS_IDLE)
+ vm_pageout_scan_throttle++;
+ iq->pgo_throttled = TRUE;
+
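+ /*
+ * block on the laundry-completion event for at most 'msecs'
+ * milliseconds before rescanning
+ */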
+ assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
+ counter(c_vm_pageout_scan_block++);
+
+ vm_page_unlock_queues();
+
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
+
+ vm_page_lock_queues();
+
+ iq->pgo_throttled = FALSE;
+
+ if (loop_count >= vm_page_inactive_count)
+ loop_count = 0;
+ inactive_burst_count = 0;
+
+ goto Restart;
+ /*NOTREACHED*/
+ }
+
+
+ flow_control.state = FCS_IDLE;
+consider_inactive:
+ vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
+ vm_pageout_inactive_external_forced_reactivate_limit);
+ loop_count++;
+ inactive_burst_count++;
+ vm_pageout_inactive++;
+
+
+ /*
+ * Choose a victim.
+ */
+ while (1) {
+ uint32_t inactive_external_count;
+
+#if CONFIG_BACKGROUND_QUEUE
+ page_from_bg_q = FALSE;
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ m = NULL;
+ m_object = VM_OBJECT_NULL;
+
+ if (VM_DYNAMIC_PAGING_ENABLED()) {
+ assert(vm_page_throttled_count == 0);
+ assert(vm_page_queue_empty(&vm_page_queue_throttled));
+ }
+
+ /*
+ * Try for a clean-queue inactive page.
+ * These are pages that vm_pageout_scan tried to steal earlier, but
+ * were dirty and had to be cleaned. Pick them up now that they are clean.
+ */
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ break;
+ }
+
+ /*
+ * The next most eligible pages are ones we paged in speculatively,
+ * but which have not yet been touched and have been aged out.
+ */
+ if (!vm_page_queue_empty(&sq->age_q)) {
+ m = (vm_page_t) vm_page_queue_first(&sq->age_q);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ if (!m->dirty || force_anonymous == FALSE)
+ break;
+ else
+ m = NULL;
+ }
+
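+ /*
+ * next, consider a page from the background queue, but only
+ * if background tracking is enabled and that queue is over
+ * its target
+ */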
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
+ vm_object_t bg_m_object = NULL;
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
+
+ bg_m_object = VM_PAGE_OBJECT(m);
+
+ if (!VM_PAGE_PAGEABLE(m)) {
+ /*
+ * This page is on the background queue
+ * but not on a pageable queue. This is
+ * likely a transient state and whoever
+ * took it out of its pageable queue
+ * will likely put it back on a pageable
+ * queue soon but we can't deal with it
+ * at this point, so let's ignore this
+ * page.
+ */
+ } else if (force_anonymous == FALSE || bg_m_object->internal) {
+
+ if (bg_m_object->internal &&
+ ((vm_compressor_out_of_space() == TRUE) ||
+ (vm_page_free_count < (vm_page_free_reserved / 4)))) {
+
+ vm_pageout_skipped_bq_internal++;
+ } else {
+ page_from_bg_q = TRUE;
+
+ if (bg_m_object->internal)
+ vm_pageout_considered_bq_internal++;
+ else
+ vm_pageout_considered_bq_external++;
+
+ break;
+ }
+ }
+ }
+#endif
+
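+ /*
+ * no clean, speculative or background page was selected...
+ * decide whether the next victim should come from the anonymous
+ * queue or the file-backed inactive queue, trying to keep the
+ * filecache above vm_page_filecache_min and the two pools in
+ * reasonable balance
+ */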
+ grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
+ inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
+ if ((vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) ||
+ ((inactive_external_count < vm_page_anonymous_count) && (inactive_external_count < (vm_page_pageable_external_count / 3)))) {
+ grab_anonymous = TRUE;
+ anons_grabbed = 0;
+
+ vm_pageout_skipped_external++;
+ goto want_anonymous;
+ }
+#if CONFIG_JETSAM
+ /* If the file-backed pool has accumulated
+ * significantly more pages than the jetsam
+ * threshold, prefer to reclaim those
+ * inline to minimise compute overhead of reclaiming
+ * anonymous pages.
+ * This calculation does not account for the CPU local
+ * external page queues, as those are expected to be
+ * much smaller relative to the global pools.
+ */
+ if (grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
+ if (vm_page_pageable_external_count >
+ vm_page_filecache_min) {
+ if ((vm_page_pageable_external_count *
+ vm_pageout_memorystatus_fb_factor_dr) >
+ (memorystatus_available_pages_critical *
+ vm_pageout_memorystatus_fb_factor_nr)) {
+ grab_anonymous = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_grab_anon_overrides++;
+#endif
+ }
+ }
+#if DEVELOPMENT || DEBUG
+ if (grab_anonymous) {
+ vm_grab_anon_nops++;
+ }
+#endif
+ }
+#endif /* CONFIG_JETSAM */
+
+want_anonymous:
+ if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
+
+ if ( !vm_page_queue_empty(&vm_page_queue_inactive) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+ anons_grabbed = 0;
+
+ if (vm_page_pageable_external_count < vm_page_filecache_min) {
+ if ((++reactivated_this_call % 100))
+ goto must_activate_page;
+ /*
+ * steal 1% of the file backed pages even if
+ * we are under the limit that has been set
+ * for a healthy filecache
+ */
+ }
+ break;
+ }
+ }
+ if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+ anons_grabbed++;
+
+ break;
+ }
+
+ /*
+ * if we've gotten here, we have no victim page.
+ * check to see if we've not finished balancing the queues
+ * or we have a page on the aged speculative queue that we
+ * skipped due to force_anonymous == TRUE... or we have
+ * speculative pages that we can prematurely age... in
+ * any of these cases we'll keep going, else panic
+ */
+ force_anonymous = FALSE;
+ vm_pageout_no_victim++;
+
+ if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
+ goto done_with_inactivepage;
+
+ if (!vm_page_queue_empty(&sq->age_q))
+ goto done_with_inactivepage;
+
+ if (vm_page_speculative_count) {
+ force_speculative_aging = TRUE;
+ goto done_with_inactivepage;
+ }
+ panic("vm_pageout: no victim");
+
+ /* NOTREACHED */
+ }
+ assert(VM_PAGE_PAGEABLE(m));
+ m_object = VM_PAGE_OBJECT(m);
+ force_anonymous = FALSE;
+
+ page_prev_q_state = m->vm_page_q_state;
+ /*
+ * we just found this page on one of our queues...
+ * it can't also be on the pageout queue, so safe
+ * to call vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
+
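+ /*
+ * sanity check the victim: it must be a real, pageable page
+ * that isn't being laundered and doesn't belong to the
+ * kernel object
+ */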
+ assert(!m->laundry);
+ assert(!m->private);
+ assert(!m->fictitious);
+ assert(m_object != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+ vm_pageout_stats[vm_pageout_stat_now].considered++;
+ vm_pageout_considered_page++;
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+ /*
+ * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run... otherwise, we're likely to trip over this
+ * object in the same state as we work our way through
+ * the queue... clumps of pages associated with the same
+ * object are fairly typical on the inactive and active queues
+ */
+ if (!vm_object_lock_try_scan(m_object)) {
+ vm_page_t m_want = NULL;
+
+ vm_pageout_inactive_nolock++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_nolock++;
+
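+ /*
+ * clear the hardware reference bit so the page can age
+ * normally if we come across it again
+ */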
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
+ m->reference = FALSE;
+
+#if !CONFIG_EMBEDDED
+ /*
+ * m->object must be stable since we hold the page queues lock...
+ * we can update the scan_collisions field sans the object lock
+ * since it is a separate field and this is the only spot that does
+ * a read-modify-write operation and it is never executed concurrently...
+ * we can asynchronously set this field to 0 when creating a UPL, so it
+ * is possible for the value to be a bit non-deterministic, but that's ok
+ * since it's only used as a hint
+ */
+
+ /*
+ * This is not used on EMBEDDED because having this variable set *could* lead
+ * us to self-cannibalize pages from m_object to fill a UPL for a pagein.
+ * And, there's a high probability that the object that vm_pageout_scan
+ * wants and collides on is a very popular object e.g. the shared cache on EMBEDDED.
+ * The older pages that we cannibalize from the shared cache could be really
+ * important text pages e.g. the system call stubs.
+ */
+ m_object->scan_collisions = 1;
+#endif /* !CONFIG_EMBEDDED */
+
+ if ( !vm_page_queue_empty(&sq->age_q) )
+ m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+ else if ( !vm_page_queue_empty(&vm_page_queue_cleaned))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ else if ( !vm_page_queue_empty(&vm_page_queue_inactive) &&
+ (anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ else if ( !vm_page_queue_empty(&vm_page_queue_anonymous))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+ /*
+ * this is the next object we're going to be interested in
+ * try to make sure it's available after the mutex_yield
+ * returns control
+ */
+ if (m_want)
+ vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
+
+ /*
+ * force us to dump any collected free pages
+ * and to pause before moving on
+ */
+ try_failed = TRUE;
+
+ goto requeue_page;
+ }
+ object = m_object;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ try_failed = FALSE;
+ }
+ assert(m_object == object);
+ assert(VM_PAGE_OBJECT(m) == m_object);
+
+ if (m->busy) {
+ /*
+ * Somebody is already playing with this page.
+ * Put it back on the appropriate queue
+ *
+ */
+ vm_pageout_inactive_busy++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_busy++;
+requeue_page:
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_page_enqueue_inactive(m, FALSE);
+ else
+ vm_page_activate(m);
+#if CONFIG_BACKGROUND_QUEUE
+ if (page_from_bg_q == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
+ }
+#endif
+ goto done_with_inactivepage;
+ }
+
+
+ /*
+ * If it's absent, in error or the object is no longer alive,
+ * we can reclaim the page... in the no longer alive case,
+ * there are 2 states the page can be in that preclude us
+ * from reclaiming it - busy or cleaning - that we've already
+ * dealt with
+ */
+ if (m->absent || m->error || !object->alive) {
+
+ if (m->absent)
+ vm_pageout_inactive_absent++;
+ else if (!object->alive)
+ vm_pageout_inactive_notalive++;
+ else
+ vm_pageout_inactive_error++;
+reclaim_page:
+ if (vm_pageout_deadlock_target) {
+ vm_pageout_scan_inactive_throttle_success++;
+ vm_pageout_deadlock_target--;
+ }
+
+ DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+ if (object->internal) {
+ DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+ } else {
+ DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+ }
+ assert(!m->cleaning);
+ assert(!m->laundry);
+
+ m->busy = TRUE;
+
+ /*
+ * remove page from object here since we're already
+ * behind the object lock... defer the rest of the work
+ * we'd normally do in vm_page_free_prepare_object
+ * until 'vm_page_free_list' is called
+ */
+ if (m->tabled)
+ vm_page_remove(m, TRUE);
+
+ assert(m->pageq.next == 0 && m->pageq.prev == 0);
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_pageout_freed_from_speculative++;
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_freed_from_cleaned++;
+ else
+ vm_pageout_freed_from_inactive_clean++;
+
+ vm_pageout_stats[vm_pageout_stat_now].reclaimed_clean++;
+
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ }
+ /*
+ * If the object is empty, the page must be reclaimed even
+ * if dirty or used.
+ * If the page belongs to a volatile object, we stick it back
+ * on.
+ */
+ if (object->copy == VM_OBJECT_NULL) {
+ if (object->purgable == VM_PURGABLE_EMPTY) {
+ if (m->pmapped == TRUE) {
+ /* unmap the page */
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ if (m->dirty || m->precious) {
+ /* we saved the cost of cleaning this page ! */
+ vm_page_purged_count++;
+ }
+ goto reclaim_page;
+ }
+
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+ /*
+ * With the VM compressor, the cost of
+ * reclaiming a page is much lower (no I/O),
+ * so if we find a "volatile" page, it's better
+ * to let it get compressed rather than letting
+ * it occupy a full page until it gets purged.
+ * So no need to check for "volatile" here.
+ */
+ } else if (object->purgable == VM_PURGABLE_VOLATILE) {
+ /*
+ * Avoid cleaning a "volatile" page which might
+ * be purged soon.
+ */
+
+ /* if it's wired, we can't put it on our queue */
+ assert(!VM_PAGE_WIRED(m));
+
+ /* just stick it back on! */
+ reactivated_this_call++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_volatile_reactivated++;
+
+ goto reactivate_page;
+ }
+ }
+ /*
+ * If it's being used, reactivate.
+ * (Fictitious pages are either busy or absent.)
+ * First, update the reference and dirty bits
+ * to make sure the page is unreferenced.
+ */
+ refmod_state = -1;
+
+ if (m->reference == FALSE && m->pmapped == TRUE) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (refmod_state & VM_MEM_REFERENCED)
+ m->reference = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ /*
+ * if (m->cleaning && !m->free_when_done)
+ * If already cleaning this page in place and it hasn't
+ * been recently referenced, just pull off the queue.
+ * We can leave the page mapped, and upl_commit_range
+ * will put it on the clean queue.
+ *
+ * if (m->free_when_done && !m->cleaning)
+ * an msync INVALIDATE is in progress...
+ * this page has been marked for destruction
+ * after it has been cleaned,
+ * but not yet gathered into a UPL
+ * where 'cleaning' will be set...
+ * just leave it off the paging queues
+ *
+ * if (m->free_when_done && m->cleaning)
+ * an msync INVALIDATE is in progress
+ * and the UPL has already gathered this page...
+ * just leave it off the paging queues
+ */
+
+ /*
+ * page with m->free_when_done and still on the queues means that an
+ * MS_INVALIDATE is in progress on this page... leave it alone
+ */
+ if (m->free_when_done) {
+ goto done_with_inactivepage;
+ }
+
+ /* if cleaning, reactivate if referenced. otherwise, just pull off queue */
+ if (m->cleaning) {
+ if (m->reference == TRUE) {
+ reactivated_this_call++;
+ goto reactivate_page;
+ } else {
+ goto done_with_inactivepage;
+ }
+ }
+
+ if (m->reference || m->dirty) {
+ /* deal with a rogue "reusable" page */
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
+ }
+
+ if (!m->no_cache &&
+#if CONFIG_BACKGROUND_QUEUE
+ page_from_bg_q == FALSE &&
+#endif
+ (m->reference ||
+ (m->xpmapped && !object->internal && (vm_page_xpmapped_external_count < (vm_page_external_count / 4))))) {
+ /*
+ * The page we pulled off the inactive list has
+ * been referenced. It is possible for other
+ * processors to be touching pages faster than we
+ * can clear the referenced bit and traverse the
+ * inactive queue, so we limit the number of
+ * reactivations.
+ */
+ if (++reactivated_this_call >= reactivate_limit) {
+ vm_pageout_reactivation_limit_exceeded++;
+ } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+ vm_pageout_inactive_force_reclaim++;
+ } else {
+ uint32_t isinuse;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reference_reactivated++;
+reactivate_page:
+ if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
+ vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+ /*
+ * no explicit mappings of this object exist
+ * and it's not open via the filesystem
+ */
+ vm_page_deactivate(m);
+ vm_pageout_inactive_deactivated++;
+ } else {
+must_activate_page:
+ /*
+ * The page was/is being used, so put back on active list.
+ */
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+ inactive_burst_count = 0;
+ }
+#if CONFIG_BACKGROUND_QUEUE
+ if (page_from_bg_q == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
+ }
+#endif
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reactivated++;
+ vm_pageout_inactive_used++;
+
+ goto done_with_inactivepage;
+ }
+ /*
+ * Make sure we call pmap_get_refmod() if it
+ * wasn't already called just above, to update
+ * the dirty bit.
+ */
+ if ((refmod_state == -1) && !m->dirty && m->pmapped) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ }
+
+ XPR(XPR_VM_PAGEOUT,
+ "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
+ object, m->offset, m, 0,0);
+
+ /*
+ * we've got a candidate page to steal...
+ *
+ * m->dirty is up to date courtesy of the
+ * preceding check for m->reference... if
+ * we get here, then m->reference had to be
+ * FALSE (or possibly "reactivate_limit" was
+ * exceeded), but in either case we called
+ * pmap_get_refmod() and updated both
+ * m->reference and m->dirty
+ *
+ * if it's dirty or precious we need to
+ * see if the target queue is throttled...
+ * if it is, we need to skip over the page by
+ * moving it to the throttled or active queue
+ */
+
+ inactive_throttled = FALSE;
+
+ if (m->dirty || m->precious) {
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ inactive_throttled = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+ }
+throttle_inactive:
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ object->internal && m->dirty &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_throttled, m,
+ vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
+ vm_page_throttled_count++;
+
+ vm_pageout_scan_reclaimed_throttled++;
+
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ }
+ if (inactive_throttled == TRUE) {
+
+ if (object->internal == FALSE) {
+ /*
+ * we need to break up the following potential deadlock case...
+ * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
+ * b) The thread doing the writing is waiting for pages while holding the truncate lock
+ * c) Most of the pages in the inactive queue belong to this file.
+ *
+ * we are potentially in this deadlock because...
+ * a) the external pageout queue is throttled
+ * b) we're done with the active queue and moved on to the inactive queue
+ * c) we've got a dirty external page
+ *
+ * since we don't know the reason for the external pageout queue being throttled we
+ * must suspect that we are deadlocked, so move the current page onto the active queue
+ * in an effort to cause a page from the active queue to 'age' to the inactive queue
+ *
+ * if we don't have jetsam configured (i.e. we have a dynamic pager), set
+ * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
+ * pool the next time we select a victim page... if we can make enough new free pages,
+ * the deadlock will break, the external pageout queue will empty and it will no longer
+ * be throttled
+ *
+ * if we have jetsam configured, keep a count of the pages reactivated this way so
+ * that we can try to find clean pages in the active/inactive queues before
+ * deciding to jetsam a process
+ */
+ vm_pageout_scan_inactive_throttled_external++;
+
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+ vm_page_active_count++;
+ vm_page_pageable_external_count++;
+
+ vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+
+#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
+ vm_pageout_inactive_external_forced_reactivate_limit--;
+
+ if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
+ vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+ /*
+ * Possible deadlock scenario so request jetsam action
+ */
+ assert(object);
+ vm_object_unlock(object);
+ object = VM_OBJECT_NULL;
+ vm_page_unlock_queues();
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
+ if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
+ vm_pageout_inactive_external_forced_jetsam_count++;
+ }
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ vm_page_lock_queues();
+ delayed_unlock = 1;
+ }
+#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+ force_anonymous = TRUE;
+#endif
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ } else {
+ vm_pageout_scan_inactive_throttled_internal++;
+ goto must_activate_page;
+ }
+ }
+
+ /*
+ * we've got a page that we can steal...
+ * eliminate all mappings and make sure
+ * we have the up-to-date modified state
+ *
+ * if we need to do a pmap_disconnect then we
+ * need to re-evaluate m->dirty since the pmap_disconnect
+ * provides the true state atomically... the
+ * page was still mapped up to the pmap_disconnect
+ * and may have been dirtied at the last microsecond
+ *
+ * Note that if 'pmapped' is FALSE then the page is not,
+ * and has not been, in any map, so there is no point calling
+ * pmap_disconnect(). m->dirty could have been set in anticipation
+ * of likely usage of the page.
+ */
+ if (m->pmapped == TRUE) {
+ int pmap_options;
+
+ /*
+ * Don't count this page as going into the compressor
+ * if any of these are true:
+ * 1) compressed pager isn't enabled
+ * 2) Freezer enabled device with compressed pager
+ * backend (exclusive use) i.e. most of the VM system
+ * (including vm_pageout_scan) has no knowledge of
+ * the compressor
+ * 3) This page belongs to a file and hence will not be
+ * sent into the compressor
+ */
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
+ object->internal == FALSE) {
+ pmap_options = 0;
+ } else if (m->dirty || m->precious) {
+ /*
+ * VM knows that this page is dirty (or
+ * precious) and needs to be compressed
+ * rather than freed.
+ * Tell the pmap layer to count this page
+ * as "compressed".
+ */
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
+ } else {
+ /*
+ * VM does not know if the page needs to
+ * be preserved but the pmap layer might tell
+ * us if any mapping has "modified" it.
+ * Let the pmap layer count this page
+ * as compressed if and only if it has been
+ * modified.
+ */
+ pmap_options =
+ PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ }
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
+ pmap_options,
+ NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ /*
+ * reset our count of pages that have been reclaimed
+ * since the last page was 'stolen'
+ */
+ inactive_reclaim_run = 0;
+
+ /*
+ * If it's clean and not precious, we can free the page.
+ */
+ if (!m->dirty && !m->precious) {
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_pageout_speculative_clean++;
+ else {
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
+ vm_pageout_inactive_anonymous++;
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reclaimed++;
+
+ vm_pageout_inactive_clean++;
+ }
+ /*
+ * OK, at this point we have found a page we are going to free.
+ */
+#if CONFIG_PHANTOM_CACHE
+ if (!object->internal)
+ vm_phantom_cache_add_ghost(m);
+#endif
+ goto reclaim_page;
+ }
+
+ /*
+ * The page may have been dirtied since the last check
+ * for a throttled target queue (which may have been skipped
+ * if the page was clean then). With the dirty page
+ * disconnected here, we can make one final check.
+ */
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ inactive_throttled = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+
+ if (inactive_throttled == TRUE)
+ goto throttle_inactive;
+
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+
+ /*
+ * If Jetsam is enabled, then the sending
+ * of memory pressure notifications is handled
+ * from the same thread that takes care of high-water
+ * and other jetsams i.e. the memorystatus_thread.
+ */
+
+#else /* CONFIG_JETSAM */
+
+ vm_pressure_response();
+
+#endif /* CONFIG_JETSAM */
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_pageout_speculative_dirty++;
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
+ vm_pageout_inactive_anonymous++;
+
+ if (object->internal)
+ vm_pageout_inactive_dirty_internal++;
+ else
+ vm_pageout_inactive_dirty_external++;
+
+ /*
+ * do NOT set the pageout bit!
+ * sure, we might need free pages, but this page is going to take time to become free
+ * anyway, so we may as well put it on the clean queue first and take it from there later
+ * if necessary. that way, we'll ensure we don't free up too much. -mj
+ */
+ vm_pageout_cluster(m);
+
+done_with_inactivepage:
+
+ if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+ if (try_failed == TRUE)
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+
+ /*
+ * back to top of pageout scan loop
+ */
+ }
+}
+
+
+int vm_page_free_count_init;
+
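+/*
+ * vm_page_free_reserve:
+ *
+ * grow the free-page reserve by 'pages', adding extra headroom
+ * for the compressor when one is configured, and clamp the
+ * reserve at the compile-time limits
+ */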
+void
+vm_page_free_reserve(
+ int pages)
+{
+ int free_after_reserve;
+
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+
+ if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
+ else
+ vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
+
+ } else {
+ if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
+ else
+ vm_page_free_reserved += pages;
+ }