+/*
+ * This function is called only from vm_pageout_scan and
+ * it will put a page back on the active/inactive queue
+ * if we can't reclaim it for some reason.
+ */
+static void
+vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
+{
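+	/*
+	 * pages that came off the speculative queue go back on the
+	 * inactive queue; everything else gets reactivated
+	 */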
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+ vm_page_enqueue_inactive(m, FALSE);
+ } else {
+ vm_page_activate(m);
+ }
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ vm_object_t m_object = VM_PAGE_OBJECT(m);
+
+ if (page_from_bg_q == TRUE) {
+ if (m_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will try to grab the victim page's VM object (m_object)
+ * which differs from the previous victim page's object (object).
+ */
+static int
+vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
+{
+ struct vm_speculative_age_q *sq;
+
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ /*
+	 * the object associated with the candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (*object != NULL) {
+ vm_object_unlock(*object);
+ *object = NULL;
+ }
+ /*
+	 * Try to lock the object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run... otherwise, we're likely to trip over this
+ * object in the same state as we work our way through
+ * the queue... clumps of pages associated with the same
+ * object are fairly typical on the inactive and active queues
+ */
+ if (!vm_object_lock_try_scan(m_object)) {
+ vm_page_t m_want = NULL;
+
+ vm_pageout_vminfo.vm_pageout_inactive_nolock++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
+ }
+
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
+
+ m->vmp_reference = FALSE;
+
+ if (!m_object->object_is_shared_cache) {
+ /*
+ * don't apply this optimization if this is the shared cache
+ * object, it's too easy to get rid of very hot and important
+ * pages...
+ * m->vmp_object must be stable since we hold the page queues lock...
+ * we can update the scan_collisions field sans the object lock
+ * since it is a separate field and this is the only spot that does
+ * a read-modify-write operation and it is never executed concurrently...
+ * we can asynchronously set this field to 0 when creating a UPL, so it
+	 * is possible for the value to be a bit non-deterministic, but that's ok
+ * since it's only used as a hint
+ */
+ m_object->scan_collisions = 1;
+ }
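+		/*
+		 * look ahead for the page we're most likely to pick next,
+		 * checking the queues in the order this scan consumes them:
+		 * cleaned first, then aged speculative, then the inactive
+		 * queue (when we're avoiding anonymous pages or have none),
+		 * and finally the anonymous queue
+		 */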
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ } else if (!vm_page_queue_empty(&sq->age_q)) {
+ m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+ } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
+ !vm_page_queue_empty(&vm_page_queue_inactive)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ }
+
+ /*
+ * this is the next object we're going to be interested in
+		 * try to make sure it's available after the mutex_pause
+ * returns control
+ */
+ if (m_want) {
+ vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
+ }
+
+ vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
+
+ return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+ } else {
+ *object = m_object;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+
+ return VM_PAGEOUT_SCAN_PROCEED;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it notices that pageout scan may be rendered ineffective
+ * due to an FS deadlock and will jetsam a process if possible.
+ * If jetsam isn't supported, it'll move the page to the active
+ * queue to try to get some different pages pushed onwards so
+ * we can try to get out of this scenario.
+ */
+static void
+vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
+ int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
+{
+ struct vm_pageout_queue *eq;
+ vm_object_t cur_object = VM_OBJECT_NULL;
+
+ cur_object = *object;
+
+ eq = &vm_pageout_queue_external;
+
+ if (cur_object->internal == FALSE) {
+ /*
+ * we need to break up the following potential deadlock case...
+ * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
+ * b) The thread doing the writing is waiting for pages while holding the truncate lock
+ * c) Most of the pages in the inactive queue belong to this file.
+ *
+ * we are potentially in this deadlock because...
+ * a) the external pageout queue is throttled
+ * b) we're done with the active queue and moved on to the inactive queue
+ * c) we've got a dirty external page
+ *
+ * since we don't know the reason for the external pageout queue being throttled we
+ * must suspect that we are deadlocked, so move the current page onto the active queue
+ * in an effort to cause a page from the active queue to 'age' to the inactive queue
+ *
+ * if we don't have jetsam configured (i.e. we have a dynamic pager), set
+ * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
+ * pool the next time we select a victim page... if we can make enough new free pages,
+ * the deadlock will break, the external pageout queue will empty and it will no longer
+ * be throttled
+ *
+ * if we have jetsam configured, keep a count of the pages reactivated this way so
+ * that we can try to find clean pages in the active/inactive queues before
+ * deciding to jetsam a process
+ */
+ vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
+
+ vm_page_check_pageable_safe(m);
+ assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
+ m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
+ vm_page_active_count++;
+ vm_page_pageable_external_count++;
+
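+		/*
+		 * run the external pageout I/O thread unthrottled so the
+		 * laundry already queued to it has a chance to drain
+		 */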
+ vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+
+#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
+
+#pragma unused(force_anonymous)
+
+ *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
+
+ if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
+ *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+ /*
+ * Possible deadlock scenario so request jetsam action
+ */
+
+ assert(cur_object);
+ vm_object_unlock(cur_object);
+
+ cur_object = VM_OBJECT_NULL;
+
+ /*
+			 * VM pageout scan needs to know we have dropped this lock, so set the
+			 * object variable we were passed to NULL.
+ */
+ *object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
+ if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
+ }
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ vm_page_lock_queues();
+ *delayed_unlock = 1;
+ }
+#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+
+#pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
+#pragma unused(delayed_unlock)
+
+ *force_anonymous = TRUE;
+#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+ } else {
+ vm_page_activate(m);
+ counter_inc(&vm_statistics_reactivations);
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ if (is_page_from_bg_q == TRUE) {
+ if (cur_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ vm_pageout_state.vm_pageout_inactive_used++;
+ }
+}
+
+
+void
+vm_page_balance_inactive(int max_to_move)
+{
+ vm_page_t m;
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
+ /*
+ * It is likely that the hibernation code path is
+ * dealing with these very queues as we are about
+ * to move pages around in/from them and completely
+ * change the linkage of the pages.
+ *
+ * And so we skip the rebalancing of these queues.
+ */
+ return;
+ }
+ vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count +
+ vm_page_speculative_count);
+
+ while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
+ VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+ assert(!m->vmp_laundry);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+ *
+ * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+ * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+		 * new reference happens. If no further references happen on the page after that remote TLB flushes,
+ * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+ * by pageout_scan, which is just fine since the last reference would have happened quite far
+ * in the past (TLB caches don't hang around for very long), and of course could just as easily
+ * have happened before we moved the page
+ */
+ if (m->vmp_pmapped == TRUE) {
+ pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+ }
+
+ /*
+ * The page might be absent or busy,
+ * but vm_page_deactivate can handle that.
+		 * FALSE indicates that we don't want the H/W reference bit cleared
+ */
+ vm_page_deactivate_internal(m, FALSE);
+ }
+}
+
+
+/*
+ * vm_pageout_scan does the dirty work for the pageout daemon.
+ * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
+ * held and vm_page_free_wanted == 0.
+ */
+void
+vm_pageout_scan(void)
+{
+ unsigned int loop_count = 0;
+ unsigned int inactive_burst_count = 0;
+ unsigned int reactivated_this_call;
+ unsigned int reactivate_limit;
+ vm_page_t local_freeq = NULL;
+ int local_freed = 0;
+ int delayed_unlock;
+ int delayed_unlock_limit = 0;
+ int refmod_state = 0;
+ int vm_pageout_deadlock_target = 0;
+ struct vm_pageout_queue *iq;
+ struct vm_pageout_queue *eq;
+ struct vm_speculative_age_q *sq;
+ struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
+ boolean_t inactive_throttled = FALSE;
+ vm_object_t object = NULL;
+ uint32_t inactive_reclaim_run;
+ boolean_t grab_anonymous = FALSE;
+ boolean_t force_anonymous = FALSE;
+ boolean_t force_speculative_aging = FALSE;
+ int anons_grabbed = 0;
+ int page_prev_q_state = 0;
+ boolean_t page_from_bg_q = FALSE;
+ uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
+ vm_object_t m_object = VM_OBJECT_NULL;
+ int retval = 0;
+ boolean_t lock_yield_check = FALSE;
+
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
+ vm_pageout_vminfo.vm_pageout_freed_speculative,
+ vm_pageout_state.vm_pageout_inactive_clean,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+ flow_control.state = FCS_IDLE;
+ iq = &vm_pageout_queue_internal;
+ eq = &vm_pageout_queue_external;
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ /* Ask the pmap layer to return any pages it no longer needs. */
+ uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
+
+ vm_page_lock_queues();
+
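+	/* the pages the pmap layer just released were wired, so take them out of the wire count */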
+ vm_page_wire_count -= pmap_wired_pages_freed;
+
+ delayed_unlock = 1;
+
+ /*
+ * Calculate the max number of referenced pages on the inactive
+ * queue that we will reactivate.
+ */
+ reactivated_this_call = 0;
+ reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
+ vm_page_inactive_count);
+ inactive_reclaim_run = 0;
+
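+	/*
+	 * budget for forced reactivations of throttled external pages;
+	 * once it's exhausted, vps_deal_with_throttled_queues may resort
+	 * to jetsam (when jetsam is configured)
+	 */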
+ vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+
+ /*
+ * We must limit the rate at which we send pages to the pagers
+ * so that we don't tie up too many pages in the I/O queues.
+ * We implement a throttling mechanism using the laundry count
+ * to limit the number of pages outstanding to the default
+ * and external pagers. We can bypass the throttles and look
+ * for clean pages if the pageout queues don't drain in a timely
+ * fashion since this may indicate that the pageout paths are
+ * stalled waiting for memory, which only we can provide.
+ */
+
+ vps_init_page_targets();
+ assert(object == NULL);
+ assert(delayed_unlock != 0);
+
+ for (;;) {
+ vm_page_t m;
+
+ DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
+
+ if (lock_yield_check) {
+ lock_yield_check = FALSE;
+
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ int freed = local_freed;
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+ if (freed == 0) {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ } else if (vm_pageout_scan_wants_object) {
+ vm_page_unlock_queues();
+ mutex_pause(0);
+ vm_page_lock_queues();
+ }
+ }
+
+ if (vm_upl_wait_for_pages < 0) {
+ vm_upl_wait_for_pages = 0;
+ }
+
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
+
+ if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
+ }
+
+ vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
+
+ assert(delayed_unlock);
+
+ /*
+ * maintain our balance
+ */
+ vm_page_balance_inactive(1);
+
+
+ /**********************************************************************
+ * above this point we're playing with the active and secluded queues
+ * below this point we're playing with the throttling mechanisms
+ * and the inactive queue
+ **********************************************************************/
+
+ if (vm_page_free_count + local_freed >= vm_page_free_target) {
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+ /*
+ * make sure the pageout I/O threads are running
+ * throttled in case there are still requests
+ * in the laundry... since we have met our targets
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count >= vm_page_free_target) &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ /*
+ * done - we have met our target *and*
+ * there is no one waiting for a page.
+ */
+return_from_scan:
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
+ vm_pageout_state.vm_pageout_inactive,
+ vm_pageout_state.vm_pageout_inactive_used, 0, 0);
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
+ vm_pageout_vminfo.vm_pageout_freed_speculative,
+ vm_pageout_state.vm_pageout_inactive_clean,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+ return;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+
+ /*
+ * Before anything, we check if we have any ripe volatile
+ * objects around. If so, try to purge the first object.
+ * If the purge fails, fall through to reclaim a page instead.
+		 * If the purge succeeds, go back to the top and reevaluate
+ * the new memory situation.
+ */
+ retval = vps_purge_object();
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ /*
+ * Success
+ */
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+
+ lock_yield_check = FALSE;
+ continue;
+ }
+
+ /*
+ * If our 'aged' queue is empty and we have some speculative pages
+ * in the other queues, let's go through and see if we need to age
+ * them.
+ *
+		 * If we succeeded in aging a speculative Q, or everything
+		 * looks normal w.r.t. queue age and queue counts, we keep going onward.
+ *
+ * If, for some reason, we seem to have a mismatch between the spec.
+ * page count and the page queues, we reset those variables and
+ * restart the loop (LD TODO: Track this better?).
+ */
+ if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
+ retval = vps_age_speculative_queue(force_speculative_aging);
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ lock_yield_check = FALSE;
+ continue;
+ }
+ }
+ force_speculative_aging = FALSE;