+ if (can_steal == TRUE) {
+ vm_page_speculate_ageit(aq);
+ }
+
+ return VM_PAGEOUT_SCAN_PROCEED;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it evicts pages from the VM object cache.
+ */
+static inline int
+vps_object_cache_evict(vm_object_t *object_to_unlock)
+{
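+ /* countdown that suppresses further eviction attempts after a pass that freed nothing */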
+ static int cache_evict_throttle = 0;
+ struct vm_speculative_age_q *sq;
+
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
+ int pages_evicted;
+
+ if (*object_to_unlock != NULL) {
+ vm_object_unlock(*object_to_unlock);
+ *object_to_unlock = NULL;
+ }
+ KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
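+ /* ask the object cache to evict pages; the arguments bound the work done per call (up to 100 pages here) */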
+ pages_evicted = vm_object_cache_evict(100, 10);
+
+ KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);
+
+ if (pages_evicted) {
+ vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;
+
+ VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
+ vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
+ memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
+
+ /*
+ * we just freed up to 100 pages,
+ * so go back to the top of the main loop
+ * and re-evaluate the memory situation
+ */
+ return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+ } else {
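+ /* nothing was evicted... back off and skip eviction for the next 1000 calls */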
+ cache_evict_throttle = 1000;
+ }
+ }
+ if (cache_evict_throttle) {
+ cache_evict_throttle--;
+ }
+
+ return VM_PAGEOUT_SCAN_PROCEED;
+}
+
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it calculates the filecache minimum that needs to be maintained
+ * as we start to steal pages.
+ */
+static void
+vps_calculate_filecache_min(void)
+{
+ int divisor = vm_pageout_state.vm_page_filecache_min_divisor;
+
+#if CONFIG_JETSAM
+ /*
+ * don't let the filecache_min fall below 15% of available memory
+ * on systems with an active compressor that isn't nearing its
+ * limits w.r.t. accepting new data
+ *
+ * on systems w/o the compressor/swapper, the filecache is always
+ * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
+ * since most (if not all) of the anonymous pages are in the
+ * throttled queue (which isn't counted as available) which
+ * effectively disables this filter
+ */
+ if (vm_compressor_low_on_space() || divisor == 0) {
+ vm_pageout_state.vm_page_filecache_min = 0;
+ } else {
+ vm_pageout_state.vm_page_filecache_min =
+ ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
+ }
+#else
+ if (vm_compressor_out_of_space() || divisor == 0) {
+ vm_pageout_state.vm_page_filecache_min = 0;
+ } else {
+ /*
+ * don't let the filecache_min fall below the specified critical level
+ */
+ vm_pageout_state.vm_page_filecache_min =
+ ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
+ }
+#endif
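+ /* if we're critically low on free pages, drop the filecache floor so any page can be stolen */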
+ if (vm_page_free_count < (vm_page_free_reserved / 4)) {
+ vm_pageout_state.vm_page_filecache_min = 0;
+ }
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it updates the flow control time to detect if VM pageout scan
+ * isn't making progress.
+ */
+static void
+vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
+{
+ mach_timespec_t ts;
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+
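+ /* vm_pageout_deadlock_wait is expressed in milliseconds */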
+ ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
+ ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
+ clock_get_system_nanotime(&sec, &nsec);
+ flow_control->ts.tv_sec = (unsigned int) sec;
+ flow_control->ts.tv_nsec = nsec;
+ ADD_MACH_TIMESPEC(&flow_control->ts, &ts);
+
+ flow_control->state = FCS_DELAYED;
+
+ vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it is the flow control logic of VM pageout scan which
+ * controls whether it should block and for how long.
+ * Any blocking of vm_pageout_scan happens ONLY in this function.
+ */
+static int
+vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
+ vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
+{
+ boolean_t exceeded_burst_throttle = FALSE;
+ unsigned int msecs = 0;
+ uint32_t inactive_external_count;
+ mach_timespec_t ts;
+ struct vm_pageout_queue *iq;
+ struct vm_pageout_queue *eq;
+ struct vm_speculative_age_q *sq;
+
+ iq = &vm_pageout_queue_internal;
+ eq = &vm_pageout_queue_external;
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ /*
+ * Sometimes we have to pause:
+ * 1) No inactive pages - nothing to do.
+ * 2) Loop control - no acceptable pages found on the inactive queue
+ * within the last vm_pageout_burst_inactive_throttle iterations
+ * 3) Flow control - default pageout queue is full
+ */
+ if (vm_page_queue_empty(&vm_page_queue_inactive) &&
+ vm_page_queue_empty(&vm_page_queue_anonymous) &&
+ vm_page_queue_empty(&vm_page_queue_cleaned) &&
+ vm_page_queue_empty(&sq->age_q)) {
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
+ msecs = vm_pageout_state.vm_pageout_empty_wait;
+ } else if (inactive_burst_count >=
+ MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
+ (vm_page_inactive_count +
+ vm_page_speculative_count))) {
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
+ msecs = vm_pageout_state.vm_pageout_burst_wait;
+
+ exceeded_burst_throttle = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(iq) &&
+ VM_DYNAMIC_PAGING_ENABLED()) {
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+
+ switch (flow_control->state) {
+ case FCS_IDLE:
+ if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
+ vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
+ /*
+ * since the compressor is running independently of vm_pageout_scan
+ * let's not wait for it just yet... as long as we have a healthy supply
+ * of filecache pages to work with, let's keep stealing those.
+ */
+ inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
+ if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
+ (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
+ *anons_grabbed = ANONS_GRABBED_LIMIT;
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+ }
+
+ vps_flow_control_reset_deadlock_timer(flow_control);
+ msecs = vm_pageout_state.vm_pageout_deadlock_wait;
+
+ break;
+
+ case FCS_DELAYED:
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
+ /*
+ * the pageout thread for the default pager is potentially
+ * deadlocked since the
+ * default pager queue has been throttled for more than the
+ * allowable time... we need to move some clean pages or dirty
+ * pages belonging to the external pagers if they aren't throttled
+ * vm_page_free_wanted represents the number of threads currently
+ * blocked waiting for pages... we'll move one page for each of
+ * these plus a fixed amount to break the logjam... once we're done
+ * moving this number of pages, we'll re-enter the FCS_DELAYED state
+ * with a new timeout target since we have no way of knowing
+ * whether we've broken the deadlock except through observation
+ * of the queue associated with the default pager... we need to
+ * stop moving pages and allow the system to run to see what
+ * state it settles into.
+ */
+
+ *vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
+ vm_page_free_wanted + vm_page_free_wanted_privileged;
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
+ flow_control->state = FCS_DEADLOCK_DETECTED;
+ thread_wakeup((event_t) &vm_pageout_garbage_collect);
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+ /*
+ * just resniff instead of trying
+ * to compute a new delay time... we're going to be
+ * awakened immediately upon a laundry completion,
+ * so we won't wait any longer than necessary
+ */
+ msecs = vm_pageout_state.vm_pageout_idle_wait;
+ break;
+
+ case FCS_DEADLOCK_DETECTED:
+ if (*vm_pageout_deadlock_target) {
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+
+ vps_flow_control_reset_deadlock_timer(flow_control);
+ msecs = vm_pageout_state.vm_pageout_deadlock_wait;
+
+ break;
+ }
+ } else {
+ /*
+ * No need to pause...
+ */
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+
+ if (vm_page_free_count >= vm_page_free_target) {
+ /*
+ * we're here because
+ * 1) someone else freed up some pages while we had
+ * the queues unlocked above
+ * and we've hit one of the 3 conditions that
+ * cause us to pause the pageout scan thread
+ *
+ * since we already have enough free pages,
+ * let's avoid stalling and return normally
+ *
+ * before we return, make sure the pageout I/O threads
+ * are running throttled in case there are still requests
+ * in the laundry... since we have enough free pages
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ *
+ * we don't want to hold vm_page_queue_free_lock when
+ * calling vm_pageout_adjust_eq_iothrottle (since it
+ * may cause other locks to be taken), we do the initial
+ * check outside of the lock. Once we take the lock,
+ * we recheck the condition since it may have changed.
+ * if it has, no problem, we will make the threads
+ * non-throttled before actually blocking
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+ }
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_free_count >= vm_page_free_target &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ return VM_PAGEOUT_SCAN_DONE_RETURN;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
+ /*
+ * we're most likely about to block due to one of
+ * the 3 conditions that cause vm_pageout_scan to
+ * not be able to make forward progress w.r.t.
+ * to providing new pages to the free queue,
+ * so unthrottle the I/O threads in case we
+ * have laundry to be cleaned... it needs
+ * to be completed ASAP.
+ *
+ * even if we don't block, we want the io threads
+ * running unthrottled since the sum of free +
+ * clean pages is still under our free target
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+ }
+ if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
+ /*
+ * if we get here we're below our free target and
+ * we're stalling due to a full laundry queue or
+ * we don't have any inactive pages other than
+ * those in the clean queue...
+ * however, we have pages on the clean queue that
+ * can be moved to the free queue, so let's not
+ * stall the pageout scan
+ */
+ flow_control->state = FCS_IDLE;
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+ if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
+ flow_control->state = FCS_IDLE;
+ return VM_PAGEOUT_SCAN_PROCEED;
+ }
+
+ VM_CHECK_MEMORYSTATUS;
+
+ if (flow_control->state != FCS_IDLE) {
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
+ }
+
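+ /* mark the internal queue throttled and wait (up to 'msecs') for a laundry completion to wake us */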
+ iq->pgo_throttled = TRUE;
+ assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);
+
+ counter(c_vm_pageout_scan_block++);
+
+ vm_page_unlock_queues();
+
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
+
+ vm_page_lock_queues();
+
+ iq->pgo_throttled = FALSE;
+
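+ /* we may have been blocked for a while... recompute the page targets before the next pass */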
+ vps_init_page_targets();
+
+ return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will find and return the most appropriate page to be
+ * reclaimed.
+ */
+static int
+vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
+ boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
+{
+ vm_page_t m = NULL;
+ vm_object_t m_object = VM_OBJECT_NULL;
+ uint32_t inactive_external_count;
+ struct vm_speculative_age_q *sq;
+ struct vm_pageout_queue *iq;
+ int retval = VM_PAGEOUT_SCAN_PROCEED;
+
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+ iq = &vm_pageout_queue_internal;
+
+ *is_page_from_bg_q = FALSE;
+
+ m = NULL;
+ m_object = VM_OBJECT_NULL;
+
+ if (VM_DYNAMIC_PAGING_ENABLED()) {
+ assert(vm_page_throttled_count == 0);
+ assert(vm_page_queue_empty(&vm_page_queue_throttled));
+ }
+
+ /*
+ * Try for a clean-queue inactive page.
+ * These are pages that vm_pageout_scan tried to steal earlier, but
+ * were dirty and had to be cleaned. Pick them up now that they are clean.
+ */
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ goto found_page;
+ }
+
+ /*
+ * The next most eligible pages are ones we paged in speculatively,
+ * but which have not yet been touched and have been aged out.
+ */
+ if (!vm_page_queue_empty(&sq->age_q)) {
+ m = (vm_page_t) vm_page_queue_first(&sq->age_q);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ if (!m->vmp_dirty || force_anonymous == FALSE) {
+ goto found_page;
+ } else {
+ m = NULL;
+ }
+ }
+
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
+ vm_object_t bg_m_object = NULL;
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
+
+ bg_m_object = VM_PAGE_OBJECT(m);
+
+ if (!VM_PAGE_PAGEABLE(m)) {
+ /*
+ * This page is on the background queue
+ * but not on a pageable queue. This is
+ * likely a transient state and whoever
+ * took it out of its pageable queue
+ * will likely put it back on a pageable
+ * queue soon but we can't deal with it
+ * at this point, so let's ignore this
+ * page.
+ */
+ } else if (force_anonymous == FALSE || bg_m_object->internal) {
+ if (bg_m_object->internal &&
+ (VM_PAGE_Q_THROTTLED(iq) ||
+ vm_compressor_out_of_space() == TRUE ||
+ vm_page_free_count < (vm_page_free_reserved / 4))) {
+ vm_pageout_skipped_bq_internal++;
+ } else {
+ *is_page_from_bg_q = TRUE;
+
+ if (bg_m_object->internal) {
+ vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
+ } else {
+ vm_pageout_vminfo.vm_pageout_considered_bq_external++;
+ }
+ goto found_page;
+ }
+ }
+ }
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
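+ /* prefer anonymous pages if the filecache is below its minimum, we're forced to, or the external inactive queue is below target */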
+ if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
+ (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
+ *grab_anonymous = TRUE;
+ *anons_grabbed = 0;
+
+ vm_pageout_vminfo.vm_pageout_skipped_external++;
+ goto want_anonymous;
+ }
+ *grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
+
+#if CONFIG_JETSAM
+ /* If the file-backed pool has accumulated
+ * significantly more pages than the jetsam
+ * threshold, prefer to reclaim those
+ * inline to minimise compute overhead of reclaiming
+ * anonymous pages.
+ * This calculation does not account for the CPU local
+ * external page queues, as those are expected to be
+ * much smaller relative to the global pools.
+ */
+
+ struct vm_pageout_queue *eq = &vm_pageout_queue_external;
+
+ if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
+ if (vm_page_pageable_external_count >
+ vm_pageout_state.vm_page_filecache_min) {
+ if ((vm_page_pageable_external_count *
+ vm_pageout_memorystatus_fb_factor_dr) >
+ (memorystatus_available_pages_critical *
+ vm_pageout_memorystatus_fb_factor_nr)) {
+ *grab_anonymous = FALSE;
+
+ VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
+ }
+ }
+ if (*grab_anonymous) {
+ VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
+ }
+ }
+#endif /* CONFIG_JETSAM */
+
+want_anonymous:
+ if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
+ if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+ *anons_grabbed = 0;
+
+ if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
+ if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+ if ((++(*reactivated_this_call) % 100)) {
+ vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;
+
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ if (*is_page_from_bg_q == TRUE) {
+ if (m_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+ vm_pageout_state.vm_pageout_inactive_used++;
+
+ m = NULL;
+ retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;
+
+ goto found_page;
+ }
+
+ /*
+ * steal 1 of the file backed pages even if
+ * we are under the limit that has been set
+ * for a healthy filecache
+ */
+ }
+ }
+ goto found_page;
+ }
+ }
+ if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+ *anons_grabbed += 1;
+
+ goto found_page;
+ }
+
+ m = NULL;
+
+found_page:
+ *victim_page = m;
+
+ return retval;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will put a page back on the active/inactive queue
+ * if we can't reclaim it for some reason.
+ */
+static void
+vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
+{
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+ vm_page_enqueue_inactive(m, FALSE);
+ } else {
+ vm_page_activate(m);
+ }
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ vm_object_t m_object = VM_PAGE_OBJECT(m);
+
+ if (page_from_bg_q == TRUE) {
+ if (m_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it will try to grab the victim page's VM object (m_object)
+ * which differs from the previous victim page's object (object).
+ */
+static int
+vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
+{
+ struct vm_speculative_age_q *sq;
+
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ /*
+ * the object associated with the candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (*object != NULL) {
+ vm_object_unlock(*object);
+ *object = NULL;
+ }
+ /*
+ * Try to lock the object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run... otherwise, we're likely to trip over this
+ * object in the same state as we work our way through
+ * the queue... clumps of pages associated with the same
+ * object are fairly typical on the inactive and active queues
+ */
+ if (!vm_object_lock_try_scan(m_object)) {
+ vm_page_t m_want = NULL;
+
+ vm_pageout_vminfo.vm_pageout_inactive_nolock++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
+ }
+
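+ /* we're going to skip this page... clear its reference state so it doesn't look recently used next time around */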
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
+
+ m->vmp_reference = FALSE;
+
+ if (!m_object->object_is_shared_cache) {
+ /*
+ * don't apply this optimization if this is the shared cache
+ * object, it's too easy to get rid of very hot and important
+ * pages...
+ * m->vmp_object must be stable since we hold the page queues lock...
+ * we can update the scan_collisions field sans the object lock
+ * since it is a separate field and this is the only spot that does
+ * a read-modify-write operation and it is never executed concurrently...
+ * we can asynchronously set this field to 0 when creating a UPL, so it
+ * is possible for the value to be a bit non-deterministic, but that's ok
+ * since it's only used as a hint
+ */
+ m_object->scan_collisions = 1;
+ }
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ } else if (!vm_page_queue_empty(&sq->age_q)) {
+ m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+ } else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
+ !vm_page_queue_empty(&vm_page_queue_inactive)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+ }
+
+ /*
+ * this is the next object we're going to be interested in
+ * try to make sure it's available after the mutex_pause
+ * returns control
+ */
+ if (m_want) {
+ vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
+ }
+
+ vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
+
+ return VM_PAGEOUT_SCAN_NEXT_ITERATION;
+ } else {
+ *object = m_object;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+
+ return VM_PAGEOUT_SCAN_PROCEED;
+}
+
+/*
+ * This function is called only from vm_pageout_scan and
+ * it notices that pageout scan may be rendered ineffective
+ * due to a FS deadlock and will jetsam a process if possible.
+ * If jetsam isn't supported, it'll move the page to the active
+ * queue to try and get some different pages pushed onwards so
+ * we can try to get out of this scenario.
+ */
+static void
+vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
+ int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
+{
+ struct vm_pageout_queue *eq;
+ vm_object_t cur_object = VM_OBJECT_NULL;
+
+ cur_object = *object;
+
+ eq = &vm_pageout_queue_external;
+
+ if (cur_object->internal == FALSE) {
+ /*
+ * we need to break up the following potential deadlock case...
+ * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
+ * b) The thread doing the writing is waiting for pages while holding the truncate lock
+ * c) Most of the pages in the inactive queue belong to this file.
+ *
+ * we are potentially in this deadlock because...
+ * a) the external pageout queue is throttled
+ * b) we're done with the active queue and moved on to the inactive queue
+ * c) we've got a dirty external page
+ *
+ * since we don't know the reason for the external pageout queue being throttled we
+ * must suspect that we are deadlocked, so move the current page onto the active queue
+ * in an effort to cause a page from the active queue to 'age' to the inactive queue
+ *
+ * if we don't have jetsam configured (i.e. we have a dynamic pager), set
+ * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
+ * pool the next time we select a victim page... if we can make enough new free pages,
+ * the deadlock will break, the external pageout queue will empty and it will no longer
+ * be throttled
+ *
+ * if we have jetsam configured, keep a count of the pages reactivated this way so
+ * that we can try to find clean pages in the active/inactive queues before
+ * deciding to jetsam a process
+ */
+ vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;
+
+ vm_page_check_pageable_safe(m);
+ assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
+ m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
+ vm_page_active_count++;
+ vm_page_pageable_external_count++;
+
+ vm_pageout_adjust_eq_iothrottle(eq, FALSE);
+
+#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
+
+#pragma unused(force_anonymous)
+
+ *vm_pageout_inactive_external_forced_reactivate_limit -= 1;
+
+ if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
+ *vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+ /*
+ * Possible deadlock scenario so request jetsam action
+ */
+
+ assert(cur_object);
+ vm_object_unlock(cur_object);
+
+ cur_object = VM_OBJECT_NULL;
+
+ /*
+ * VM pageout scan needs to know we have dropped this lock and so set the
+ * object variable we got passed in to NULL.
+ */
+ *object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
+ if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
+ }
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ vm_page_lock_queues();
+ *delayed_unlock = 1;
+ }
+#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+
+#pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
+#pragma unused(delayed_unlock)
+
+ *force_anonymous = TRUE;
+#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+ } else {
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ if (is_page_from_bg_q == TRUE) {
+ if (cur_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ vm_pageout_state.vm_pageout_inactive_used++;
+ }
+}
+
+
+void
+vm_page_balance_inactive(int max_to_move)
+{
+ vm_page_t m;
+
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
+ /*
+ * It is likely that the hibernation code path is
+ * dealing with these very queues as we are about
+ * to move pages around in/from them and completely
+ * change the linkage of the pages.
+ *
+ * And so we skip the rebalancing of these queues.
+ */
+ return;
+ }
+ vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count +
+ vm_page_speculative_count);
+
+ while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
+ VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
+ assert(!m->vmp_laundry);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+ *
+ * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+ * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+ * new reference happens. If no further references happen on the page after the remote TLB flush
+ * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+ * by pageout_scan, which is just fine since the last reference would have happened quite far
+ * in the past (TLB caches don't hang around for very long), and of course could just as easily
+ * have happened before we moved the page
+ */
+ if (m->vmp_pmapped == TRUE) {
+ pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+ }
+
+ /*
+ * The page might be absent or busy,
+ * but vm_page_deactivate can handle that.
+ * FALSE indicates that we don't want a H/W clear reference
+ */
+ vm_page_deactivate_internal(m, FALSE);
+ }
+}
+
+
+/*
+ * vm_pageout_scan does the dirty work for the pageout daemon.
+ * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
+ * held and vm_page_free_wanted == 0.
+ */
+void
+vm_pageout_scan(void)
+{
+ unsigned int loop_count = 0;
+ unsigned int inactive_burst_count = 0;
+ unsigned int reactivated_this_call;
+ unsigned int reactivate_limit;
+ vm_page_t local_freeq = NULL;
+ int local_freed = 0;
+ int delayed_unlock;
+ int delayed_unlock_limit = 0;
+ int refmod_state = 0;
+ int vm_pageout_deadlock_target = 0;
+ struct vm_pageout_queue *iq;
+ struct vm_pageout_queue *eq;
+ struct vm_speculative_age_q *sq;
+ struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
+ boolean_t inactive_throttled = FALSE;
+ vm_object_t object = NULL;
+ uint32_t inactive_reclaim_run;
+ boolean_t grab_anonymous = FALSE;
+ boolean_t force_anonymous = FALSE;
+ boolean_t force_speculative_aging = FALSE;
+ int anons_grabbed = 0;
+ int page_prev_q_state = 0;
+ boolean_t page_from_bg_q = FALSE;
+ uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
+ vm_object_t m_object = VM_OBJECT_NULL;
+ int retval = 0;
+ boolean_t lock_yield_check = FALSE;
+
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
+ vm_pageout_vminfo.vm_pageout_freed_speculative,
+ vm_pageout_state.vm_pageout_inactive_clean,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+ flow_control.state = FCS_IDLE;
+ iq = &vm_pageout_queue_internal;
+ eq = &vm_pageout_queue_external;
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
+ /* Ask the pmap layer to return any pages it no longer needs. */
+ uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();
+
+ vm_page_lock_queues();
+
+ vm_page_wire_count -= pmap_wired_pages_freed;
+
+ delayed_unlock = 1;
+
+ /*
+ * Calculate the max number of referenced pages on the inactive
+ * queue that we will reactivate.
+ */
+ reactivated_this_call = 0;
+ reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
+ vm_page_inactive_count);
+ inactive_reclaim_run = 0;
+
+ vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+
+ /*
+ * We must limit the rate at which we send pages to the pagers
+ * so that we don't tie up too many pages in the I/O queues.
+ * We implement a throttling mechanism using the laundry count
+ * to limit the number of pages outstanding to the default
+ * and external pagers. We can bypass the throttles and look
+ * for clean pages if the pageout queues don't drain in a timely
+ * fashion since this may indicate that the pageout paths are
+ * stalled waiting for memory, which only we can provide.
+ */
+
+ vps_init_page_targets();
+ assert(object == NULL);
+ assert(delayed_unlock != 0);
+
+ for (;;) {
+ vm_page_t m;
+
+ DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
+
+ if (lock_yield_check) {
+ lock_yield_check = FALSE;
+
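+ /* we've processed a batch of pages under the page queue lock... flush the local freelist and give other threads a chance at the lock */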
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ int freed = local_freed;
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+ if (freed == 0) {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ } else if (vm_pageout_scan_wants_object) {
+ vm_page_unlock_queues();
+ mutex_pause(0);
+ vm_page_lock_queues();
+ }
+ }
+
+ if (vm_upl_wait_for_pages < 0) {
+ vm_upl_wait_for_pages = 0;
+ }
+
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
+
+ if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
+ }
+
+ vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
+
+ assert(delayed_unlock);
+
+ /*
+ * maintain our balance
+ */
+ vm_page_balance_inactive(1);
+
+
+ /**********************************************************************
+ * above this point we're playing with the active and secluded queues
+ * below this point we're playing with the throttling mechanisms
+ * and the inactive queue
+ **********************************************************************/
+
+ if (vm_page_free_count + local_freed >= vm_page_free_target) {
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
+ VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
+ /*
+ * make sure the pageout I/O threads are running
+ * throttled in case there are still requests
+ * in the laundry... since we have met our targets
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ */
+ vm_pageout_adjust_eq_iothrottle(eq, TRUE);
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count >= vm_page_free_target) &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ /*
+ * done - we have met our target *and*
+ * there is no one waiting for a page.
+ */
+return_from_scan:
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
+ vm_pageout_state.vm_pageout_inactive,
+ vm_pageout_state.vm_pageout_inactive_used, 0, 0);
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
+ vm_pageout_vminfo.vm_pageout_freed_speculative,
+ vm_pageout_state.vm_pageout_inactive_clean,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_external);
+
+ return;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+
+ /*
+ * Before anything, we check if we have any ripe volatile
+ * objects around. If so, try to purge the first object.
+ * If the purge fails, fall through to reclaim a page instead.
+ * If the purge succeeds, go back to the top and re-evaluate
+ * the new memory situation.
+ */
+ retval = vps_purge_object();
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ /*
+ * Success
+ */
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+
+ lock_yield_check = FALSE;
+ continue;
+ }
+
+ /*
+ * If our 'aged' queue is empty and we have some speculative pages
+ * in the other queues, let's go through and see if we need to age
+ * them.
+ *
+ * If we succeeded in aging a speculative Q, or if everything
+ * looks normal w.r.t. queue age and queue counts, we keep going onward.
+ *
+ * If, for some reason, we seem to have a mismatch between the spec.
+ * page count and the page queues, we reset those variables and
+ * restart the loop (LD TODO: Track this better?).
+ */
+ if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
+ retval = vps_age_speculative_queue(force_speculative_aging);
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ lock_yield_check = FALSE;
+ continue;
+ }
+ }
+ force_speculative_aging = FALSE;
+
+ /*
+ * Check to see if we need to evict objects from the cache.
+ *
+ * Note: 'object' here doesn't have anything to do with
+ * the eviction part. We just need to make sure we have dropped
+ * any object lock we might be holding if we need to go down
+ * into the eviction logic.
+ */
+ retval = vps_object_cache_evict(&object);
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ lock_yield_check = FALSE;
+ continue;
+ }
+
+
+ /*
+ * Calculate our filecache_min that will affect the loop
+ * going forward.
+ */
+ vps_calculate_filecache_min();
+
+ /*
+ * LD TODO: Use a structure to hold all state variables for a single
+ * vm_pageout_scan iteration and pass that structure to this function instead.
+ */
+ retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
+ &delayed_unlock, &local_freeq, &local_freed,
+ &vm_pageout_deadlock_target, inactive_burst_count);
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ if (loop_count >= vm_page_inactive_count) {
+ loop_count = 0;
+ }
+
+ inactive_burst_count = 0;
+
+ assert(object == NULL);
+ assert(delayed_unlock != 0);
+
+ lock_yield_check = FALSE;
+ continue;
+ } else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
+ goto return_from_scan;
+ }
+
+ flow_control.state = FCS_IDLE;
+
+ vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
+ vm_pageout_inactive_external_forced_reactivate_limit);
+ loop_count++;
+ inactive_burst_count++;
+ vm_pageout_state.vm_pageout_inactive++;
+
+ /*
+ * Choose a victim.
+ */
+
+ m = NULL;
+ retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);
+
+ if (m == NULL) {
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ inactive_burst_count = 0;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+ }
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+
+ /*
+ * if we've gotten here, we have no victim page.
+ * check to see if we've not finished balancing the queues
+ * or we have a page on the aged speculative queue that we
+ * skipped due to force_anonymous == TRUE... or we have
+ * speculative pages that we can prematurely age... in any
+ * of these cases we'll keep going, else panic
+ */
+ force_anonymous = FALSE;
+ VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);
+
+ if (!vm_page_queue_empty(&sq->age_q)) {
+ lock_yield_check = TRUE;
+ continue;
+ }
+
+ if (vm_page_speculative_count) {
+ force_speculative_aging = TRUE;
+ lock_yield_check = TRUE;
+ continue;
+ }
+ panic("vm_pageout: no victim");
+
+ /* NOTREACHED */
+ }
+
+ assert(VM_PAGE_PAGEABLE(m));
+ m_object = VM_PAGE_OBJECT(m);
+ force_anonymous = FALSE;
+
+ page_prev_q_state = m->vmp_q_state;
+ /*
+ * we just found this page on one of our queues...
+ * it can't also be on the pageout queue, so safe
+ * to call vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
+
+ assert(!m->vmp_laundry);
+ assert(!m->vmp_private);
+ assert(!m->vmp_fictitious);
+ assert(m_object != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+ vm_pageout_vminfo.vm_pageout_considered_page++;
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != object) {
+ boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);
+
+ /*
+ * vps_switch_object() will always drop the 'object' lock first
+ * and then try to acquire the 'm_object' lock. So 'object' has to point to
+ * either 'm_object' or NULL.
+ */
+ retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);
+
+ if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
+ lock_yield_check = TRUE;
+ continue;
+ }
+ }
+ assert(m_object == object);
+ assert(VM_PAGE_OBJECT(m) == m_object);
+
+ if (m->vmp_busy) {
+ /*
+ * Somebody is already playing with this page.
+ * Put it back on the appropriate queue
+ *
+ */
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
+ }
+
+ vps_requeue_page(m, page_prev_q_state, page_from_bg_q);
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+
+ /*
+ * if (m->vmp_cleaning && !m->vmp_free_when_done)
+ * If already cleaning this page in place
+ * just leave if off the paging queues.
+ * We can leave the page mapped, and upl_commit_range
+ * will put it on the clean queue.
+ *
+ * if (m->vmp_free_when_done && !m->vmp_cleaning)
+ * an msync INVALIDATE is in progress...
+ * this page has been marked for destruction
+ * after it has been cleaned,
+ * but not yet gathered into a UPL
+ * where 'cleaning' will be set...
+ * just leave it off the paging queues
+ *
+ * if (m->vmp_free_when_done && m->vmp_cleaning)
+ * an msync INVALIDATE is in progress
+ * and the UPL has already gathered this page...
+ * just leave it off the paging queues
+ */
+ if (m->vmp_free_when_done || m->vmp_cleaning) {
+ lock_yield_check = TRUE;
+ continue;
+ }
+
+
+ /*
+ * If it's absent, in error or the object is no longer alive,
+ * we can reclaim the page... in the no longer alive case,
+ * there are 2 states the page can be in that preclude us
+ * from reclaiming it - busy or cleaning - that we've already
+ * dealt with
+ */
+ if (m->vmp_absent || m->vmp_error || !object->alive) {
+ if (m->vmp_absent) {
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
+ } else if (!object->alive) {
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
+ } else {
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
+ }
+reclaim_page:
+ if (vm_pageout_deadlock_target) {
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
+ vm_pageout_deadlock_target--;
+ }
+
+ DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+ if (object->internal) {
+ DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+ } else {
+ DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+ }
+ assert(!m->vmp_cleaning);
+ assert(!m->vmp_laundry);
+
+ if (!object->internal &&
+ object->pager != NULL &&
+ object->pager->mo_pager_ops == &shared_region_pager_ops) {
+ shared_region_pager_reclaimed++;
+ }
+
+ m->vmp_busy = TRUE;
+
+ /*
+ * remove page from object here since we're already
+ * behind the object lock... defer the rest of the work
+ * we'd normally do in vm_page_free_prepare_object
+ * until 'vm_page_free_list' is called
+ */
+ if (m->vmp_tabled) {
+ vm_page_remove(m, TRUE);
+ }
+
+ assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
+ m->vmp_snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+ vm_pageout_vminfo.vm_pageout_freed_speculative++;
+ } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ vm_pageout_vminfo.vm_pageout_freed_cleaned++;
+ } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
+ vm_pageout_vminfo.vm_pageout_freed_internal++;
+ } else {
+ vm_pageout_vminfo.vm_pageout_freed_external++;
+ }
+
+ inactive_burst_count = 0;
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+ if (object->copy == VM_OBJECT_NULL) {
+ /*
+ * No one else can have any interest in this page.
+ * If this is an empty purgable object, the page can be
+ * reclaimed even if dirty.
+ * If the page belongs to a volatile purgable object, we
+ * reactivate it if the compressor isn't active.
+ */
+ if (object->purgable == VM_PURGABLE_EMPTY) {
+ if (m->vmp_pmapped == TRUE) {
+ /* unmap the page */
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ if (m->vmp_dirty || m->vmp_precious) {
+ /* we saved the cost of cleaning this page ! */
+ vm_page_purged_count++;
+ }
+ goto reclaim_page;
+ }
+
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+ /*
+ * With the VM compressor, the cost of
+ * reclaiming a page is much lower (no I/O),
+ * so if we find a "volatile" page, it's better
+ * to let it get compressed rather than letting
+ * it occupy a full page until it gets purged.
+ * So no need to check for "volatile" here.
+ */
+ } else if (object->purgable == VM_PURGABLE_VOLATILE) {
+ /*
+ * Avoid cleaning a "volatile" page which might
+ * be purged soon.
+ */
+
+ /* if it's wired, we can't put it on our queue */
+ assert(!VM_PAGE_WIRED(m));
+
+ /* just stick it back on! */
+ reactivated_this_call++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
+ }
+
+ goto reactivate_page;
+ }
+ }
+ /*
+ * If it's being used, reactivate.
+ * (Fictitious pages are either busy or absent.)
+ * First, update the reference and dirty bits
+ * to make sure the page is unreferenced.
+ */
+ refmod_state = -1;
+
+ if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (refmod_state & VM_MEM_REFERENCED) {
+ m->vmp_reference = TRUE;
+ }
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ if (m->vmp_reference || m->vmp_dirty) {
+ /* deal with a rogue "reusable" page */
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
+ }
+
+ if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
+ vm_pageout_state.vm_page_xpmapped_min = 0;
+ } else {
+ vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
+ }
+
+ if (!m->vmp_no_cache &&
+ page_from_bg_q == FALSE &&
+ (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
+ (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
+ /*
+ * The page we pulled off the inactive list has
+ * been referenced. It is possible for other
+ * processors to be touching pages faster than we
+ * can clear the referenced bit and traverse the
+ * inactive queue, so we limit the number of
+ * reactivations.
+ */
+ if (++reactivated_this_call >= reactivate_limit) {
+ vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
+ } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+ vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
+ } else {
+ uint32_t isinuse;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
+ }
+
+ vm_pageout_vminfo.vm_pageout_inactive_referenced++;
+reactivate_page:
+ if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
+ vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+ /*
+ * no explicit mappings of this object exist
+ * and it's not open via the filesystem
+ */
+ vm_page_deactivate(m);
+ VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
+ } else {
+ /*
+ * The page was/is being used, so put back on active list.
+ */
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+ inactive_burst_count = 0;
+ }
+#if CONFIG_BACKGROUND_QUEUE
+#if DEVELOPMENT || DEBUG
+ if (page_from_bg_q == TRUE) {
+ if (m_object->internal) {
+ vm_pageout_rejected_bq_internal++;
+ } else {
+ vm_pageout_rejected_bq_external++;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+ }
+ vm_pageout_state.vm_pageout_inactive_used++;
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+ /*
+ * Make sure we call pmap_get_refmod() if it
+ * wasn't already called just above, to update
+ * the dirty bit.
+ */
+ if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ }
+
+ /*
+ * we've got a candidate page to steal...
+ *
+ * m->vmp_dirty is up to date courtesy of the
+ * preceding check for m->vmp_reference... if
+ * we get here, then m->vmp_reference had to be
+ * FALSE (or possibly "reactivate_limit" was
+ * exceeded), but in either case we called
+ * pmap_get_refmod() and updated both
+ * m->vmp_reference and m->vmp_dirty
+ *
+ * if it's dirty or precious we need to
+ * see if the target queue is throttled...
+ * if it is, we need to skip over it by moving it back
+ * to the end of the inactive queue
+ */
+
+ inactive_throttled = FALSE;
+
+ if (m->vmp_dirty || m->vmp_precious) {
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq)) {
+ inactive_throttled = TRUE;
+ }
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+ }
+throttle_inactive:
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ object->internal && m->vmp_dirty &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_check_pageable_safe(m);
+ assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
+ m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
+ vm_page_throttled_count++;
+
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);
+
+ inactive_burst_count = 0;
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+ if (inactive_throttled == TRUE) {
+ vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
+ &delayed_unlock, &force_anonymous, page_from_bg_q);
+
+ inactive_burst_count = 0;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
+ }
+
+ lock_yield_check = TRUE;
+ continue;
+ }
+
+ /*
+ * we've got a page that we can steal...
+ * eliminate all mappings and make sure
+ * we have the up-to-date modified state
+ *
+ * if we need to do a pmap_disconnect then we
+ * need to re-evaluate m->vmp_dirty since the pmap_disconnect
+ * provides the true state atomically... the
+ * page was still mapped up to the pmap_disconnect
+ * and may have been dirtied at the last microsecond
+ *
+ * Note that if 'pmapped' is FALSE then the page is not
+ * and has not been in any map, so there is no point calling
+ * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
+ * of likely usage of the page.
+ */
+ if (m->vmp_pmapped == TRUE) {
+ int pmap_options;
+
+ /*
+ * Don't count this page as going into the compressor
+ * if any of these are true:
+ * 1) compressed pager isn't enabled
+ * 2) Freezer enabled device with compressed pager
+ * backend (exclusive use) i.e. most of the VM system
+ * (including vm_pageout_scan) has no knowledge of
+ * the compressor
+ * 3) This page belongs to a file and hence will not be
+ * sent into the compressor
+ */
+ if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
+ object->internal == FALSE) {
+ pmap_options = 0;
+ } else if (m->vmp_dirty || m->vmp_precious) {
+ /*
+ * VM knows that this page is dirty (or
+ * precious) and needs to be compressed
+ * rather than freed.
+ * Tell the pmap layer to count this page
+ * as "compressed".
+ */
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
+ } else {
+ /*
+ * VM does not know if the page needs to
+ * be preserved but the pmap layer might tell
+ * us if any mapping has "modified" it.
+ * Let the pmap layer count this page
+ * as compressed if and only if it has been
+ * modified.
+ */
+ pmap_options =
+ PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ }
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
+ pmap_options,
+ NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ /*
+ * reset our count of pages that have been reclaimed
+ * since the last page was 'stolen'
+ */
+ inactive_reclaim_run = 0;
+
+ /*
+ * If it's clean and not precious, we can free the page.
+ */
+ if (!m->vmp_dirty && !m->vmp_precious) {
+ vm_pageout_state.vm_pageout_inactive_clean++;
+
+ /*
+ * OK, at this point we have found a page we are going to free.
+ */
+#if CONFIG_PHANTOM_CACHE
+ if (!object->internal) {
+ vm_phantom_cache_add_ghost(m);
+ }
+#endif
+ goto reclaim_page;
+ }
+
+ /*
+ * The page may have been dirtied since the last check
+ * for a throttled target queue (which may have been skipped
+ * if the page was clean then). With the dirty page
+ * disconnected here, we can make one final check.
+ */
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq)) {
+ inactive_throttled = TRUE;
+ }
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+
+ if (inactive_throttled == TRUE) {
+ goto throttle_inactive;
+ }
+
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+
+ /*
+ * If Jetsam is enabled, then the sending
+ * of memory pressure notifications is handled
+ * from the same thread that takes care of high-water
+ * and other jetsams i.e. the memorystatus_thread.
+ */
+
+#else /* CONFIG_JETSAM */
+
+ vm_pressure_response();
+
+#endif /* CONFIG_JETSAM */
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
+ VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
+ }
+
+ if (object->internal) {
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
+ } else {
+ vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
+ }
+
+ /*
+ * internal pages will go to the compressor...
+ * external pages will go to the appropriate pager to be cleaned
+ * and upon completion will end up on 'vm_page_queue_cleaned' which
+ * is a preferred queue to steal from
+ */
+ vm_pageout_cluster(m);
+ inactive_burst_count = 0;
+
+ /*
+ * back to top of pageout scan loop
+ */
+ }
+}
+
+
+void
+vm_page_free_reserve(
+ int pages)
+{
+ int free_after_reserve;
+
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+ if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
+ } else {
+ vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
+ }
+ } else {
+ if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
+ } else {
+ vm_page_free_reserved += pages;
+ }
+ }
+ free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;
+
+ vm_page_free_min = vm_page_free_reserved +
+ VM_PAGE_FREE_MIN(free_after_reserve);
+
+ if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
+ vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+ }
+
+ vm_page_free_target = vm_page_free_reserved +
+ VM_PAGE_FREE_TARGET(free_after_reserve);
+
+ if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
+ vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+ }
+
+ if (vm_page_free_target < vm_page_free_min + 5) {
+ vm_page_free_target = vm_page_free_min + 5;
+ }
+
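+ /* the throttle threshold sits at roughly half of the free target */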
+ vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
+}
+
+/*
+ * vm_pageout is the high level pageout daemon.
+ */
+
+void
+vm_pageout_continue(void)
+{
+ DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
+ VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ vm_pageout_running = TRUE;
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ vm_pageout_scan();
+ /*
+ * we hold both the vm_page_queue_free_lock
+ * and the vm_page_queues_lock at this point
+ */
+ assert(vm_page_free_wanted == 0);
+ assert(vm_page_free_wanted_privileged == 0);
+ assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
+
+ vm_pageout_running = FALSE;
+#if !CONFIG_EMBEDDED
+ if (vm_pageout_waiter) {
+ vm_pageout_waiter = FALSE;
+ thread_wakeup((event_t)&vm_pageout_waiter);
+ }
+#endif /* !CONFIG_EMBEDDED */
+
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+
+ counter(c_vm_pageout_block++);
+ thread_block((thread_continue_t)vm_pageout_continue);
+ /*NOTREACHED*/
+}
+
+#if !CONFIG_EMBEDDED
+kern_return_t
+vm_pageout_wait(uint64_t deadline)
+{
+ kern_return_t kr;
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
+ vm_pageout_waiter = TRUE;
+ if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
+ &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
+ (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
+ kr = KERN_OPERATION_TIMED_OUT;
+ }
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ return kr;
+}
+#endif /* !CONFIG_EMBEDDED */
+
+
+static void
+vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
+{
+ vm_page_t m = NULL;
+ vm_object_t object;
+ vm_object_offset_t offset;
+ memory_object_t pager;
+
+ /* On systems with a compressor, the external IO thread clears its
+ * VM privileged bit to accommodate large allocations (e.g. bulk UPL
+ * creation)
+ */
+ if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
+ current_thread()->options &= ~TH_OPT_VMPRIV;
+ }
+
+ vm_page_lockspin_queues();
+
+ while (!vm_page_queue_empty(&q->pgo_pending)) {
+ q->pgo_busy = TRUE;
+ vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
+
+ assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
+ VM_PAGE_CHECK(m);
+ /*
+ * grab a snapshot of the object and offset this
+ * page is tabled in so that we can relookup this
+ * page after we've taken the object lock - these
+ * fields are stable while we hold the page queues lock
+ * but as soon as we drop it, there is nothing to keep
+ * this page in this object... we hold an activity_in_progress
+ * on this object which will keep it from terminating
+ */
+ object = VM_PAGE_OBJECT(m);
+ offset = m->vmp_offset;
+
+ m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+
+ vm_page_unlock_queues();
+
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+
+ if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
+ !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
+ /*
+ * it's either the same page that someone else has
+ * started cleaning (or it's finished cleaning or
+ * been put back on the pageout queue), or
+ * the page has been freed or we have found a
+ * new page at this offset... in all of these cases
+ * we merely need to release the activity_in_progress
+ * we took when we put the page on the pageout queue
+ */
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+ pager = object->pager;
+
+ if (pager == MEMORY_OBJECT_NULL) {
+ /*
+ * This pager has been destroyed by either
+ * memory_object_destroy or vm_object_destroy, and
+ * so there is nowhere for the page to go.
+ */
+ if (m->vmp_free_when_done) {
+ /*
+ * Just free the page... VM_PAGE_FREE takes
+ * care of cleaning up all the state...
+ * including doing the vm_pageout_throttle_up
+ */
+ VM_PAGE_FREE(m);
+ } else {
+ vm_page_lockspin_queues();
+
+ vm_pageout_throttle_up(m);
+ vm_page_activate(m);
+
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ }
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
+ VM_PAGE_CHECK(m);
+#endif
+ /*
+ * give back the activity_in_progress reference we
+ * took when we queued up this page and replace it
+ * with a paging_in_progress reference that will
+ * keep the paging offset from changing and
+ * prevent the object from terminating
+ */
+ vm_object_activity_end(object);
+ vm_object_paging_begin(object);
+ vm_object_unlock(object);
+
+ /*
+ * Send the data to the pager.
+ * any pageout clustering happens there
+ */
+ memory_object_data_return(pager,
+ m->vmp_offset + object->paging_offset,
+ PAGE_SIZE,
+ NULL,
+ NULL,
+ FALSE,
+ FALSE,
+ 0);
+
+ vm_object_lock(object);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_pageout_io_throttle();
+
+ vm_page_lockspin_queues();
+ }
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+
+ assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
+ vm_page_unlock_queues();
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
+ /*NOTREACHED*/
+}
+
+
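+/*
+ * number of locally compressed pages to accumulate before handing
+ * them back to the free list in a single vm_page_free_list() call
+ */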
+#define MAX_FREE_BATCH 32
+uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
+ * this thread.
+ */
+
+
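+/*
+ * Continuation for the compressor (internal pageout) threads: each thread
+ * pulls up to local_batch_size pages off the internal pageout queue,
+ * compresses them via vm_pageout_compress_page(), and frees the source
+ * pages in batches of MAX_FREE_BATCH.
+ */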
+void
+vm_pageout_iothread_internal_continue(struct cq *);
+void
+vm_pageout_iothread_internal_continue(struct cq *cq)
+{
+ struct vm_pageout_queue *q;
+ vm_page_t m = NULL;
+ boolean_t pgo_draining;
+ vm_page_t local_q;
+ int local_cnt;
+ vm_page_t local_freeq = NULL;
+ int local_freed = 0;
+ int local_batch_size;
+#if DEVELOPMENT || DEBUG
+ int ncomps = 0;
+ boolean_t marked_active = FALSE;
+#endif
+ KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ q = cq->q;
+#if __AMP__
+ if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
+ local_batch_size = (q->pgo_maxlaundry >> 3);
+ local_batch_size = MAX(local_batch_size, 16);
+ } else {
+ local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
+ }
+#else
+ local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
+#endif
+
+#if RECORD_THE_COMPRESSED_DATA
+ if (q->pgo_laundry) {
+ c_compressed_record_init();
+ }
+#endif
+ while (TRUE) {
+ int pages_left_on_q = 0;
+
+ local_cnt = 0;
+ local_q = NULL;
+
+ KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ vm_page_lock_queues();
+#if DEVELOPMENT || DEBUG
+ if (marked_active == FALSE) {
+ vmct_active++;
+ vmct_state[cq->id] = VMCT_ACTIVE;
+ marked_active = TRUE;
+ if (vmct_active == 1) {
+ vm_compressor_epoch_start = mach_absolute_time();
+ }
+ }
+#endif
+ KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
+
+ while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
+ vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
+ assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
+ VM_PAGE_CHECK(m);
+
+ m->vmp_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+ m->vmp_laundry = FALSE;
+
+ m->vmp_snext = local_q;
+ local_q = m;
+ local_cnt++;
+ }
+ if (local_q == NULL) {
+ break;
+ }
+
+ q->pgo_busy = TRUE;
+
+ if ((pgo_draining = q->pgo_draining) == FALSE) {
+ vm_pageout_throttle_up_batch(q, local_cnt);
+ pages_left_on_q = q->pgo_laundry;
+ } else {
+ pages_left_on_q = q->pgo_laundry - local_cnt;
+ }
+
+ vm_page_unlock_queues();
+
+#if !RECORD_THE_COMPRESSED_DATA
+ if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
+ thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
+ }
+#endif
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
+
+ while (local_q) {
+ KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
+
+ m = local_q;
+ local_q = m->vmp_snext;
+ m->vmp_snext = NULL;
+
+ if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
+#if DEVELOPMENT || DEBUG
+ ncomps++;
+#endif
+ KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);
+
+ m->vmp_snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+
+ if (local_freed >= MAX_FREE_BATCH) {
+ OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ }
+#if !CONFIG_JETSAM
+ while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+ kern_return_t wait_result;
+ int need_wakeup = 0;
+
+ if (local_freeq) {
+ OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+ vm_page_free_list(local_freeq, TRUE);
+ local_freeq = NULL;
+ local_freed = 0;
+
+ continue;
+ }
+ lck_mtx_lock_spin(&vm_page_queue_free_lock);
+
+ if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+ if (vm_page_free_wanted_privileged++ == 0) {
+ need_wakeup = 1;
+ }
+ wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
+
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ if (need_wakeup) {
+ thread_wakeup((event_t)&vm_page_free_wanted);
+ }
+
+ if (wait_result == THREAD_WAITING) {
+ thread_block(THREAD_CONTINUE_NULL);
+ }
+ } else {
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+ }
+#endif
+ }
+ if (local_freeq) {
+ OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
+
+ vm_page_free_list(local_freeq, TRUE);
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ if (pgo_draining == TRUE) {
+ vm_page_lockspin_queues();
+ vm_pageout_throttle_up_batch(q, local_cnt);
+ vm_page_unlock_queues();
+ }
+ }
+ KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ /*
+ * queue lock is held and our q is empty
+ */
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+
+ assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
+#if DEVELOPMENT || DEBUG
+ if (marked_active == TRUE) {
+ vmct_active--;
+ vmct_state[cq->id] = VMCT_IDLE;
+
+ if (vmct_active == 0) {
+ vm_compressor_epoch_stop = mach_absolute_time();
+ assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
+ "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
+ vm_compressor_epoch_start, vm_compressor_epoch_stop);
+ /* This interval includes intervals where one or more
+ * compressor threads were pre-empted
+ */
+ vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
+ }
+ }
+#endif
+ vm_page_unlock_queues();
+#if DEVELOPMENT || DEBUG
+ if (__improbable(vm_compressor_time_thread)) {
+ vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
+ vmct_stats.vmct_pages[cq->id] += ncomps;
+ vmct_stats.vmct_iterations[cq->id]++;
+ if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
+ vmct_stats.vmct_maxpages[cq->id] = ncomps;
+ }
+ if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
+ vmct_stats.vmct_minpages[cq->id] = ncomps;
+ }
+ }
+#endif
+
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
+ /*NOTREACHED*/
+}
+
+
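+/*
+ * Compress a single page into its object's compressor pager, creating the
+ * pager first if the object doesn't have one yet. On success the page is
+ * removed from its object; on failure it is reactivated. Called with an
+ * activity_in_progress reference on the object, which is released here.
+ */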
+kern_return_t
+vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
+{
+ vm_object_t object;
+ memory_object_t pager;
+ int compressed_count_delta;
+ kern_return_t retval;
+
+ object = VM_PAGE_OBJECT(m);
+
+ assert(!m->vmp_free_when_done);
+ assert(!m->vmp_laundry);
+
+ pager = object->pager;
+
+ if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
+
+ vm_object_lock(object);
+
+ /*
+ * If there is no memory object for the page, create
+ * one and hand it to the compression pager.
+ */
+
+ if (!object->pager_initialized) {
+ vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
+ }
+ if (!object->pager_initialized) {
+ vm_object_compressor_pager_create(object);
+ }
+
+ pager = object->pager;
+
+ if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+ /*
+ * Still no pager for the object,
+ * or the pager has been destroyed.
+ * Reactivate the page.
+ *
+ * Should only happen if there is no
+ * compression pager
+ */
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lockspin_queues();
+ vm_page_activate(m);
+ VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ return KERN_FAILURE;
+ }
+ vm_object_unlock(object);
+
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
+ }
+ assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
+ assert(object->activity_in_progress > 0);
+
+ retval = vm_compressor_pager_put(
+ pager,
+ m->vmp_offset + object->paging_offset,
+ VM_PAGE_GET_PHYS_PAGE(m),
+ current_chead,
+ scratch_buf,
+ &compressed_count_delta);
+
+ vm_object_lock(object);
+
+ assert(object->activity_in_progress > 0);
+ assert(VM_PAGE_OBJECT(m) == object);
+ assert( !VM_PAGE_WIRED(m));
+
+ vm_compressor_pager_count(pager,
+ compressed_count_delta,
+ FALSE, /* shared_lock */
+ object);
+
+ if (retval == KERN_SUCCESS) {
+ /*
+ * If the object is purgeable, its owner's
+ * purgeable ledgers will be updated in
+ * vm_page_remove() but the page still
+ * contributes to the owner's memory footprint,
+ * so account for it as such.
+ */
+ if ((object->purgable != VM_PURGABLE_DENY ||
+ object->vo_ledger_tag) &&
+ object->vo_owner != NULL) {
+ /* one more compressed purgeable/tagged page */
+ vm_object_owner_compressed_update(object,
+ +1);
+ }
+ VM_STAT_INCR(compressions);
+
+ if (m->vmp_tabled) {
+ vm_page_remove(m, TRUE);
+ }
+ } else {
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lockspin_queues();
+
+ vm_page_activate(m);
+ vm_pageout_vminfo.vm_compressor_failed++;
+
+ vm_page_unlock_queues();
+ }
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ return retval;
+}
+
+
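+/*
+ * Switch the external pageout queue's I/O thread between throttled and
+ * unthrottled I/O policy. Called with the page queues lock held; the lock
+ * is dropped and retaken around the policy update.
+ */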
+static void
+vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
+{
+ uint32_t policy;
+
+ if (hibernate_cleaning_in_progress == TRUE) {
+ req_lowpriority = FALSE;
+ }
+
+ if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
+ vm_page_unlock_queues();
+
+ if (req_lowpriority == TRUE) {
+ policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
+ DTRACE_VM(laundrythrottle);
+ } else {
+ policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
+ DTRACE_VM(laundryunthrottle);
+ }
+ proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
+ TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+
+ vm_page_lock_queues();
+ eq->pgo_lowpriority = req_lowpriority;
+ }
+}
+
+
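+/*
+ * Startup for the external pageout I/O thread: set its I/O policy, record
+ * its tid, mark the external queue initialized, and enter the continuation.
+ */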
+static void
+vm_pageout_iothread_external(void)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
+
+ proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
+ TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
+
+ vm_page_lock_queues();
+
+ vm_pageout_queue_external.pgo_tid = self->thread_id;
+ vm_pageout_queue_external.pgo_lowpriority = TRUE;
+ vm_pageout_queue_external.pgo_inited = TRUE;
+
+ vm_page_unlock_queues();
+
+ vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
+
+ /*NOTREACHED*/
+}
+
+
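+/*
+ * Startup for a compressor thread: record the internal queue's tid, apply
+ * any binding/thread-group policy, then enter the continuation.
+ */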
+static void
+vm_pageout_iothread_internal(struct cq *cq)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ vm_page_lock_queues();
+
+ vm_pageout_queue_internal.pgo_tid = self->thread_id;
+ vm_pageout_queue_internal.pgo_lowpriority = TRUE;
+ vm_pageout_queue_internal.pgo_inited = TRUE;
+
+ vm_page_unlock_queues();
+
+ if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
+ thread_vm_bind_group_add();
+ }
+
+#if CONFIG_THREAD_GROUPS
+ thread_group_vm_add();
+#endif /* CONFIG_THREAD_GROUPS */
+
+#if __AMP__
+ if (vm_compressor_ebound) {
+ /*
+ * Use the soft bound option for vm_compressor to allow it to run on
+ * P-cores if E-cluster is unavailable.
+ */
+ thread_bind_cluster_type(self, 'E', true);
+ }
+#endif /* __AMP__ */
+
+ thread_set_thread_name(current_thread(), "VM_compressor");
+#if DEVELOPMENT || DEBUG
+ vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
+#endif
+ vm_pageout_iothread_internal_continue(cq);
+
+ /*NOTREACHED*/
+}
+
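+/*
+ * Register the buffer cache cleanup callout; only the first
+ * registration succeeds.
+ */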
+kern_return_t
+vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
+{
+ if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
+ return KERN_SUCCESS;
+ } else {
+ return KERN_FAILURE; /* Already set */
+ }
+}
+
+extern boolean_t memorystatus_manual_testing_on;
+extern unsigned int memorystatus_level;
+
+
+#if VM_PRESSURE_EVENTS
+
+boolean_t vm_pressure_events_enabled = FALSE;
+
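+/*
+ * Recompute memorystatus_level from the available (non-compressed) memory
+ * and move memorystatus_vm_pressure_level between normal/warning/critical,
+ * waking the pressure thread and any level-change waiters as needed.
+ */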
+void
+vm_pressure_response(void)
+{
+ vm_pressure_level_t old_level = kVMPressureNormal;
+ int new_level = -1;
+ unsigned int total_pages;
+ uint64_t available_memory = 0;
+
+ if (vm_pressure_events_enabled == FALSE) {
+ return;
+ }
+
+#if CONFIG_EMBEDDED
+
+ available_memory = (uint64_t) memorystatus_available_pages;
+
+#else /* CONFIG_EMBEDDED */
+
+ available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
+ memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
+
+#endif /* CONFIG_EMBEDDED */
+
+ total_pages = (unsigned int) atop_64(max_mem);
+#if CONFIG_SECLUDED_MEMORY
+ total_pages -= vm_page_secluded_count;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
+
+ if (memorystatus_manual_testing_on) {
+ return;
+ }
+
+ old_level = memorystatus_vm_pressure_level;
+
+ switch (memorystatus_vm_pressure_level) {
+ case kVMPressureNormal:
+ {
+ if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+ new_level = kVMPressureCritical;
+ } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
+ new_level = kVMPressureWarning;
+ }
+ break;
+ }
+
+ case kVMPressureWarning:
+ case kVMPressureUrgent:
+ {
+ if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+ new_level = kVMPressureNormal;
+ } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+ new_level = kVMPressureCritical;
+ }
+ break;
+ }
+
+ case kVMPressureCritical:
+ {
+ if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+ new_level = kVMPressureNormal;
+ } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
+ new_level = kVMPressureWarning;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ if (new_level != -1) {
+ memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
+
+ if (new_level != (int) old_level) {
+ VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
+ new_level, old_level, 0, 0);
+ }
+
+ if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
+ if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
+ thread_wakeup(&vm_pressure_thread);
+ }
+
+ if (old_level != memorystatus_vm_pressure_level) {
+ thread_wakeup(&vm_pageout_state.vm_pressure_changed);
+ }
+ }
+ }
+}
+#endif /* VM_PRESSURE_EVENTS */
+
+/*
+ * Function called by a kernel thread to either get the current pressure level or
+ * wait until memory pressure changes from a given level.
+ */
+kern_return_t
+mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
+{
+#if !VM_PRESSURE_EVENTS
+
+ return KERN_FAILURE;
+
+#else /* VM_PRESSURE_EVENTS */
+
+ wait_result_t wr = 0;
+ vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
+
+ if (pressure_level == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (*pressure_level == kVMPressureJetsam) {
+ if (!wait_for_pressure) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
+ wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
+ THREAD_INTERRUPTIBLE);
+ if (wr == THREAD_WAITING) {
+ ++memorystatus_jetsam_fg_band_waiters;
+ lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
+ wr = thread_block(THREAD_CONTINUE_NULL);
+ } else {
+ lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
+ }
+ if (wr != THREAD_AWAKENED) {
+ return KERN_ABORTED;
+ }
+ *pressure_level = kVMPressureJetsam;
+ return KERN_SUCCESS;
+ }
+
+ if (wait_for_pressure == TRUE) {
+ while (old_level == *pressure_level) {
+ wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
+ THREAD_INTERRUPTIBLE);
+ if (wr == THREAD_WAITING) {
+ wr = thread_block(THREAD_CONTINUE_NULL);
+ }
+ if (wr == THREAD_INTERRUPTED) {
+ return KERN_ABORTED;
+ }
+
+ if (wr == THREAD_AWAKENED) {
+ old_level = memorystatus_vm_pressure_level;
+ }
+ }
+ }
+
+ *pressure_level = old_level;
+ return KERN_SUCCESS;
+#endif /* VM_PRESSURE_EVENTS */
+}
+
+#if VM_PRESSURE_EVENTS
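+/*
+ * Dedicated pressure-notification thread: after initialization, each wakeup
+ * (from vm_pressure_response) runs consider_vm_pressure_events() and then
+ * blocks again.
+ */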
+void
+vm_pressure_thread(void)
+{
+ static boolean_t thread_initialized = FALSE;
+
+ if (thread_initialized == TRUE) {
+ vm_pageout_state.vm_pressure_thread_running = TRUE;
+ consider_vm_pressure_events();
+ vm_pageout_state.vm_pressure_thread_running = FALSE;
+ }
+
+ thread_set_thread_name(current_thread(), "VM_pressure");
+ thread_initialized = TRUE;
+ assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
+ thread_block((thread_continue_t)vm_pressure_thread);
+}
+#endif /* VM_PRESSURE_EVENTS */
+
+
+/*
+ * called once per-second via "compute_averages"
+ */
+void
+compute_pageout_gc_throttle(__unused void *arg)
+{
+ if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
+ vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;
+
+ thread_wakeup((event_t) &vm_pageout_garbage_collect);
+ }
+}
+
+/*
+ * vm_pageout_garbage_collect can also be called when the zone allocator needs
+ * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
+ * jetsams. We need to check if the zone map size is above its jetsam limit to
+ * decide if this was indeed the case.
+ *
+ * We need to do this on a different thread for the following reasons:
+ *
+ * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
+ * itself causing the system to hang. We perform synchronous jetsams if we're
+ * leaking in the VM map entries zone, so the leaking process could be doing a
+ * zalloc for a VM map entry while holding its vm_map lock, when it decides to
+ * jetsam itself. We also need the vm_map lock on the process termination path,
+ * which would now lead the dying process to deadlock against itself.
+ *
+ * 2. The jetsam path might need to allocate zone memory itself. We could try
+ * using the non-blocking variant of zalloc for this path, but we can still
+ * end up trying to do a kernel_memory_allocate when the zone maps are almost
+ * full.
+ */
+
+void
+vm_pageout_garbage_collect(int collect)
+{
+ if (collect) {
+ if (is_zone_map_nearing_exhaustion()) {
+ /*
+ * Woken up by the zone allocator for zone-map-exhaustion jetsams.
+ *
+ * Bail out after calling zone_gc (which triggers the
+ * zone-map-exhaustion jetsams). If we fall through, the subsequent
+ * operations that clear out a bunch of caches might allocate zone
+ * memory themselves (e.g. vm_map operations would need VM map
+ * entries). Since the zone map is almost full at this point, we
+ * could end up with a panic. We just need to quickly jetsam a
+ * process and exit here.
+ *
+ * It could so happen that we were woken up to relieve memory
+ * pressure and the zone map also happened to be near its limit at
+ * the time, in which case we'll skip out early. But that should be
+ * ok; if memory pressure persists, the thread will simply be woken
+ * up again.
+ */
+ consider_zone_gc(TRUE);
+ } else {
+ /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
+ boolean_t buf_large_zfree = FALSE;
+ boolean_t first_try = TRUE;
+
+ stack_collect();
+
+ consider_machine_collect();
+ mbuf_drain(FALSE);
+
+ do {
+ if (consider_buffer_cache_collect != NULL) {
+ buf_large_zfree = (*consider_buffer_cache_collect)(0);
+ }
+ if (first_try == TRUE || buf_large_zfree == TRUE) {
+ /*
+ * consider_zone_gc should be last, because the other operations
+ * might return memory to zones.
+ */
+ consider_zone_gc(FALSE);
+ }
+ first_try = FALSE;
+ } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
+
+ consider_machine_adjust();
+ }
+ }
+
+ assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
+
+ thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
+ /*NOTREACHED*/
+}
+
+
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+
+
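+/*
+ * Decide whether the major VM threads should be bound to a single processor,
+ * based on the CPU count or the vm_restricted_to_single_processor boot-arg.
+ */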
+void
+vm_set_restrictions(unsigned int num_cpus)
+{
+ int vm_restricted_to_single_processor = 0;
+
+ if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
+ kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
+ vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
+ } else {
+ assert(num_cpus > 0);
+
+ if (num_cpus <= 3) {
+ /*
+ * on systems with a limited number of CPUs, bind the
+ * 4 major threads that can free memory and that tend to use
+ * a fair bit of CPU under pressured conditions to a single processor.
+ * This ensures that these threads don't hog all of the available CPUs
+ * (important for camera launch), while allowing them to run independently
+ * w/r to locks... the 4 threads are
+ * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
+ * vm_compressor_swap_trigger_thread (minor and major compactions),
+ * memorystatus_thread (jetsams).
+ *
+ * the first time the thread is run, it is responsible for checking the
+ * state of vm_restricted_to_single_processor, and if TRUE it calls
+ * thread_bind_master... someday this should be replaced with a group
+ * scheduling mechanism and KPI.
+ */
+ vm_pageout_state.vm_restricted_to_single_processor = TRUE;
+ } else {
+ vm_pageout_state.vm_restricted_to_single_processor = FALSE;
+ }
+ }
+}
+
+void
+vm_pageout(void)
+{
+ thread_t self = current_thread();
+ thread_t thread;
+ kern_return_t result;
+ spl_t s;
+
+ /*
+ * Set thread privileges.
+ */
+ s = splsched();
+
+ vm_pageout_scan_thread = self;
+
+#if CONFIG_VPS_DYNAMIC_PRIO
+
+ int vps_dynprio_bootarg = 0;
+
+ if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
+ vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
+ kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
+ } else {
+ if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
+ vps_dynamic_priority_enabled = TRUE;
+ } else {
+ vps_dynamic_priority_enabled = FALSE;
+ }
+ }
+
+ if (vps_dynamic_priority_enabled) {
+ sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
+ thread_set_eager_preempt(self);
+ } else {
+ sched_set_kernel_thread_priority(self, BASEPRI_VM);
+ }
+
+#else /* CONFIG_VPS_DYNAMIC_PRIO */
+
+ vps_dynamic_priority_enabled = FALSE;
+ sched_set_kernel_thread_priority(self, BASEPRI_VM);
+
+#endif /* CONFIG_VPS_DYNAMIC_PRIO */
+
+ thread_lock(self);
+ self->options |= TH_OPT_VMPRIV;
+ thread_unlock(self);
+
+ if (!self->reserved_stack) {
+ self->reserved_stack = self->kernel_stack;
+ }
+
+ if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
+ vps_dynamic_priority_enabled == FALSE) {
+ thread_vm_bind_group_add();
+ }
+
+
+#if CONFIG_THREAD_GROUPS
+ thread_group_vm_add();
+#endif /* CONFIG_THREAD_GROUPS */
+
+#if __AMP__
+ PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
+ if (vm_pgo_pbound) {
+ /*
+ * Use the soft bound option for vm pageout to allow it to run on
+ * E-cores if P-cluster is unavailable.
+ */
+ thread_bind_cluster_type(self, 'P', true);
+ }
+#endif /* __AMP__ */
+
+ splx(s);
+
+ thread_set_thread_name(current_thread(), "VM_pageout_scan");
+
+ /*
+ * Initialize some paging parameters.
+ */
+
+ vm_pageout_state.vm_pressure_thread_running = FALSE;
+ vm_pageout_state.vm_pressure_changed = FALSE;
+ vm_pageout_state.memorystatus_purge_on_warning = 2;
+ vm_pageout_state.memorystatus_purge_on_urgent = 5;
+ vm_pageout_state.memorystatus_purge_on_critical = 8;
+ vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
+ vm_pageout_state.vm_page_speculative_percentage = 5;
+ vm_pageout_state.vm_page_speculative_target = 0;
+
+ vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
+ vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;
+
+ vm_pageout_state.vm_pageout_swap_wait = 0;
+ vm_pageout_state.vm_pageout_idle_wait = 0;
+ vm_pageout_state.vm_pageout_empty_wait = 0;
+ vm_pageout_state.vm_pageout_burst_wait = 0;
+ vm_pageout_state.vm_pageout_deadlock_wait = 0;
+ vm_pageout_state.vm_pageout_deadlock_relief = 0;
+ vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;
+
+ vm_pageout_state.vm_pageout_inactive = 0;
+ vm_pageout_state.vm_pageout_inactive_used = 0;
+ vm_pageout_state.vm_pageout_inactive_clean = 0;
+
+ vm_pageout_state.vm_memory_pressure = 0;
+ vm_pageout_state.vm_page_filecache_min = 0;
+#if CONFIG_JETSAM
+ vm_pageout_state.vm_page_filecache_min_divisor = 70;
+ vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
+#else
+ vm_pageout_state.vm_page_filecache_min_divisor = 27;
+ vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
+#endif
+ vm_pageout_state.vm_page_free_count_init = vm_page_free_count;
+
+ vm_pageout_state.vm_pageout_considered_page_last = 0;
+
+ if (vm_pageout_state.vm_pageout_swap_wait == 0) {
+ vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
+ }
+
+ if (vm_pageout_state.vm_pageout_idle_wait == 0) {
+ vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
+ }
+
+ if (vm_pageout_state.vm_pageout_burst_wait == 0) {
+ vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
+ }
+
+ if (vm_pageout_state.vm_pageout_empty_wait == 0) {
+ vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+ }
+
+ if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
+ vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+ }
+
+ if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
+ vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+ }
+
+ if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
+ vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+ }
+ /*
+ * even if we've already called vm_page_free_reserve,
+ * call it again here to ensure that the targets are
+ * accurately calculated (it uses vm_page_free_count_init);
+ * calling it with an arg of 0 will not change the reserve
+ * but will recalculate free_min and free_target
+ */
+ if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
+ vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
+ } else {
+ vm_page_free_reserve(0);
+ }
+
+
+ vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
+ vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+ vm_pageout_queue_external.pgo_laundry = 0;
+ vm_pageout_queue_external.pgo_idle = FALSE;
+ vm_pageout_queue_external.pgo_busy = FALSE;
+ vm_pageout_queue_external.pgo_throttled = FALSE;
+ vm_pageout_queue_external.pgo_draining = FALSE;
+ vm_pageout_queue_external.pgo_lowpriority = FALSE;
+ vm_pageout_queue_external.pgo_tid = -1;
+ vm_pageout_queue_external.pgo_inited = FALSE;
+
+ vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
+ vm_pageout_queue_internal.pgo_maxlaundry = 0;
+ vm_pageout_queue_internal.pgo_laundry = 0;
+ vm_pageout_queue_internal.pgo_idle = FALSE;
+ vm_pageout_queue_internal.pgo_busy = FALSE;
+ vm_pageout_queue_internal.pgo_throttled = FALSE;
+ vm_pageout_queue_internal.pgo_draining = FALSE;
+ vm_pageout_queue_internal.pgo_lowpriority = FALSE;
+ vm_pageout_queue_internal.pgo_tid = -1;
+ vm_pageout_queue_internal.pgo_inited = FALSE;
+
+ /* internal pageout thread started when default pager registered first time */
+ /* external pageout and garbage collection threads started here */
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
+ BASEPRI_VM,
+ &vm_pageout_state.vm_pageout_external_iothread);
+ if (result != KERN_SUCCESS) {
+ panic("vm_pageout_iothread_external: create failed");
+ }
+ thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
+ thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+ if (result != KERN_SUCCESS) {
+ panic("vm_pageout_garbage_collect: create failed");
+ }
+ thread_set_thread_name(thread, "VM_pageout_garbage_collect");
+ thread_deallocate(thread);
+
+#if VM_PRESSURE_EVENTS
+ result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+
+ if (result != KERN_SUCCESS) {
+ panic("vm_pressure_thread: create failed");
+ }
+
+ thread_deallocate(thread);
+#endif
+
+ vm_object_reaper_init();
+
+
+ bzero(&vm_config, sizeof(vm_config));
+
+ switch (vm_compressor_mode) {
+ case VM_PAGER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
+ OS_FALLTHROUGH;
+
+ case VM_PAGER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ break;
+
+ case VM_PAGER_FREEZER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
+ OS_FALLTHROUGH;
+
+ case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.freezer_swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_NOT_CONFIGURED:
+ break;
+
+ default:
+ printf("unknown compressor mode - %x\n", vm_compressor_mode);
+ break;
+ }
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+ vm_compressor_pager_init();
+ }
+
+#if VM_PRESSURE_EVENTS
+ vm_pressure_events_enabled = TRUE;
+#endif /* VM_PRESSURE_EVENTS */
+
+#if CONFIG_PHANTOM_CACHE
+ vm_phantom_cache_init();
+#endif
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ pmap_protect(kernel_pmap,
+ vm_page_fake_buckets_start,
+ vm_page_fake_buckets_end,
+ VM_PROT_READ);
+// *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+#if VM_OBJECT_TRACKING
+ vm_object_tracking_init();
+#endif /* VM_OBJECT_TRACKING */
+
+ vm_pageout_continue();
+
+ /*
+ * Unreached code!
+ *
+ * The vm_pageout_continue() call above never returns, so the code below is never
+ * executed. We take advantage of this to declare several DTrace VM related probe
+ * points that our kernel doesn't have an analog for. These are probe points that
+ * exist in Solaris and are in the DTrace documentation, so people may have written
+ * scripts that use them. Declaring the probe points here means their scripts will
+ * compile and execute which we want for portability of the scripts, but since this
+ * section of code is never reached, the probe points will simply never fire. Yes,
+ * this is basically a hack. The problem is the DTrace probe points were chosen with
+ * Solaris specific VM events in mind, not portability to different VM implementations.
+ */
+
+ DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
+ /*NOTREACHED*/
+}
+
+
+
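+/*
+ * Size and launch the compressor (internal pageout) threads: pick a thread
+ * count from the CPU count and boot-args, set the internal queue's laundry
+ * limit, allocate per-thread scratch buffers, and start one
+ * vm_pageout_iothread_internal per thread.
+ */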
+kern_return_t
+vm_pageout_internal_start(void)
+{
+ kern_return_t result;
+ host_basic_info_data_t hinfo;
+ vm_offset_t buf, bufsize;
+
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+#define BSD_HOST 1
+ host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+ assert(hinfo.max_cpus > 0);
+
+#if CONFIG_EMBEDDED
+ vm_pageout_state.vm_compressor_thread_count = 1;
+#else
+ if (hinfo.max_cpus > 4) {
+ vm_pageout_state.vm_compressor_thread_count = 2;
+ } else {
+ vm_pageout_state.vm_compressor_thread_count = 1;
+ }
+#endif
+ PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
+ sizeof(vm_pageout_state.vm_compressor_thread_count));
+
+#if __AMP__
+ PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound));
+ if (vm_compressor_ebound) {
+ vm_pageout_state.vm_compressor_thread_count = 2;
+ }
+#endif
+ if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
+ vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
+ }
+ if (vm_pageout_state.vm_compressor_thread_count <= 0) {
+ vm_pageout_state.vm_compressor_thread_count = 1;
+ } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
+ vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
+ }
+
+ vm_pageout_queue_internal.pgo_maxlaundry =
+ (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
+
+ PE_parse_boot_argn("vmpgoi_maxlaundry",
+ &vm_pageout_queue_internal.pgo_maxlaundry,
+ sizeof(vm_pageout_queue_internal.pgo_maxlaundry));
+
+ bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
+ if (kernel_memory_allocate(kernel_map, &buf,
+ bufsize * vm_pageout_state.vm_compressor_thread_count,
+ 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
+ panic("vm_pageout_internal_start: Unable to allocate %zd bytes",
+ (size_t)(bufsize * vm_pageout_state.vm_compressor_thread_count));
+ }
+
+ for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
+ ciq[i].id = i;
+ ciq[i].q = &vm_pageout_queue_internal;
+ ciq[i].current_chead = NULL;
+ ciq[i].scratch_buf = (char *)(buf + i * bufsize);
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
+ (void *)&ciq[i], BASEPRI_VM,
+ &vm_pageout_state.vm_pageout_internal_iothread);
+
+ if (result == KERN_SUCCESS) {
+ thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
+ } else {
+ break;
+ }
+ }
+ return result;
+}
+
+#if CONFIG_IOSCHED
+/*
+ * To support I/O Expedite for compressed files we mark the upls with special flags.
+ * The way decmpfs works is that we create a big upl which marks all the pages needed to
+ * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
+ * then issues smaller I/Os for the compressed data, decompresses them and puts the data into the pages
+ * being held in the big original UPL. We mark each of these smaller UPLs with the flag
+ * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
+ * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
+ * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
+ * unless the real I/O upl is being destroyed).
+ */
+
+
+static void
+upl_set_decmp_info(upl_t upl, upl_t src_upl)
+{
+ assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+
+ upl_lock(src_upl);
+ if (src_upl->decmp_io_upl) {
+ /*
+ * If there is already an alive real I/O UPL, ignore this new UPL.
+ * This case should rarely happen and even if it does, it just means
+ * that we might issue a spurious expedite which the driver is expected
+ * to handle.
+ */
+ upl_unlock(src_upl);
+ return;
+ }
+ src_upl->decmp_io_upl = (void *)upl;
+ src_upl->ref_count++;
+
+ upl->flags |= UPL_DECMP_REAL_IO;
+ upl->decmp_io_upl = (void *)src_upl;
+ upl_unlock(src_upl);
+}
+#endif /* CONFIG_IOSCHED */
+
+#if UPL_DEBUG
+int upl_debug_enabled = 1;
+#else
+int upl_debug_enabled = 0;
+#endif
+
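+/*
+ * Allocate and initialize a UPL. UPL_CREATE_INTERNAL appends a
+ * upl_page_info array and UPL_CREATE_LITE appends a page bitmap; both are
+ * carved out of the single kalloc block.
+ */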
+static upl_t
+upl_create(int type, int flags, upl_size_t size)
+{
+ upl_t upl;
+ vm_size_t page_field_size = 0;
+ int upl_flags = 0;
+ vm_size_t upl_size = sizeof(struct upl);
+
+ assert(page_aligned(size));
+
+ size = round_page_32(size);
+
+ if (type & UPL_CREATE_LITE) {
+ page_field_size = (atop(size) + 7) >> 3;
+ page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+
+ upl_flags |= UPL_LITE;
+ }
+ if (type & UPL_CREATE_INTERNAL) {
+ upl_size += sizeof(struct upl_page_info) * atop(size);
+
+ upl_flags |= UPL_INTERNAL;
+ }
+ upl = (upl_t)kalloc(upl_size + page_field_size);
+
+ if (page_field_size) {
+ bzero((char *)upl + upl_size, page_field_size);
+ }
+
+ upl->flags = upl_flags | flags;
+ upl->kaddr = (vm_offset_t)0;
+ upl->u_offset = 0;
+ upl->u_size = 0;
+ upl->map_object = NULL;
+ upl->ref_count = 1;
+ upl->ext_ref_count = 0;
+ upl->highest_page = 0;
+ upl_lock_init(upl);
+ upl->vector_upl = NULL;
+ upl->associated_upl = NULL;
+ upl->upl_iodone = NULL;
+#if CONFIG_IOSCHED
+ if (type & UPL_CREATE_IO_TRACKING) {
+ upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
+ }
+
+ upl->upl_reprio_info = 0;
+ upl->decmp_io_upl = 0;
+ if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
+ /* Only support expedite on internal UPLs */
+ thread_t curthread = current_thread();
+ upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
+ bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
+ upl->flags |= UPL_EXPEDITE_SUPPORTED;
+ if (curthread->decmp_upl != NULL) {
+ upl_set_decmp_info(upl, curthread->decmp_upl);
+ }
+ }
+#endif
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
+ upl->upl_creator = current_thread();
+ upl->uplq.next = 0;
+ upl->uplq.prev = 0;
+ upl->flags |= UPL_TRACKED_BY_OBJECT;
+ }
+#endif
+
+#if UPL_DEBUG
+ upl->ubc_alias1 = 0;
+ upl->ubc_alias2 = 0;
+
+ upl->upl_state = 0;
+ upl->upl_commit_index = 0;
+ bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
+
+ (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+#endif /* UPL_DEBUG */
+
+ return upl;
+}
+
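+/*
+ * Tear down a UPL: drop the decmpfs cross-links, remove it from its
+ * object's UPL queue when tracked, and release the map_object reference
+ * taken for shadowed UPLs.
+ */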
+static void
+upl_destroy(upl_t upl)
+{
+ int page_field_size; /* bit field in word size buf */
+ int size;
+
+// DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);
+
+ if (upl->ext_ref_count) {
+ panic("upl(%p) ext_ref_count", upl);
+ }
+
+#if CONFIG_IOSCHED
+ if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
+ upl_t src_upl;
+ src_upl = upl->decmp_io_upl;
+ assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+ upl_lock(src_upl);
+ src_upl->decmp_io_upl = NULL;
+ upl_unlock(src_upl);
+ upl_deallocate(src_upl);
+ }
+#endif /* CONFIG_IOSCHED */
+
+#if CONFIG_IOSCHED || UPL_DEBUG
+ if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
+ !(upl->flags & UPL_VECTOR)) {
+ vm_object_t object;
+
+ if (upl->flags & UPL_SHADOWED) {
+ object = upl->map_object->shadow;
+ } else {
+ object = upl->map_object;
+ }
+
+ vm_object_lock(object);
+ queue_remove(&object->uplq, upl, upl_t, uplq);
+ vm_object_activity_end(object);
+ vm_object_collapse(object, 0, TRUE);
+ vm_object_unlock(object);
+ }
+#endif
+ /*
+ * drop a reference on the map_object whether or
+ * not a pageout object is inserted
+ */
+ if (upl->flags & UPL_SHADOWED) {
+ vm_object_deallocate(upl->map_object);
+ }
+
+ if (upl->flags & UPL_DEVICE_MEMORY) {
+ size = PAGE_SIZE;
+ } else {
+ size = upl_adjusted_size(upl, PAGE_MASK);
+ }
+ page_field_size = 0;