+ /*
+ * SECLUDED_AGING_ALONG_ACTIVE:
+ * There might be free pages at the tail of the
+ * secluded queue:
+ * just move them to the free queue (in batches).
+ * There can also be an excessive number of "inuse"
+ * pages:
+ * we age them by resetting their "referenced" bit and
+ * moving them to the inactive queue. Their trip
+ * through the secluded queue was equivalent to a trip
+ * through the active queue.
+ *
+ * We're holding the page queue lock, so we need
+ * to throttle and give someone else a chance to
+ * grab that lock if needed.
+ *
+ * We're also limiting the number of secluded "inuse"
+ * pages that get moved to the inactive queue, using
+ * the same "active_burst_count" method we use when
+ * balancing the active and inactive queues, because
+ * there can be a large number of extra "inuse" pages
+ * and handling them gets in the way of actually
+ * reclaiming memory.
+ */
+
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of inactive queue */
+ pmap_clear_refmod_options(
+ VM_PAGE_GET_PHYS_PAGE(secluded_page),
+ VM_MEM_REFERENCED,
+ PMAP_OPTIONS_NOFLUSH,
+ (void *)NULL);
+ vm_page_enqueue_inactive(secluded_page,
+ FALSE);
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
+ delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_AFTER_INACTIVE) {
+ /*
+ * SECLUDED_AGING_AFTER_INACTIVE:
+ * No balancing needed at this point: when we get to
+ * the "choose a victim" part below, we'll consider the
+ * extra secluded pages before any inactive page.
+ */
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_BEFORE_ACTIVE) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_BEFORE_ACTIVE:
+ * Excess secluded pages go to the active queue and
+ * will later go to the inactive queue.
+ */
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of active queue */
+ vm_page_enqueue_active(secluded_page,
+ FALSE);
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
+ delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target) {
+ panic("unsupported secluded_aging_policy %d\n",
+ secluded_aging_policy);
+ }
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ 0,
+ 0);
+ vm_page_free_list(local_freeq, TRUE);
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count, 0, 0, 0);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ assert(delayed_unlock);
+
+ if (vm_upl_wait_for_pages < 0)
+ vm_upl_wait_for_pages = 0;
+
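+ /*
+ * descriptive note: bump the delayed-unlock batch limit by the
+ * number of pages currently being waited for via UPLs, but never
+ * let it exceed the hard cap
+ */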
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;
+
+ if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
+
+ /*
+ * Move pages from active to inactive if we're below the target
+ */
+ /* if we are trying to make clean, we need to make sure we actually have inactive - mj */
+ if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
+ goto done_moving_active_pages;
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+ /*
+ * Don't sweep through active queue more than the throttle
+ * which should be kept relatively low
+ */
+ active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);
+
+ VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
+ vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);
+
+ VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
+ vm_pageout_speculative_clean, vm_pageout_inactive_clean,
+ vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
+ memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);
+
+
+ while (!vm_page_queue_empty(&vm_page_queue_active) && active_burst_count--) {
+
+ vm_pageout_active++;
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_ACTIVE_Q);
+ assert(!m->laundry);
+ assert(VM_PAGE_OBJECT(m) != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
+ *
+ * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
+ * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
+ * new reference happens. If no further references happen on the page after that remote TLB flushes
+ * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
+ * by pageout_scan, which is just fine since the last reference would have happened quite far
+ * in the past (TLB caches don't hang around for very long), and of course could just as easily
+ * have happened before we moved the page.
+ */
+ pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
+
+ /*
+ * The page might be absent or busy,
+ * but vm_page_deactivate can handle that.
+ * FALSE indicates that we don't want a H/W clear reference
+ */
+ vm_page_deactivate_internal(m, FALSE);
+
+ if (delayed_unlock++ > delayed_unlock_limit) {
+
+ if (local_freeq) {
+ vm_page_unlock_queues();
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 1);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, 0, 0, 1);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+
+ delayed_unlock = 1;
+
+ /*
+ * continue the while loop processing
+ * the active queue... need to hold
+ * the page queues lock
+ */
+ }
+ }
+
+ VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
+ vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
+ memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_END);
+
+ /**********************************************************************
+ * above this point we're playing with the active queue
+ * below this point we're playing with the throttling mechanisms
+ * and the inactive queue
+ **********************************************************************/
+
+done_moving_active_pages:
+
+#if CONFIG_BACKGROUND_QUEUE
+ if ((vm_page_free_count + local_freed >= vm_page_free_target) &&
+ ((vm_page_background_mode < VM_PAGE_BG_LEVEL_2) || (vm_page_background_count <= vm_page_background_target)))
+#else
+ if (vm_page_free_count + local_freed >= vm_page_free_target)
+#endif
+ {
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ if (local_freeq) {
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 2);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 2);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ vm_consider_waking_compactor_swapper();
+
+ vm_page_lock_queues();
+
+ /*
+ * make sure the pageout I/O threads are running
+ * throttled in case there are still requests
+ * in the laundry... since we have met our targets
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ */
+ vm_pageout_adjust_io_throttles(iq, eq, TRUE);
+
+ /*
+ * recalculate vm_page_inactive_target
+ */
+ vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count +
+ vm_page_speculative_count);
+ if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
+ !vm_page_queue_empty(&vm_page_queue_active)) {
+ /*
+ * inactive target still not met... keep going
+ * until we get the queues balanced...
+ */
+ continue;
+ }
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count >= vm_page_free_target) &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ /*
+ * done - we have met our target *and*
+ * there is no one waiting for a page.
+ */
+return_from_scan:
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
+ vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
+ vm_pageout_speculative_clean, vm_pageout_inactive_clean,
+ vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
+
+ return;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+
+ /*
+ * Before anything, we check if we have any ripe volatile
+ * objects around. If so, try to purge the first object.
+ * If the purge fails, fall through to reclaim a page instead.
+ * If the purge succeeds, go back to the top and re-evaluate
+ * the new memory situation.
+ */
+
+ assert(available_for_purge >= 0);
+ force_purge = 0; /* no force-purging */
+
+#if VM_PRESSURE_EVENTS
+ pressure_level = memorystatus_vm_pressure_level;
+
+ if (pressure_level > kVMPressureNormal) {
+
+ if (pressure_level >= kVMPressureCritical) {
+ force_purge = memorystatus_purge_on_critical;
+ } else if (pressure_level >= kVMPressureUrgent) {
+ force_purge = memorystatus_purge_on_urgent;
+ } else if (pressure_level >= kVMPressureWarning) {
+ force_purge = memorystatus_purge_on_warning;
+ }
+ }
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (available_for_purge || force_purge) {
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+
+ memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);
+
+ VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
+ if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
+ vm_pageout_purged_objects++;
+ VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
+ memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
+ continue;
+ }
+ VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
+ memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
+ }
+
+ if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
+ /*
+ * try to pull pages from the aging bins...
+ * see vm_page.h for an explanation of how
+ * this mechanism works
+ */
+ struct vm_speculative_age_q *aq;
+ boolean_t can_steal = FALSE;
+ int num_scanned_queues;
+
+ aq = &vm_page_queue_speculative[speculative_steal_index];
+
+ num_scanned_queues = 0;
+ while (vm_page_queue_empty(&aq->age_q) &&
+ num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
+
+ speculative_steal_index++;
+
+ if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
+ speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+
+ aq = &vm_page_queue_speculative[speculative_steal_index];
+ }
+
+ if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
+ /*
+ * XXX We've scanned all the speculative
+ * queues but still haven't found one
+ * that is not empty, even though
+ * vm_page_speculative_count is not 0.
+ *
+ * report the anomaly...
+ */
+ printf("vm_pageout_scan: "
+ "all speculative queues empty "
+ "but count=%d. Re-adjusting.\n",
+ vm_page_speculative_count);
+ if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
+ vm_page_speculative_count_drift_max = vm_page_speculative_count;
+ vm_page_speculative_count_drifts++;
+#if DEVELOPMENT || DEBUG
+ panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
+#endif /* DEVELOPMENT || DEBUG */
+ /* readjust... */
+ vm_page_speculative_count = 0;
+ /* ... and continue */
+ continue;
+ }
+
+ if (vm_page_speculative_count > vm_page_speculative_target)
+ can_steal = TRUE;
+ else {
+ if (!delay_speculative_age) {
+ mach_timespec_t ts_fully_aged;
+
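+ /*
+ * descriptive note: compute the time at which the pages in this
+ * aging bin will be considered fully aged and only allow
+ * stealing once that time has passed... otherwise start
+ * counting down the delay
+ */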
+ ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
+ ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
+ * 1000 * NSEC_PER_USEC;
+
+ ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
+ can_steal = TRUE;
+ else
+ delay_speculative_age++;
+ } else {
+ delay_speculative_age++;
+ if (delay_speculative_age == DELAY_SPECULATIVE_AGE)
+ delay_speculative_age = 0;
+ }
+ }
+ if (can_steal == TRUE)
+ vm_page_speculate_ageit(aq);
+ }
+#if CONFIG_BACKGROUND_QUEUE
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0 &&
+ ((vm_page_background_mode == VM_PAGE_BG_DISABLED) || (vm_page_background_count <= vm_page_background_target)))
+#else
+ if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0)
+#endif
+ {
+ int pages_evicted;
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ pages_evicted = vm_object_cache_evict(100, 10);
+
+ if (pages_evicted) {
+
+ vm_pageout_cache_evicted += pages_evicted;
+
+ VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
+ vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
+ memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);
+
+ /*
+ * we just freed up to 100 pages,
+ * so go back to the top of the main loop
+ * and re-evaluate the memory situation
+ */
+ continue;
+ } else
+ cache_evict_throttle = 100;
+ }
+ if (cache_evict_throttle)
+ cache_evict_throttle--;
+
+#if CONFIG_JETSAM
+ /*
+ * don't let the filecache_min fall below 15% of available memory
+ * on systems with an active compressor that isn't nearing its
+ * limits w/r to accepting new data
+ *
+ * on systems w/o the compressor/swapper, the filecache is always
+ * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
+ * since most (if not all) of the anonymous pages are in the
+ * throttled queue (which isn't counted as available) which
+ * effectively disables this filter
+ */
+ if (vm_compressor_low_on_space())
+ vm_page_filecache_min = 0;
+ else
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 7);
+#else
+ /*
+ * don't let the filecache_min fall below 33% of available memory...
+ */
+ vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 3);
+#endif
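+ /*
+ * descriptive note: if the free list has fallen critically low,
+ * drop the filecache floor entirely so file-backed pages are
+ * fair game for reclaim
+ */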
+ if (vm_page_free_count < (vm_page_free_reserved / 4))
+ vm_page_filecache_min = 0;
+
+ exceeded_burst_throttle = FALSE;
+ /*
+ * Sometimes we have to pause:
+ * 1) No inactive pages - nothing to do.
+ * 2) Loop control - no acceptable pages found on the inactive queue
+ * within the last vm_pageout_burst_inactive_throttle iterations
+ * 3) Flow control - default pageout queue is full
+ */
+ if (vm_page_queue_empty(&vm_page_queue_inactive) &&
+ vm_page_queue_empty(&vm_page_queue_anonymous) &&
+ vm_page_queue_empty(&sq->age_q)) {
+ vm_pageout_scan_empty_throttle++;
+ msecs = vm_pageout_empty_wait;
+ goto vm_pageout_scan_delay;
+
+ } else if (inactive_burst_count >=
+ MIN(vm_pageout_burst_inactive_throttle,
+ (vm_page_inactive_count +
+ vm_page_speculative_count))) {
+ vm_pageout_scan_burst_throttle++;
+ msecs = vm_pageout_burst_wait;
+
+ exceeded_burst_throttle = TRUE;
+ goto vm_pageout_scan_delay;
+
+ } else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
+ VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
+ vm_pageout_scan_swap_throttle++;
+ msecs = vm_pageout_swap_wait;
+ goto vm_pageout_scan_delay;
+
+ } else if (VM_PAGE_Q_THROTTLED(iq) &&
+ VM_DYNAMIC_PAGING_ENABLED()) {
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+
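+ /*
+ * descriptive note: the default pageout queue is throttled and
+ * dynamic paging is enabled... run the flow-control state machine
+ * (FCS_IDLE -> FCS_DELAYED -> FCS_DEADLOCK_DETECTED) to decide
+ * how long to wait before blocking
+ */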
+ switch (flow_control.state) {
+
+ case FCS_IDLE:
+ if ((vm_page_free_count + local_freed) < vm_page_free_target) {
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ if (local_freeq) {
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 3);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 3);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ thread_yield_internal(1);
+
+ vm_page_lock_queues();
+
+ if (!VM_PAGE_Q_THROTTLED(iq)) {
+ vm_pageout_scan_yield_unthrottled++;
+ continue;
+ }
+ if (vm_page_pageable_external_count > vm_page_filecache_min &&
+ !vm_page_queue_empty(&vm_page_queue_inactive)) {
+ anons_grabbed = ANONS_GRABBED_LIMIT;
+ vm_pageout_scan_throttle_deferred++;
+ goto consider_inactive;
+ }
+ if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
+ continue;
+ }
+reset_deadlock_timer:
+ ts.tv_sec = vm_pageout_deadlock_wait / 1000;
+ ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
+ clock_get_system_nanotime(&sec, &nsec);
+ flow_control.ts.tv_sec = (unsigned int) sec;
+ flow_control.ts.tv_nsec = nsec;
+ ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
+
+ flow_control.state = FCS_DELAYED;
+ msecs = vm_pageout_deadlock_wait;
+
+ break;
+
+ case FCS_DELAYED:
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
+ /*
+ * the pageout thread for the default pager is potentially
+ * deadlocked since the
+ * default pager queue has been throttled for more than the
+ * allowable time... we need to move some clean pages or dirty
+ * pages belonging to the external pagers if they aren't throttled
+ * vm_page_free_wanted represents the number of threads currently
+ * blocked waiting for pages... we'll move one page for each of
+ * these plus a fixed amount to break the logjam... once we're done
+ * moving this number of pages, we'll re-enter the FSC_DELAYED state
+ * with a new timeout target since we have no way of knowing
+ * whether we've broken the deadlock except through observation
+ * of the queue associated with the default pager... we need to
+ * stop moving pages and allow the system to run to see what
+ * state it settles into.
+ */
+ vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
+ vm_pageout_scan_deadlock_detected++;
+ flow_control.state = FCS_DEADLOCK_DETECTED;
+ thread_wakeup((event_t) &vm_pageout_garbage_collect);
+ goto consider_inactive;
+ }
+ /*
+ * just resniff instead of trying
+ * to compute a new delay time... we're going to be
+ * awakened immediately upon a laundry completion,
+ * so we won't wait any longer than necessary
+ */
+ msecs = vm_pageout_idle_wait;
+ break;
+
+ case FCS_DEADLOCK_DETECTED:
+ if (vm_pageout_deadlock_target)
+ goto consider_inactive;
+ goto reset_deadlock_timer;
+
+ }
+vm_pageout_scan_delay:
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ vm_page_unlock_queues();
+
+ if (local_freeq) {
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 3);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 3);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ vm_consider_waking_compactor_swapper();
+
+ vm_page_lock_queues();
+
+ if (flow_control.state == FCS_DELAYED &&
+ !VM_PAGE_Q_THROTTLED(iq)) {
+ flow_control.state = FCS_IDLE;
+ goto consider_inactive;
+ }
+
+ if (vm_page_free_count >= vm_page_free_target) {
+ /*
+ * we're here because
+ * 1) someone else freed up some pages while we had
+ * the queues unlocked above
+ * and we've hit one of the 3 conditions that
+ * cause us to pause the pageout scan thread
+ *
+ * since we already have enough free pages,
+ * let's avoid stalling and return normally
+ *
+ * before we return, make sure the pageout I/O threads
+ * are running throttled in case there are still requests
+ * in the laundry... since we have enough free pages
+ * we don't need the laundry to be cleaned in a timely
+ * fashion... so let's avoid interfering with foreground
+ * activity
+ *
+ * we don't want to hold vm_page_queue_free_lock when
+ * calling vm_pageout_adjust_io_throttles (since it
+ * may cause other locks to be taken), we do the initial
+ * check outside of the lock. Once we take the lock,
+ * we recheck the condition since it may have changed.
+ * if it has, no problem, we will make the threads
+ * non-throttled before actually blocking
+ */
+ vm_pageout_adjust_io_throttles(iq, eq, TRUE);
+ }
+ lck_mtx_lock(&vm_page_queue_free_lock);
+
+ if (vm_page_free_count >= vm_page_free_target &&
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
+ goto return_from_scan;
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
+ /*
+ * we're most likely about to block due to one of
+ * the 3 conditions that cause vm_pageout_scan to
+ * not be able to make forward progress w/r
+ * to providing new pages to the free queue,
+ * so unthrottle the I/O threads in case we
+ * have laundry to be cleaned... it needs
+ * to be completed ASAP.
+ *
+ * even if we don't block, we want the io threads
+ * running unthrottled since the sum of free +
+ * clean pages is still under our free target
+ */
+ vm_pageout_adjust_io_throttles(iq, eq, FALSE);
+ }
+ if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
+ /*
+ * if we get here we're below our free target and
+ * we're stalling due to a full laundry queue or
+ * we don't have any inactive pages other than
+ * those in the clean queue...
+ * however, we have pages on the clean queue that
+ * can be moved to the free queue, so let's not
+ * stall the pageout scan
+ */
+ flow_control.state = FCS_IDLE;
+ goto consider_inactive;
+ }
+ VM_CHECK_MEMORYSTATUS;
+
+ if (flow_control.state != FCS_IDLE)
+ vm_pageout_scan_throttle++;
+ iq->pgo_throttled = TRUE;
+
+ assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
+ counter(c_vm_pageout_scan_block++);
+
+ vm_page_unlock_queues();
+
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);
+
+ thread_block(THREAD_CONTINUE_NULL);
+
+ VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
+ iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
+ memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);
+
+ vm_page_lock_queues();
+ delayed_unlock = 1;
+
+ iq->pgo_throttled = FALSE;
+
+ if (loop_count >= vm_page_inactive_count)
+ loop_count = 0;
+ inactive_burst_count = 0;
+
+ goto Restart;
+ /*NOTREACHED*/
+ }
+
+
+ flow_control.state = FCS_IDLE;
+consider_inactive:
+ vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
+ vm_pageout_inactive_external_forced_reactivate_limit);
+ loop_count++;
+ inactive_burst_count++;
+ vm_pageout_inactive++;
+
+
+ /*
+ * Choose a victim.
+ */
+ while (1) {
+ uint32_t inactive_external_count;
+
+#if CONFIG_BACKGROUND_QUEUE
+ ignore_reference = FALSE;
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
+ m = NULL;
+ m_object = VM_OBJECT_NULL;
+
+ if (VM_DYNAMIC_PAGING_ENABLED()) {
+ assert(vm_page_throttled_count == 0);
+ assert(vm_page_queue_empty(&vm_page_queue_throttled));
+ }
+
+
+#if CONFIG_SECLUDED_MEMORY
+ if ((secluded_aging_policy ==
+ SECLUDED_AGING_AFTER_INACTIVE) &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ /*
+ * SECLUDED_AGING_AFTER_INACTIVE:
+ * Secluded pages have already been aged
+ * through the active and inactive queues, and
+ * we now have too many of them, so let's
+ * balance that queue by considering reclaiming
+ * the oldest page in the secluded queue.
+ */
+ assert(!vm_page_queue_empty(&vm_page_queue_secluded));
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_secluded);
+ if (m->vm_page_object == 0) {
+ /*
+ * It's already a free page:
+ * just move it to a free queue.
+ */
+ vm_page_queues_remove(m, TRUE);
+ assert(m->busy);
+ assert(m->pageq.next == 0);
+ assert(m->pageq.prev == 0);
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+ goto done_with_inactivepage;
+ }
+ /*
+ * Not a free page: we've found our next
+ * "victim".
+ */
+ break;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+#if CONFIG_BACKGROUND_QUEUE
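+ /*
+ * descriptive note: when the background queue has grown past its
+ * target, consider its oldest pageable page as the victim and
+ * ignore its reference bit so these low-priority pages actually
+ * get reclaimed
+ */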
+ if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
+ vm_object_t bg_m_object = NULL;
+
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);
+
+ bg_m_object = VM_PAGE_OBJECT(m);
+
+ if (!VM_PAGE_PAGEABLE(m)) {
+ /*
+ * This page is on the background queue
+ * but not on a pageable queue. This is
+ * likely a transient state and whoever
+ * took it out of its pageable queue
+ * will likely put it back on a pageable
+ * queue soon but we can't deal with it
+ * at this point, so let's ignore this
+ * page.
+ */
+ } else if (force_anonymous == FALSE || bg_m_object->internal) {
+ ignore_reference = TRUE;
+
+ if (bg_m_object->internal)
+ vm_pageout_considered_bq_internal++;
+ else
+ vm_pageout_considered_bq_external++;
+
+ break;
+ }
+ }
+#endif
+
+ /*
+ * The most eligible pages are ones we paged in speculatively,
+ * but which have not yet been touched.
+ */
+ if (!vm_page_queue_empty(&sq->age_q) && force_anonymous == FALSE) {
+ m = (vm_page_t) vm_page_queue_first(&sq->age_q);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_SPECULATIVE_Q);
+
+ break;
+ }
+ /*
+ * Try a clean-queue inactive page.
+ */
+ if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
+
+ break;
+ }
+
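+ /*
+ * descriptive note: consider stealing anonymous pages once we hold
+ * more of them than the minimum we want to keep resident... force
+ * it if the file cache has dropped below its floor, if we've been
+ * explicitly told to, or if the external inactive pool has become
+ * small relative to both the anonymous pool and the overall
+ * file-backed pool
+ */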
+ grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);
+ inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;
+
+ if ((vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) ||
+ ((inactive_external_count < vm_page_anonymous_count) && (inactive_external_count < (vm_page_pageable_external_count / 3)))) {
+ grab_anonymous = TRUE;
+ anons_grabbed = 0;
+ }
+#if CONFIG_JETSAM
+ /* If the file-backed pool has accumulated
+ * significantly more pages than the jetsam
+ * threshold, prefer to reclaim those
+ * inline to minimise compute overhead of reclaiming
+ * anonymous pages.
+ * This calculation does not account for the CPU local
+ * external page queues, as those are expected to be
+ * much smaller relative to the global pools.
+ */
+ if (grab_anonymous) {
+ if (vm_page_pageable_external_count >
+ vm_page_filecache_min) {
+ if ((vm_page_pageable_external_count *
+ vm_pageout_memorystatus_fb_factor_dr) >
+ (memorystatus_available_pages_critical *
+ vm_pageout_memorystatus_fb_factor_nr)) {
+ grab_anonymous = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_grab_anon_overrides++;
+#endif
+ }
+ }
+#if DEVELOPMENT || DEBUG
+ if (grab_anonymous) {
+ vm_grab_anon_nops++;
+
+ }
+#endif
+ }
+#endif /* CONFIG_JETSAM */
+
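+ /*
+ * descriptive note: if we're not stealing anonymous pages (or we've
+ * hit the per-burst anonymous limit, or none are queued), take the
+ * oldest page from the external inactive queue
+ */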
+ if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
+
+ if ( !vm_page_queue_empty(&vm_page_queue_inactive) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
+ anons_grabbed = 0;
+
+ if (vm_page_pageable_external_count < vm_page_filecache_min) {
+ if ((++reactivated_this_call % 100))
+ goto must_activate_page;
+ /*
+ * steal 1% of the file backed pages even if
+ * we are under the limit that has been set
+ * for a healthy filecache
+ */
+ }
+ break;
+ }
+ }
+ if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) {
+ m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
+ anons_grabbed++;
+
+ break;
+ }
+
+ /*
+ * if we've gotten here, we have no victim page.
+ * if making clean, free the local freed list and return.
+ * if making free, check to see if we've finished balancing the queues
+ * yet; if we haven't, just continue, else panic
+ */
+ vm_page_unlock_queues();
+
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ if (local_freeq) {
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 5);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 5);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ vm_page_lock_queues();
+ delayed_unlock = 1;
+
+ force_anonymous = FALSE;
+
+ if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
+ goto Restart;
+
+ if (!vm_page_queue_empty(&sq->age_q))
+ goto Restart;
+
+ panic("vm_pageout: no victim");
+
+ /* NOTREACHED */
+ }
+ assert(VM_PAGE_PAGEABLE(m));
+ m_object = VM_PAGE_OBJECT(m);
+ force_anonymous = FALSE;
+
+ page_prev_q_state = m->vm_page_q_state;
+ requeue_insert_first = FALSE;
+ /*
+ * we just found this page on one of our queues...
+ * it can't also be on the pageout queue, so safe
+ * to call vm_page_queues_remove
+ */
+ vm_page_queues_remove(m, TRUE);
+
+ assert(!m->laundry);
+ assert(!m->private);
+ assert(!m->fictitious);
+ assert(m_object != kernel_object);
+ assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
+
+
+ if (page_prev_q_state != VM_PAGE_ON_SPECULATIVE_Q &&
+ page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_stats[vm_pageout_stat_now].considered++;
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
+
+ /*
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
+ */
+ if (m_object != object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+ /*
+ * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run... otherwise, we're likely to trip over this
+ * object in the same state as we work our way through
+ * the queue... clumps of pages associated with the same
+ * object are fairly typical on the inactive and active queues
+ */
+ if (!vm_object_lock_try_scan(m_object)) {
+ vm_page_t m_want = NULL;
+
+ vm_pageout_inactive_nolock++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_nolock++;
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ requeue_insert_first = TRUE;
+
+ pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
+ m->reference = FALSE;
+
+ /*
+ * m->object must be stable since we hold the page queues lock...
+ * we can update the scan_collisions field sans the object lock
+ * since it is a separate field and this is the only spot that does
+ * a read-modify-write operation and it is never executed concurrently...
+ * we can asynchronously set this field to 0 when creating a UPL, so it
+ * is possible for the value to be a bit non-deterministic, but that's ok
+ * since it's only used as a hint
+ */
+ m_object->scan_collisions = 1;
+
+ if ( !vm_page_queue_empty(&sq->age_q) )
+ m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
+ else if ( !vm_page_queue_empty(&vm_page_queue_cleaned))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
+ else if ( !vm_page_queue_empty(&vm_page_queue_inactive) &&
+ (anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
+ else if ( !vm_page_queue_empty(&vm_page_queue_anonymous))
+ m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
+
+ /*
+ * this is the next object we're going to be interested in
+ * try to make sure its available after the mutex_yield
+ * returns control
+ */
+ if (m_want)
+ vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
+
+ /*
+ * force us to dump any collected free pages
+ * and to pause before moving on
+ */
+ try_failed = TRUE;
+
+ goto requeue_page;
+ }
+ object = m_object;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+ try_failed = FALSE;
+ }
+ assert(m_object == object);
+ assert(VM_PAGE_OBJECT(m) == m_object);
+
+ if (catch_up_count)
+ catch_up_count--;
+
+ if (m->busy) {
+ if (m->encrypted_cleaning) {
+ /*
+ * ENCRYPTED SWAP:
+ * if this page has already been picked up as
+ * part of a page-out cluster, it will be busy
+ * because it is being encrypted (see
+ * vm_object_upl_request()). But we still
+ * want to demote it from "clean-in-place"
+ * (aka "adjacent") to "clean-and-free" (aka
+ * "target"), so let's ignore its "busy" bit
+ * here and proceed to check for "cleaning" a
+ * little bit below...
+ *
+ * CAUTION CAUTION:
+ * A "busy" page should still be left alone for
+ * most purposes, so we have to be very careful
+ * not to process that page too much.
+ */
+ assert(m->cleaning);
+ goto consider_inactive_page;
+ }
+
+ /*
+ * Somebody is already playing with this page.
+ * Put it back on the appropriate queue
+ *
+ */
+ vm_pageout_inactive_busy++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_busy++;
+
+requeue_page:
+ if (requeue_insert_first)
+ vm_page_enqueue_inactive(m, TRUE);
+ else
+ vm_page_enqueue_inactive(m, FALSE);
+#if CONFIG_BACKGROUND_QUEUE
+ if (ignore_reference == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
+ }
+#endif
+ goto done_with_inactivepage;
+ }
+
+
+ /*
+ * If it's absent, in error or the object is no longer alive,
+ * we can reclaim the page... in the no longer alive case,
+ * there are 2 states the page can be in that preclude us
+ * from reclaiming it - busy or cleaning - that we've already
+ * dealt with
+ */
+ if (m->absent || m->error || !object->alive) {
+
+ if (m->absent)
+ vm_pageout_inactive_absent++;
+ else if (!object->alive)
+ vm_pageout_inactive_notalive++;
+ else
+ vm_pageout_inactive_error++;
+reclaim_page:
+ if (vm_pageout_deadlock_target) {
+ vm_pageout_scan_inactive_throttle_success++;
+ vm_pageout_deadlock_target--;
+ }
+
+ DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+ if (object->internal) {
+ DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+ } else {
+ DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+ }
+ assert(!m->cleaning);
+ assert(!m->laundry);
+
+ m->busy = TRUE;
+
+ /*
+ * remove page from object here since we're already
+ * behind the object lock... defer the rest of the work
+ * we'd normally do in vm_page_free_prepare_object
+ * until 'vm_page_free_list' is called
+ */
+ if (m->tabled)
+ vm_page_remove(m, TRUE);
+
+ assert(m->pageq.next == 0 && m->pageq.prev == 0);
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+
+#if CONFIG_SECLUDED_MEMORY
+ if (page_prev_q_state == VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_freed_from_secluded++;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_pageout_freed_from_speculative++;
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_freed_from_cleaned++;
+ else
+ vm_pageout_freed_from_inactive_clean++;
+
+ if (page_prev_q_state != VM_PAGE_ON_SPECULATIVE_Q &&
+ page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
+
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ }
+ /*
+ * If the object is empty, the page must be reclaimed even
+ * if dirty or used.
+ * If the page belongs to a volatile object, we stick it back
+ * on.
+ */
+ if (object->copy == VM_OBJECT_NULL) {
+ if (object->purgable == VM_PURGABLE_EMPTY) {
+ if (m->pmapped == TRUE) {
+ /* unmap the page */
+ refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ if (m->dirty || m->precious) {
+ /* we saved the cost of cleaning this page ! */
+ vm_page_purged_count++;
+ }
+ goto reclaim_page;
+ }
+
+ if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
+ /*
+ * With the VM compressor, the cost of
+ * reclaiming a page is much lower (no I/O),
+ * so if we find a "volatile" page, it's better
+ * to let it get compressed rather than letting
+ * it occupy a full page until it gets purged.
+ * So no need to check for "volatile" here.
+ */
+ } else if (object->purgable == VM_PURGABLE_VOLATILE) {
+ /*
+ * Avoid cleaning a "volatile" page which might
+ * be purged soon.
+ */
+
+ /* if it's wired, we can't put it on our queue */
+ assert(!VM_PAGE_WIRED(m));
+
+ /* just stick it back on! */
+ reactivated_this_call++;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_volatile_reactivated++;
+
+ goto reactivate_page;
+ }
+ }
+
+consider_inactive_page:
+ if (m->busy) {
+ /*
+ * CAUTION CAUTION:
+ * A "busy" page should always be left alone, except...
+ */
+ if (m->cleaning && m->encrypted_cleaning) {
+ /*
+ * ENCRYPTED_SWAP:
+ * We could get here with a "busy" page
+ * if it's being encrypted during a
+ * "clean-in-place" operation. We'll deal
+ * with it right away by testing if it has been
+ * referenced and either reactivating it or
+ * promoting it from "clean-in-place" to
+ * "clean-and-free".
+ */
+ } else {
+ panic("\"busy\" page considered for pageout\n");
+ }
+ }
+
+ /*
+ * If it's being used, reactivate.
+ * (Fictitious pages are either busy or absent.)
+ * First, update the reference and dirty bits
+ * to make sure the page is unreferenced.
+ */
+ refmod_state = -1;
+
+ if (m->reference == FALSE && m->pmapped == TRUE) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+
+ if (refmod_state & VM_MEM_REFERENCED)
+ m->reference = TRUE;
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+
+ /*
+ * if (m->cleaning && !m->free_when_done)
+ * If already cleaning this page in place and it hasn't
+ * been recently referenced, just pull off the queue.
+ * We can leave the page mapped, and upl_commit_range
+ * will put it on the clean queue.
+ *
+ * note: if m->encrypted_cleaning == TRUE, then
+ * m->cleaning == TRUE
+ * and we'll handle it here
+ *
+ * if (m->free_when_done && !m->cleaning)
+ * an msync INVALIDATE is in progress...
+ * this page has been marked for destruction
+ * after it has been cleaned,
+ * but not yet gathered into a UPL
+ * where 'cleaning' will be set...
+ * just leave it off the paging queues
+ *
+ * if (m->free_when_done && m->cleaning)
+ * an msync INVALIDATE is in progress
+ * and the UPL has already gathered this page...
+ * just leave it off the paging queues
+ */
+
+ /*
+ * page with m->free_when_done and still on the queues means that an
+ * MS_INVALIDATE is in progress on this page... leave it alone
+ */
+ if (m->free_when_done) {
+ goto done_with_inactivepage;
+ }
+
+ /* if cleaning, reactivate if referenced. otherwise, just pull off queue */
+ if (m->cleaning) {
+ if (m->reference == TRUE) {
+ reactivated_this_call++;
+ goto reactivate_page;
+ } else {
+ goto done_with_inactivepage;
+ }
+ }
+
+ if (m->reference || m->dirty) {
+ /* deal with a rogue "reusable" page */
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
+ }
+
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target > 0 &&
+ m_object->eligible_for_secluded &&
+ secluded_aging_policy == SECLUDED_AGING_FIFO) {
+ /*
+ * SECLUDED_AGING_FIFO:
+ * This victim page is eligible for the secluded pool
+ * and we're not aging secluded pages, so let's not
+ * reactivate it if it's been re-referenced.
+ * Later on, we'll move it to the secluded queue
+ * instead of freeing it.
+ */
+ ignore_reference_secluded = TRUE;
+ } else {
+ ignore_reference_secluded = FALSE;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ if (!m->no_cache &&
+#if CONFIG_BACKGROUND_QUEUE
+ ignore_reference == FALSE &&
+#endif
+#if CONFIG_SECLUDED_MEMORY
+ ignore_reference_secluded == FALSE &&
+#endif /* CONFIG_SECLUDED_MEMORY */
+ (m->reference ||
+ (m->xpmapped && !object->internal && (vm_page_xpmapped_external_count < (vm_page_external_count / 4))))) {
+ /*
+ * The page we pulled off the inactive list has
+ * been referenced. It is possible for other
+ * processors to be touching pages faster than we
+ * can clear the referenced bit and traverse the
+ * inactive queue, so we limit the number of
+ * reactivations.
+ */
+ if (++reactivated_this_call >= reactivate_limit) {
+ vm_pageout_reactivation_limit_exceeded++;
+ } else if (catch_up_count) {
+ vm_pageout_catch_ups++;
+ } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+ vm_pageout_inactive_force_reclaim++;
+ } else {
+ uint32_t isinuse;
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reference_reactivated++;
+
+reactivate_page:
+ if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
+ vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+ /*
+ * no explicit mappings of this object exist
+ * and it's not open via the filesystem
+ */
+ vm_page_deactivate(m);
+ vm_pageout_inactive_deactivated++;
+ } else {
+must_activate_page:
+ /*
+ * The page was/is being used, so put back on active list.
+ */
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+ inactive_burst_count = 0;
+ }
+#if CONFIG_BACKGROUND_QUEUE
+ if (ignore_reference == TRUE) {
+ if (m_object->internal)
+ vm_pageout_rejected_bq_internal++;
+ else
+ vm_pageout_rejected_bq_external++;
+ }
+#endif
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reactivated++;
+#if CONFIG_SECLUDED_MEMORY
+ if (page_prev_q_state == VM_PAGE_ON_SECLUDED_Q)
+ vm_pageout_secluded_reactivated++;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ vm_pageout_inactive_used++;
+
+ goto done_with_inactivepage;
+ }
+ /*
+ * Make sure we call pmap_get_refmod() if it
+ * wasn't already called just above, to update
+ * the dirty bit.
+ */
+ if ((refmod_state == -1) && !m->dirty && m->pmapped) {
+ refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ }
+
+ XPR(XPR_VM_PAGEOUT,
+ "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
+ object, m->offset, m, 0,0);
+
+ /*
+ * we've got a candidate page to steal...
+ *
+ * m->dirty is up to date courtesy of the
+ * preceding check for m->reference... if
+ * we get here, then m->reference had to be
+ * FALSE (or possibly "reactivate_limit" was
+ * exceeded), but in either case we called
+ * pmap_get_refmod() and updated both
+ * m->reference and m->dirty
+ *
+ * if it's dirty or precious we need to
+ * see if the target queue is throttled...
+ * if it is, we need to skip over it by moving it back
+ * to the end of the inactive queue
+ */
+
+ inactive_throttled = FALSE;
+
+ if (m->dirty || m->precious) {
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ inactive_throttled = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+ }
+throttle_inactive:
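+ /*
+ * descriptive note: with dynamic paging disabled, a dirty page
+ * from an internal object can't be cleaned right now... park it
+ * on the throttled queue instead of cycling it back through the
+ * active/inactive queues
+ */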
+ if (!VM_DYNAMIC_PAGING_ENABLED() &&
+ object->internal && m->dirty &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_throttled, m,
+ vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_THROTTLED_Q;
+ vm_page_throttled_count++;
+
+ vm_pageout_scan_reclaimed_throttled++;
+
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ }
+ if (inactive_throttled == TRUE) {
+
+ if (object->internal == FALSE) {
+ /*
+ * we need to break up the following potential deadlock case...
+ * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
+ * b) The thread doing the writing is waiting for pages while holding the truncate lock
+ * c) Most of the pages in the inactive queue belong to this file.
+ *
+ * we are potentially in this deadlock because...
+ * a) the external pageout queue is throttled
+ * b) we're done with the active queue and moved on to the inactive queue
+ * c) we've got a dirty external page
+ *
+ * since we don't know the reason for the external pageout queue being throttled we
+ * must suspect that we are deadlocked, so move the current page onto the active queue
+ * in an effort to cause a page from the active queue to 'age' to the inactive queue
+ *
+ * if we don't have jetsam configured (i.e. we have a dynamic pager), set
+ * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
+ * pool the next time we select a victim page... if we can make enough new free pages,
+ * the deadlock will break, the external pageout queue will empty and it will no longer
+ * be throttled
+ *
+ * if we have jetsam configured, keep a count of the pages reactivated this way so
+ * that we can try to find clean pages in the active/inactive queues before
+ * deciding to jetsam a process
+ */
+ vm_pageout_scan_inactive_throttled_external++;
+
+ vm_page_check_pageable_safe(m);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ vm_page_queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
+ m->vm_page_q_state = VM_PAGE_ON_ACTIVE_Q;
+ vm_page_active_count++;
+ vm_page_pageable_external_count++;
+
+ vm_pageout_adjust_io_throttles(iq, eq, FALSE);
+
+#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
+ vm_pageout_inactive_external_forced_reactivate_limit--;
+
+ if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
+ vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
+ /*
+ * Possible deadlock scenario so request jetsam action
+ */
+ assert(object);
+ vm_object_unlock(object);
+ object = VM_OBJECT_NULL;
+ vm_page_unlock_queues();
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
+ vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);
+
+ /* Kill first suitable process */
+ if (memorystatus_kill_on_VM_page_shortage(FALSE) == FALSE) {
+ panic("vm_pageout_scan: Jetsam request failed\n");
+ }
+
+ VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);
+
+ vm_pageout_inactive_external_forced_jetsam_count++;
+ vm_page_lock_queues();
+ delayed_unlock = 1;
+ }
+#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
+ force_anonymous = TRUE;
+#endif
+ inactive_burst_count = 0;
+ goto done_with_inactivepage;
+ } else {
+ vm_pageout_scan_inactive_throttled_internal++;
+
+ goto must_activate_page;
+ }
+ }
+
+ /*
+ * we've got a page that we can steal...
+ * eliminate all mappings and make sure
+ * we have the up-to-date modified state
+ *
+ * if we need to do a pmap_disconnect then we
+ * need to re-evaluate m->dirty since the pmap_disconnect
+ * provides the true state atomically... the
+ * page was still mapped up to the pmap_disconnect
+ * and may have been dirtied at the last microsecond
+ *
+ * Note that if 'pmapped' is FALSE then the page is not
+ * and has not been in any map, so there is no point calling
+ * pmap_disconnect(). m->dirty could have been set in anticipation
+ * of likely usage of the page.
+ */
+ if (m->pmapped == TRUE) {
+ int pmap_options;
+
+ /*
+ * Don't count this page as going into the compressor
+ * if any of these are true:
+ * 1) compressed pager isn't enabled
+ * 2) Freezer enabled device with compressed pager
+ * backend (exclusive use) i.e. most of the VM system
+ * (including vm_pageout_scan) has no knowledge of
+ * the compressor
+ * 3) This page belongs to a file and hence will not be
+ * sent into the compressor
+ */
+ if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
+ object->internal == FALSE) {
+ pmap_options = 0;
+ } else if (m->dirty || m->precious) {
+ /*
+ * VM knows that this page is dirty (or
+ * precious) and needs to be compressed
+ * rather than freed.
+ * Tell the pmap layer to count this page
+ * as "compressed".
+ */
+ pmap_options = PMAP_OPTIONS_COMPRESSOR;
+ } else {
+ /*
+ * VM does not know if the page needs to
+ * be preserved but the pmap layer might tell
+ * us if any mapping has "modified" it.
+ * Let the pmap layer count this page
+ * as compressed if and only if it has been
+ * modified.
+ */
+ pmap_options =
+ PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
+ }
+ refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
+ pmap_options,
+ NULL);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ SET_PAGE_DIRTY(m, FALSE);
+ }
+ }
+ /*
+ * reset our count of pages that have been reclaimed
+ * since the last page was 'stolen'
+ */
+ inactive_reclaim_run = 0;
+
+ /*
+ * If it's clean and not precious, we can free the page.
+ */
+ if (!m->dirty && !m->precious) {
+
+ if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q)
+ vm_pageout_speculative_clean++;
+ else {
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
+ vm_pageout_inactive_anonymous++;
+ else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
+ vm_pageout_cleaned_reclaimed++;
+
+ vm_pageout_inactive_clean++;
+ }
+
+#if CONFIG_SECLUDED_MEMORY
+ if (secluded_for_filecache &&
+ vm_page_secluded_target > 0 &&
+ !m->fictitious &&
+ m_object->eligible_for_secluded &&
+ num_tasks_can_use_secluded_mem == 0 &&
+ (secluded_aging_policy == SECLUDED_AGING_FIFO ||
+ ((secluded_aging_policy ==
+ SECLUDED_AGING_AFTER_INACTIVE) &&
+ (page_prev_q_state != VM_PAGE_ON_SECLUDED_Q)))) {
+ assert(page_prev_q_state != VM_PAGE_ON_SECLUDED_Q);
+ assert(m->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ LCK_MTX_ASSERT(&vm_page_queue_lock,
+ LCK_MTX_ASSERT_OWNED);
+ vm_page_queue_enter(&vm_page_queue_secluded,
+ m,
+ vm_page_t,
+ pageq);
+ m->vm_page_q_state = VM_PAGE_ON_SECLUDED_Q;
+ vm_object_unlock(m_object);
+ object = VM_OBJECT_NULL;
+ vm_page_secluded_count++;
+ vm_page_secluded_count_inuse++;
+ assert(!m_object->internal);
+// vm_page_pageable_external_count++;
+ m = VM_PAGE_NULL;
+ goto done_with_inactivepage;
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ /*
+ * OK, at this point we have found a page we are going to free.
+ */
+#if CONFIG_PHANTOM_CACHE
+ if (!object->internal)
+ vm_phantom_cache_add_ghost(m);
+#endif
+ goto reclaim_page;
+ }
+
+ /*
+ * The page may have been dirtied since the last check
+ * for a throttled target queue (which may have been skipped
+ * if the page was clean then). With the dirty page
+ * disconnected here, we can make one final check.
+ */
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ inactive_throttled = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ inactive_throttled = TRUE;
+ }
+
+ if (inactive_throttled == TRUE)
+ goto throttle_inactive;
+
+#if VM_PRESSURE_EVENTS
+#if CONFIG_JETSAM
+
+ /*
+ * If Jetsam is enabled, then the sending
+ * of memory pressure notifications is handled
+ * from the same thread that takes care of high-water
+ * and other jetsams i.e. the memorystatus_thread.
+ */
+
+#else /* CONFIG_JETSAM */
+
+ vm_pressure_response();
+
+#endif /* CONFIG_JETSAM */
+#endif /* VM_PRESSURE_EVENTS */
+
+ if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q)
+ vm_pageout_inactive_anonymous++;
+ if (object->internal)
+ vm_pageout_inactive_dirty_internal++;
+ else
+ vm_pageout_inactive_dirty_external++;
+
+ /*
+ * do NOT set the pageout bit!
+ * sure, we might need free pages, but this page is going to take time to become free
+ * anyway, so we may as well put it on the clean queue first and take it from there later
+ * if necessary. that way, we'll ensure we don't free up too much. -mj
+ */
+ vm_pageout_cluster(m, FALSE, FALSE);
+
+done_with_inactivepage:
+
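+ /*
+ * descriptive note: periodically drop the page queue lock to flush
+ * the locally collected free pages, give the compactor/swapper a
+ * chance to run, and let other lock waiters make progress
+ */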
+ if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {
+ boolean_t need_delay = TRUE;
+
+ if (object != NULL) {
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ vm_page_unlock_queues();
+
+ if (local_freeq) {
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
+ vm_page_free_count, local_freed, delayed_unlock_limit, 4);
+
+ vm_page_free_list(local_freeq, TRUE);
+
+ VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
+ vm_page_free_count, local_freed, 0, 4);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ need_delay = FALSE;
+ }
+ vm_consider_waking_compactor_swapper();
+
+ vm_page_lock_queues();
+
+ if (need_delay == TRUE)
+ lck_mtx_yield(&vm_page_queue_lock);
+
+ delayed_unlock = 1;
+ }
+ vm_pageout_considered_page++;
+
+ /*
+ * back to top of pageout scan loop
+ */
+ }
+}
+
+
+int vm_page_free_count_init;
+
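+/*
+ * vm_page_free_reserve:
+ *
+ * descriptive note: bump the number of reserved free pages (capped at
+ * a hard limit, with extra headroom when the compressor is present)
+ * and rederive the free-page minimum, target and throttle thresholds
+ * from the remaining pool.
+ */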
+void
+vm_page_free_reserve(
+ int pages)
+{
+ int free_after_reserve;
+
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
+
+ if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
+ else
+ vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
+
+ } else {
+ if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
+ vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
+ else
+ vm_page_free_reserved += pages;
+ }
+ free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;
+
+ vm_page_free_min = vm_page_free_reserved +
+ VM_PAGE_FREE_MIN(free_after_reserve);
+
+ if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
+ vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+
+ vm_page_free_target = vm_page_free_reserved +
+ VM_PAGE_FREE_TARGET(free_after_reserve);
+
+ if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
+ vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+
+ if (vm_page_free_target < vm_page_free_min + 5)
+ vm_page_free_target = vm_page_free_min + 5;
+
+ vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
+}
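+
+/*
+ * Worked sketch of the thresholds computed above (illustrative only; the
+ * VM_PAGE_FREE_MIN/TARGET macros and their limits are platform- and
+ * configuration-dependent): after the reserve is clamped to its limit,
+ *
+ * free_after_reserve = vm_page_free_count_init - vm_page_free_reserved
+ * vm_page_free_min = reserve + VM_PAGE_FREE_MIN(free_after_reserve), clamped
+ * vm_page_free_target = reserve + VM_PAGE_FREE_TARGET(free_after_reserve), clamped,
+ * and forced to at least vm_page_free_min + 5
+ * vm_page_throttle_limit = vm_page_free_target - vm_page_free_target / 2
+ *
+ * so the throttle limit always sits at roughly half of the free target.
+ */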
+
+/*
+ * vm_pageout is the high level pageout daemon.
+ */
+
+void
+vm_pageout_continue(void)
+{
+ DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
+ vm_pageout_scan_event_counter++;
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ vm_pageout_running = TRUE;
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ vm_pageout_scan();
+ /*
+ * we hold both the vm_page_queue_free_lock
+ * and the vm_page_queue_lock at this point
+ */
+ assert(vm_page_free_wanted == 0);
+ assert(vm_page_free_wanted_privileged == 0);
+ assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
+
+ vm_pageout_running = FALSE;
+ if (vm_pageout_waiter) {
+ vm_pageout_waiter = FALSE;
+ thread_wakeup((event_t)&vm_pageout_waiter);
+ }
+
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();
+
+ counter(c_vm_pageout_block++);
+ thread_block((thread_continue_t)vm_pageout_continue);
+ /*NOTREACHED*/
+}
+
+kern_return_t
+vm_pageout_wait(uint64_t deadline)
+{
+ kern_return_t kr;
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
+ for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr); ) {
+ vm_pageout_waiter = TRUE;
+ if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
+ &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
+ (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
+ kr = KERN_OPERATION_TIMED_OUT;
+ }
+ }
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ return (kr);
+}
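+
+/*
+ * Illustrative only (not part of this change): a hypothetical caller that
+ * waits up to 100ms for an in-flight pageout pass to finish. The helper
+ * name and the interval are made up for the example.
+ */
+#if 0
+static kern_return_t
+example_wait_for_pageout_pass(void)
+{
+ uint64_t deadline;
+
+ /* build an absolute deadline 100ms from now */
+ clock_interval_to_deadline(100, 1000 * NSEC_PER_USEC, &deadline);
+
+ /* returns KERN_OPERATION_TIMED_OUT if pageout is still running at the deadline */
+ return vm_pageout_wait(deadline);
+}
+#endif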
+
+
+static void
+vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
+{
+ vm_page_t m = NULL;
+ vm_object_t object;
+ vm_object_offset_t offset;
+ memory_object_t pager;
+
+
+ if (vm_pageout_internal_iothread != THREAD_NULL)
+ current_thread()->options &= ~TH_OPT_VMPRIV;
+
+ vm_page_lockspin_queues();
+
+ while ( !vm_page_queue_empty(&q->pgo_pending) ) {
+
+ q->pgo_busy = TRUE;
+ vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+
+ assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
+ VM_PAGE_CHECK(m);
+ /*
+ * grab a snapshot of the object and offset this
+ * page is tabled in so that we can relookup this
+ * page after we've taken the object lock - these
+ * fields are stable while we hold the page queues lock
+ * but as soon as we drop it, there is nothing to keep
+ * this page in this object... we hold an activity_in_progress
+ * reference on this object which will keep it from terminating
+ */
+ object = VM_PAGE_OBJECT(m);
+ offset = m->offset;
+
+ if (object->object_slid) {
+ panic("slid page %p not allowed on this path\n", m);
+ }
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+
+ vm_page_unlock_queues();
+
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+
+ if (m == NULL ||
+ m->busy || m->cleaning || !m->laundry || (m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
+ /*
+ * it's either the same page that someone else has
+ * started cleaning (or it's finished cleaning or
+ * been put back on the pageout queue), or
+ * the page has been freed or we have found a
+ * new page at this offset... in all of these cases
+ * we merely need to release the activity_in_progress
+ * reference we took when we put the page on the pageout queue
+ */
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+ pager = object->pager;
+
+ if (pager == MEMORY_OBJECT_NULL) {
+ /*
+ * This pager has been destroyed by either
+ * memory_object_destroy or vm_object_destroy, and
+ * so there is nowhere for the page to go.
+ */
+ if (m->free_when_done) {
+ /*
+ * Just free the page... VM_PAGE_FREE takes
+ * care of cleaning up all the state...
+ * including doing the vm_pageout_throttle_up
+ */
+ VM_PAGE_FREE(m);
+ } else {
+ vm_page_lockspin_queues();
+
+ vm_pageout_throttle_up(m);
+ vm_page_activate(m);
+
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ }
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+#if 0
+ /*
+ * we don't hold the page queue lock
+ * so this check isn't safe to make
+ */
+ VM_PAGE_CHECK(m);
+#endif
+ /*
+ * give back the activity_in_progress reference we
+ * took when we queued up this page and replace it
+ * with a paging_in_progress reference that will
+ * also keep the paging offset from changing and
+ * prevent the object from terminating
+ */
+ vm_object_activity_end(object);
+ vm_object_paging_begin(object);
+ vm_object_unlock(object);
+
+ /*
+ * Send the data to the pager.
+ * any pageout clustering happens there
+ */
+ memory_object_data_return(pager,
+ m->offset + object->paging_offset,
+ PAGE_SIZE,
+ NULL,
+ NULL,
+ FALSE,
+ FALSE,
+ 0);
+
+ vm_object_lock(object);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_pageout_io_throttle();
+
+ vm_page_lockspin_queues();
+ }
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+
+ assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
+ vm_page_unlock_queues();
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
+ /*NOTREACHED*/
+}
+
+
+uint32_t vm_compressor_failed;
+
+#define MAX_FREE_BATCH 32
+uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
+ * this thread.
+ */
+uint64_t vm_compressor_thread_runtime;
+
+static void
+vm_pageout_iothread_internal_continue(struct cq *cq)
+{
+ struct vm_pageout_queue *q;
+ vm_page_t m = NULL;
+ boolean_t pgo_draining;
+ vm_page_t local_q;
+ int local_cnt;
+ vm_page_t local_freeq = NULL;
+ int local_freed = 0;
+ int local_batch_size;
+
+
+ KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ q = cq->q;
+ local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 2);
+
+#if RECORD_THE_COMPRESSED_DATA
+ if (q->pgo_laundry)
+ c_compressed_record_init();
+#endif
+ while (TRUE) {
+ int pages_left_on_q = 0;
+
+ local_cnt = 0;
+ local_q = NULL;
+
+ KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ vm_page_lock_queues();
+
+ KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);
+
+ while ( !vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
+
+ vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ assert(m->vm_page_q_state == VM_PAGE_ON_PAGEOUT_Q);
+ VM_PAGE_CHECK(m);
+
+ m->vm_page_q_state = VM_PAGE_NOT_ON_Q;
+ VM_PAGE_ZERO_PAGEQ_ENTRY(m);
+ m->laundry = FALSE;
+
+ m->snext = local_q;
+ local_q = m;
+ local_cnt++;
+ }
+ if (local_q == NULL)
+ break;
+
+ q->pgo_busy = TRUE;
+
+ if ((pgo_draining = q->pgo_draining) == FALSE) {
+ vm_pageout_throttle_up_batch(q, local_cnt);
+ pages_left_on_q = q->pgo_laundry;
+ } else
+ pages_left_on_q = q->pgo_laundry - local_cnt;
+
+ vm_page_unlock_queues();
+
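+ /*
+ * If enough laundry remains to fill another batch, wake the next
+ * compressor thread (cq->id + 1); each compressor thread waits on
+ * its own event, offset by its id from &q->pgo_pending.
+ */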
+#if !RECORD_THE_COMPRESSED_DATA
+ if (pages_left_on_q >= local_batch_size && cq->id < (vm_compressor_thread_count - 1))
+ thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
+#endif
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);
+
+ while (local_q) {
+
+ KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);
+
+ m = local_q;
+ local_q = m->snext;
+ m->snext = NULL;
+
+ if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m, FALSE) == KERN_SUCCESS) {
+
+ m->snext = local_freeq;
+ local_freeq = m;
+ local_freed++;
+
+ if (local_freed >= MAX_FREE_BATCH) {
+
+ vm_page_free_list(local_freeq, TRUE);
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ }
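+ /*
+ * Without jetsam, don't let the compressor itself drag the free
+ * list below its reserve: push out our local free list first, and
+ * if the count is still short, wait as a privileged waiter (waking
+ * the pageout daemon if we're the first one) until pages are freed.
+ */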
+#if !CONFIG_JETSAM
+ while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+ kern_return_t wait_result;
+ int need_wakeup = 0;
+
+ if (local_freeq) {
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+
+ continue;
+ }
+ lck_mtx_lock_spin(&vm_page_queue_free_lock);
+
+ if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
+
+ if (vm_page_free_wanted_privileged++ == 0)
+ need_wakeup = 1;
+ wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);
+
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+
+ if (need_wakeup)
+ thread_wakeup((event_t)&vm_page_free_wanted);
+
+ if (wait_result == THREAD_WAITING)
+ thread_block(THREAD_CONTINUE_NULL);
+ } else
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+#endif
+ }
+ if (local_freeq) {
+ vm_page_free_list(local_freeq, TRUE);
+
+ local_freeq = NULL;
+ local_freed = 0;
+ }
+ if (pgo_draining == TRUE) {
+ vm_page_lockspin_queues();
+ vm_pageout_throttle_up_batch(q, local_cnt);
+ vm_page_unlock_queues();
+ }
+ }
+ KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ /*
+ * queue lock is held and our q is empty
+ */
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+
+ assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
+ vm_page_unlock_queues();
+
+ if (__improbable(vm_compressor_time_thread)) {
+ vm_compressor_thread_runtime = thread_get_runtime_self();
+ }
+
+ KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
+ /*NOTREACHED*/
+}
+
+
+
+static void
+vm_pageout_immediate(vm_page_t m, boolean_t object_locked_by_caller)
+{
+ assert(vm_pageout_immediate_scratch_buf);
+
+ if (vm_pageout_compress_page(&vm_pageout_immediate_chead, vm_pageout_immediate_scratch_buf, m, object_locked_by_caller) == KERN_SUCCESS) {
+
+ vm_page_free_prepare_object(m, TRUE);
+ vm_page_release(m, TRUE);
+ }
+}
+
+
+kern_return_t
+vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m, boolean_t object_locked_by_caller)
+{
+ vm_object_t object;
+ memory_object_t pager;
+ int compressed_count_delta;
+ kern_return_t retval;
+
+ object = VM_PAGE_OBJECT(m);
+
+ if (object->object_slid) {
+ panic("slid page %p not allowed on this path\n", m);
+ }
+ assert(!m->free_when_done);
+ assert(!m->laundry);
+
+ pager = object->pager;
+
+ if (object_locked_by_caller == FALSE && (!object->pager_initialized || pager == MEMORY_OBJECT_NULL)) {
+
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);
+
+ vm_object_lock(object);
+
+ /*
+ * If there is no memory object for the page, create
+ * one and hand it to the compression pager.
+ */
+
+ if (!object->pager_initialized)
+ vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
+ if (!object->pager_initialized)
+ vm_object_compressor_pager_create(object);
+
+ pager = object->pager;
+
+ if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
+ /*
+ * Still no pager for the object,
+ * or the pager has been destroyed.
+ * Reactivate the page.
+ *
+ * Should only happen if there is no
+ * compression pager
+ */
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lockspin_queues();
+ vm_page_activate(m);
+ vm_pageout_dirty_no_pager++;
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+
+ return KERN_FAILURE;
+ }
+ vm_object_unlock(object);
+
+ KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
+ }
+ assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
+
+ if (object_locked_by_caller == FALSE)
+ assert(object->activity_in_progress > 0);
+
+ retval = vm_compressor_pager_put(
+ pager,
+ m->offset + object->paging_offset,
+ VM_PAGE_GET_PHYS_PAGE(m),
+ current_chead,
+ scratch_buf,
+ &compressed_count_delta);
+
+ if (object_locked_by_caller == FALSE) {
+ vm_object_lock(object);
+
+ assert(object->activity_in_progress > 0);
+ assert(VM_PAGE_OBJECT(m) == object);
+ }
+
+ vm_compressor_pager_count(pager,
+ compressed_count_delta,
+ FALSE, /* shared_lock */
+ object);
+
+ assert( !VM_PAGE_WIRED(m));
+
+ if (retval == KERN_SUCCESS) {
+ /*
+ * If the object is purgeable, its owner's
+ * purgeable ledgers will be updated in
+ * vm_page_remove() but the page still
+ * contributes to the owner's memory footprint,
+ * so account for it as such.
+ */
+ if (object->purgable != VM_PURGABLE_DENY &&
+ object->vo_purgeable_owner != NULL) {
+ /* one more compressed purgeable page */
+ vm_purgeable_compressed_update(object,
+ +1);
+ }
+ VM_STAT_INCR(compressions);
+
+ if (m->tabled)
+ vm_page_remove(m, TRUE);
+
+ } else {
+ PAGE_WAKEUP_DONE(m);
+
+ vm_page_lockspin_queues();
+
+ vm_page_activate(m);
+ vm_compressor_failed++;
+
+ vm_page_unlock_queues();
+ }
+ if (object_locked_by_caller == FALSE) {
+ vm_object_activity_end(object);
+ vm_object_unlock(object);
+ }
+ return retval;
+}
+
+
+static void
+vm_pageout_adjust_io_throttles(struct vm_pageout_queue *iq, struct vm_pageout_queue *eq, boolean_t req_lowpriority)
+{
+ uint32_t policy;
+ boolean_t set_iq = FALSE;
+ boolean_t set_eq = FALSE;
+
+ if (hibernate_cleaning_in_progress == TRUE)
+ req_lowpriority = FALSE;
+
+ if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority)
+ set_eq = TRUE;
+
+ if (set_iq == TRUE || set_eq == TRUE) {
+
+ vm_page_unlock_queues();
+
+ if (req_lowpriority == TRUE) {
+ policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
+ DTRACE_VM(laundrythrottle);
+ } else {
+ policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
+ DTRACE_VM(laundryunthrottle);
+ }
+ if (set_iq == TRUE) {
+ proc_set_thread_policy_with_tid(kernel_task, iq->pgo_tid,
+ TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+
+ iq->pgo_lowpriority = req_lowpriority;
+ }
+ if (set_eq == TRUE) {
+ proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
+ TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);
+
+ eq->pgo_lowpriority = req_lowpriority;
+ }
+ vm_page_lock_queues();
+ }
+}
+
+
+static void
+vm_pageout_iothread_external(void)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);
+
+ proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
+ TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
+
+ vm_page_lock_queues();
+
+ vm_pageout_queue_external.pgo_tid = self->thread_id;
+ vm_pageout_queue_external.pgo_lowpriority = TRUE;
+ vm_pageout_queue_external.pgo_inited = TRUE;
+
+ vm_page_unlock_queues();
+
+ vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
+
+ /*NOTREACHED*/
+}
+
+
+static void
+vm_pageout_iothread_internal(struct cq *cq)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ vm_page_lock_queues();
+
+ vm_pageout_queue_internal.pgo_tid = self->thread_id;
+ vm_pageout_queue_internal.pgo_lowpriority = TRUE;
+ vm_pageout_queue_internal.pgo_inited = TRUE;
+
+ vm_page_unlock_queues();
+
+ if (vm_restricted_to_single_processor == TRUE)
+ thread_vm_bind_group_add();
+
+ vm_pageout_iothread_internal_continue(cq);
+
+ /*NOTREACHED*/
+}
+
+kern_return_t
+vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
+{
+ if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
+ return KERN_SUCCESS;
+ } else {
+ return KERN_FAILURE; /* Already set */
+ }
+}
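+
+/*
+ * Illustrative only (not part of this change): a hypothetical client
+ * registering its reclaim hook. The callout is handed a "collect"
+ * argument and should return TRUE if it freed a large zone element
+ * (see how vm_pageout_garbage_collect() uses the result below). The
+ * names here are made up for the example.
+ */
+#if 0
+static boolean_t
+example_buffer_cache_collect(int collect)
+{
+ /* drop cached buffers here; report whether a large element was freed */
+ (void) collect;
+ return FALSE;
+}
+
+static void
+example_register_collector(void)
+{
+ if (vm_set_buffer_cleanup_callout(example_buffer_cache_collect) != KERN_SUCCESS) {
+ /* another callout was already registered; only one is allowed */
+ }
+}
+#endif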
+
+extern boolean_t memorystatus_manual_testing_on;
+extern unsigned int memorystatus_level;
+
+
+#if VM_PRESSURE_EVENTS
+
+boolean_t vm_pressure_events_enabled = FALSE;
+
+void
+vm_pressure_response(void)
+{
+
+ vm_pressure_level_t old_level = kVMPressureNormal;
+ int new_level = -1;
+ unsigned int total_pages;
+ uint64_t available_memory = 0;
+
+ if (vm_pressure_events_enabled == FALSE)
+ return;
+
+
+ available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
+
+
+ total_pages = (unsigned int) atop_64(max_mem);
+#if CONFIG_SECLUDED_MEMORY
+ total_pages -= vm_page_secluded_count;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);
+
+ if (memorystatus_manual_testing_on) {
+ return;
+ }
+
+ old_level = memorystatus_vm_pressure_level;
+
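+ /*
+ * Level transitions evaluated by the switch below:
+ * normal -> warning or critical
+ * warning/urgent -> normal or critical
+ * critical -> normal or warning
+ * as decided by the VM_PRESSURE_*_TO_*() threshold checks.
+ */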
+ switch (memorystatus_vm_pressure_level) {
+
+ case kVMPressureNormal:
+ {
+ if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+ new_level = kVMPressureCritical;
+ } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
+ new_level = kVMPressureWarning;
+ }
+ break;
+ }
+
+ case kVMPressureWarning:
+ case kVMPressureUrgent:
+ {
+ if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+ new_level = kVMPressureNormal;
+ } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
+ new_level = kVMPressureCritical;
+ }
+ break;
+ }
+
+ case kVMPressureCritical:
+ {
+ if (VM_PRESSURE_WARNING_TO_NORMAL()) {
+ new_level = kVMPressureNormal;
+ } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
+ new_level = kVMPressureWarning;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ if (new_level != -1) {
+ memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;
+
+ if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != new_level)) {
+ if (vm_pressure_thread_running == FALSE) {
+ thread_wakeup(&vm_pressure_thread);
+ }
+
+ if (old_level != new_level) {
+ thread_wakeup(&vm_pressure_changed);
+ }
+ }
+ }
+
+}
+#endif /* VM_PRESSURE_EVENTS */
+
+kern_return_t
+mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) {
+
+#if !VM_PRESSURE_EVENTS
+
+ return KERN_FAILURE;
+
+#else /* VM_PRESSURE_EVENTS */
+
+ kern_return_t kr = KERN_SUCCESS;
+
+ if (pressure_level != NULL) {
+
+ vm_pressure_level_t old_level = memorystatus_vm_pressure_level;
+
+ if (wait_for_pressure == TRUE) {
+ wait_result_t wr = 0;
+
+ while (old_level == *pressure_level) {
+ wr = assert_wait((event_t) &vm_pressure_changed,
+ THREAD_INTERRUPTIBLE);
+ if (wr == THREAD_WAITING) {
+ wr = thread_block(THREAD_CONTINUE_NULL);
+ }
+ if (wr == THREAD_INTERRUPTED) {
+ return KERN_ABORTED;
+ }
+ if (wr == THREAD_AWAKENED) {
+
+ old_level = memorystatus_vm_pressure_level;
+
+ if (old_level != *pressure_level) {
+ break;
+ }
+ }
+ }
+ }
+
+ *pressure_level = old_level;
+ kr = KERN_SUCCESS;
+ } else {
+ kr = KERN_INVALID_ARGUMENT;
+ }
+
+ return kr;
+#endif /* VM_PRESSURE_EVENTS */
+}
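+
+/*
+ * Illustrative only (not part of this change): a hypothetical caller,
+ * sketched in kernel C, that blocks until the pressure level moves away
+ * from the value it last observed, then reacts to the new level.
+ */
+#if 0
+static void
+example_watch_pressure_level(void)
+{
+ unsigned int level = kVMPressureNormal;
+
+ for (;;) {
+ /* blocks (wait_for_pressure == TRUE) until the level changes */
+ if (mach_vm_pressure_level_monitor(TRUE, &level) != KERN_SUCCESS)
+ break; /* KERN_ABORTED if the wait was interrupted */
+ /* "level" now holds the new memorystatus_vm_pressure_level */
+ }
+}
+#endif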
+
+#if VM_PRESSURE_EVENTS
+void
+vm_pressure_thread(void) {
+ static boolean_t thread_initialized = FALSE;
+
+ if (thread_initialized == TRUE) {
+ vm_pressure_thread_running = TRUE;
+ consider_vm_pressure_events();
+ vm_pressure_thread_running = FALSE;
+ }
+
+ thread_initialized = TRUE;
+ assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
+ thread_block((thread_continue_t)vm_pressure_thread);
+}
+#endif /* VM_PRESSURE_EVENTS */
+
+
+uint32_t vm_pageout_considered_page_last = 0;
+
+/*
+ * called once per-second via "compute_averages"
+ */
+void
+compute_pageout_gc_throttle(__unused void *arg)
+{
+ if (vm_pageout_considered_page != vm_pageout_considered_page_last) {
+
+ vm_pageout_considered_page_last = vm_pageout_considered_page;
+
+ thread_wakeup((event_t) &vm_pageout_garbage_collect);
+ }
+}
+
+
+static void
+vm_pageout_garbage_collect(int collect)
+{
+
+ if (collect) {
+ boolean_t buf_large_zfree = FALSE;
+ boolean_t first_try = TRUE;
+
+ stack_collect();
+
+ consider_machine_collect();
+ m_drain();
+
+ do {
+ if (consider_buffer_cache_collect != NULL) {
+ buf_large_zfree = (*consider_buffer_cache_collect)(0);
+ }
+ if (first_try == TRUE || buf_large_zfree == TRUE) {
+ /*
+ * consider_zone_gc should be last, because the other operations
+ * might return memory to zones.
+ */
+ consider_zone_gc();
+ }
+ first_try = FALSE;
+
+ } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);
+
+ consider_machine_adjust();
+ }
+ assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
+
+ thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
+ /*NOTREACHED*/
+}
+
+
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+
+#define FBDP_TEST_COLLAPSE_COMPRESSOR 0
+#define FBDP_TEST_WIRE_AND_EXTRACT 0
+#define FBDP_TEST_PAGE_WIRE_OVERFLOW 0
+
+#if FBDP_TEST_COLLAPSE_COMPRESSOR
+extern boolean_t vm_object_collapse_compressor_allowed;
+#include <IOKit/IOLib.h>
+#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
+
+#if FBDP_TEST_WIRE_AND_EXTRACT
+extern ledger_template_t task_ledger_template;
+#include <mach/mach_vm.h>
+extern ppnum_t vm_map_get_phys_page(vm_map_t map,
+ vm_offset_t offset);
+#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
+
+
+void
+vm_set_restrictions()
+{
+ host_basic_info_data_t hinfo;
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+
+#define BSD_HOST 1
+ host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+ assert(hinfo.max_cpus > 0);
+
+ if (hinfo.max_cpus <= 3) {
+ /*
+ * on systems with a limited number of CPUs, bind the
+ * 4 major threads that can free memory and that tend to use
+ * a fair bit of CPU under pressured conditions to a single processor.
+ * This ensures that these threads don't hog all of the available CPUs
+ * (important for camera launch), while allowing them to run independently
+ * with respect to locks... the 4 threads are
+ * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
+ * vm_compressor_swap_trigger_thread (minor and major compactions),
+ * and memorystatus_thread (jetsams).
+ *
+ * the first time each of these threads runs, it is responsible for checking
+ * the state of vm_restricted_to_single_processor, and if TRUE it calls
+ * thread_vm_bind_group_add()... someday this should be replaced with a group
+ * scheduling mechanism and KPI.
+ */
+ vm_restricted_to_single_processor = TRUE;
+ }
+}
+
+
+void
+vm_pageout(void)
+{
+ thread_t self = current_thread();
+ thread_t thread;
+ kern_return_t result;
+ spl_t s;
+
+ /*
+ * Set thread privileges.
+ */
+ s = splsched();
+
+ thread_lock(self);
+ self->options |= TH_OPT_VMPRIV;
+ sched_set_thread_base_priority(self, BASEPRI_PREEMPT - 1);
+ thread_unlock(self);
+
+ if (!self->reserved_stack)
+ self->reserved_stack = self->kernel_stack;
+
+ if (vm_restricted_to_single_processor == TRUE)
+ thread_vm_bind_group_add();
+
+ splx(s);
+
+ /*
+ * Initialize some paging parameters.
+ */
+
+ if (vm_pageout_swap_wait == 0)
+ vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
+
+ if (vm_pageout_idle_wait == 0)
+ vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
+
+ if (vm_pageout_burst_wait == 0)
+ vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
+
+ if (vm_pageout_empty_wait == 0)
+ vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+
+ if (vm_pageout_deadlock_wait == 0)
+ vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+
+ if (vm_pageout_deadlock_relief == 0)
+ vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+
+ if (vm_pageout_inactive_relief == 0)
+ vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
+
+ if (vm_pageout_burst_active_throttle == 0)
+ vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
+
+ if (vm_pageout_burst_inactive_throttle == 0)
+ vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+
+ /*
+ * Set kernel task to low backing store privileged
+ * status
+ */
+ task_lock(kernel_task);
+ kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
+ task_unlock(kernel_task);
+
+ vm_page_free_count_init = vm_page_free_count;
+
+ /*
+ * even if we've already called vm_page_free_reserve
+ * call it again here to ensure that the targets are
+ * accurately calculated (it uses vm_page_free_count_init)...
+ * calling it with an arg of 0 will not change the reserve
+ * but will re-calculate free_min and free_target
+ */
+ if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
+ vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
+ } else
+ vm_page_free_reserve(0);
+
+
+ vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
+ vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+ vm_pageout_queue_external.pgo_laundry = 0;
+ vm_pageout_queue_external.pgo_idle = FALSE;
+ vm_pageout_queue_external.pgo_busy = FALSE;
+ vm_pageout_queue_external.pgo_throttled = FALSE;
+ vm_pageout_queue_external.pgo_draining = FALSE;
+ vm_pageout_queue_external.pgo_lowpriority = FALSE;
+ vm_pageout_queue_external.pgo_tid = -1;
+ vm_pageout_queue_external.pgo_inited = FALSE;
+
+ vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
+ vm_pageout_queue_internal.pgo_maxlaundry = 0;
+ vm_pageout_queue_internal.pgo_laundry = 0;
+ vm_pageout_queue_internal.pgo_idle = FALSE;
+ vm_pageout_queue_internal.pgo_busy = FALSE;
+ vm_pageout_queue_internal.pgo_throttled = FALSE;
+ vm_pageout_queue_internal.pgo_draining = FALSE;
+ vm_pageout_queue_internal.pgo_lowpriority = FALSE;
+ vm_pageout_queue_internal.pgo_tid = -1;
+ vm_pageout_queue_internal.pgo_inited = FALSE;
+
+ /* internal pageout thread started when default pager registered first time */
+ /* external pageout and garbage collection threads started here */
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
+ BASEPRI_PREEMPT - 1,
+ &vm_pageout_external_iothread);
+ if (result != KERN_SUCCESS)
+ panic("vm_pageout_iothread_external: create failed");
+
+ thread_deallocate(vm_pageout_external_iothread);
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+ if (result != KERN_SUCCESS)
+ panic("vm_pageout_garbage_collect: create failed");
+
+ thread_deallocate(thread);
+
+#if VM_PRESSURE_EVENTS
+ result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
+ BASEPRI_DEFAULT,
+ &thread);
+
+ if (result != KERN_SUCCESS)
+ panic("vm_pressure_thread: create failed");
+
+ thread_deallocate(thread);
+#endif
+
+ vm_object_reaper_init();
+
+
+ bzero(&vm_config, sizeof(vm_config));
+
+ switch(vm_compressor_mode) {
+
+ case VM_PAGER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
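+ /* FALLTHROUGH */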
+
+ case VM_PAGER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ break;
+
+ case VM_PAGER_FREEZER_DEFAULT:
+ printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
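+ /* FALLTHROUGH */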
+
+ case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ break;
+
+ case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
+ vm_config.compressor_is_present = TRUE;
+ vm_config.swap_is_present = TRUE;
+ vm_config.compressor_is_active = TRUE;
+ vm_config.freezer_swap_is_active = TRUE;
+ break;
+
+ case VM_PAGER_NOT_CONFIGURED:
+ break;
+
+ default:
+ printf("unknown compressor mode - %x\n", vm_compressor_mode);
+ break;
+ }
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT)
+ vm_compressor_pager_init();
+
+#if VM_PRESSURE_EVENTS
+ vm_pressure_events_enabled = TRUE;
+#endif /* VM_PRESSURE_EVENTS */
+
+#if CONFIG_PHANTOM_CACHE
+ vm_phantom_cache_init();
+#endif
+#if VM_PAGE_BUCKETS_CHECK
+#if VM_PAGE_FAKE_BUCKETS
+ printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
+ (uint64_t) vm_page_fake_buckets_start,
+ (uint64_t) vm_page_fake_buckets_end);
+ pmap_protect(kernel_pmap,
+ vm_page_fake_buckets_start,
+ vm_page_fake_buckets_end,
+ VM_PROT_READ);
+// *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
+#endif /* VM_PAGE_FAKE_BUCKETS */
+#endif /* VM_PAGE_BUCKETS_CHECK */
+
+#if VM_OBJECT_TRACKING
+ vm_object_tracking_init();
+#endif /* VM_OBJECT_TRACKING */
+
+
+#if FBDP_TEST_COLLAPSE_COMPRESSOR
+ vm_object_size_t backing_size, top_size;
+ vm_object_t backing_object, top_object;
+ vm_map_offset_t backing_offset, top_offset;
+ unsigned char *backing_address, *top_address;
+ kern_return_t kr;
+
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR:\n");
+
+ /* create backing object */
+ backing_size = 15 * PAGE_SIZE;
+ backing_object = vm_object_allocate(backing_size);
+ assert(backing_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
+ backing_object);
+ /* map backing object */
+ backing_offset = 0;
+ kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
+ VM_FLAGS_ANYWHERE, backing_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ backing_address = (unsigned char *) backing_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped backing object %p at 0x%llx\n",
+ backing_object, (uint64_t) backing_offset);
+ /* populate with pages to be compressed in backing object */
+ backing_address[0x1*PAGE_SIZE] = 0xB1;
+ backing_address[0x4*PAGE_SIZE] = 0xB4;
+ backing_address[0x7*PAGE_SIZE] = 0xB7;
+ backing_address[0xa*PAGE_SIZE] = 0xBA;
+ backing_address[0xd*PAGE_SIZE] = 0xBD;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "backing_object %p\n", backing_object);
+ /* compress backing object */
+ vm_object_pageout(backing_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
+ backing_object);
+ /* wait for all the pages to be gone */
+ while (*(volatile int *)&backing_object->resident_page_count != 0)
+ IODelay(10);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
+ backing_object);
+ /* populate with pages to be resident in backing object */
+ backing_address[0x0*PAGE_SIZE] = 0xB0;
+ backing_address[0x3*PAGE_SIZE] = 0xB3;
+ backing_address[0x6*PAGE_SIZE] = 0xB6;
+ backing_address[0x9*PAGE_SIZE] = 0xB9;
+ backing_address[0xc*PAGE_SIZE] = 0xBC;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "backing_object %p\n", backing_object);
+ /* leave the other pages absent */
+ /* mess with the paging_offset of the backing_object */
+ assert(backing_object->paging_offset == 0);
+ backing_object->paging_offset = 0x3000;
+
+ /* create top object */
+ top_size = 9 * PAGE_SIZE;
+ top_object = vm_object_allocate(top_size);
+ assert(top_object != VM_OBJECT_NULL);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
+ top_object);
+ /* map top object */
+ top_offset = 0;
+ kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
+ VM_FLAGS_ANYWHERE, top_object, 0, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ top_address = (unsigned char *) top_offset;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "mapped top object %p at 0x%llx\n",
+ top_object, (uint64_t) top_offset);
+ /* populate with pages to be compressed in top object */
+ top_address[0x3*PAGE_SIZE] = 0xA3;
+ top_address[0x4*PAGE_SIZE] = 0xA4;
+ top_address[0x5*PAGE_SIZE] = 0xA5;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be compressed in "
+ "top_object %p\n", top_object);
+ /* compress top object */
+ vm_object_pageout(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
+ top_object);
+ /* wait for all the pages to be gone */
+ while (*(volatile int *)&top_object->resident_page_count != 0)
+ IODelay(10);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
+ top_object);
+ /* populate with pages to be resident in top object */
+ top_address[0x0*PAGE_SIZE] = 0xA0;
+ top_address[0x1*PAGE_SIZE] = 0xA1;
+ top_address[0x2*PAGE_SIZE] = 0xA2;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "populated pages to be resident in "
+ "top_object %p\n", top_object);
+ /* leave the other pages absent */
+
+ /* link the 2 objects */
+ vm_object_reference(backing_object);
+ top_object->shadow = backing_object;
+ top_object->vo_shadow_offset = 0x3000;
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
+ top_object, backing_object);
+
+ /* unmap backing object */
+ vm_map_remove(kernel_map,
+ backing_offset,
+ backing_offset + backing_size,
+ 0);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "unmapped backing_object %p [0x%llx:0x%llx]\n",
+ backing_object,
+ (uint64_t) backing_offset,
+ (uint64_t) (backing_offset + backing_size));
+
+ /* collapse */
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
+ vm_object_lock(top_object);
+ vm_object_collapse(top_object, 0, FALSE);
+ vm_object_unlock(top_object);
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
+
+ /* did it work? */
+ if (top_object->shadow != VM_OBJECT_NULL) {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ if (vm_object_collapse_compressor_allowed) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ }
+ } else {
+ /* check the contents of the mapping */
+ unsigned char expect[9] =
+ { 0xA0, 0xA1, 0xA2, /* resident in top */
+ 0xA3, 0xA4, 0xA5, /* compressed in top */
+ 0xB9, /* resident in backing + shadow_offset */
+ 0xBD, /* compressed in backing + shadow_offset + paging_offset */
+ 0x00 }; /* absent in both */
+ unsigned char actual[9];
+ unsigned int i, errors;
+
+ errors = 0;
+ for (i = 0; i < sizeof (actual); i++) {
+ actual[i] = (unsigned char) top_address[i*PAGE_SIZE];
+ if (actual[i] != expect[i]) {
+ errors++;
+ }
+ }
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
+ "actual [%x %x %x %x %x %x %x %x %x] "
+ "expect [%x %x %x %x %x %x %x %x %x] "
+ "%d errors\n",
+ actual[0], actual[1], actual[2], actual[3],
+ actual[4], actual[5], actual[6], actual[7],
+ actual[8],
+ expect[0], expect[1], expect[2], expect[3],
+ expect[4], expect[5], expect[6], expect[7],
+ expect[8],
+ errors);
+ if (errors) {
+ panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
+ } else {
+ printf("FBDP_TEST_COLLAPSE_COMPRESSOR: PASS\n");
+ }
+ }
+#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
+
+#if FBDP_TEST_WIRE_AND_EXTRACT
+ ledger_t ledger;
+ vm_map_t user_map, wire_map;
+ mach_vm_address_t user_addr, wire_addr;
+ mach_vm_size_t user_size, wire_size;
+ mach_vm_offset_t cur_offset;
+ vm_prot_t cur_prot, max_prot;
+ ppnum_t user_ppnum, wire_ppnum;
+ kern_return_t kr;
+
+ ledger = ledger_instantiate(task_ledger_template,
+ LEDGER_CREATE_ACTIVE_ENTRIES);
+ user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT),
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ wire_map = vm_map_create(NULL,
+ 0x100000000ULL,
+ 0x200000000ULL,
+ TRUE);
+ user_addr = 0;
+ user_size = 0x10000;
+ kr = mach_vm_allocate(user_map,
+ &user_addr,
+ user_size,
+ VM_FLAGS_ANYWHERE);
+ assert(kr == KERN_SUCCESS);
+ wire_addr = 0;
+ wire_size = user_size;
+ kr = mach_vm_remap(wire_map,
+ &wire_addr,
+ wire_size,
+ 0,
+ VM_FLAGS_ANYWHERE,
+ user_map,
+ user_addr,
+ FALSE,
+ &cur_prot,
+ &max_prot,
+ VM_INHERIT_NONE);
+ assert(kr == KERN_SUCCESS);
+ for (cur_offset = 0;
+ cur_offset < wire_size;
+ cur_offset += PAGE_SIZE) {
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ user_ppnum = vm_map_get_phys_page(user_map,
+ user_addr + cur_offset);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+ }
+ cur_offset -= PAGE_SIZE;
+ kr = vm_map_wire_and_extract(wire_map,
+ wire_addr + cur_offset,
+ VM_PROT_DEFAULT,
+ TRUE,
+ &wire_ppnum);
+ assert(kr == KERN_SUCCESS);
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
+ "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
+ kr,
+ user_map, user_addr + cur_offset, user_ppnum,
+ wire_map, wire_addr + cur_offset, wire_ppnum);
+ if (kr != KERN_SUCCESS ||
+ wire_ppnum == 0 ||
+ wire_ppnum != user_ppnum) {
+ panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
+ }
+
+ printf("FBDP_TEST_WIRE_AND_EXTRACT: PASS\n");
+#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
+
+#if FBDP_TEST_PAGE_WIRE_OVERFLOW
+ vm_object_t fbdp_object;
+ vm_page_t fbdp_page;
+
+ printf("FBDP_TEST_PAGE_WIRE_OVERFLOW: starting...\n");
+
+ fbdp_object = vm_object_allocate(PAGE_SIZE);
+ vm_object_lock(fbdp_object);
+ fbdp_page = vm_page_alloc(fbdp_object, 0x0);
+ vm_page_lock_queues();
+ do {
+ vm_page_wire(fbdp_page, 1, FALSE);
+ } while (fbdp_page->wire_count != 0);
+ vm_page_unlock_queues();
+ vm_object_unlock(fbdp_object);
+ panic("FBDP(%p,%p): wire_count overflow not detected\n",
+ fbdp_object, fbdp_page);
+#endif /* FBDP_TEST_PAGE_WIRE_OVERFLOW */
+
+ vm_pageout_continue();
+
+ /*
+ * Unreached code!
+ *
+ * The vm_pageout_continue() call above never returns, so the code below is never
+ * executed. We take advantage of this to declare several DTrace VM related probe
+ * points that our kernel doesn't have an analog for. These are probe points that
+ * exist in Solaris and are in the DTrace documentation, so people may have written
+ * scripts that use them. Declaring the probe points here means their scripts will
+ * compile and execute which we want for portability of the scripts, but since this
+ * section of code is never reached, the probe points will simply never fire. Yes,
+ * this is basically a hack. The problem is the DTrace probe points were chosen with
+ * Solaris specific VM events in mind, not portability to different VM implementations.
+ */
+
+ DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
+ /*NOTREACHED*/
+}
+
+
+
+int vm_compressor_thread_count = 2;
+
+kern_return_t
+vm_pageout_internal_start(void)
+{
+ kern_return_t result;
+ int i;
+ host_basic_info_data_t hinfo;
+
+ assert (VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+#define BSD_HOST 1
+ host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
+
+ assert(hinfo.max_cpus > 0);
+
+ if (vm_compressor_thread_count >= hinfo.max_cpus)
+ vm_compressor_thread_count = hinfo.max_cpus - 1;
+ if (vm_compressor_thread_count <= 0)
+ vm_compressor_thread_count = 1;
+ else if (vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT)
+ vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
+
+ if (vm_compressor_immediate_preferred == TRUE) {
+ vm_pageout_immediate_chead = NULL;
+ vm_pageout_immediate_scratch_buf = kalloc(vm_compressor_get_encode_scratch_size());
+
+ vm_compressor_thread_count = 1;
+ }
+
+ vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
+
+ for (i = 0; i < vm_compressor_thread_count; i++) {
+ ciq[i].id = i;
+ ciq[i].q = &vm_pageout_queue_internal;
+ ciq[i].current_chead = NULL;
+ ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);
+
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
+
+ if (result == KERN_SUCCESS)
+ thread_deallocate(vm_pageout_internal_iothread);
+ else
+ break;
+ }
+ return result;
+}
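+
+/*
+ * Sizing note (derived from the code above and from
+ * vm_pageout_iothread_internal_continue()): with N compressor threads,
+ * pgo_maxlaundry is 4 * N * VM_PAGE_LAUNDRY_MAX and each thread's
+ * local_batch_size is pgo_maxlaundry / (2 * N), which works out to
+ * 2 * VM_PAGE_LAUNDRY_MAX per thread regardless of N.
+ */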
+
+#if CONFIG_IOSCHED
+/*
+ * To support I/O Expedite for compressed files we mark the upls with special flags.
+ * The way decmpfs works is that we create a big upl which marks all the pages needed to
+ * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
+ * then issues smaller I/Os for the compressed data, decompresses it and puts the data into the pages
+ * being held in the big original UPL. We mark each of these smaller UPLs with the flag
+ * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
+ * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
+ * by the req upl lock (the reverse link doesn't need synchronization since we never inspect this link
+ * unless the real I/O upl is being destroyed).
+ */
+
+
+static void
+upl_set_decmp_info(upl_t upl, upl_t src_upl)
+{
+ assert((src_upl->flags & UPL_DECMP_REQ) != 0);
+
+ upl_lock(src_upl);
+ if (src_upl->decmp_io_upl) {
+ /*
+ * If there is already a live real I/O UPL, ignore this new UPL.
+ * This case should rarely happen and even if it does, it just means
+ * that we might issue a spurious expedite which the driver is expected
+ * to handle.
+ */
+ upl_unlock(src_upl);
+ return;
+ }
+ src_upl->decmp_io_upl = (void *)upl;
+ src_upl->ref_count++;
+
+ upl->flags |= UPL_DECMP_REAL_IO;
+ upl->decmp_io_upl = (void *)src_upl;
+ upl_unlock(src_upl);
+}
+#endif /* CONFIG_IOSCHED */
+
+#if UPL_DEBUG
+int upl_debug_enabled = 1;
+#else
+int upl_debug_enabled = 0;
+#endif