+#if CONFIG_SECLUDED_MEMORY
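+ /*
+ * If the secluded pool is over target, let go of the object
+ * we were working on (and clear the "wants object" hint)
+ * before rebalancing the secluded queue below.
+ */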
+ if (vm_page_secluded_count > vm_page_secluded_target &&
+ object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+ }
+
+ /*
+ * Deal with secluded_q overflow.
+ */
+ if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_FIFO) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_FIFO:
+ * No aging, just reclaim the excess pages
+ * at the tail of the secluded queue.
+ * We're reclaiming pages and we're not hogging
+ * any global lock, so no need for throttling.
+ */
+
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ /* transfer to free queue */
+ vm_page_unlock_queues();
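+ /*
+ * Grab secluded pages one at a time and chain them onto the
+ * local free list; the whole batch is handed to
+ * vm_page_free_list() once this rebalancing pass is done.
+ */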
+ while (secluded_overflow--) {
+ secluded_page = vm_page_grab_secluded();
+ if (secluded_page == VM_PAGE_NULL) {
+ break;
+ }
+ assert(secluded_page->busy);
+ assert(secluded_page->pageq.next == 0 &&
+ secluded_page->pageq.prev == 0);
+
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ secluded_page = VM_PAGE_NULL;
+ }
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_ALONG_ACTIVE) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_ALONG_ACTIVE:
+ * There might be free pages at the tail of the
+ * secluded queue:
+ * just move them to the free queue (in batches).
+ * There can also be an excessive number of "inuse"
+ * pages:
+ * we age them by resetting their "referenced" bit and
+ * moving them to the inactive queue. Their trip
+ * through the secluded queue was equivalent to a trip
+ * through the active queue.
+ *
+ * We're holding the page queue lock, so we need
+ * to throttle and give someone else a chance to
+ * grab that lock if needed.
+ *
+ * We're also limiting the number of secluded "inuse"
+ * pages that get moved to the inactive queue, using
+ * the same "active_burst_count" method we use when
+ * balancing the active and inactive queues, because
+ * there can be a large number of extra "inuse" pages
+ * and handling them gets in the way of actually
+ * reclaiming memory.
+ */
+
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
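+ /*
+ * A secluded page with no object is a free page: hand it
+ * straight to the local free list. Pages still attached to
+ * an object are aged into the inactive queue instead.
+ */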
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of inactive queue */
+ pmap_clear_refmod_options(
+ VM_PAGE_GET_PHYS_PAGE(secluded_page),
+ VM_MEM_REFERENCED,
+ PMAP_OPTIONS_NOFLUSH,
+ (void *)NULL);
+ vm_page_enqueue_inactive(secluded_page,
+ FALSE);
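+ /* hit the burst limit: stop and let the regular scan run */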
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
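+ /*
+ * Periodically drop the page queues lock: free the local
+ * batch if there is one, otherwise just yield the lock so
+ * other threads get a chance to grab it.
+ */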
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
+ delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_AFTER_INACTIVE) {
+ /*
+ * SECLUDED_AGING_AFTER_INACTIVE:
+ * No balancing needed at this point: when we get to
+ * the "choose a victim" part below, we'll consider the
+ * extra secluded pages before any inactive page.
+ */
+ } else if (vm_page_secluded_count > vm_page_secluded_target &&
+ secluded_aging_policy == SECLUDED_AGING_BEFORE_ACTIVE) {
+ unsigned int secluded_overflow;
+ vm_page_t secluded_page;
+
+ /*
+ * SECLUDED_AGING_BEFORE_ACTIVE:
+ * Excess secluded pages go to the active queue and
+ * will later go to the inactive queue.
+ */
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_secluded_count_inuse);
+ delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT;
+ delayed_unlock = 1;
+ secluded_overflow = (vm_page_secluded_count -
+ vm_page_secluded_target);
+ while (secluded_overflow-- > 0 &&
+ vm_page_secluded_count > vm_page_secluded_target) {
+ assert((vm_page_secluded_count_free +
+ vm_page_secluded_count_inuse) ==
+ vm_page_secluded_count);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
+ assert(secluded_page->vm_page_q_state ==
+ VM_PAGE_ON_SECLUDED_Q);
+ vm_page_queues_remove(secluded_page, FALSE);
+ assert(!secluded_page->fictitious);
+ assert(!VM_PAGE_WIRED(secluded_page));
+ if (secluded_page->vm_page_object == 0) {
+ /* transfer to free queue */
+ assert(secluded_page->busy);
+ secluded_page->snext = local_freeq;
+ local_freeq = secluded_page;
+ local_freed++;
+ } else {
+ /* transfer to head of active queue */
+ vm_page_enqueue_active(secluded_page,
+ FALSE);
+ if (active_burst_count-- == 0) {
+ vm_pageout_secluded_burst_count++;
+ break;
+ }
+ }
+ secluded_page = VM_PAGE_NULL;
+ if (delayed_unlock++ > delayed_unlock_limit) {
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ delayed_unlock_limit,
+ 1);
+ vm_page_free_list(local_freeq,
+ TRUE);
+ VM_DEBUG_EVENT(
+ vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count,
+ 0, 0, 1);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ } else {
+ lck_mtx_yield(&vm_page_queue_lock);
+ }
+ delayed_unlock = 1;
+ }
+ }
+ delayed_unlock = 1;
+ } else if (vm_page_secluded_count > vm_page_secluded_target) {
+ panic("unsupported secluded_aging_policy %d\n",
+ secluded_aging_policy);
+ }
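+ /* flush anything still on the local free list before moving on */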
+ if (local_freeq) {
+ vm_page_unlock_queues();
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_START,
+ vm_page_free_count,
+ local_freed,
+ 0,
+ 0);
+ vm_page_free_list(local_freeq, TRUE);
+ VM_DEBUG_EVENT(vm_pageout_freelist,
+ VM_PAGEOUT_FREELIST,
+ DBG_FUNC_END,
+ vm_page_free_count, 0, 0, 0);
+ local_freeq = NULL;
+ local_freed = 0;
+ vm_page_lock_queues();
+ }
+#endif /* CONFIG_SECLUDED_MEMORY */
+