+#if MACH_ASSERT
+ vm_page_verify_free_lists();
+
+ clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
+#endif
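+ /*
+ * hold both the page queues lock and the free list lock
+ * so the state of the pages we examine can't change
+ * out from under us during the scan
+ */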
+ vm_page_lock_queues();
+ mutex_lock(&vm_page_queue_free_lock);
+
+ RESET_STATE_OF_RUN();
+
+ considered = 0;
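+ /*
+ * number of free pages we can draw on before
+ * dipping below vm_page_free_reserved
+ */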
+ free_available = vm_page_free_count - vm_page_free_reserved;
+
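+ /*
+ * walk the vm_pages array looking for a run of
+ * 'contig_pages' physically contiguous pages
+ */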
+ for (page_idx = 0, start_idx = 0;
+ npages < contig_pages && page_idx < vm_pages_count;
+ page_idx++) {
+retry:
+ m = &vm_pages[page_idx];
+
+ if (max_pnum && m->phys_page > max_pnum) {
+ /* no more low pages... */
+ break;
+ }
+ if (m->phys_page <= vm_lopage_poolend &&
+ m->phys_page >= vm_lopage_poolstart) {
+ /*
+ * don't want to take pages from our
+ * reserved pool of low memory
+ * so don't consider it which
+ * means starting a new run
+ */
+ RESET_STATE_OF_RUN();
+
+ } else if (m->wire_count || m->gobbled ||
+ m->encrypted || m->encrypted_cleaning || m->cs_validated || m->cs_tainted ||
+ m->error || m->absent || m->pageout_queue || m->laundry || m->wanted || m->precious ||
+ m->cleaning || m->overwriting || m->restart || m->unusual || m->list_req_pending) {
+ /*
+ * page is in a transient state
+ * or a state we don't want to deal
+ * with, so don't consider it which
+ * means starting a new run
+ */
+ RESET_STATE_OF_RUN();
+
+ } else if (!m->free && !m->active && !m->inactive && !m->speculative && !m->throttled) {
+ /*
+ * page needs to be on one of our queues
+ * in order for it to be stable behind the
+ * locks we hold at this point...
+ * if not, don't consider it which
+ * means starting a new run
+ */
+ RESET_STATE_OF_RUN();
+
+ } else if (!m->free && (!m->tabled || m->busy)) {
+ /*
+ * pages on the free list are always 'busy'
+ * so we couldn't test for 'busy' in the check
+ * for the transient states... pages that are
+ * 'free' are never 'tabled', so we also couldn't
+ * test for 'tabled'. So we check here to make
+ * sure that a non-free page is not busy and is
+ * tabled on an object...
+ * if not, don't consider it which
+ * means starting a new run
+ */
+ RESET_STATE_OF_RUN();
+
+ } else {
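+ /*
+ * this page is usable... if it's physically contiguous
+ * with the previous page, extend the current run,
+ * otherwise start a new run at this page
+ */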
+ if (m->phys_page != prevcontaddr + 1) {
+ npages = 1;
+ start_idx = page_idx;
+ } else {
+ npages++;
+ }
+ prevcontaddr = m->phys_page;
+
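+ /*
+ * a page that's mapped or dirty can't simply be
+ * stolen... a substitute page will have to come
+ * out of the free pool, so count it against
+ * what's available
+ */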
+ if (m->pmapped || m->dirty)
+ substitute_needed++;
+
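+ /*
+ * pages taken directly from the free list also
+ * draw down vm_page_free_count
+ */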
+ if (m->free) {
+ free_considered++;
+ }
+ if ((free_considered + substitute_needed) > free_available) {
+ /*
+ * if we let this run continue
+ * we will end up dropping the vm_page_free_count
+ * below the reserve limit... we need to abort
+ * this run, but we can at least re-consider this
+ * page... thus the jump back to 'retry'
+ */
+ RESET_STATE_OF_RUN();
+
+ if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
+ considered++;
+ goto retry;
+ }
+ /*
+ * free_available == 0
+ * so can't consider any free pages... if
+ * we went to retry in this case, we'd
+ * get stuck looking at the same page
+ * w/o making any forward progress
+ * we also want to take this path if we've already
+ * reached our limit that controls the lock latency
+ */
+ }
+ }
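+ /*
+ * we've examined a lot of pages without a run in
+ * progress... briefly drop the locks to keep other
+ * threads from stalling behind this scan
+ */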
+ if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
+
+ mutex_unlock(&vm_page_queue_free_lock);
+ vm_page_unlock_queues();