+#define FAKE_COUNT 5000
+
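+/* Counters used only by the FAKE_DEADLOCK debug instrumentation below. */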
+int internal_count = 0;
+int fake_deadlock = 0;
+
+#endif
+
+static void
+vm_pageout_iothread_continue(struct vm_pageout_queue *q)
+{
+ vm_page_t m = NULL;
+ vm_object_t object;
+ boolean_t need_wakeup;
+ memory_object_t pager;
+ thread_t self = current_thread();
+
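+ /*
+ * If a dedicated internal iothread exists, the external iothread
+ * drops TH_OPT_VMPRIV (set in vm_pageout_iothread_external below)
+ * so that it no longer runs VM-privileged.
+ */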
+ if ((vm_pageout_internal_iothread != THREAD_NULL)
+ && (self == vm_pageout_external_iothread)
+ && (self->options & TH_OPT_VMPRIV))
+ self->options &= ~TH_OPT_VMPRIV;
+
+ vm_page_lockspin_queues();
+
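+ /*
+ * Pull laundered pages off this queue one at a time and hand
+ * each one to its object's pager via memory_object_data_return().
+ */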
+ while ( !queue_empty(&q->pgo_pending) ) {
+
+ q->pgo_busy = TRUE;
+ queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ m->pageout_queue = FALSE;
+ vm_page_unlock_queues();
+
+ m->pageq.next = NULL;
+ m->pageq.prev = NULL;
+#ifdef FAKE_DEADLOCK
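+ /*
+ * Debug-only instrumentation: every FAKE_COUNT pages pulled from
+ * the internal queue, allocate (and immediately free) enough kernel
+ * memory to consume all free and reserved pages, simulating the
+ * memory exhaustion that leads to a pageout deadlock.
+ */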
+ if (q == &vm_pageout_queue_internal) {
+ vm_offset_t addr;
+ int pg_count;
+
+ internal_count++;
+
+ if (internal_count == FAKE_COUNT) {
+
+ pg_count = vm_page_free_count + vm_page_free_reserved;
+
+ if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
+ kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
+ }
+ internal_count = 0;
+ fake_deadlock++;
+ }
+ }
+#endif
+ object = m->object;
+
+ vm_object_lock(object);
+
+ if (!object->pager_initialized) {
+
+ /*
+ * If there is no memory object for the page, create
+ * one and hand it to the default pager.
+ */
+
+ if (!object->pager_initialized)
+ vm_object_collapse(object,
+ (vm_object_offset_t) 0,
+ TRUE);
+ if (!object->pager_initialized)
+ vm_object_pager_create(object);
+ if (!object->pager_initialized) {
+ /*
+ * Still no pager for the object.
+ * Reactivate the page.
+ *
+ * Should only happen if there is no
+ * default pager.
+ */
+ m->list_req_pending = FALSE;
+ m->cleaning = FALSE;
+ m->pageout = FALSE;
+
+ vm_page_lockspin_queues();
+ vm_page_unwire(m);
+ vm_pageout_throttle_up(m);
+ vm_pageout_dirty_no_pager++;
+ vm_page_activate(m);
+ vm_page_unlock_queues();
+
+ /*
+ * And we are done with it.
+ */
+ PAGE_WAKEUP_DONE(m);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+ }
+ pager = object->pager;
+ if (pager == MEMORY_OBJECT_NULL) {
+ /*
+ * This pager has been destroyed by either
+ * memory_object_destroy or vm_object_destroy, and
+ * so there is nowhere for the page to go.
+ * Just free the page... VM_PAGE_FREE takes
+ * care of cleaning up all the state...
+ * including doing the vm_pageout_throttle_up
+ */
+
+ VM_PAGE_FREE(m);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
+ }
+ vm_object_unlock(object);
+ /*
+ * we expect the paging_in_progress reference to have
+ * already been taken on the object before it was added
+ * to the appropriate pageout I/O queue... this will
+ * keep the object from being terminated and/or the
+ * paging_offset from changing until the I/O has
+ * completed... therefore no need to lock the object to
+ * pull the paging_offset from it.
+ *
+ * Send the data to the pager.
+ * any pageout clustering happens there
+ */
+ memory_object_data_return(pager,
+ m->offset + object->paging_offset,
+ PAGE_SIZE,
+ NULL,
+ NULL,
+ FALSE,
+ FALSE,
+ 0);
+
+ vm_object_lock(object);
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ }
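+ /*
+ * The pageout queue is empty: arrange to wait for more work and,
+ * if the pageout daemon throttled itself on this queue and the
+ * laundry has drained below its limit, wake it via pgo_laundry
+ * before blocking.
+ */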
+ assert_wait((event_t) q, THREAD_UNINT);
+
+ if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
+ q->pgo_throttled = FALSE;
+ need_wakeup = TRUE;
+ } else
+ need_wakeup = FALSE;
+
+ q->pgo_busy = FALSE;
+ q->pgo_idle = TRUE;
+ vm_page_unlock_queues();
+
+ if (need_wakeup == TRUE)
+ thread_wakeup((event_t) &q->pgo_laundry);
+
+ thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
+ /*NOTREACHED*/
+}
+
+
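+/*
+ * Entry point for the external (file-backed) pageout iothread:
+ * mark the thread VM-privileged and enter the shared continuation
+ * loop on the external pageout queue.
+ */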
+static void
+vm_pageout_iothread_external(void)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ vm_pageout_iothread_continue(&vm_pageout_queue_external);
+ /*NOTREACHED*/
+}
+
+
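+/*
+ * Entry point for the internal (default-pager / anonymous memory)
+ * pageout iothread: same setup, but drains the internal pageout
+ * queue.
+ */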
+static void
+vm_pageout_iothread_internal(void)
+{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
+
+ vm_pageout_iothread_continue(&vm_pageout_queue_internal);
+ /*NOTREACHED*/
+}
+
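+/*
+ * Continuation-based garbage-collection thread: when woken on the
+ * vm_pageout_garbage_collect event it reclaims cached kernel stacks,
+ * machine-dependent caches, and zone memory, then blocks again with
+ * collect = 1 so the next wakeup collects as well.
+ */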
+static void
+vm_pageout_garbage_collect(int collect)
+{
+ if (collect) {
+ stack_collect();
+
+ /*
+ * consider_zone_gc should be last, because the other operations
+ * might return memory to zones.
+ */
+ consider_machine_collect();
+ consider_zone_gc();
+
+ consider_machine_adjust();
+ }
+
+ assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);
+
+ thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
+ /*NOTREACHED*/
+}
+
+
+
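+/*
+ * vm_pageout: body of the pageout daemon thread.  Raises its own
+ * scheduling priority, reserves its kernel stack, initializes the
+ * paging tunables that have not already been set, and marks the
+ * kernel task as backing-store privileged.
+ */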
+void
+vm_pageout(void)
+{
+ thread_t self = current_thread();
+ thread_t thread;
+ kern_return_t result;
+ spl_t s;
+
+ /*
+ * Set thread privileges.
+ */
+ s = splsched();
+ thread_lock(self);
+ self->priority = BASEPRI_PREEMPT - 1;
+ set_sched_pri(self, self->priority);
+ thread_unlock(self);
+
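+ /*
+ * Keep a reserved kernel stack so this thread can always run,
+ * even when kernel stacks cannot be allocated under memory
+ * pressure.
+ */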
+ if (!self->reserved_stack)
+ self->reserved_stack = self->kernel_stack;
+
+ splx(s);
+
+ /*
+ * Initialize some paging parameters.
+ */
+
+ if (vm_pageout_idle_wait == 0)
+ vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
+
+ if (vm_pageout_burst_wait == 0)
+ vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
+
+ if (vm_pageout_empty_wait == 0)
+ vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
+
+ if (vm_pageout_deadlock_wait == 0)
+ vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
+
+ if (vm_pageout_deadlock_relief == 0)
+ vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
+
+ if (vm_pageout_inactive_relief == 0)
+ vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;
+
+ if (vm_pageout_burst_active_throttle == 0)
+ vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;
+
+ if (vm_pageout_burst_inactive_throttle == 0)
+ vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
+
+ /*
+ * Mark the kernel task as privileged with respect to
+ * low-backing-store conditions.
+ */
+ task_lock(kernel_task);
+ kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
+ task_unlock(kernel_task);
+
+ vm_page_free_count_init = vm_page_free_count;