+
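+/*
+ * Maximum number of pages vm_object_reap_pages() processes per
+ * acquisition of the page queues lock before dropping the lock to
+ * drain its local free list (or pause), so that reaping a large
+ * object doesn't hold the lock for too long.
+ */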
+#define V_O_R_MAX_BATCH 128
+
+
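+/*
+ * Free a list of reclaimed pages chained through pageq.next.
+ * When do_disconnect is TRUE, each pmapped page is first removed
+ * from all pmaps with pmap_disconnect() before the list is handed
+ * to vm_page_free_list().
+ */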
+#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
+ MACRO_BEGIN \
+ if (_local_free_q) { \
+ if (do_disconnect) { \
+ vm_page_t m; \
+ for (m = _local_free_q; \
+ m != VM_PAGE_NULL; \
+ m = (vm_page_t) m->pageq.next) { \
+ if (m->pmapped) { \
+ pmap_disconnect(m->phys_page); \
+ } \
+ } \
+ } \
+ vm_page_free_list(_local_free_q, TRUE); \
+ _local_free_q = VM_PAGE_NULL; \
+ } \
+ MACRO_END
+
+
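+/*
+ * Routine: vm_object_reap_pages
+ * Purpose: Walk the object's resident page list and dispose of
+ * each page according to reap_type. Reclaimed pages are
+ * collected on a local free list and released in batches of
+ * V_O_R_MAX_BATCH pages so that the page queues lock is
+ * dropped periodically.
+ */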
+void
+vm_object_reap_pages(
+ vm_object_t object,
+ int reap_type)
+{
+ vm_page_t p;
+ vm_page_t next;
+ vm_page_t local_free_q = VM_PAGE_NULL;
+ int loop_count;
+ boolean_t disconnect_on_release;
+
+ if (reap_type == REAP_DATA_FLUSH) {
+ /*
+ * We need to disconnect pages from all pmaps before
+ * releasing them to the free list
+ */
+ disconnect_on_release = TRUE;
+ } else {
+ /*
+ * Either the caller has already disconnected the pages
+ * from all pmaps, or we disconnect them here as we add
+ * them to our local list of pages to be released.
+ * No need to re-disconnect them when we release the pages
+ * to the free list.
+ */
+ disconnect_on_release = FALSE;
+ }
+
+restart_after_sleep:
+ if (queue_empty(&object->memq))
+ return;
+ loop_count = V_O_R_MAX_BATCH + 1;
+
+ vm_page_lockspin_queues();
+
+ next = (vm_page_t)queue_first(&object->memq);
+
+ while (!queue_end(&object->memq, (queue_entry_t)next)) {
+
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ if (--loop_count == 0) {
+
+ vm_page_unlock_queues();
+
+ if (local_free_q) {
+ /*
+ * Free the pages we reclaimed so far
+ * and take a little break to avoid
+ * hogging the page queue lock too long
+ */
+ VM_OBJ_REAP_FREELIST(local_free_q,
+ disconnect_on_release);
+ } else
+ mutex_pause(0);
+
+ loop_count = V_O_R_MAX_BATCH + 1;
+
+ vm_page_lockspin_queues();
+ }
+ if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
+
+ if (reap_type == REAP_DATA_FLUSH &&
+ ((p->pageout == TRUE || p->cleaning == TRUE) && p->list_req_pending == TRUE)) {
+ p->list_req_pending = FALSE;
+ p->cleaning = FALSE;
+ /*
+ * need to drop the laundry count...
+ * we may also need to remove it
+ * from the I/O paging queue...
+ * vm_pageout_throttle_up handles both cases
+ *
+ * the laundry and pageout_queue flags are cleared...
+ */
+#if CONFIG_EMBEDDED
+ if (p->laundry)
+ vm_pageout_throttle_up(p);
+#else
+ vm_pageout_throttle_up(p);
+#endif
+ if (p->pageout == TRUE) {
+ /*
+ * toss the wire count we picked up
+ * when we initially set this page up
+ * to be cleaned and stolen...
+ */
+ vm_page_unwire(p, TRUE);
+ p->pageout = FALSE;
+ }
+ PAGE_WAKEUP(p);
+
+ } else if (p->busy || p->cleaning) {
+
+ vm_page_unlock_queues();
+ /*
+ * free the pages reclaimed so far
+ */
+ VM_OBJ_REAP_FREELIST(local_free_q,
+ disconnect_on_release);
+
+ PAGE_SLEEP(object, p, THREAD_UNINT);
+
+ goto restart_after_sleep;
+ }
+ }
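+ /*
+ * Dispose of the page according to the reap type:
+ * REAP_DATA_FLUSH - discard the data; wired pages are
+ * left on the object
+ * REAP_PURGEABLE - discard pages of a purged object,
+ * skipping wired, busy or in-flight pages
+ * REAP_TERMINATE - push dirty/precious pages to the
+ * pager before the object is destroyed
+ * REAP_REAP - reclaim the page unconditionally
+ */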
+ switch (reap_type) {
+
+ case REAP_DATA_FLUSH:
+ if (VM_PAGE_WIRED(p)) {
+ /*
+ * this is an odd case... perhaps we should
+ * zero-fill this page since we're conceptually
+ * tossing its data at this point, but leaving
+ * it on the object to honor the 'wire' contract
+ */
+ continue;
+ }
+ break;
+
+ case REAP_PURGEABLE:
+ if (VM_PAGE_WIRED(p)) {
+ /* can't purge a wired page */
+ vm_page_purged_wired++;
+ continue;
+ }
+
+ if (p->busy) {
+ /*
+ * We can't reclaim a busy page but we can
+ * make it pageable (it's not wired) to make
+ * sure that it gets considered by
+ * vm_pageout_scan() later.
+ */
+ vm_page_deactivate(p);
+ vm_page_purged_busy++;
+ continue;
+ }
+
+ if (p->cleaning || p->laundry || p->list_req_pending) {
+ /*
+ * page is being acted upon,
+ * so don't mess with it
+ */
+ vm_page_purged_others++;
+ continue;
+ }
+ assert(p->object != kernel_object);
+
+ /*
+ * we can discard this page...
+ */
+ if (p->pmapped == TRUE) {
+ int refmod_state;
+ /*
+ * unmap the page
+ */
+ refmod_state = pmap_disconnect(p->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ p->dirty = TRUE;
+ }
+ }
+ if (p->dirty || p->precious) {
+ /*
+ * we saved the cost of cleaning this page!
+ */
+ vm_page_purged_count++;
+ }
+
+ break;
+
+ case REAP_TERMINATE:
+ if (p->absent || p->private) {
+ /*
+ * For private pages, VM_PAGE_FREE just
+ * leaves the page structure around for
+ * its owner to clean up. For absent
+ * pages, the structure is returned to
+ * the appropriate pool.
+ */
+ break;
+ }
+ if (p->fictitious) {
+ assert(p->phys_page == vm_page_guard_addr);
+ break;
+ }
+ if (!p->dirty && p->wpmapped)
+ p->dirty = pmap_is_modified(p->phys_page);
+
+ if ((p->dirty || p->precious) && !p->error && object->alive) {
+
+ p->busy = TRUE;
+
+ VM_PAGE_QUEUES_REMOVE(p);
+
+ vm_page_unlock_queues();
+ /*
+ * free the pages reclaimed so far
+ */
+ VM_OBJ_REAP_FREELIST(local_free_q,
+ disconnect_on_release);
+
+ /*
+ * flush page... page will be freed
+ * upon completion of I/O
+ */
+ vm_pageout_cluster(p);
+ vm_object_paging_wait(object, THREAD_UNINT);
+
+ goto restart_after_sleep;
+ }
+ break;
+
+ case REAP_REAP:
+ break;
+ }
+ vm_page_free_prepare_queues(p);
+ assert(p->pageq.next == NULL && p->pageq.prev == NULL);
+ /*
+ * Add this page to our list of reclaimed pages,
+ * to be freed later.
+ */
+ p->pageq.next = (queue_entry_t) local_free_q;
+ local_free_q = p;
+ }
+ vm_page_unlock_queues();
+
+ /*
+ * Free the remaining reclaimed pages
+ */
+ VM_OBJ_REAP_FREELIST(local_free_q,
+ disconnect_on_release);
+}
+
+
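+/*
+ * Routine: vm_object_reap_async
+ * Purpose: Defer reaping of the object to the reaper thread:
+ * enqueue it on vm_object_reaper_queue and wake the thread.
+ * The caller must hold the object lock exclusively.
+ */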
+void
+vm_object_reap_async(
+ vm_object_t object)
+{
+ vm_object_lock_assert_exclusive(object);
+
+ vm_object_reaper_lock_spin();
+
+ vm_object_reap_count_async++;
+
+ /* enqueue the VM object... */
+ queue_enter(&vm_object_reaper_queue, object,
+ vm_object_t, cached_list);
+
+ vm_object_reaper_unlock();
+
+ /* ... and wake up the reaper thread */
+ thread_wakeup((event_t) &vm_object_reaper_queue);
+}
+
+
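+/*
+ * Routine: vm_object_reaper_thread
+ * Purpose: Drain vm_object_reaper_queue. For each queued object,
+ * wait for its in-flight paging activity to complete, reap it,
+ * and drop the reference it held on its shadow object. Block
+ * waiting for more work when the queue is empty.
+ */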
+void
+vm_object_reaper_thread(void)
+{
+ vm_object_t object, shadow_object;
+
+ vm_object_reaper_lock_spin();
+
+ while (!queue_empty(&vm_object_reaper_queue)) {
+ queue_remove_first(&vm_object_reaper_queue,
+ object,
+ vm_object_t,
+ cached_list);
+
+ vm_object_reaper_unlock();
+ vm_object_lock(object);
+
+ assert(object->terminating);
+ assert(!object->alive);
+
+ /*
+ * The pageout daemon might be playing with our pages.
+ * Now that the object is dead, it won't touch any more
+ * pages, but some pages might already be on their way out.
+ * Hence, we wait until the active paging activities have
+ * ceased before we break the association with the pager
+ * itself.
+ */
+ while (object->paging_in_progress != 0 ||
+ object->activity_in_progress != 0) {
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+ THREAD_UNINT);
+ vm_object_lock(object);
+ }
+
+ shadow_object =
+ object->pageout ? VM_OBJECT_NULL : object->shadow;
+
+ vm_object_reap(object);
+ /* cache is unlocked and object is no longer valid */
+ object = VM_OBJECT_NULL;
+
+ if (shadow_object != VM_OBJECT_NULL) {
+ /*
+ * Drop the reference "object" was holding on
+ * its shadow object.
+ */
+ vm_object_deallocate(shadow_object);
+ shadow_object = VM_OBJECT_NULL;
+ }
+ vm_object_reaper_lock_spin();
+ }
+
+ /* wait for more work... */
+ assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
+
+ vm_object_reaper_unlock();
+
+ thread_block((thread_continue_t) vm_object_reaper_thread);
+ /*NOTREACHED*/
+}
+
+/*
+ * Routine: vm_object_pager_wakeup
+ * Purpose: Wake up anyone waiting for termination of a pager.
+ */
+
+static void