+
+
+/*
+ * vm_object_cluster_size
+ *
+ * Determine how big a cluster we should issue an I/O for...
+ *
+ * Inputs: *start == offset of page needed
+ * *length == maximum cluster pager can handle
+ * Outputs: *start == beginning offset of cluster
+ * *length == length of cluster to try
+ *
+ * The original *start will be encompassed by the cluster
+ *
+ */
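+/*
+ * Illustrative example (hypothetical values, assuming 4K pages, not
+ * taken from a real trace): a caller might pass in *start == 0x40000
+ * (the faulting page) and *length == 32 * PAGE_SIZE (the most the pager
+ * can handle); on return, *start could have been pulled back to 0x3c000
+ * with *length set to 8 * PAGE_SIZE, so that the original offset sits
+ * inside the cluster that will be issued.
+ */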
+extern int speculative_reads_disabled;
+extern int ignore_is_ssd;
+
+#if CONFIG_EMBEDDED
+unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
+unsigned int preheat_pages_min = 8;
+#else
+unsigned int preheat_pages_max = MAX_UPL_TRANSFER;
+unsigned int preheat_pages_min = 8;
+#endif
+
+uint32_t pre_heat_scaling[MAX_UPL_TRANSFER + 1];
+uint32_t pre_heat_cluster[MAX_UPL_TRANSFER + 1];
+
+
+__private_extern__ void
+vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
+ vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
+{
+ vm_size_t pre_heat_size;
+ vm_size_t tail_size;
+ vm_size_t head_size;
+ vm_size_t max_length;
+ vm_size_t cluster_size;
+ vm_object_offset_t object_size;
+ vm_object_offset_t orig_start;
+ vm_object_offset_t target_start;
+ vm_object_offset_t offset;
+ vm_behavior_t behavior;
+ boolean_t look_behind = TRUE;
+ boolean_t look_ahead = TRUE;
+ boolean_t isSSD = FALSE;
+ uint32_t throttle_limit;
+ int sequential_run;
+ int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ unsigned int max_ph_size;
+ unsigned int min_ph_size;
+ unsigned int min_ph_size_in_bytes;
+
+ assert( !(*length & PAGE_MASK));
+ assert( !(*start & PAGE_MASK_64));
+
+ /*
+ * remember maximum length of run requested
+ */
+ max_length = *length;
+ /*
+ * we'll always return a cluster size of at least
+ * 1 page, since the original fault must always
+ * be processed
+ */
+ *length = PAGE_SIZE;
+ *io_streaming = 0;
+
+ if (speculative_reads_disabled || fault_info == NULL) {
+ /*
+ * no cluster... just fault the page in
+ */
+ return;
+ }
+ orig_start = *start;
+ target_start = orig_start;
+ cluster_size = round_page(fault_info->cluster_size);
+ behavior = fault_info->behavior;
+
+ vm_object_lock(object);
+
+ if (object->pager == MEMORY_OBJECT_NULL)
+ goto out; /* pager is gone for this object, nothing more to do */
+
+ if (!ignore_is_ssd)
+ vnode_pager_get_isSSD(object->pager, &isSSD);
+
+ min_ph_size = preheat_pages_min;
+ max_ph_size = preheat_pages_max;
+
+ if (isSSD) {
+ min_ph_size /= 2;
+ max_ph_size /= 8;
+ }
+ if (min_ph_size < 1)
+ min_ph_size = 1;
+
+ if (max_ph_size < 1)
+ max_ph_size = 1;
+ else if (max_ph_size > MAX_UPL_TRANSFER)
+ max_ph_size = MAX_UPL_TRANSFER;
+
+ if (max_length > (max_ph_size * PAGE_SIZE))
+ max_length = max_ph_size * PAGE_SIZE;
+
+ if (max_length <= PAGE_SIZE)
+ goto out;
+
+ min_ph_size_in_bytes = min_ph_size * PAGE_SIZE;
+
+ if (object->internal)
+ object_size = object->vo_size;
+ else
+ vnode_pager_get_object_size(object->pager, &object_size);
+
+ object_size = round_page_64(object_size);
+
+ if (orig_start >= object_size) {
+ /*
+ * fault occurred beyond the EOF...
+ * we need to punt w/o changing the
+ * starting offset
+ */
+ goto out;
+ }
+ if (object->pages_used > object->pages_created) {
+ /*
+ * must have wrapped our 32 bit counters
+ * so reset
+ */
+ object->pages_used = object->pages_created = 0;
+ }
+ if ((sequential_run = object->sequential)) {
+ if (sequential_run < 0) {
+ sequential_behavior = VM_BEHAVIOR_RSEQNTL;
+ sequential_run = 0 - sequential_run;
+ } else {
+ sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ }
+
+ }
+ switch (behavior) {
+
+ default:
+ behavior = VM_BEHAVIOR_DEFAULT;
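+ /* FALLTHROUGH... treat unknown behaviors as VM_BEHAVIOR_DEFAULT */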
+
+ case VM_BEHAVIOR_DEFAULT:
+ if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
+ goto out;
+
+ if (sequential_run >= (3 * PAGE_SIZE)) {
+ pre_heat_size = sequential_run + PAGE_SIZE;
+
+ if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL)
+ look_behind = FALSE;
+ else
+ look_ahead = FALSE;
+
+ *io_streaming = 1;
+ } else {
+
+ if (object->pages_created < (20 * min_ph_size)) {
+ /*
+ * prime the pump
+ */
+ pre_heat_size = min_ph_size_in_bytes;
+ } else {
+ /*
+ * Linear growth in PH size: The maximum size is max_length...
+ * this calculation will result in a size that is neither a
+ * power of 2 nor a multiple of PAGE_SIZE... so round
+ * it up to the nearest PAGE_SIZE boundary
+ */
+ pre_heat_size = (max_length * object->pages_used) / object->pages_created;
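+ /*
+ * e.g. (hypothetical numbers): with max_length covering 32 pages,
+ * pages_used == 10 and pages_created == 30, this works out to
+ * (32 * PAGE_SIZE * 10) / 30, roughly 10.6 pages worth of bytes,
+ * which the round_page() below bumps up to 11 pages
+ */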
+
+ if (pre_heat_size < min_ph_size_in_bytes)
+ pre_heat_size = min_ph_size_in_bytes;
+ else
+ pre_heat_size = round_page(pre_heat_size);
+ }
+ }
+ break;
+
+ case VM_BEHAVIOR_RANDOM:
+ if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
+ goto out;
+ break;
+
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if ((pre_heat_size = cluster_size) == 0)
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ look_behind = FALSE;
+ *io_streaming = 1;
+
+ break;
+
+ case VM_BEHAVIOR_RSEQNTL:
+ if ((pre_heat_size = cluster_size) == 0)
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ look_ahead = FALSE;
+ *io_streaming = 1;
+
+ break;
+
+ }
+ throttle_limit = (uint32_t) max_length;
+ assert(throttle_limit == max_length);
+
+ if (vnode_pager_check_hard_throttle(object->pager, &throttle_limit, *io_streaming) == KERN_SUCCESS) {
+ if (max_length > throttle_limit)
+ max_length = throttle_limit;
+ }
+ if (pre_heat_size > max_length)
+ pre_heat_size = max_length;
+
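+ /*
+ * under memory pressure, scale the speculative cluster back...
+ * the tighter the free list, the harder we cut, but never below
+ * the minimum preheat size
+ */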
+ if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size_in_bytes)) {
+ if (vm_page_free_count < vm_page_throttle_limit)
+ pre_heat_size = trunc_page(pre_heat_size / 16);
+ else if (vm_page_free_count < vm_page_free_target)
+ pre_heat_size = trunc_page(pre_heat_size / 4);
+
+ if (pre_heat_size < min_ph_size_in_bytes)
+ pre_heat_size = min_ph_size_in_bytes;
+ }
+ if (look_ahead == TRUE) {
+ if (look_behind == TRUE) {
+ /*
+ * if we get here it's due to a random access...
+ * so we want to center the original fault address
+ * within the cluster we will issue... make sure
+ * to calculate 'head_size' as a multiple of PAGE_SIZE...
+ * 'pre_heat_size' is a multiple of PAGE_SIZE but not
+ * necessarily an even number of pages so we need to truncate
+ * the result to a PAGE_SIZE boundary
+ */
+ head_size = trunc_page(pre_heat_size / 2);
+
+ if (target_start > head_size)
+ target_start -= head_size;
+ else
+ target_start = 0;
+
+ /*
+ * 'target_start' at this point represents the beginning offset
+ * of the cluster we are considering... 'orig_start' will be in
+ * the center of this cluster if we didn't have to clip the start
+ * due to running into the start of the file
+ */
+ }
+ if ((target_start + pre_heat_size) > object_size)
+ pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
+ /*
+ * at this point calculate the number of pages beyond the original fault
+ * address that we want to consider... this is guaranteed not to extend beyond
+ * the current EOF...
+ */
+ assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
+ tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
+ } else {
+ if (pre_heat_size > target_start) {
+ /*
+ * since pre_heat_size is always smaller than 2^32,
+ * if it is larger than target_start (a 64-bit value)
+ * it is safe to clip target_start to 32 bits
+ */
+ pre_heat_size = (vm_size_t) target_start;
+ }
+ tail_size = 0;
+ }
+ assert( !(target_start & PAGE_MASK_64));
+ assert( !(pre_heat_size & PAGE_MASK));
+
+ pre_heat_scaling[pre_heat_size / PAGE_SIZE]++;
+
+ if (pre_heat_size <= PAGE_SIZE)
+ goto out;
+
+ if (look_behind == TRUE) {
+ /*
+ * take a look at the pages before the original
+ * faulting offset... recalculate this in case
+ * we had to clip 'pre_heat_size' above to keep
+ * from running past the EOF.
+ */
+ head_size = pre_heat_size - tail_size - PAGE_SIZE;
+
+ for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
+ /*
+ * don't poke below the lowest offset
+ */
+ if (offset < fault_info->lo_offset)
+ break;
+ /*
+ * for external objects and internal objects w/o an existence map
+ * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+#if MACH_PAGEMAP
+ if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ /*
+ * we know for a fact that the pager can't provide the page
+ * so don't include it or any pages beyond it in this cluster
+ */
+ break;
+ }
+#endif
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *start = offset;
+ *length += PAGE_SIZE;
+ }
+ }
+ if (look_ahead == TRUE) {
+ for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
+ /*
+ * don't poke above the highest offset
+ */
+ if (offset >= fault_info->hi_offset)
+ break;
+ assert(offset < object_size);
+
+ /*
+ * for external objects and internal objects w/o an existence map
+ * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+#if MACH_PAGEMAP
+ if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ /*
+ * we know for a fact that the pager can't provide the page
+ * so don't include it or any pages beyond it in this cluster
+ */
+ break;
+ }
+#endif
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *length += PAGE_SIZE;
+ }
+ }
+out:
+ if (*length > max_length)
+ *length = max_length;
+
+ pre_heat_cluster[*length / PAGE_SIZE]++;
+
+ vm_object_unlock(object);
+}
+
+
+/*
+ * Allow manipulation of individual page state. This is actually part of
+ * the UPL regimen but takes place on the VM object rather than on a UPL
+ */
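+/*
+ * Illustrative call sequence (a hypothetical caller, not taken from
+ * this file): mark a page busy and fetch its physical page number,
+ * then release it once the caller is done with it.
+ *
+ *	kern_return_t	kr;
+ *	ppnum_t		pnum;
+ *	int		pflags;
+ *
+ *	kr = vm_object_page_op(object, offset,
+ *	                       UPL_POP_SET | UPL_POP_BUSY, &pnum, &pflags);
+ *	... use the physical page while it is held busy ...
+ *	kr = vm_object_page_op(object, offset,
+ *	                       UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
+ */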
+
+kern_return_t
+vm_object_page_op(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags)
+{
+ vm_page_t dst_page;
+
+ vm_object_lock(object);
+
+ if(ops & UPL_POP_PHYSICAL) {
+ if(object->phys_contiguous) {
+ if (phys_entry) {
+ *phys_entry = (ppnum_t)
+ (object->vo_shadow_offset >> PAGE_SHIFT);
+ }
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ } else {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+ }
+ if(object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ while(TRUE) {
+ if((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ /* Sync up on getting the busy bit */
+ if((dst_page->busy || dst_page->cleaning) &&
+ (((ops & UPL_POP_SET) &&
+ (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
+ /* someone else is playing with the page, we will */
+ /* have to wait */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
+
+ if (ops & UPL_POP_DUMP) {
+ if (dst_page->pmapped == TRUE)
+ pmap_disconnect(dst_page->phys_page);
+
+ VM_PAGE_FREE(dst_page);
+ break;
+ }
+
+ if (flags) {
+ *flags = 0;
+
+ /* Get the condition of flags before requested ops */
+ /* are undertaken */
+
+ if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
+ if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
+ if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
+ if(dst_page->absent) *flags |= UPL_POP_ABSENT;
+ if(dst_page->busy) *flags |= UPL_POP_BUSY;
+ }
+
+ /* The caller should have set UPL_POP_BUSY, either in this call */
+ /* or in a prior one */
+ if(ops & UPL_POP_SET) {
+ /* The protection granted with this assert will */
+ /* not be complete. If the caller violates the */
+ /* convention and attempts to change page state */
+ /* without first setting busy we may not see it */
+ /* because the page may already be busy. However */
+ /* if such violations occur we will assert sooner */
+ /* or later. */
+ assert(dst_page->busy || (ops & UPL_POP_BUSY));
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
+ if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
+ }
+
+ if(ops & UPL_POP_CLR) {
+ assert(dst_page->busy);
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
+ if (ops & UPL_POP_BUSY) {
+ dst_page->busy = FALSE;
+ PAGE_WAKEUP(dst_page);
+ }
+ }
+
+ if (dst_page->encrypted) {
+ /*
+ * ENCRYPTED SWAP:
+ * We need to decrypt this encrypted page before the
+ * caller can access its contents.
+ * But if the caller really wants to access the page's
+ * contents, they have to keep the page "busy".
+ * Otherwise, the page could get recycled or re-encrypted
+ * at any time.
+ */
+ if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
+ dst_page->busy) {
+ /*
+ * The page is stable enough to be accessed by
+ * the caller, so make sure its contents are
+ * not encrypted.
+ */
+ vm_page_decrypt(dst_page, 0);
+ } else {
+ /*
+ * The page is not busy, so don't bother
+ * decrypting it, since anything could
+ * happen to it between now and when the
+ * caller wants to access it.
+ * We should not give the caller access
+ * to this page.
+ */
+ assert(!phys_entry);
+ }
+ }
+
+ if (phys_entry) {
+ /*
+ * The physical page number will remain valid
+ * only if the page is kept busy.
+ * ENCRYPTED SWAP: make sure we don't let the
+ * caller access an encrypted page.
+ */
+ assert(dst_page->busy);
+ assert(!dst_page->encrypted);
+ *phys_entry = dst_page->phys_page;
+ }
+
+ break;
+ }
+
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+
+}
+
+/*
+ * vm_object_range_op offers a performance enhancement over
+ * vm_object_page_op for operations which do not require page-level
+ * state to be returned from the call. vm_object_page_op was created to
+ * provide a low-cost alternative to page manipulation via UPLs when only
+ * a single page was involved. The range_op call extends the _op family
+ * of functions to work on multiple pages, where the lack of page-level
+ * state handling allows the caller to avoid the overhead of the UPL structures.
+ */
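+/*
+ * Illustrative call (a hypothetical caller, not taken from this file):
+ * measure how much of [start, start + size) is covered by an initial
+ * run of resident pages.
+ *
+ *	uint32_t	range;
+ *
+ *	kr = vm_object_range_op(object, start, start + size,
+ *	                        UPL_ROP_PRESENT, &range);
+ *
+ * on return, "range" holds the number of bytes from "start" up to the
+ * first non-resident page (0 if the first page is not resident).
+ */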
+
+kern_return_t
+vm_object_range_op(
+ vm_object_t object,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ uint32_t *range)
+{
+ vm_object_offset_t offset;
+ vm_page_t dst_page;
+
+ if (offset_end - offset_beg > (uint32_t) -1) {
+ /* range is too big and would overflow "*range" */
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (object->resident_page_count == 0) {
+ if (range) {
+ if (ops & UPL_ROP_PRESENT) {
+ *range = 0;
+ } else {
+ *range = (uint32_t) (offset_end - offset_beg);
+ assert(*range == (offset_end - offset_beg));
+ }
+ }
+ return KERN_SUCCESS;
+ }
+ vm_object_lock(object);
+
+ if (object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ offset = offset_beg & ~PAGE_MASK_64;
+
+ while (offset < offset_end) {
+ dst_page = vm_page_lookup(object, offset);
+ if (dst_page != VM_PAGE_NULL) {
+ if (ops & UPL_ROP_DUMP) {
+ if (dst_page->list_req_pending) {
+ /*
+ * This page isn't on a UPL yet.
+ * So it's safe to steal it here and dump it.
+ */
+ } else if (dst_page->busy || dst_page->cleaning) {
+ /*
+ * someone else is playing with the
+ * page, we will have to wait
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ /*
+ * need to look the page up again since its
+ * state may have changed while we slept...
+ * it might even belong to a different object
+ * at this point
+ */
+ continue;
+ }
+ if (dst_page->pmapped == TRUE)
+ pmap_disconnect(dst_page->phys_page);
+
+ VM_PAGE_FREE(dst_page);
+
+ } else if ((ops & UPL_ROP_ABSENT) && !dst_page->absent)
+ break;
+ } else if (ops & UPL_ROP_PRESENT)
+ break;
+
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ if (range) {
+ if (offset > offset_end)
+ offset = offset_end;
+ if(offset > offset_beg) {
+ *range = (uint32_t) (offset - offset_beg);
+ assert(*range == (offset - offset_beg));
+ } else {
+ *range = 0;
+ }
+ }
+ return KERN_SUCCESS;
+}
+
+
+uint32_t scan_object_collision = 0;
+
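+/*
+ * Take the object lock exclusively... if the pageout scan thread has
+ * announced that it wants this object, count the collision and pause
+ * briefly first so the scan has a chance to grab it.
+ */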
+void
+vm_object_lock(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ lck_rw_lock_exclusive(&object->Lock);
+}
+
+boolean_t
+vm_object_lock_avoid(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ return TRUE;
+ }
+ return FALSE;
+}
+
+boolean_t
+_vm_object_lock_try(vm_object_t object)
+{
+ return (lck_rw_try_lock_exclusive(&object->Lock));
+}
+
+boolean_t
+vm_object_lock_try(vm_object_t object)
+{
+ /*
+ * Called from the hibernate path, so check before blocking.
+ */
+ if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) {
+ mutex_pause(2);
+ }
+ return _vm_object_lock_try(object);
+}
+
+void
+vm_object_lock_shared(vm_object_t object)
+{
+ if (vm_object_lock_avoid(object)) {
+ mutex_pause(2);
+ }
+ lck_rw_lock_shared(&object->Lock);
+}
+
+boolean_t
+vm_object_lock_try_shared(vm_object_t object)
+{
+ if (vm_object_lock_avoid(object)) {
+ mutex_pause(2);
+ }
+ return (lck_rw_try_lock_shared(&object->Lock));
+}
+
+
+unsigned int vm_object_change_wimg_mode_count = 0;
+
+/*
+ * The object must be locked
+ */
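+/*
+ * e.g. a hypothetical caller switching an object's pages to an
+ * uncached mapping mode might do:
+ *
+ *	vm_object_lock(object);
+ *	vm_object_change_wimg_mode(object, VM_WIMG_IO);
+ *	vm_object_unlock(object);
+ */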
+void
+vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
+{
+ vm_page_t p;
+
+ vm_object_lock_assert_exclusive(object);
+
+ vm_object_paging_wait(object, THREAD_UNINT);
+
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+
+ if (!p->fictitious)
+ pmap_set_cache_attributes(p->phys_page, wimg_mode);
+ }
+ if (wimg_mode == VM_WIMG_USE_DEFAULT)
+ object->set_cache_attr = FALSE;
+ else
+ object->set_cache_attr = TRUE;
+
+ object->wimg_bits = wimg_mode;
+
+ vm_object_change_wimg_mode_count++;
+}
+
+#if CONFIG_FREEZE
+
+__private_extern__ void default_freezer_pack_page(vm_page_t , vm_object_t , vm_object_offset_t, void**);
+__private_extern__ void default_freezer_unpack(vm_object_t , void**);
+
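+/*
+ * Walk a source object on behalf of the freezer: count its purgeable,
+ * wired, clean and dirty pages and, when a compact_object is supplied,
+ * purge volatile objects and pack the dirty pages of singly-referenced
+ * objects into the compact object via the default freezer.
+ */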
+kern_return_t vm_object_pack(
+ unsigned int *purgeable_count,
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ boolean_t *shared,
+ vm_object_t src_object,
+ vm_object_t compact_object,
+ void **table,
+ vm_object_offset_t *offset)
+{
+ kern_return_t kr = KERN_SUCCESS;
+
+ vm_object_lock(src_object);
+
+ *purgeable_count = *wired_count = *clean_count = *dirty_count = 0;
+ *shared = FALSE;
+
+ if (!src_object->alive || src_object->terminating){
+ kr = KERN_FAILURE;
+ goto done;
+ }
+
+ if (src_object->purgable == VM_PURGABLE_VOLATILE) {
+ *purgeable_count = src_object->resident_page_count;
+
+ /* If the destination object is null, we're just walking the pages to discover how many can be hibernated */
+ if (VM_OBJECT_NULL != compact_object) {
+ purgeable_q_t queue;
+ /* object should be on a queue */
+ assert(src_object->objq.next != NULL &&
+ src_object->objq.prev != NULL);
+ queue = vm_purgeable_object_remove(src_object);
+ assert(queue);
+ vm_page_lock_queues();
+ vm_purgeable_token_delete_first(queue);
+ vm_page_unlock_queues();
+ vm_object_purge(src_object);
+ }
+ goto done;
+ }
+
+ if (src_object->ref_count == 1) {
+ vm_object_pack_pages(wired_count, clean_count, dirty_count, src_object, compact_object, table, offset);
+ } else {
+ if (src_object->internal) {
+ *shared = TRUE;
+ }
+ }
+done:
+ vm_object_unlock(src_object);
+
+ return kr;
+}
+
+
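+/*
+ * Helper for vm_object_pack()... walk the object's resident pages,
+ * counting wired, clean and dirty pages; when a compact_object is
+ * supplied, wire pages that are being cleaned, hand dirty pages to the
+ * default freezer and free the clean ones.
+ */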
+void
+vm_object_pack_pages(
+ unsigned int *wired_count,
+ unsigned int *clean_count,
+ unsigned int *dirty_count,
+ vm_object_t src_object,
+ vm_object_t compact_object,
+ void **table,
+ vm_object_offset_t *offset)
+{
+ vm_page_t p, next;
+
+ next = (vm_page_t)queue_first(&src_object->memq);
+
+ /* Since this function is dual-purpose (we use it both to count the
+ * freezable pages and to prepare them), assert that our arguments
+ * are sane. Gnarly, but it avoids code duplication.
+ */
+ if (VM_OBJECT_NULL == compact_object){
+ assert(!table);
+ assert(!offset);
+ } else {
+ assert(table);
+ assert(offset);
+ }
+
+ while (!queue_end(&src_object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ if (p->fictitious || p->busy )
+ continue;
+
+ if (p->absent || p->unusual || p->error)
+ continue;
+
+ if (VM_PAGE_WIRED(p)) {
+ (*wired_count)++;
+ continue;
+ }
+
+ if (VM_OBJECT_NULL == compact_object) {
+ if (p->dirty || pmap_is_modified(p->phys_page)) {
+ (*dirty_count)++;
+ } else {
+ (*clean_count)++;
+ }
+ continue;
+ }
+
+ if (p->cleaning) {
+ p->busy = TRUE;
+ p->pageout = TRUE;
+ p->dump_cleaning = TRUE;
+
+ vm_page_lockspin_queues();
+ vm_page_wire(p);
+ vm_page_unlock_queues();
+
+ continue;
+ }
+
+ if (p->pmapped == TRUE) {
+ int refmod_state;
+ refmod_state = pmap_disconnect(p->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ p->dirty = TRUE;
+ }
+ }
+
+ if (p->dirty) {
+ p->busy = TRUE;
+
+ default_freezer_pack_page(p, compact_object, *offset, table);
+ *offset += PAGE_SIZE;
+
+ (*dirty_count)++;
+ }
+ else {
+ VM_PAGE_FREE(p);
+ (*clean_count)++;
+ }
+ }
+}
+
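+/*
+ * Push all of the object's resident pages out to the pageout queues.
+ */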
+void
+vm_object_pageout(
+ vm_object_t object)
+{
+ vm_page_t p, next;
+
+ assert(object != VM_OBJECT_NULL );
+
+ vm_object_lock(object);
+
+ next = (vm_page_t)queue_first(&object->memq);
+
+ while (!queue_end(&object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ /* Throw to the pageout queue */
+ vm_page_lockspin_queues();
+
+ VM_PAGE_QUEUES_REMOVE(p);
+ vm_pageout_cluster(p);
+
+ vm_page_unlock_queues();
+ }
+
+ vm_object_unlock(object);
+}
+
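+/*
+ * Ask the pager to reclaim (page back in) the object's contents,
+ * blocking other access to the object while the reclaim is in progress.
+ */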
+kern_return_t
+vm_object_pagein(
+ vm_object_t object)
+{
+ memory_object_t pager;
+ kern_return_t kr;
+
+ vm_object_lock(object);
+
+ pager = object->pager;
+
+ if (!object->pager_ready || pager == MEMORY_OBJECT_NULL) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ vm_object_paging_wait(object, THREAD_UNINT);
+ vm_object_paging_begin(object);
+
+ object->blocked_access = TRUE;
+ vm_object_unlock(object);
+
+ kr = memory_object_data_reclaim(pager, TRUE);
+
+ vm_object_lock(object);
+
+ object->blocked_access = FALSE;
+ vm_object_paging_end(object);
+
+ vm_object_unlock(object);
+
+ return kr;
+}
+
+void
+vm_object_unpack(
+ vm_object_t compact_object,
+ void **table)
+{
+ /*
+ * Future Work:
+ * Right now we treat the default freezer much like
+ * the default pager with respect to when it is
+ * created and terminated.
+ * But, in the future, we may want to terminate the
+ * default freezer at the very instant that an object
+ * has been completely re-filled with all its previously
+ * paged-out pages.
+ * At that time we'll need to reset the object fields like
+ * "pager" and the associated "pager_{created,initialized,trusted}"
+ * fields right here.
+ */
+ default_freezer_unpack(compact_object, table);
+}
+
+#endif /* CONFIG_FREEZE */