+/*
+ * vm_object_do_bypass:
+ *
+ * Remove backing_object from object's shadow chain by making
+ * object shadow whatever backing_object was shadowing.
+ *
+ * In/out conditions:
+ *	object and backing_object are both locked exclusive on entry.
+ *	object is still locked on return; backing_object's lock is
+ *	always dropped (and backing_object may have been deallocated).
+ */
+static void
+vm_object_do_bypass(
+ vm_object_t object,
+ vm_object_t backing_object)
+{
+ /*
+ * Make the parent shadow the next object
+ * in the chain.
+ */
+
+ vm_object_lock_assert_exclusive(object);
+ vm_object_lock_assert_exclusive(backing_object);
+
+#if TASK_SWAPPER
+ /*
+ * Do object reference in-line to
+ * conditionally increment shadow's
+ * residence count. If object is not
+ * resident, leave residence count
+ * on shadow alone.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL) {
+ vm_object_lock(backing_object->shadow);
+ vm_object_lock_assert_exclusive(backing_object->shadow);
+ backing_object->shadow->ref_count++;
+ if (object->res_count != 0)
+ vm_object_res_reference(backing_object->shadow);
+ vm_object_unlock(backing_object->shadow);
+ }
+#else /* TASK_SWAPPER */
+ /* Take a reference on the new shadow before we point at it. */
+ vm_object_reference(backing_object->shadow);
+#endif /* TASK_SWAPPER */
+
+ assert(!object->phys_contiguous);
+ assert(!backing_object->phys_contiguous);
+ object->shadow = backing_object->shadow;
+ if (object->shadow) {
+ /* offsets through the chain accumulate */
+ object->vo_shadow_offset += backing_object->vo_shadow_offset;
+ } else {
+ /* no shadow, therefore no shadow offset... */
+ object->vo_shadow_offset = 0;
+ }
+
+ /*
+ * Backing object might have had a copy pointer
+ * to us. If it did, clear it.
+ */
+ if (backing_object->copy == object) {
+ backing_object->copy = VM_OBJECT_NULL;
+ }
+
+ /*
+ * Drop the reference count on backing_object.
+#if TASK_SWAPPER
+ * Since its ref_count was at least 2, it
+ * will not vanish; so we don't need to call
+ * vm_object_deallocate.
+ * [with a caveat for "named" objects]
+ *
+ * The res_count on the backing object is
+ * conditionally decremented. It's possible
+ * (via vm_pageout_scan) to get here with
+ * a "swapped" object, which has a 0 res_count,
+ * in which case, the backing object res_count
+ * is already down by one.
+#else
+ * Don't call vm_object_deallocate unless
+ * ref_count drops to zero.
+ *
+ * The ref_count can drop to zero here if the
+ * backing object could be bypassed but not
+ * collapsed, such as when the backing object
+ * is temporary and cachable.
+#endif
+ */
+ if (backing_object->ref_count > 2 ||
+ (!backing_object->named && backing_object->ref_count > 1)) {
+ /* Fast path: just drop the count in-line, object can't die. */
+ vm_object_lock_assert_exclusive(backing_object);
+ backing_object->ref_count--;
+#if TASK_SWAPPER
+ if (object->res_count != 0)
+ vm_object_res_deallocate(backing_object);
+ assert(backing_object->ref_count > 0);
+#endif /* TASK_SWAPPER */
+ vm_object_unlock(backing_object);
+ } else {
+
+ /*
+ * Drop locks so that we can deallocate
+ * the backing object.
+ */
+
+#if TASK_SWAPPER
+ if (object->res_count == 0) {
+ /* XXX get a reference for the deallocate below */
+ vm_object_res_reference(backing_object);
+ }
+#endif /* TASK_SWAPPER */
+ /*
+ * vm_object_collapse (the caller of this function) is
+ * now called from contexts that may not guarantee that a
+ * valid reference is held on the object... w/o a valid
+ * reference, it is unsafe and unwise (you will definitely
+ * regret it) to unlock the object and then retake the lock
+ * since the object may be terminated and recycled in between.
+ * The "activity_in_progress" reference will keep the object
+ * 'stable'.
+ */
+ vm_object_activity_begin(object);
+ vm_object_unlock(object);
+
+ vm_object_unlock(backing_object);
+ vm_object_deallocate(backing_object);
+
+ /*
+ * Relock object. We don't have to reverify
+ * its state since vm_object_collapse will
+ * do that for us as it starts at the
+ * top of its loop.
+ */
+
+ vm_object_lock(object);
+ vm_object_activity_end(object);
+ }
+
+ object_bypasses++;
+}
+
+
+/*
+ * vm_object_collapse:
+ *
+ * Perform an object collapse or an object bypass if appropriate.
+ * The real work of collapsing and bypassing is performed in
+ * the routines vm_object_do_collapse and vm_object_do_bypass.
+ *
+ * Requires that the object be locked and the page queues be unlocked.
+ *
+ */
+/* Statistics for vm_object_collapse: calls made, objects examined,
+ * and how many full collapses vs. bypasses were performed. */
+static unsigned long vm_object_collapse_calls = 0;
+static unsigned long vm_object_collapse_objects = 0;
+static unsigned long vm_object_collapse_do_collapse = 0;
+static unsigned long vm_object_collapse_do_bypass = 0;
+
+/*
+ * object:		top of the shadow chain; locked exclusive by the
+ *			caller and still locked on return (though the lock
+ *			may have been dropped and retaken internally).
+ * hint_offset:		offset at which to start the "shows through" scan
+ *			when evaluating a bypass.
+ * can_bypass:		TRUE if a bypass (vm_object_do_bypass) may be
+ *			attempted in addition to a full collapse.
+ */
+__private_extern__ void
+vm_object_collapse(
+ vm_object_t object,
+ vm_object_offset_t hint_offset,
+ boolean_t can_bypass)
+{
+ vm_object_t backing_object;
+ unsigned int rcount;
+ unsigned int size;
+ vm_object_t original_object;
+ int object_lock_type;
+ int backing_object_lock_type;
+
+ vm_object_collapse_calls++;
+
+ if (! vm_object_collapse_allowed &&
+ ! (can_bypass && vm_object_bypass_allowed)) {
+ return;
+ }
+
+ XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
+ object, 0,0,0,0);
+
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ original_object = object;
+
+ /*
+ * The top object was locked "exclusive" by the caller.
+ * In the first pass, to determine if we can collapse the shadow chain,
+ * take a "shared" lock on the shadow objects. If we can collapse,
+ * we'll have to go down the chain again with exclusive locks.
+ */
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_SHARED;
+
+retry:
+ object = original_object;
+ vm_object_lock_assert_exclusive(object);
+
+ while (TRUE) {
+ vm_object_collapse_objects++;
+ /*
+ * Verify that the conditions are right for either
+ * collapse or bypass:
+ */
+
+ /*
+ * There is a backing object, and
+ */
+
+ backing_object = object->shadow;
+ if (backing_object == VM_OBJECT_NULL) {
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ return;
+ }
+ if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
+ vm_object_lock_shared(backing_object);
+ } else {
+ vm_object_lock(backing_object);
+ }
+
+ /*
+ * No pages in the object are currently
+ * being paged out, and
+ */
+ if (object->paging_in_progress != 0 ||
+ object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * ...
+ * The backing object is not read_only,
+ * and no pages in the backing object are
+ * currently being paged out.
+ * The backing object is internal.
+ *
+ */
+
+ if (!backing_object->internal ||
+ backing_object->paging_in_progress != 0 ||
+ backing_object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * Purgeable objects are not supposed to engage in
+ * copy-on-write activities, so should not have
+ * any shadow objects or be a shadow object to another
+ * object.
+ * Collapsing a purgeable object would require some
+ * updates to the purgeable compressed ledgers.
+ */
+ if (object->purgable != VM_PURGABLE_DENY ||
+ backing_object->purgable != VM_PURGABLE_DENY) {
+ panic("vm_object_collapse() attempting to collapse "
+ "purgeable object: %p(%d) %p(%d)\n",
+ object, object->purgable,
+ backing_object, backing_object->purgable);
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * The backing object can't be a copy-object:
+ * the shadow_offset for the copy-object must stay
+ * as 0. Furthermore (for the 'we have all the
+ * pages' case), if we bypass backing_object and
+ * just shadow the next object in the chain, old
+ * pages from that object would then have to be copied
+ * BOTH into the (former) backing_object and into the
+ * parent object.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL &&
+ backing_object->shadow->copy == backing_object) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * We can now try to either collapse the backing
+ * object (if the parent is the only reference to
+ * it) or (perhaps) remove the parent's reference
+ * to it.
+ *
+ * If there is exactly one reference to the backing
+ * object, we may be able to collapse it into the
+ * parent.
+ *
+ * As long as one of the objects is still not known
+ * to the pager, we can collapse them.
+ */
+ if (backing_object->ref_count == 1 &&
+ (vm_object_collapse_compressor_allowed ||
+ !object->pager_created
+ || (!backing_object->pager_created)
+ ) && vm_object_collapse_allowed) {
+
+ /*
+ * We need the exclusive lock on the VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ /*
+ * We have an object and its shadow locked
+ * "shared". We can't just upgrade the locks
+ * to "exclusive", as some other thread might
+ * also have these objects locked "shared" and
+ * attempt to upgrade one or the other to
+ * "exclusive". The upgrades would block
+ * forever waiting for the other "shared" locks
+ * to get released.
+ * So we have to release the locks and go
+ * down the shadow chain again (since it could
+ * have changed) with "exclusive" locking.
+ */
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
+ backing_object, object,
+ backing_object->pager,
+ backing_object->pager_control, 0);
+
+ /*
+ * Collapse the object with its backing
+ * object, and try again with the object's
+ * new backing object.
+ */
+
+ vm_object_do_collapse(object, backing_object);
+ vm_object_collapse_do_collapse++;
+ continue;
+ }
+
+ /*
+ * Collapsing the backing object was not possible
+ * or permitted, so let's try bypassing it.
+ */
+
+ if (! (can_bypass && vm_object_bypass_allowed)) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+
+ /*
+ * If the object doesn't have all its pages present,
+ * we have to make sure no pages in the backing object
+ * "show through" before bypassing it.
+ */
+ size = (unsigned int)atop(object->vo_size);
+ rcount = object->resident_page_count;
+
+ if (rcount != size) {
+ vm_object_offset_t offset;
+ vm_object_offset_t backing_offset;
+ unsigned int backing_rcount;
+
+ /*
+ * If the backing object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (backing_object->pager_created) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (object->pager_created) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ backing_offset = object->vo_shadow_offset;
+ backing_rcount = backing_object->resident_page_count;
+
+ if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+ /*
+ * we have enough pages in the backing object to guarantee that
+ * at least 1 of them must be 'uncovered' by a resident page
+ * in the object we're evaluating, so move on and
+ * try to collapse the rest of the shadow chain
+ */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+ * If the backing object has existence info,
+ * we must examine its existence info
+ * as well.
+ *
+ */
+
+/*
+ * TRUE if the page at "off" exists in "obj" (either resident or known
+ * to the compressor pager). NOTE: decrements "rc" as a side effect
+ * when it finds a resident page, to allow early loop termination.
+ */
+#define EXISTS_IN_OBJECT(obj, off, rc) \
+ ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+
+ /*
+ * Check the hint location first
+ * (since it is often the quickest way out of here).
+ */
+ if (object->cow_hint != ~(vm_offset_t)0)
+ hint_offset = (vm_object_offset_t)object->cow_hint;
+ else
+ hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
+ (hint_offset - 8 * PAGE_SIZE_64) : 0;
+
+ if (EXISTS_IN_OBJECT(backing_object, hint_offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
+ /* dependency right at the hint */
+ object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object's window onto the backing_object
+ * is large compared to the number of resident
+ * pages in the backing object, it makes sense to
+ * walk the backing_object's resident pages first.
+ *
+ * NOTE: Pages may be in both the existence map and/or
+ * resident, so if we don't find a dependency while
+ * walking the backing object's resident page list
+ * directly, and there is an existence map, we'll have
+ * to run the offset based 2nd pass. Because we may
+ * have to run both passes, we need to be careful
+ * not to decrement 'rcount' in the 1st pass
+ */
+ if (backing_rcount && backing_rcount < (size / 8)) {
+ unsigned int rc = rcount;
+ vm_page_t p;
+
+ backing_rcount = backing_object->resident_page_count;
+ p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
+ do {
+ offset = (p->offset - backing_offset);
+
+ if (offset < object->vo_size &&
+ offset != hint_offset &&
+ !EXISTS_IN_OBJECT(object, offset, rc)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+
+ break;
+ }
+ p = (vm_page_t) vm_page_queue_next(&p->listq);
+
+ } while (--backing_rcount);
+ /* non-zero means we broke out early: a page shows through */
+ if (backing_rcount != 0 ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+
+ /*
+ * Walk through the offsets looking for pages in the
+ * backing object that show through to the object.
+ */
+ if (backing_rcount) {
+ offset = hint_offset;
+
+ while((offset =
+ (offset + PAGE_SIZE_64 < object->vo_size) ?
+ (offset + PAGE_SIZE_64) : 0) != hint_offset) {
+
+ if (EXISTS_IN_OBJECT(backing_object, offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, offset, rcount)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+ break;
+ }
+ }
+ if (offset != hint_offset) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * We need "exclusive" locks on the 2 VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ /* reset the offset hint for any objects deeper in the chain */
+ object->cow_hint = (vm_offset_t)0;
+
+ /*
+ * All interesting pages in the backing object
+ * already live in the parent or its pager.
+ * Thus we can bypass the backing object.
+ */
+
+ vm_object_do_bypass(object, backing_object);
+ vm_object_collapse_do_bypass++;
+
+ /*
+ * Try again with this object's new backing object.
+ */
+
+ continue;
+ }
+
+ /* NOT REACHED */
+ /*
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ */
+}
+
+/*
+ * Routine: vm_object_page_remove: [internal]
+ * Purpose:
+ * Removes all physical pages in the specified
+ * object range from the object's list of pages.
+ *
+ * In/out conditions:
+ * The object must be locked.
+ * The object must not have paging_in_progress, usually
+ * guaranteed by not having a pager.
+ */
+/* Statistics: how often vm_object_page_remove chose hash lookups
+ * vs. iterating the object's resident page list. */
+unsigned int vm_object_page_remove_lookup = 0;
+unsigned int vm_object_page_remove_iterate = 0;
+
+__private_extern__ void
+vm_object_page_remove(
+ vm_object_t object,
+ vm_object_offset_t start,
+ vm_object_offset_t end)
+{
+ vm_page_t page, next_page;
+
+ /*
+ * Removals of one or two pages are by far the most common,
+ * so for small ranges it is cheaper to probe the page hash
+ * than to walk the whole resident list. The divisor of 16
+ * is somewhat arbitrary; it balances lookup cost against
+ * iteration cost.
+ */
+ if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
+ vm_object_page_remove_lookup++;
+
+ /* Probe each page-aligned offset in [start, end). */
+ for (; start < end; start += PAGE_SIZE_64) {
+ page = vm_page_lookup(object, start);
+ if (page == VM_PAGE_NULL)
+ continue;
+ assert(!page->cleaning && !page->laundry);
+ if (!page->fictitious && page->pmapped)
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
+ VM_PAGE_FREE(page);
+ }
+ } else {
+ vm_object_page_remove_iterate++;
+
+ /* Walk the resident list, freeing pages inside the range. */
+ for (page = (vm_page_t) vm_page_queue_first(&object->memq);
+ !vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) page);
+ page = next_page) {
+ next_page = (vm_page_t) vm_page_queue_next(&page->listq);
+ if (start <= page->offset && page->offset < end) {
+ assert(!page->cleaning && !page->laundry);
+ if (!page->fictitious && page->pmapped)
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
+ VM_PAGE_FREE(page);
+ }
+ }
+ }
+}
+
+
+/*
+ * Routine: vm_object_coalesce
+ * Function: Coalesces two objects backing up adjoining
+ * regions of memory into a single object.
+ *
+ * returns TRUE if objects were combined.
+ *
+ * NOTE: Only works at the moment if the second object is NULL -
+ * if it's not, which object do we lock first?
+ *
+ * Parameters:
+ * prev_object First object to coalesce
+ * prev_offset Offset into prev_object
+ * next_object Second object into coalesce
+ * next_offset Offset into next_object
+ *
+ * prev_size Size of reference to prev_object
+ * next_size Size of reference to next_object
+ *
+ * Conditions:
+ * The object(s) must *not* be locked. The map must be locked
+ * to preserve the reference to the object(s).
+ */
+/* Statistics: number of successful coalesce operations. */
+static int vm_object_coalesce_count = 0;
+
+__private_extern__ boolean_t
+vm_object_coalesce(
+ vm_object_t prev_object,
+ vm_object_t next_object,
+ vm_object_offset_t prev_offset,
+ __unused vm_object_offset_t next_offset,
+ vm_object_size_t prev_size,
+ vm_object_size_t next_size)
+{
+ vm_object_size_t new_size;
+
+#ifdef lint
+ next_offset++;
+#endif /* lint */
+
+ /* Coalescing into a real second object is not supported. */
+ if (next_object != VM_OBJECT_NULL) {
+ return FALSE;
+ }
+
+ /* A null previous object trivially coalesces. */
+ if (prev_object == VM_OBJECT_NULL) {
+ return TRUE;
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
+ prev_object, prev_offset, prev_size, next_size, 0);
+
+ vm_object_lock(prev_object);
+
+ /* Give the shadow chain a chance to shrink first. */
+ vm_object_collapse(prev_object, prev_offset, TRUE);
+
+ /*
+ * Refuse to coalesce if pages not mapped through prev_entry
+ * could be in use in any way:
+ * - extra references
+ * - pager created (pages may be paged out)
+ * - shadows another object
+ * - has a copy elsewhere
+ * - is truly shared
+ * - is purgeable
+ * - paging/activity in progress (pages might be in a page list)
+ */
+ if (prev_object->ref_count > 1 ||
+ prev_object->pager_created ||
+ prev_object->shadow != VM_OBJECT_NULL ||
+ prev_object->copy != VM_OBJECT_NULL ||
+ prev_object->true_share != FALSE ||
+ prev_object->purgable != VM_PURGABLE_DENY ||
+ prev_object->paging_in_progress != 0 ||
+ prev_object->activity_in_progress != 0) {
+ vm_object_unlock(prev_object);
+ return FALSE;
+ }
+
+ vm_object_coalesce_count++;
+
+ /*
+ * Flush any stale pages that may linger in the range being
+ * taken over, left behind by a previous deallocation.
+ */
+ vm_object_page_remove(prev_object,
+ prev_offset + prev_size,
+ prev_offset + prev_size + next_size);
+
+ /* Grow the object if the coalesced range extends past its end. */
+ new_size = prev_offset + prev_size + next_size;
+ if (new_size > prev_object->vo_size) {
+ prev_object->vo_size = new_size;
+ }
+
+ vm_object_unlock(prev_object);
+ return TRUE;
+}
+
+/*
+ * vm_object_populate_with_private:
+ *
+ * Point "size" bytes of a private object, starting at "offset",
+ * at the physically contiguous run of pages beginning at
+ * "phys_page". For non-contiguous objects, existing fictitious
+ * pages are converted to private pages and re-pointed; missing
+ * pages are created. For phys_contiguous objects, only the
+ * object's shadow offset and size are updated.
+ *
+ * Returns KERN_FAILURE if the object is not private or the offset
+ * is not page-aligned; KERN_SUCCESS otherwise.
+ */
+kern_return_t
+vm_object_populate_with_private(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ ppnum_t phys_page,
+ vm_size_t size)
+{
+ ppnum_t base_page;
+ vm_object_offset_t base_offset;
+
+
+ if (!object->private)
+ return KERN_FAILURE;
+
+ base_page = phys_page;
+
+ vm_object_lock(object);
+
+ if (!object->phys_contiguous) {
+ vm_page_t m;
+
+ /* offset must be page-aligned */
+ if ((base_offset = trunc_page_64(offset)) != offset) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+ base_offset += object->paging_offset;
+
+ /* walk the range one page at a time */
+ while (size) {
+ m = vm_page_lookup(object, base_offset);
+
+ if (m != VM_PAGE_NULL) {
+ if (m->fictitious) {
+ if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
+
+ /* "private" transition requires the queues lock */
+ vm_page_lockspin_queues();
+ m->private = TRUE;
+ vm_page_unlock_queues();
+
+ m->fictitious = FALSE;
+ VM_PAGE_SET_PHYS_PAGE(m, base_page);
+ }
+ } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
+
+ if ( !m->private) {
+ /*
+ * we'd leak a real page... that can't be right
+ */
+ panic("vm_object_populate_with_private - %p not private", m);
+ }
+ if (m->pmapped) {
+ /*
+ * pmap call to clear old mapping
+ */
+ pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
+ }
+ VM_PAGE_SET_PHYS_PAGE(m, base_page);
+ }
+ if (m->encrypted) {
+ /*
+ * we should never see this on a fictitious or private page
+ */
+ panic("vm_object_populate_with_private - %p encrypted", m);
+ }
+
+ } else {
+ /* no page yet: manufacture a private fictitious one */
+ while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+ /*
+ * private normally requires lock_queues but since we
+ * are initializing the page, its not necessary here
+ */
+ m->private = TRUE;
+ m->fictitious = FALSE;
+ VM_PAGE_SET_PHYS_PAGE(m, base_page);
+ m->unusual = TRUE;
+ m->busy = FALSE;
+
+ vm_page_insert(m, object, base_offset);
+ }
+ base_page++; /* Go to the next physical page */
+ base_offset += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ } else {
+ /* NOTE: we should check the original settings here */
+ /* if we have a size > zero a pmap call should be made */
+ /* to disable the range */
+
+ /* pmap_? */
+
+ /* shadows on contiguous memory are not allowed */
+ /* we therefore can use the offset field */
+ object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
+ object->vo_size = size;
+ }
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * memory_object_free_from_cache:
+ *
+ * Walk the vm_object cache list, removing and freeing vm_objects
+ * which are backed by the pager identified by the caller, (pager_ops).
+ * Remove up to "count" objects, if that many are available
+ * in the cache.
+ *
+ * Walk the list at most once, return the number of vm_objects
+ * actually freed.
+ */
+
+__private_extern__ kern_return_t
+memory_object_free_from_cache(
+ __unused host_t host,
+ __unused memory_object_pager_ops_t pager_ops,
+ int *count)
+{
+#if VM_OBJECT_CACHE
+ int object_released = 0;
+
+ vm_object_t object = VM_OBJECT_NULL;
+ vm_object_t shadow;
+
+/*
+ if(host == HOST_NULL)
+ return(KERN_INVALID_ARGUMENT);
+*/
+
+ try_again:
+ vm_object_cache_lock();
+
+ /*
+ * Walk the cached list for objects backed by this pager.
+ * Each removal drops the cache lock, so we restart the
+ * walk from the top (goto try_again) after every hit.
+ */
+ queue_iterate(&vm_object_cached_list, object,
+ vm_object_t, cached_list) {
+ if (object->pager &&
+ (pager_ops == object->pager->mo_pager_ops)) {
+ vm_object_lock(object);
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+
+ vm_object_cache_unlock();
+ /*
+ * Since this object is in the cache, we know
+ * that it is initialized and has only a pager's
+ * (implicit) reference. Take a reference to avoid
+ * recursive deallocations.
+ */
+
+ assert(object->pager_initialized);
+ assert(object->ref_count == 0);
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+
+ /*
+ * Terminate the object.
+ * If the object had a shadow, we let
+ * vm_object_deallocate deallocate it.
+ * "pageout" objects have a shadow, but
+ * maintain a "paging reference" rather
+ * than a normal reference.
+ * (We are careful here to limit recursion.)
+ */
+ shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
+
+ if ((vm_object_terminate(object) == KERN_SUCCESS)
+ && (shadow != VM_OBJECT_NULL)) {
+ vm_object_deallocate(shadow);
+ }
+
+ /* NOTE: on this early return, *count is left unchanged */
+ if(object_released++ == *count)
+ return KERN_SUCCESS;
+ goto try_again;
+ }
+ }
+ vm_object_cache_unlock();
+ /* report how many objects were actually freed */
+ *count = object_released;
+#else
+ *count = 0;
+#endif
+ return KERN_SUCCESS;
+}
+
+
+
+kern_return_t
+memory_object_create_named(
+ memory_object_t pager,
+ memory_object_offset_t size,
+ memory_object_control_t *control)
+{
+ vm_object_t object;
+ vm_object_hash_entry_t entry;
+ lck_mtx_t *lck;
+
+ *control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager == MEMORY_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ /*
+ * Only one named right may ever be handed out per pager;
+ * finding a live object already marked "named" means the
+ * caller already holds it.
+ */
+ lck = vm_object_hash_lock_spin(pager);
+ entry = vm_object_hash_lookup(pager, FALSE);
+ if (entry != VM_OBJECT_HASH_ENTRY_NULL &&
+ entry->object != VM_OBJECT_NULL &&
+ entry->object->named == TRUE) {
+ panic("memory_object_create_named: caller already holds the right");
+ }
+ vm_object_hash_unlock(lck);
+
+ object = vm_object_enter(pager, size, FALSE, FALSE, TRUE);
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_OBJECT;
+
+ /* Mark the object named and wait for its pager to be ready. */
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ object->named = TRUE;
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ *control = object->pager_control;
+ vm_object_unlock(object);
+ }
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * Routine: memory_object_recover_named [user interface]
+ * Purpose:
+ * Attempt to recover a named reference for a VM object.
+ * VM will verify that the object has not already started
+ * down the termination path, and if it has, will optionally
+ * wait for that to finish.
+ * Returns:
+ * KERN_SUCCESS - we recovered a named reference on the object
+ * KERN_FAILURE - we could not recover a reference (object dead)
+ * KERN_INVALID_ARGUMENT - bad memory object control
+ */
+kern_return_t
+memory_object_recover_named(
+ memory_object_control_t control,
+ boolean_t wait_on_terminating)
+{
+ vm_object_t object;
+
+ object = memory_object_control_to_vm_object(control);
+ if (object == VM_OBJECT_NULL) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+restart:
+ vm_object_lock(object);
+
+ /* optionally wait out an in-flight termination, then re-check */
+ if (object->terminating && wait_on_terminating) {
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+ THREAD_UNINT);
+ goto restart;
+ }
+
+ if (!object->alive) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ /* already named: nothing to recover */
+ if (object->named == TRUE) {
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+#if VM_OBJECT_CACHE
+ /*
+ * A zero-ref, non-terminating object sits on the cached list;
+ * pull it off before resurrecting it. The cache lock must be
+ * tried without blocking to respect lock ordering.
+ */
+ if ((object->ref_count == 0) && (!object->terminating)) {
+ if (!vm_object_cache_lock_try()) {
+ vm_object_unlock(object);
+ goto restart;
+ }
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ XPR(XPR_VM_OBJECT_CACHE,
+ "memory_object_recover_named: removing %X, head (%X, %X)\n",
+ object,
+ vm_object_cached_list.next,
+ vm_object_cached_list.prev, 0,0);
+
+ vm_object_cache_unlock();
+ }
+#endif
+ object->named = TRUE;
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+ vm_object_res_reference(object);
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ vm_object_unlock(object);
+ return (KERN_SUCCESS);
+}
+
+
+/*
+ * vm_object_release_name:
+ *
+ * Enforces name semantic on memory_object reference count decrement
+ * This routine should not be called unless the caller holds a name
+ * reference gained through the memory_object_create_named.
+ *
+ * If the TERMINATE_IDLE flag is set, the call will return if the
+ * reference count is not 1. i.e. idle with the only remaining reference
+ * being the name.
+ * If the decision is made to proceed the name field flag is set to
+ * false and the reference count is decremented. If the RESPECT_CACHE
+ * flag is set and the reference count has gone to zero, the
+ * memory_object is checked to see if it is cacheable otherwise when
+ * the reference count is zero, it is simply terminated.
+ */
+
+__private_extern__ kern_return_t
+vm_object_release_name(
+ vm_object_t object,
+ int flags)
+{
+ vm_object_t shadow;
+ boolean_t original_object = TRUE;
+
+ /*
+ * Walk down the shadow chain: the named reference is released
+ * on the original object; if that terminates the object, its
+ * shadow's reference must be released in turn, and so on.
+ */
+ while (object != VM_OBJECT_NULL) {
+
+ vm_object_lock(object);
+
+ assert(object->alive);
+ if (original_object)
+ assert(object->named);
+ assert(object->ref_count > 0);
+
+ /*
+ * We have to wait for initialization before
+ * destroying or caching the object.
+ */
+
+ if (object->pager_created && !object->pager_initialized) {
+ assert(!object->can_persist);
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ vm_object_unlock(object);
+ thread_block(THREAD_CONTINUE_NULL);
+ continue;
+ }
+
+ /* TERMINATE_IDLE: refuse unless the name is the last reference */
+ if (((object->ref_count > 1)
+ && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
+ || (object->terminating)) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ } else {
+ if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+ }
+
+ /* RESPECT_CACHE: let vm_object_deallocate decide caching */
+ if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
+ (object->ref_count == 1)) {
+ if (original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ /* let vm_object_deallocate push this thing into */
+ /* the cache, if that it is where it is bound */
+ vm_object_deallocate(object);
+ return KERN_SUCCESS;
+ }
+ VM_OBJ_RES_DECR(object);
+ shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
+
+ if (object->ref_count == 1) {
+ if (vm_object_terminate(object) != KERN_SUCCESS) {
+ if (original_object) {
+ return KERN_FAILURE;
+ } else {
+ return KERN_SUCCESS;
+ }
+ }
+ /* terminated: drop the shadow's reference next */
+ if (shadow != VM_OBJECT_NULL) {
+ original_object = FALSE;
+ object = shadow;
+ continue;
+ }
+ return KERN_SUCCESS;
+ } else {
+ /* other references remain: just drop ours */
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count--;
+ assert(object->ref_count > 0);
+ if(original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+ }
+ /*NOTREACHED*/
+ assert(0);
+ return KERN_FAILURE;
+}
+
+
+__private_extern__ kern_return_t
+vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot)
+{
+ __unused boolean_t should_flush;
+
+ should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
+
+ XPR(XPR_MEMORY_OBJECT,
+ "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
+ object, offset, size,
+ (((should_return&1)<<1)|should_flush), prot);
+
+ /* Sanity-check the arguments before touching the object. */
+ if (object == VM_OBJECT_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ size = round_page_64(size);
+
+ /*
+ * Hold the object locked with a paging reference so the
+ * memory_object reference cannot be released underneath us
+ * while the update runs.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ (void)vm_object_update(object,
+ offset, size, NULL, NULL, should_return, flags, prot);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Empty a purgeable object by grabbing the physical pages assigned to it and
+ * putting them on the free queue without writing them to backing store, etc.
+ * When the pages are next touched they will be demand zero-fill pages. We
+ * skip pages which are busy, being paged in/out, wired, etc. We do _not_
+ * skip referenced/dirty pages, pages on the active queue, etc. We're more
+ * than happy to grab these since this is a purgeable object. We mark the
+ * object as "empty" after reaping its pages.
+ *
+ * On entry the object must be locked and it must be
+ * purgeable with no delayed copies pending.
+ */
+/*
+ * vm_object_purge:
+ *
+ * Reap the resident pages (and, when safe, the compressed pages) of a
+ * purgeable object and mark it VM_PURGABLE_EMPTY.  See the block
+ * comment above for the full contract.
+ *
+ * "flags" is passed through to vm_compressor_pager_reap_pages();
+ * C_DONT_BLOCK is the only flag this function inspects directly.
+ *
+ * On entry the object must be locked exclusively; it stays locked
+ * throughout.  A VM_PURGABLE_DENY object is left untouched.
+ */
+void
+vm_object_purge(vm_object_t object, int flags)
+{
+ unsigned int object_page_count = 0;
+ unsigned int pgcount = 0;
+ boolean_t skipped_object = FALSE;
+
+ vm_object_lock_assert_exclusive(object);
+
+ if (object->purgable == VM_PURGABLE_DENY)
+ return;
+
+ /* purgeable objects can never have delayed copies */
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+
+ /*
+ * We need to set the object's state to VM_PURGABLE_EMPTY *before*
+ * reaping its pages. We update vm_page_purgeable_count in bulk
+ * and we don't want vm_page_remove() to update it again for each
+ * page we reap later.
+ *
+ * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
+ * are all accounted for in the "volatile" ledgers, so this does not
+ * make any difference.
+ * If we transitioned directly from NONVOLATILE to EMPTY,
+ * vm_page_purgeable_count must have been updated when the object
+ * was dequeued from its volatile queue and the purgeable ledgers
+ * must have also been updated accordingly at that time (in
+ * vm_object_purgable_control()).
+ */
+ if (object->purgable == VM_PURGABLE_VOLATILE) {
+ unsigned int delta;
+ assert(object->resident_page_count >=
+ object->wired_page_count);
+ /* only non-wired pages are counted as purgeable */
+ delta = (object->resident_page_count -
+ object->wired_page_count);
+ if (delta != 0) {
+ assert(vm_page_purgeable_count >=
+ delta);
+ OSAddAtomic(-delta,
+ (SInt32 *)&vm_page_purgeable_count);
+ }
+ if (object->wired_page_count != 0) {
+ assert(vm_page_purgeable_wired_count >=
+ object->wired_page_count);
+ OSAddAtomic(-object->wired_page_count,
+ (SInt32 *)&vm_page_purgeable_wired_count);
+ }
+ object->purgable = VM_PURGABLE_EMPTY;
+ }
+ assert(object->purgable == VM_PURGABLE_EMPTY);
+
+ /* remember the resident count for the trace point below */
+ object_page_count = object->resident_page_count;
+
+ vm_object_reap_pages(object, REAP_PURGEABLE);
+
+ if (object->pager != NULL) {
+
+ /* a purgeable object's pager can only be the compressor */
+ assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
+
+ if (object->activity_in_progress == 0 &&
+ object->paging_in_progress == 0) {
+ /*
+ * Also reap any memory coming from this object
+ * in the VM compressor.
+ *
+ * There are no operations in progress on the VM object
+ * and no operation can start while we're holding the
+ * VM object lock, so it's safe to reap the compressed
+ * pages and update the page counts.
+ */
+ pgcount = vm_compressor_pager_get_count(object->pager);
+ if (pgcount) {
+ pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
+ vm_compressor_pager_count(object->pager,
+ -pgcount,
+ FALSE, /* shared */
+ object);
+ vm_purgeable_compressed_update(object,
+ -pgcount);
+ }
+ if ( !(flags & C_DONT_BLOCK)) {
+ /* a blocking reap must have emptied the pager */
+ assert(vm_compressor_pager_get_count(object->pager)
+ == 0);
+ }
+ } else {
+ /*
+ * There's some kind of paging activity in progress
+ * for this object, which could result in a page
+ * being compressed or decompressed, possibly while
+ * the VM object is not locked, so it could race
+ * with us.
+ *
+ * We can't really synchronize this without possibly
+ * causing a deadlock when the compressor needs to
+ * allocate or free memory while compressing or
+ * decompressing a page from a purgeable object
+ * mapped in the kernel_map...
+ *
+ * So let's not attempt to purge the compressor
+ * pager if there's any kind of operation in
+ * progress on the VM object.
+ */
+ skipped_object = TRUE;
+ }
+ }
+
+ vm_object_lock_assert_exclusive(object);
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
+ VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
+ object_page_count,
+ pgcount,
+ skipped_object,
+ 0);
+
+}
+
+
+/*
+ * vm_object_purgeable_control() allows the caller to control and investigate the
+ * state of a purgeable object. A purgeable object is created via a call to
+ * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
+ * never be coalesced with any other object -- even other purgeable objects --
+ * and will thus always remain a distinct object. A purgeable object has
+ * special semantics when its reference count is exactly 1. If its reference
+ * count is greater than 1, then a purgeable object will behave like a normal
+ * object and attempts to use this interface will result in an error return
+ * of KERN_INVALID_ARGUMENT.
+ *
+ * A purgeable object may be put into a "volatile" state which will make the
+ * object's pages eligible for being reclaimed without paging to backing
+ * store if the system runs low on memory. If the pages in a volatile
+ * purgeable object are reclaimed, the purgeable object is said to have been
+ * "emptied." When a purgeable object is emptied the system will reclaim as
+ * many pages from the object as it can in a convenient manner (pages already
+ * en route to backing store or busy for other reasons are left as is). When
+ * a purgeable object is made volatile, its pages will generally be reclaimed
+ * before other pages in the application's working set. This semantic is
+ * generally used by applications which can recreate the data in the object
+ * faster than it can be paged in. One such example might be media assets
+ * which can be reread from a much faster RAID volume.
+ *
+ * A purgeable object may be designated as "non-volatile" which means it will
+ * behave like all other objects in the system with pages being written to and
+ * read from backing store as needed to satisfy system memory needs. If the
+ * object was emptied before the object was made non-volatile, that fact will
+ * be returned as the old state of the purgeable object (see
+ * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
+ * were reclaimed as part of emptying the object will be refaulted in as
+ * zero-fill on demand. It is up to the application to note that an object
+ * was emptied and recreate the objects contents if necessary. When a
+ * purgeable object is made non-volatile, its pages will generally not be paged
+ * out to backing store in the immediate future. A purgeable object may also
+ * be manually emptied.
+ *
+ * Finally, the current state (non-volatile, volatile, volatile & empty) of a
+ * volatile purgeable object may be queried at any time. This information may
+ * be used as a control input to let the application know when the system is
+ * experiencing memory pressure and is reclaiming memory.
+ *
+ * The specified address may be any address within the purgeable object. If
+ * the specified address does not represent any object in the target task's
+ * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
+ * object containing the specified address is not a purgeable object, then
+ * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
+ * returned.
+ *
+ * The control parameter may be any one of VM_PURGABLE_SET_STATE or
+ * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
+ * state is used to set the new state of the purgeable object and return its
+ * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
+ * object is returned in the parameter state.
+ *
+ * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
+ * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
+ * the non-volatile, volatile and volatile/empty states described above.
+ * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
+ * immediately reclaim as many pages in the object as can be conveniently
+ * collected (some may have already been written to backing store or be
+ * otherwise busy).
+ *
+ * The process of making a purgeable object non-volatile and determining its
+ * previous state is atomic. Thus, if a purgeable object is made
+ * VM_PURGABLE_NONVOLATILE and the old state is returned as
+ * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
+ * completely intact and will remain so until the object is made volatile
+ * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
+ * was reclaimed while it was in a volatile state and its previous contents
+ * have been lost.
+ */
+/*
+ * The object must be locked.
+ */
+/*
+ * vm_object_purgable_control:
+ *
+ * Query (VM_PURGABLE_GET_STATE) or change (VM_PURGABLE_SET_STATE) the
+ * purgeable state of "object".  On a SET, the requested state is taken
+ * from *state and the PREVIOUS state is returned through *state; this
+ * makes the non-volatile transition atomic with respect to learning
+ * whether the contents were purged.  See the long comment above for
+ * the user-visible semantics.
+ *
+ * The caller must hold the object lock exclusively.
+ * Returns KERN_INVALID_ARGUMENT for a null or non-purgeable object,
+ * and may return the failure from vm_purgeable_token_add() when
+ * becoming volatile; KERN_SUCCESS otherwise.
+ */
+kern_return_t
+vm_object_purgable_control(
+ vm_object_t object,
+ vm_purgable_t control,
+ int *state)
+{
+ int old_state;
+ int new_state;
+
+ if (object == VM_OBJECT_NULL) {
+ /*
+ * Object must already be present or it can't be purgeable.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_object_lock_assert_exclusive(object);
+
+ /*
+ * Get current state of the purgeable object.
+ */
+ old_state = object->purgable;
+ if (old_state == VM_PURGABLE_DENY)
+ return KERN_INVALID_ARGUMENT;
+
+ /* purgeable can't have delayed copies - now or in the future */
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+
+ /*
+ * Execute the desired operation.
+ */
+ if (control == VM_PURGABLE_GET_STATE) {
+ *state = old_state;
+ return KERN_SUCCESS;
+ }
+
+ /* debug modes are sticky once requested */
+ if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
+ object->volatile_empty = TRUE;
+ }
+ if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
+ object->volatile_fault = TRUE;
+ }
+
+ new_state = *state & VM_PURGABLE_STATE_MASK;
+ /* "debug empty" turns a volatile request into an immediate purge */
+ if (new_state == VM_PURGABLE_VOLATILE &&
+ object->volatile_empty) {
+ new_state = VM_PURGABLE_EMPTY;
+ }
+
+ switch (new_state) {
+ case VM_PURGABLE_DENY:
+ case VM_PURGABLE_NONVOLATILE:
+ object->purgable = new_state;
+
+ if (old_state == VM_PURGABLE_VOLATILE) {
+ unsigned int delta;
+
+ assert(object->resident_page_count >=
+ object->wired_page_count);
+ delta = (object->resident_page_count -
+ object->wired_page_count);
+
+ assert(vm_page_purgeable_count >= delta);
+
+ /* pages are no longer purgeable: fix the global counts */
+ if (delta != 0) {
+ OSAddAtomic(-delta,
+ (SInt32 *)&vm_page_purgeable_count);
+ }
+ if (object->wired_page_count != 0) {
+ assert(vm_page_purgeable_wired_count >=
+ object->wired_page_count);
+ OSAddAtomic(-object->wired_page_count,
+ (SInt32 *)&vm_page_purgeable_wired_count);
+ }
+
+ vm_page_lock_queues();
+
+ /* object should be on a queue */
+ assert(object->objq.next != NULL &&
+ object->objq.prev != NULL);
+ purgeable_q_t queue;
+
+ /*
+ * Move object from its volatile queue to the
+ * non-volatile queue...
+ */
+ queue = vm_purgeable_object_remove(object);
+ assert(queue);
+
+ if (object->purgeable_when_ripe) {
+ vm_purgeable_token_delete_last(queue);
+ }
+ assert(queue->debug_count_objects>=0);
+
+ vm_page_unlock_queues();
+ }
+ if (old_state == VM_PURGABLE_VOLATILE ||
+ old_state == VM_PURGABLE_EMPTY) {
+ /*
+ * Transfer the object's pages from the volatile to
+ * non-volatile ledgers.
+ */
+ vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE,
+ FALSE);
+ }
+
+ break;
+
+ case VM_PURGABLE_VOLATILE:
+ if (object->volatile_fault) {
+ vm_page_t p;
+ int refmod;
+
+ /*
+ * Debug mode: disconnect all pmap mappings so that
+ * any future access faults (and propagate any pending
+ * pmap "modified" state into the page's dirty bit
+ * before the mappings are lost).
+ */
+ vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (p->busy ||
+ VM_PAGE_WIRED(p) ||
+ p->fictitious) {
+ continue;
+ }
+ refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+ if ((refmod & VM_MEM_MODIFIED) &&
+ !p->dirty) {
+ SET_PAGE_DIRTY(p, FALSE);
+ }
+ }
+ }
+
+ /* an already-purged object with nothing left needs no queueing */
+ if (old_state == VM_PURGABLE_EMPTY &&
+ object->resident_page_count == 0 &&
+ object->pager == NULL)
+ break;
+
+ purgeable_q_t queue;
+
+ /* find the correct queue */
+ if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+ else {
+ if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ else
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+ }
+
+ if (old_state == VM_PURGABLE_NONVOLATILE ||
+ old_state == VM_PURGABLE_EMPTY) {
+ unsigned int delta;
+
+ if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
+ VM_PURGABLE_NO_AGING) {
+ object->purgeable_when_ripe = FALSE;
+ } else {
+ object->purgeable_when_ripe = TRUE;
+ }
+
+ if (object->purgeable_when_ripe) {
+ kern_return_t result;
+
+ /* try to add token... this can fail */
+ vm_page_lock_queues();
+
+ result = vm_purgeable_token_add(queue);
+ if (result != KERN_SUCCESS) {
+ /* bail out before any state was changed */
+ vm_page_unlock_queues();
+ return result;
+ }
+ vm_page_unlock_queues();
+ }
+
+ assert(object->resident_page_count >=
+ object->wired_page_count);
+ delta = (object->resident_page_count -
+ object->wired_page_count);
+
+ /* pages become purgeable: fix the global counts */
+ if (delta != 0) {
+ OSAddAtomic(delta,
+ &vm_page_purgeable_count);
+ }
+ if (object->wired_page_count != 0) {
+ OSAddAtomic(object->wired_page_count,
+ &vm_page_purgeable_wired_count);
+ }
+
+ object->purgable = new_state;
+
+ /* object should be on "non-volatile" queue */
+ assert(object->objq.next != NULL);
+ assert(object->objq.prev != NULL);
+ }
+ else if (old_state == VM_PURGABLE_VOLATILE) {
+ purgeable_q_t old_queue;
+ boolean_t purgeable_when_ripe;
+
+ /*
+ * if reassigning priorities / purgeable groups, we don't change the
+ * token queue. So moving priorities will not make pages stay around longer.
+ * Reasoning is that the algorithm gives most priority to the most important
+ * object. If a new token is added, the most important object' priority is boosted.
+ * This biases the system already for purgeable queues that move a lot.
+ * It doesn't seem more biasing is necessary in this case, where no new object is added.
+ */
+ assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
+
+ old_queue = vm_purgeable_object_remove(object);
+ assert(old_queue);
+
+ if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
+ VM_PURGABLE_NO_AGING) {
+ purgeable_when_ripe = FALSE;
+ } else {
+ purgeable_when_ripe = TRUE;
+ }
+
+ if (old_queue != queue ||
+ (purgeable_when_ripe !=
+ object->purgeable_when_ripe)) {
+ kern_return_t result;
+
+ /* Changing queue. Have to move token. */
+ vm_page_lock_queues();
+ if (object->purgeable_when_ripe) {
+ vm_purgeable_token_delete_last(old_queue);
+ }
+ object->purgeable_when_ripe = purgeable_when_ripe;
+ if (object->purgeable_when_ripe) {
+ result = vm_purgeable_token_add(queue);
+ assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */
+ }
+ vm_page_unlock_queues();
+
+ }
+ };
+ vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT );
+ if (old_state == VM_PURGABLE_NONVOLATILE) {
+ /* move the pages to the "volatile" ledgers */
+ vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
+ FALSE);
+ }
+
+ assert(queue->debug_count_objects>=0);
+
+ break;
+
+
+ case VM_PURGABLE_EMPTY:
+ if (object->volatile_fault) {
+ vm_page_t p;
+ int refmod;
+
+ /* same debug-mode pmap disconnect as the VOLATILE case */
+ vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (p->busy ||
+ VM_PAGE_WIRED(p) ||
+ p->fictitious) {
+ continue;
+ }
+ refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
+ if ((refmod & VM_MEM_MODIFIED) &&
+ !p->dirty) {
+ SET_PAGE_DIRTY(p, FALSE);
+ }
+ }
+ }
+
+ if (old_state == new_state) {
+ /* nothing changes */
+ break;
+ }
+
+ assert(old_state == VM_PURGABLE_NONVOLATILE ||
+ old_state == VM_PURGABLE_VOLATILE);
+ if (old_state == VM_PURGABLE_VOLATILE) {
+ purgeable_q_t old_queue;
+
+ /* object should be on a queue */
+ assert(object->objq.next != NULL &&
+ object->objq.prev != NULL);
+
+ old_queue = vm_purgeable_object_remove(object);
+ assert(old_queue);
+ if (object->purgeable_when_ripe) {
+ vm_page_lock_queues();
+ vm_purgeable_token_delete_first(old_queue);
+ vm_page_unlock_queues();
+ }
+ }
+
+ if (old_state == VM_PURGABLE_NONVOLATILE) {
+ /*
+ * This object's pages were previously accounted as
+ * "non-volatile" and now need to be accounted as
+ * "volatile".
+ */
+ vm_purgeable_accounting(object, VM_PURGABLE_NONVOLATILE,
+ FALSE);
+ /*
+ * Set to VM_PURGABLE_EMPTY because the pages are no
+ * longer accounted in the "non-volatile" ledger
+ * and are also not accounted for in
+ * "vm_page_purgeable_count".
+ */
+ object->purgable = VM_PURGABLE_EMPTY;
+ }
+
+ (void) vm_object_purge(object, 0);
+ assert(object->purgable == VM_PURGABLE_EMPTY);
+
+ break;
+ }
+
+ /* report the state the object was in before this call */
+ *state = old_state;
+
+ vm_object_lock_assert_exclusive(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_object_get_page_counts:
+ *
+ * Count the resident (and optionally dirty) pages of "object" within
+ * [offset, offset + size).  Either out-parameter may be NULL if the
+ * caller doesn't want that count; dirty counting also considers pages
+ * whose pmap "modified" bit is set (via pmap_is_modified).
+ *
+ * The caller must hold the object lock exclusively.
+ * Returns KERN_INVALID_ARGUMENT for a null object, KERN_SUCCESS
+ * otherwise.
+ */
+kern_return_t
+vm_object_get_page_counts(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ unsigned int *resident_page_count,
+ unsigned int *dirty_page_count)
+{
+
+ kern_return_t kr = KERN_SUCCESS;
+ boolean_t count_dirty_pages = FALSE;
+ vm_page_t p = VM_PAGE_NULL;
+ unsigned int local_resident_count = 0;
+ unsigned int local_dirty_count = 0;
+ vm_object_offset_t cur_offset = 0;
+ vm_object_offset_t end_offset = 0;
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+
+ cur_offset = offset;
+
+ end_offset = offset + size;
+
+ vm_object_lock_assert_exclusive(object);
+
+ if (dirty_page_count != NULL) {
+
+ count_dirty_pages = TRUE;
+ }
+
+ if (resident_page_count != NULL && count_dirty_pages == FALSE) {
+ /*
+ * Fast path when:
+ * - we only want the resident page count, and,
+ * - the entire object is exactly covered by the request.
+ */
+ if (offset == 0 && (object->vo_size == size)) {
+
+ *resident_page_count = object->resident_page_count;
+ goto out;
+ }
+ }
+
+ /*
+ * Pick the cheaper traversal: walk the object's resident-page list
+ * when it is shorter than the range, otherwise probe each page
+ * offset in the range with a hash lookup.
+ */
+ if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
+
+ vm_page_queue_iterate(&object->memq, p, vm_page_t, listq) {
+
+ if (p->offset >= cur_offset && p->offset < end_offset) {
+
+ local_resident_count++;
+
+ if (count_dirty_pages) {
+
+ if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
+
+ local_dirty_count++;
+ }
+ }
+ }
+ }
+ } else {
+
+ for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
+
+ p = vm_page_lookup(object, cur_offset);
+
+ if (p != VM_PAGE_NULL) {
+
+ local_resident_count++;
+
+ if (count_dirty_pages) {
+
+ if (p->dirty || (p->wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
+
+ local_dirty_count++;
+ }
+ }
+ }
+ }
+
+ }
+
+ if (resident_page_count != NULL) {
+ *resident_page_count = local_resident_count;
+ }
+
+ if (dirty_page_count != NULL) {
+ *dirty_page_count = local_dirty_count;
+ }
+
+out:
+ return kr;
+}
+
+
+#if TASK_SWAPPER
+/*
+ * vm_object_res_deallocate
+ *
+ * (recursively) decrement residence counts on vm objects and their shadows.
+ * Called from vm_object_deallocate and when swapping out an object.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
+
+__private_extern__ void
+vm_object_res_deallocate(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked so it can be called directly
+ * from vm_object_deallocate. Original object is never
+ * unlocked.
+ */
+ assert(object->res_count > 0);
+ /* walk down the shadow chain while residence counts drop to 0 */
+ while (--object->res_count == 0) {
+ assert(object->ref_count >= object->res_count);
+ vm_object_deactivate_all_pages(object);
+ /* iterate on shadow, if present */
+ if (object->shadow != VM_OBJECT_NULL) {
+ vm_object_t tmp_object = object->shadow;
+ /*
+ * Hand-over-hand locking: lock the shadow before
+ * releasing the current (non-original) object.
+ */
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ assert(object->res_count > 0);
+ } else
+ break;
+ }
+ /* drop the lock on the last intermediate object, if any */
+ if (object != orig_object)
+ vm_object_unlock(object);
+}
+
+/*
+ * vm_object_res_reference
+ *
+ * Internal function to increment residence count on a vm object
+ * and its shadows. It is called only from vm_object_reference, and
+ * when swapping in a vm object, via vm_map_swap.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
+
+__private_extern__ void
+vm_object_res_reference(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked, so this can be called directly
+ * from vm_object_reference. This lock is never released.
+ */
+ /*
+ * Keep descending the shadow chain only while a residence
+ * count makes its first transition 0 -> 1 (hand-over-hand
+ * locking, mirroring vm_object_res_deallocate).
+ */
+ while ((++object->res_count == 1) &&
+ (object->shadow != VM_OBJECT_NULL)) {
+ vm_object_t tmp_object = object->shadow;
+
+ assert(object->ref_count >= object->res_count);
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ }
+ /* drop the lock on the last intermediate object, if any */
+ if (object != orig_object)
+ vm_object_unlock(object);
+ assert(orig_object->ref_count >= orig_object->res_count);
+}
+#endif /* TASK_SWAPPER */
+
+/*
+ * vm_object_reference:
+ *
+ * Gets another reference to the given object.
+ */
+#ifdef vm_object_reference
+#undef vm_object_reference
+#endif
+__private_extern__ void
+vm_object_reference(
+ vm_object_t object)
+{
+ /* referencing a null object is a harmless no-op */
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+ /* must already hold at least one reference to take another */
+ assert(object->ref_count > 0);
+ vm_object_reference_locked(object);
+ vm_object_unlock(object);
+}
+
+#ifdef MACH_BSD
+/*
+ * Scale the vm_object_cache
+ * This is required to make sure that the vm_object_cache is big
+ * enough to effectively cache the mapped file.
+ * This is really important with UBC as all the regular file vnodes
+ * have memory object associated with them. Having this cache too
+ * small results in rapid reclaim of vnodes and hurts performance a LOT!
+ *
+ * This is also needed as number of vnodes can be dynamically scaled.
+ */
+kern_return_t
+adjust_vm_object_cache(
+ __unused vm_size_t oval,
+ __unused vm_size_t nval)
+{
+#if VM_OBJECT_CACHE
+ /* install the new cache ceiling and evict down to it */
+ vm_object_cached_max = nval;
+ vm_object_cache_trim(FALSE);
+#endif
+ /* without VM_OBJECT_CACHE this is a no-op that reports success */
+ return (KERN_SUCCESS);
+}
+#endif /* MACH_BSD */
+
+
+/*
+ * vm_object_transpose
+ *
+ * This routine takes two VM objects of the same size and exchanges
+ * their backing store.
+ * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
+ * and UPL_BLOCK_ACCESS if they are referenced anywhere.
+ *
+ * The VM objects must not be locked by caller.
+ */
+/* statistics: number of successful (or attempted) transpositions */
+unsigned int vm_object_transpose_count = 0;
+kern_return_t
+vm_object_transpose(
+ vm_object_t object1,
+ vm_object_t object2,
+ vm_object_size_t transpose_size)
+{
+ vm_object_t tmp_object;
+ kern_return_t retval;
+ boolean_t object1_locked, object2_locked;
+ vm_page_t page;
+ vm_object_offset_t page_offset;
+ lck_mtx_t *hash_lck;
+ vm_object_hash_entry_t hash_entry;
+
+ tmp_object = VM_OBJECT_NULL;
+ object1_locked = FALSE; object2_locked = FALSE;
+
+ if (object1 == object2 ||
+ object1 == VM_OBJECT_NULL ||
+ object2 == VM_OBJECT_NULL) {
+ /*
+ * If the 2 VM objects are the same, there's
+ * no point in exchanging their backing store.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+ /*
+ * Since we need to lock both objects at the same time,
+ * make sure we always lock them in the same order to
+ * avoid deadlocks.
+ */
+ if (object1 > object2) {
+ tmp_object = object1;
+ object1 = object2;
+ object2 = tmp_object;
+ }
+
+ /*
+ * Allocate a temporary VM object to hold object1's contents
+ * while we copy object2 to object1.
+ */
+ tmp_object = vm_object_allocate(transpose_size);
+ vm_object_lock(tmp_object);
+ tmp_object->can_persist = FALSE;
+
+
+ /*
+ * Grab control of the 1st VM object.
+ */
+ vm_object_lock(object1);
+ object1_locked = TRUE;
+ if (!object1->alive || object1->terminating ||
+ object1->copy || object1->shadow || object1->shadowed ||
+ object1->purgable != VM_PURGABLE_DENY) {
+ /*
+ * We don't deal with copy or shadow objects (yet).
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ /*
+ * We're about to mess with the object's backing store and
+ * taking a "paging_in_progress" reference wouldn't be enough
+ * to prevent any paging activity on this object, so the caller should
+ * have "quiesced" the objects beforehand, via a UPL operation with
+ * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
+ * and UPL_BLOCK_ACCESS (to mark the pages "busy").
+ *
+ * Wait for any paging operation to complete (but only paging, not
+ * other kind of activities not linked to the pager). After we're
+ * satisfied that there's no more paging in progress, we keep the
+ * object locked, to guarantee that no one tries to access its pager.
+ */
+ vm_object_paging_only_wait(object1, THREAD_UNINT);
+
+ /*
+ * Same as above for the 2nd object...
+ */
+ vm_object_lock(object2);
+ object2_locked = TRUE;
+ if (! object2->alive || object2->terminating ||
+ object2->copy || object2->shadow || object2->shadowed ||
+ object2->purgable != VM_PURGABLE_DENY) {
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ vm_object_paging_only_wait(object2, THREAD_UNINT);
+
+
+ if (object1->vo_size != object2->vo_size ||
+ object1->vo_size != transpose_size) {
+ /*
+ * If the 2 objects don't have the same size, we can't
+ * exchange their backing stores or one would overflow.
+ * If their size doesn't match the caller's
+ * "transpose_size", we can't do it either because the
+ * transpose operation will affect the entire span of
+ * the objects.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+
+ /*
+ * Transpose the lists of resident pages.
+ * This also updates the resident_page_count and the memq_hint.
+ */
+ if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
+ /*
+ * No pages in object1, just transfer pages
+ * from object2 to object1. No need to go through
+ * an intermediate object.
+ */
+ while (!vm_page_queue_empty(&object2->memq)) {
+ page = (vm_page_t) vm_page_queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset, FALSE);
+ }
+ assert(vm_page_queue_empty(&object2->memq));
+ } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
+ /*
+ * No pages in object2, just transfer pages
+ * from object1 to object2. No need to go through
+ * an intermediate object.
+ */
+ while (!vm_page_queue_empty(&object1->memq)) {
+ page = (vm_page_t) vm_page_queue_first(&object1->memq);
+ vm_page_rename(page, object2, page->offset, FALSE);
+ }
+ assert(vm_page_queue_empty(&object1->memq));
+ } else {
+ /* transfer object1's pages to tmp_object */
+ while (!vm_page_queue_empty(&object1->memq)) {
+ page = (vm_page_t) vm_page_queue_first(&object1->memq);
+ page_offset = page->offset;
+ /* vm_page_remove() clears page->offset; restore it */
+ vm_page_remove(page, TRUE);
+ page->offset = page_offset;
+ vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, listq);
+ }
+ assert(vm_page_queue_empty(&object1->memq));
+ /* transfer object2's pages to object1 */
+ while (!vm_page_queue_empty(&object2->memq)) {
+ page = (vm_page_t) vm_page_queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset, FALSE);
+ }
+ assert(vm_page_queue_empty(&object2->memq));
+ /* transfer tmp_object's pages to object2 */
+ while (!vm_page_queue_empty(&tmp_object->memq)) {
+ page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
+ vm_page_queue_remove(&tmp_object->memq, page,
+ vm_page_t, listq);
+ vm_page_insert(page, object2, page->offset);
+ }
+ assert(vm_page_queue_empty(&tmp_object->memq));
+ }
+
+/* exchange one field between object1 and object2, via tmp_object */
+#define __TRANSPOSE_FIELD(field) \
+MACRO_BEGIN \
+ tmp_object->field = object1->field; \
+ object1->field = object2->field; \
+ object2->field = tmp_object->field; \
+MACRO_END
+
+ /* "Lock" refers to the object not its contents */
+ /* "size" should be identical */
+ assert(object1->vo_size == object2->vo_size);
+ /* "memq_hint" was updated above when transposing pages */
+ /* "ref_count" refers to the object not its contents */
+#if TASK_SWAPPER
+ /* "res_count" refers to the object not its contents */
+#endif
+ /* "resident_page_count" was updated above when transposing pages */
+ /* "wired_page_count" was updated above when transposing pages */
+ /* "reusable_page_count" was updated above when transposing pages */
+ /* there should be no "copy" */
+ assert(!object1->copy);
+ assert(!object2->copy);
+ /* there should be no "shadow" */
+ assert(!object1->shadow);
+ assert(!object2->shadow);
+ __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
+ __TRANSPOSE_FIELD(pager);
+ __TRANSPOSE_FIELD(paging_offset);
+ __TRANSPOSE_FIELD(pager_control);
+ /* update the memory_objects' pointers back to the VM objects */
+ if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object1->pager_control,
+ object1);
+ }
+ if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object2->pager_control,
+ object2);
+ }
+ __TRANSPOSE_FIELD(copy_strategy);
+ /* "paging_in_progress" refers to the object not its contents */
+ assert(!object1->paging_in_progress);
+ assert(!object2->paging_in_progress);
+ assert(object1->activity_in_progress);
+ assert(object2->activity_in_progress);
+ /* "all_wanted" refers to the object not its contents */
+ __TRANSPOSE_FIELD(pager_created);
+ __TRANSPOSE_FIELD(pager_initialized);
+ __TRANSPOSE_FIELD(pager_ready);
+ __TRANSPOSE_FIELD(pager_trusted);
+ __TRANSPOSE_FIELD(can_persist);
+ __TRANSPOSE_FIELD(internal);
+ __TRANSPOSE_FIELD(temporary);
+ __TRANSPOSE_FIELD(private);
+ __TRANSPOSE_FIELD(pageout);
+ /* "alive" should be set */
+ assert(object1->alive);
+ assert(object2->alive);
+ /* "purgeable" should be non-purgeable */
+ assert(object1->purgable == VM_PURGABLE_DENY);
+ assert(object2->purgable == VM_PURGABLE_DENY);
+ /* "shadowed" refers to the object not its contents */
+ __TRANSPOSE_FIELD(purgeable_when_ripe);
+ __TRANSPOSE_FIELD(advisory_pageout);
+ __TRANSPOSE_FIELD(true_share);
+ /* "terminating" should not be set */
+ assert(!object1->terminating);
+ assert(!object2->terminating);
+ __TRANSPOSE_FIELD(named);
+ /* "shadow_severed" refers to the object not its contents */
+ __TRANSPOSE_FIELD(phys_contiguous);
+ __TRANSPOSE_FIELD(nophyscache);
+ /* "cached_list.next" points to transposed object */
+ object1->cached_list.next = (queue_entry_t) object2;
+ object2->cached_list.next = (queue_entry_t) object1;
+ /* "cached_list.prev" should be NULL */
+ assert(object1->cached_list.prev == NULL);
+ assert(object2->cached_list.prev == NULL);
+ /* "msr_q" is linked to the object not its contents */
+ assert(queue_empty(&object1->msr_q));
+ assert(queue_empty(&object2->msr_q));
+ __TRANSPOSE_FIELD(last_alloc);
+ __TRANSPOSE_FIELD(sequential);
+ __TRANSPOSE_FIELD(pages_created);
+ __TRANSPOSE_FIELD(pages_used);
+ __TRANSPOSE_FIELD(scan_collisions);
+ __TRANSPOSE_FIELD(cow_hint);
+#if MACH_ASSERT
+ __TRANSPOSE_FIELD(paging_object);
+#endif
+ __TRANSPOSE_FIELD(wimg_bits);
+ __TRANSPOSE_FIELD(set_cache_attr);
+ __TRANSPOSE_FIELD(code_signed);
+ /* re-point the pager hash entries at their new owners */
+ if (object1->hashed) {
+ hash_lck = vm_object_hash_lock_spin(object2->pager);
+ hash_entry = vm_object_hash_lookup(object2->pager, FALSE);
+ assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
+ hash_entry->object = object2;
+ vm_object_hash_unlock(hash_lck);
+ }
+ if (object2->hashed) {
+ hash_lck = vm_object_hash_lock_spin(object1->pager);
+ hash_entry = vm_object_hash_lookup(object1->pager, FALSE);
+ assert(hash_entry != VM_OBJECT_HASH_ENTRY_NULL);
+ hash_entry->object = object1;
+ vm_object_hash_unlock(hash_lck);
+ }
+ __TRANSPOSE_FIELD(hashed);
+ object1->transposed = TRUE;
+ object2->transposed = TRUE;
+ __TRANSPOSE_FIELD(mapping_in_progress);
+ __TRANSPOSE_FIELD(volatile_empty);
+ __TRANSPOSE_FIELD(volatile_fault);
+ __TRANSPOSE_FIELD(all_reusable);
+ assert(object1->blocked_access);
+ assert(object2->blocked_access);
+ assert(object1->__object2_unused_bits == 0);
+ assert(object2->__object2_unused_bits == 0);
+#if UPL_DEBUG
+ /* "uplq" refers to the object not its contents (see upl_transpose()) */
+#endif
+ assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
+ assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
+ assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
+ assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
+
+#undef __TRANSPOSE_FIELD
+
+ retval = KERN_SUCCESS;
+
+done:
+ /*
+ * Cleanup.
+ */
+ if (tmp_object != VM_OBJECT_NULL) {
+ vm_object_unlock(tmp_object);
+ /*
+ * Re-initialize the temporary object to avoid
+ * deallocating a real pager.
+ */
+ _vm_object_allocate(transpose_size, tmp_object);
+ vm_object_deallocate(tmp_object);
+ tmp_object = VM_OBJECT_NULL;
+ }
+
+ if (object1_locked) {
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+ }
+ if (object2_locked) {
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
+ }
+
+ vm_object_transpose_count++;
+
+ return retval;
+}
+
+
+/*
+ * vm_object_cluster_size
+ *
+ * Determine how big a cluster we should issue an I/O for...
+ *
+ * Inputs: *start == offset of page needed
+ * *length == maximum cluster pager can handle
+ * Outputs: *start == beginning offset of cluster
+ * *length == length of cluster to try
+ *
+ * The original *start will be encompassed by the cluster
+ *
+ */
+extern int speculative_reads_disabled;
+extern int ignore_is_ssd;
+
+/*
+ * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
+ * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
+ * always be page-aligned. The derivation could involve operations (e.g. division)
+ * that could give us non-page-size aligned values if we start out with values that
+ * are odd multiples of PAGE_SIZE.
+ */
+ unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
+unsigned int preheat_min_bytes = (1024 * 32);