+ /*
+ * There is a backing object, and
+ */
+
+ backing_object = object->shadow;
+ if (backing_object == VM_OBJECT_NULL) {
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ return;
+ }
+ if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
+ vm_object_lock_shared(backing_object);
+ } else {
+ vm_object_lock(backing_object);
+ }
+
+ /*
+ * No pages in the object are currently
+ * being paged out, and
+ */
+ if (object->paging_in_progress != 0 ||
+ object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * ...
+ * The backing object is not read_only,
+ * and no pages in the backing object are
+ * currently being paged out.
+ * The backing object is internal.
+ *
+ */
+
+ if (!backing_object->internal ||
+ backing_object->paging_in_progress != 0 ||
+ backing_object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * Purgeable objects are not supposed to engage in
+ * copy-on-write activities, so should not have
+ * any shadow objects or be a shadow object to another
+ * object.
+ * Collapsing a purgeable object would require some
+ * updates to the purgeable compressed ledgers.
+ */
+ if (object->purgable != VM_PURGABLE_DENY ||
+ backing_object->purgable != VM_PURGABLE_DENY) {
+ panic("vm_object_collapse() attempting to collapse "
+ "purgeable object: %p(%d) %p(%d)\n",
+ object, object->purgable,
+ backing_object, backing_object->purgable);
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * The backing object can't be a copy-object:
+ * the shadow_offset for the copy-object must stay
+ * as 0. Furthermore (for the 'we have all the
+ * pages' case), if we bypass backing_object and
+ * just shadow the next object in the chain, old
+ * pages from that object would then have to be copied
+ * BOTH into the (former) backing_object and into the
+ * parent object.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL &&
+ backing_object->shadow->copy == backing_object) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * We can now try to either collapse the backing
+ * object (if the parent is the only reference to
+ * it) or (perhaps) remove the parent's reference
+ * to it.
+ *
+ * If there is exactly one reference to the backing
+ * object, we may be able to collapse it into the
+ * parent.
+ *
+ * As long as one of the objects is still not known
+ * to the pager, we can collapse them.
+ */
+ if (backing_object->ref_count == 1 &&
+ (vm_object_collapse_compressor_allowed ||
+ !object->pager_created
+ || (!backing_object->pager_created)
+ ) && vm_object_collapse_allowed) {
+
+ /*
+ * We need the exclusive lock on the VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ /*
+ * We have an object and its shadow locked
+ * "shared". We can't just upgrade the locks
+ * to "exclusive", as some other thread might
+ * also have these objects locked "shared" and
+ * attempt to upgrade one or the other to
+ * "exclusive". The upgrades would block
+ * forever waiting for the other "shared" locks
+ * to get released.
+ * So we have to release the locks and go
+ * down the shadow chain again (since it could
+ * have changed) with "exclusive" locking.
+ */
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
+ backing_object, object,
+ backing_object->pager,
+ backing_object->pager_control, 0);
+
+ /*
+ * Collapse the object with its backing
+ * object, and try again with the object's
+ * new backing object.
+ */
+
+ vm_object_do_collapse(object, backing_object);
+ vm_object_collapse_do_collapse++;
+ continue;
+ }
+
+ /*
+ * Collapsing the backing object was not possible
+ * or permitted, so let's try bypassing it.
+ */
+
+ if (! (can_bypass && vm_object_bypass_allowed)) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+
+ /*
+ * If the object doesn't have all its pages present,
+ * we have to make sure no pages in the backing object
+ * "show through" before bypassing it.
+ */
+ size = (unsigned int)atop(object->vo_size);
+ rcount = object->resident_page_count;
+
+ if (rcount != size) {
+ vm_object_offset_t offset;
+ vm_object_offset_t backing_offset;
+ unsigned int backing_rcount;
+
+ /*
+ * If the backing object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (backing_object->pager_created) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (object->pager_created) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ backing_offset = object->vo_shadow_offset;
+ backing_rcount = backing_object->resident_page_count;
+
+ if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+ /*
+ * we have enough pages in the backing object to guarantee that
+ * at least 1 of them must be 'uncovered' by a resident page
+ * in the object we're evaluating, so move on and
+ * try to collapse the rest of the shadow chain
+ */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+ * If the backing object has existence info,
+ * we must also examine its existence info
+ * as well.
+ *
+ */
+
+#define EXISTS_IN_OBJECT(obj, off, rc) \
+ ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+
+ /*
+ * Check the hint location first
+ * (since it is often the quickest way out of here).
+ */
+ if (object->cow_hint != ~(vm_offset_t)0)
+ hint_offset = (vm_object_offset_t)object->cow_hint;
+ else
+ hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
+ (hint_offset - 8 * PAGE_SIZE_64) : 0;
+
+ if (EXISTS_IN_OBJECT(backing_object, hint_offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
+ /* dependency right at the hint */
+ object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object's window onto the backing_object
+ * is large compared to the number of resident
+ * pages in the backing object, it makes sense to
+ * walk the backing_object's resident pages first.
+ *
+ * NOTE: Pages may be in both the existence map and/or
+ * resident, so if we don't find a dependency while
+ * walking the backing object's resident page list
+ * directly, and there is an existence map, we'll have
+ * to run the offset based 2nd pass. Because we may
+ * have to run both passes, we need to be careful
+ * not to decrement 'rcount' in the 1st pass
+ */
+ if (backing_rcount && backing_rcount < (size / 8)) {
+ unsigned int rc = rcount;
+ vm_page_t p;
+
+ backing_rcount = backing_object->resident_page_count;
+ p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
+ do {
+ offset = (p->offset - backing_offset);
+
+ if (offset < object->vo_size &&
+ offset != hint_offset &&
+ !EXISTS_IN_OBJECT(object, offset, rc)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+
+ break;
+ }
+ p = (vm_page_t) vm_page_queue_next(&p->listq);
+
+ } while (--backing_rcount);
+ if (backing_rcount != 0 ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+
+ /*
+ * Walk through the offsets looking for pages in the
+ * backing object that show through to the object.
+ */
+ if (backing_rcount) {
+ offset = hint_offset;
+
+ while((offset =
+ (offset + PAGE_SIZE_64 < object->vo_size) ?
+ (offset + PAGE_SIZE_64) : 0) != hint_offset) {
+
+ if (EXISTS_IN_OBJECT(backing_object, offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, offset, rcount)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+ break;
+ }
+ }
+ if (offset != hint_offset) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * We need "exclusive" locks on the 2 VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ /* reset the offset hint for any objects deeper in the chain */
+ object->cow_hint = (vm_offset_t)0;
+
+ /*
+ * All interesting pages in the backing object
+ * already live in the parent or its pager.
+ * Thus we can bypass the backing object.
+ */
+
+ vm_object_do_bypass(object, backing_object);
+ vm_object_collapse_do_bypass++;
+
+ /*
+ * Try again with this object's new backing object.
+ */
+
+ continue;
+ }
+
+ /* NOT REACHED */
+ /*
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ */
+}
+
+/*
+ * Routine: vm_object_page_remove: [internal]
+ * Purpose:
+ * Removes all physical pages in the specified
+ * object range from the object's list of pages.
+ *
+ * In/out conditions:
+ * The object must be locked.
+ * The object must not have paging_in_progress, usually
+ * guaranteed by not having a pager.
+ */
/* Statistics: how often vm_object_page_remove() chose each strategy. */
unsigned int vm_object_page_remove_lookup = 0;	/* per-offset hash lookups */
unsigned int vm_object_page_remove_iterate = 0;	/* full memq iterations */
+
__private_extern__ void
vm_object_page_remove(
	vm_object_t		object,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	vm_page_t	p, next;

	/*
	 * One and two page removals are most popular.
	 * The factor of 16 here is somewhat arbitrary.
	 * It balances vm_object_lookup vs iteration.
	 */

	if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
		/* Small range: hash-lookup each page offset individually. */
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE_64) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				assert(!p->cleaning && !p->laundry);
				/* tear down any pmap mappings before freeing */
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				VM_PAGE_FREE(p);
			}
		}
	} else {
		/* Large range: walk the object's resident page list once. */
		vm_object_page_remove_iterate++;

		p = (vm_page_t) vm_page_queue_first(&object->memq);
		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
			/* capture the successor first: VM_PAGE_FREE unlinks p from memq */
			next = (vm_page_t) vm_page_queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				assert(!p->cleaning && !p->laundry);
				if (!p->fictitious && p->pmapped)
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
+
+
+/*
+ * Routine: vm_object_coalesce
+ * Function: Coalesces two objects backing up adjoining
+ * regions of memory into a single object.
+ *
+ * returns TRUE if objects were combined.
+ *
+ * NOTE: Only works at the moment if the second object is NULL -
+ * if it's not, which object do we lock first?
+ *
+ * Parameters:
+ * prev_object First object to coalesce
+ * prev_offset Offset into prev_object
+ * next_object Second object to coalesce
+ * next_offset Offset into next_object
+ *
+ * prev_size Size of reference to prev_object
+ * next_size Size of reference to next_object
+ *
+ * Conditions:
+ * The object(s) must *not* be locked. The map must be locked
+ * to preserve the reference to the object(s).
+ */
static int vm_object_coalesce_count = 0;	/* statistics: successful coalesce operations */
+
+__private_extern__ boolean_t
+vm_object_coalesce(
+ vm_object_t prev_object,
+ vm_object_t next_object,
+ vm_object_offset_t prev_offset,
+ __unused vm_object_offset_t next_offset,
+ vm_object_size_t prev_size,
+ vm_object_size_t next_size)
+{
+ vm_object_size_t newsize;
+
+#ifdef lint
+ next_offset++;
+#endif /* lint */
+
+ if (next_object != VM_OBJECT_NULL) {
+ return(FALSE);
+ }
+
+ if (prev_object == VM_OBJECT_NULL) {
+ return(TRUE);
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
+ prev_object, prev_offset, prev_size, next_size, 0);
+
+ vm_object_lock(prev_object);
+
+ /*
+ * Try to collapse the object first
+ */
+ vm_object_collapse(prev_object, prev_offset, TRUE);
+
+ /*
+ * Can't coalesce if pages not mapped to
+ * prev_entry may be in use any way:
+ * . more than one reference
+ * . paged out
+ * . shadows another object
+ * . has a copy elsewhere
+ * . is purgeable
+ * . paging references (pages might be in page-list)
+ */
+
+ if ((prev_object->ref_count > 1) ||
+ prev_object->pager_created ||
+ (prev_object->shadow != VM_OBJECT_NULL) ||
+ (prev_object->copy != VM_OBJECT_NULL) ||
+ (prev_object->true_share != FALSE) ||
+ (prev_object->purgable != VM_PURGABLE_DENY) ||
+ (prev_object->paging_in_progress != 0) ||
+ (prev_object->activity_in_progress != 0)) {
+ vm_object_unlock(prev_object);
+ return(FALSE);
+ }
+
+ vm_object_coalesce_count++;
+
+ /*
+ * Remove any pages that may still be in the object from
+ * a previous deallocation.
+ */
+ vm_object_page_remove(prev_object,
+ prev_offset + prev_size,
+ prev_offset + prev_size + next_size);
+
+ /*
+ * Extend the object if necessary.
+ */
+ newsize = prev_offset + prev_size + next_size;
+ if (newsize > prev_object->vo_size) {
+ prev_object->vo_size = newsize;
+ }
+
+ vm_object_unlock(prev_object);
+ return(TRUE);
+}
+
/*
 *	Routine:	vm_object_populate_with_private
 *	Purpose:
 *		Attach "size" bytes worth of specific physical pages,
 *		starting at physical page "phys_page", to a "private"
 *		VM object at "offset".  For a non-phys_contiguous object
 *		each page slot gets a private vm_page_t pointing at the
 *		corresponding physical page; for a phys_contiguous object
 *		the base physical address is recorded in vo_shadow_offset.
 *	Returns:
 *		KERN_FAILURE if the object is not private or "offset" is
 *		not page aligned; KERN_SUCCESS otherwise.
 *	Conditions:
 *		The object is locked/unlocked internally; the caller must
 *		not already hold the object lock.
 */
kern_return_t
vm_object_populate_with_private(
	vm_object_t		object,
	vm_object_offset_t	offset,
	ppnum_t			phys_page,
	vm_size_t		size)
{
	ppnum_t			base_page;
	vm_object_offset_t	base_offset;

	/* only "private" objects may be populated with specific frames */
	if (!object->private)
		return KERN_FAILURE;

	base_page = phys_page;

	vm_object_lock(object);

	if (!object->phys_contiguous) {
		vm_page_t	m;

		/* the offset must be page aligned */
		if ((base_offset = trunc_page_64(offset)) != offset) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
		base_offset += object->paging_offset;

		while (size) {
			m = vm_page_lookup(object, base_offset);

			if (m != VM_PAGE_NULL) {
				if (m->fictitious) {
					if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
						/*
						 * Convert the fictitious page into a
						 * private page backed by the caller's
						 * physical page.
						 */
						vm_page_lockspin_queues();
						m->private = TRUE;
						vm_page_unlock_queues();

						m->fictitious = FALSE;
						VM_PAGE_SET_PHYS_PAGE(m, base_page);
					}
				} else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {

					if ( !m->private) {
						/*
						 * we'd leak a real page... that can't be right
						 */
						panic("vm_object_populate_with_private - %p not private", m);
					}
					if (m->pmapped) {
						/*
						 * pmap call to clear old mapping
						 */
						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
					}
					VM_PAGE_SET_PHYS_PAGE(m, base_page);
				}
				if (m->encrypted) {
					/*
					 * we should never see this on a fictitious or private page
					 */
					panic("vm_object_populate_with_private - %p encrypted", m);
				}

			} else {
				/*
				 * No resident page at this offset yet: grab a
				 * fictitious page and retarget it at the
				 * caller-supplied physical page.
				 */
				while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
					vm_page_more_fictitious();

				/*
				 * private normally requires lock_queues but since we
				 * are initializing the page, its not necessary here
				 */
				m->private = TRUE;
				m->fictitious = FALSE;
				VM_PAGE_SET_PHYS_PAGE(m, base_page);
				m->unusual = TRUE;
				m->busy = FALSE;

				vm_page_insert(m, object, base_offset);
			}
			base_page++;	/* Go to the next physical page */
			base_offset += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	} else {
		/* NOTE: we should check the original settings here */
		/* if we have a size > zero a pmap call should be made */
		/* to disable the range */

		/* pmap_? */

		/* shadows on contiguous memory are not allowed */
		/* we therefore can use the offset field */
		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
		object->vo_size = size;
	}
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
+
+/*
+ * memory_object_free_from_cache:
+ *
+ * Walk the vm_object cache list, removing and freeing vm_objects
+ * which are backed by the pager identified by the caller, (pager_ops).
+ * Remove up to "count" objects, if there are that many available
+ * in the cache.
+ *
+ * Walk the list at most once, return the number of vm_objects
+ * actually freed.
+ */
+
+__private_extern__ kern_return_t
+memory_object_free_from_cache(
+ __unused host_t host,
+ __unused memory_object_pager_ops_t pager_ops,
+ int *count)
+{
+#if VM_OBJECT_CACHE
+ int object_released = 0;
+
+ vm_object_t object = VM_OBJECT_NULL;
+ vm_object_t shadow;
+
+/*
+ if(host == HOST_NULL)
+ return(KERN_INVALID_ARGUMENT);
+*/
+
+ try_again:
+ vm_object_cache_lock();
+
+ queue_iterate(&vm_object_cached_list, object,
+ vm_object_t, cached_list) {
+ if (object->pager &&
+ (pager_ops == object->pager->mo_pager_ops)) {
+ vm_object_lock(object);
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+
+ vm_object_cache_unlock();
+ /*
+ * Since this object is in the cache, we know
+ * that it is initialized and has only a pager's
+ * (implicit) reference. Take a reference to avoid
+ * recursive deallocations.
+ */
+
+ assert(object->pager_initialized);
+ assert(object->ref_count == 0);
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+
+ /*
+ * Terminate the object.
+ * If the object had a shadow, we let
+ * vm_object_deallocate deallocate it.
+ * "pageout" objects have a shadow, but
+ * maintain a "paging reference" rather
+ * than a normal reference.
+ * (We are careful here to limit recursion.)
+ */
+ shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
+
+ if ((vm_object_terminate(object) == KERN_SUCCESS)
+ && (shadow != VM_OBJECT_NULL)) {
+ vm_object_deallocate(shadow);
+ }
+
+ if(object_released++ == *count)
+ return KERN_SUCCESS;
+ goto try_again;
+ }
+ }
+ vm_object_cache_unlock();
+ *count = object_released;
+#else
+ *count = 0;
+#endif
+ return KERN_SUCCESS;
+}
+
+
+
+kern_return_t
+memory_object_create_named(
+ memory_object_t pager,
+ memory_object_offset_t size,
+ memory_object_control_t *control)
+{
+ vm_object_t object;
+ vm_object_hash_entry_t entry;
+ lck_mtx_t *lck;
+
+ *control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager == MEMORY_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ lck = vm_object_hash_lock_spin(pager);
+ entry = vm_object_hash_lookup(pager, FALSE);
+
+ if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
+ (entry->object != VM_OBJECT_NULL)) {
+ if (entry->object->named == TRUE)
+ panic("memory_object_create_named: caller already holds the right"); }
+ vm_object_hash_unlock(lck);
+
+ if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
+ return(KERN_INVALID_OBJECT);
+ }
+
+ /* wait for object (if any) to be ready */
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ object->named = TRUE;
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ *control = object->pager_control;
+ vm_object_unlock(object);
+ }
+ return (KERN_SUCCESS);
+}
+
+
+/*
+ * Routine: memory_object_recover_named [user interface]
+ * Purpose:
+ * Attempt to recover a named reference for a VM object.
+ * VM will verify that the object has not already started
+ * down the termination path, and if it has, will optionally
+ * wait for that to finish.
+ * Returns:
+ * KERN_SUCCESS - we recovered a named reference on the object
+ * KERN_FAILURE - we could not recover a reference (object dead)
+ * KERN_INVALID_ARGUMENT - bad memory object control
+ */
kern_return_t
memory_object_recover_named(
	memory_object_control_t	control,
	boolean_t		wait_on_terminating)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}
restart:
	vm_object_lock(object);

	/* optionally wait out an in-flight termination, then re-evaluate */
	if (object->terminating && wait_on_terminating) {
		vm_object_wait(object,
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
			THREAD_UNINT);
		goto restart;
	}

	/* object already went down the termination path: cannot recover */
	if (!object->alive) {
		vm_object_unlock(object);
		return KERN_FAILURE;
	}

	/* a named reference is already held: nothing to do */
	if (object->named == TRUE) {
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}
#if VM_OBJECT_CACHE
	if ((object->ref_count == 0) && (!object->terminating)) {
		/*
		 * Object sits on the cached list with no references:
		 * pull it off before resurrecting it.  The cache lock
		 * is only *tried* while the object lock is held, to
		 * avoid a lock-ordering deadlock; on failure we drop
		 * the object lock and start over.
		 */
		if (!vm_object_cache_lock_try()) {
			vm_object_unlock(object);
			goto restart;
		}
		queue_remove(&vm_object_cached_list, object,
			     vm_object_t, cached_list);
		vm_object_cached_count--;
		XPR(XPR_VM_OBJECT_CACHE,
		    "memory_object_recover_named: removing %X, head (%X, %X)\n",
		    object,
		    vm_object_cached_list.next,
		    vm_object_cached_list.prev, 0,0);

		vm_object_cache_unlock();
	}
#endif
	/* take the named reference and wait for the pager to be usable */
	object->named = TRUE;
	vm_object_lock_assert_exclusive(object);
	object->ref_count++;
	vm_object_res_reference(object);
	while (!object->pager_ready) {
		vm_object_sleep(object,
				VM_OBJECT_EVENT_PAGER_READY,
				THREAD_UNINT);
	}
	vm_object_unlock(object);
	return (KERN_SUCCESS);
}
+
+
+/*
+ * vm_object_release_name:
+ *
+ * Enforces name semantic on memory_object reference count decrement
+ * This routine should not be called unless the caller holds a name
+ * reference gained through the memory_object_create_named.
+ *
+ * If the TERMINATE_IDLE flag is set, the call will return if the
+ * reference count is not 1. i.e. idle with the only remaining reference
+ * being the name.
+ * If the decision is made to proceed the name field flag is set to
+ * false and the reference count is decremented. If the RESPECT_CACHE
+ * flag is set and the reference count has gone to zero, the
+ * memory_object is checked to see if it is cacheable otherwise when
+ * the reference count is zero, it is simply terminated.
+ */
+
__private_extern__ kern_return_t
vm_object_release_name(
	vm_object_t	object,
	int		flags)
{
	vm_object_t	shadow;
	boolean_t	original_object = TRUE;

	/* walk down the shadow chain, starting at the named object */
	while (object != VM_OBJECT_NULL) {

		vm_object_lock(object);

		assert(object->alive);
		if (original_object)
			assert(object->named);
		assert(object->ref_count > 0);

		/*
		 * We have to wait for initialization before
		 * destroying or caching the object.
		 */

		if (object->pager_created && !object->pager_initialized) {
			assert(!object->can_persist);
			vm_object_assert_wait(object,
					VM_OBJECT_EVENT_INITIALIZED,
					THREAD_UNINT);
			vm_object_unlock(object);
			thread_block(THREAD_CONTINUE_NULL);
			continue;
		}

		/* busy (extra refs under TERMINATE_IDLE) or terminating: fail */
		if (((object->ref_count > 1)
			&& (flags & MEMORY_OBJECT_TERMINATE_IDLE))
			|| (object->terminating)) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		} else {
			if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
				vm_object_unlock(object);
				return KERN_SUCCESS;
			}
		}

		/* last reference + RESPECT_CACHE: let deallocate cache it */
		if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
					(object->ref_count == 1)) {
			if (original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			/* let vm_object_deallocate push this thing into */
			/* the cache, if that it is where it is bound */
			vm_object_deallocate(object);
			return KERN_SUCCESS;
		}
		VM_OBJ_RES_DECR(object);
		/* "pageout" objects hold only a paging ref on their shadow */
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;

		if (object->ref_count == 1) {
			/* NOTE(review): vm_object_terminate appears to consume
			 * the object lock and reference — confirm against its
			 * definition before touching this path. */
			if (vm_object_terminate(object) != KERN_SUCCESS) {
				if (original_object) {
					return KERN_FAILURE;
				} else {
					return KERN_SUCCESS;
				}
			}
			/* keep walking to release the shadow's reference too */
			if (shadow != VM_OBJECT_NULL) {
				original_object = FALSE;
				object = shadow;
				continue;
			}
			return KERN_SUCCESS;
		} else {
			vm_object_lock_assert_exclusive(object);
			object->ref_count--;
			assert(object->ref_count > 0);
			if(original_object)
				object->named = FALSE;
			vm_object_unlock(object);
			return KERN_SUCCESS;
		}
	}
	/*NOTREACHED*/
	assert(0);
	return KERN_FAILURE;
}
+
+
+__private_extern__ kern_return_t
+vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot)
+{
+ __unused boolean_t should_flush;
+
+ should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
+
+ XPR(XPR_MEMORY_OBJECT,
+ "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
+ object, offset, size,
+ (((should_return&1)<<1)|should_flush), prot);
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page_64(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object reference from being released.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ (void)vm_object_update(object,
+ offset, size, NULL, NULL, should_return, flags, prot);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Empty a purgeable object by grabbing the physical pages assigned to it and
+ * putting them on the free queue without writing them to backing store, etc.
+ * When the pages are next touched they will be demand zero-fill pages. We
+ * skip pages which are busy, being paged in/out, wired, etc. We do _not_
+ * skip referenced/dirty pages, pages on the active queue, etc. We're more