+ * Create a new object which is backed by the
+ * specified existing object range. The source
+ * object reference is deallocated.
+ *
+ * The new object and offset into that object
+ * are returned in the source parameters.
+ */
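+/*
+ * When TRUE, vm_object_shadow() skips creating a shadow when the check
+ * below can prove one is not needed (same size, a single reference, and
+ * no copy object hanging off the existing shadow).
+ */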
+boolean_t vm_object_shadow_check = TRUE;
+
+__private_extern__ boolean_t
+vm_object_shadow(
+ vm_object_t *object, /* IN/OUT */
+ vm_object_offset_t *offset, /* IN/OUT */
+ vm_object_size_t length)
+{
+ register vm_object_t source;
+ register vm_object_t result;
+
+ source = *object;
+ assert(source != VM_OBJECT_NULL);
+ if (source == VM_OBJECT_NULL)
+ return FALSE;
+
+#if 0
+ /*
+ * XXX FBDP
+ * This assertion is valid but it gets triggered by Rosetta for example
+ * due to a combination of vm_remap() that changes a VM object's
+ * copy_strategy from SYMMETRIC to DELAY and vm_protect(VM_PROT_COPY)
+ * that then sets "needs_copy" on its map entry. This creates a
+ * mapping situation that VM should never see and doesn't know how to
+ * handle.
+ * It's not clear if this can create any real problem but we should
+ * look into fixing this, probably by having vm_protect(VM_PROT_COPY)
+ * do more than just set "needs_copy" to handle the copy-on-write...
+ * In the meantime, let's disable the assertion.
+ */
+ assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
+#endif
+
+ /*
+ * Determine if we really need a shadow.
+ *
+ * If the source object is larger than what we are trying
+ * to create, then force the shadow creation even if the
+ * ref count is 1. This will allow us to [potentially]
+ * collapse the underlying object away in the future
+ * (freeing up the extra data it might contain and that
+ * we don't need).
+ */
+ if (vm_object_shadow_check &&
+ source->vo_size == length &&
+ source->ref_count == 1 &&
+ (source->shadow == VM_OBJECT_NULL ||
+ source->shadow->copy == VM_OBJECT_NULL) )
+ {
+ source->shadowed = FALSE;
+ return FALSE;
+ }
+
+ /*
+ * Allocate a new object with the given length
+ */
+
+ if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
+ panic("vm_object_shadow: no object for shadowing");
+
+ /*
+ * The new object shadows the source object, adding
+ * a reference to it. Our caller changes his reference
+ * to point to the new object, removing a reference to
+ * the source object. Net result: no change of reference
+ * count.
+ */
+ result->shadow = source;
+
+ /*
+ * Store the offset into the source object,
+ * and fix up the offset into the new object.
+ */
+
+ result->vo_shadow_offset = *offset;
+
+ /*
+	 * Return the new object and offset.
+ */
+
+ *offset = 0;
+ *object = result;
+ return TRUE;
+}
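+
+#if 0
+/*
+ * Illustrative sketch only (kept out of the build): how a hypothetical
+ * caller would shadow an object in place before a copy-on-write
+ * operation.  The function name "example_shadow_for_cow" is invented
+ * for illustration; the vm_object_shadow() call is the real interface.
+ */
+static void
+example_shadow_for_cow(
+	vm_object_t		*objectp,	/* IN/OUT */
+	vm_object_offset_t	*offsetp,	/* IN/OUT */
+	vm_object_size_t	length)
+{
+	if (vm_object_shadow(objectp, offsetp, length)) {
+		/*
+		 * *objectp now points at the new shadow object, whose
+		 * vo_shadow_offset records the old *offsetp, and *offsetp
+		 * has been reset to 0.
+		 */
+	} else {
+		/* no shadow was needed (or the source was null) */
+	}
+}
+#endif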
+
+/*
+ * The relationship between vm_object structures and
+ * the memory_object requires careful synchronization.
+ *
+ * All associations are created by memory_object_create_named
+ * for external pagers and vm_object_pager_create for internal
+ * objects as follows:
+ *
+ * pager: the memory_object itself, supplied by
+ * the user requesting a mapping (or the kernel,
+ * when initializing internal objects); the
+ * kernel simulates holding send rights by keeping
+ * a port reference;
+ *
+ * pager_request:
+ * the memory object control port,
+ * created by the kernel; the kernel holds
+ * receive (and ownership) rights to this
+ * port, but no other references.
+ *
+ * When initialization is complete, the "initialized" field
+ * is asserted. Other mappings using a particular memory object,
+ * and any references to the vm_object gained through the
+ * port association must wait for this initialization to occur.
+ *
+ * In order to allow the memory manager to set attributes before
+ * requests (notably virtual copy operations, but also data or
+ * unlock requests) are made, a "ready" attribute is made available.
+ * Only the memory manager may affect the value of this attribute.
+ * Its value does not affect critical kernel functions, such as
+ * internal object initialization or destruction. [Furthermore,
+ * memory objects created by the kernel are assumed to be ready
+ * immediately; the default memory manager need not explicitly
+ * set the "ready" attribute.]
+ *
+ * [Both the "initialized" and "ready" attribute wait conditions
+ * use the "pager" field as the wait event.]
+ *
+ * The port associations can be broken down by any of the
+ * following routines:
+ * vm_object_terminate:
+ * No references to the vm_object remain, and
+ * the object cannot (or will not) be cached.
+ * This is the normal case, and is done even
+ *			if one of the other cases has already been
+ * done.
+ * memory_object_destroy:
+ * The memory manager has requested that the
+ * kernel relinquish references to the memory
+ * object. [The memory manager may not want to
+ * destroy the memory object, but may wish to
+ * refuse or tear down existing memory mappings.]
+ *
+ * Each routine that breaks an association must break all of
+ * them at once. At some later time, that routine must clear
+ * the pager field and release the memory object references.
+ * [Furthermore, each routine must cope with the simultaneous
+ * or previous operations of the others.]
+ *
+ * In addition to the lock on the object, the vm_object_hash_lock
+ * governs the associations. References gained through the
+ * association require use of the hash lock.
+ *
+ * Because the pager field may be cleared spontaneously, it
+ * cannot be used to determine whether a memory object has
+ * ever been associated with a particular vm_object. [This
+ * knowledge is important to the shadow object mechanism.]
+ * For this reason, an additional "created" attribute is
+ * provided.
+ *
+ * During various paging operations, the pager reference found in the
+ * vm_object must be valid. To prevent this from being released,
+ * (other than being removed, i.e., made null), routines may use
+ * the vm_object_paging_begin/end routines [actually, macros].
+ * The implementation uses the "paging_in_progress" and "wanted" fields.
+ * [Operations that alter the validity of the pager values include the
+ * termination routines and vm_object_collapse.]
+ */
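+
+#if 0
+/*
+ * Illustrative sketch only (kept out of the build): the paging-reference
+ * pattern described above.  A routine that needs the object's pager
+ * association to remain valid brackets its use with
+ * vm_object_paging_begin/end while holding the object lock; the function
+ * name "example_hold_pager" is invented for illustration.
+ */
+static void
+example_hold_pager(
+	vm_object_t	object)
+{
+	vm_object_lock(object);
+	vm_object_paging_begin(object);
+	/*
+	 * object->pager may still be cleared (made null) here, but it
+	 * will not be released while the paging reference is held.
+	 */
+	vm_object_paging_end(object);
+	vm_object_unlock(object);
+}
+#endif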
+
+
+/*
+ * Routine: vm_object_enter
+ * Purpose:
+ * Find a VM object corresponding to the given
+ * pager; if no such object exists, create one,
+ * and initialize the pager.
+ */
+vm_object_t
+vm_object_enter(
+ memory_object_t pager,
+ vm_object_size_t size,
+ boolean_t internal,
+ boolean_t init,
+ boolean_t named)
+{
+ register vm_object_t object;
+ vm_object_t new_object;
+ boolean_t must_init;
+ vm_object_hash_entry_t entry, new_entry;
+ uint32_t try_failed_count = 0;
+ lck_mtx_t *lck;
+
+ if (pager == MEMORY_OBJECT_NULL)
+ return(vm_object_allocate(size));
+
+ new_object = VM_OBJECT_NULL;
+ new_entry = VM_OBJECT_HASH_ENTRY_NULL;
+ must_init = init;
+
+ /*
+ * Look for an object associated with this port.
+ */
+Retry:
+ lck = vm_object_hash_lock_spin(pager);
+ do {
+ entry = vm_object_hash_lookup(pager, FALSE);
+
+ if (entry == VM_OBJECT_HASH_ENTRY_NULL) {
+ if (new_object == VM_OBJECT_NULL) {
+ /*
+ * We must unlock to create a new object;
+ * if we do so, we must try the lookup again.
+ */
+ vm_object_hash_unlock(lck);
+ assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL);
+ new_entry = vm_object_hash_entry_alloc(pager);
+ new_object = vm_object_allocate(size);
+ /*
+				 * Set new_object->hashed now, while no one
+ * knows about this object yet and we
+ * don't need to lock it. Once it's in
+ * the hash table, we would have to lock
+ * the object to set its "hashed" bit and
+ * we can't lock the object while holding
+ * the hash lock as a spinlock...
+ */
+ new_object->hashed = TRUE;
+ lck = vm_object_hash_lock_spin(pager);
+ } else {
+ /*
+ * Lookup failed twice, and we have something
+ * to insert; set the object.
+ */
+ /*
+ * We can't lock the object here since we're
+ * holding the hash lock as a spin lock.
+ * We've already pre-set "new_object->hashed"
+ * when we created "new_object" above, so we
+ * won't need to modify the object in
+ * vm_object_hash_insert().
+ */
+ assert(new_object->hashed);
+ vm_object_hash_insert(new_entry, new_object);
+ entry = new_entry;
+ new_entry = VM_OBJECT_HASH_ENTRY_NULL;
+ new_object = VM_OBJECT_NULL;
+ must_init = TRUE;
+ }
+ } else if (entry->object == VM_OBJECT_NULL) {
+ /*
+ * If a previous object is being terminated,
+ * we must wait for the termination message
+			 * to be queued (and look up the entry again).
+ */
+ entry->waiting = TRUE;
+ entry = VM_OBJECT_HASH_ENTRY_NULL;
+ assert_wait((event_t) pager, THREAD_UNINT);
+ vm_object_hash_unlock(lck);
+
+ thread_block(THREAD_CONTINUE_NULL);
+ lck = vm_object_hash_lock_spin(pager);
+ }
+ } while (entry == VM_OBJECT_HASH_ENTRY_NULL);
+
+ object = entry->object;
+ assert(object != VM_OBJECT_NULL);
+
+ if (!must_init) {
+ if ( !vm_object_lock_try(object)) {
+
+ vm_object_hash_unlock(lck);
+
+ try_failed_count++;
+ mutex_pause(try_failed_count); /* wait a bit */
+ goto Retry;
+ }
+ assert(!internal || object->internal);
+#if VM_OBJECT_CACHE
+ if (object->ref_count == 0) {
+ if ( !vm_object_cache_lock_try()) {
+
+ vm_object_hash_unlock(lck);
+ vm_object_unlock(object);
+
+ try_failed_count++;
+ mutex_pause(try_failed_count); /* wait a bit */
+ goto Retry;
+ }
+ XPR(XPR_VM_OBJECT_CACHE,
+ "vm_object_enter: removing %x from cache, head (%x, %x)\n",
+ object,
+ vm_object_cached_list.next,
+ vm_object_cached_list.prev, 0,0);
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+
+ vm_object_cache_unlock();
+ }
+#endif
+ if (named) {
+ assert(!object->named);
+ object->named = TRUE;
+ }
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+ vm_object_res_reference(object);
+
+ vm_object_hash_unlock(lck);
+ vm_object_unlock(object);
+
+ VM_STAT_INCR(hits);
+ } else
+ vm_object_hash_unlock(lck);
+
+ assert(object->ref_count > 0);
+
+ VM_STAT_INCR(lookups);
+
+ XPR(XPR_VM_OBJECT,
+ "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n",
+ pager, object, must_init, 0, 0);
+
+ /*
+ * If we raced to create a vm_object but lost, let's
+ * throw away ours.
+ */
+
+ if (new_object != VM_OBJECT_NULL) {
+ /*
+ * Undo the pre-setting of "new_object->hashed" before
+ * deallocating "new_object", since we did not insert it
+ * into the hash table after all.
+ */
+ assert(new_object->hashed);
+ new_object->hashed = FALSE;
+ vm_object_deallocate(new_object);
+ }
+
+ if (new_entry != VM_OBJECT_HASH_ENTRY_NULL)
+ vm_object_hash_entry_free(new_entry);
+
+ if (must_init) {
+ memory_object_control_t control;
+
+ /*
+ * Allocate request port.
+ */
+
+ control = memory_object_control_allocate(object);
+ assert (control != MEMORY_OBJECT_CONTROL_NULL);
+
+ vm_object_lock(object);
+ assert(object != kernel_object);
+
+ /*
+ * Copy the reference we were given.
+ */
+
+ memory_object_reference(pager);
+ object->pager_created = TRUE;
+ object->pager = pager;
+ object->internal = internal;
+ object->pager_trusted = internal;
+ if (!internal) {
+ /* copy strategy invalid until set by memory manager */
+ object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
+ }
+ object->pager_control = control;
+ object->pager_ready = FALSE;
+
+ vm_object_unlock(object);
+
+ /*
+ * Let the pager know we're using it.
+ */
+
+ (void) memory_object_init(pager,
+ object->pager_control,
+ PAGE_SIZE);
+
+ vm_object_lock(object);
+ if (named)
+ object->named = TRUE;
+ if (internal) {
+ object->pager_ready = TRUE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
+ }
+
+ object->pager_initialized = TRUE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
+ } else {
+ vm_object_lock(object);
+ }
+
+ /*
+ * [At this point, the object must be locked]
+ */
+
+ /*
+ * Wait for the work above to be done by the first
+ * thread to map this object.
+ */
+
+ while (!object->pager_initialized) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ }
+ vm_object_unlock(object);
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_enter: vm_object %x, memory_object %x, internal %d\n",
+ object, object->pager, internal, 0,0);
+ return(object);
+}
+
+/*
+ * Routine: vm_object_pager_create
+ * Purpose:
+ * Create a memory object for an internal object.
+ * In/out conditions:
+ * The object is locked on entry and exit;
+ * it may be unlocked within this call.
+ * Limitations:
+ * Only one thread may be performing a
+ * vm_object_pager_create on an object at
+ * a time. Presumably, only the pageout
+ * daemon will be using this routine.
+ */
+
+void
+vm_object_pager_create(
+ register vm_object_t object)
+{
+ memory_object_t pager;
+ vm_object_hash_entry_t entry;
+ lck_mtx_t *lck;
+#if MACH_PAGEMAP
+ vm_object_size_t size;
+ vm_external_map_t map;
+#endif /* MACH_PAGEMAP */
+
+ XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n",
+ object, 0,0,0,0);
+
+ assert(object != kernel_object);
+
+ if (memory_manager_default_check() != KERN_SUCCESS)
+ return;
+
+ /*
+ * Prevent collapse or termination by holding a paging reference
+ */
+
+ vm_object_paging_begin(object);
+ if (object->pager_created) {
+ /*
+ * Someone else got to it first...
+ * wait for them to finish initializing the ports
+ */
+ while (!object->pager_initialized) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ }
+ vm_object_paging_end(object);
+ return;
+ }
+
+ /*
+ * Indicate that a memory object has been assigned
+ * before dropping the lock, to prevent a race.
+ */
+
+ object->pager_created = TRUE;
+ object->paging_offset = 0;
+
+#if MACH_PAGEMAP
+ size = object->vo_size;
+#endif /* MACH_PAGEMAP */
+ vm_object_unlock(object);
+
+#if MACH_PAGEMAP
+ if (DEFAULT_PAGER_IS_ACTIVE) {
+ map = vm_external_create(size);
+ vm_object_lock(object);
+ assert(object->vo_size == size);
+ object->existence_map = map;
+ vm_object_unlock(object);
+ }
+#endif /* MACH_PAGEMAP */
+
+ if ((uint32_t) object->vo_size != object->vo_size) {
+ panic("vm_object_pager_create(): object size 0x%llx >= 4GB\n",
+ (uint64_t) object->vo_size);
+ }
+
+ /*
+ * Create the [internal] pager, and associate it with this object.
+ *
+ * We make the association here so that vm_object_enter()
+ * can look up the object to complete initializing it. No
+ * user will ever map this object.
+ */
+ {
+ memory_object_default_t dmm;
+
+ /* acquire a reference for the default memory manager */
+ dmm = memory_manager_default_reference();
+
+ assert(object->temporary);
+
+ /* create our new memory object */
+ assert((vm_size_t) object->vo_size == object->vo_size);
+ (void) memory_object_create(dmm, (vm_size_t) object->vo_size,
+ &pager);
+
+ memory_object_default_deallocate(dmm);
+ }
+
+ entry = vm_object_hash_entry_alloc(pager);
+
+ vm_object_lock(object);
+ lck = vm_object_hash_lock_spin(pager);
+ vm_object_hash_insert(entry, object);
+ vm_object_hash_unlock(lck);
+ vm_object_unlock(object);
+
+ /*
+ * A reference was returned by
+ * memory_object_create(), and it is
+ * copied by vm_object_enter().
+ */
+
+ if (vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE) != object)
+ panic("vm_object_pager_create: mismatch");
+
+ /*
+ * Drop the reference we were passed.
+ */
+ memory_object_deallocate(pager);
+
+ vm_object_lock(object);
+
+ /*
+ * Release the paging reference
+ */
+ vm_object_paging_end(object);
+}
+
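+/*
+ *	Routine:	vm_object_compressor_pager_create
+ *	Purpose:
+ *		Create a compressor memory object for an internal object,
+ *		the compressor counterpart of vm_object_pager_create() above.
+ *	In/out conditions:
+ *		The object is locked on entry and exit;
+ *		it may be unlocked within this call.
+ */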
+void
+vm_object_compressor_pager_create(
+ register vm_object_t object)
+{
+ memory_object_t pager;
+ vm_object_hash_entry_t entry;
+ lck_mtx_t *lck;
+ vm_object_t pager_object = VM_OBJECT_NULL;
+
+ assert(object != kernel_object);
+
+ /*
+ * Prevent collapse or termination by holding a paging reference
+ */
+
+ vm_object_paging_begin(object);
+ if (object->pager_created) {
+ /*
+ * Someone else got to it first...
+ * wait for them to finish initializing the ports
+ */
+ while (!object->pager_initialized) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ }
+ vm_object_paging_end(object);
+ return;
+ }
+
+ /*
+ * Indicate that a memory object has been assigned
+ * before dropping the lock, to prevent a race.
+ */
+
+ object->pager_created = TRUE;
+ object->paging_offset = 0;
+
+ vm_object_unlock(object);
+
+ if ((uint32_t) (object->vo_size/PAGE_SIZE) !=
+ (object->vo_size/PAGE_SIZE)) {
+ panic("vm_object_compressor_pager_create(%p): "
+ "object size 0x%llx >= 0x%llx\n",
+ object,
+ (uint64_t) object->vo_size,
+ 0x0FFFFFFFFULL*PAGE_SIZE);
+ }
+
+ /*
+ * Create the [internal] pager, and associate it with this object.
+ *
+ * We make the association here so that vm_object_enter()
+ * can look up the object to complete initializing it. No
+ * user will ever map this object.
+ */
+ {
+ assert(object->temporary);
+
+ /* create our new memory object */
+ assert((uint32_t) (object->vo_size/PAGE_SIZE) ==
+ (object->vo_size/PAGE_SIZE));
+ (void) compressor_memory_object_create(
+ (memory_object_size_t) object->vo_size,
+ &pager);
+ if (pager == NULL) {
+ panic("vm_object_compressor_pager_create(): "
+ "no pager for object %p size 0x%llx\n",
+ object, (uint64_t) object->vo_size);
+ }
+ }
+
+ entry = vm_object_hash_entry_alloc(pager);
+
+ vm_object_lock(object);
+ lck = vm_object_hash_lock_spin(pager);
+ vm_object_hash_insert(entry, object);
+ vm_object_hash_unlock(lck);
+ vm_object_unlock(object);
+
+ /*
+ * A reference was returned by
+ * memory_object_create(), and it is
+ * copied by vm_object_enter().
+ */
+
+ pager_object = vm_object_enter(pager, object->vo_size, TRUE, TRUE, FALSE);
+
+ if (pager_object != object) {
+ panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size);
+ }
+
+ /*
+ * Drop the reference we were passed.
+ */
+ memory_object_deallocate(pager);
+
+ vm_object_lock(object);
+
+ /*
+ * Release the paging reference
+ */
+ vm_object_paging_end(object);
+}
+
+/*
+ * Routine: vm_object_remove
+ * Purpose:
+ * Eliminate the pager/object association
+ * for this pager.
+ * Conditions:
+ * The object cache must be locked.
+ */
+__private_extern__ void
+vm_object_remove(
+ vm_object_t object)
+{
+ memory_object_t pager;
+
+ if ((pager = object->pager) != MEMORY_OBJECT_NULL) {
+ vm_object_hash_entry_t entry;
+
+ entry = vm_object_hash_lookup(pager, FALSE);
+ if (entry != VM_OBJECT_HASH_ENTRY_NULL)
+ entry->object = VM_OBJECT_NULL;
+ }
+
+}
+
+/*
+ * Global variables for vm_object_collapse():
+ *
+ * Counts for normal collapses and bypasses.
+ * Debugging variables, to watch or disable collapse.
+ */
+static long object_collapses = 0;
+static long object_bypasses = 0;
+
+static boolean_t vm_object_collapse_allowed = TRUE;
+static boolean_t vm_object_bypass_allowed = TRUE;
+
+#if MACH_PAGEMAP
+static int vm_external_discarded;
+static int vm_external_collapsed;
+#endif
+
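+/* count of encrypted pages encountered while collapsing (see vm_object_do_collapse) */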
+unsigned long vm_object_collapse_encrypted = 0;
+
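+/*
+ *	Routine:	vm_object_do_collapse_compressor
+ *	Purpose:
+ *		Move every compressed page that backing_object holds for the
+ *		range shadowed by "object" into object's compressor pager,
+ *		skipping offsets for which "object" already has a resident
+ *		or compressed page.
+ *	Conditions:
+ *		Both objects are locked exclusive.
+ */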
+void vm_object_do_collapse_compressor(vm_object_t object,
+ vm_object_t backing_object);
+void
+vm_object_do_collapse_compressor(
+ vm_object_t object,
+ vm_object_t backing_object)
+{
+ vm_object_offset_t new_offset, backing_offset;
+ vm_object_size_t size;
+
+ vm_counters.do_collapse_compressor++;
+
+ vm_object_lock_assert_exclusive(object);
+ vm_object_lock_assert_exclusive(backing_object);
+
+ size = object->vo_size;
+
+ /*
+ * Move all compressed pages from backing_object
+ * to the parent.
+ */
+
+ for (backing_offset = object->vo_shadow_offset;
+ backing_offset < object->vo_shadow_offset + object->vo_size;
+ backing_offset += PAGE_SIZE) {
+ memory_object_offset_t backing_pager_offset;
+
+ /* find the next compressed page at or after this offset */
+ backing_pager_offset = (backing_offset +
+ backing_object->paging_offset);
+ backing_pager_offset = vm_compressor_pager_next_compressed(
+ backing_object->pager,
+ backing_pager_offset);
+ if (backing_pager_offset == (memory_object_offset_t) -1) {
+ /* no more compressed pages */
+ break;
+ }
+ backing_offset = (backing_pager_offset -
+ backing_object->paging_offset);
+
+ new_offset = backing_offset - object->vo_shadow_offset;
+
+ if (new_offset >= object->vo_size) {
+ /* we're out of the scope of "object": done */
+ break;
+ }
+
+ if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
+ (vm_compressor_pager_state_get(object->pager,
+ (new_offset +
+ object->paging_offset)) ==
+ VM_EXTERNAL_STATE_EXISTS)) {
+ /*
+ * This page already exists in object, resident or
+ * compressed.
+ * We don't need this compressed page in backing_object
+ * and it will be reclaimed when we release
+ * backing_object.
+ */
+ continue;
+ }
+
+ /*
+ * backing_object has this page in the VM compressor and
+ * we need to transfer it to object.
+ */
+ vm_counters.do_collapse_compressor_pages++;
+ vm_compressor_pager_transfer(
+ /* destination: */
+ object->pager,
+ (new_offset + object->paging_offset),
+ /* source: */
+ backing_object->pager,
+ (backing_offset + backing_object->paging_offset));
+ }
+}
+
+/*
+ * Routine: vm_object_do_collapse
+ * Purpose:
+ * Collapse an object with the object backing it.
+ * Pages in the backing object are moved into the
+ * parent, and the backing object is deallocated.
+ * Conditions:
+ * Both objects and the cache are locked; the page
+ * queues are unlocked.
+ *
+ */
+static void
+vm_object_do_collapse(
+ vm_object_t object,
+ vm_object_t backing_object)
+{
+ vm_page_t p, pp;
+ vm_object_offset_t new_offset, backing_offset;
+ vm_object_size_t size;
+
+ vm_object_lock_assert_exclusive(object);
+ vm_object_lock_assert_exclusive(backing_object);
+
+ assert(object->purgable == VM_PURGABLE_DENY);
+ assert(backing_object->purgable == VM_PURGABLE_DENY);
+
+ backing_offset = object->vo_shadow_offset;
+ size = object->vo_size;
+
+ /*
+ * Move all in-memory pages from backing_object
+ * to the parent. Pages that have been paged out
+ * will be overwritten by any of the parent's
+ * pages that shadow them.
+ */
+
+ while (!queue_empty(&backing_object->memq)) {
+
+ p = (vm_page_t) queue_first(&backing_object->memq);
+
+ new_offset = (p->offset - backing_offset);
+
+ assert(!p->busy || p->absent);
+
+ /*
+ * If the parent has a page here, or if
+ * this page falls outside the parent,
+ * dispose of it.
+ *
+ * Otherwise, move it as planned.
+ */
+
+ if (p->offset < backing_offset || new_offset >= size) {
+ VM_PAGE_FREE(p);
+ } else {
+ /*
+ * ENCRYPTED SWAP:
+ * The encryption key includes the "pager" and the
+ * "paging_offset". These will not change during the
+ * object collapse, so we can just move an encrypted
+ * page from one object to the other in this case.
+ * We can't decrypt the page here, since we can't drop
+ * the object lock.
+ */
+ if (p->encrypted) {
+ vm_object_collapse_encrypted++;
+ }
+ pp = vm_page_lookup(object, new_offset);
+ if (pp == VM_PAGE_NULL) {
+
+ if (VM_COMPRESSOR_PAGER_STATE_GET(object,
+ new_offset)
+ == VM_EXTERNAL_STATE_EXISTS) {
+ /*
+ * Parent object has this page
+ * in the VM compressor.
+ * Throw away the backing
+ * object's page.
+ */
+ VM_PAGE_FREE(p);
+ } else {
+ /*
+ * Parent now has no page.
+ * Move the backing object's page
+ * up.
+ */
+ vm_page_rename(p, object, new_offset,
+ TRUE);
+ }
+
+#if MACH_PAGEMAP
+ } else if (pp->absent) {
+
+ /*
+ * Parent has an absent page...
+ * it's not being paged in, so
+ * it must really be missing from
+ * the parent.
+ *
+ * Throw out the absent page...
+ * any faults looking for that
+ * page will restart with the new
+ * one.
+ */
+
+ VM_PAGE_FREE(pp);
+ vm_page_rename(p, object, new_offset, TRUE);
+#endif /* MACH_PAGEMAP */
+ } else {
+ assert(! pp->absent);
+
+ /*
+ * Parent object has a real page.
+ * Throw away the backing object's
+ * page.
+ */
+ VM_PAGE_FREE(p);
+ }
+ }
+ }
+
+ if (vm_object_collapse_compressor_allowed &&
+ object->pager != MEMORY_OBJECT_NULL &&
+ backing_object->pager != MEMORY_OBJECT_NULL) {
+
+ /* move compressed pages from backing_object to object */
+ vm_object_do_collapse_compressor(object, backing_object);
+
+ } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
+ vm_object_hash_entry_t entry;
+
+#if !MACH_PAGEMAP
+ assert((!object->pager_created &&
+ (object->pager == MEMORY_OBJECT_NULL)) ||
+ (!backing_object->pager_created &&
+ (backing_object->pager == MEMORY_OBJECT_NULL)));
+#else
+ assert(!object->pager_created &&
+ object->pager == MEMORY_OBJECT_NULL);
+#endif /* !MACH_PAGEMAP */
+
+ /*
+ * Move the pager from backing_object to object.
+ *
+ * XXX We're only using part of the paging space
+ * for keeps now... we ought to discard the
+ * unused portion.
+ */
+
+ assert(!object->paging_in_progress);
+ assert(!object->activity_in_progress);
+ assert(!object->pager_created);
+ assert(object->pager == NULL);
+ object->pager = backing_object->pager;
+
+ if (backing_object->hashed) {
+ lck_mtx_t *lck;
+
+ lck = vm_object_hash_lock_spin(backing_object->pager);
+ entry = vm_object_hash_lookup(object->pager, FALSE);
+ assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
+ entry->object = object;
+ vm_object_hash_unlock(lck);
+
+ object->hashed = TRUE;
+ }
+ object->pager_created = backing_object->pager_created;
+ object->pager_control = backing_object->pager_control;
+ object->pager_ready = backing_object->pager_ready;
+ object->pager_initialized = backing_object->pager_initialized;
+ object->paging_offset =
+ backing_object->paging_offset + backing_offset;
+ if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object->pager_control,
+ object);
+ }
+ /* the backing_object has lost its pager: reset all fields */
+ backing_object->pager_created = FALSE;
+ backing_object->pager_control = NULL;
+ backing_object->pager_ready = FALSE;
+ backing_object->paging_offset = 0;
+ backing_object->pager = NULL;
+ }
+
+#if MACH_PAGEMAP
+ /*
+	 * If the shadow offset is 0, use the existence map from
+ * the backing object if there is one. If the shadow offset is
+ * not zero, toss it.
+ *
+ * XXX - If the shadow offset is not 0 then a bit copy is needed
+	 * if the map is to be salvaged. For now, we just toss the
+ * old map, giving the collapsed object no map. This means that
+ * the pager is invoked for zero fill pages. If analysis shows
+ * that this happens frequently and is a performance hit, then
+ * this code should be fixed to salvage the map.
+ */
+ assert(object->existence_map == VM_EXTERNAL_NULL);
+ if (backing_offset || (size != backing_object->vo_size)) {
+ vm_external_discarded++;
+ vm_external_destroy(backing_object->existence_map,
+ backing_object->vo_size);
+ }
+ else {
+ vm_external_collapsed++;
+ object->existence_map = backing_object->existence_map;
+ }
+ backing_object->existence_map = VM_EXTERNAL_NULL;
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Object now shadows whatever backing_object did.
+ * Note that the reference to backing_object->shadow
+ * moves from within backing_object to within object.
+ */
+
+ assert(!object->phys_contiguous);
+ assert(!backing_object->phys_contiguous);
+ object->shadow = backing_object->shadow;
+ if (object->shadow) {
+ object->vo_shadow_offset += backing_object->vo_shadow_offset;
+ /* "backing_object" gave its shadow to "object" */
+ backing_object->shadow = VM_OBJECT_NULL;
+ backing_object->vo_shadow_offset = 0;
+ } else {
+ /* no shadow, therefore no shadow offset... */
+ object->vo_shadow_offset = 0;
+ }
+ assert((object->shadow == VM_OBJECT_NULL) ||
+ (object->shadow->copy != backing_object));
+
+ /*
+ * Discard backing_object.
+ *
+ * Since the backing object has no pages, no
+ * pager left, and no object references within it,
+ * all that is necessary is to dispose of it.
+ */
+ object_collapses++;
+
+ assert(backing_object->ref_count == 1);
+ assert(backing_object->resident_page_count == 0);
+ assert(backing_object->paging_in_progress == 0);
+ assert(backing_object->activity_in_progress == 0);
+ assert(backing_object->shadow == VM_OBJECT_NULL);
+ assert(backing_object->vo_shadow_offset == 0);
+
+ if (backing_object->pager != MEMORY_OBJECT_NULL) {
+ /* ... unless it has a pager; need to terminate pager too */
+ vm_counters.do_collapse_terminate++;
+ if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
+ vm_counters.do_collapse_terminate_failure++;
+ }
+ return;
+ }
+
+ assert(backing_object->pager == NULL);
+
+ backing_object->alive = FALSE;
+ vm_object_unlock(backing_object);
+
+ XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n",
+ backing_object, 0,0,0,0);
+
+#if VM_OBJECT_TRACKING
+ if (vm_object_tracking_inited) {
+ btlog_remove_entries_for_element(vm_object_tracking_btlog,
+ backing_object);
+ }
+#endif /* VM_OBJECT_TRACKING */
+
+ vm_object_lock_destroy(backing_object);
+
+ zfree(vm_object_zone, backing_object);
+
+}
+
+static void
+vm_object_do_bypass(
+ vm_object_t object,
+ vm_object_t backing_object)
+{
+ /*
+ * Make the parent shadow the next object
+ * in the chain.
+ */
+
+ vm_object_lock_assert_exclusive(object);
+ vm_object_lock_assert_exclusive(backing_object);
+
+#if TASK_SWAPPER
+ /*
+ * Do object reference in-line to
+ * conditionally increment shadow's
+ * residence count. If object is not
+ * resident, leave residence count
+ * on shadow alone.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL) {
+ vm_object_lock(backing_object->shadow);
+ vm_object_lock_assert_exclusive(backing_object->shadow);
+ backing_object->shadow->ref_count++;
+ if (object->res_count != 0)
+ vm_object_res_reference(backing_object->shadow);
+ vm_object_unlock(backing_object->shadow);
+ }
+#else /* TASK_SWAPPER */
+ vm_object_reference(backing_object->shadow);
+#endif /* TASK_SWAPPER */
+
+ assert(!object->phys_contiguous);
+ assert(!backing_object->phys_contiguous);
+ object->shadow = backing_object->shadow;
+ if (object->shadow) {
+ object->vo_shadow_offset += backing_object->vo_shadow_offset;
+ } else {
+ /* no shadow, therefore no shadow offset... */
+ object->vo_shadow_offset = 0;
+ }
+
+ /*
+ * Backing object might have had a copy pointer
+ * to us. If it did, clear it.
+ */
+ if (backing_object->copy == object) {
+ backing_object->copy = VM_OBJECT_NULL;
+ }
+
+ /*
+ * Drop the reference count on backing_object.
+#if TASK_SWAPPER
+ * Since its ref_count was at least 2, it
+ * will not vanish; so we don't need to call
+ * vm_object_deallocate.
+ * [with a caveat for "named" objects]
+ *
+ * The res_count on the backing object is
+ * conditionally decremented. It's possible
+ * (via vm_pageout_scan) to get here with
+ * a "swapped" object, which has a 0 res_count,
+ * in which case, the backing object res_count
+ * is already down by one.
+#else
+ * Don't call vm_object_deallocate unless
+ * ref_count drops to zero.
+ *
+ * The ref_count can drop to zero here if the
+ * backing object could be bypassed but not
+ * collapsed, such as when the backing object
+	 * is temporary and cacheable.
+#endif
+ */
+ if (backing_object->ref_count > 2 ||
+ (!backing_object->named && backing_object->ref_count > 1)) {
+ vm_object_lock_assert_exclusive(backing_object);
+ backing_object->ref_count--;
+#if TASK_SWAPPER
+ if (object->res_count != 0)
+ vm_object_res_deallocate(backing_object);
+ assert(backing_object->ref_count > 0);
+#endif /* TASK_SWAPPER */
+ vm_object_unlock(backing_object);
+ } else {
+
+ /*
+ * Drop locks so that we can deallocate
+ * the backing object.
+ */
+
+#if TASK_SWAPPER
+ if (object->res_count == 0) {
+ /* XXX get a reference for the deallocate below */
+ vm_object_res_reference(backing_object);
+ }
+#endif /* TASK_SWAPPER */
+ /*
+ * vm_object_collapse (the caller of this function) is
+ * now called from contexts that may not guarantee that a
+ * valid reference is held on the object... w/o a valid
+ * reference, it is unsafe and unwise (you will definitely
+ * regret it) to unlock the object and then retake the lock
+ * since the object may be terminated and recycled in between.
+ * The "activity_in_progress" reference will keep the object
+ * 'stable'.
+ */
+ vm_object_activity_begin(object);
+ vm_object_unlock(object);
+
+ vm_object_unlock(backing_object);
+ vm_object_deallocate(backing_object);
+
+ /*
+ * Relock object. We don't have to reverify
+ * its state since vm_object_collapse will
+ * do that for us as it starts at the
+ * top of its loop.
+ */
+
+ vm_object_lock(object);
+ vm_object_activity_end(object);
+ }
+
+ object_bypasses++;
+}
+
+
+/*
+ * vm_object_collapse:
+ *
+ * Perform an object collapse or an object bypass if appropriate.
+ * The real work of collapsing and bypassing is performed in
+ * the routines vm_object_do_collapse and vm_object_do_bypass.
+ *
+ * Requires that the object be locked and the page queues be unlocked.
+ *
+ */
+static unsigned long vm_object_collapse_calls = 0;
+static unsigned long vm_object_collapse_objects = 0;
+static unsigned long vm_object_collapse_do_collapse = 0;
+static unsigned long vm_object_collapse_do_bypass = 0;
+
+__private_extern__ void
+vm_object_collapse(
+ register vm_object_t object,
+ register vm_object_offset_t hint_offset,
+ boolean_t can_bypass)
+{
+ register vm_object_t backing_object;
+ register unsigned int rcount;
+ register unsigned int size;
+ vm_object_t original_object;
+ int object_lock_type;
+ int backing_object_lock_type;
+
+ vm_object_collapse_calls++;
+
+ if (! vm_object_collapse_allowed &&
+ ! (can_bypass && vm_object_bypass_allowed)) {
+ return;
+ }
+
+ XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n",
+ object, 0,0,0,0);
+
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ original_object = object;
+
+ /*
+ * The top object was locked "exclusive" by the caller.
+ * In the first pass, to determine if we can collapse the shadow chain,
+ * take a "shared" lock on the shadow objects. If we can collapse,
+ * we'll have to go down the chain again with exclusive locks.
+ */
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_SHARED;
+
+retry:
+ object = original_object;
+ vm_object_lock_assert_exclusive(object);
+
+ while (TRUE) {
+ vm_object_collapse_objects++;
+ /*
+ * Verify that the conditions are right for either
+ * collapse or bypass:
+ */
+
+ /*
+ * There is a backing object, and
+ */
+
+ backing_object = object->shadow;
+ if (backing_object == VM_OBJECT_NULL) {
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ return;
+ }
+ if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
+ vm_object_lock_shared(backing_object);
+ } else {
+ vm_object_lock(backing_object);
+ }
+
+ /*
+ * No pages in the object are currently
+ * being paged out, and
+ */
+ if (object->paging_in_progress != 0 ||
+ object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * ...
+ * The backing object is not read_only,
+ * and no pages in the backing object are
+ * currently being paged out.
+ * The backing object is internal.
+ *
+ */
+
+ if (!backing_object->internal ||
+ backing_object->paging_in_progress != 0 ||
+ backing_object->activity_in_progress != 0) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * Purgeable objects are not supposed to engage in
+ * copy-on-write activities, so should not have
+ * any shadow objects or be a shadow object to another
+ * object.
+ * Collapsing a purgeable object would require some
+ * updates to the purgeable compressed ledgers.
+ */
+ if (object->purgable != VM_PURGABLE_DENY ||
+ backing_object->purgable != VM_PURGABLE_DENY) {
+ panic("vm_object_collapse() attempting to collapse "
+ "purgeable object: %p(%d) %p(%d)\n",
+ object, object->purgable,
+ backing_object, backing_object->purgable);
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * The backing object can't be a copy-object:
+ * the shadow_offset for the copy-object must stay
+ * as 0. Furthermore (for the 'we have all the
+ * pages' case), if we bypass backing_object and
+ * just shadow the next object in the chain, old
+ * pages from that object would then have to be copied
+ * BOTH into the (former) backing_object and into the
+ * parent object.
+ */
+ if (backing_object->shadow != VM_OBJECT_NULL &&
+ backing_object->shadow->copy == backing_object) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * We can now try to either collapse the backing
+ * object (if the parent is the only reference to
+ * it) or (perhaps) remove the parent's reference
+ * to it.
+ *
+ * If there is exactly one reference to the backing
+ * object, we may be able to collapse it into the
+ * parent.
+ *
+ * If MACH_PAGEMAP is defined:
+ * The parent must not have a pager created for it,
+ * since collapsing a backing_object dumps new pages
+ * into the parent that its pager doesn't know about
+ * (and the collapse code can't merge the existence
+ * maps).
+ * Otherwise:
+ * As long as one of the objects is still not known
+ * to the pager, we can collapse them.
+ */
+ if (backing_object->ref_count == 1 &&
+ (vm_object_collapse_compressor_allowed ||
+ !object->pager_created
+#if !MACH_PAGEMAP
+ || (!backing_object->pager_created)
+#endif /*!MACH_PAGEMAP */
+ ) && vm_object_collapse_allowed) {
+
+ /*
+ * We need the exclusive lock on the VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ /*
+ * We have an object and its shadow locked
+ * "shared". We can't just upgrade the locks
+ * to "exclusive", as some other thread might
+ * also have these objects locked "shared" and
+ * attempt to upgrade one or the other to
+ * "exclusive". The upgrades would block
+ * forever waiting for the other "shared" locks
+ * to get released.
+ * So we have to release the locks and go
+ * down the shadow chain again (since it could
+ * have changed) with "exclusive" locking.
+ */
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_collapse: %x to %x, pager %x, pager_control %x\n",
+ backing_object, object,
+ backing_object->pager,
+ backing_object->pager_control, 0);
+
+ /*
+ * Collapse the object with its backing
+ * object, and try again with the object's
+ * new backing object.
+ */
+
+ vm_object_do_collapse(object, backing_object);
+ vm_object_collapse_do_collapse++;
+ continue;
+ }
+
+ /*
+ * Collapsing the backing object was not possible
+ * or permitted, so let's try bypassing it.
+ */
+
+ if (! (can_bypass && vm_object_bypass_allowed)) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+
+ /*
+ * If the object doesn't have all its pages present,
+ * we have to make sure no pages in the backing object
+ * "show through" before bypassing it.
+ */
+ size = (unsigned int)atop(object->vo_size);
+ rcount = object->resident_page_count;
+
+ if (rcount != size) {
+ vm_object_offset_t offset;
+ vm_object_offset_t backing_offset;
+ unsigned int backing_rcount;
+
+ /*
+ * If the backing object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (backing_object->pager_created
+#if MACH_PAGEMAP
+ && (backing_object->existence_map == VM_EXTERNAL_NULL)
+#endif /* MACH_PAGEMAP */
+ ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object has a pager but no pagemap,
+ * then we cannot bypass it, because we don't know
+ * what pages it has.
+ */
+ if (object->pager_created
+#if MACH_PAGEMAP
+ && (object->existence_map == VM_EXTERNAL_NULL)
+#endif /* MACH_PAGEMAP */
+ ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ backing_offset = object->vo_shadow_offset;
+ backing_rcount = backing_object->resident_page_count;
+
+ if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+ /*
+ * we have enough pages in the backing object to guarantee that
+ * at least 1 of them must be 'uncovered' by a resident page
+ * in the object we're evaluating, so move on and
+ * try to collapse the rest of the shadow chain
+ */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+ * If the backing object has existence info,
+		 * we must examine its existence info
+ * as well.
+ *
+ */
+
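+	/*
+	 * EXISTS_IN_OBJECT(obj, off, rc) is TRUE if "obj" has its own copy
+	 * of the page at offset "off": either the compressor pager (or, with
+	 * MACH_PAGEMAP, the existence map) knows about it, or the page is
+	 * resident, in which case the resident-count hint "rc" is decremented.
+	 */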
+#if MACH_PAGEMAP
+#define EXISTS_IN_OBJECT(obj, off, rc) \
+ ((vm_external_state_get((obj)->existence_map, \
+ (vm_offset_t)(off)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ (VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+#else /* MACH_PAGEMAP */
+#define EXISTS_IN_OBJECT(obj, off, rc) \
+ ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
+ == VM_EXTERNAL_STATE_EXISTS) || \
+ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+#endif /* MACH_PAGEMAP */
+
+ /*
+ * Check the hint location first
+ * (since it is often the quickest way out of here).
+ */
+ if (object->cow_hint != ~(vm_offset_t)0)
+ hint_offset = (vm_object_offset_t)object->cow_hint;
+ else
+ hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
+ (hint_offset - 8 * PAGE_SIZE_64) : 0;
+
+ if (EXISTS_IN_OBJECT(backing_object, hint_offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, hint_offset, rcount)) {
+ /* dependency right at the hint */
+ object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+
+ /*
+ * If the object's window onto the backing_object
+ * is large compared to the number of resident
+ * pages in the backing object, it makes sense to
+ * walk the backing_object's resident pages first.
+ *
+ * NOTE: Pages may be in both the existence map and/or
+ * resident, so if we don't find a dependency while
+ * walking the backing object's resident page list
+ * directly, and there is an existence map, we'll have
+ * to run the offset based 2nd pass. Because we may
+ * have to run both passes, we need to be careful
+ * not to decrement 'rcount' in the 1st pass
+ */
+ if (backing_rcount && backing_rcount < (size / 8)) {
+ unsigned int rc = rcount;
+ vm_page_t p;
+
+ backing_rcount = backing_object->resident_page_count;
+ p = (vm_page_t)queue_first(&backing_object->memq);
+ do {
+ offset = (p->offset - backing_offset);
+
+ if (offset < object->vo_size &&
+ offset != hint_offset &&
+ !EXISTS_IN_OBJECT(object, offset, rc)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+
+ break;
+ }
+ p = (vm_page_t) queue_next(&p->listq);
+
+ } while (--backing_rcount);
+ if (backing_rcount != 0 ) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+
+ /*
+ * Walk through the offsets looking for pages in the
+ * backing object that show through to the object.
+ */
+ if (backing_rcount
+#if MACH_PAGEMAP
+ || backing_object->existence_map
+#endif /* MACH_PAGEMAP */
+ ) {
+ offset = hint_offset;
+
+ while((offset =
+ (offset + PAGE_SIZE_64 < object->vo_size) ?
+ (offset + PAGE_SIZE_64) : 0) != hint_offset) {
+
+ if (EXISTS_IN_OBJECT(backing_object, offset +
+ backing_offset, backing_rcount) &&
+ !EXISTS_IN_OBJECT(object, offset, rcount)) {
+ /* found a dependency */
+ object->cow_hint = (vm_offset_t) offset; /* atomic */
+ break;
+ }
+ }
+ if (offset != hint_offset) {
+ /* try and collapse the rest of the shadow chain */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
+ }
+ }
+
+ /*
+ * We need "exclusive" locks on the 2 VM objects.
+ */
+ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
+ vm_object_unlock(backing_object);
+ if (object != original_object)
+ vm_object_unlock(object);
+ object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
+ goto retry;
+ }
+
+ /* reset the offset hint for any objects deeper in the chain */
+ object->cow_hint = (vm_offset_t)0;
+
+ /*
+ * All interesting pages in the backing object
+ * already live in the parent or its pager.
+ * Thus we can bypass the backing object.
+ */
+
+ vm_object_do_bypass(object, backing_object);
+ vm_object_collapse_do_bypass++;
+
+ /*
+ * Try again with this object's new backing object.
+ */
+
+ continue;
+ }
+
+ /* NOT REACHED */
+ /*
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ */
+}
+
+/*
+ * Routine: vm_object_page_remove: [internal]
+ * Purpose:
+ * Removes all physical pages in the specified
+ * object range from the object's list of pages.
+ *
+ * In/out conditions:
+ * The object must be locked.
+ * The object must not have paging_in_progress, usually
+ * guaranteed by not having a pager.
+ */
+unsigned int vm_object_page_remove_lookup = 0;
+unsigned int vm_object_page_remove_iterate = 0;
+
+__private_extern__ void
+vm_object_page_remove(
+ register vm_object_t object,
+ register vm_object_offset_t start,
+ register vm_object_offset_t end)
+{
+ register vm_page_t p, next;
+
+ /*
+ * One and two page removals are most popular.
+ * The factor of 16 here is somewhat arbitrary.
+ * It balances vm_object_lookup vs iteration.
+ */
+
+ if (atop_64(end - start) < (unsigned)object->resident_page_count/16) {
+ vm_object_page_remove_lookup++;
+
+ for (; start < end; start += PAGE_SIZE_64) {
+ p = vm_page_lookup(object, start);
+ if (p != VM_PAGE_NULL) {
+ assert(!p->cleaning && !p->pageout && !p->laundry);
+ if (!p->fictitious && p->pmapped)
+ pmap_disconnect(p->phys_page);
+ VM_PAGE_FREE(p);
+ }
+ }
+ } else {
+ vm_object_page_remove_iterate++;
+
+ p = (vm_page_t) queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t) p)) {
+ next = (vm_page_t) queue_next(&p->listq);
+ if ((start <= p->offset) && (p->offset < end)) {
+ assert(!p->cleaning && !p->pageout && !p->laundry);
+ if (!p->fictitious && p->pmapped)
+ pmap_disconnect(p->phys_page);
+ VM_PAGE_FREE(p);
+ }
+ p = next;
+ }
+ }
+}
+
+
+/*
+ * Routine: vm_object_coalesce
+ * Function: Coalesces two objects backing up adjoining
+ * regions of memory into a single object.
+ *
+ * returns TRUE if objects were combined.
+ *
+ * NOTE: Only works at the moment if the second object is NULL -
+ * if it's not, which object do we lock first?
+ *
+ * Parameters:
+ * prev_object First object to coalesce
+ * prev_offset Offset into prev_object
+ *		next_object	Second object to coalesce
+ * next_offset Offset into next_object
+ *
+ * prev_size Size of reference to prev_object
+ * next_size Size of reference to next_object
+ *
+ * Conditions:
+ * The object(s) must *not* be locked. The map must be locked
+ * to preserve the reference to the object(s).
+ */
+static int vm_object_coalesce_count = 0;
+
+__private_extern__ boolean_t
+vm_object_coalesce(
+ register vm_object_t prev_object,
+ vm_object_t next_object,
+ vm_object_offset_t prev_offset,
+ __unused vm_object_offset_t next_offset,
+ vm_object_size_t prev_size,
+ vm_object_size_t next_size)
+{
+ vm_object_size_t newsize;
+
+#ifdef lint
+ next_offset++;
+#endif /* lint */
+
+ if (next_object != VM_OBJECT_NULL) {
+ return(FALSE);
+ }
+
+ if (prev_object == VM_OBJECT_NULL) {
+ return(TRUE);
+ }
+
+ XPR(XPR_VM_OBJECT,
+ "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n",
+ prev_object, prev_offset, prev_size, next_size, 0);
+
+ vm_object_lock(prev_object);
+
+ /*
+ * Try to collapse the object first
+ */
+ vm_object_collapse(prev_object, prev_offset, TRUE);
+
+ /*
+ * Can't coalesce if pages not mapped to
+	 * prev_entry may be in use in any way:
+ * . more than one reference
+ * . paged out
+ * . shadows another object
+ * . has a copy elsewhere
+ * . is purgeable
+ * . paging references (pages might be in page-list)
+ */
+
+ if ((prev_object->ref_count > 1) ||
+ prev_object->pager_created ||
+ (prev_object->shadow != VM_OBJECT_NULL) ||
+ (prev_object->copy != VM_OBJECT_NULL) ||
+ (prev_object->true_share != FALSE) ||
+ (prev_object->purgable != VM_PURGABLE_DENY) ||
+ (prev_object->paging_in_progress != 0) ||
+ (prev_object->activity_in_progress != 0)) {
+ vm_object_unlock(prev_object);
+ return(FALSE);
+ }
+
+ vm_object_coalesce_count++;
+
+ /*
+ * Remove any pages that may still be in the object from
+ * a previous deallocation.
+ */
+ vm_object_page_remove(prev_object,
+ prev_offset + prev_size,
+ prev_offset + prev_size + next_size);
+
+ /*
+ * Extend the object if necessary.
+ */
+ newsize = prev_offset + prev_size + next_size;
+ if (newsize > prev_object->vo_size) {
+#if MACH_PAGEMAP
+ /*
+ * We cannot extend an object that has existence info,
+ * since the existence info might then fail to cover
+ * the entire object.
+ *
+ * This assertion must be true because the object
+ * has no pager, and we only create existence info
+ * for objects with pagers.
+ */
+ assert(prev_object->existence_map == VM_EXTERNAL_NULL);
+#endif /* MACH_PAGEMAP */
+ prev_object->vo_size = newsize;
+ }
+
+ vm_object_unlock(prev_object);
+ return(TRUE);
+}
+
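+/*
+ *	Routine:	vm_object_populate_with_private
+ *	Purpose:
+ *		Back a "private" VM object with the given range of physical
+ *		pages, either by retargeting (or inserting) fictitious/private
+ *		pages page by page or, for a physically contiguous object, by
+ *		recording the base physical address in the object itself.
+ *	Returns:
+ *		KERN_SUCCESS	the object was populated
+ *		KERN_FAILURE	the object is not private, or "offset" is not
+ *				page aligned
+ */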
+kern_return_t
+vm_object_populate_with_private(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ ppnum_t phys_page,
+ vm_size_t size)
+{
+ ppnum_t base_page;
+ vm_object_offset_t base_offset;
+
+
+ if (!object->private)
+ return KERN_FAILURE;
+
+ base_page = phys_page;
+
+ vm_object_lock(object);
+
+ if (!object->phys_contiguous) {
+ vm_page_t m;
+
+ if ((base_offset = trunc_page_64(offset)) != offset) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+ base_offset += object->paging_offset;
+
+ while (size) {
+ m = vm_page_lookup(object, base_offset);
+
+ if (m != VM_PAGE_NULL) {
+ if (m->fictitious) {
+ if (m->phys_page != vm_page_guard_addr) {
+
+ vm_page_lockspin_queues();
+ m->private = TRUE;
+ vm_page_unlock_queues();
+
+ m->fictitious = FALSE;
+ m->phys_page = base_page;
+ }
+ } else if (m->phys_page != base_page) {
+
+ if ( !m->private) {
+ /*
+ * we'd leak a real page... that can't be right
+ */
+ panic("vm_object_populate_with_private - %p not private", m);
+ }
+ if (m->pmapped) {
+ /*
+ * pmap call to clear old mapping
+ */
+ pmap_disconnect(m->phys_page);
+ }
+ m->phys_page = base_page;
+ }
+ if (m->encrypted) {
+ /*
+					 * we should never see this on a fictitious or private page
+ */
+ panic("vm_object_populate_with_private - %p encrypted", m);
+ }
+
+ } else {
+ while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
+ vm_page_more_fictitious();
+
+ /*
+ * private normally requires lock_queues but since we
+				 * are initializing the page, it's not necessary here
+ */
+ m->private = TRUE;
+ m->fictitious = FALSE;
+ m->phys_page = base_page;
+ m->unusual = TRUE;
+ m->busy = FALSE;
+
+ vm_page_insert(m, object, base_offset);
+ }
+ base_page++; /* Go to the next physical page */
+ base_offset += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ } else {
+ /* NOTE: we should check the original settings here */
+ /* if we have a size > zero a pmap call should be made */
+ /* to disable the range */
+
+ /* pmap_? */
+
+ /* shadows on contiguous memory are not allowed */
+ /* we therefore can use the offset field */
+ object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
+ object->vo_size = size;
+ }
+ vm_object_unlock(object);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * memory_object_free_from_cache:
+ *
+ * Walk the vm_object cache list, removing and freeing vm_objects
+ * which are backed by the pager identified by the caller, (pager_ops).
+ * Remove up to "count" objects, if that many are available
+ * in the cache.
+ *
+ * Walk the list at most once, return the number of vm_objects
+ * actually freed.
+ */
+
+__private_extern__ kern_return_t
+memory_object_free_from_cache(
+ __unused host_t host,
+ __unused memory_object_pager_ops_t pager_ops,
+ int *count)
+{
+#if VM_OBJECT_CACHE
+ int object_released = 0;
+
+ register vm_object_t object = VM_OBJECT_NULL;
+ vm_object_t shadow;
+
+/*
+ if(host == HOST_NULL)
+ return(KERN_INVALID_ARGUMENT);
+*/
+
+ try_again:
+ vm_object_cache_lock();
+
+ queue_iterate(&vm_object_cached_list, object,
+ vm_object_t, cached_list) {
+ if (object->pager &&
+ (pager_ops == object->pager->mo_pager_ops)) {
+ vm_object_lock(object);
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+
+ vm_object_cache_unlock();
+ /*
+ * Since this object is in the cache, we know
+ * that it is initialized and has only a pager's
+ * (implicit) reference. Take a reference to avoid
+ * recursive deallocations.
+ */
+
+ assert(object->pager_initialized);
+ assert(object->ref_count == 0);
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+
+ /*
+ * Terminate the object.
+ * If the object had a shadow, we let
+ * vm_object_deallocate deallocate it.
+ * "pageout" objects have a shadow, but
+ * maintain a "paging reference" rather
+ * than a normal reference.
+ * (We are careful here to limit recursion.)
+ */
+ shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
+
+ if ((vm_object_terminate(object) == KERN_SUCCESS)
+ && (shadow != VM_OBJECT_NULL)) {
+ vm_object_deallocate(shadow);
+ }
+
+ if(object_released++ == *count)
+ return KERN_SUCCESS;
+ goto try_again;
+ }
+ }
+ vm_object_cache_unlock();
+ *count = object_released;
+#else
+ *count = 0;
+#endif
+ return KERN_SUCCESS;
+}
+
+
+
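+/*
+ *	Routine:	memory_object_create_named
+ *	Purpose:
+ *		Find or create the VM object associated with the given memory
+ *		object, take a named reference on it, and hand back its pager
+ *		control once the pager is ready.
+ *	Returns:
+ *		KERN_SUCCESS		named reference obtained
+ *		KERN_INVALID_ARGUMENT	null memory object
+ *		KERN_INVALID_OBJECT	vm_object_enter() failed
+ */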
+kern_return_t
+memory_object_create_named(
+ memory_object_t pager,
+ memory_object_offset_t size,
+ memory_object_control_t *control)
+{
+ vm_object_t object;
+ vm_object_hash_entry_t entry;
+ lck_mtx_t *lck;
+
+ *control = MEMORY_OBJECT_CONTROL_NULL;
+ if (pager == MEMORY_OBJECT_NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ lck = vm_object_hash_lock_spin(pager);
+ entry = vm_object_hash_lookup(pager, FALSE);
+
+ if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
+ (entry->object != VM_OBJECT_NULL)) {
+ if (entry->object->named == TRUE)
+ panic("memory_object_create_named: caller already holds the right"); }
+ vm_object_hash_unlock(lck);
+
+ if ((object = vm_object_enter(pager, size, FALSE, FALSE, TRUE)) == VM_OBJECT_NULL) {
+ return(KERN_INVALID_OBJECT);
+ }
+
+ /* wait for object (if any) to be ready */
+ if (object != VM_OBJECT_NULL) {
+ vm_object_lock(object);
+ object->named = TRUE;
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ *control = object->pager_control;
+ vm_object_unlock(object);
+ }
+ return (KERN_SUCCESS);
+}
+
+
+/*
+ * Routine: memory_object_recover_named [user interface]
+ * Purpose:
+ * Attempt to recover a named reference for a VM object.
+ * VM will verify that the object has not already started
+ * down the termination path, and if it has, will optionally
+ * wait for that to finish.
+ * Returns:
+ * KERN_SUCCESS - we recovered a named reference on the object
+ * KERN_FAILURE - we could not recover a reference (object dead)
+ * KERN_INVALID_ARGUMENT - bad memory object control
+ */
+kern_return_t
+memory_object_recover_named(
+ memory_object_control_t control,
+ boolean_t wait_on_terminating)
+{
+ vm_object_t object;
+
+ object = memory_object_control_to_vm_object(control);
+ if (object == VM_OBJECT_NULL) {
+ return (KERN_INVALID_ARGUMENT);
+ }
+restart:
+ vm_object_lock(object);
+
+ if (object->terminating && wait_on_terminating) {
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+ THREAD_UNINT);
+ goto restart;
+ }
+
+ if (!object->alive) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ if (object->named == TRUE) {
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+#if VM_OBJECT_CACHE
+ if ((object->ref_count == 0) && (!object->terminating)) {
+ if (!vm_object_cache_lock_try()) {
+ vm_object_unlock(object);
+ goto restart;
+ }
+ queue_remove(&vm_object_cached_list, object,
+ vm_object_t, cached_list);
+ vm_object_cached_count--;
+ XPR(XPR_VM_OBJECT_CACHE,
+ "memory_object_recover_named: removing %X, head (%X, %X)\n",
+ object,
+ vm_object_cached_list.next,
+ vm_object_cached_list.prev, 0,0);
+
+ vm_object_cache_unlock();
+ }
+#endif
+ object->named = TRUE;
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count++;
+ vm_object_res_reference(object);
+ while (!object->pager_ready) {
+ vm_object_sleep(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ }
+ vm_object_unlock(object);
+ return (KERN_SUCCESS);
+}
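+
+/*
+ * Illustrative sketch (hypothetical caller, not compiled): re-acquiring
+ * a named reference with memory_object_recover_named().  The only
+ * assumption is that the caller holds a valid memory_object_control_t.
+ */
+#if 0
+static boolean_t
+example_recover_name(
+    memory_object_control_t control)
+{
+    kern_return_t kr;
+
+    /* Wait out any in-flight termination instead of failing early. */
+    kr = memory_object_recover_named(control, TRUE);
+
+    /*
+     * KERN_SUCCESS: the object carries a named reference again.
+     * KERN_FAILURE: the object is dead and cannot be recovered.
+     */
+    return (kr == KERN_SUCCESS);
+}
+#endif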
+
+
+/*
+ * vm_object_release_name:
+ *
+ * Enforces the "named" reference semantics on a memory_object
+ * reference count decrement.  This routine should not be called
+ * unless the caller holds a named reference gained through
+ * memory_object_create_named.
+ *
+ * If the TERMINATE_IDLE flag is set, the call returns KERN_FAILURE
+ * unless the reference count is 1, i.e. the object is idle with the
+ * name being the only remaining reference.
+ * If the decision is made to proceed, the "named" flag is cleared
+ * and the reference count is decremented.  If the RESPECT_CACHE
+ * flag is set and the reference count has gone to zero, the
+ * memory_object is checked to see if it is cacheable; otherwise,
+ * once the reference count reaches zero, it is simply terminated.
+ */
+
+__private_extern__ kern_return_t
+vm_object_release_name(
+ vm_object_t object,
+ int flags)
+{
+ vm_object_t shadow;
+ boolean_t original_object = TRUE;
+
+ while (object != VM_OBJECT_NULL) {
+
+ vm_object_lock(object);
+
+ assert(object->alive);
+ if (original_object)
+ assert(object->named);
+ assert(object->ref_count > 0);
+
+ /*
+ * We have to wait for initialization before
+ * destroying or caching the object.
+ */
+
+ if (object->pager_created && !object->pager_initialized) {
+ assert(!object->can_persist);
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ vm_object_unlock(object);
+ thread_block(THREAD_CONTINUE_NULL);
+ continue;
+ }
+
+ if (((object->ref_count > 1)
+ && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
+ || (object->terminating)) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ } else {
+ if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+ }
+
+ if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
+ (object->ref_count == 1)) {
+ if (original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ /* let vm_object_deallocate push this object into */
+ /* the cache, if that is where it is bound */
+ vm_object_deallocate(object);
+ return KERN_SUCCESS;
+ }
+ VM_OBJ_RES_DECR(object);
+ shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
+
+ if (object->ref_count == 1) {
+ if (vm_object_terminate(object) != KERN_SUCCESS) {
+ if (original_object) {
+ return KERN_FAILURE;
+ } else {
+ return KERN_SUCCESS;
+ }
+ }
+ if (shadow != VM_OBJECT_NULL) {
+ original_object = FALSE;
+ object = shadow;
+ continue;
+ }
+ return KERN_SUCCESS;
+ } else {
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count--;
+ assert(object->ref_count > 0);
+ if (original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ }
+ }
+ /*NOTREACHED*/
+ assert(0);
+ return KERN_FAILURE;
+}
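+
+/*
+ * Illustrative sketch (hypothetical caller, not compiled): dropping a
+ * named reference with vm_object_release_name().  The wrapper and its
+ * policy are placeholders; the flags shown are the ones handled by the
+ * routine above.
+ */
+#if 0
+static kern_return_t
+example_release_name(
+    vm_object_t named_object)
+{
+    /*
+     * Only proceed if the name is the last remaining reference, and
+     * let the object be cached rather than terminated if it turns out
+     * to be cacheable.
+     */
+    return vm_object_release_name(named_object,
+        MEMORY_OBJECT_TERMINATE_IDLE | MEMORY_OBJECT_RESPECT_CACHE);
+}
+#endif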
+
+
+__private_extern__ kern_return_t
+vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot)
+{
+ __unused boolean_t should_flush;
+
+ should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
+
+ XPR(XPR_MEMORY_OBJECT,
+ "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
+ object, offset, size,
+ (((should_return&1)<<1)|should_flush), prot);
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page_64(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object reference from being released.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ (void)vm_object_update(object,
+ offset, size, NULL, NULL, should_return, flags, prot);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ return (KERN_SUCCESS);
+}
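+
+/*
+ * Illustrative sketch (hypothetical caller, not compiled): asking the VM
+ * to return all pages in a range to the pager and flush them from memory
+ * via vm_object_lock_request().  MEMORY_OBJECT_RETURN_ALL is assumed to
+ * be the standard memory_object return constant; the helper itself is a
+ * placeholder.
+ */
+#if 0
+static kern_return_t
+example_flush_range(
+    vm_object_t         object,
+    vm_object_offset_t  offset,
+    vm_object_size_t    size)
+{
+    /* "size" is rounded up to a page multiple by the routine itself. */
+    return vm_object_lock_request(object, offset, size,
+        MEMORY_OBJECT_RETURN_ALL,   /* return dirty and clean pages */
+        MEMORY_OBJECT_DATA_FLUSH,   /* then discard them */
+        VM_PROT_NO_CHANGE);
+}
+#endif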
+
+/*
+ * Empty a purgeable object by grabbing the physical pages assigned to it and
+ * putting them on the free queue without writing them to backing store, etc.
+ * When the pages are next touched they will be demand zero-fill pages. We
+ * skip pages which are busy, being paged in/out, wired, etc. We do _not_
+ * skip referenced/dirty pages, pages on the active queue, etc. We're more
+ * than happy to grab these since this is a purgeable object. We mark the
+ * object as "empty" after reaping its pages.
+ *
+ * On entry the object must be locked and it must be
+ * purgeable with no delayed copies pending.
+ */
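+
+/*
+ * Illustrative sketch (hypothetical caller, not compiled): only the entry
+ * conditions described above.  A real caller also manages the purgeable
+ * queues and ledgers before getting here.
+ */
+#if 0
+static void
+example_purge(
+    vm_object_t object)
+{
+    vm_object_lock(object);             /* exclusive lock required on entry */
+    assert(object->copy == VM_OBJECT_NULL); /* no delayed copies pending */
+
+    vm_object_purge(object, 0);         /* 0: may block in the compressor path;
+                                         * pass C_DONT_BLOCK to avoid that */
+
+    vm_object_unlock(object);
+}
+#endif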
+void
+vm_object_purge(vm_object_t object, int flags)
+{
+ unsigned int object_page_count = 0;
+ unsigned int pgcount = 0;
+ boolean_t skipped_object = FALSE;
+
+ vm_object_lock_assert_exclusive(object);
+
+ if (object->purgable == VM_PURGABLE_DENY)
+ return;
+
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+
+ /*
+ * We need to set the object's state to VM_PURGABLE_EMPTY *before*
+ * reaping its pages. We update vm_page_purgeable_count in bulk
+ * and we don't want vm_page_remove() to update it again for each
+ * page we reap later.
+ *
+ * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
+ * are all accounted for in the "volatile" ledgers, so this does not
+ * make any difference.
+ * If we transitioned directly from NONVOLATILE to EMPTY,
+ * vm_page_purgeable_count must have been updated when the object
+ * was dequeued from its volatile queue and the purgeable ledgers
+ * must have also been updated accordingly at that time (in
+ * vm_object_purgable_control()).
+ */
+ if (object->purgable == VM_PURGABLE_VOLATILE) {
+ unsigned int delta;
+ assert(object->resident_page_count >=
+ object->wired_page_count);
+ delta = (object->resident_page_count -
+ object->wired_page_count);
+ if (delta != 0) {
+ assert(vm_page_purgeable_count >=
+ delta);
+ OSAddAtomic(-delta,
+ (SInt32 *)&vm_page_purgeable_count);
+ }
+ if (object->wired_page_count != 0) {
+ assert(vm_page_purgeable_wired_count >=
+ object->wired_page_count);
+ OSAddAtomic(-object->wired_page_count,
+ (SInt32 *)&vm_page_purgeable_wired_count);
+ }
+ object->purgable = VM_PURGABLE_EMPTY;
+ }
+ assert(object->purgable == VM_PURGABLE_EMPTY);
+
+ object_page_count = object->resident_page_count;
+
+ vm_object_reap_pages(object, REAP_PURGEABLE);
+
+ if (object->pager != NULL &&
+ COMPRESSED_PAGER_IS_ACTIVE) {
+
+ if (object->activity_in_progress == 0 &&
+ object->paging_in_progress == 0) {
+ /*
+ * Also reap any memory coming from this object
+ * in the VM compressor.
+ *
+ * There are no operations in progress on the VM object
+ * and no operation can start while we're holding the
+ * VM object lock, so it's safe to reap the compressed
+ * pages and update the page counts.
+ */
+ pgcount = vm_compressor_pager_get_count(object->pager);
+ if (pgcount) {
+ pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
+ vm_compressor_pager_count(object->pager,
+ -pgcount,
+ FALSE, /* shared */
+ object);
+ vm_purgeable_compressed_update(object,
+ -pgcount);
+ }
+ if (!(flags & C_DONT_BLOCK)) {
+ assert(vm_compressor_pager_get_count(object->pager)
+ == 0);
+ }
+ } else {
+ /*
+ * There's some kind of paging activity in progress
+ * for this object, which could result in a page
+ * being compressed or decompressed, possibly while
+ * the VM object is not locked, so it could race
+ * with us.
+ *
+ * We can't really synchronize this without possibly
+ * causing a deadlock when the compressor needs to
+ * allocate or free memory while compressing or
+ * decompressing a page from a purgeable object
+ * mapped in the kernel_map...
+ *
+ * So let's not attempt to purge the compressor
+ * pager if there's any kind of operation in
+ * progress on the VM object.
+ */
+ skipped_object = TRUE;
+ }
+ }