+__private_extern__ kern_return_t
+vm_object_release_name(
+ vm_object_t object,
+ int flags)
+{
+ vm_object_t shadow;
+ boolean_t original_object = TRUE;
+
+ while (object != VM_OBJECT_NULL) {
+
+ /*
+ * The cache holds a reference (uncounted) to
+ * the object. We must lock it before removing
+ * the object.
+ */
+
+ vm_object_cache_lock();
+ vm_object_lock(object);
+ assert(object->alive);
+ if(original_object)
+ assert(object->named);
+ assert(object->ref_count > 0);
+
+ /*
+ * We have to wait for initialization before
+ * destroying or caching the object.
+ */
+
+ if (object->pager_created && !object->pager_initialized) {
+ assert(!object->can_persist);
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED,
+ THREAD_UNINT);
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ thread_block(THREAD_CONTINUE_NULL);
+ continue;
+ }
+
+ if (((object->ref_count > 1)
+ && (flags & MEMORY_OBJECT_TERMINATE_IDLE))
+ || (object->terminating)) {
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return KERN_FAILURE;
+ } else {
+ if (flags & MEMORY_OBJECT_RELEASE_NO_OP) {
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return KERN_SUCCESS;
+ }
+ }
+
+ if ((flags & MEMORY_OBJECT_RESPECT_CACHE) &&
+ (object->ref_count == 1)) {
+ if(original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ /* let vm_object_deallocate push this thing into */
+ /* the cache, if that is where it is bound */
+ vm_object_deallocate(object);
+ return KERN_SUCCESS;
+ }
+ VM_OBJ_RES_DECR(object);
+ /* remember the shadow now; vm_object_terminate() below may destroy this object */
+ shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
+ if(object->ref_count == 1) {
+ if(vm_object_terminate(object) != KERN_SUCCESS) {
+ if(original_object) {
+ return KERN_FAILURE;
+ } else {
+ return KERN_SUCCESS;
+ }
+ }
+ if (shadow != VM_OBJECT_NULL) {
+ original_object = FALSE;
+ object = shadow;
+ continue;
+ }
+ return KERN_SUCCESS;
+ } else {
+ vm_object_lock_assert_exclusive(object);
+ object->ref_count--;
+ assert(object->ref_count > 0);
+ if(original_object)
+ object->named = FALSE;
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return KERN_SUCCESS;
+ }
+ }
+ /*NOTREACHED*/
+ assert(0);
+ return KERN_FAILURE;
+}
+
+
+__private_extern__ kern_return_t
+vm_object_lock_request(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_object_size_t size,
+ memory_object_return_t should_return,
+ int flags,
+ vm_prot_t prot)
+{
+ __unused boolean_t should_flush;
+
+ should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
+
+ XPR(XPR_MEMORY_OBJECT,
+ "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
+ (integer_t)object, offset, size,
+ (((should_return&1)<<1)|should_flush), prot);
+
+ /*
+ * Check for bogus arguments.
+ */
+ if (object == VM_OBJECT_NULL)
+ return (KERN_INVALID_ARGUMENT);
+
+ if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
+ return (KERN_INVALID_ARGUMENT);
+
+ size = round_page_64(size);
+
+ /*
+ * Lock the object, and acquire a paging reference to
+ * prevent the memory_object reference from being released.
+ */
+ vm_object_lock(object);
+ vm_object_paging_begin(object);
+
+ (void)vm_object_update(object,
+ offset, size, NULL, NULL, should_return, flags, prot);
+
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+
+ return (KERN_SUCCESS);
+}
+
+/*
+ * Empty a purgeable object by grabbing the physical pages assigned to it and
+ * putting them on the free queue without writing them to backing store, etc.
+ * When the pages are next touched they will be demand zero-fill pages. We
+ * skip pages which are busy, being paged in/out, wired, etc. We do _not_
+ * skip referenced/dirty pages, pages on the active queue, etc. We're more
+ * than happy to grab these since this is a purgeable object. We mark the
+ * object as "empty" after reaping its pages.
+ *
+ * On entry the object and page queues are locked, the object must be a
+ * purgeable object with no delayed copies pending.
+ */
+unsigned int
+vm_object_purge(vm_object_t object)
+{
+ vm_page_t p, next;
+ unsigned int num_purged_pages;
+ vm_page_t local_freeq;
+ unsigned long local_freed;
+ int purge_loop_quota;
+/* free pages as soon as we gather PURGE_BATCH_FREE_LIMIT pages to free */
+#define PURGE_BATCH_FREE_LIMIT 50
+/* release page queues lock every PURGE_LOOP_QUOTA iterations */
+#define PURGE_LOOP_QUOTA 100
+
+ num_purged_pages = 0;
+ if (object->purgable == VM_PURGABLE_DENY)
+ return num_purged_pages;
+
+ assert(object->purgable != VM_PURGABLE_NONVOLATILE);
+ object->purgable = VM_PURGABLE_EMPTY;
+
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+ purge_loop_quota = PURGE_LOOP_QUOTA;
+
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+
+ /*
+ * Go through the object's resident pages and try to discard them.
+ */
+ next = (vm_page_t)queue_first(&object->memq);
+ while (!queue_end(&object->memq, (queue_entry_t)next)) {
+ p = next;
+ next = (vm_page_t)queue_next(&next->listq);
+
+ if (purge_loop_quota-- == 0) {
+ /*
+ * Avoid holding the page queues lock for too long.
+ * Let someone else take it for a while if needed.
+ * Keep holding the object's lock to guarantee that
+ * the object's page list doesn't change under us
+ * while we yield.
+ */
+ if (local_freeq != VM_PAGE_NULL) {
+ /*
+ * Flush our queue of pages to free.
+ */
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+ mutex_yield(&vm_page_queue_lock);
+
+ /* resume with the current page and a new quota */
+ purge_loop_quota = PURGE_LOOP_QUOTA;
+ }
+
+
+ if (p->busy || p->cleaning || p->laundry ||
+ p->list_req_pending) {
+ /* page is being acted upon, so don't mess with it */
+ continue;
+ }
+ if (p->wire_count) {
+ /* don't discard a wired page */
+ continue;
+ }
+
+ assert(!p->laundry);
+ assert(p->object != kernel_object);
+
+ /* we can discard this page */
+
+ /* advertise that this page is in a transition state */
+ p->busy = TRUE;
+
+ if (p->pmapped == TRUE) {
+ /* unmap the page */
+ int refmod_state;
+
+ refmod_state = pmap_disconnect(p->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ p->dirty = TRUE;
+ }
+ }
+
+ if (p->dirty || p->precious) {
+ /* we saved the cost of cleaning this page! */
+ num_purged_pages++;
+ vm_page_purged_count++;
+ }
+
+ vm_page_free_prepare(p);
+
+ /* ... and put it on our queue of pages to free */
+ assert(p->pageq.next == NULL &&
+ p->pageq.prev == NULL);
+ p->pageq.next = (queue_entry_t) local_freeq;
+ local_freeq = p;
+ if (++local_freed >= PURGE_BATCH_FREE_LIMIT) {
+ /* flush our queue of pages to free */
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+ }
+
+ /* flush our local queue of pages to free one last time */
+ if (local_freeq != VM_PAGE_NULL) {
+ vm_page_free_list(local_freeq);
+ local_freeq = VM_PAGE_NULL;
+ local_freed = 0;
+ }
+
+ return num_purged_pages;
+}
+
+/*
+ * vm_object_purgable_control() allows the caller to control and investigate the
+ * state of a purgeable object. A purgeable object is created via a call to
+ * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
+ * never be coalesced with any other object -- even other purgeable objects --
+ * and will thus always remain a distinct object. A purgeable object has
+ * special semantics when its reference count is exactly 1. If its reference
+ * count is greater than 1, then a purgeable object will behave like a normal
+ * object and attempts to use this interface will result in an error return
+ * of KERN_INVALID_ARGUMENT.
+ *
+ * A purgeable object may be put into a "volatile" state, which makes the
+ * object's pages eligible to be reclaimed without being paged to backing
+ * store if the system runs low on memory. If the pages in a volatile
+ * purgeable object are reclaimed, the purgeable object is said to have been
+ * "emptied." When a purgeable object is emptied the system will reclaim as
+ * many pages from the object as it can in a convenient manner (pages already
+ * en route to backing store or busy for other reasons are left as is). When
+ * a purgeable object is made volatile, its pages will generally be reclaimed
+ * before other pages in the application's working set. This semantic is
+ * generally used by applications which can recreate the data in the object
+ * faster than it can be paged in. One such example might be media assets
+ * which can be reread from a much faster RAID volume.
+ *
+ * A purgeable object may be designated as "non-volatile" which means it will
+ * behave like all other objects in the system with pages being written to and
+ * read from backing store as needed to satisfy system memory needs. If the
+ * object was emptied before the object was made non-volatile, that fact will
+ * be returned as the old state of the purgeable object (see
+ * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
+ * were reclaimed as part of emptying the object will be refaulted in as
+ * zero-fill on demand. It is up to the application to note that an object
+ * was emptied and recreate the object's contents if necessary. When a
+ * purgeable object is made non-volatile, its pages will generally not be paged
+ * out to backing store in the immediate future. A purgeable object may also
+ * be manually emptied.
+ *
+ * Finally, the current state (non-volatile, volatile, volatile & empty) of a
+ * volatile purgeable object may be queried at any time. This information may
+ * be used as a control input to let the application know when the system is
+ * experiencing memory pressure and is reclaiming memory.
+ *
+ * The specified address may be any address within the purgeable object. If
+ * the specified address does not represent any object in the target task's
+ * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
+ * object containing the specified address is not a purgeable object, then
+ * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
+ * returned.
+ *
+ * The control parameter may be any one of VM_PURGABLE_SET_STATE or
+ * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
+ * state is used to set the new state of the purgeable object and return its
+ * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
+ * object is returned in the parameter state.
+ *
+ * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
+ * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
+ * the non-volatile, volatile and volatile/empty states described above.
+ * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
+ * immediately reclaim as many pages in the object as can be conveniently
+ * collected (some may have already been written to backing store or be
+ * otherwise busy).
+ *
+ * The process of making a purgeable object non-volatile and determining its
+ * previous state is atomic. Thus, if a purgeable object is made
+ * VM_PURGABLE_NONVOLATILE and the old state is returned as
+ * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
+ * completely intact and will remain so until the object is made volatile
+ * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
+ * was reclaimed while it was in a volatile state and its previous contents
+ * have been lost.
+ */
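+/*
+ * A minimal user-level sketch (not part of this file) of the semantics
+ * described above, assuming the standard Mach interfaces vm_allocate()
+ * and vm_purgable_control() exported to user space:
+ *
+ *	vm_address_t addr = 0;
+ *	int state;
+ *
+ *	vm_allocate(mach_task_self(), &addr, size,
+ *	            VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
+ *	... fill the buffer with recreatable data ...
+ *
+ *	state = VM_PURGABLE_VOLATILE;
+ *	vm_purgable_control(mach_task_self(), addr,
+ *	                    VM_PURGABLE_SET_STATE, &state);
+ *	... the pages may now be reclaimed under memory pressure ...
+ *
+ *	state = VM_PURGABLE_NONVOLATILE;
+ *	vm_purgable_control(mach_task_self(), addr,
+ *	                    VM_PURGABLE_SET_STATE, &state);
+ *	if (state == VM_PURGABLE_EMPTY) {
+ *		... contents were reclaimed; recreate them ...
+ *	}
+ */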
+/*
+ * The object must be locked.
+ */
+kern_return_t
+vm_object_purgable_control(
+ vm_object_t object,
+ vm_purgable_t control,
+ int *state)
+{
+ int old_state;
+ int new_state;
+
+ if (object == VM_OBJECT_NULL) {
+ /*
+ * Object must already be present or it can't be purgeable.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Get current state of the purgeable object.
+ */
+ old_state = object->purgable;
+ if (old_state == VM_PURGABLE_DENY)
+ return KERN_INVALID_ARGUMENT;
+
+ /* purgeable objects can't have delayed copies - now or in the future */
+ assert(object->copy == VM_OBJECT_NULL);
+ assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
+
+ /*
+ * Execute the desired operation.
+ */
+ if (control == VM_PURGABLE_GET_STATE) {
+ *state = old_state;
+ return KERN_SUCCESS;
+ }
+
+ new_state = *state & VM_PURGABLE_STATE_MASK;
+ switch (new_state) {
+ case VM_PURGABLE_DENY:
+ case VM_PURGABLE_NONVOLATILE:
+ object->purgable = new_state;
+
+ if (old_state != VM_PURGABLE_NONVOLATILE) {
+ vm_page_lock_queues();
+ assert(vm_page_purgeable_count >=
+ object->resident_page_count);
+ vm_page_purgeable_count -= object->resident_page_count;
+
+ if (old_state == VM_PURGABLE_VOLATILE) {
+ assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
+ purgeable_q_t queue = vm_purgeable_object_remove(object);
+ assert(queue);
+
+ vm_purgeable_token_delete_first(queue);
+ assert(queue->debug_count_objects >= 0);
+ }
+ vm_page_unlock_queues();
+ }
+ break;
+
+ case VM_PURGABLE_VOLATILE:
+
+ if ((old_state != VM_PURGABLE_NONVOLATILE) && (old_state != VM_PURGABLE_VOLATILE))
+ break;
+ purgeable_q_t queue;
+
+ /* find the correct queue */
+ if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE)
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ else {
+ if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO)
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ else
+ queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+ }
+
+ if (old_state == VM_PURGABLE_NONVOLATILE) {
+ /* try to add token... this can fail */
+ vm_page_lock_queues();
+
+ kern_return_t result = vm_purgeable_token_add(queue);
+ if (result != KERN_SUCCESS) {
+ vm_page_unlock_queues();
+ return result;
+ }
+ vm_page_purgeable_count += object->resident_page_count;
+
+ vm_page_unlock_queues();
+
+ object->purgable = new_state;
+
+ /* object should not be on a queue */
+ assert(object->objq.next == NULL && object->objq.prev == NULL);
+ }
+ else if (old_state == VM_PURGABLE_VOLATILE) {
+ /*
+ * If we're only reassigning priorities / purgeable groups, we don't
+ * change the token queue, so moving priorities will not make pages
+ * stay around longer. The reasoning is that the algorithm gives the
+ * most priority to the most important object: when a new token is
+ * added, the most important object's priority is boosted. That already
+ * biases the system toward purgeable queues that move a lot, so no
+ * extra biasing seems necessary here, where no new object is added.
+ */
+ assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
+
+ purgeable_q_t old_queue = vm_purgeable_object_remove(object);
+ assert(old_queue);
+
+ if (old_queue != queue) {
+ kern_return_t result;
+
+ /* Changing queue. Have to move token. */
+ vm_page_lock_queues();
+ vm_purgeable_token_delete_first(old_queue);
+ result = vm_purgeable_token_add(queue);
+ vm_page_unlock_queues();
+
+ assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */
+ }
+ }
+ vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT);
+
+ assert(queue->debug_count_objects>=0);
+
+ break;
+
+
+ case VM_PURGABLE_EMPTY:
+ if (old_state != new_state) {
+ assert(old_state == VM_PURGABLE_NONVOLATILE || old_state == VM_PURGABLE_VOLATILE);
+ if (old_state == VM_PURGABLE_VOLATILE) {
+ assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
+ purgeable_q_t old_queue = vm_purgeable_object_remove(object);
+ assert(old_queue);
+ vm_page_lock_queues();
+ vm_purgeable_token_delete_first(old_queue);
+ }
+
+ if (old_state == VM_PURGABLE_NONVOLATILE) {
+ vm_page_purgeable_count += object->resident_page_count;
+ vm_page_lock_queues();
+ }
+ (void) vm_object_purge(object);
+ vm_page_unlock_queues();
+ }
+ break;
+
+ }
+ *state = old_state;
+
+ return KERN_SUCCESS;
+}
+
+#if TASK_SWAPPER
+/*
+ * vm_object_res_deallocate
+ *
+ * (recursively) decrement residence counts on vm objects and their shadows.
+ * Called from vm_object_deallocate and when swapping out an object.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
+
+__private_extern__ void
+vm_object_res_deallocate(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked so it can be called directly
+ * from vm_object_deallocate. Original object is never
+ * unlocked.
+ */
+ assert(object->res_count > 0);
+ while (--object->res_count == 0) {
+ assert(object->ref_count >= object->res_count);
+ vm_object_deactivate_all_pages(object);
+ /* iterate on shadow, if present */
+ if (object->shadow != VM_OBJECT_NULL) {
+ vm_object_t tmp_object = object->shadow;
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ assert(object->res_count > 0);
+ } else
+ break;
+ }
+ if (object != orig_object)
+ vm_object_unlock(object);
+}
+
+/*
+ * vm_object_res_reference
+ *
+ * Internal function to increment residence count on a vm object
+ * and its shadows. It is called only from vm_object_reference, and
+ * when swapping in a vm object, via vm_map_swap.
+ *
+ * The object is locked, and remains locked throughout the function,
+ * even as we iterate down the shadow chain. Locks on intermediate objects
+ * will be dropped, but not the original object.
+ *
+ * NOTE: this function used to use recursion, rather than iteration.
+ */
+
+__private_extern__ void
+vm_object_res_reference(
+ vm_object_t object)
+{
+ vm_object_t orig_object = object;
+ /*
+ * Object is locked, so this can be called directly
+ * from vm_object_reference. This lock is never released.
+ */
+ while ((++object->res_count == 1) &&
+ (object->shadow != VM_OBJECT_NULL)) {
+ vm_object_t tmp_object = object->shadow;
+
+ assert(object->ref_count >= object->res_count);
+ vm_object_lock(tmp_object);
+ if (object != orig_object)
+ vm_object_unlock(object);
+ object = tmp_object;
+ }
+ if (object != orig_object)
+ vm_object_unlock(object);
+ assert(orig_object->ref_count >= orig_object->res_count);
+}
+#endif /* TASK_SWAPPER */
+
+/*
+ * vm_object_reference:
+ *
+ * Gets another reference to the given object.
+ */
+#ifdef vm_object_reference
+#undef vm_object_reference
+#endif
+__private_extern__ void
+vm_object_reference(
+ register vm_object_t object)
+{
+ if (object == VM_OBJECT_NULL)
+ return;
+
+ vm_object_lock(object);
+ assert(object->ref_count > 0);
+ vm_object_reference_locked(object);
+ vm_object_unlock(object);
+}
+
+#ifdef MACH_BSD
+/*
+ * Scale the vm_object_cache
+ * This is required to make sure that the vm_object_cache is big
+ * enough to effectively cache the mapped file.
+ * This is really important with UBC as all the regular file vnodes
+ * have a memory object associated with them. Having this cache too
+ * small results in rapid reclaim of vnodes and hurts performance a LOT!
+ *
+ * This is also needed as the number of vnodes can be dynamically scaled.
+ */
+kern_return_t
+adjust_vm_object_cache(
+ __unused vm_size_t oval,
+ vm_size_t nval)
+{
+ vm_object_cached_max = nval;
+ vm_object_cache_trim(FALSE);
+ return (KERN_SUCCESS);
+}
+#endif /* MACH_BSD */
+
+
+/*
+ * vm_object_transpose
+ *
+ * This routine takes two VM objects of the same size and exchanges
+ * their backing store.
+ * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
+ * and UPL_BLOCK_ACCESS if they are referenced anywhere.
+ *
+ * The VM objects must not be locked by caller.
+ */
+kern_return_t
+vm_object_transpose(
+ vm_object_t object1,
+ vm_object_t object2,
+ vm_object_size_t transpose_size)
+{
+ vm_object_t tmp_object;
+ kern_return_t retval;
+ boolean_t object1_locked, object2_locked;
+ boolean_t object1_paging, object2_paging;
+ vm_page_t page;
+ vm_object_offset_t page_offset;
+
+ tmp_object = VM_OBJECT_NULL;
+ object1_locked = FALSE; object2_locked = FALSE;
+ object1_paging = FALSE; object2_paging = FALSE;
+
+ if (object1 == object2 ||
+ object1 == VM_OBJECT_NULL ||
+ object2 == VM_OBJECT_NULL) {
+ /*
+ * If the 2 VM objects are the same, there's
+ * no point in exchanging their backing store.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+ vm_object_lock(object1);
+ object1_locked = TRUE;
+ if (!object1->alive || object1->terminating ||
+ object1->copy || object1->shadow || object1->shadowed ||
+ object1->purgable != VM_PURGABLE_DENY) {
+ /*
+ * We don't deal with copy or shadow objects (yet).
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ /*
+ * Since we're about to mess with the object's backing store,
+ * mark it as "paging_in_progress". Note that this is not enough
+ * to prevent any paging activity on this object, so the caller should
+ * have "quiesced" the objects beforehand, via a UPL operation with
+ * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
+ * and UPL_BLOCK_ACCESS (to mark the pages "busy").
+ */
+ vm_object_paging_begin(object1);
+ object1_paging = TRUE;
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+
+ /*
+ * Same as above for the 2nd object...
+ */
+ vm_object_lock(object2);
+ object2_locked = TRUE;
+ if (! object2->alive || object2->terminating ||
+ object2->copy || object2->shadow || object2->shadowed ||
+ object2->purgable != VM_PURGABLE_DENY) {
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ vm_object_paging_begin(object2);
+ object2_paging = TRUE;
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
+
+ /*
+ * Allocate a temporary VM object to hold object1's contents
+ * while we copy object2 to object1.
+ */
+ tmp_object = vm_object_allocate(transpose_size);
+ vm_object_lock(tmp_object);
+ vm_object_paging_begin(tmp_object);
+ tmp_object->can_persist = FALSE;
+
+ /*
+ * Since we need to lock both objects at the same time,
+ * make sure we always lock them in the same order to
+ * avoid deadlocks.
+ */
+ if (object1 < object2) {
+ vm_object_lock(object1);
+ vm_object_lock(object2);
+ } else {
+ vm_object_lock(object2);
+ vm_object_lock(object1);
+ }
+ object1_locked = TRUE;
+ object2_locked = TRUE;
+
+ if (object1->size != object2->size ||
+ object1->size != transpose_size) {
+ /*
+ * If the 2 objects don't have the same size, we can't
+ * exchange their backing stores or one would overflow.
+ * If their size doesn't match the caller's
+ * "transpose_size", we can't do it either because the
+ * transpose operation will affect the entire span of
+ * the objects.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+
+ /*
+ * Transpose the lists of resident pages.
+ * This also updates the resident_page_count and the memq_hint.
+ */
+ if (object1->phys_contiguous || queue_empty(&object1->memq)) {
+ /*
+ * No pages in object1, just transfer pages
+ * from object2 to object1. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset, FALSE);
+ }
+ assert(queue_empty(&object2->memq));
+ } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
+ /*
+ * No pages in object2, just transfer pages
+ * from object1 to object2. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
+ vm_page_rename(page, object2, page->offset, FALSE);
+ }
+ assert(queue_empty(&object1->memq));
+ } else {
+ /* transfer object1's pages to tmp_object */
+ vm_page_lock_queues();
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
+ page_offset = page->offset;
+ vm_page_remove(page);
+ page->offset = page_offset;
+ queue_enter(&tmp_object->memq, page, vm_page_t, listq);
+ }
+ vm_page_unlock_queues();
+ assert(queue_empty(&object1->memq));
+ /* transfer object2's pages to object1 */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset, FALSE);
+ }
+ assert(queue_empty(&object2->memq));
+ /* transfer tmp_object's pages to object1 */
+ while (!queue_empty(&tmp_object->memq)) {
+ page = (vm_page_t) queue_first(&tmp_object->memq);
+ queue_remove(&tmp_object->memq, page,
+ vm_page_t, listq);
+ vm_page_insert(page, object2, page->offset);
+ }
+ assert(queue_empty(&tmp_object->memq));
+ }
+
+#define __TRANSPOSE_FIELD(field) \
+MACRO_BEGIN \
+ tmp_object->field = object1->field; \
+ object1->field = object2->field; \
+ object2->field = tmp_object->field; \
+MACRO_END
+
+ /* "size" should be identical */
+ assert(object1->size == object2->size);
+ /* "Lock" refers to the object not its contents */
+ /* "ref_count" refers to the object not its contents */
+#if TASK_SWAPPER
+ /* "res_count" refers to the object not its contents */
+#endif
+ /* "resident_page_count" was updated above when transposing pages */
+ /* there should be no "copy" */
+ assert(!object1->copy);
+ assert(!object2->copy);
+ /* there should be no "shadow" */
+ assert(!object1->shadow);
+ assert(!object2->shadow);
+ __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
+ __TRANSPOSE_FIELD(pager);
+ __TRANSPOSE_FIELD(paging_offset);
+ __TRANSPOSE_FIELD(pager_control);
+ /* update the memory_objects' pointers back to the VM objects */
+ if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object1->pager_control,
+ object1);
+ }
+ if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object2->pager_control,
+ object2);
+ }
+ __TRANSPOSE_FIELD(copy_strategy);
+ /* "paging_in_progress" refers to the object not its contents */
+ assert(object1->paging_in_progress);
+ assert(object2->paging_in_progress);
+ /* "all_wanted" refers to the object not its contents */
+ __TRANSPOSE_FIELD(pager_created);
+ __TRANSPOSE_FIELD(pager_initialized);
+ __TRANSPOSE_FIELD(pager_ready);
+ __TRANSPOSE_FIELD(pager_trusted);
+ __TRANSPOSE_FIELD(can_persist);
+ __TRANSPOSE_FIELD(internal);
+ __TRANSPOSE_FIELD(temporary);
+ __TRANSPOSE_FIELD(private);
+ __TRANSPOSE_FIELD(pageout);
+ /* "alive" should be set */
+ assert(object1->alive);
+ assert(object2->alive);
+ /* "purgeable" should be non-purgeable */
+ assert(object1->purgable == VM_PURGABLE_DENY);
+ assert(object2->purgable == VM_PURGABLE_DENY);
+ /* "shadowed" refers to the the object not its contents */
+ __TRANSPOSE_FIELD(silent_overwrite);
+ __TRANSPOSE_FIELD(advisory_pageout);
+ __TRANSPOSE_FIELD(true_share);
+ /* "terminating" should not be set */
+ assert(!object1->terminating);
+ assert(!object2->terminating);
+ __TRANSPOSE_FIELD(named);
+ /* "shadow_severed" refers to the object not its contents */
+ __TRANSPOSE_FIELD(phys_contiguous);
+ __TRANSPOSE_FIELD(nophyscache);
+ /* "cached_list" should be NULL */
+ assert(object1->cached_list.prev == NULL);
+ assert(object1->cached_list.next == NULL);
+ assert(object2->cached_list.prev == NULL);
+ assert(object2->cached_list.next == NULL);
+ /* "msr_q" is linked to the object not its contents */
+ assert(queue_empty(&object1->msr_q));
+ assert(queue_empty(&object2->msr_q));
+ __TRANSPOSE_FIELD(last_alloc);
+ __TRANSPOSE_FIELD(sequential);
+ __TRANSPOSE_FIELD(pages_created);
+ __TRANSPOSE_FIELD(pages_used);
+#if MACH_PAGEMAP
+ __TRANSPOSE_FIELD(existence_map);
+#endif
+ __TRANSPOSE_FIELD(cow_hint);
+#if MACH_ASSERT
+ __TRANSPOSE_FIELD(paging_object);
+#endif
+ __TRANSPOSE_FIELD(wimg_bits);
+ __TRANSPOSE_FIELD(code_signed);
+ __TRANSPOSE_FIELD(not_in_use);
+#ifdef UPL_DEBUG
+ /* "uplq" refers to the object not its contents (see upl_transpose()) */
+#endif
+
+#undef __TRANSPOSE_FIELD
+
+ retval = KERN_SUCCESS;
+
+done:
+ /*
+ * Cleanup.
+ */
+ if (tmp_object != VM_OBJECT_NULL) {
+ vm_object_paging_end(tmp_object);
+ vm_object_unlock(tmp_object);
+ /*
+ * Re-initialize the temporary object to avoid
+ * deallocating a real pager.
+ */
+ _vm_object_allocate(transpose_size, tmp_object);
+ vm_object_deallocate(tmp_object);
+ tmp_object = VM_OBJECT_NULL;
+ }
+
+ if (object1_locked) {
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+ }
+ if (object2_locked) {
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
+ }
+ if (object1_paging) {
+ vm_object_lock(object1);
+ vm_object_paging_end(object1);
+ vm_object_unlock(object1);
+ object1_paging = FALSE;
+ }
+ if (object2_paging) {
+ vm_object_lock(object2);
+ vm_object_paging_end(object2);
+ vm_object_unlock(object2);
+ object2_paging = FALSE;
+ }
+
+ return retval;
+}
+
+
+/*
+ * vm_object_cluster_size
+ *
+ * Determine how big a cluster we should issue an I/O for...
+ *
+ * Inputs: *start == offset of page needed
+ * *length == maximum cluster pager can handle
+ * Outputs: *start == beginning offset of cluster
+ * *length == length of cluster to try
+ *
+ * The original *start will be encompassed by the cluster
+ *
+ */
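+/*
+ * A hypothetical illustration of the default policy, assuming 4K pages and
+ * ignoring the halving applied when free memory is below target: a fault
+ * that extends a forward sequential run of 4 pages yields
+ * pre_heat_size = sequential_run + PAGE_SIZE = 5 pages with look_behind
+ * disabled, so up to 4 additional pages beyond the faulting page are added
+ * to the cluster, stopping early at any resident page, at
+ * fault_info->hi_offset, or at the end of the object.
+ */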
+extern int speculative_reads_disabled;
+
+uint32_t pre_heat_scaling[MAX_UPL_TRANSFER];
+uint32_t pre_heat_cluster[MAX_UPL_TRANSFER];
+
+#define PRE_HEAT_MULTIPLIER 4
+
+__private_extern__ void
+vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
+ vm_size_t *length, vm_object_fault_info_t fault_info)
+{
+ vm_size_t pre_heat_size;
+ vm_size_t tail_size;
+ vm_size_t head_size;
+ vm_size_t max_length;
+ vm_size_t cluster_size;
+ vm_object_offset_t object_size;
+ vm_object_offset_t orig_start;
+ vm_object_offset_t target_start;
+ vm_object_offset_t offset;
+ vm_behavior_t behavior;
+ boolean_t look_behind = TRUE;
+ boolean_t look_ahead = TRUE;
+ int sequential_run;
+ int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+
+ assert( !(*length & PAGE_MASK));
+ assert( !(*start & PAGE_MASK_64));
+
+ if ( (max_length = *length) > (MAX_UPL_TRANSFER * PAGE_SIZE) )
+ max_length = (MAX_UPL_TRANSFER * PAGE_SIZE);
+ /*
+ * we'll always return a cluster size of at least
+ * 1 page, since the original fault must always
+ * be processed
+ */
+ *length = PAGE_SIZE;
+
+ if (speculative_reads_disabled || fault_info == NULL || max_length == 0) {
+ /*
+ * no cluster... just fault the page in
+ */
+ return;
+ }
+ orig_start = *start;
+ target_start = orig_start;
+ cluster_size = round_page_32(fault_info->cluster_size);
+ behavior = fault_info->behavior;
+
+ vm_object_lock(object);
+
+ if (object->internal)
+ object_size = object->size;
+ else if (object->pager != MEMORY_OBJECT_NULL)
+ vnode_pager_get_object_size(object->pager, &object_size);
+ else
+ goto out; /* pager is gone for this object, nothing more to do */
+
+ object_size = round_page_64(object_size);
+
+ if (orig_start >= object_size) {
+ /*
+ * fault occurred beyond the EOF...
+ * we need to punt w/o changing the
+ * starting offset
+ */
+ goto out;
+ }
+ if (object->pages_used > object->pages_created) {
+ /*
+ * must have wrapped our 32 bit counters
+ * so reset
+ */
+ object->pages_used = object->pages_created = 0;
+ }
+ if ((sequential_run = object->sequential)) {
+ if (sequential_run < 0) {
+ sequential_behavior = VM_BEHAVIOR_RSEQNTL;
+ sequential_run = 0 - sequential_run;
+ } else {
+ sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
+ }
+ }
+ switch(behavior) {
+
+ default:
+ behavior = VM_BEHAVIOR_DEFAULT;
+ /* FALLTHROUGH */
+
+ case VM_BEHAVIOR_DEFAULT:
+ if (object->internal && fault_info->user_tag == VM_MEMORY_STACK)
+ goto out;
+
+ if (sequential_run >= (3 * PAGE_SIZE)) {
+ pre_heat_size = sequential_run + PAGE_SIZE;
+
+ if ((behavior = sequential_behavior) == VM_BEHAVIOR_SEQUENTIAL)
+ look_behind = FALSE;
+ else
+ look_ahead = FALSE;
+ } else {
+ uint32_t pages_unused;
+
+ if (object->pages_created < 32 * PRE_HEAT_MULTIPLIER) {
+ /*
+ * prime the pump
+ */
+ pre_heat_size = PAGE_SIZE * 8 * PRE_HEAT_MULTIPLIER;
+ break;
+ }
+ pages_unused = object->pages_created - object->pages_used;
+
+ if (pages_unused < (object->pages_created / 8)) {
+ pre_heat_size = PAGE_SIZE * 32 * PRE_HEAT_MULTIPLIER;
+ } else if (pages_unused < (object->pages_created / 4)) {
+ pre_heat_size = PAGE_SIZE * 16 * PRE_HEAT_MULTIPLIER;
+ } else if (pages_unused < (object->pages_created / 2)) {
+ pre_heat_size = PAGE_SIZE * 8 * PRE_HEAT_MULTIPLIER;
+ } else {
+ pre_heat_size = PAGE_SIZE * 4 * PRE_HEAT_MULTIPLIER;
+ }
+ }
+ break;
+
+ case VM_BEHAVIOR_RANDOM:
+ if ((pre_heat_size = cluster_size) <= PAGE_SIZE)
+ goto out;
+ break;
+
+ case VM_BEHAVIOR_SEQUENTIAL:
+ if ((pre_heat_size = cluster_size) == 0)
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ look_behind = FALSE;
+
+ break;
+
+ case VM_BEHAVIOR_RSEQNTL:
+ if ((pre_heat_size = cluster_size) == 0)
+ pre_heat_size = sequential_run + PAGE_SIZE;
+ look_ahead = FALSE;
+
+ break;
+
+ }
+ if (pre_heat_size > max_length)
+ pre_heat_size = max_length;
+
+ if (behavior == VM_BEHAVIOR_DEFAULT && vm_page_free_count < vm_page_free_target)
+ pre_heat_size /= 2;
+
+ if (look_ahead == TRUE) {
+ if (look_behind == TRUE)
+ target_start &= ~(pre_heat_size - 1);
+
+ if ((target_start + pre_heat_size) > object_size)
+ pre_heat_size = (vm_size_t)(trunc_page_64(object_size - target_start));
+
+ tail_size = pre_heat_size - (orig_start - target_start) - PAGE_SIZE;
+ } else {
+ if (pre_heat_size > target_start)
+ pre_heat_size = target_start;
+ tail_size = 0;
+ }
+ pre_heat_scaling[pre_heat_size / PAGE_SIZE]++;
+
+ if (pre_heat_size <= PAGE_SIZE)
+ goto out;
+
+ if (look_behind == TRUE) {
+ /*
+ * take a look at the pages before the original
+ * faulting offset
+ */
+ head_size = pre_heat_size - tail_size - PAGE_SIZE;
+
+ for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
+ /*
+ * don't poke below the lowest offset
+ */
+ if (offset < fault_info->lo_offset)
+ break;
+ /*
+ * for external objects and internal objects w/o an existence map
+ * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+#if MACH_PAGEMAP
+ if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ /*
+ * we know for a fact that the pager can't provide the page
+ * so don't include it or any pages beyond it in this cluster
+ */
+ break;
+ }
+#endif
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *start = offset;
+ *length += PAGE_SIZE;
+ }
+ }
+ if (look_ahead == TRUE) {
+ for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
+ /*
+ * don't poke above the highest offset
+ */
+ if (offset >= fault_info->hi_offset)
+ break;
+ /*
+ * for external objects and internal objects w/o an existence map
+ * vm_external_state_get will return VM_EXTERNAL_STATE_UNKNOWN
+ */
+#if MACH_PAGEMAP
+ if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_ABSENT) {
+ /*
+ * we know for a fact that the pager can't provide the page
+ * so don't include it or any pages beyond it in this cluster
+ */
+ break;
+ }
+#endif
+ if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
+ /*
+ * don't bridge resident pages
+ */
+ break;
+ }
+ *length += PAGE_SIZE;
+ }
+ }
+out:
+ pre_heat_cluster[*length / PAGE_SIZE]++;
+
+ vm_object_unlock(object);
+}
+
+
+/*
+ * Allow manipulation of individual page state. This is actually part of
+ * the UPL regimen but takes place on the VM object rather than on a UPL
+ */
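+/*
+ * For example (a sketch, not a caller that exists in this file): to pin
+ * down a page and learn its physical page number, a caller might issue
+ *
+ *	vm_object_page_op(object, offset,
+ *	                  UPL_POP_SET | UPL_POP_BUSY, &phys, &flags);
+ *
+ * and later release it with
+ *
+ *	vm_object_page_op(object, offset,
+ *	                  UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
+ *
+ * Per the convention asserted below, UPL_POP_BUSY should be set before
+ * (or together with) any other state changes.
+ */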
+
+kern_return_t
+vm_object_page_op(
+ vm_object_t object,
+ vm_object_offset_t offset,
+ int ops,
+ ppnum_t *phys_entry,
+ int *flags)
+{
+ vm_page_t dst_page;
+
+ vm_object_lock(object);
+
+ if(ops & UPL_POP_PHYSICAL) {
+ if(object->phys_contiguous) {
+ if (phys_entry) {
+ *phys_entry = (ppnum_t)
+ (object->shadow_offset >> 12);
+ }
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+ } else {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+ }
+ if(object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ while(TRUE) {
+ if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
+ vm_object_unlock(object);
+ return KERN_FAILURE;
+ }
+
+ /* Sync up on getting the busy bit */
+ if((dst_page->busy || dst_page->cleaning) &&
+ (((ops & UPL_POP_SET) &&
+ (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
+ /* someone else is playing with the page, we will */
+ /* have to wait */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
+
+ if (ops & UPL_POP_DUMP) {
+ if (dst_page->pmapped == TRUE)
+ pmap_disconnect(dst_page->phys_page);
+
+ vm_page_lock_queues();
+ vm_page_free(dst_page);
+ vm_page_unlock_queues();
+
+ break;
+ }
+
+ if (flags) {
+ *flags = 0;
+
+ /* Get the condition of flags before requested ops */
+ /* are undertaken */
+
+ if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
+ if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
+ if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
+ if(dst_page->absent) *flags |= UPL_POP_ABSENT;
+ if(dst_page->busy) *flags |= UPL_POP_BUSY;
+ }
+
+ /* The caller should have made a call either contingent with */
+ /* or prior to this call to set UPL_POP_BUSY */
+ if(ops & UPL_POP_SET) {
+ /* The protection granted with this assert will */
+ /* not be complete. If the caller violates the */
+ /* convention and attempts to change page state */
+ /* without first setting busy we may not see it */
+ /* because the page may already be busy. However */
+ /* if such violations occur we will assert sooner */
+ /* or later. */
+ assert(dst_page->busy || (ops & UPL_POP_BUSY));
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
+ if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
+ }
+
+ if(ops & UPL_POP_CLR) {
+ assert(dst_page->busy);
+ if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
+ if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
+ if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
+ if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
+ if (ops & UPL_POP_BUSY) {
+ dst_page->busy = FALSE;
+ PAGE_WAKEUP(dst_page);
+ }
+ }
+
+ if (dst_page->encrypted) {
+ /*
+ * ENCRYPTED SWAP:
+ * We need to decrypt this encrypted page before the
+ * caller can access its contents.
+ * But if the caller really wants to access the page's
+ * contents, they have to keep the page "busy".
+ * Otherwise, the page could get recycled or re-encrypted
+ * at any time.
+ */
+ if ((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY) &&
+ dst_page->busy) {
+ /*
+ * The page is stable enough to be accessed by
+ * the caller, so make sure its contents are
+ * not encrypted.
+ */
+ vm_page_decrypt(dst_page, 0);
+ } else {
+ /*
+ * The page is not busy, so don't bother
+ * decrypting it, since anything could
+ * happen to it between now and when the
+ * caller wants to access it.
+ * We should not give the caller access
+ * to this page.
+ */
+ assert(!phys_entry);
+ }
+ }
+
+ if (phys_entry) {
+ /*
+ * The physical page number will remain valid
+ * only if the page is kept busy.
+ * ENCRYPTED SWAP: make sure we don't let the
+ * caller access an encrypted page.
+ */
+ assert(dst_page->busy);
+ assert(!dst_page->encrypted);
+ *phys_entry = dst_page->phys_page;
+ }
+
+ break;
+ }
+
+ vm_object_unlock(object);
+ return KERN_SUCCESS;
+
+}
+
+/*
+ * vm_object_range_op is a performance enhancement over vm_object_page_op
+ * for page_op operations that do not need per-page state returned from
+ * the call. page_op was created as a low-cost alternative to page
+ * manipulation via UPLs when only a single page was involved. range_op
+ * extends the _op family to work on multiple pages: because no per-page
+ * state is returned, the caller avoids the overhead of the UPL structures.
+ */
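+/*
+ * As an illustrative (hypothetical) use, a caller that wants to know how
+ * many contiguous bytes at the start of a range are resident could ask
+ *
+ *	vm_object_range_op(object, beg, end, UPL_ROP_PRESENT, &range);
+ *
+ * which scans forward, stops at the first non-resident page, and returns
+ * the number of bytes scanned in "range". UPL_ROP_ABSENT stops at the
+ * first resident page instead, and UPL_ROP_DUMP discards every resident
+ * page it can in the span.
+ */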
+
+kern_return_t
+vm_object_range_op(
+ vm_object_t object,
+ vm_object_offset_t offset_beg,
+ vm_object_offset_t offset_end,
+ int ops,
+ int *range)
+{
+ vm_object_offset_t offset;
+ vm_page_t dst_page;
+
+ if (object->resident_page_count == 0) {
+ if (range) {
+ if (ops & UPL_ROP_PRESENT)
+ *range = 0;
+ else
+ *range = offset_end - offset_beg;
+ }
+ return KERN_SUCCESS;
+ }
+ vm_object_lock(object);
+
+ if (object->phys_contiguous) {
+ vm_object_unlock(object);
+ return KERN_INVALID_OBJECT;
+ }
+
+ offset = offset_beg & ~PAGE_MASK_64;
+
+ while (offset < offset_end) {
+ dst_page = vm_page_lookup(object, offset);
+ if (dst_page != VM_PAGE_NULL) {
+ if (ops & UPL_ROP_DUMP) {
+ if (dst_page->busy || dst_page->cleaning) {
+ /*
+ * someone else is playing with the
+ * page, we will have to wait
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ /*
+ * need to look the page up again since its
+ * state may have changed while we slept;
+ * it might even belong to a different object
+ * at this point
+ */
+ continue;
+ }
+ if (dst_page->pmapped == TRUE)
+ pmap_disconnect(dst_page->phys_page);
+
+ vm_page_lock_queues();
+ vm_page_free(dst_page);
+ vm_page_unlock_queues();
+
+ } else if (ops & UPL_ROP_ABSENT)
+ break;
+ } else if (ops & UPL_ROP_PRESENT)
+ break;
+
+ offset += PAGE_SIZE;
+ }
+ vm_object_unlock(object);
+
+ if (range) {
+ if (offset > offset_end)
+ offset = offset_end;
+ *range = offset - offset_beg;
+ }
+ return KERN_SUCCESS;
+}
+
+
+uint32_t scan_object_collision = 0;
+
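+/*
+ * Lock wrappers for the object lock. If the object being locked is the
+ * one vm_pageout_scan() is currently waiting for, back off briefly
+ * (mutex_pause) so the pageout daemon isn't starved, and count the
+ * collision for statistics.
+ */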
+void
+vm_object_lock(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ lck_rw_lock_exclusive(&object->Lock);
+}
+
+boolean_t
+vm_object_lock_try(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ return (lck_rw_try_lock_exclusive(&object->Lock));
+}
+
+void
+vm_object_lock_shared(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ lck_rw_lock_shared(&object->Lock);
+}
+
+boolean_t
+vm_object_lock_try_shared(vm_object_t object)
+{
+ if (object == vm_pageout_scan_wants_object) {
+ scan_object_collision++;
+ mutex_pause(2);
+ }
+ return (lck_rw_try_lock_shared(&object->Lock));
+}