+
+/*
+ * vm_object_transpose
+ *
+ * This routine takes two VM objects of the same size and exchanges
+ * their backing store.
+ * If the objects are referenced anywhere, they should first be
+ * "quiesced" via a UPL operation with UPL_SET_IO_WIRE (to wire the
+ * pages down) and UPL_BLOCK_ACCESS (to mark them "busy").
+ *
+ * The VM objects must not be locked by the caller.
+ */
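+/*
+ * Illustrative caller-side sketch (an assumption, not part of this
+ * change): the quiescing described above could look roughly like
+ * this for each object, using the object's pager control:
+ *
+ *	upl_t		upl;
+ *	unsigned int	page_list_count = 0;
+ *
+ *	kr = memory_object_upl_request(object->pager_control,
+ *				       0, object->size,
+ *				       &upl, NULL, &page_list_count,
+ *				       UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS);
+ *
+ * The exact entry point and its signature may differ; what matters
+ * is that every page is resident, wired and marked "busy" before
+ * vm_object_transpose() is called.
+ */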
+kern_return_t
+vm_object_transpose(
+ vm_object_t object1,
+ vm_object_t object2,
+ vm_object_size_t transpose_size)
+{
+ vm_object_t tmp_object;
+ kern_return_t retval;
+ boolean_t object1_locked, object2_locked;
+ boolean_t object1_paging, object2_paging;
+ vm_page_t page;
+ vm_object_offset_t page_offset;
+
+ tmp_object = VM_OBJECT_NULL;
+ object1_locked = FALSE;
+ object2_locked = FALSE;
+ object1_paging = FALSE;
+ object2_paging = FALSE;
+
+ if (object1 == object2 ||
+ object1 == VM_OBJECT_NULL ||
+ object2 == VM_OBJECT_NULL) {
+ /*
+ * Two distinct, non-NULL VM objects are required; if the
+ * 2 VM objects are the same, there's no point in exchanging
+ * their backing store.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+ vm_object_lock(object1);
+ object1_locked = TRUE;
+ if (object1->copy || object1->shadow || object1->shadowed ||
+ object1->purgable != VM_OBJECT_NONPURGABLE) {
+ /*
+ * We don't deal with copy or shadow objects (yet).
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ /*
+ * Since we're about to mess with the object's backing store,
+ * mark it as "paging_in_progress". Note that this is not enough
+ * to prevent any paging activity on this object, so the caller should
+ * have "quiesced" the objects beforehand, via a UPL operation with
+ * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
+ * and UPL_BLOCK_ACCESS (to mark the pages "busy").
+ */
+ vm_object_paging_begin(object1);
+ object1_paging = TRUE;
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+
+ /*
+ * Same as above for the 2nd object...
+ */
+ vm_object_lock(object2);
+ object2_locked = TRUE;
+ if (object2->copy || object2->shadow || object2->shadowed ||
+ object2->purgable != VM_OBJECT_NONPURGABLE) {
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+ vm_object_paging_begin(object2);
+ object2_paging = TRUE;
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
+
+ /*
+ * Allocate a temporary VM object to hold object1's contents
+ * while we copy object2 to object1.
+ */
+ tmp_object = vm_object_allocate(transpose_size);
+ vm_object_lock(tmp_object);
+ vm_object_paging_begin(tmp_object);
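+ /* make sure the temporary object gets destroyed, not cached, on deallocate */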
+ tmp_object->can_persist = FALSE;
+
+ /*
+ * Since we need to lock both objects at the same time,
+ * make sure we always lock them in the same order to
+ * avoid deadlocks.
+ */
+ if (object1 < object2) {
+ vm_object_lock(object1);
+ vm_object_lock(object2);
+ } else {
+ vm_object_lock(object2);
+ vm_object_lock(object1);
+ }
+ object1_locked = TRUE;
+ object2_locked = TRUE;
+
+ if (object1->size != object2->size ||
+ object1->size != transpose_size) {
+ /*
+ * If the 2 objects don't have the same size, we can't
+ * exchange their backing stores or one would overflow.
+ * If their size doesn't match the caller's
+ * "transpose_size", we can't do it either because the
+ * transpose operation will affect the entire span of
+ * the objects.
+ */
+ retval = KERN_INVALID_VALUE;
+ goto done;
+ }
+
+ /*
+ * Transpose the lists of resident pages.
+ */
+ if (object1->phys_contiguous || queue_empty(&object1->memq)) {
+ /*
+ * object1 is physically contiguous or has no
+ * resident pages to move, so just transfer pages
+ * from object2 to object1. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset);
+ }
+ assert(queue_empty(&object2->memq));
+ } else if (object2->phys_contiguous || queue_empty(&object2->memq)) {
+ /*
+ * object2 is physically contiguous or has no
+ * resident pages to move, so just transfer pages
+ * from object1 to object2. No need to go through
+ * an intermediate object.
+ */
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
+ vm_page_rename(page, object2, page->offset);
+ }
+ assert(queue_empty(&object1->memq));
+ } else {
+ /* transfer object1's pages to tmp_object */
+ vm_page_lock_queues();
+ while (!queue_empty(&object1->memq)) {
+ page = (vm_page_t) queue_first(&object1->memq);
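+ /*
+ * Save the page's offset across vm_page_remove() so the
+ * page keeps the same position once it is queued on
+ * tmp_object.
+ */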
+ page_offset = page->offset;
+ vm_page_remove(page);
+ page->offset = page_offset;
+ queue_enter(&tmp_object->memq, page, vm_page_t, listq);
+ }
+ vm_page_unlock_queues();
+ assert(queue_empty(&object1->memq));
+ /* transfer object2's pages to object1 */
+ while (!queue_empty(&object2->memq)) {
+ page = (vm_page_t) queue_first(&object2->memq);
+ vm_page_rename(page, object1, page->offset);
+ }
+ assert(queue_empty(&object2->memq));
+ /* transfer tmp_object's pages (object1's originals) to object2 */
+ while (!queue_empty(&tmp_object->memq)) {
+ page = (vm_page_t) queue_first(&tmp_object->memq);
+ queue_remove(&tmp_object->memq, page,
+ vm_page_t, listq);
+ vm_page_insert(page, object2, page->offset);
+ }
+ assert(queue_empty(&tmp_object->memq));
+ }
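+ /*
+ * The resident pages are now fully exchanged: object1 holds
+ * object2's original pages and vice versa.
+ */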
+
+ /* no need to transpose the size: they should be identical */
+ assert(object1->size == object2->size);
+
+#define __TRANSPOSE_FIELD(field) \
+MACRO_BEGIN \
+ tmp_object->field = object1->field; \
+ object1->field = object2->field; \
+ object2->field = tmp_object->field; \
+MACRO_END
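+ /*
+ * tmp_object only serves as scratch storage for the 3-way swap;
+ * the stale field values it accumulates are wiped out when it is
+ * re-initialized and deallocated in the cleanup code below.
+ */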
+
+ assert(!object1->copy);
+ assert(!object2->copy);
+
+ assert(!object1->shadow);
+ assert(!object2->shadow);
+
+ __TRANSPOSE_FIELD(shadow_offset); /* used by phys_contiguous objects */
+ __TRANSPOSE_FIELD(pager);
+ __TRANSPOSE_FIELD(paging_offset);
+
+ __TRANSPOSE_FIELD(pager_control);
+ /* update the memory_objects' pointers back to the VM objects */
+ if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object1->pager_control,
+ object1);
+ }
+ if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
+ memory_object_control_collapse(object2->pager_control,
+ object2);
+ }
+
+ __TRANSPOSE_FIELD(absent_count);
+
+ assert(object1->paging_in_progress);
+ assert(object2->paging_in_progress);
+
+ __TRANSPOSE_FIELD(pager_created);
+ __TRANSPOSE_FIELD(pager_initialized);
+ __TRANSPOSE_FIELD(pager_ready);
+ __TRANSPOSE_FIELD(pager_trusted);
+ __TRANSPOSE_FIELD(internal);
+ __TRANSPOSE_FIELD(temporary);
+ __TRANSPOSE_FIELD(private);
+ __TRANSPOSE_FIELD(pageout);
+ __TRANSPOSE_FIELD(true_share);
+ __TRANSPOSE_FIELD(phys_contiguous);
+ __TRANSPOSE_FIELD(nophyscache);
+ __TRANSPOSE_FIELD(last_alloc);
+ __TRANSPOSE_FIELD(sequential);
+ __TRANSPOSE_FIELD(cluster_size);
+ __TRANSPOSE_FIELD(existence_map);
+ __TRANSPOSE_FIELD(cow_hint);
+ __TRANSPOSE_FIELD(wimg_bits);
+
+#undef __TRANSPOSE_FIELD
+
+ retval = KERN_SUCCESS;
+
+done:
+ /*
+ * Cleanup.
+ */
+ if (tmp_object != VM_OBJECT_NULL) {
+ vm_object_paging_end(tmp_object);
+ vm_object_unlock(tmp_object);
+ /*
+ * Re-initialize the temporary object: the field swaps
+ * above left it holding object1's original pager, which
+ * now belongs to object2, so give tmp_object a fresh
+ * identity to avoid deallocating a real pager.
+ */
+ _vm_object_allocate(transpose_size, tmp_object);
+ vm_object_deallocate(tmp_object);
+ tmp_object = VM_OBJECT_NULL;
+ }
+
+ if (object1_locked) {
+ vm_object_unlock(object1);
+ object1_locked = FALSE;
+ }
+ if (object2_locked) {
+ vm_object_unlock(object2);
+ object2_locked = FALSE;
+ }
+ if (object1_paging) {
+ vm_object_lock(object1);
+ vm_object_paging_end(object1);
+ vm_object_unlock(object1);
+ object1_paging = FALSE;
+ }
+ if (object2_paging) {
+ vm_object_lock(object2);
+ vm_object_paging_end(object2);
+ vm_object_unlock(object2);
+ object2_paging = FALSE;
+ }
+
+ return retval;
+}