+ copy_size = sub_end - sub_start;
+
+ /* adjust the copy object */
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ nentries = 1;
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry !=
+ vm_map_copy_to_entry(copy)){
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy,
+ copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
+ if((entry->use_pmap) && (pmap == NULL)) {
+ kr = vm_map_copy_overwrite_nested(
+ VME_SUBMAP(entry),
+ sub_start,
+ copy,
+ interruptible,
+ VME_SUBMAP(entry)->pmap,
+ TRUE);
+ } else if (pmap != NULL) {
+ kr = vm_map_copy_overwrite_nested(
+ VME_SUBMAP(entry),
+ sub_start,
+ copy,
+ interruptible, pmap,
+ TRUE);
+ } else {
+ kr = vm_map_copy_overwrite_nested(
+ VME_SUBMAP(entry),
+ sub_start,
+ copy,
+ interruptible,
+ dst_map->pmap,
+ TRUE);
+ }
+ if(kr != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev
+ = previous_prev;
+ copy->size = total_size;
+ }
+ return kr;
+ }
+ if (dst_end <= local_end) {
+ return(KERN_SUCCESS);
+ }
+ /* otherwise copy no longer exists, it was */
+ /* destroyed after successful copy_overwrite */
+ copy = (vm_map_copy_t)
+ zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) =
+ vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->offset = new_offset;
+
+ /*
+ * XXX FBDP
+ * this does not seem to deal with
+ * the VM map store (R&B tree)
+ */
+
+ total_size -= copy_size;
+ copy_size = 0;
+ /* put back remainder of copy in container */
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ copy->size = total_size;
+ next_copy->vme_prev =
+ vm_map_copy_to_entry(copy);
+ next_copy = NULL;
+ }
+ base_addr = local_end;
+ vm_map_lock(dst_map);
+ if(!vm_map_lookup_entry(dst_map,
+ local_end, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ entry = tmp_entry;
+ continue;
+ }
+ if (dst_end <= entry->vme_end) {
+ copy_size = dst_end - base_addr;
+ break;
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ entry = next;
+ }/* for */
+
+ next_copy = NULL;
+ nentries = 1;
+
+ /* adjust the copy object */
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry != vm_map_copy_to_entry(copy)) {
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
+ if (aligned) {
+ pmap_t local_pmap;
+
+ if(pmap)
+ local_pmap = pmap;
+ else
+ local_pmap = dst_map->pmap;
+
+ if ((kr = vm_map_copy_overwrite_aligned(
+ dst_map, tmp_entry, copy,
+ base_addr, local_pmap)) != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ vm_map_unlock(dst_map);
+ } else {
+ /*
+ * Performance gain:
+ *
+ * If the copy and dst addresses are misaligned but share the same
+ * offset within the page, we can copy the misaligned parts
+ * unaligned and copy the rest aligned. If they are aligned but the
+ * length is unaligned, we simply need to copy the end bit
+ * unaligned. We'll need to split the misaligned bits of the
+ * region in this case!
+ */
+ /* ALWAYS UNLOCKS THE dst_map MAP */
+ kr = vm_map_copy_overwrite_unaligned(
+ dst_map,
+ tmp_entry,
+ copy,
+ base_addr,
+ discard_on_success);
+ if (kr != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ }
+ total_size -= copy_size;
+ if(total_size == 0)
+ break;
+ base_addr += copy_size;
+ copy_size = 0;
+ copy->offset = new_offset;
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ next_copy->vme_prev = vm_map_copy_to_entry(copy);
+ copy->size = total_size;
+ }
+ vm_map_lock(dst_map);
+ while(TRUE) {
+ if (!vm_map_lookup_entry(dst_map,
+ base_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ if (tmp_entry->in_transition) {
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+ } else {
+ break;
+ }
+ }
+ vm_map_clip_start(dst_map,
+ tmp_entry,
+ vm_map_trunc_page(base_addr,
+ VM_MAP_PAGE_MASK(dst_map)));
+
+ entry = tmp_entry;
+ } /* while */
+
+ /*
+ * Throw away the vm_map_copy object
+ */
+ if (discard_on_success)
+ vm_map_copy_discard(copy);
+
+ return(KERN_SUCCESS);
+}/* vm_map_copy_overwrite */
+
+kern_return_t
+vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_map_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
+{
+ vm_map_size_t head_size, tail_size;
+ vm_map_copy_t head_copy, tail_copy;
+ vm_map_offset_t head_addr, tail_addr;
+ vm_map_entry_t entry;
+ kern_return_t kr;
+
+ head_size = 0;
+ tail_size = 0;
+ head_copy = NULL;
+ tail_copy = NULL;
+ head_addr = 0;
+ tail_addr = 0;
+
+ if (interruptible ||
+ copy == VM_MAP_COPY_NULL ||
+ copy->type != VM_MAP_COPY_ENTRY_LIST) {
+ /*
+ * We can't split the "copy" map if we're interruptible
+ * or if we don't have a "copy" map...
+ */
+ blunt_copy:
+ return vm_map_copy_overwrite_nested(dst_map,
+ dst_addr,
+ copy,
+ interruptible,
+ (pmap_t) NULL,
+ TRUE);
+ }
+
+ if (copy->size < 3 * PAGE_SIZE) {
+ /*
+ * Too small to bother with optimizing...
+ */
+ goto blunt_copy;
+ }
+
+ if ((dst_addr & VM_MAP_PAGE_MASK(dst_map)) !=
+ (copy->offset & VM_MAP_PAGE_MASK(dst_map))) {
+ /*
+ * Incompatible mis-alignment of source and destination...
+ */
+ goto blunt_copy;
+ }
+
+ /*
+ * Proper alignment or identical mis-alignment at the beginning.
+ * Let's try and do a small unaligned copy first (if needed)
+ * and then an aligned copy for the rest.
+ */
+ if (!page_aligned(dst_addr)) {
+ head_addr = dst_addr;
+ head_size = (VM_MAP_PAGE_SIZE(dst_map) -
+ (copy->offset & VM_MAP_PAGE_MASK(dst_map)));
+ }
+ if (!page_aligned(copy->offset + copy->size)) {
+ /*
+ * Mis-alignment at the end.
+ * Do an aligned copy up to the last page and
+ * then an unaligned copy for the remaining bytes.
+ */
+ tail_size = ((copy->offset + copy->size) &
+ VM_MAP_PAGE_MASK(dst_map));
+ tail_addr = dst_addr + copy->size - tail_size;
+ }
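+
+ /*
+ * Illustrative example (hypothetical numbers, not part of this change):
+ * with 4K pages, a copy->offset of 0x300, a copy->size of 0x5000 and a
+ * dst_addr sharing the same 0x300 offset within its page, the split is
+ * head_size = 0x1000 - 0x300 = 0xd00 (unaligned copy up to the first
+ * page boundary), tail_size = (0x300 + 0x5000) & 0xfff = 0x300
+ * (unaligned copy of the last partial page), and the remaining
+ * 0x5000 - 0xd00 - 0x300 = 0x4000 bytes in the middle are copied
+ * page-aligned.
+ */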
+
+ if (head_size + tail_size == copy->size) {
+ /*
+ * It's all unaligned, no optimization possible...
+ */
+ goto blunt_copy;
+ }
+
+ /*
+ * Can't optimize if there are any submaps in the
+ * destination due to the way we free the "copy" map
+ * progressively in vm_map_copy_overwrite_nested()
+ * in that case.
+ */
+ vm_map_lock_read(dst_map);
+ if (! vm_map_lookup_entry(dst_map, dst_addr, &entry)) {
+ vm_map_unlock_read(dst_map);
+ goto blunt_copy;
+ }
+ for (;
+ (entry != vm_map_to_entry(dst_map) &&
+ entry->vme_start < dst_addr + copy->size);
+ entry = entry->vme_next) {
+ if (entry->is_sub_map) {
+ vm_map_unlock_read(dst_map);
+ goto blunt_copy;
+ }
+ }
+ vm_map_unlock_read(dst_map);
+
+ if (head_size) {
+ /*
+ * Unaligned copy of the first "head_size" bytes, to reach
+ * a page boundary.
+ */
+
+ /*
+ * Extract "head_copy" out of "copy".
+ */
+ head_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ head_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
+ vm_map_copy_first_entry(head_copy) =
+ vm_map_copy_to_entry(head_copy);
+ vm_map_copy_last_entry(head_copy) =
+ vm_map_copy_to_entry(head_copy);
+ head_copy->type = VM_MAP_COPY_ENTRY_LIST;
+ head_copy->cpy_hdr.nentries = 0;
+ head_copy->cpy_hdr.entries_pageable =
+ copy->cpy_hdr.entries_pageable;
+ vm_map_store_init(&head_copy->cpy_hdr);
+
+ head_copy->offset = copy->offset;
+ head_copy->size = head_size;
+
+ copy->offset += head_size;
+ copy->size -= head_size;
+
+ entry = vm_map_copy_first_entry(copy);
+ vm_map_copy_clip_end(copy, entry, copy->offset);
+ vm_map_copy_entry_unlink(copy, entry);
+ vm_map_copy_entry_link(head_copy,
+ vm_map_copy_to_entry(head_copy),
+ entry);
+
+ /*
+ * Do the unaligned copy.
+ */
+ kr = vm_map_copy_overwrite_nested(dst_map,
+ head_addr,
+ head_copy,
+ interruptible,
+ (pmap_t) NULL,
+ FALSE);
+ if (kr != KERN_SUCCESS)
+ goto done;
+ }
+
+ if (tail_size) {
+ /*
+ * Extract "tail_copy" out of "copy".
+ */
+ tail_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ tail_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
+ vm_map_copy_first_entry(tail_copy) =
+ vm_map_copy_to_entry(tail_copy);
+ vm_map_copy_last_entry(tail_copy) =
+ vm_map_copy_to_entry(tail_copy);
+ tail_copy->type = VM_MAP_COPY_ENTRY_LIST;
+ tail_copy->cpy_hdr.nentries = 0;
+ tail_copy->cpy_hdr.entries_pageable =
+ copy->cpy_hdr.entries_pageable;
+ vm_map_store_init(&tail_copy->cpy_hdr);
+
+ tail_copy->offset = copy->offset + copy->size - tail_size;
+ tail_copy->size = tail_size;
+
+ copy->size -= tail_size;
+
+ entry = vm_map_copy_last_entry(copy);
+ vm_map_copy_clip_start(copy, entry, tail_copy->offset);
+ entry = vm_map_copy_last_entry(copy);
+ vm_map_copy_entry_unlink(copy, entry);
+ vm_map_copy_entry_link(tail_copy,
+ vm_map_copy_last_entry(tail_copy),
+ entry);
+ }
+
+ /*
+ * Copy most (or possibly all) of the data.
+ */
+ kr = vm_map_copy_overwrite_nested(dst_map,
+ dst_addr + head_size,
+ copy,
+ interruptible,
+ (pmap_t) NULL,
+ FALSE);
+ if (kr != KERN_SUCCESS) {
+ goto done;
+ }
+
+ if (tail_size) {
+ kr = vm_map_copy_overwrite_nested(dst_map,
+ tail_addr,
+ tail_copy,
+ interruptible,
+ (pmap_t) NULL,
+ FALSE);
+ }
+
+done:
+ assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
+ if (kr == KERN_SUCCESS) {
+ /*
+ * Discard all the copy maps.
+ */
+ if (head_copy) {
+ vm_map_copy_discard(head_copy);
+ head_copy = NULL;
+ }
+ vm_map_copy_discard(copy);
+ if (tail_copy) {
+ vm_map_copy_discard(tail_copy);
+ tail_copy = NULL;
+ }
+ } else {
+ /*
+ * Re-assemble the original copy map.
+ */
+ if (head_copy) {
+ entry = vm_map_copy_first_entry(head_copy);
+ vm_map_copy_entry_unlink(head_copy, entry);
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_to_entry(copy),
+ entry);
+ copy->offset -= head_size;
+ copy->size += head_size;
+ vm_map_copy_discard(head_copy);
+ head_copy = NULL;
+ }
+ if (tail_copy) {
+ entry = vm_map_copy_last_entry(tail_copy);
+ vm_map_copy_entry_unlink(tail_copy, entry);
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_last_entry(copy),
+ entry);
+ copy->size += tail_size;
+ vm_map_copy_discard(tail_copy);
+ tail_copy = NULL;
+ }
+ }
+ return kr;
+}
+
+
+/*
+ * Routine: vm_map_copy_overwrite_unaligned [internal use only]
+ *
+ * Description:
+ * Physically copy unaligned data
+ *
+ * Implementation:
+ * Unaligned parts of pages have to be physically copied. We use
+ * a modified form of vm_fault_copy (which understands non-aligned
+ * page offsets and sizes) to do the copy. We attempt to copy as
+ * much memory in one go as possible; however, vm_fault_copy copies
+ * within a single memory object, so we have to find the smallest of
+ * "amount left", "source object data size" and "target object data
+ * size". With unaligned data we don't need to split regions,
+ * therefore the source (copy) object should be one map entry, though
+ * the target range may be split over multiple map entries. In any
+ * event we are pessimistic about these assumptions.
+ *
+ * Assumptions:
+ * dst_map is locked on entry and is returned locked on success,
+ * unlocked on error.
+ */
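+
+/*
+ * Illustrative sketch (not part of this change): each pass of the copy
+ * loop below transfers
+ *
+ *     copy_size = MIN(amount_left, MIN(dst_size, src_size));
+ *
+ * where dst_size is the room left in the current destination map entry
+ * and src_size is the data left in the current source copy entry, then
+ * hands that much to vm_fault_copy() before moving on to the next
+ * source or destination entry.
+ */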
+
+static kern_return_t
+vm_map_copy_overwrite_unaligned(
+ vm_map_t dst_map,
+ vm_map_entry_t entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start,
+ boolean_t discard_on_success)
+{
+ vm_map_entry_t copy_entry;
+ vm_map_entry_t copy_entry_next;
+ vm_map_version_t version;
+ vm_object_t dst_object;
+ vm_object_offset_t dst_offset;
+ vm_object_offset_t src_offset;
+ vm_object_offset_t entry_offset;
+ vm_map_offset_t entry_end;
+ vm_map_size_t src_size,
+ dst_size,
+ copy_size,
+ amount_left;
+ kern_return_t kr = KERN_SUCCESS;
+
+
+ copy_entry = vm_map_copy_first_entry(copy);
+
+ vm_map_lock_write_to_read(dst_map);
+
+ src_offset = copy->offset - vm_object_trunc_page(copy->offset);
+ amount_left = copy->size;
+/*
+ * Unaligned, so we never clipped this entry; we need the offset into
+ * the vm_object, not just into the data.
+ */
+ while (amount_left > 0) {
+
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* "start" must be within the current map entry */
+ assert ((start>=entry->vme_start) && (start<entry->vme_end));
+
+ dst_offset = start - entry->vme_start;
+
+ dst_size = entry->vme_end - start;
+
+ src_size = copy_entry->vme_end -
+ (copy_entry->vme_start + src_offset);
+
+ if (dst_size < src_size) {
+/*
+ * we can only copy dst_size bytes before
+ * we have to get the next destination entry
+ */
+ copy_size = dst_size;
+ } else {
+/*
+ * we can only copy src_size bytes before
+ * we have to get the next source copy entry
+ */
+ copy_size = src_size;
+ }
+
+ if (copy_size > amount_left) {
+ copy_size = amount_left;
+ }
+/*
+ * Entry needs copy: create a shadow object for the
+ * copy-on-write region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0))
+ {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ VME_OBJECT_SHADOW(entry,
+ (vm_map_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ vm_map_lock_write_to_read(dst_map);
+ }
+ dst_object = VME_OBJECT(entry);
+/*
+ * Unlike with the virtual (aligned) copy, we're going
+ * to fault on it; therefore we need a target object.
+ */
+ if (dst_object == VM_OBJECT_NULL) {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ dst_object = vm_object_allocate((vm_map_size_t)
+ entry->vme_end - entry->vme_start);
+ VME_OBJECT(entry) = dst_object;
+ VME_OFFSET_SET(entry, 0);
+ assert(entry->use_pmap);
+ vm_map_lock_write_to_read(dst_map);
+ }
+/*
+ * Take an object reference and unlock map. The "entry" may
+ * disappear or change when the map is unlocked.
+ */
+ vm_object_reference(dst_object);
+ version.main_timestamp = dst_map->timestamp;
+ entry_offset = VME_OFFSET(entry);
+ entry_end = entry->vme_end;
+ vm_map_unlock_read(dst_map);
+/*
+ * Copy as much as possible in one pass
+ */
+ kr = vm_fault_copy(
+ VME_OBJECT(copy_entry),
+ VME_OFFSET(copy_entry) + src_offset,
+ &copy_size,
+ dst_object,
+ entry_offset + dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ start += copy_size;
+ src_offset += copy_size;
+ amount_left -= copy_size;
+/*
+ * Release the object reference
+ */
+ vm_object_deallocate(dst_object);
+/*
+ * If a hard error occurred, return it now
+ */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
+ || amount_left == 0)
+ {
+/*
+ * all done with this copy entry, dispose.
+ */
+ copy_entry_next = copy_entry->vme_next;
+
+ if (discard_on_success) {
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ assert(!copy_entry->is_sub_map);
+ vm_object_deallocate(VME_OBJECT(copy_entry));
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ if (copy_entry_next == vm_map_copy_to_entry(copy) &&
+ amount_left) {
+/*
+ * not finished copying but ran out of source
+ */
+ return KERN_INVALID_ADDRESS;
+ }
+
+ copy_entry = copy_entry_next;
+
+ src_offset = 0;
+ }
+
+ if (amount_left == 0)
+ return KERN_SUCCESS;
+
+ vm_map_lock_read(dst_map);
+ if (version.main_timestamp == dst_map->timestamp) {
+ if (start == entry_end) {
+/*
+ * destination region is split. Use the version
+ * information to avoid a lookup in the normal
+ * case.
+ */
+ entry = entry->vme_next;
+/*
+ * should be contiguous. Fail if we encounter
+ * a hole in the destination.
+ */
+ if (start != entry->vme_start) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ } else {
+/*
+ * Map version check failed.
+ * We must look up the entry because somebody
+ * might have changed the map behind our backs.
+ */
+ RetryLookup:
+ if (!vm_map_lookup_entry(dst_map, start, &entry))
+ {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ }/* while */
+
+ return KERN_SUCCESS;
+}/* vm_map_copy_overwrite_unaligned */
+
+/*
+ * Routine: vm_map_copy_overwrite_aligned [internal use only]
+ *
+ * Description:
+ * Does all the vm_trickery possible for whole pages.
+ *
+ * Implementation:
+ *
+ * If there are no permanent objects in the destination,
+ * and the source and destination map entry zones match,
+ * and the destination map entry is not shared,
+ * then the map entries can be deleted and replaced
+ * with those from the copy. The following code is the
+ * basic idea of what to do, but there are lots of annoying
+ * little details about getting protection and inheritance
+ * right. Should add protection, inheritance, and sharing checks
+ * to the above pass and make sure that no wiring is involved.
+ */
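+
+/*
+ * Illustrative sketch (not part of this change): the "throw away and
+ * install" fast path below is taken when the destination entry holds
+ * temporary, unshared memory, roughly
+ *
+ *     (!entry->is_shared &&
+ *      (VME_OBJECT(entry) == VM_OBJECT_NULL ||
+ *       (VME_OBJECT(entry)->internal && !VME_OBJECT(entry)->true_share))) ||
+ *     entry->needs_copy
+ *
+ * everything else falls through to the physical-copy slow path built
+ * around vm_fault_copy().
+ */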
+
+int vm_map_copy_overwrite_aligned_src_not_internal = 0;
+int vm_map_copy_overwrite_aligned_src_not_symmetric = 0;
+int vm_map_copy_overwrite_aligned_src_large = 0;
+
+static kern_return_t
+vm_map_copy_overwrite_aligned(
+ vm_map_t dst_map,
+ vm_map_entry_t tmp_entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start,
+ __unused pmap_t pmap)
+{
+ vm_object_t object;
+ vm_map_entry_t copy_entry;
+ vm_map_size_t copy_size;
+ vm_map_size_t size;
+ vm_map_entry_t entry;
+
+ while ((copy_entry = vm_map_copy_first_entry(copy))
+ != vm_map_copy_to_entry(copy))
+ {
+ copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+
+ entry = tmp_entry;
+ if (entry->is_sub_map) {
+ /* unnested when clipped earlier */
+ assert(!entry->use_pmap);
+ }
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+ size = (entry->vme_end - entry->vme_start);
+ /*
+ * Make sure that no holes popped up in the
+ * address map, and that the protection is
+ * still valid, in case the map was unlocked
+ * earlier.
+ */
+
+ if ((entry->vme_start != start) || ((entry->is_sub_map)
+ && !entry->needs_copy)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ assert(entry != vm_map_to_entry(dst_map));
+
+ /*
+ * Check protection again
+ */
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * Adjust to source size first
+ */
+
+ if (copy_size < size) {
+ if (entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ entry->map_aligned = FALSE;
+ }
+ vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
+ size = copy_size;
+ }
+
+ /*
+ * Adjust to destination size
+ */
+
+ if (size < copy_size) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + size);
+ copy_size = size;
+ }
+
+ assert((entry->vme_end - entry->vme_start) == size);
+ assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
+ assert((copy_entry->vme_end - copy_entry->vme_start) == size);
+
+ /*
+ * If the destination contains temporary unshared memory,
+ * we can perform the copy by throwing it away and
+ * installing the source data.
+ */
+
+ object = VME_OBJECT(entry);
+ if ((!entry->is_shared &&
+ ((object == VM_OBJECT_NULL) ||
+ (object->internal && !object->true_share))) ||
+ entry->needs_copy) {
+ vm_object_t old_object = VME_OBJECT(entry);
+ vm_object_offset_t old_offset = VME_OFFSET(entry);
+ vm_object_offset_t offset;
+
+ /*
+ * Ensure that the source and destination aren't
+ * identical
+ */
+ if (old_object == VME_OBJECT(copy_entry) &&
+ old_offset == VME_OFFSET(copy_entry)) {
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ if (old_object != VM_OBJECT_NULL)
+ vm_object_deallocate(old_object);
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ continue;
+ }
+
+#define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */
+#define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */
+ if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL &&
+ VME_OBJECT(copy_entry)->vo_size >= __TRADEOFF1_OBJ_SIZE &&
+ copy_size <= __TRADEOFF1_COPY_SIZE) {
+ /*
+ * Virtual vs. Physical copy tradeoff #1.
+ *
+ * Copying only a few pages out of a large
+ * object: do a physical copy instead of
+ * a virtual copy, to avoid possibly keeping
+ * the entire large object alive because of
+ * those few copy-on-write pages.
+ */
+ vm_map_copy_overwrite_aligned_src_large++;
+ goto slow_copy;
+ }
+
+ if ((dst_map->pmap != kernel_pmap) &&
+ (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) &&
+ (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_LARGE_REUSED)) {
+ vm_object_t new_object, new_shadow;
+
+ /*
+ * We're about to map something over a mapping
+ * established by malloc()...
+ */
+ new_object = VME_OBJECT(copy_entry);
+ if (new_object != VM_OBJECT_NULL) {
+ vm_object_lock_shared(new_object);
+ }
+ while (new_object != VM_OBJECT_NULL &&
+ !new_object->true_share &&
+ new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+ new_object->internal) {
+ new_shadow = new_object->shadow;
+ if (new_shadow == VM_OBJECT_NULL) {
+ break;
+ }
+ vm_object_lock_shared(new_shadow);
+ vm_object_unlock(new_object);
+ new_object = new_shadow;
+ }
+ if (new_object != VM_OBJECT_NULL) {
+ if (!new_object->internal) {
+ /*
+ * The new mapping is backed
+ * by an external object. We
+ * don't want malloc'ed memory
+ * to be replaced with such a
+ * non-anonymous mapping, so
+ * let's go off the optimized
+ * path...
+ */
+ vm_map_copy_overwrite_aligned_src_not_internal++;
+ vm_object_unlock(new_object);
+ goto slow_copy;
+ }
+ if (new_object->true_share ||
+ new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) {
+ /*
+ * Same if there's a "true_share"
+ * object in the shadow chain, or
+ * an object with a non-default
+ * (SYMMETRIC) copy strategy.
+ */
+ vm_map_copy_overwrite_aligned_src_not_symmetric++;
+ vm_object_unlock(new_object);
+ goto slow_copy;
+ }
+ vm_object_unlock(new_object);
+ }
+ /*
+ * The new mapping is still backed by
+ * anonymous (internal) memory, so it's
+ * OK to substitute it for the original
+ * malloc() mapping.
+ */
+ }
+
+ if (old_object != VM_OBJECT_NULL) {
+ if(entry->is_sub_map) {
+ if(entry->use_pmap) {
+#ifndef NO_NESTED_PMAP
+ pmap_unnest(dst_map->pmap,
+ (addr64_t)entry->vme_start,
+ entry->vme_end - entry->vme_start);
+#endif /* NO_NESTED_PMAP */
+ if(dst_map->mapped_in_other_pmaps) {
+ /* clean up parent */
+ /* map/maps */
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ VME_SUBMAP(entry),
+ VME_OFFSET(entry));
+ }
+ } else {
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ VME_SUBMAP(entry),
+ VME_OFFSET(entry));
+ }
+ vm_map_deallocate(VME_SUBMAP(entry));
+ } else {
+ if(dst_map->mapped_in_other_pmaps) {
+ vm_object_pmap_protect_options(
+ VME_OBJECT(entry),
+ VME_OFFSET(entry),
+ entry->vme_end
+ - entry->vme_start,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE,
+ PMAP_OPTIONS_REMOVE);
+ } else {
+ pmap_remove_options(
+ dst_map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end),
+ PMAP_OPTIONS_REMOVE);
+ }
+ vm_object_deallocate(old_object);
+ }
+ }
+
+ entry->is_sub_map = FALSE;
+ VME_OBJECT_SET(entry, VME_OBJECT(copy_entry));
+ object = VME_OBJECT(entry);
+ entry->needs_copy = copy_entry->needs_copy;
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ offset = VME_OFFSET(copy_entry);
+ VME_OFFSET_SET(entry, offset);
+
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ /*
+ * We could try to push pages into the pmap at this point, BUT
+ * this optimization only saved on average 2 us per page if ALL
+ * the pages in the source were currently mapped and ALL the
+ * pages in the dest were touched. If fewer than 2/3 of the
+ * pages were touched, this optimization actually cost more cycles.
+ * It also puts a lot of pressure on the pmap layer w.r.t. mapping
+ * structures.
+ */
+
+ /*
+ * Set up for the next iteration. The map
+ * has not been unlocked, so the next
+ * address should be at the end of this
+ * entry, and the next map entry should be
+ * the one following it.
+ */
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ vm_map_version_t version;
+ vm_object_t dst_object;
+ vm_object_offset_t dst_offset;
+ kern_return_t r;
+
+ slow_copy:
+ if (entry->needs_copy) {
+ VME_OBJECT_SHADOW(entry,
+ (entry->vme_end -
+ entry->vme_start));
+ entry->needs_copy = FALSE;
+ }
+
+ dst_object = VME_OBJECT(entry);
+ dst_offset = VME_OFFSET(entry);
+
+ /*
+ * Take an object reference, and record
+ * the map version information so that the
+ * map can be safely unlocked.
+ */
+
+ if (dst_object == VM_OBJECT_NULL) {
+ /*
+ * We would usually have just taken the
+ * optimized path above if the destination
+ * object has not been allocated yet. But we
+ * now disable that optimization if the copy
+ * entry's object is not backed by anonymous
+ * memory to avoid replacing malloc'ed
+ * (i.e. re-usable) anonymous memory with a
+ * not-so-anonymous mapping.
+ * So we have to handle this case here and
+ * allocate a new VM object for this map entry.
+ */
+ dst_object = vm_object_allocate(
+ entry->vme_end - entry->vme_start);
+ dst_offset = 0;
+ VME_OBJECT_SET(entry, dst_object);
+ VME_OFFSET_SET(entry, dst_offset);
+ assert(entry->use_pmap);
+
+ }
+
+ vm_object_reference(dst_object);
+
+ /* account for unlock bumping up timestamp */
+ version.main_timestamp = dst_map->timestamp + 1;
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * Copy as much as possible in one pass
+ */
+
+ copy_size = size;
+ r = vm_fault_copy(
+ VME_OBJECT(copy_entry),
+ VME_OFFSET(copy_entry),
+ &copy_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ /*
+ * Release the object reference
+ */
+
+ vm_object_deallocate(dst_object);
+
+ /*
+ * If a hard error occurred, return it now
+ */
+
+ if (r != KERN_SUCCESS)
+ return(r);
+
+ if (copy_size != 0) {
+ /*
+ * Dispose of the copied region
+ */
+
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + copy_size);
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(VME_OBJECT(copy_entry));
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ /*
+ * Pick up in the destination map where we left off.
+ *
+ * Use the version information to avoid a lookup
+ * in the normal case.
+ */
+
+ start += copy_size;
+ vm_map_lock(dst_map);
+ if (version.main_timestamp == dst_map->timestamp &&
+ copy_size != 0) {
+ /* We can safely use saved tmp_entry value */
+
+ if (tmp_entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(
+ start,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ tmp_entry->map_aligned = FALSE;
+ }
+ vm_map_clip_end(dst_map, tmp_entry, start);
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ /* Must do lookup of tmp_entry */
+
+ if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ if (tmp_entry->map_aligned &&
+ !VM_MAP_PAGE_ALIGNED(
+ start,
+ VM_MAP_PAGE_MASK(dst_map))) {
+ /* no longer map-aligned */
+ tmp_entry->map_aligned = FALSE;
+ }
+ vm_map_clip_start(dst_map, tmp_entry, start);
+ }
+ }
+ }/* while */
+
+ return(KERN_SUCCESS);
+}/* vm_map_copy_overwrite_aligned */
+
+/*
+ * Routine: vm_map_copyin_kernel_buffer [internal use only]
+ *
+ * Description:
+ * Copy in data to a kernel buffer from space in the
+ * source map. The original space may be optionally
+ * deallocated.
+ *
+ * If successful, returns a new copy object.
+ */
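+
+/*
+ * Illustrative sketch (not part of this change): the copy header and the
+ * copied data share a single kalloc() allocation,
+ *
+ *     kalloc_size = cpy_kdata_hdr_sz + len;
+ *
+ * so the payload lives at copy->cpy_kdata, right after the vm_map_copy
+ * header, and is later freed together with the header in one kfree().
+ */
+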
+static kern_return_t
+vm_map_copyin_kernel_buffer(
+ vm_map_t src_map,
+ vm_map_offset_t src_addr,
+ vm_map_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result)
+{
+ kern_return_t kr;
+ vm_map_copy_t copy;
+ vm_size_t kalloc_size;
+
+ if (len > msg_ool_size_small)
+ return KERN_INVALID_ARGUMENT;
+
+ kalloc_size = (vm_size_t)(cpy_kdata_hdr_sz + len);
+
+ copy = (vm_map_copy_t)kalloc(kalloc_size);
+ if (copy == VM_MAP_COPY_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+ copy->type = VM_MAP_COPY_KERNEL_BUFFER;
+ copy->size = len;
+ copy->offset = 0;
+
+ kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t)len);
+ if (kr != KERN_SUCCESS) {
+ kfree(copy, kalloc_size);
+ return kr;
+ }
+ if (src_destroy) {
+ (void) vm_map_remove(
+ src_map,
+ vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map)),
+ vm_map_round_page(src_addr + len,
+ VM_MAP_PAGE_MASK(src_map)),
+ (VM_MAP_REMOVE_INTERRUPTIBLE |
+ VM_MAP_REMOVE_WAIT_FOR_KWIRE |
+ ((src_map == kernel_map) ? VM_MAP_REMOVE_KUNWIRE : 0)));
+ }
+ *copy_result = copy;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_map_copyout_kernel_buffer [internal use only]
+ *
+ * Description:
+ * Copy out data from a kernel buffer into space in the
+ * destination map. The space may be optionally dynamically
+ * allocated.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
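+
+/*
+ * Illustrative sketch (not part of this change): the two modes reduce to
+ *
+ *     if (!overwrite)
+ *             vm_map_enter(map, addr, vm_map_round_page(copy_size, ...), ...);
+ *     copyout(copy->cpy_kdata, *addr, copy_size);
+ *
+ * with the kernel buffer kfree()d only on success and only when
+ * consume_on_success is set.
+ */
+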
+static int vm_map_copyout_kernel_buffer_failures = 0;
+static kern_return_t
+vm_map_copyout_kernel_buffer(
+ vm_map_t map,
+ vm_map_address_t *addr, /* IN/OUT */
+ vm_map_copy_t copy,
+ vm_map_size_t copy_size,
+ boolean_t overwrite,
+ boolean_t consume_on_success)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ thread_t thread = current_thread();
+
+ assert(copy->size == copy_size);
+
+ /*
+ * check for corrupted vm_map_copy structure
+ */
+ if (copy_size > msg_ool_size_small || copy->offset)
+ panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld",
+ (long long)copy->size, (long long)copy->offset);
+
+ if (!overwrite) {
+
+ /*
+ * Allocate space in the target map for the data
+ */
+ *addr = 0;
+ kr = vm_map_enter(map,
+ addr,
+ vm_map_round_page(copy_size,
+ VM_MAP_PAGE_MASK(map)),
+ (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE,
+ VM_OBJECT_NULL,
+ (vm_object_offset_t) 0,
+ FALSE,
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ }
+
+ /*
+ * Copyout the data from the kernel buffer to the target map.
+ */
+ if (thread->map == map) {
+
+ /*
+ * If the target map is the current map, just do
+ * the copy.
+ */
+ assert((vm_size_t)copy_size == copy_size);
+ if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ }
+ else {
+ vm_map_t oldmap;
+
+ /*
+ * If the target map is another map, assume the
+ * target's address space identity for the duration
+ * of the copy.
+ */
+ vm_map_reference(map);
+ oldmap = vm_map_switch(map);
+
+ assert((vm_size_t)copy_size == copy_size);
+ if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) {
+ vm_map_copyout_kernel_buffer_failures++;
+ kr = KERN_INVALID_ADDRESS;
+ }
+
+ (void) vm_map_switch(oldmap);
+ vm_map_deallocate(map);
+ }
+
+ if (kr != KERN_SUCCESS) {
+ /* the copy failed, clean up */
+ if (!overwrite) {
+ /*
+ * Deallocate the space we allocated in the target map.
+ */
+ (void) vm_map_remove(
+ map,
+ vm_map_trunc_page(*addr,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page((*addr +
+ vm_map_round_page(copy_size,
+ VM_MAP_PAGE_MASK(map))),
+ VM_MAP_PAGE_MASK(map)),
+ VM_MAP_NO_FLAGS);
+ *addr = 0;
+ }
+ } else {
+ /* copy was successful, discard the copy structure */
+ if (consume_on_success) {
+ kfree(copy, copy_size + cpy_kdata_hdr_sz);
+ }
+ }
+
+ return kr;
+}
+
+/*
+ * Macro: vm_map_copy_insert
+ *
+ * Description:
+ * Link a copy chain ("copy") into a map at the
+ * specified location (after "where").
+ * Side effects:
+ * The copy chain is destroyed.
+ * Warning:
+ * The arguments are evaluated multiple times.
+ */
+#define vm_map_copy_insert(map, where, copy) \
+MACRO_BEGIN \
+ vm_map_store_copy_insert(map, where, copy); \
+ zfree(vm_map_copy_zone, copy); \
+MACRO_END
+
+void
+vm_map_copy_remap(
+ vm_map_t map,
+ vm_map_entry_t where,
+ vm_map_copy_t copy,
+ vm_map_offset_t adjustment,
+ vm_prot_t cur_prot,
+ vm_prot_t max_prot,
+ vm_inherit_t inheritance)
+{
+ vm_map_entry_t copy_entry, new_entry;
+
+ for (copy_entry = vm_map_copy_first_entry(copy);
+ copy_entry != vm_map_copy_to_entry(copy);
+ copy_entry = copy_entry->vme_next) {
+ /* get a new VM map entry for the map */
+ new_entry = vm_map_entry_create(map,
+ !map->hdr.entries_pageable);
+ /* copy the "copy entry" to the new entry */
+ vm_map_entry_copy(new_entry, copy_entry);
+ /* adjust "start" and "end" */
+ new_entry->vme_start += adjustment;
+ new_entry->vme_end += adjustment;
+ /* clear some attributes */
+ new_entry->inheritance = inheritance;
+ new_entry->protection = cur_prot;
+ new_entry->max_protection = max_prot;
+ new_entry->behavior = VM_BEHAVIOR_DEFAULT;
+ /* take an extra reference on the entry's "object" */
+ if (new_entry->is_sub_map) {
+ assert(!new_entry->use_pmap); /* not nested */
+ vm_map_lock(VME_SUBMAP(new_entry));
+ vm_map_reference(VME_SUBMAP(new_entry));
+ vm_map_unlock(VME_SUBMAP(new_entry));
+ } else {
+ vm_object_reference(VME_OBJECT(new_entry));
+ }
+ /* insert the new entry in the map */
+ vm_map_store_entry_link(map, where, new_entry);
+ /* continue inserting the "copy entries" after the new entry */
+ where = new_entry;
+ }
+}
+
+
+/*
+ * Returns true if *size matches (or is in the range of) copy->size.
+ * Upon returning true, the *size field is updated with the actual size of the
+ * copy object (may be different for VM_MAP_COPY_ENTRY_LIST types)
+ */
+boolean_t
+vm_map_copy_validate_size(
+ vm_map_t dst_map,
+ vm_map_copy_t copy,
+ vm_map_size_t *size)
+{
+ if (copy == VM_MAP_COPY_NULL)
+ return FALSE;
+ vm_map_size_t copy_sz = copy->size;
+ vm_map_size_t sz = *size;
+ switch (copy->type) {
+ case VM_MAP_COPY_OBJECT:
+ case VM_MAP_COPY_KERNEL_BUFFER:
+ if (sz == copy_sz)
+ return TRUE;
+ break;
+ case VM_MAP_COPY_ENTRY_LIST:
+ /*
+ * potential page-size rounding prevents us from exactly
+ * validating this flavor of vm_map_copy, but we can at least
+ * assert that it's within a range.
+ */
+ if (copy_sz >= sz &&
+ copy_sz <= vm_map_round_page(sz, VM_MAP_PAGE_MASK(dst_map))) {
+ *size = copy_sz;
+ return TRUE;
+ }
+ break;
+ default:
+ break;
+ }
+ return FALSE;
+}
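+
+/*
+ * Illustrative usage sketch (hypothetical caller and "expected_size",
+ * not part of this change):
+ *
+ *     vm_map_size_t size = expected_size;
+ *     if (!vm_map_copy_validate_size(dst_map, copy, &size))
+ *             return KERN_FAILURE;
+ *     kr = vm_map_copyout_size(dst_map, &dst_addr, copy, size);
+ *
+ * For VM_MAP_COPY_ENTRY_LIST copies, "size" may be rounded up to the
+ * actual copy object size before being handed to vm_map_copyout_size().
+ */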
+
+/*
+ * Routine: vm_map_copyout_size
+ *
+ * Description:
+ * Copy out a copy chain ("copy") into newly-allocated
+ * space in the destination map. Uses a prevalidated
+ * size for the copy object (vm_map_copy_validate_size).
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
+kern_return_t
+vm_map_copyout_size(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy,
+ vm_map_size_t copy_size)
+{
+ return vm_map_copyout_internal(dst_map, dst_addr, copy, copy_size,
+ TRUE, /* consume_on_success */
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+}
+
+/*
+ * Routine: vm_map_copyout
+ *
+ * Description:
+ * Copy out a copy chain ("copy") into newly-allocated
+ * space in the destination map.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
+kern_return_t
+vm_map_copyout(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
+{
+ return vm_map_copyout_internal(dst_map, dst_addr, copy, copy ? copy->size : 0,
+ TRUE, /* consume_on_success */
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+}
+
+kern_return_t
+vm_map_copyout_internal(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy,
+ vm_map_size_t copy_size,
+ boolean_t consume_on_success,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_size_t size;
+ vm_map_size_t adjustment;
+ vm_map_offset_t start;
+ vm_object_offset_t vm_copy_start;
+ vm_map_entry_t last;
+ vm_map_entry_t entry;
+ vm_map_entry_t hole_entry;
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL) {
+ *dst_addr = 0;
+ return(KERN_SUCCESS);
+ }
+
+ if (copy->size != copy_size) {
+ *dst_addr = 0;
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Check for special copy object, created
+ * by vm_map_copyin_object.
+ */
+
+ if (copy->type == VM_MAP_COPY_OBJECT) {
+ vm_object_t object = copy->cpy_object;
+ kern_return_t kr;
+ vm_object_offset_t offset;
+
+ offset = vm_object_trunc_page(copy->offset);
+ size = vm_map_round_page((copy_size +
+ (vm_map_size_t)(copy->offset -
+ offset)),
+ VM_MAP_PAGE_MASK(dst_map));
+ *dst_addr = 0;
+ kr = vm_map_enter(dst_map, dst_addr, size,
+ (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
+ object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return(kr);
+ /* Account for non-pagealigned copy object */
+ *dst_addr += (vm_map_offset_t)(copy->offset - offset);
+ if (consume_on_success)
+ zfree(vm_map_copy_zone, copy);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check for special kernel buffer allocated
+ * by new_ipc_kmsg_copyin.
+ */
+
+ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
+ return vm_map_copyout_kernel_buffer(dst_map, dst_addr,
+ copy, copy_size, FALSE,
+ consume_on_success);
+ }
+
+
+ /*
+ * Find space for the data
+ */
+
+ vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset,
+ VM_MAP_COPY_PAGE_MASK(copy));
+ size = vm_map_round_page((vm_map_size_t)copy->offset + copy_size,
+ VM_MAP_COPY_PAGE_MASK(copy))
+ - vm_copy_start;
+
+
+StartAgain: ;
+
+ vm_map_lock(dst_map);
+ if( dst_map->disable_vmentry_reuse == TRUE) {
+ VM_MAP_HIGHEST_ENTRY(dst_map, entry, start);
+ last = entry;
+ } else {
+ if (dst_map->holelistenabled) {
+ hole_entry = (vm_map_entry_t)dst_map->holes_list;
+
+ if (hole_entry == NULL) {
+ /*
+ * No more space in the map?
+ */
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+
+ last = hole_entry;
+ start = last->vme_start;
+ } else {
+ assert(first_free_is_valid(dst_map));
+ start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
+ vm_map_min(dst_map) : last->vme_end;
+ }
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(dst_map));
+ }
+
+ while (TRUE) {
+ vm_map_entry_t next = last->vme_next;
+ vm_map_offset_t end = start + size;
+
+ if ((end > dst_map->max_offset) || (end < start)) {
+ if (dst_map->wait_for_space) {
+ if (size <= (dst_map->max_offset - dst_map->min_offset)) {
+ assert_wait((event_t) dst_map,
+ THREAD_INTERRUPTIBLE);
+ vm_map_unlock(dst_map);
+ thread_block(THREAD_CONTINUE_NULL);
+ goto StartAgain;
+ }
+ }
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+
+ if (dst_map->holelistenabled) {
+ if (last->vme_end >= end)
+ break;
+ } else {
+ /*
+ * If there are no more entries, we must win.
+ *
+ * OR
+ *
+ * If there is another entry, it must be
+ * after the end of the potential new region.
+ */
+
+ if (next == vm_map_to_entry(dst_map))
+ break;
+
+ if (next->vme_start >= end)
+ break;
+ }
+
+ last = next;
+
+ if (dst_map->holelistenabled) {
+ if (last == (vm_map_entry_t) dst_map->holes_list) {
+ /*
+ * Wrapped around
+ */
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+ start = last->vme_start;
+ } else {
+ start = last->vme_end;
+ }
+ start = vm_map_round_page(start,
+ VM_MAP_PAGE_MASK(dst_map));
+ }
+
+ if (dst_map->holelistenabled) {
+ if (vm_map_lookup_entry(dst_map, last->vme_start, &last)) {
+ panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", last, (unsigned long long)last->vme_start);
+ }
+ }
+
+
+ adjustment = start - vm_copy_start;
+ if (! consume_on_success) {
+ /*
+ * We're not allowed to consume "copy", so we'll have to
+ * copy its map entries into the destination map below.
+ * No need to re-allocate map entries from the correct
+ * (pageable or not) zone, since we'll get new map entries
+ * during the transfer.
+ * We'll also adjust the map entries's "start" and "end"
+ * during the transfer, to keep "copy"'s entries consistent
+ * with its "offset".
+ */
+ goto after_adjustments;
+ }
+
+ /*
+ * Since we're going to just drop the map
+ * entries from the copy into the destination
+ * map, they must come from the same pool.
+ */
+
+ if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
+ /*
+ * Mismatches occur when dealing with the default
+ * pager.
+ */
+ zone_t old_zone;
+ vm_map_entry_t next, new;
+
+ /*
+ * Find the zone that the copies were allocated from
+ */
+
+ entry = vm_map_copy_first_entry(copy);
+
+ /*
+ * Reinitialize the copy so that vm_map_copy_entry_link
+ * will work.
+ */
+ vm_map_store_copy_reset(copy, entry);
+ copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+
+ /*
+ * Copy each entry.
+ */
+ while (entry != vm_map_copy_to_entry(copy)) {
+ new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+ vm_map_entry_copy_full(new, entry);
+ assert(!new->iokit_acct);
+ if (new->is_sub_map) {
+ /* clr address space specifics */
+ new->use_pmap = FALSE;
+ }
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_last_entry(copy),
+ new);
+ next = entry->vme_next;
+ old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone;
+ zfree(old_zone, entry);
+ entry = next;
+ }
+ }
+
+ /*
+ * Adjust the addresses in the copy chain, and
+ * reset the region attributes.
+ */
+
+ for (entry = vm_map_copy_first_entry(copy);
+ entry != vm_map_copy_to_entry(copy);
+ entry = entry->vme_next) {
+ if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) {
+ /*
+ * We're injecting this copy entry into a map that
+ * has the standard page alignment, so clear
+ * "map_aligned" (which might have been inherited
+ * from the original map entry).
+ */
+ entry->map_aligned = FALSE;
+ }
+
+ entry->vme_start += adjustment;
+ entry->vme_end += adjustment;
+
+ if (entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,
+ VM_MAP_PAGE_MASK(dst_map)));
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,
+ VM_MAP_PAGE_MASK(dst_map)));
+ }
+
+ entry->inheritance = VM_INHERIT_DEFAULT;
+ entry->protection = VM_PROT_DEFAULT;
+ entry->max_protection = VM_PROT_ALL;
+ entry->behavior = VM_BEHAVIOR_DEFAULT;
+
+ /*
+ * If the entry is now wired,
+ * map the pages into the destination map.
+ */
+ if (entry->wired_count != 0) {
+ vm_map_offset_t va;
+ vm_object_offset_t offset;
+ vm_object_t object;
+ vm_prot_t prot;
+ int type_of_fault;
+
+ object = VME_OBJECT(entry);
+ offset = VME_OFFSET(entry);
+ va = entry->vme_start;
+
+ pmap_pageable(dst_map->pmap,
+ entry->vme_start,
+ entry->vme_end,
+ TRUE);
+
+ while (va < entry->vme_end) {
+ vm_page_t m;
+
+ /*
+ * Look up the page in the object.
+ * Assert that the page will be found in the
+ * top object:
+ * either
+ * the object was newly created by
+ * vm_object_copy_slowly, and has
+ * copies of all of the pages from
+ * the source object
+ * or
+ * the object was moved from the old
+ * map entry; because the old map
+ * entry was wired, all of the pages
+ * were in the top-level object.
+ * (XXX not true if we wire pages for
+ * reading)
+ */
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) ||
+ m->absent)
+ panic("vm_map_copyout: wiring %p", m);
+
+ /*
+ * ENCRYPTED SWAP:
+ * The page is assumed to be wired here, so it
+ * shouldn't be encrypted. Otherwise, we
+ * couldn't enter it in the page table, since
+ * we don't want the user to see the encrypted
+ * data.
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+
+ prot = entry->protection;
+
+ if (override_nx(dst_map, VME_ALIAS(entry)) &&
+ prot)
+ prot |= VM_PROT_EXECUTE;
+
+ type_of_fault = DBG_CACHE_HIT_FAULT;
+
+ vm_fault_enter(m, dst_map->pmap, va, prot, prot,
+ VM_PAGE_WIRED(m), FALSE, FALSE,
+ FALSE, VME_ALIAS(entry),
+ ((entry->iokit_acct ||
+ (!entry->is_sub_map &&
+ !entry->use_pmap))
+ ? PMAP_OPTIONS_ALT_ACCT
+ : 0),
+ NULL, &type_of_fault);
+
+ vm_object_unlock(object);
+
+ offset += PAGE_SIZE_64;
+ va += PAGE_SIZE;
+ }
+ }
+ }
+
+after_adjustments:
+
+ /*
+ * Correct the page alignment for the result
+ */
+
+ *dst_addr = start + (copy->offset - vm_copy_start);
+
+ /*
+ * Update the hints and the map size
+ */
+
+ if (consume_on_success) {
+ SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
+ } else {
+ SAVE_HINT_MAP_WRITE(dst_map, last);
+ }
+
+ dst_map->size += size;
+
+ /*
+ * Link in the copy
+ */
+
+ if (consume_on_success) {
+ vm_map_copy_insert(dst_map, last, copy);
+ } else {
+ vm_map_copy_remap(dst_map, last, copy, adjustment,
+ cur_protection, max_protection,
+ inheritance);
+ }
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * XXX If wiring_required, call vm_map_pageable
+ */
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: vm_map_copyin
+ *
+ * Description:
+ * see vm_map_copyin_common. Exported via Unsupported.exports.
+ *
+ */
+
+#undef vm_map_copyin
+
+kern_return_t
+vm_map_copyin(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result) /* OUT */
+{
+ return(vm_map_copyin_common(src_map, src_addr, len, src_destroy,
+ FALSE, copy_result, FALSE));
+}
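+
+/*
+ * Illustrative round-trip sketch (hypothetical caller, not part of this
+ * change):
+ *
+ *     vm_map_copy_t copy;
+ *     kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
+ *     if (kr == KERN_SUCCESS)
+ *             kr = vm_map_copyout(dst_map, &dst_addr, copy);
+ *
+ * On success, vm_map_copyout() consumes "copy"; on failure the caller
+ * still owns it and should vm_map_copy_discard() it.
+ */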
+
+/*
+ * Routine: vm_map_copyin_common
+ *
+ * Description:
+ * Copy the specified region (src_addr, len) from the
+ * source address space (src_map), possibly removing
+ * the region from the source address space (src_destroy).
+ *
+ * Returns:
+ * A vm_map_copy_t object (copy_result), suitable for
+ * insertion into another address space (using vm_map_copyout),
+ * copying over another address space region (using
+ * vm_map_copy_overwrite). If the copy is unused, it
+ * should be destroyed (using vm_map_copy_discard).
+ *
+ * In/out conditions:
+ * The source map should not be locked on entry.
+ */
+
+typedef struct submap_map {
+ vm_map_t parent_map;
+ vm_map_offset_t base_start;
+ vm_map_offset_t base_end;
+ vm_map_size_t base_len;
+ struct submap_map *next;
+} submap_map_t;
+
+kern_return_t
+vm_map_copyin_common(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ boolean_t src_destroy,
+ __unused boolean_t src_volatile,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t use_maxprot)
+{
+ int flags;
+
+ flags = 0;
+ if (src_destroy) {
+ flags |= VM_MAP_COPYIN_SRC_DESTROY;
+ }
+ if (use_maxprot) {
+ flags |= VM_MAP_COPYIN_USE_MAXPROT;
+ }
+ return vm_map_copyin_internal(src_map,
+ src_addr,
+ len,
+ flags,
+ copy_result);
+}
+kern_return_t
+vm_map_copyin_internal(
+ vm_map_t src_map,
+ vm_map_address_t src_addr,
+ vm_map_size_t len,
+ int flags,
+ vm_map_copy_t *copy_result) /* OUT */
+{
+ vm_map_entry_t tmp_entry; /* Result of last map lookup --
+ * in multi-level lookup, this
+ * entry contains the actual
+ * vm_object/offset.
+ */
+ vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */
+
+ vm_map_offset_t src_start; /* Start of current entry --
+ * where copy is taking place now
+ */
+ vm_map_offset_t src_end; /* End of entire region to be
+ * copied */
+ vm_map_offset_t src_base;
+ vm_map_t base_map = src_map;
+ boolean_t map_share=FALSE;
+ submap_map_t *parent_maps = NULL;
+
+ vm_map_copy_t copy; /* Resulting copy */
+ vm_map_address_t copy_addr;
+ vm_map_size_t copy_size;
+ boolean_t src_destroy;
+ boolean_t use_maxprot;
+ boolean_t preserve_purgeable;
+
+ if (flags & ~VM_MAP_COPYIN_ALL_FLAGS) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ src_destroy = (flags & VM_MAP_COPYIN_SRC_DESTROY) ? TRUE : FALSE;
+ use_maxprot = (flags & VM_MAP_COPYIN_USE_MAXPROT) ? TRUE : FALSE;
+ preserve_purgeable =
+ (flags & VM_MAP_COPYIN_PRESERVE_PURGEABLE) ? TRUE : FALSE;
+
+ /*
+ * Check for copies of zero bytes.
+ */
+
+ if (len == 0) {
+ *copy_result = VM_MAP_COPY_NULL;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check that the end address doesn't overflow
+ */
+ src_end = src_addr + len;
+ if (src_end < src_addr)
+ return KERN_INVALID_ADDRESS;
+
+ /*
+ * Compute (page aligned) start and end of region
+ */
+ src_start = vm_map_trunc_page(src_addr,
+ VM_MAP_PAGE_MASK(src_map));
+ src_end = vm_map_round_page(src_end,
+ VM_MAP_PAGE_MASK(src_map));
+
+ /*
+ * If the copy is sufficiently small, use a kernel buffer instead
+ * of making a virtual copy. The theory being that the cost of
+ * setting up VM (and taking C-O-W faults) dominates the copy costs
+ * for small regions.
+ */
+ if ((len < msg_ool_size_small) &&
+ !use_maxprot &&
+ !preserve_purgeable &&
+ !(flags & VM_MAP_COPYIN_ENTRY_LIST) &&
+ /*
+ * Since the "msg_ool_size_small" threshold was increased and
+ * vm_map_copyin_kernel_buffer() doesn't handle accesses beyond the
+ * address space limits, we revert to doing a virtual copy if the
+ * copied range goes beyond those limits. Otherwise, mach_vm_read()
+ * of the commpage would now fail when it used to work.
+ */
+ (src_start >= vm_map_min(src_map) &&
+ src_start < vm_map_max(src_map) &&
+ src_end >= vm_map_min(src_map) &&
+ src_end < vm_map_max(src_map)))
+ return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
+ src_destroy, copy_result);
+
+ XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0);
+
+ /*
+ * Allocate a header element for the list.
+ *
+ * Use the start and end in the header to
+ * remember the endpoints prior to rounding.
+ */
+
+ copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = TRUE;
+#if 00
+ copy->cpy_hdr.page_shift = src_map->hdr.page_shift;
+#else
+ /*
+ * The copy entries can be broken down for a variety of reasons,
+ * so we can't guarantee that they will remain map-aligned...
+ * Will need to adjust the first copy_entry's "vme_start" and
+ * the last copy_entry's "vme_end" to be rounded to PAGE_MASK
+ * rather than the original map's alignment.
+ */
+ copy->cpy_hdr.page_shift = PAGE_SHIFT;
+#endif
+
+ vm_map_store_init( &(copy->cpy_hdr) );
+
+ copy->offset = src_addr;
+ copy->size = len;
+
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+
+#define RETURN(x) \
+ MACRO_BEGIN \
+ vm_map_unlock(src_map); \
+ if(src_map != base_map) \
+ vm_map_deallocate(src_map); \
+ if (new_entry != VM_MAP_ENTRY_NULL) \
+ vm_map_copy_entry_dispose(copy,new_entry); \
+ vm_map_copy_discard(copy); \
+ { \
+ submap_map_t *_ptr; \
+ \
+ for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \
+ parent_maps=parent_maps->next; \
+ if (_ptr->parent_map != base_map) \
+ vm_map_deallocate(_ptr->parent_map); \
+ kfree(_ptr, sizeof(submap_map_t)); \
+ } \
+ } \
+ MACRO_RETURN(x); \
+ MACRO_END
+
+ /*
+ * Find the beginning of the region.
+ */
+
+ vm_map_lock(src_map);
+
+ /*
+ * Lookup the original "src_addr" rather than the truncated
+ * "src_start", in case "src_start" falls in a non-map-aligned
+ * map entry *before* the map entry that contains "src_addr"...
+ */
+ if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry))
+ RETURN(KERN_INVALID_ADDRESS);
+ if(!tmp_entry->is_sub_map) {
+ /*
+ * ... but clip to the map-rounded "src_start" rather than
+ * "src_addr" to preserve map-alignment. We'll adjust the
+ * first copy entry at the end, if needed.
+ */
+ vm_map_clip_start(src_map, tmp_entry, src_start);
+ }
+ if (src_start < tmp_entry->vme_start) {
+ /*
+ * Move "src_start" up to the start of the
+ * first map entry to copy.
+ */
+ src_start = tmp_entry->vme_start;
+ }
+ /* set for later submap fix-up */
+ copy_addr = src_start;
+
+ /*
+ * Go through entries until we get to the end.
+ */
+
+ while (TRUE) {
+ vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
+ vm_map_size_t src_size; /* Size of source
+ * map entry (in both
+ * maps)
+ */
+
+ vm_object_t src_object; /* Object to copy */
+ vm_object_offset_t src_offset;
+
+ boolean_t src_needs_copy; /* Should source map
+ * be made read-only
+ * for copy-on-write?
+ */
+
+ boolean_t new_entry_needs_copy; /* Will new entry be COW? */
+
+ boolean_t was_wired; /* Was source wired? */
+ vm_map_version_t version; /* Version before locks
+ * dropped to make copy
+ */
+ kern_return_t result; /* Return value from
+ * copy_strategically.
+ */
+ while(tmp_entry->is_sub_map) {
+ vm_map_size_t submap_len;
+ submap_map_t *ptr;
+
+ ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
+ ptr->next = parent_maps;
+ parent_maps = ptr;
+ ptr->parent_map = src_map;
+ ptr->base_start = src_start;
+ ptr->base_end = src_end;
+ submap_len = tmp_entry->vme_end - src_start;
+ if(submap_len > (src_end-src_start))
+ submap_len = src_end-src_start;
+ ptr->base_len = submap_len;
+
+ src_start -= tmp_entry->vme_start;
+ src_start += VME_OFFSET(tmp_entry);
+ src_end = src_start + submap_len;
+ src_map = VME_SUBMAP(tmp_entry);
+ vm_map_lock(src_map);
+ /* keep an outstanding reference for all maps in */
+ /* the parents tree except the base map */
+ vm_map_reference(src_map);
+ vm_map_unlock(ptr->parent_map);
+ if (!vm_map_lookup_entry(
+ src_map, src_start, &tmp_entry))
+ RETURN(KERN_INVALID_ADDRESS);
+ map_share = TRUE;
+ if(!tmp_entry->is_sub_map)
+ vm_map_clip_start(src_map, tmp_entry, src_start);
+ src_entry = tmp_entry;
+ }
+ /* we are now in the lowest level submap... */
+
+ if ((VME_OBJECT(tmp_entry) != VM_OBJECT_NULL) &&
+ (VME_OBJECT(tmp_entry)->phys_contiguous)) {
+ /* This is not supported for now. In the future */
+ /* we will need to detect the phys_contiguous */
+ /* condition and then upgrade copy_slowly to do */
+ /* a physical copy from the device-memory-based */
+ /* object. We can piggy-back off of the was_wired */
+ /* boolean to set up the proper handling. */
+ RETURN(KERN_PROTECTION_FAILURE);
+ }
+ /*
+ * Create a new address map entry to hold the result.
+ * Fill in the fields from the appropriate source entries.
+ * We must unlock the source map to do this if we need
+ * to allocate a map entry.
+ */
+ if (new_entry == VM_MAP_ENTRY_NULL) {
+ version.main_timestamp = src_map->timestamp;
+ vm_map_unlock(src_map);
+
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+
+ vm_map_lock(src_map);
+ if ((version.main_timestamp + 1) != src_map->timestamp) {
+ if (!vm_map_lookup_entry(src_map, src_start,
+ &tmp_entry)) {
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+ if (!tmp_entry->is_sub_map)
+ vm_map_clip_start(src_map, tmp_entry, src_start);
+ continue; /* restart w/ new tmp_entry */
+ }
+ }
+
+ /*
+ * Verify that the region can be read.
+ */
+ if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
+ !use_maxprot) ||
+ (src_entry->max_protection & VM_PROT_READ) == 0)
+ RETURN(KERN_PROTECTION_FAILURE);
+
+ /*
+ * Clip against the endpoints of the entire region.
+ */
+
+ vm_map_clip_end(src_map, src_entry, src_end);
+
+ src_size = src_entry->vme_end - src_start;
+ src_object = VME_OBJECT(src_entry);
+ src_offset = VME_OFFSET(src_entry);
+ was_wired = (src_entry->wired_count != 0);
+
+ vm_map_entry_copy(new_entry, src_entry);
+ if (new_entry->is_sub_map) {
+ /* clr address space specifics */
+ new_entry->use_pmap = FALSE;
+ }
+
+ /*
+ * Attempt non-blocking copy-on-write optimizations.
+ */
+
+ if (src_destroy &&
+ (src_object == VM_OBJECT_NULL ||
+ (src_object->internal &&
+ src_object->copy_strategy != MEMORY_OBJECT_COPY_DELAY &&
+ !src_object->true_share &&
+ !map_share))) {
+ /*
+ * If we are destroying the source, and the object
+ * is internal, we can move the object reference
+ * from the source to the copy. The copy is
+ * copy-on-write only if the source is.
+ * We make another reference to the object, because
+ * destroying the source entry will deallocate it.
+ */
+ vm_object_reference(src_object);
+
+ /*
+ * Copy is always unwired. vm_map_copy_entry
+ * set its wired count to zero.
+ */
+
+ goto CopySuccessful;
+ }
+
+
+ RestartCopy:
+ XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
+ src_object, new_entry, VME_OBJECT(new_entry),
+ was_wired, 0);
+ if ((src_object == VM_OBJECT_NULL ||
+ (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+ vm_object_copy_quickly(
+ &VME_OBJECT(new_entry),
+ src_offset,
+ src_size,
+ &src_needs_copy,
+ &new_entry_needs_copy)) {
+
+ new_entry->needs_copy = new_entry_needs_copy;
+
+ /*
+ * Handle copy-on-write obligations
+ */
+
+ if (src_needs_copy && !tmp_entry->needs_copy) {
+ vm_prot_t prot;
+
+ prot = src_entry->protection & ~VM_PROT_WRITE;