+ /*
+ *
+ * Make a second pass, overwriting the data
+ * At the beginning of each loop iteration,
+ * the next entry to be overwritten is "tmp_entry"
+ * (initially, the value returned from the lookup above),
+ * and the starting address expected in that entry
+ * is "start".
+ */
+
+ total_size = copy->size;
+ if(encountered_sub_map) {
+ copy_size = 0;
+ /* re-calculate tmp_entry since we've had the map */
+ /* unlocked */
+ if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ copy_size = copy->size;
+ }
+
+ base_addr = dst_addr;
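+	/*
+	 * Each pass of this loop overwrites one chunk of the destination:
+	 * runs of ordinary entries are handed to the aligned/unaligned
+	 * copy routines below, while sub-maps are handled by recursion.
+	 */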
+ while(TRUE) {
+		/* deconstruct the copy object and do it in parts */
+		/* only in the sub_map, interruptible case */
+ vm_map_entry_t copy_entry;
+ vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL;
+ vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL;
+ int nentries;
+ int remaining_entries = 0;
+ vm_map_offset_t new_offset = 0;
+
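+		/*
+		 * Walk the destination entries starting at tmp_entry while
+		 * copy_size is zero: either compute copy_size for a run of
+		 * ordinary entries (stopping at the next sub-map or at
+		 * dst_end), or recurse into a sub-map from within the loop.
+		 */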
+ for (entry = tmp_entry; copy_size == 0;) {
+ vm_map_entry_t next;
+
+ next = entry->vme_next;
+
+			/* tmp_entry and the base address are moved along */
+			/* each time we encounter a sub-map.  Otherwise */
+			/* entry can outpace tmp_entry, and the copy_size */
+			/* may reflect the distance between them. */
+			/* If the current entry is found to be in transition, */
+			/* we will start over at the beginning or at the last */
+			/* encountered sub-map, as dictated by base_addr, */
+			/* and we will zero copy_size accordingly. */
+ if (entry->in_transition) {
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ if(!vm_map_lookup_entry(dst_map, base_addr,
+ &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ copy_size = 0;
+ entry = tmp_entry;
+ continue;
+ }
+ if(entry->is_sub_map) {
+ vm_map_offset_t sub_start;
+ vm_map_offset_t sub_end;
+ vm_map_offset_t local_end;
+
+ if (entry->needs_copy) {
+				/* if this is a COW submap */
+				/* just back the range with an */
+				/* anonymous entry */
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ if(entry->vme_start < base_addr)
+ sub_start = base_addr;
+ else
+ sub_start = entry->vme_start;
+ vm_map_clip_end(
+ dst_map, entry, sub_end);
+ vm_map_clip_start(
+ dst_map, entry, sub_start);
+ assert(!entry->use_pmap);
+ entry->is_sub_map = FALSE;
+ vm_map_deallocate(
+ entry->object.sub_map);
+ entry->object.sub_map = NULL;
+ entry->is_shared = FALSE;
+ entry->needs_copy = FALSE;
+ entry->offset = 0;
+ /*
+ * XXX FBDP
+ * We should propagate the protections
+ * of the submap entry here instead
+ * of forcing them to VM_PROT_ALL...
+ * Or better yet, we should inherit
+ * the protection of the copy_entry.
+ */
+ entry->protection = VM_PROT_ALL;
+ entry->max_protection = VM_PROT_ALL;
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ if(entry->inheritance
+ == VM_INHERIT_SHARE)
+ entry->inheritance = VM_INHERIT_COPY;
+ continue;
+ }
+ /* first take care of any non-sub_map */
+ /* entries to send */
+ if(base_addr < entry->vme_start) {
+ /* stuff to send */
+ copy_size =
+ entry->vme_start - base_addr;
+ break;
+ }
+ sub_start = entry->offset;
+
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ sub_end -= entry->vme_start;
+ sub_end += entry->offset;
+ local_end = entry->vme_end;
+ vm_map_unlock(dst_map);
+ copy_size = sub_end - sub_start;
+
+ /* adjust the copy object */
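+				/* clip the copy chain so that only copy_size */
+				/* bytes are handed to the sub-map; the rest is */
+				/* stashed in next_copy/previous_prev and put */
+				/* back after the recursive overwrite */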
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ nentries = 1;
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry !=
+ vm_map_copy_to_entry(copy)){
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy,
+ copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
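+			/*
+			 * Recurse into the sub-map: use its nested pmap when
+			 * use_pmap is set and no pmap was passed in, otherwise
+			 * the caller's pmap, falling back to dst_map's pmap.
+			 */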
+ if((entry->use_pmap) && (pmap == NULL)) {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible,
+ entry->object.sub_map->pmap);
+ } else if (pmap != NULL) {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible, pmap);
+ } else {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible,
+ dst_map->pmap);
+ }
+ if(kr != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev
+ = previous_prev;
+ copy->size = total_size;
+ }
+ return kr;
+ }
+ if (dst_end <= local_end) {
+ return(KERN_SUCCESS);
+ }
+ /* otherwise copy no longer exists, it was */
+ /* destroyed after successful copy_overwrite */
+ copy = (vm_map_copy_t)
+ zalloc(vm_map_copy_zone);
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) =
+ vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->offset = new_offset;
+
+ total_size -= copy_size;
+ copy_size = 0;
+ /* put back remainder of copy in container */
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ copy->size = total_size;
+ next_copy->vme_prev =
+ vm_map_copy_to_entry(copy);
+ next_copy = NULL;
+ }
+ base_addr = local_end;
+ vm_map_lock(dst_map);
+ if(!vm_map_lookup_entry(dst_map,
+ local_end, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ entry = tmp_entry;
+ continue;
+ }
+ if (dst_end <= entry->vme_end) {
+ copy_size = dst_end - base_addr;
+ break;
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ entry = next;
+ }/* for */
+
+ next_copy = NULL;
+ nentries = 1;
+
+ /* adjust the copy object */
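+		/* same as above: restrict the copy chain to copy_size */
+		/* bytes and remember the deferred remainder */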
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry != vm_map_copy_to_entry(copy)) {
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
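+		/*
+		 * Overwrite this chunk with the (possibly clipped) copy,
+		 * using the aligned or unaligned routine as appropriate.
+		 */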
+ if (aligned) {
+ pmap_t local_pmap;
+
+ if(pmap)
+ local_pmap = pmap;
+ else
+ local_pmap = dst_map->pmap;
+
+ if ((kr = vm_map_copy_overwrite_aligned(
+ dst_map, tmp_entry, copy,
+ base_addr, local_pmap)) != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ vm_map_unlock(dst_map);
+ } else {
+ /*
+ * Performance gain:
+ *
+		 * If the copy and dst address are misaligned but have the
+		 * same offset within the page, we can copy_not_aligned the
+		 * misaligned parts and copy aligned the rest.  If they are
+		 * aligned but len is unaligned we simply need to copy
+		 * the end bit unaligned.  We'll need to split the misaligned
+		 * bits of the region in this case!
+ */
+ /* ALWAYS UNLOCKS THE dst_map MAP */
+ if ((kr = vm_map_copy_overwrite_unaligned( dst_map,
+ tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ }
+ total_size -= copy_size;
+ if(total_size == 0)
+ break;
+ base_addr += copy_size;
+ copy_size = 0;
+ copy->offset = new_offset;
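+		/* splice the deferred remainder of the copy chain back in */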
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ next_copy->vme_prev = vm_map_copy_to_entry(copy);
+ copy->size = total_size;
+ }
+ vm_map_lock(dst_map);
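+		/* re-lookup the next destination entry, waiting out any */
+		/* entries that are in transition */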
+ while(TRUE) {
+ if (!vm_map_lookup_entry(dst_map,
+ base_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ if (tmp_entry->in_transition) {
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+ } else {
+ break;
+ }
+ }
+ vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr));
+
+ entry = tmp_entry;
+ } /* while */
+
+ /*
+ * Throw away the vm_map_copy object
+ */
+ vm_map_copy_discard(copy);
+
+ return(KERN_SUCCESS);
+}/* vm_map_copy_overwrite */
+
+kern_return_t
+vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_map_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
+{
+ return vm_map_copy_overwrite_nested(
+ dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
+}
+
+
+/*
+ * Routine: vm_map_copy_overwrite_unaligned	[internal use only]
+ *
+ * Description:
+ * Physically copy unaligned data
+ *
+ * Implementation:
+ * Unaligned parts of pages have to be physically copied.  We use
+ * a modified form of vm_fault_copy (which understands non-aligned
+ * page offsets and sizes) to do the copy.  We attempt to copy as
+ * much memory in one go as possible; however, vm_fault_copy copies
+ * within one memory object, so we have to find the smallest of
+ * "amount left", "source object data size" and "target object data
+ * size".  With unaligned data we don't need to split regions,
+ * therefore the source (copy) object should be one map entry; the
+ * target range may be split over multiple map entries, however.
+ * In any event we are pessimistic about these assumptions.
+ *
+ * Assumptions:
+ * dst_map is locked on entry and is returned locked on success,
+ * unlocked on error.
+ */
+
+static kern_return_t
+vm_map_copy_overwrite_unaligned(
+ vm_map_t dst_map,
+ vm_map_entry_t entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start)
+{
+ vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_map_version_t version;
+ vm_object_t dst_object;
+ vm_object_offset_t dst_offset;
+ vm_object_offset_t src_offset;
+ vm_object_offset_t entry_offset;
+ vm_map_offset_t entry_end;
+ vm_map_size_t src_size,
+ dst_size,
+ copy_size,
+ amount_left;
+ kern_return_t kr = KERN_SUCCESS;
+
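+	/*
+	 * The map comes in write-locked; downgrade to a read lock for the
+	 * copy loop and upgrade only when an entry has to be modified.
+	 */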
+ vm_map_lock_write_to_read(dst_map);
+
+ src_offset = copy->offset - vm_object_trunc_page(copy->offset);
+ amount_left = copy->size;
+/*
+ * The copy is unaligned, so we never clipped this entry; we need the
+ * offset into the vm_object, not just the data.
+ */
+ while (amount_left > 0) {
+
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* "start" must be within the current map entry */
+ assert ((start>=entry->vme_start) && (start<entry->vme_end));
+
+ dst_offset = start - entry->vme_start;
+
+ dst_size = entry->vme_end - start;
+
+ src_size = copy_entry->vme_end -
+ (copy_entry->vme_start + src_offset);
+
+ if (dst_size < src_size) {
+/*
+ * we can only copy dst_size bytes before
+ * we have to get the next destination entry
+ */
+ copy_size = dst_size;
+ } else {
+/*
+ * we can only copy src_size bytes before
+ * we have to get the next source copy entry
+ */
+ copy_size = src_size;
+ }
+
+ if (copy_size > amount_left) {
+ copy_size = amount_left;
+ }
+/*
+ * Entry needs copy: create a shadow object for the
+ * copy-on-write region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0))
+ {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_map_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ vm_map_lock_write_to_read(dst_map);
+ }
+ dst_object = entry->object.vm_object;
+/*
+ * Unlike with the virtual (aligned) copy, we're going
+ * to fault on it; therefore we need a target object.
+ */
+ if (dst_object == VM_OBJECT_NULL) {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ dst_object = vm_object_allocate((vm_map_size_t)
+ entry->vme_end - entry->vme_start);
+ entry->object.vm_object = dst_object;
+ entry->offset = 0;
+ vm_map_lock_write_to_read(dst_map);
+ }
+/*
+ * Take an object reference and unlock map. The "entry" may
+ * disappear or change when the map is unlocked.
+ */
+ vm_object_reference(dst_object);
+ version.main_timestamp = dst_map->timestamp;
+ entry_offset = entry->offset;
+ entry_end = entry->vme_end;
+ vm_map_unlock_read(dst_map);
+/*
+ * Copy as much as possible in one pass
+ */
+ kr = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset + src_offset,
+			&copy_size,
+ dst_object,
+ entry_offset + dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ start += copy_size;
+ src_offset += copy_size;
+ amount_left -= copy_size;
+/*
+ * Release the object reference
+ */
+ vm_object_deallocate(dst_object);
+/*
+ * If a hard error occurred, return it now
+ */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
+ || amount_left == 0)
+ {
+/*
+ * all done with this copy entry, dispose.
+ */
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ if ((copy_entry = vm_map_copy_first_entry(copy))
+ == vm_map_copy_to_entry(copy) && amount_left) {
+/*
+ * not finished copying but ran out of source
+ */
+ return KERN_INVALID_ADDRESS;
+ }
+ src_offset = 0;
+ }
+
+ if (amount_left == 0)
+ return KERN_SUCCESS;
+
+ vm_map_lock_read(dst_map);
+ if (version.main_timestamp == dst_map->timestamp) {
+ if (start == entry_end) {
+/*
+ * destination region is split. Use the version
+ * information to avoid a lookup in the normal
+ * case.
+ */
+ entry = entry->vme_next;
+/*
+ * should be contiguous. Fail if we encounter
+ * a hole in the destination.
+ */
+ if (start != entry->vme_start) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ } else {
+/*
+ * Map version check failed.
+ * We must look up the entry because somebody
+ * might have changed the map behind our backs.
+ */
+ RetryLookup:
+ if (!vm_map_lookup_entry(dst_map, start, &entry))
+ {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ }/* while */
+
+ return KERN_SUCCESS;
+}/* vm_map_copy_overwrite_unaligned */
+
+/*
+ * Routine: vm_map_copy_overwrite_aligned [internal use only]
+ *
+ * Description:
+ * Does all the vm_trickery possible for whole pages.
+ *
+ * Implementation:
+ *
+ * If there are no permanent objects in the destination,
+ * and the source and destination map entry zones match,
+ * and the destination map entry is not shared,
+ * then the map entries can be deleted and replaced
+ * with those from the copy. The following code is the
+ * basic idea of what to do, but there are lots of annoying
+ * little details about getting protection and inheritance
+ * right. Should add protection, inheritance, and sharing checks
+ * to the above pass and make sure that no wiring is involved.
+ */
+
+static kern_return_t
+vm_map_copy_overwrite_aligned(
+ vm_map_t dst_map,
+ vm_map_entry_t tmp_entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start,
+ __unused pmap_t pmap)
+{
+ vm_object_t object;
+ vm_map_entry_t copy_entry;
+ vm_map_size_t copy_size;
+ vm_map_size_t size;
+ vm_map_entry_t entry;
+
+ while ((copy_entry = vm_map_copy_first_entry(copy))
+ != vm_map_copy_to_entry(copy))
+ {
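+		/*
+		 * For each copy entry, either install the source object in
+		 * the destination entry (virtual copy) or fall back to a
+		 * physical copy through vm_fault_copy.
+		 */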
+ copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+
+ entry = tmp_entry;
+ assert(!entry->use_pmap); /* unnested when clipped earlier */
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+ size = (entry->vme_end - entry->vme_start);
+ /*
+ * Make sure that no holes popped up in the
+ * address map, and that the protection is
+ * still valid, in case the map was unlocked
+ * earlier.
+ */
+
+ if ((entry->vme_start != start) || ((entry->is_sub_map)
+ && !entry->needs_copy)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ assert(entry != vm_map_to_entry(dst_map));
+
+ /*
+ * Check protection again
+ */
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * Adjust to source size first
+ */
+
+ if (copy_size < size) {
+ vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
+ size = copy_size;
+ }
+
+ /*
+ * Adjust to destination size
+ */
+
+ if (size < copy_size) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + size);
+ copy_size = size;
+ }
+
+ assert((entry->vme_end - entry->vme_start) == size);
+ assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
+ assert((copy_entry->vme_end - copy_entry->vme_start) == size);
+
+ /*
+ * If the destination contains temporary unshared memory,
+ * we can perform the copy by throwing it away and
+ * installing the source data.
+ */
+
+ object = entry->object.vm_object;
+ if ((!entry->is_shared &&
+ ((object == VM_OBJECT_NULL) ||
+ (object->internal && !object->true_share))) ||
+ entry->needs_copy) {
+ vm_object_t old_object = entry->object.vm_object;
+ vm_object_offset_t old_offset = entry->offset;
+ vm_object_offset_t offset;
+
+ /*
+ * Ensure that the source and destination aren't
+ * identical
+ */
+ if (old_object == copy_entry->object.vm_object &&
+ old_offset == copy_entry->offset) {
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ if (old_object != VM_OBJECT_NULL)
+ vm_object_deallocate(old_object);
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ continue;
+ }
+
+ if (old_object != VM_OBJECT_NULL) {
+ if(entry->is_sub_map) {
+ if(entry->use_pmap) {
+#ifndef NO_NESTED_PMAP
+ pmap_unnest(dst_map->pmap,
+ (addr64_t)entry->vme_start,
+ entry->vme_end - entry->vme_start);
+#endif /* NO_NESTED_PMAP */
+ if(dst_map->mapped) {
+ /* clean up parent */
+ /* map/maps */
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ entry->object.sub_map,
+ entry->offset);
+ }
+ } else {
+ vm_map_submap_pmap_clean(
+ dst_map, entry->vme_start,
+ entry->vme_end,
+ entry->object.sub_map,
+ entry->offset);
+ }
+ vm_map_deallocate(
+ entry->object.sub_map);
+ } else {
+ if(dst_map->mapped) {
+ vm_object_pmap_protect(
+ entry->object.vm_object,
+ entry->offset,
+ entry->vme_end
+ - entry->vme_start,
+ PMAP_NULL,
+ entry->vme_start,
+ VM_PROT_NONE);
+ } else {
+ pmap_remove(dst_map->pmap,
+ (addr64_t)(entry->vme_start),
+ (addr64_t)(entry->vme_end));
+ }
+ vm_object_deallocate(old_object);
+ }
+ }
+
+ entry->is_sub_map = FALSE;
+ entry->object = copy_entry->object;
+ object = entry->object.vm_object;
+ entry->needs_copy = copy_entry->needs_copy;
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ offset = entry->offset = copy_entry->offset;
+
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ /*
+			 * We could try to push pages into the pmap at this point, BUT
+			 * this optimization only saved on average 2 us per page if ALL
+			 * the pages in the source were currently mapped and ALL the
+			 * pages in the dest were touched.  If fewer than 2/3 of the
+			 * pages were touched, this optimization actually cost more
+			 * cycles.  It also puts a lot of pressure on the pmap layer
+			 * with respect to mapping structures.
+ */
+
+ /*
+ * Set up for the next iteration. The map
+ * has not been unlocked, so the next
+ * address should be at the end of this
+ * entry, and the next map entry should be
+ * the one following it.
+ */
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ vm_map_version_t version;
+ vm_object_t dst_object = entry->object.vm_object;
+ vm_object_offset_t dst_offset = entry->offset;
+ kern_return_t r;
+
+ /*
+ * Take an object reference, and record
+ * the map version information so that the
+ * map can be safely unlocked.
+ */
+
+ vm_object_reference(dst_object);
+
+ /* account for unlock bumping up timestamp */
+ version.main_timestamp = dst_map->timestamp + 1;
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * Copy as much as possible in one pass
+ */
+
+ copy_size = size;
+ r = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset,
+				&copy_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ /*
+ * Release the object reference
+ */
+
+ vm_object_deallocate(dst_object);
+
+ /*
+ * If a hard error occurred, return it now
+ */
+
+ if (r != KERN_SUCCESS)
+ return(r);
+
+ if (copy_size != 0) {
+ /*
+ * Dispose of the copied region
+ */
+
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + copy_size);
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ /*
+ * Pick up in the destination map where we left off.
+ *
+ * Use the version information to avoid a lookup
+ * in the normal case.
+ */
+
+ start += copy_size;
+ vm_map_lock(dst_map);
+ if (version.main_timestamp == dst_map->timestamp) {
+ /* We can safely use saved tmp_entry value */
+
+ vm_map_clip_end(dst_map, tmp_entry, start);
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ /* Must do lookup of tmp_entry */
+
+ if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, start);
+ }
+ }
+ }/* while */
+
+ return(KERN_SUCCESS);
+}/* vm_map_copy_overwrite_aligned */
+
+/*
+ * Routine: vm_map_copyin_kernel_buffer [internal use only]
+ *
+ * Description:
+ * Copy in data to a kernel buffer from space in the
+ * source map. The original space may be optionally
+ * deallocated.
+ *
+ * If successful, returns a new copy object.
+ */
+static kern_return_t
+vm_map_copyin_kernel_buffer(
+ vm_map_t src_map,
+ vm_map_offset_t src_addr,
+ vm_map_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result)
+{
+ kern_return_t kr;
+ vm_map_copy_t copy;
+ vm_size_t kalloc_size;
+
+ if ((vm_size_t) len != len) {
+ /* "len" is too big and doesn't fit in a "vm_size_t" */
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ kalloc_size = (vm_size_t) (sizeof(struct vm_map_copy) + len);
+ assert((vm_map_size_t) kalloc_size == sizeof (struct vm_map_copy) + len);
+
+ copy = (vm_map_copy_t) kalloc(kalloc_size);
+ if (copy == VM_MAP_COPY_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ copy->type = VM_MAP_COPY_KERNEL_BUFFER;
+ copy->size = len;
+ copy->offset = 0;
+ copy->cpy_kdata = (void *) (copy + 1);
+ copy->cpy_kalloc_size = kalloc_size;
+
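+	/* copy the source data directly into the buffer that sits */
+	/* inline, immediately after the vm_map_copy header */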
+ kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t) len);
+ if (kr != KERN_SUCCESS) {
+ kfree(copy, kalloc_size);
+ return kr;
+ }
+ if (src_destroy) {
+ (void) vm_map_remove(src_map, vm_map_trunc_page(src_addr),
+ vm_map_round_page(src_addr + len),
+			VM_MAP_REMOVE_INTERRUPTIBLE |
+			VM_MAP_REMOVE_WAIT_FOR_KWIRE |
+			((src_map == kernel_map) ?
+			 VM_MAP_REMOVE_KUNWIRE : 0));
+ }
+ *copy_result = copy;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: vm_map_copyout_kernel_buffer [internal use only]
+ *
+ * Description:
+ * Copy out data from a kernel buffer into space in the
+ *		destination map. The space may be optionally dynamically
+ * allocated.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
+static int vm_map_copyout_kernel_buffer_failures = 0;
+static kern_return_t
+vm_map_copyout_kernel_buffer(
+ vm_map_t map,
+ vm_map_address_t *addr, /* IN/OUT */
+ vm_map_copy_t copy,
+ boolean_t overwrite)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ thread_t thread = current_thread();
+
+ if (!overwrite) {
+
+ /*
+ * Allocate space in the target map for the data
+ */
+ *addr = 0;
+ kr = vm_map_enter(map,
+ addr,
+ vm_map_round_page(copy->size),
+ (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE,
+ VM_OBJECT_NULL,
+ (vm_object_offset_t) 0,
+ FALSE,
+ VM_PROT_DEFAULT,
+ VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ }
+
+ /*
+ * Copyout the data from the kernel buffer to the target map.
+ */
+ if (thread->map == map) {
+
+ /*
+ * If the target map is the current map, just do
+ * the copy.
+ */
+ assert((vm_size_t) copy->size == copy->size);
+ if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) {
+ kr = KERN_INVALID_ADDRESS;
+ }
+ }
+ else {
+ vm_map_t oldmap;
+
+ /*
+ * If the target map is another map, assume the
+ * target's address space identity for the duration
+ * of the copy.
+ */
+ vm_map_reference(map);
+ oldmap = vm_map_switch(map);
+
+ assert((vm_size_t) copy->size == copy->size);
+ if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) {
+ vm_map_copyout_kernel_buffer_failures++;
+ kr = KERN_INVALID_ADDRESS;
+ }
+
+ (void) vm_map_switch(oldmap);
+ vm_map_deallocate(map);
+ }
+
+ if (kr != KERN_SUCCESS) {
+ /* the copy failed, clean up */
+ if (!overwrite) {
+ /*
+ * Deallocate the space we allocated in the target map.
+ */
+ (void) vm_map_remove(map,
+ vm_map_trunc_page(*addr),
+ vm_map_round_page(*addr +
+ vm_map_round_page(copy->size)),
+ VM_MAP_NO_FLAGS);
+ *addr = 0;
+ }
+ } else {
+		/* copy was successful, discard the copy structure */
+ kfree(copy, copy->cpy_kalloc_size);
+ }
+
+ return kr;
+}
+
+/*
+ * Macro: vm_map_copy_insert
+ *
+ * Description:
+ * Link a copy chain ("copy") into a map at the
+ * specified location (after "where").
+ * Side effects:
+ * The copy chain is destroyed.
+ * Warning:
+ * The arguments are evaluated multiple times.
+ */
+#define vm_map_copy_insert(map, where, copy) \
+MACRO_BEGIN \
+ vm_map_t VMCI_map; \
+ vm_map_entry_t VMCI_where; \
+ vm_map_copy_t VMCI_copy; \
+ VMCI_map = (map); \
+ VMCI_where = (where); \
+ VMCI_copy = (copy); \
+ ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
+ ->vme_next = (VMCI_where->vme_next); \
+ ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
+ ->vme_prev = VMCI_where; \
+ VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
+ UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \
+ zfree(vm_map_copy_zone, VMCI_copy); \
+MACRO_END
+
+/*
+ * Routine: vm_map_copyout
+ *
+ * Description:
+ * Copy out a copy chain ("copy") into newly-allocated
+ * space in the destination map.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ */
+kern_return_t
+vm_map_copyout(
+ vm_map_t dst_map,
+ vm_map_address_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
+{
+ vm_map_size_t size;
+ vm_map_size_t adjustment;
+ vm_map_offset_t start;
+ vm_object_offset_t vm_copy_start;
+ vm_map_entry_t last;
+ register
+ vm_map_entry_t entry;
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL) {
+ *dst_addr = 0;
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check for special copy object, created
+ * by vm_map_copyin_object.
+ */
+
+ if (copy->type == VM_MAP_COPY_OBJECT) {
+ vm_object_t object = copy->cpy_object;
+ kern_return_t kr;
+ vm_object_offset_t offset;
+
+ offset = vm_object_trunc_page(copy->offset);
+ size = vm_map_round_page(copy->size +
+ (vm_map_size_t)(copy->offset - offset));
+ *dst_addr = 0;
+ kr = vm_map_enter(dst_map, dst_addr, size,
+ (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE,
+ object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ if (kr != KERN_SUCCESS)
+ return(kr);
+ /* Account for non-pagealigned copy object */
+ *dst_addr += (vm_map_offset_t)(copy->offset - offset);
+ zfree(vm_map_copy_zone, copy);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Check for special kernel buffer allocated
+ * by new_ipc_kmsg_copyin.
+ */
+
+ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
+ return(vm_map_copyout_kernel_buffer(dst_map, dst_addr,
+ copy, FALSE));
+ }
+
+ /*
+ * Find space for the data
+ */
+
+ vm_copy_start = vm_object_trunc_page(copy->offset);
+ size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size)
+ - vm_copy_start;
+
+StartAgain: ;
+
+ vm_map_lock(dst_map);
+ assert(first_free_is_valid(dst_map));
+ start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
+ vm_map_min(dst_map) : last->vme_end;
+
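+	/*
+	 * First-fit scan: walk forward from first_free until we find a
+	 * hole large enough for "size" bytes, optionally waiting for
+	 * space to free up if the map allows it.
+	 */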
+ while (TRUE) {
+ vm_map_entry_t next = last->vme_next;
+ vm_map_offset_t end = start + size;
+
+ if ((end > dst_map->max_offset) || (end < start)) {
+ if (dst_map->wait_for_space) {
+ if (size <= (dst_map->max_offset - dst_map->min_offset)) {
+ assert_wait((event_t) dst_map,
+ THREAD_INTERRUPTIBLE);
+ vm_map_unlock(dst_map);
+ thread_block(THREAD_CONTINUE_NULL);
+ goto StartAgain;
+ }
+ }
+ vm_map_unlock(dst_map);
+ return(KERN_NO_SPACE);
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start >= end))
+ break;
+
+ last = next;
+ start = last->vme_end;
+ }
+
+ /*
+ * Since we're going to just drop the map
+ * entries from the copy into the destination
+ * map, they must come from the same pool.
+ */
+
+ if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
+ /*
+ * Mismatches occur when dealing with the default
+ * pager.
+ */
+ zone_t old_zone;
+ vm_map_entry_t next, new;
+
+ /*
+ * Find the zone that the copies were allocated from
+ */
+ old_zone = (copy->cpy_hdr.entries_pageable)
+ ? vm_map_entry_zone
+ : vm_map_kentry_zone;
+ entry = vm_map_copy_first_entry(copy);
+
+ /*
+ * Reinitialize the copy so that vm_map_copy_entry_link
+ * will work.
+ */
+ copy->cpy_hdr.nentries = 0;
+ copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) =
+ vm_map_copy_to_entry(copy);
+
+ /*
+ * Copy each entry.
+ */
+ while (entry != vm_map_copy_to_entry(copy)) {
+ new = vm_map_copy_entry_create(copy);
+ vm_map_entry_copy_full(new, entry);
+ new->use_pmap = FALSE; /* clr address space specifics */
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_last_entry(copy),
+ new);
+ next = entry->vme_next;
+ zfree(old_zone, entry);
+ entry = next;
+ }
+ }
+
+ /*
+ * Adjust the addresses in the copy chain, and
+ * reset the region attributes.
+ */
+
+ adjustment = start - vm_copy_start;
+ for (entry = vm_map_copy_first_entry(copy);
+ entry != vm_map_copy_to_entry(copy);
+ entry = entry->vme_next) {
+ entry->vme_start += adjustment;
+ entry->vme_end += adjustment;
+
+ entry->inheritance = VM_INHERIT_DEFAULT;
+ entry->protection = VM_PROT_DEFAULT;
+ entry->max_protection = VM_PROT_ALL;
+ entry->behavior = VM_BEHAVIOR_DEFAULT;
+
+ /*
+ * If the entry is now wired,
+ * map the pages into the destination map.
+ */
+ if (entry->wired_count != 0) {
+ register vm_map_offset_t va;
+ vm_object_offset_t offset;
+ register vm_object_t object;
+ vm_prot_t prot;
+ int type_of_fault;
+
+ object = entry->object.vm_object;
+ offset = entry->offset;
+ va = entry->vme_start;
+
+ pmap_pageable(dst_map->pmap,
+ entry->vme_start,
+ entry->vme_end,
+ TRUE);
+
+ while (va < entry->vme_end) {
+ register vm_page_t m;
+
+ /*
+ * Look up the page in the object.
+ * Assert that the page will be found in the
+ * top object:
+ * either
+ * the object was newly created by
+ * vm_object_copy_slowly, and has
+ * copies of all of the pages from
+ * the source object
+ * or
+ * the object was moved from the old
+ * map entry; because the old map
+ * entry was wired, all of the pages
+ * were in the top-level object.
+ * (XXX not true if we wire pages for
+ * reading)
+ */
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) ||
+ m->absent)
+ panic("vm_map_copyout: wiring %p", m);
+
+ /*
+ * ENCRYPTED SWAP:
+ * The page is assumed to be wired here, so it
+ * shouldn't be encrypted. Otherwise, we
+ * couldn't enter it in the page table, since
+ * we don't want the user to see the encrypted
+ * data.
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+
+ prot = entry->protection;
+
+ if (override_nx(dst_map, entry->alias) && prot)
+ prot |= VM_PROT_EXECUTE;
+
+ type_of_fault = DBG_CACHE_HIT_FAULT;
+
+ vm_fault_enter(m, dst_map->pmap, va, prot,
+ VM_PAGE_WIRED(m), FALSE, FALSE,
+ &type_of_fault);
+
+ vm_object_unlock(object);
+
+ offset += PAGE_SIZE_64;
+ va += PAGE_SIZE;
+ }
+ }
+ }
+
+ /*
+ * Correct the page alignment for the result
+ */
+
+ *dst_addr = start + (copy->offset - vm_copy_start);
+
+ /*
+ * Update the hints and the map size
+ */
+
+ SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
+
+ dst_map->size += size;
+
+ /*
+ * Link in the copy
+ */
+
+ vm_map_copy_insert(dst_map, last, copy);
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * XXX If wiring_required, call vm_map_pageable
+ */