+ last_timestamp = map->timestamp;
+ }
+
+ if (map->wait_for_space)
+ thread_wakeup((event_t) map);
+ /*
+ * wake up anybody waiting on entries that we have already deleted.
+ */
+ if (need_wakeup)
+ vm_map_entry_wakeup(map);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_map_remove:
+ *
+ * Remove the given address range from the target map.
+ * This is the exported form of vm_map_delete.
+ */
+kern_return_t
+vm_map_remove(
+ register vm_map_t map,
+ register vm_map_offset_t start,
+ register vm_map_offset_t end,
+ register boolean_t flags)
+{
+ register kern_return_t result;
+
+ vm_map_lock(map);
+ VM_MAP_RANGE_CHECK(map, start, end);
+ result = vm_map_delete(map, start, end, flags, VM_MAP_NULL);
+ vm_map_unlock(map);
+
+ return(result);
+}
+
+
+/*
+ * Routine: vm_map_copy_discard
+ *
+ * Description:
+ * Dispose of a map copy object (returned by
+ * vm_map_copyin).
+ */
+void
+vm_map_copy_discard(
+ vm_map_copy_t copy)
+{
+ if (copy == VM_MAP_COPY_NULL)
+ return;
+
+ switch (copy->type) {
+ case VM_MAP_COPY_ENTRY_LIST:
+ while (vm_map_copy_first_entry(copy) !=
+ vm_map_copy_to_entry(copy)) {
+ vm_map_entry_t entry = vm_map_copy_first_entry(copy);
+
+ vm_map_copy_entry_unlink(copy, entry);
+ vm_object_deallocate(entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, entry);
+ }
+ break;
+ case VM_MAP_COPY_OBJECT:
+ vm_object_deallocate(copy->cpy_object);
+ break;
+ case VM_MAP_COPY_KERNEL_BUFFER:
+
+ /*
+ * The vm_map_copy_t and possibly the data buffer were
+ * allocated by a single call to kalloc(), i.e. the
+ * vm_map_copy_t was not allocated out of the zone.
+ */
+ kfree(copy, copy->cpy_kalloc_size);
+ return;
+ }
+ zfree(vm_map_copy_zone, copy);
+}
+
+/*
+ * Routine: vm_map_copy_copy
+ *
+ * Description:
+ * Move the information in a map copy object to
+ * a new map copy object, leaving the old one
+ * empty.
+ *
+ * This is used by kernel routines that need
+ * to look at out-of-line data (in copyin form)
+ * before deciding whether to return SUCCESS.
+ * If the routine returns FAILURE, the original
+ * copy object will be deallocated; therefore,
+ * these routines must make a copy of the copy
+ * object and leave the original empty so that
+ * deallocation will not fail.
+ */
+vm_map_copy_t
+vm_map_copy_copy(
+ vm_map_copy_t copy)
+{
+ vm_map_copy_t new_copy;
+
+ if (copy == VM_MAP_COPY_NULL)
+ return VM_MAP_COPY_NULL;
+
+ /*
+ * Allocate a new copy object, and copy the information
+ * from the old one into it.
+ */
+
+ new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+ *new_copy = *copy;
+
+ if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
+ /*
+ * The links in the entry chain must be
+ * changed to point to the new copy object.
+ */
+ vm_map_copy_first_entry(copy)->vme_prev
+ = vm_map_copy_to_entry(new_copy);
+ vm_map_copy_last_entry(copy)->vme_next
+ = vm_map_copy_to_entry(new_copy);
+ }
+
+ /*
+ * Change the old copy object into one that contains
+ * nothing to be deallocated.
+ */
+ copy->type = VM_MAP_COPY_OBJECT;
+ copy->cpy_object = VM_OBJECT_NULL;
+
+ /*
+ * Return the new object.
+ */
+ return new_copy;
+}
+
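+/*
+ *	Routine:	vm_map_overwrite_submap_recurse
+ *
+ *	Description:
+ *		Verify that the destination range [dst_addr, dst_addr + dst_size)
+ *		is entirely writeable and contiguous, recursing into any
+ *		submaps it crosses and waiting for entries that are in
+ *		transition.  Returns KERN_PROTECTION_FAILURE for a
+ *		non-writeable entry, KERN_INVALID_ADDRESS for a hole in the
+ *		range, and KERN_FAILURE if a permanent object is found once
+ *		a submap has been traversed.  dst_map is locked and unlocked
+ *		internally.
+ */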
+static kern_return_t
+vm_map_overwrite_submap_recurse(
+ vm_map_t dst_map,
+ vm_map_offset_t dst_addr,
+ vm_map_size_t dst_size)
+{
+ vm_map_offset_t dst_end;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+ kern_return_t result;
+ boolean_t encountered_sub_map = FALSE;
+
+
+
+ /*
+ * Verify that the destination is all writeable
+ * initially. We have to trunc the destination
+ * address and round the copy size or we'll end up
+ * splitting entries in strange ways.
+ */
+
+ dst_end = vm_map_round_page(dst_addr + dst_size);
+ vm_map_lock(dst_map);
+
+start_pass_1:
+ if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
+ assert(!tmp_entry->use_pmap); /* clipping did unnest if needed */
+
+ for (entry = tmp_entry;;) {
+ vm_map_entry_t next;
+
+ next = entry->vme_next;
+ while(entry->is_sub_map) {
+ vm_map_offset_t sub_start;
+ vm_map_offset_t sub_end;
+ vm_map_offset_t local_end;
+
+ if (entry->in_transition) {
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ goto start_pass_1;
+ }
+
+ encountered_sub_map = TRUE;
+ sub_start = entry->offset;
+
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ sub_end -= entry->vme_start;
+ sub_end += entry->offset;
+ local_end = entry->vme_end;
+ vm_map_unlock(dst_map);
+
+ result = vm_map_overwrite_submap_recurse(
+ entry->object.sub_map,
+ sub_start,
+ sub_end - sub_start);
+
+ if(result != KERN_SUCCESS)
+ return result;
+ if (dst_end <= entry->vme_end)
+ return KERN_SUCCESS;
+ vm_map_lock(dst_map);
+ if(!vm_map_lookup_entry(dst_map, local_end,
+ &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ entry = tmp_entry;
+ next = entry->vme_next;
+ }
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. Anything could happen
+ * when we unlock the map, so start over.
+ */
+ if (entry->in_transition) {
+
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ goto start_pass_1;
+ }
+
+/*
+ * our range is contained completely within this map entry
+ */
+ if (dst_end <= entry->vme_end) {
+ vm_map_unlock(dst_map);
+ return KERN_SUCCESS;
+ }
+/*
+ * check that the specified range is a contiguous region
+ */
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ /*
+ * Check for permanent objects in the destination.
+ */
+ if ((entry->object.vm_object != VM_OBJECT_NULL) &&
+ ((!entry->object.vm_object->internal) ||
+ (entry->object.vm_object->true_share))) {
+ if(encountered_sub_map) {
+ vm_map_unlock(dst_map);
+ return(KERN_FAILURE);
+ }
+ }
+
+
+ entry = next;
+ }/* for */
+ vm_map_unlock(dst_map);
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: vm_map_copy_overwrite
+ *
+ * Description:
+ * Copy the memory described by the map copy
+ * object (copy; returned by vm_map_copyin) onto
+ * the specified destination region (dst_map, dst_addr).
+ * The destination must be writeable.
+ *
+ * Unlike vm_map_copyout, this routine actually
+ * writes over previously-mapped memory. If the
+ * previous mapping was to a permanent (user-supplied)
+ * memory object, it is preserved.
+ *
+ * The attributes (protection and inheritance) of the
+ * destination region are preserved.
+ *
+ * If successful, consumes the copy object.
+ * Otherwise, the caller is responsible for it.
+ *
+ * Implementation notes:
+ * To overwrite aligned temporary virtual memory, it is
+ * sufficient to remove the previous mapping and insert
+ * the new copy. This replacement is done either on
+ * the whole region (if no permanent virtual memory
+ * objects are embedded in the destination region) or
+ * in individual map entries.
+ *
+ *	To overwrite permanent virtual memory, it is necessary
+ * to copy each page, as the external memory management
+ * interface currently does not provide any optimizations.
+ *
+ * Unaligned memory also has to be copied. It is possible
+ * to use 'vm_trickery' to copy the aligned data. This is
+ * not done but not hard to implement.
+ *
+ * Once a page of permanent memory has been overwritten,
+ * it is impossible to interrupt this function; otherwise,
+ * the call would be neither atomic nor location-independent.
+ * The kernel-state portion of a user thread must be
+ * interruptible.
+ *
+ * It may be expensive to forward all requests that might
+ * overwrite permanent memory (vm_write, vm_copy) to
+ * uninterruptible kernel threads. This routine may be
+ * called by interruptible threads; however, success is
+ * not guaranteed -- if the request cannot be performed
+ * atomically and interruptibly, an error indication is
+ * returned.
+ */
+
+static kern_return_t
+vm_map_copy_overwrite_nested(
+ vm_map_t dst_map,
+ vm_map_address_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible,
+ pmap_t pmap)
+{
+ vm_map_offset_t dst_end;
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+ kern_return_t kr;
+ boolean_t aligned = TRUE;
+ boolean_t contains_permanent_objects = FALSE;
+ boolean_t encountered_sub_map = FALSE;
+ vm_map_offset_t base_addr;
+ vm_map_size_t copy_size;
+ vm_map_size_t total_size;
+
+
+ /*
+ * Check for null copy object.
+ */
+
+ if (copy == VM_MAP_COPY_NULL)
+ return(KERN_SUCCESS);
+
+ /*
+ * Check for special kernel buffer allocated
+ * by new_ipc_kmsg_copyin.
+ */
+
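+	/*
+	 * vm_map_copyout_kernel_buffer does the copy itself; the
+	 * TRUE argument selects its overwrite path, so no new
+	 * address space is allocated in dst_map.
+	 */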
+ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
+ return(vm_map_copyout_kernel_buffer(
+ dst_map, &dst_addr,
+ copy, TRUE));
+ }
+
+ /*
+ * Only works for entry lists at the moment. Will
+ * support page lists later.
+ */
+
+ assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
+
+ if (copy->size == 0) {
+ vm_map_copy_discard(copy);
+ return(KERN_SUCCESS);
+ }
+
+ /*
+ * Verify that the destination is all writeable
+ * initially. We have to trunc the destination
+ * address and round the copy size or we'll end up
+ * splitting entries in strange ways.
+ */
+
+ if (!page_aligned(copy->size) ||
+ !page_aligned (copy->offset) ||
+ !page_aligned (dst_addr))
+ {
+ aligned = FALSE;
+ dst_end = vm_map_round_page(dst_addr + copy->size);
+ } else {
+ dst_end = dst_addr + copy->size;
+ }
+
+ vm_map_lock(dst_map);
+
+ /* LP64todo - remove this check when vm_map_commpage64()
+ * no longer has to stuff in a map_entry for the commpage
+ * above the map's max_offset.
+ */
+ if (dst_addr >= dst_map->max_offset) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+start_pass_1:
+ if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
+ for (entry = tmp_entry;;) {
+ vm_map_entry_t next = entry->vme_next;
+
+ while(entry->is_sub_map) {
+ vm_map_offset_t sub_start;
+ vm_map_offset_t sub_end;
+ vm_map_offset_t local_end;
+
+ if (entry->in_transition) {
+
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ goto start_pass_1;
+ }
+
+ local_end = entry->vme_end;
+ if (!(entry->needs_copy)) {
+			/* if needs_copy is set we are a COW submap; */
+			/* in that case we just replace the mapping, so */
+			/* there is no need for the following */
+			/* check. */
+ encountered_sub_map = TRUE;
+ sub_start = entry->offset;
+
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ sub_end -= entry->vme_start;
+ sub_end += entry->offset;
+ vm_map_unlock(dst_map);
+
+ kr = vm_map_overwrite_submap_recurse(
+ entry->object.sub_map,
+ sub_start,
+ sub_end - sub_start);
+ if(kr != KERN_SUCCESS)
+ return kr;
+ vm_map_lock(dst_map);
+ }
+
+ if (dst_end <= entry->vme_end)
+ goto start_overwrite;
+ if(!vm_map_lookup_entry(dst_map, local_end,
+ &entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ next = entry->vme_next;
+ }
+
+ if ( ! (entry->protection & VM_PROT_WRITE)) {
+ vm_map_unlock(dst_map);
+ return(KERN_PROTECTION_FAILURE);
+ }
+
+ /*
+ * If the entry is in transition, we must wait
+ * for it to exit that state. Anything could happen
+ * when we unlock the map, so start over.
+ */
+ if (entry->in_transition) {
+
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ goto start_pass_1;
+ }
+
+/*
+ * our range is contained completely within this map entry
+ */
+ if (dst_end <= entry->vme_end)
+ break;
+/*
+ * check that the specified range is a contiguous region
+ */
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+
+ /*
+ * Check for permanent objects in the destination.
+ */
+ if ((entry->object.vm_object != VM_OBJECT_NULL) &&
+ ((!entry->object.vm_object->internal) ||
+ (entry->object.vm_object->true_share))) {
+ contains_permanent_objects = TRUE;
+ }
+
+ entry = next;
+ }/* for */
+
+start_overwrite:
+ /*
+ * If there are permanent objects in the destination, then
+ * the copy cannot be interrupted.
+ */
+
+ if (interruptible && contains_permanent_objects) {
+ vm_map_unlock(dst_map);
+ return(KERN_FAILURE); /* XXX */
+ }
+
+ /*
+	 *	Make a second pass, overwriting the data.
+	 *	At the beginning of each loop iteration,
+	 *	the next entry to be overwritten is "tmp_entry"
+	 *	(initially, the value returned from the lookup above),
+	 *	and the starting address expected in that entry
+	 *	is "base_addr".
+ */
+
+ total_size = copy->size;
+ if(encountered_sub_map) {
+ copy_size = 0;
+ /* re-calculate tmp_entry since we've had the map */
+ /* unlocked */
+ if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ copy_size = copy->size;
+ }
+
+ base_addr = dst_addr;
+ while(TRUE) {
+ /* deconstruct the copy object and do in parts */
+		/* only in the sub_map, interruptible case */
+ vm_map_entry_t copy_entry;
+ vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL;
+ vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL;
+ int nentries;
+ int remaining_entries = 0;
+ vm_map_offset_t new_offset = 0;
+
+ for (entry = tmp_entry; copy_size == 0;) {
+ vm_map_entry_t next;
+
+ next = entry->vme_next;
+
+			/* tmp_entry and the base address are moved along */
+			/* each time we encounter a sub-map.  Otherwise, */
+			/* entry can outpace tmp_entry, and copy_size */
+			/* may reflect the distance between them. */
+			/* If the current entry is found to be in transition, */
+			/* we restart either at the beginning or at the last */
+			/* submap encountered, as dictated by base_addr, */
+			/* and zero copy_size accordingly. */
+ if (entry->in_transition) {
+ /*
+ * Say that we are waiting, and wait for entry.
+ */
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+
+ if(!vm_map_lookup_entry(dst_map, base_addr,
+ &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ copy_size = 0;
+ entry = tmp_entry;
+ continue;
+ }
+ if(entry->is_sub_map) {
+ vm_map_offset_t sub_start;
+ vm_map_offset_t sub_end;
+ vm_map_offset_t local_end;
+
+ if (entry->needs_copy) {
+ /* if this is a COW submap */
+					/* just back the range with an */
+					/* anonymous entry */
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ if(entry->vme_start < base_addr)
+ sub_start = base_addr;
+ else
+ sub_start = entry->vme_start;
+ vm_map_clip_end(
+ dst_map, entry, sub_end);
+ vm_map_clip_start(
+ dst_map, entry, sub_start);
+ assert(!entry->use_pmap);
+ entry->is_sub_map = FALSE;
+ vm_map_deallocate(
+ entry->object.sub_map);
+ entry->object.sub_map = NULL;
+ entry->is_shared = FALSE;
+ entry->needs_copy = FALSE;
+ entry->offset = 0;
+ /*
+ * XXX FBDP
+ * We should propagate the protections
+ * of the submap entry here instead
+ * of forcing them to VM_PROT_ALL...
+ * Or better yet, we should inherit
+ * the protection of the copy_entry.
+ */
+ entry->protection = VM_PROT_ALL;
+ entry->max_protection = VM_PROT_ALL;
+ entry->wired_count = 0;
+ entry->user_wired_count = 0;
+ if(entry->inheritance
+ == VM_INHERIT_SHARE)
+ entry->inheritance = VM_INHERIT_COPY;
+ continue;
+ }
+ /* first take care of any non-sub_map */
+ /* entries to send */
+ if(base_addr < entry->vme_start) {
+ /* stuff to send */
+ copy_size =
+ entry->vme_start - base_addr;
+ break;
+ }
+ sub_start = entry->offset;
+
+ if(entry->vme_end < dst_end)
+ sub_end = entry->vme_end;
+ else
+ sub_end = dst_end;
+ sub_end -= entry->vme_start;
+ sub_end += entry->offset;
+ local_end = entry->vme_end;
+ vm_map_unlock(dst_map);
+ copy_size = sub_end - sub_start;
+
+ /* adjust the copy object */
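+				/*
+				 * Only copy_size bytes are destined for the
+				 * submap, so temporarily truncate the copy
+				 * object to that size: clip the copy entry
+				 * that straddles the boundary, detach the
+				 * tail of the entry list into next_copy
+				 * (saving previous_prev and remaining_entries
+				 * so it can be re-attached later), and shrink
+				 * copy->size and nentries.  new_offset tracks
+				 * where the remainder begins so copy->offset
+				 * can be advanced for the next chunk.
+				 */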
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ nentries = 1;
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry !=
+ vm_map_copy_to_entry(copy)){
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy,
+ copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
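+				/*
+				 * Recurse into the submap, choosing the pmap
+				 * the nested overwrite should update: the
+				 * submap's own pmap when the entry uses a
+				 * nested pmap and no override was supplied,
+				 * the caller-supplied pmap if there is one,
+				 * or the destination map's pmap otherwise.
+				 */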
+ if((entry->use_pmap) && (pmap == NULL)) {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible,
+ entry->object.sub_map->pmap);
+ } else if (pmap != NULL) {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible, pmap);
+ } else {
+ kr = vm_map_copy_overwrite_nested(
+ entry->object.sub_map,
+ sub_start,
+ copy,
+ interruptible,
+ dst_map->pmap);
+ }
+ if(kr != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev
+ = previous_prev;
+ copy->size = total_size;
+ }
+ return kr;
+ }
+ if (dst_end <= local_end) {
+ return(KERN_SUCCESS);
+ }
+ /* otherwise copy no longer exists, it was */
+ /* destroyed after successful copy_overwrite */
+ copy = (vm_map_copy_t)
+ zalloc(vm_map_copy_zone);
+ vm_map_copy_first_entry(copy) =
+ vm_map_copy_last_entry(copy) =
+ vm_map_copy_to_entry(copy);
+ copy->type = VM_MAP_COPY_ENTRY_LIST;
+ copy->offset = new_offset;
+
+ total_size -= copy_size;
+ copy_size = 0;
+ /* put back remainder of copy in container */
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ copy->size = total_size;
+ next_copy->vme_prev =
+ vm_map_copy_to_entry(copy);
+ next_copy = NULL;
+ }
+ base_addr = local_end;
+ vm_map_lock(dst_map);
+ if(!vm_map_lookup_entry(dst_map,
+ local_end, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ entry = tmp_entry;
+ continue;
+ }
+ if (dst_end <= entry->vme_end) {
+ copy_size = dst_end - base_addr;
+ break;
+ }
+
+ if ((next == vm_map_to_entry(dst_map)) ||
+ (next->vme_start != entry->vme_end)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+
+ entry = next;
+ }/* for */
+
+ next_copy = NULL;
+ nentries = 1;
+
+ /* adjust the copy object */
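+		/*
+		 * As in the submap case above, truncate the copy object to
+		 * the copy_size bytes being written in this pass, parking
+		 * the tail of the entry list in next_copy for re-attachment
+		 * after the overwrite.
+		 */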
+ if (total_size > copy_size) {
+ vm_map_size_t local_size = 0;
+ vm_map_size_t entry_size;
+
+ new_offset = copy->offset;
+ copy_entry = vm_map_copy_first_entry(copy);
+ while(copy_entry != vm_map_copy_to_entry(copy)) {
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ if((local_size < copy_size) &&
+ ((local_size + entry_size)
+ >= copy_size)) {
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start +
+ (copy_size - local_size));
+ entry_size = copy_entry->vme_end -
+ copy_entry->vme_start;
+ local_size += entry_size;
+ new_offset += entry_size;
+ }
+ if(local_size >= copy_size) {
+ next_copy = copy_entry->vme_next;
+ copy_entry->vme_next =
+ vm_map_copy_to_entry(copy);
+ previous_prev =
+ copy->cpy_hdr.links.prev;
+ copy->cpy_hdr.links.prev = copy_entry;
+ copy->size = copy_size;
+ remaining_entries =
+ copy->cpy_hdr.nentries;
+ remaining_entries -= nentries;
+ copy->cpy_hdr.nentries = nentries;
+ break;
+ } else {
+ local_size += entry_size;
+ new_offset += entry_size;
+ nentries++;
+ }
+ copy_entry = copy_entry->vme_next;
+ }
+ }
+
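+		/*
+		 * Write out the current chunk starting at base_addr:
+		 * aligned copies can be installed by replacing whole map
+		 * entries (vm_map_copy_overwrite_aligned), while unaligned
+		 * copies fall back to physical copying
+		 * (vm_map_copy_overwrite_unaligned).
+		 */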
+ if (aligned) {
+ pmap_t local_pmap;
+
+ if(pmap)
+ local_pmap = pmap;
+ else
+ local_pmap = dst_map->pmap;
+
+ if ((kr = vm_map_copy_overwrite_aligned(
+ dst_map, tmp_entry, copy,
+ base_addr, local_pmap)) != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ vm_map_unlock(dst_map);
+ } else {
+ /*
+ * Performance gain:
+ *
+		 * if the copy and dst address are misaligned but share the
+		 * same offset within the page, we can copy_not_aligned the
+		 * misaligned parts and copy the rest aligned.  If they are
+		 * aligned but the length is unaligned, we simply need to copy
+		 * the end bit unaligned.  We'll need to split the misaligned
+		 * bits of the region in this case!
+ */
+ /* ALWAYS UNLOCKS THE dst_map MAP */
+ if ((kr = vm_map_copy_overwrite_unaligned( dst_map,
+ tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries +=
+ remaining_entries;
+ copy->cpy_hdr.links.prev->vme_next =
+ next_copy;
+ copy->cpy_hdr.links.prev =
+ previous_prev;
+ copy->size += copy_size;
+ }
+ return kr;
+ }
+ }
+ total_size -= copy_size;
+ if(total_size == 0)
+ break;
+ base_addr += copy_size;
+ copy_size = 0;
+ copy->offset = new_offset;
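+		/*
+		 * Re-attach the tail of the copy entry list that was split
+		 * off above, so the next pass sees the data that has not
+		 * been written yet.
+		 */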
+ if(next_copy != NULL) {
+ copy->cpy_hdr.nentries = remaining_entries;
+ copy->cpy_hdr.links.next = next_copy;
+ copy->cpy_hdr.links.prev = previous_prev;
+ next_copy->vme_prev = vm_map_copy_to_entry(copy);
+ copy->size = total_size;
+ }
+ vm_map_lock(dst_map);
+ while(TRUE) {
+ if (!vm_map_lookup_entry(dst_map,
+ base_addr, &tmp_entry)) {
+ vm_map_unlock(dst_map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ if (tmp_entry->in_transition) {
+ entry->needs_wakeup = TRUE;
+ vm_map_entry_wait(dst_map, THREAD_UNINT);
+ } else {
+ break;
+ }
+ }
+ vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr));
+
+ entry = tmp_entry;
+ } /* while */
+
+ /*
+ * Throw away the vm_map_copy object
+ */
+ vm_map_copy_discard(copy);
+
+ return(KERN_SUCCESS);
+}/* vm_map_copy_overwrite_nested */
+
+kern_return_t
+vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_map_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
+{
+ return vm_map_copy_overwrite_nested(
+ dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
+}
+
+
+/*
+ * Routine: vm_map_copy_overwrite_unaligned [internal use only]
+ *
+ *	Description:
+ * Physically copy unaligned data
+ *
+ * Implementation:
+ * Unaligned parts of pages have to be physically copied. We use
+ *	a modified form of vm_fault_copy (which understands non-aligned
+ *	page offsets and sizes) to do the copy.  We attempt to copy as
+ *	much memory in one go as possible; however, vm_fault_copy copies
+ *	within a single memory object, so we have to use the smallest of
+ *	"amount left", "source object data size" and "target object data
+ *	size".  With unaligned data we don't need to split regions, so the
+ *	source (copy) object should be a single map entry; the target
+ *	range may, however, be split over multiple map entries.  In any
+ *	event we are pessimistic about these assumptions.
+ *
+ * Assumptions:
+ *	dst_map is locked on entry and is returned locked on success,
+ * unlocked on error.
+ */
+
+static kern_return_t
+vm_map_copy_overwrite_unaligned(
+ vm_map_t dst_map,
+ vm_map_entry_t entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start)
+{
+ vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy);
+ vm_map_version_t version;
+ vm_object_t dst_object;
+ vm_object_offset_t dst_offset;
+ vm_object_offset_t src_offset;
+ vm_object_offset_t entry_offset;
+ vm_map_offset_t entry_end;
+ vm_map_size_t src_size,
+ dst_size,
+ copy_size,
+ amount_left;
+ kern_return_t kr = KERN_SUCCESS;
+
+ vm_map_lock_write_to_read(dst_map);
+
+ src_offset = copy->offset - vm_object_trunc_page(copy->offset);
+ amount_left = copy->size;
+/*
+ *	unaligned so we never clipped this entry; we need the offset into
+ *	the vm_object, not just into the data.
+ */
+ while (amount_left > 0) {
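+/*
+ *	Each pass copies the smaller of what is left in the current source
+ *	copy entry, the current destination entry, and amount_left, using
+ *	vm_fault_copy; exhausted source copy entries are then disposed of
+ *	and the destination entry is advanced as needed.
+ */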
+
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* "start" must be within the current map entry */
+ assert ((start>=entry->vme_start) && (start<entry->vme_end));
+
+ dst_offset = start - entry->vme_start;
+
+ dst_size = entry->vme_end - start;
+
+ src_size = copy_entry->vme_end -
+ (copy_entry->vme_start + src_offset);
+
+ if (dst_size < src_size) {
+/*
+ * we can only copy dst_size bytes before
+ * we have to get the next destination entry
+ */
+ copy_size = dst_size;
+ } else {
+/*
+ * we can only copy src_size bytes before
+ * we have to get the next source copy entry
+ */
+ copy_size = src_size;
+ }
+
+ if (copy_size > amount_left) {
+ copy_size = amount_left;
+ }
+/*
+ *	Entry needs copy: create a shadow object for the
+ *	copy-on-write region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0))
+ {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_map_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ vm_map_lock_write_to_read(dst_map);
+ }
+ dst_object = entry->object.vm_object;
+/*
+ *	unlike with the virtual (aligned) copy, we're going
+ *	to fault on it, so we need a target object.
+ */
+ if (dst_object == VM_OBJECT_NULL) {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ dst_object = vm_object_allocate((vm_map_size_t)
+ entry->vme_end - entry->vme_start);
+ entry->object.vm_object = dst_object;
+ entry->offset = 0;
+ vm_map_lock_write_to_read(dst_map);
+ }
+/*
+ * Take an object reference and unlock map. The "entry" may
+ * disappear or change when the map is unlocked.
+ */
+ vm_object_reference(dst_object);
+ version.main_timestamp = dst_map->timestamp;
+ entry_offset = entry->offset;
+ entry_end = entry->vme_end;
+ vm_map_unlock_read(dst_map);
+/*
+ * Copy as much as possible in one pass
+ */
+ kr = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset + src_offset,
+			&copy_size,
+ dst_object,
+ entry_offset + dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ start += copy_size;
+ src_offset += copy_size;
+ amount_left -= copy_size;
+/*
+ * Release the object reference
+ */
+ vm_object_deallocate(dst_object);
+/*
+ * If a hard error occurred, return it now
+ */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
+ || amount_left == 0)
+ {
+/*
+ * all done with this copy entry, dispose.
+ */
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ if ((copy_entry = vm_map_copy_first_entry(copy))
+ == vm_map_copy_to_entry(copy) && amount_left) {
+/*
+ *	not finished copying but ran out of source
+ */
+ return KERN_INVALID_ADDRESS;
+ }
+ src_offset = 0;
+ }
+
+ if (amount_left == 0)
+ return KERN_SUCCESS;
+
+ vm_map_lock_read(dst_map);
+ if (version.main_timestamp == dst_map->timestamp) {
+ if (start == entry_end) {
+/*
+ * destination region is split. Use the version
+ * information to avoid a lookup in the normal
+ * case.
+ */
+ entry = entry->vme_next;
+/*
+ * should be contiguous. Fail if we encounter
+ * a hole in the destination.
+ */
+ if (start != entry->vme_start) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ } else {
+/*
+ * Map version check failed.
+ * we must lookup the entry because somebody
+ * might have changed the map behind our backs.
+ */
+ RetryLookup:
+ if (!vm_map_lookup_entry(dst_map, start, &entry))
+ {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ }/* while */
+
+ return KERN_SUCCESS;
+}/* vm_map_copy_overwrite_unaligned */
+
+/*
+ * Routine: vm_map_copy_overwrite_aligned [internal use only]
+ *
+ * Description:
+ * Does all the vm_trickery possible for whole pages.
+ *
+ * Implementation:
+ *
+ * If there are no permanent objects in the destination,
+ * and the source and destination map entry zones match,
+ * and the destination map entry is not shared,
+ * then the map entries can be deleted and replaced
+ * with those from the copy. The following code is the
+ * basic idea of what to do, but there are lots of annoying
+ * little details about getting protection and inheritance
+ * right. Should add protection, inheritance, and sharing checks
+ * to the above pass and make sure that no wiring is involved.
+ */
+
+static kern_return_t
+vm_map_copy_overwrite_aligned(
+ vm_map_t dst_map,
+ vm_map_entry_t tmp_entry,
+ vm_map_copy_t copy,
+ vm_map_offset_t start,
+ __unused pmap_t pmap)
+{
+ vm_object_t object;
+ vm_map_entry_t copy_entry;
+ vm_map_size_t copy_size;
+ vm_map_size_t size;
+ vm_map_entry_t entry;
+
+ while ((copy_entry = vm_map_copy_first_entry(copy))
+ != vm_map_copy_to_entry(copy))
+ {
+ copy_size = (copy_entry->vme_end - copy_entry->vme_start);
+
+ entry = tmp_entry;
+ assert(!entry->use_pmap); /* unnested when clipped earlier */
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+ size = (entry->vme_end - entry->vme_start);
+ /*
+ * Make sure that no holes popped up in the
+ * address map, and that the protection is
+ * still valid, in case the map was unlocked
+ * earlier.
+ */