+
+ } else if (named_entry->is_copy) {
+ /*
+ * Mapping a named entry backed by a vm_map_copy_t:
+ * reserve the whole destination range first, then over-map
+ * each copy entry at its fixed offset inside the reservation.
+ */
+ kern_return_t kr;
+ vm_map_copy_t copy_map;
+ vm_map_entry_t copy_entry;
+ vm_map_offset_t copy_addr;
+
+ /* only this restricted set of flags is supported on this path */
+ if (flags & ~(VM_FLAGS_FIXED |
+ VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_RETURN_DATA_ADDR)) {
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ /* page-align the mapping but remember the intra-page offset */
+ offset_in_mapping = offset - vm_object_trunc_page(offset);
+ offset = vm_object_trunc_page(offset);
+ map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset;
+ }
+
+ copy_map = named_entry->backing.copy;
+ assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
+ if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) {
+ /* unsupported type; should not happen */
+ printf("vm_map_enter_mem_object: "
+ "memory_entry->backing.copy "
+ "unsupported type 0x%x\n",
+ copy_map->type);
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* reserve a contiguous range */
+ kr = vm_map_enter(target_map,
+ &map_addr,
+ map_size,
+ mask,
+ flags & (VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_RETURN_DATA_ADDR),
+ VM_OBJECT_NULL,
+ 0,
+ FALSE, /* copy */
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ named_entry_unlock(named_entry);
+ return kr;
+ }
+
+ copy_addr = map_addr;
+
+ for (copy_entry = vm_map_copy_first_entry(copy_map);
+ copy_entry != vm_map_copy_to_entry(copy_map);
+ copy_entry = copy_entry->vme_next) {
+ int remap_flags = 0;
+ vm_map_t copy_submap;
+ vm_object_t copy_object;
+ vm_map_size_t copy_size;
+ vm_object_offset_t copy_offset;
+
+ copy_offset = copy_entry->offset;
+ copy_size = (copy_entry->vme_end -
+ copy_entry->vme_start);
+
+ /* sanity check */
+ if (copy_addr + copy_size >
+ map_addr + map_size) {
+ /* over-mapping too much !? */
+ kr = KERN_INVALID_ARGUMENT;
+ /* abort */
+ break;
+ }
+
+ /* take a reference on the object */
+ if (copy_entry->is_sub_map) {
+ remap_flags |= VM_FLAGS_SUBMAP;
+ copy_submap =
+ copy_entry->object.sub_map;
+ vm_map_lock(copy_submap);
+ vm_map_reference(copy_submap);
+ vm_map_unlock(copy_submap);
+ /* vm_map_enter will treat this as a vm_map_t
+ * because VM_FLAGS_SUBMAP is set */
+ copy_object = (vm_object_t) copy_submap;
+ } else {
+ copy_object =
+ copy_entry->object.vm_object;
+ vm_object_reference(copy_object);
+ }
+
+ /* over-map the object into destination */
+ remap_flags |= flags;
+ remap_flags |= VM_FLAGS_FIXED;
+ remap_flags |= VM_FLAGS_OVERWRITE;
+ remap_flags &= ~VM_FLAGS_ANYWHERE;
+ kr = vm_map_enter(target_map,
+ &copy_addr,
+ copy_size,
+ (vm_map_offset_t) 0,
+ remap_flags,
+ copy_object,
+ copy_offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ /* drop the reference we took above */
+ if (copy_entry->is_sub_map) {
+ vm_map_deallocate(copy_submap);
+ } else {
+ vm_object_deallocate(copy_object);
+ }
+ /* abort */
+ break;
+ }
+
+ /* next mapping */
+ copy_addr += copy_size;
+ }
+
+ if (kr == KERN_SUCCESS) {
+ if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) {
+ *address = map_addr + offset_in_mapping;
+ } else {
+ *address = map_addr;
+ }
+ }
+ named_entry_unlock(named_entry);
+
+ if (kr != KERN_SUCCESS) {
+ /* NOTE(review): with VM_FLAGS_OVERWRITE the reservation
+ * presumably replaced pre-existing mappings, so it is
+ * left in place on failure — confirm against callers */
+ if (! (flags & VM_FLAGS_OVERWRITE)) {
+ /* deallocate the contiguous range */
+ (void) vm_deallocate(target_map,
+ map_addr,
+ map_size);
+ }
+ }
+
+ return kr;
+