+
+ } else if (named_entry->is_copy) {
+ kern_return_t kr;
+ vm_map_copy_t copy_map;
+ vm_map_entry_t copy_entry;
+ vm_map_offset_t copy_addr;
+
+ if (flags & ~(VM_FLAGS_FIXED |
+ VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_IOKIT_ACCT |
+ VM_FLAGS_RETURN_4K_DATA_ADDR |
+ VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_ALIAS_MASK)) {
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_RETURN_4K_DATA_ADDR)) {
+ offset_in_mapping = offset - vm_object_trunc_page(offset);
+ if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR)
+ offset_in_mapping &= ~((signed)(0xFFF));
+ offset = vm_object_trunc_page(offset);
+ map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset;
+ }
+
+ copy_map = named_entry->backing.copy;
+ assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
+ if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) {
+ /* unsupported type; should not happen */
+ printf("vm_map_enter_mem_object: "
+ "memory_entry->backing.copy "
+ "unsupported type 0x%x\n",
+ copy_map->type);
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* reserve a contiguous range */
+ kr = vm_map_enter(target_map,
+ &map_addr,
+ /* map whole mem entry, trim later: */
+ named_entry->size,
+ mask,
+ flags & (VM_FLAGS_ANYWHERE |
+ VM_FLAGS_OVERWRITE |
+ VM_FLAGS_IOKIT_ACCT |
+ VM_FLAGS_RETURN_4K_DATA_ADDR |
+ VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_ALIAS_MASK),
+ VM_OBJECT_NULL,
+ 0,
+ FALSE, /* copy */
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ named_entry_unlock(named_entry);
+ return kr;
+ }
+
+ copy_addr = map_addr;
+
+ for (copy_entry = vm_map_copy_first_entry(copy_map);
+ copy_entry != vm_map_copy_to_entry(copy_map);
+ copy_entry = copy_entry->vme_next) {
+ int remap_flags = 0;
+ vm_map_t copy_submap;
+ vm_object_t copy_object;
+ vm_map_size_t copy_size;
+ vm_object_offset_t copy_offset;
+ int copy_vm_alias;
+
+ copy_object = VME_OBJECT(copy_entry);
+ copy_offset = VME_OFFSET(copy_entry);
+ copy_size = (copy_entry->vme_end -
+ copy_entry->vme_start);
+ VM_GET_FLAGS_ALIAS(flags, copy_vm_alias);
+ if (copy_vm_alias == 0) {
+ /*
+ * Caller does not want a specific
+ * alias for this new mapping: use
+ * the alias of the original mapping.
+ */
+ copy_vm_alias = VME_ALIAS(copy_entry);
+ }
+
+ /* sanity check */
+ if ((copy_addr + copy_size) >
+ (map_addr +
+ named_entry->size /* XXX full size */ )) {
+ /* over-mapping too much !? */
+ kr = KERN_INVALID_ARGUMENT;
+ /* abort */
+ break;
+ }
+
+ /* take a reference on the object */
+ if (copy_entry->is_sub_map) {
+ remap_flags |= VM_FLAGS_SUBMAP;
+ copy_submap = VME_SUBMAP(copy_entry);
+ vm_map_lock(copy_submap);
+ vm_map_reference(copy_submap);
+ vm_map_unlock(copy_submap);
+ copy_object = (vm_object_t) copy_submap;
+ } else if (!copy &&
+ copy_object != VM_OBJECT_NULL &&
+ (copy_entry->needs_copy ||
+ copy_object->shadowed ||
+ (!copy_object->true_share &&
+ !copy_entry->is_shared &&
+ copy_object->vo_size > copy_size))) {
+ /*
+ * We need to resolve our side of this
+ * "symmetric" copy-on-write now; we
+ * need a new object to map and share,
+ * instead of the current one which
+ * might still be shared with the
+ * original mapping.
+ *
+ * Note: A "vm_map_copy_t" does not
+ * have a lock but we're protected by
+ * the named entry's lock here.
+ */
+ // assert(copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
+ VME_OBJECT_SHADOW(copy_entry, copy_size);
+ if (!copy_entry->needs_copy &&
+ copy_entry->protection & VM_PROT_WRITE) {
+ vm_prot_t prot;
+
+ prot = copy_entry->protection & ~VM_PROT_WRITE;
+ vm_object_pmap_protect(copy_object,
+ copy_offset,
+ copy_size,
+ PMAP_NULL,
+ 0,
+ prot);
+ }
+
+ copy_entry->needs_copy = FALSE;
+ copy_entry->is_shared = TRUE;
+ copy_object = VME_OBJECT(copy_entry);
+ copy_offset = VME_OFFSET(copy_entry);
+ vm_object_lock(copy_object);
+ vm_object_reference_locked(copy_object);
+ if (copy_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+ /* we're about to make a shared mapping of this object */
+ copy_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ copy_object->true_share = TRUE;
+ }
+ vm_object_unlock(copy_object);
+ } else {
+ /*
+ * We already have the right object
+ * to map.
+ */
+ copy_object = VME_OBJECT(copy_entry);
+ vm_object_reference(copy_object);
+ }
+
+ /* over-map the object into destination */
+ remap_flags |= flags;
+ remap_flags |= VM_FLAGS_FIXED;
+ remap_flags |= VM_FLAGS_OVERWRITE;
+ remap_flags &= ~VM_FLAGS_ANYWHERE;
+ remap_flags |= VM_MAKE_TAG(copy_vm_alias);
+ if (!copy && !copy_entry->is_sub_map) {
+ /*
+ * copy-on-write should have been
+ * resolved at this point, or we would
+ * end up sharing instead of copying.
+ */
+ assert(!copy_entry->needs_copy);
+ }
+ kr = vm_map_enter(target_map,
+ &copy_addr,
+ copy_size,
+ (vm_map_offset_t) 0,
+ remap_flags,
+ copy_object,
+ copy_offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (kr != KERN_SUCCESS) {
+ if (copy_entry->is_sub_map) {
+ vm_map_deallocate(copy_submap);
+ } else {
+ vm_object_deallocate(copy_object);
+ }
+ /* abort */
+ break;
+ }
+
+ /* next mapping */
+ copy_addr += copy_size;
+ }
+
+ if (kr == KERN_SUCCESS) {
+ if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_RETURN_4K_DATA_ADDR)) {
+ *address = map_addr + offset_in_mapping;
+ } else {
+ *address = map_addr;
+ }
+
+ if (offset) {
+ /*
+ * Trim in front, from 0 to "offset".
+ */
+ vm_map_remove(target_map,
+ map_addr,
+ map_addr + offset,
+ 0);
+ *address += offset;
+ }
+ if (offset + map_size < named_entry->size) {
+ /*
+ * Trim in back, from
+ * "offset + map_size" to
+ * "named_entry->size".
+ */
+ vm_map_remove(target_map,
+ (map_addr +
+ offset + map_size),
+ (map_addr +
+ named_entry->size),
+ 0);
+ }
+ }
+ named_entry_unlock(named_entry);
+
+ if (kr != KERN_SUCCESS) {
+ if (! (flags & VM_FLAGS_OVERWRITE)) {
+ /* deallocate the contiguous range */
+ (void) vm_deallocate(target_map,
+ map_addr,
+ map_size);
+ }
+ }
+
+ return kr;
+