+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
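+ /*
+  * MAP_MEM_VM_COPY: back the new memory entry with a vm_map_copy of
+  * the requested range, taken with vm_map_copyin_internal(), so the
+  * entry holds a copy of the source data rather than a reference to
+  * the live pages.
+  */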
+ if (permission & MAP_MEM_VM_COPY) {
+ vm_map_copy_t copy;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
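+ /*
+  * Round the end of the range up to a page boundary.  With
+  * use_data_addr or use_4K_compat, remember how far "offset" sits
+  * into its page so it can be reported back via data_offset.
+  */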
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat) {
+ offset_in_page &= ~((signed)(0xFFF));
+ }
+ } else {
+ offset_in_page = 0;
+ }
+
+ kr = vm_map_copyin_internal(target_map,
+ map_start,
+ map_size,
+ VM_MAP_COPYIN_ENTRY_LIST,
+ &copy);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return KERN_FAILURE;
+ }
+
+ user_entry->backing.copy = copy;
+ user_entry->internal = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_copy = TRUE;
+ user_entry->offset = 0;
+ user_entry->protection = protections;
+ user_entry->size = map_size;
+ user_entry->data_offset = offset_in_page;
+
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
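+ /*
+  * MAP_MEM_VM_SHARE: like the VM_COPY case, but the range is
+  * extracted with vm_map_copy_extract() so the entry refers to the
+  * same backing objects (shared, not snapshotted); the requested
+  * protections are then validated against what the extraction
+  * actually yielded.
+  */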
+ if (permission & MAP_MEM_VM_SHARE) {
+ vm_map_copy_t copy;
+ vm_prot_t cur_prot, max_prot;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ map_end = vm_map_round_page(offset + *size, PAGE_MASK);
+ map_size = map_end - map_start;
+ if (use_data_addr || use_4K_compat) {
+ offset_in_page = offset - map_start;
+ if (use_4K_compat) {
+ offset_in_page &= ~((signed)(0xFFF));
+ }
+ } else {
+ offset_in_page = 0;
+ }
+
+ cur_prot = VM_PROT_ALL;
+ kr = vm_map_copy_extract(target_map,
+ map_start,
+ map_size,
+ &copy,
+ &cur_prot,
+ &max_prot);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ if (mask_protections) {
+ /*
+ * We just want as much of "original_protections"
+ * as we can get out of the actual "cur_prot".
+ */
+ protections &= cur_prot;
+ if (protections == VM_PROT_NONE) {
+ /* no access at all: fail */
+ vm_map_copy_discard(copy);
+ return KERN_PROTECTION_FAILURE;
+ }
+ } else {
+ /*
+ * We want exactly "original_protections"
+ * out of "cur_prot".
+ */
+ if ((cur_prot & protections) != protections) {
+ vm_map_copy_discard(copy);
+ return KERN_PROTECTION_FAILURE;
+ }
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return KERN_FAILURE;
+ }
+
+ user_entry->backing.copy = copy;
+ user_entry->internal = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_copy = TRUE;
+ user_entry->offset = 0;
+ user_entry->protection = protections;
+ user_entry->size = map_size;
+ user_entry->data_offset = offset_in_page;
+
+ *size = CAST_DOWN(vm_size_t, (user_entry->size -
+ user_entry->data_offset));