+ map_offset = vm_map_trunc_page(offset, PAGE_MASK);
+
+ if (permission & MAP_MEM_ONLY) {
+ boolean_t parent_is_object;
+
+ map_size = vm_map_round_page(*size, PAGE_MASK);
+
+ if (use_data_addr || parent_entry == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ parent_is_object = !(parent_entry->is_sub_map ||
+ parent_entry->is_pager);
+ object = parent_entry->backing.object;
+ if(parent_is_object && object != VM_OBJECT_NULL)
+ wimg_mode = object->wimg_bits;
+ else
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ if((access != GET_MAP_MEM(parent_entry->protection)) &&
+ !(parent_entry->protection & VM_PROT_WRITE)) {
+ return KERN_INVALID_RIGHT;
+ }
+ if(access == MAP_MEM_IO) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_IO;
+ } else if (access == MAP_MEM_COPYBACK) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_INNERWBACK;
+ } else if (access == MAP_MEM_WTHRU) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_WTHRU;
+ } else if (access == MAP_MEM_WCOMB) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_WCOMB;
+ }
+ if (parent_is_object && object &&
+ (access != MAP_MEM_NOOP) &&
+ (!(object->nophyscache))) {
+
+ if (object->wimg_bits != wimg_mode) {
+ vm_object_lock(object);
+ vm_object_change_wimg_mode(object, wimg_mode);
+ vm_object_unlock(object);
+ }
+ }
+ if (object_handle)
+ *object_handle = IP_NULL;
+ return KERN_SUCCESS;
+ } else if (permission & MAP_MEM_NAMED_CREATE) {
+ map_size = vm_map_round_page(*size, PAGE_MASK);
+
+ if (use_data_addr) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Force the creation of the VM object now.
+ */
+ if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
+ /*
+ * LP64todo - for now, we can only allocate 4GB-4096
+ * internal objects because the default pager can't
+ * page bigger ones. Remove this when it can.
+ */
+ kr = KERN_FAILURE;
+ goto make_mem_done;
+ }
+
+ object = vm_object_allocate(map_size);
+ assert(object != VM_OBJECT_NULL);
+
+ if (permission & MAP_MEM_PURGABLE) {
+ if (! (permission & VM_PROT_WRITE)) {
+ /* if we can't write, we can't purge */
+ vm_object_deallocate(object);
+ kr = KERN_INVALID_ARGUMENT;
+ goto make_mem_done;
+ }
+ object->purgable = VM_PURGABLE_NONVOLATILE;
+ assert(object->vo_purgeable_owner == NULL);
+ assert(object->resident_page_count == 0);
+ assert(object->wired_page_count == 0);
+ vm_object_lock(object);
+ vm_purgeable_nonvolatile_enqueue(object,
+ current_task());
+ vm_object_unlock(object);
+ }
+
+ /*
+ * The VM object is brand new and nobody else knows about it,
+ * so we don't need to lock it.
+ */
+
+ wimg_mode = object->wimg_bits;
+ if (access == MAP_MEM_IO) {
+ wimg_mode = VM_WIMG_IO;
+ } else if (access == MAP_MEM_COPYBACK) {
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ wimg_mode = VM_WIMG_INNERWBACK;
+ } else if (access == MAP_MEM_WTHRU) {
+ wimg_mode = VM_WIMG_WTHRU;
+ } else if (access == MAP_MEM_WCOMB) {
+ wimg_mode = VM_WIMG_WCOMB;
+ }
+ if (access != MAP_MEM_NOOP) {
+ object->wimg_bits = wimg_mode;
+ }
+ /* the object has no pages, so no WIMG bits to update here */
+
+ /*
+ * XXX
+ * We use this path when we want to make sure that
+ * nobody messes with the object (coalesce, for
+ * example) before we map it.
+ * We might want to use these objects for transposition via
+ * vm_object_transpose() too, so we don't want any copy or
+ * shadow objects either...
+ */
+ object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ object->true_share = TRUE;
+
+ user_entry->backing.object = object;
+ user_entry->internal = TRUE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_pager = FALSE;
+ user_entry->offset = 0;
+ user_entry->data_offset = 0;
+ user_entry->protection = protections;
+ SET_MAP_MEM(access, user_entry->protection);
+ user_entry->size = map_size;
+
+ /* user_object pager and internal fields are not used */
+ /* when the object field is filled in. */
+
+ *size = CAST_DOWN(vm_size_t, map_size);
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
+ if (permission & MAP_MEM_VM_COPY) {
+ vm_map_copy_t copy;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (use_data_addr) {
+ map_size = (vm_map_round_page(offset + *size,
+ PAGE_MASK) -
+ map_offset);
+ offset_in_page = offset - map_offset;
+ } else {
+ map_size = vm_map_round_page(*size, PAGE_MASK);
+ offset_in_page = 0;
+ }
+
+ kr = vm_map_copyin(target_map,
+ map_offset,
+ map_size,
+ FALSE,
+ &copy);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ vm_map_copy_discard(copy);
+ return KERN_FAILURE;
+ }
+
+ user_entry->backing.copy = copy;
+ user_entry->internal = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_pager = FALSE;
+ user_entry->is_copy = TRUE;
+ user_entry->offset = 0;
+ user_entry->protection = protections;
+ user_entry->size = map_size;
+ user_entry->data_offset = offset_in_page;
+
+ *size = CAST_DOWN(vm_size_t, map_size);
+ *object_handle = user_handle;
+ return KERN_SUCCESS;
+ }
+
+ if (permission & MAP_MEM_VM_SHARE) {
+ vm_map_copy_t copy;
+ vm_prot_t cur_prot, max_prot;
+
+ if (target_map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }