+ if (parent_handle != IP_NULL &&
+ ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
+ parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
+ } else {
+ parent_entry = NULL;
+ }
+
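+ /*
+ * A named entry that wraps a vm_map_copy ("is_copy") cannot serve
+ * as the parent of a new memory entry.
+ */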
+ if (parent_entry && parent_entry->is_copy) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
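+ /*
+ * Decode the permission word: the low bits carry the requested
+ * protections, VM_PROT_IS_MASK says to treat them as a mask, and
+ * the remaining bits select the cache attribute and whether the
+ * offset is a data address.
+ */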
+ original_protections = permission & VM_PROT_ALL;
+ protections = original_protections;
+ mask_protections = permission & VM_PROT_IS_MASK;
+ access = GET_MAP_MEM(permission);
+ use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0);
+
+ user_handle = IP_NULL;
+ user_entry = NULL;
+
+ map_offset = vm_map_trunc_page(offset, PAGE_MASK);
+
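+ /*
+ * MAP_MEM_ONLY: no new entry is created; only the cache
+ * attributes of the parent entry's backing object are updated.
+ */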
+ if (permission & MAP_MEM_ONLY) {
+ boolean_t parent_is_object;
+
+ map_size = vm_map_round_page(*size, PAGE_MASK);
+
+ if (use_data_addr || parent_entry == NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ parent_is_object = !(parent_entry->is_sub_map ||
+ parent_entry->is_pager);
+ object = parent_entry->backing.object;
+ if (parent_is_object && object != VM_OBJECT_NULL) {
+ wimg_mode = object->wimg_bits;
+ } else {
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ }
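+ /*
+ * Changing the cache attribute requires write permission on
+ * the parent entry.
+ */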
+ if ((access != GET_MAP_MEM(parent_entry->protection)) &&
+ !(parent_entry->protection & VM_PROT_WRITE)) {
+ return KERN_INVALID_RIGHT;
+ }
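+ /* Record the new cache attribute and pick the matching WIMG bits. */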
+ if (access == MAP_MEM_IO) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_IO;
+ } else if (access == MAP_MEM_COPYBACK) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_INNERWBACK;
+ } else if (access == MAP_MEM_WTHRU) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_WTHRU;
+ } else if (access == MAP_MEM_WCOMB) {
+ SET_MAP_MEM(access, parent_entry->protection);
+ wimg_mode = VM_WIMG_WCOMB;
+ }
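+ /*
+ * Push the new caching mode down to the backing object and any
+ * pages it already holds, unless physical caching is disabled
+ * for that object.
+ */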
+ if (parent_is_object && object &&
+ (access != MAP_MEM_NOOP) &&
+ (!(object->nophyscache))) {
+
+ if (object->wimg_bits != wimg_mode) {
+ vm_object_lock(object);
+ vm_object_change_wimg_mode(object, wimg_mode);
+ vm_object_unlock(object);
+ }
+ }
+ if (object_handle) {
+ *object_handle = IP_NULL;
+ }
+ return KERN_SUCCESS;
+ } else if (permission & MAP_MEM_NAMED_CREATE) {
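+ /*
+ * MAP_MEM_NAMED_CREATE: back the new entry with a freshly
+ * allocated anonymous VM object rather than an existing range.
+ */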
+ map_size = vm_map_round_page(*size, PAGE_MASK);
+
+ if (use_data_addr) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
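+ /* Allocate the named entry and the port that will name it. */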
+ kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ if (kr != KERN_SUCCESS) {
+ return KERN_FAILURE;
+ }
+
+ /*
+ * Force the creation of the VM object now.
+ */
+ if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
+ /*
+ * LP64todo - for now, we can only allocate 4GB-4096
+ * internal objects because the default pager can't
+ * page bigger ones. Remove this when it can.
+ */
+ kr = KERN_FAILURE;
+ goto make_mem_done;
+ }
+
+ object = vm_object_allocate(map_size);
+ assert(object != VM_OBJECT_NULL);
+
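+ /*
+ * Purgeable objects must be writable; the new object is
+ * registered with the current task for purgeable accounting.
+ */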
+ if (permission & MAP_MEM_PURGABLE) {
+ if (!(permission & VM_PROT_WRITE)) {
+ /* if we can't write, we can't purge */
+ vm_object_deallocate(object);
+ kr = KERN_INVALID_ARGUMENT;
+ goto make_mem_done;
+ }
+ object->purgable = VM_PURGABLE_NONVOLATILE;
+ assert(object->vo_purgeable_owner == NULL);
+ assert(object->resident_page_count == 0);
+ assert(object->wired_page_count == 0);
+ vm_object_lock(object);
+ vm_purgeable_nonvolatile_enqueue(object,
+ current_task());
+ vm_object_unlock(object);
+ }
+
+ /*
+ * The VM object is brand new and nobody else knows about it,
+ * so we don't need to lock it.
+ */
+
+ wimg_mode = object->wimg_bits;
+ if (access == MAP_MEM_IO) {
+ wimg_mode = VM_WIMG_IO;
+ } else if (access == MAP_MEM_COPYBACK) {
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_INNERWBACK) {
+ wimg_mode = VM_WIMG_INNERWBACK;
+ } else if (access == MAP_MEM_WTHRU) {
+ wimg_mode = VM_WIMG_WTHRU;
+ } else if (access == MAP_MEM_WCOMB) {
+ wimg_mode = VM_WIMG_WCOMB;
+ }
+ if (access != MAP_MEM_NOOP) {
+ object->wimg_bits = wimg_mode;
+ }
+ /* the object has no pages, so no WIMG bits to update here */
+
+ /*
+ * XXX
+ * We use this path when we want to make sure that
+ * nobody messes with the object (coalesce, for
+ * example) before we map it.
+ * We might want to use these objects for transposition via
+ * vm_object_transpose() too, so we don't want any copy or
+ * shadow objects either...
+ */
+ object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ object->true_share = TRUE;
+
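+ /* Point the new named entry at the object and record its attributes. */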
+ user_entry->backing.object = object;
+ user_entry->internal = TRUE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_pager = FALSE;
+ user_entry->offset = 0;
+ user_entry->data_offset = 0;
+ user_entry->protection = protections;
+ SET_MAP_MEM(access, user_entry->protection);
+ user_entry->size = map_size;
+
+ /*
+ * The user_object pager and internal fields are not used
+ * when the object field is filled in.
+ */