+ if (vmk_flags.vmkf_beyond_max) {
+ return KERN_NOT_SUPPORTED;
+ } else {
+ effective_max_offset = map->max_offset;
+ }
+
+ if (size == 0 ||
+ (offset & FOURK_PAGE_MASK) != 0) {
+ *address = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+
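+ /* RETURN(): set "result" and branch to the common cleanup path at "BailOut" */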
+#define RETURN(value) { result = value; goto BailOut; }
+
+ assert(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK));
+ assert(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK));
+
+ if (!anywhere && overwrite) {
+ return KERN_NOT_SUPPORTED;
+ }
+ if (!anywhere && overwrite) {
+ /*
+ * Create a temporary VM map to hold the old mappings in the
+ * affected area while we create the new one.
+ * This avoids releasing the VM map lock in
+ * vm_map_entry_delete() and allows atomicity
+ * when we want to replace some mappings with a new one.
+ * It also allows us to restore the old VM mappings if the
+ * new mapping fails.
+ */
+ zap_old_map = vm_map_create(PMAP_NULL,
+ *address,
+ *address + size,
+ map->hdr.entries_pageable);
+ vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map));
+ vm_map_disable_hole_optimization(zap_old_map);
+ }
+
+ fourk_start = *address;
+ fourk_size = size;
+ fourk_end = fourk_start + fourk_size;
+
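+ /* widen the 4K-aligned request to the map's native page boundaries */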
+ start = vm_map_trunc_page(*address, VM_MAP_PAGE_MASK(map));
+ end = vm_map_round_page(fourk_end, VM_MAP_PAGE_MASK(map));
+ size = end - start;
+
+ if (anywhere) {
+ return KERN_NOT_SUPPORTED;
+ } else {
+ /*
+ * Verify that:
+ * the address doesn't itself violate
+ * the mask requirement.
+ */
+
+ vm_map_lock(map);
+ map_locked = TRUE;
+ if ((start & mask) != 0) {
+ RETURN(KERN_NO_SPACE);
+ }
+
+ /*
+ * ... the address is within bounds
+ */
+
+ end = start + size;
+
+ if ((start < effective_min_offset) ||
+ (end > effective_max_offset) ||
+ (start >= end)) {
+ RETURN(KERN_INVALID_ADDRESS);
+ }
+
+ if (overwrite && zap_old_map != VM_MAP_NULL) {
+ /*
+ * Fixed mapping and "overwrite" flag: attempt to
+ * remove all existing mappings in the specified
+ * address range, saving them in our "zap_old_map".
+ */
+ (void) vm_map_delete(map, start, end,
+ (VM_MAP_REMOVE_SAVE_ENTRIES |
+ VM_MAP_REMOVE_NO_MAP_ALIGN),
+ zap_old_map);
+ }
+
+ /*
+ * ... the starting address isn't allocated
+ */
+ if (vm_map_lookup_entry(map, start, &entry)) {
+ vm_object_t cur_object, shadow_object;
+
+ /*
+ * We might already have some 4K mappings
+ * in a 16K page here.
+ */
+
+ if (entry->vme_end - entry->vme_start
+ != SIXTEENK_PAGE_SIZE) {
+ RETURN(KERN_NO_SPACE);
+ }
+ if (entry->is_sub_map) {
+ RETURN(KERN_NO_SPACE);
+ }
+ if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
+ RETURN(KERN_NO_SPACE);
+ }
+
+ /* go all the way down the shadow chain */
+ cur_object = VME_OBJECT(entry);
+ vm_object_lock(cur_object);
+ while (cur_object->shadow != VM_OBJECT_NULL) {
+ shadow_object = cur_object->shadow;
+ vm_object_lock(shadow_object);
+ vm_object_unlock(cur_object);
+ cur_object = shadow_object;
+ shadow_object = VM_OBJECT_NULL;
+ }
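+ /* only reuse this mapping if the terminal object is backed by a "4K" pager */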
+ if (cur_object->internal ||
+ cur_object->pager == NULL) {
+ vm_object_unlock(cur_object);
+ RETURN(KERN_NO_SPACE);
+ }
+ if (cur_object->pager->mo_pager_ops
+ != &fourk_pager_ops) {
+ vm_object_unlock(cur_object);
+ RETURN(KERN_NO_SPACE);
+ }
+ fourk_object = cur_object;
+ fourk_mem_obj = fourk_object->pager;
+
+ /* keep the "4K" object alive */
+ vm_object_reference_locked(fourk_object);
+ vm_object_unlock(fourk_object);
+
+ /* merge permissions */
+ entry->protection |= cur_protection;
+ entry->max_protection |= max_protection;
+ if ((entry->protection & (VM_PROT_WRITE |
+ VM_PROT_EXECUTE)) ==
+ (VM_PROT_WRITE | VM_PROT_EXECUTE) &&
+ fourk_binary_compatibility_unsafe &&
+ fourk_binary_compatibility_allow_wx) {
+ /* write+execute: need to be "jit" */
+ entry->used_for_jit = TRUE;
+ }
+
+ goto map_in_fourk_pager;
+ }
+
+ /*
+ * ... the next region doesn't overlap the
+ * end point.
+ */
+
+ if ((entry->vme_next != vm_map_to_entry(map)) &&
+ (entry->vme_next->vme_start < end)) {
+ RETURN(KERN_NO_SPACE);
+ }
+ }
+
+ /*
+ * At this point,
+ * "start" and "end" should define the endpoints of the
+ * available new range, and
+ * "entry" should refer to the region before the new
+ * range, and
+ *
+ * the map should be locked.
+ */
+
+ /* create a new "4K" pager */
+ fourk_mem_obj = fourk_pager_create();
+ fourk_object = fourk_pager_to_vm_object(fourk_mem_obj);
+ assert(fourk_object);
+
+ /* keep the "4" object alive */
+ vm_object_reference(fourk_object);
+
+ /* create a "copy" object, to map the "4K" object copy-on-write */
+ fourk_copy = TRUE;
+ result = vm_object_copy_strategically(fourk_object,
+ 0,
+ end - start,
+ &copy_object,
+ &copy_offset,
+ &fourk_copy);
+ assert(result == KERN_SUCCESS);
+ assert(copy_object != VM_OBJECT_NULL);
+ assert(copy_offset == 0);
+
+ /* take a reference on the copy object, for this mapping */
+ vm_object_reference(copy_object);
+
+ /* map the "4K" pager's copy object */
+ new_entry =
+ vm_map_entry_insert(map, entry,
+ vm_map_trunc_page(start,
+ VM_MAP_PAGE_MASK(map)),
+ vm_map_round_page(end,
+ VM_MAP_PAGE_MASK(map)),
+ copy_object,
+ 0, /* offset */
+ FALSE, /* needs_copy */
+ FALSE, FALSE,
+ cur_protection, max_protection,
+ VM_BEHAVIOR_DEFAULT,
+ ((entry_for_jit)
+ ? VM_INHERIT_NONE
+ : inheritance),
+ 0,
+ no_cache,
+ permanent,
+ superpage_size,
+ clear_map_aligned,
+ is_submap,
+ FALSE, /* jit */
+ alias);
+ entry = new_entry;
+
+#if VM_MAP_DEBUG_FOURK
+ if (vm_map_debug_fourk) {
+ printf("FOURK_PAGER: map %p [0x%llx:0x%llx] new pager %p\n",
+ map,
+ (uint64_t) entry->vme_start,
+ (uint64_t) entry->vme_end,
+ fourk_mem_obj);
+ }
+#endif /* VM_MAP_DEBUG_FOURK */
+
+ new_mapping_established = TRUE;
+
+map_in_fourk_pager:
+ /* "map" the original "object" where it belongs in the "4K" pager */
+ fourk_pager_offset = (fourk_start & SIXTEENK_PAGE_MASK);
+ fourk_pager_index_start = (int) (fourk_pager_offset / FOURK_PAGE_SIZE);
+ if (fourk_size > SIXTEENK_PAGE_SIZE) {
+ fourk_pager_index_num = 4;
+ } else {
+ fourk_pager_index_num = (int) (fourk_size / FOURK_PAGE_SIZE);
+ }
+ if (fourk_pager_index_start + fourk_pager_index_num > 4) {
+ fourk_pager_index_num = 4 - fourk_pager_index_start;
+ }
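+ /* populate each covered 4K slot, replacing whatever object was previously mapped there */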
+ for (cur_idx = 0;
+ cur_idx < fourk_pager_index_num;
+ cur_idx++) {
+ vm_object_t old_object;
+ vm_object_offset_t old_offset;
+
+ kr = fourk_pager_populate(fourk_mem_obj,
+ TRUE, /* overwrite */
+ fourk_pager_index_start + cur_idx,
+ object,
+ (object
+ ? (offset +
+ (cur_idx * FOURK_PAGE_SIZE))
+ : 0),
+ &old_object,
+ &old_offset);
+#if VM_MAP_DEBUG_FOURK
+ if (vm_map_debug_fourk) {
+ if (old_object == (vm_object_t) -1 &&
+ old_offset == (vm_object_offset_t) -1) {
+ printf("FOURK_PAGER: map %p [0x%llx:0x%llx] "
+ "pager [%p:0x%llx] "
+ "populate[%d] "
+ "[object:%p,offset:0x%llx]\n",
+ map,
+ (uint64_t) entry->vme_start,
+ (uint64_t) entry->vme_end,
+ fourk_mem_obj,
+ VME_OFFSET(entry),
+ fourk_pager_index_start + cur_idx,
+ object,
+ (object
+ ? (offset + (cur_idx * FOURK_PAGE_SIZE))
+ : 0));
+ } else {
+ printf("FOURK_PAGER: map %p [0x%llx:0x%llx] "
+ "pager [%p:0x%llx] "
+ "populate[%d] [object:%p,offset:0x%llx] "
+ "old [%p:0x%llx]\n",
+ map,
+ (uint64_t) entry->vme_start,
+ (uint64_t) entry->vme_end,
+ fourk_mem_obj,
+ VME_OFFSET(entry),
+ fourk_pager_index_start + cur_idx,
+ object,
+ (object
+ ? (offset + (cur_idx * FOURK_PAGE_SIZE))
+ : 0),
+ old_object,
+ old_offset);
+ }
+ }
+#endif /* VM_MAP_DEBUG_FOURK */
+
+ assert(kr == KERN_SUCCESS);
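+ /* retain the new backing object and release the one we displaced, if any */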
+ if (object != old_object &&
+ object != VM_OBJECT_NULL &&
+ object != (vm_object_t) -1) {
+ vm_object_reference(object);
+ }
+ if (object != old_object &&
+ old_object != VM_OBJECT_NULL &&
+ old_object != (vm_object_t) -1) {
+ vm_object_deallocate(old_object);
+ }
+ }
+
+BailOut:
+ assert(map_locked == TRUE);
+
+ if (fourk_object != VM_OBJECT_NULL) {
+ vm_object_deallocate(fourk_object);
+ fourk_object = VM_OBJECT_NULL;
+ fourk_mem_obj = MEMORY_OBJECT_NULL;
+ }
+
+ if (result == KERN_SUCCESS) {
+ vm_prot_t pager_prot;
+ memory_object_t pager;
+
+#if DEBUG
+ if (pmap_empty &&
+ !(vmk_flags.vmkf_no_pmap_check)) {
+ assert(vm_map_pmap_is_empty(map,
+ *address,
+ *address+size));
+ }
+#endif /* DEBUG */
+
+ /*
+ * For "named" VM objects, let the pager know that the
+ * memory object is being mapped. Some pagers need to keep
+ * track of this, to know when they can reclaim the memory
+ * object, for example.
+ * VM calls memory_object_map() for each mapping (specifying
+ * the protection of each mapping) and calls
+ * memory_object_last_unmap() when all the mappings are gone.
+ */
+ pager_prot = max_protection;
+ if (needs_copy) {
+ /*
+ * Copy-On-Write mapping: won't modify
+ * the memory object.
+ */
+ pager_prot &= ~VM_PROT_WRITE;
+ }
+ if (!is_submap &&
+ object != VM_OBJECT_NULL &&
+ object->named &&
+ object->pager != MEMORY_OBJECT_NULL) {
+ vm_object_lock(object);
+ pager = object->pager;
+ if (object->named &&
+ pager != MEMORY_OBJECT_NULL) {
+ assert(object->pager_ready);
+ vm_object_mapping_wait(object, THREAD_UNINT);
+ vm_object_mapping_begin(object);
+ vm_object_unlock(object);
+
+ kr = memory_object_map(pager, pager_prot);
+ assert(kr == KERN_SUCCESS);
+
+ vm_object_lock(object);
+ vm_object_mapping_end(object);
+ }
+ vm_object_unlock(object);
+ }
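+ /* likewise notify the "4K" object's pager, if it is "named" */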
+ if (!is_submap &&
+ fourk_object != VM_OBJECT_NULL &&
+ fourk_object->named &&
+ fourk_object->pager != MEMORY_OBJECT_NULL) {
+ vm_object_lock(fourk_object);
+ pager = fourk_object->pager;
+ if (fourk_object->named &&
+ pager != MEMORY_OBJECT_NULL) {
+ assert(fourk_object->pager_ready);
+ vm_object_mapping_wait(fourk_object,
+ THREAD_UNINT);
+ vm_object_mapping_begin(fourk_object);
+ vm_object_unlock(fourk_object);
+
+ kr = memory_object_map(pager, VM_PROT_READ);
+ assert(kr == KERN_SUCCESS);
+
+ vm_object_lock(fourk_object);
+ vm_object_mapping_end(fourk_object);
+ }
+ vm_object_unlock(fourk_object);
+ }
+ }
+
+ assert(map_locked == TRUE);
+
+ if (!keep_map_locked) {
+ vm_map_unlock(map);
+ map_locked = FALSE;
+ }
+
+ /*
+ * We can't hold the map lock if we enter this block.
+ */
+
+ if (result == KERN_SUCCESS) {
+
+ /* Wire down the new entry if the user
+ * requested all new map entries be wired.
+ */
+ if ((map->wiring_required) || (superpage_size)) {
+ assert(!keep_map_locked);
+ pmap_empty = FALSE; /* pmap won't be empty */
+ kr = vm_map_wire_kernel(map, start, end,
+ new_entry->protection, VM_KERN_MEMORY_MLOCK,
+ TRUE);
+ result = kr;
+ }
+
+ }
+
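+ /* on failure, tear down anything we just mapped and try to restore the old mappings */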
+ if (result != KERN_SUCCESS) {
+ if (new_mapping_established) {
+ /*
+ * We have to get rid of the new mappings since we
+ * won't make them available to the user.
+ * Try to do that atomically, to minimize the risk
+ * that someone else creates new mappings in that range.
+ */
+ zap_new_map = vm_map_create(PMAP_NULL,
+ *address,
+ *address + size,
+ map->hdr.entries_pageable);
+ vm_map_set_page_shift(zap_new_map,
+ VM_MAP_PAGE_SHIFT(map));
+ vm_map_disable_hole_optimization(zap_new_map);
+
+ if (!map_locked) {
+ vm_map_lock(map);
+ map_locked = TRUE;
+ }
+ (void) vm_map_delete(map, *address, *address+size,
+ (VM_MAP_REMOVE_SAVE_ENTRIES |
+ VM_MAP_REMOVE_NO_MAP_ALIGN),
+ zap_new_map);
+ }
+ if (zap_old_map != VM_MAP_NULL &&
+ zap_old_map->hdr.nentries != 0) {
+ vm_map_entry_t entry1, entry2;
+
+ /*
+ * The new mapping failed. Attempt to restore
+ * the old mappings, saved in the "zap_old_map".
+ */
+ if (!map_locked) {
+ vm_map_lock(map);
+ map_locked = TRUE;
+ }
+
+ /* first check if the coast is still clear */
+ start = vm_map_first_entry(zap_old_map)->vme_start;
+ end = vm_map_last_entry(zap_old_map)->vme_end;
+ if (vm_map_lookup_entry(map, start, &entry1) ||
+ vm_map_lookup_entry(map, end, &entry2) ||
+ entry1 != entry2) {
+ /*
+ * Part of that range has already been
+ * re-mapped: we can't restore the old
+ * mappings...
+ */
+ vm_map_enter_restore_failures++;
+ } else {
+ /*
+ * Transfer the saved map entries from
+ * "zap_old_map" to the original "map",
+ * inserting them all after "entry1".
+ */
+ for (entry2 = vm_map_first_entry(zap_old_map);
+ entry2 != vm_map_to_entry(zap_old_map);
+ entry2 = vm_map_first_entry(zap_old_map)) {
+ vm_map_size_t entry_size;
+
+ entry_size = (entry2->vme_end -
+ entry2->vme_start);
+ vm_map_store_entry_unlink(zap_old_map,
+ entry2);
+ zap_old_map->size -= entry_size;
+ vm_map_store_entry_link(map, entry1, entry2);
+ map->size += entry_size;
+ entry1 = entry2;
+ }
+ if (map->wiring_required) {
+ /*
+ * XXX TODO: we should rewire the
+ * old pages here...
+ */
+ }
+ vm_map_enter_restore_successes++;
+ }
+ }
+ }
+
+ /*
+ * The caller is responsible for releasing the lock if it requested to
+ * keep the map locked.
+ */
+ if (map_locked && !keep_map_locked) {
+ vm_map_unlock(map);
+ }
+
+ /*
+ * Get rid of the "zap_maps" and all the map entries that
+ * they may still contain.
+ */
+ if (zap_old_map != VM_MAP_NULL) {
+ vm_map_destroy(zap_old_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
+ zap_old_map = VM_MAP_NULL;
+ }
+ if (zap_new_map != VM_MAP_NULL) {
+ vm_map_destroy(zap_new_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
+ zap_new_map = VM_MAP_NULL;
+ }
+
+ return result;
+
+#undef RETURN
+}
+#endif /* __arm64__ */
+
+/*
+ * Counters for the prefault optimization.
+ */
+int64_t vm_prefault_nb_pages = 0;
+int64_t vm_prefault_nb_bailout = 0;
+
+static kern_return_t
+vm_map_enter_mem_object_helper(
+ vm_map_t target_map,
+ vm_map_offset_t *address,
+ vm_map_size_t initial_size,
+ vm_map_offset_t mask,
+ int flags,
+ vm_map_kernel_flags_t vmk_flags,
+ vm_tag_t tag,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance,
+ upl_page_list_ptr_t page_list,
+ unsigned int page_list_count)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ vm_object_t object;
+ vm_object_size_t size;
+ kern_return_t result;
+ boolean_t mask_cur_protection, mask_max_protection;
+ boolean_t kernel_prefault, try_prefault = (page_list_count != 0);
+ vm_map_offset_t offset_in_mapping = 0;
+#if __arm64__
+ boolean_t fourk = vmk_flags.vmkf_fourk;
+#endif /* __arm64__ */
+
+ assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused);
+
+ mask_cur_protection = cur_protection & VM_PROT_IS_MASK;
+ mask_max_protection = max_protection & VM_PROT_IS_MASK;
+ cur_protection &= ~VM_PROT_IS_MASK;
+ max_protection &= ~VM_PROT_IS_MASK;
+
+ /*
+ * Check arguments for validity
+ */
+ if ((target_map == VM_MAP_NULL) ||
+ (cur_protection & ~VM_PROT_ALL) ||
+ (max_protection & ~VM_PROT_ALL) ||
+ (inheritance > VM_INHERIT_LAST_VALID) ||
+ (try_prefault && (copy || !page_list)) ||
+ initial_size == 0) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
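+ /* "fourk" mappings are 4K-aligned; everything else uses the target map's page size */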
+#if __arm64__
+ if (fourk) {
+ map_addr = vm_map_trunc_page(*address, FOURK_PAGE_MASK);
+ map_size = vm_map_round_page(initial_size, FOURK_PAGE_MASK);
+ } else
+#endif /* __arm64__ */
+ {
+ map_addr = vm_map_trunc_page(*address,
+ VM_MAP_PAGE_MASK(target_map));
+ map_size = vm_map_round_page(initial_size,
+ VM_MAP_PAGE_MASK(target_map));
+ }
+ size = vm_object_round_page(initial_size);
+
+ /*
+ * Find the vm object (if any) corresponding to this port.
+ */
+ if (!IP_VALID(port)) {
+ object = VM_OBJECT_NULL;
+ offset = 0;
+ copy = FALSE;
+ } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
+ vm_named_entry_t named_entry;
+
+ named_entry = (vm_named_entry_t) port->ip_kobject;
+
+ if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_RETURN_4K_DATA_ADDR)) {
+ offset += named_entry->data_offset;
+ }
+
+ /* a few checks to make sure the user is obeying the rules */
+ if (size == 0) {
+ if (offset >= named_entry->size)
+ return KERN_INVALID_RIGHT;
+ size = named_entry->size - offset;
+ }
+ if (mask_max_protection) {
+ max_protection &= named_entry->protection;
+ }
+ if (mask_cur_protection) {
+ cur_protection &= named_entry->protection;
+ }
+ if ((named_entry->protection & max_protection) !=
+ max_protection)
+ return KERN_INVALID_RIGHT;
+ if ((named_entry->protection & cur_protection) !=
+ cur_protection)
+ return KERN_INVALID_RIGHT;
+ if (offset + size < offset) {
+ /* overflow */
+ return KERN_INVALID_ARGUMENT;
+ }
+ if (named_entry->size < (offset + initial_size)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (named_entry->is_copy) {
+ /* for a vm_map_copy, we can only map it whole */
+ if ((size != named_entry->size) &&
+ (vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(target_map)) ==
+ named_entry->size)) {
+ /* XXX FBDP use the rounded size... */
+ size = vm_map_round_page(
+ size,
+ VM_MAP_PAGE_MASK(target_map));
+ }
+
+ if (!(flags & VM_FLAGS_ANYWHERE) &&
+ (offset != 0 ||
+ size != named_entry->size)) {
+ /*
+ * XXX for a mapping at a "fixed" address,
+ * we can't trim after mapping the whole
+ * memory entry, so reject a request for a
+ * partial mapping.
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+ }
+
+ /* the caller's "offset" parameter is relative to the start of the */
+ /* named entry; convert it into an offset within the backing object */
+ offset = offset + named_entry->offset;
+
+ if (! VM_MAP_PAGE_ALIGNED(size,
+ VM_MAP_PAGE_MASK(target_map))) {
+ /*
+ * Let's not map more than requested;
+ * vm_map_enter() will handle this "not map-aligned"
+ * case.
+ */
+ map_size = size;
+ }
+
+ named_entry_lock(named_entry);
+ if (named_entry->is_sub_map) {
+ vm_map_t submap;
+
+ if (flags & (VM_FLAGS_RETURN_DATA_ADDR |
+ VM_FLAGS_RETURN_4K_DATA_ADDR)) {
+ panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap.");
+ }
+
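+ /* take a reference on the submap so it stays around while we map it */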
+ submap = named_entry->backing.map;
+ vm_map_lock(submap);
+ vm_map_reference(submap);
+ vm_map_unlock(submap);
+ named_entry_unlock(named_entry);
+
+ vmk_flags.vmkf_submap = TRUE;
+
+ result = vm_map_enter(target_map,
+ &map_addr,
+ map_size,
+ mask,
+ flags,
+ vmk_flags,
+ tag,
+ (vm_object_t) submap,
+ offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (result != KERN_SUCCESS) {
+ vm_map_deallocate(submap);
+ } else {
+ /*
+ * No need to lock "submap" just to check its
+ * "mapped" flag: that flag is never reset
+ * once it's been set and if we race, we'll
+ * just end up setting it twice, which is OK.