+ zone_t old_zone;
+ vm_map_entry_t next, new;
+
+ /*
+ * Find the zone that the copies were allocated from
+ */
+
+ entry = vm_map_copy_first_entry(copy);
+
+ /*
+ * Reinitialize the copy so that vm_map_copy_entry_link
+ * will work.
+ */
+ vm_map_store_copy_reset(copy, entry);
+ copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+
+ /*
+ * Copy each entry.
+ */
+ while (entry != vm_map_copy_to_entry(copy)) {
+ new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+ vm_map_entry_copy_full(new, entry);
+ new->use_pmap = FALSE; /* clr address space specifics */
+ vm_map_copy_entry_link(copy,
+ vm_map_copy_last_entry(copy),
+ new);
+ next = entry->vme_next;
+ old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone;
+ zfree(old_zone, entry);
+ entry = next;
+ }
+ }
+
+ /*
+ * Adjust the addresses in the copy chain, and
+ * reset the region attributes.
+ */
+
+ for (entry = vm_map_copy_first_entry(copy);
+ entry != vm_map_copy_to_entry(copy);
+ entry = entry->vme_next) {
+ if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) {
+ /*
+ * We're injecting this copy entry into a map that
+ * has the standard page alignment, so clear
+ * "map_aligned" (which might have been inherited
+ * from the original map entry).
+ */
+ entry->map_aligned = FALSE;
+ }
+
+ entry->vme_start += adjustment;
+ entry->vme_end += adjustment;
+
+ if (entry->map_aligned) {
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_start,
+ VM_MAP_PAGE_MASK(dst_map)));
+ assert(VM_MAP_PAGE_ALIGNED(entry->vme_end,
+ VM_MAP_PAGE_MASK(dst_map)));
+ }
+
+ entry->inheritance = VM_INHERIT_DEFAULT;
+ entry->protection = VM_PROT_DEFAULT;
+ entry->max_protection = VM_PROT_ALL;
+ entry->behavior = VM_BEHAVIOR_DEFAULT;
+
+ /*
+ * If the entry is now wired,
+ * map the pages into the destination map.
+ */
+ if (entry->wired_count != 0) {
+ register vm_map_offset_t va;
+ vm_object_offset_t offset;
+ register vm_object_t object;
+ vm_prot_t prot;
+ int type_of_fault;
+
+ object = entry->object.vm_object;
+ offset = entry->offset;
+ va = entry->vme_start;
+
+ pmap_pageable(dst_map->pmap,
+ entry->vme_start,
+ entry->vme_end,
+ TRUE);
+
+ while (va < entry->vme_end) {
+ register vm_page_t m;
+
+ /*
+ * Look up the page in the object.
+ * Assert that the page will be found in the
+ * top object:
+ * either
+ * the object was newly created by
+ * vm_object_copy_slowly, and has
+ * copies of all of the pages from
+ * the source object
+ * or
+ * the object was moved from the old
+ * map entry; because the old map
+ * entry was wired, all of the pages
+ * were in the top-level object.
+ * (XXX not true if we wire pages for
+ * reading)
+ */
+ vm_object_lock(object);
+
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) ||
+ m->absent)
+ panic("vm_map_copyout: wiring %p", m);
+
+ /*
+ * ENCRYPTED SWAP:
+ * The page is assumed to be wired here, so it
+ * shouldn't be encrypted. Otherwise, we
+ * couldn't enter it in the page table, since
+ * we don't want the user to see the encrypted
+ * data.
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+
+ prot = entry->protection;
+
+ if (override_nx(dst_map, entry->alias) && prot)
+ prot |= VM_PROT_EXECUTE;
+
+ type_of_fault = DBG_CACHE_HIT_FAULT;
+
+ vm_fault_enter(m, dst_map->pmap, va, prot, prot,
+ VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, NULL,
+ &type_of_fault);
+
+ vm_object_unlock(object);
+
+ offset += PAGE_SIZE_64;
+ va += PAGE_SIZE;
+ }
+ }
+ }
+
+after_adjustments:
+
+ /*
+ * Correct the page alignment for the result
+ */
+
+ *dst_addr = start + (copy->offset - vm_copy_start);
+
+ /*
+ * Update the hints and the map size
+ */
+
+ if (consume_on_success) {
+ SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy));
+ } else {
+ SAVE_HINT_MAP_WRITE(dst_map, last);
+ }
+
+ dst_map->size += size;
+
+ /*
+ * Link in the copy
+ */
+
+ if (consume_on_success) {
+ vm_map_copy_insert(dst_map, last, copy);
+ } else {
+ vm_map_copy_remap(dst_map, last, copy, adjustment,
+ cur_protection, max_protection,
+ inheritance);
+ }
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * XXX If wiring_required, call vm_map_pageable
+ */
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: vm_map_copyin
+ *
+ * Description:
+ * see vm_map_copyin_common. Exported via Unsupported.exports.
+ *
+ */
+
+#undef vm_map_copyin
+
+kern_return_t
+vm_map_copyin(
+	vm_map_t		src_map,
+	vm_map_address_t	src_addr,
+	vm_map_size_t	len,
+	boolean_t	src_destroy,
+	vm_map_copy_t	*copy_result)	/* OUT */
+{
+	/*
+	 * Thin wrapper: defer to the common copyin path with
+	 * src_volatile and use_maxprot both disabled.
+	 */
+	return vm_map_copyin_common(src_map, src_addr, len, src_destroy,
+	    FALSE, copy_result, FALSE);
+}
+
+/*
+ * Routine: vm_map_copyin_common
+ *
+ * Description:
+ * Copy the specified region (src_addr, len) from the
+ * source address space (src_map), possibly removing
+ * the region from the source address space (src_destroy).
+ *
+ * Returns:
+ * A vm_map_copy_t object (copy_result), suitable for
+ * insertion into another address space (using vm_map_copyout),
+ * copying over another address space region (using
+ * vm_map_copy_overwrite). If the copy is unused, it
+ * should be destroyed (using vm_map_copy_discard).
+ *
+ * In/out conditions:
+ * The source map should not be locked on entry.
+ */
+
+/*
+ * One level of the submap traversal stack used by vm_map_copyin_common:
+ * records the parent map and the address range being copied at that
+ * level so the traversal can resume after the submap is consumed.
+ */
+typedef struct submap_map {
+	vm_map_t	parent_map;	/* map that contains the submap entry */
+	vm_map_offset_t	base_start;	/* start of range in parent_map */
+	vm_map_offset_t	base_end;	/* end of range in parent_map */
+	vm_map_size_t	base_len;	/* length covered at this level */
+	struct submap_map *next;	/* next (outer) level, or NULL */
+} submap_map_t;
+
+kern_return_t
+vm_map_copyin_common(
+	vm_map_t	src_map,
+	vm_map_address_t	src_addr,
+	vm_map_size_t	len,
+	boolean_t	src_destroy,
+	__unused boolean_t	src_volatile,
+	vm_map_copy_t	*copy_result,	/* OUT */
+	boolean_t	use_maxprot)
+{
+	vm_map_entry_t	tmp_entry;	/* Result of last map lookup --
+					 * in multi-level lookup, this
+					 * entry contains the actual
+					 * vm_object/offset.
+					 */
+	register
+	vm_map_entry_t	new_entry = VM_MAP_ENTRY_NULL;	/* Map entry for copy */
+
+	vm_map_offset_t	src_start;	/* Start of current entry --
+					 * where copy is taking place now
+					 */
+	vm_map_offset_t	src_end;	/* End of entire region to be
+					 * copied */
+	vm_map_offset_t src_base;
+	vm_map_t	base_map = src_map;
+	boolean_t	map_share=FALSE;
+	submap_map_t	*parent_maps = NULL;
+
+	register
+	vm_map_copy_t	copy;		/* Resulting copy */
+	vm_map_address_t	copy_addr;
+
+	/*
+	 * Check for copies of zero bytes.
+	 */
+
+	if (len == 0) {
+		*copy_result = VM_MAP_COPY_NULL;
+		return(KERN_SUCCESS);
+	}
+
+	/*
+	 * Check that the end address doesn't overflow
+	 */
+	src_end = src_addr + len;
+	if (src_end < src_addr)
+		return KERN_INVALID_ADDRESS;
+
+	/*
+	 * If the copy is sufficiently small, use a kernel buffer instead
+	 * of making a virtual copy.  The theory being that the cost of
+	 * setting up VM (and taking C-O-W faults) dominates the copy costs
+	 * for small regions.
+	 */
+	if ((len < msg_ool_size_small) && !use_maxprot)
+		return vm_map_copyin_kernel_buffer(src_map, src_addr, len,
+						   src_destroy, copy_result);
+
+	/*
+	 * Compute (page aligned) start and end of region
+	 */
+	src_start = vm_map_trunc_page(src_addr,
+				      VM_MAP_PAGE_MASK(src_map));
+	src_end = vm_map_round_page(src_end,
+				    VM_MAP_PAGE_MASK(src_map));
+
+	XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0);
+
+	/*
+	 * Allocate a header element for the list.
+	 *
+	 * Use the start and end in the header to
+	 * remember the endpoints prior to rounding.
+	 */
+
+	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	vm_map_copy_first_entry(copy) =
+		vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+	copy->type = VM_MAP_COPY_ENTRY_LIST;
+	copy->cpy_hdr.nentries = 0;
+	copy->cpy_hdr.entries_pageable = TRUE;
+#if 00
+	copy->cpy_hdr.page_shift = src_map->hdr.page_shift;
+#else
+	/*
+	 * The copy entries can be broken down for a variety of reasons,
+	 * so we can't guarantee that they will remain map-aligned...
+	 * Will need to adjust the first copy_entry's "vme_start" and
+	 * the last copy_entry's "vme_end" to be rounded to PAGE_MASK
+	 * rather than the original map's alignment.
+	 */
+	copy->cpy_hdr.page_shift = PAGE_SHIFT;
+#endif
+
+	vm_map_store_init( &(copy->cpy_hdr) );
+
+	copy->offset = src_addr;
+	copy->size = len;
+
+	new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+
+/*
+ * Common error exit: unlock/deallocate the current map, dispose of the
+ * pending copy entry and the copy object, and unwind the submap stack.
+ */
+#define	RETURN(x)						\
+	MACRO_BEGIN						\
+	vm_map_unlock(src_map);					\
+	if(src_map != base_map)					\
+		vm_map_deallocate(src_map);			\
+	if (new_entry != VM_MAP_ENTRY_NULL)			\
+		vm_map_copy_entry_dispose(copy,new_entry);	\
+	vm_map_copy_discard(copy);				\
+	{							\
+		submap_map_t	*_ptr;				\
+								\
+		for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \
+			parent_maps=parent_maps->next;		\
+			if (_ptr->parent_map != base_map)	\
+				vm_map_deallocate(_ptr->parent_map); \
+			kfree(_ptr, sizeof(submap_map_t));	\
+		}						\
+	}							\
+	MACRO_RETURN(x);					\
+	MACRO_END
+
+	/*
+	 *	Find the beginning of the region.
+	 */
+
+	vm_map_lock(src_map);
+
+	if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
+		RETURN(KERN_INVALID_ADDRESS);
+	if(!tmp_entry->is_sub_map) {
+		vm_map_clip_start(src_map, tmp_entry, src_start);
+	}
+	/* set for later submap fix-up */
+	copy_addr = src_start;
+
+	/*
+	 *	Go through entries until we get to the end.
+	 */
+
+	while (TRUE) {
+		register
+		vm_map_entry_t	src_entry = tmp_entry;	/* Top-level entry */
+		vm_map_size_t	src_size;		/* Size of source
+							 * map entry (in both
+							 * maps)
+							 */
+
+		register
+		vm_object_t		src_object;	/* Object to copy */
+		vm_object_offset_t	src_offset;
+
+		boolean_t	src_needs_copy;		/* Should source map
+							 * be made read-only
+							 * for copy-on-write?
+							 */
+
+		boolean_t	new_entry_needs_copy;	/* Will new entry be COW? */
+
+		boolean_t	was_wired;		/* Was source wired? */
+		vm_map_version_t version;		/* Version before locks
+							 * dropped to make copy
+							 */
+		kern_return_t	result;			/* Return value from
+							 * copy_strategically.
+							 */
+		/*
+		 * Descend through any nested submaps, pushing each parent
+		 * level onto the parent_maps stack so the traversal can
+		 * resume after the submap range is consumed.
+		 */
+		while(tmp_entry->is_sub_map) {
+			vm_map_size_t submap_len;
+			submap_map_t *ptr;
+
+			ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
+			ptr->next = parent_maps;
+			parent_maps = ptr;
+			ptr->parent_map = src_map;
+			ptr->base_start = src_start;
+			ptr->base_end = src_end;
+			submap_len = tmp_entry->vme_end - src_start;
+			if(submap_len > (src_end-src_start))
+				submap_len = src_end-src_start;
+			ptr->base_len = submap_len;
+
+			src_start -= tmp_entry->vme_start;
+			src_start += tmp_entry->offset;
+			src_end = src_start + submap_len;
+			src_map = tmp_entry->object.sub_map;
+			vm_map_lock(src_map);
+			/* keep an outstanding reference for all maps in */
+			/* the parents tree except the base map */
+			vm_map_reference(src_map);
+			vm_map_unlock(ptr->parent_map);
+			if (!vm_map_lookup_entry(
+				    src_map, src_start, &tmp_entry))
+				RETURN(KERN_INVALID_ADDRESS);
+			map_share = TRUE;
+			if(!tmp_entry->is_sub_map)
+				vm_map_clip_start(src_map, tmp_entry, src_start);
+			src_entry = tmp_entry;
+		}
+		/* we are now in the lowest level submap... */
+
+		if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) &&
+		    (tmp_entry->object.vm_object->phys_contiguous)) {
+			/* This is not supported for now. In the future */
+			/* we will need to detect the phys_contig */
+			/* condition and then upgrade copy_slowly */
+			/* to do physical copy from the device mem */
+			/* based object. We can piggy-back off of */
+			/* the was wired boolean to set-up the */
+			/* proper handling */
+			RETURN(KERN_PROTECTION_FAILURE);
+		}
+		/*
+		 *	Create a new address map entry to hold the result.
+		 *	Fill in the fields from the appropriate source entries.
+		 *	We must unlock the source map to do this if we need
+		 *	to allocate a map entry.
+		 */
+		if (new_entry == VM_MAP_ENTRY_NULL) {
+			version.main_timestamp = src_map->timestamp;
+			vm_map_unlock(src_map);
+
+			new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
+
+			vm_map_lock(src_map);
+			if ((version.main_timestamp + 1) != src_map->timestamp) {
+				/* map changed while unlocked: re-lookup */
+				if (!vm_map_lookup_entry(src_map, src_start,
+							 &tmp_entry)) {
+					RETURN(KERN_INVALID_ADDRESS);
+				}
+				if (!tmp_entry->is_sub_map)
+					vm_map_clip_start(src_map, tmp_entry, src_start);
+				continue; /* restart w/ new tmp_entry */
+			}
+		}
+
+		/*
+		 *	Verify that the region can be read.
+		 */
+		if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
+		     !use_maxprot) ||
+		    (src_entry->max_protection & VM_PROT_READ) == 0)
+			RETURN(KERN_PROTECTION_FAILURE);
+
+		/*
+		 *	Clip against the endpoints of the entire region.
+		 */
+
+		vm_map_clip_end(src_map, src_entry, src_end);
+
+		src_size = src_entry->vme_end - src_start;
+		src_object = src_entry->object.vm_object;
+		src_offset = src_entry->offset;
+		was_wired = (src_entry->wired_count != 0);
+
+		vm_map_entry_copy(new_entry, src_entry);
+		new_entry->use_pmap = FALSE; /* clr address space specifics */
+
+		/*
+		 *	Attempt non-blocking copy-on-write optimizations.
+		 */
+
+		if (src_destroy &&
+		    (src_object == VM_OBJECT_NULL ||
+		     (src_object->internal && !src_object->true_share
+		      && !map_share))) {
+			/*
+			 * If we are destroying the source, and the object
+			 * is internal, we can move the object reference
+			 * from the source to the copy.  The copy is
+			 * copy-on-write only if the source is.
+			 * We make another reference to the object, because
+			 * destroying the source entry will deallocate it.
+			 */
+			vm_object_reference(src_object);
+
+			/*
+			 * Copy is always unwired.  vm_map_copy_entry
+			 * set its wired count to zero.
+			 */
+
+			goto CopySuccessful;
+		}
+
+
+	RestartCopy:
+		XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
+		    src_object, new_entry, new_entry->object.vm_object,
+		    was_wired, 0);
+		if ((src_object == VM_OBJECT_NULL ||
+		     (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+		    vm_object_copy_quickly(
+			    &new_entry->object.vm_object,
+			    src_offset,
+			    src_size,
+			    &src_needs_copy,
+			    &new_entry_needs_copy)) {
+
+			new_entry->needs_copy = new_entry_needs_copy;
+
+			/*
+			 *	Handle copy-on-write obligations
+			 */
+
+			if (src_needs_copy && !tmp_entry->needs_copy) {
+				vm_prot_t prot;
+
+				prot = src_entry->protection & ~VM_PROT_WRITE;
+
+				if (override_nx(src_map, src_entry->alias) && prot)
+					prot |= VM_PROT_EXECUTE;
+
+				vm_object_pmap_protect(
+					src_object,
+					src_offset,
+					src_size,
+					(src_entry->is_shared ?
+					 PMAP_NULL
+					 : src_map->pmap),
+					src_entry->vme_start,
+					prot);
+
+				tmp_entry->needs_copy = TRUE;
+			}
+
+			/*
+			 *	The map has never been unlocked, so it's safe
+			 *	to move to the next entry rather than doing
+			 *	another lookup.
+			 */
+
+			goto CopySuccessful;
+		}
+
+		/*
+		 *	Take an object reference, so that we may
+		 *	release the map lock(s).
+		 */
+
+		assert(src_object != VM_OBJECT_NULL);
+		vm_object_reference(src_object);
+
+		/*
+		 *	Record the timestamp for later verification.
+		 *	Unlock the map.
+		 */
+
+		version.main_timestamp = src_map->timestamp;
+		vm_map_unlock(src_map);	/* Increments timestamp once! */
+
+		/*
+		 *	Perform the copy
+		 */
+
+		if (was_wired) {
+		CopySlowly:
+			vm_object_lock(src_object);
+			result = vm_object_copy_slowly(
+				src_object,
+				src_offset,
+				src_size,
+				THREAD_UNINT,
+				&new_entry->object.vm_object);
+			new_entry->offset = 0;
+			new_entry->needs_copy = FALSE;
+
+		}
+		else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+			 (tmp_entry->is_shared  || map_share)) {
+			vm_object_t new_object;
+
+			vm_object_lock_shared(src_object);
+			new_object = vm_object_copy_delayed(
+				src_object,
+				src_offset,
+				src_size,
+				TRUE);
+			if (new_object == VM_OBJECT_NULL)
+				goto CopySlowly;
+
+			new_entry->object.vm_object = new_object;
+			new_entry->needs_copy = TRUE;
+			result = KERN_SUCCESS;
+
+		} else {
+			result = vm_object_copy_strategically(src_object,
+							      src_offset,
+							      src_size,
+							      &new_entry->object.vm_object,
+							      &new_entry->offset,
+							      &new_entry_needs_copy);
+
+			new_entry->needs_copy = new_entry_needs_copy;
+		}
+
+		if (result != KERN_SUCCESS &&
+		    result != KERN_MEMORY_RESTART_COPY) {
+			vm_map_lock(src_map);
+			RETURN(result);
+		}
+
+		/*
+		 *	Throw away the extra reference
+		 */
+
+		vm_object_deallocate(src_object);
+
+		/*
+		 *	Verify that the map has not substantially
+		 *	changed while the copy was being made.
+		 */
+
+		vm_map_lock(src_map);
+
+		if ((version.main_timestamp + 1) == src_map->timestamp)
+			goto VerificationSuccessful;
+
+		/*
+		 *	Simple version comparison failed.
+		 *
+		 *	Retry the lookup and verify that the
+		 *	same object/offset are still present.
+		 *
+		 *	[Note: a memory manager that colludes with
+		 *	the calling task can detect that we have
+		 *	cheated.  While the map was unlocked, the
+		 *	mapping could have been changed and restored.]
+		 */
+
+		if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
+			RETURN(KERN_INVALID_ADDRESS);
+		}
+
+		src_entry = tmp_entry;
+		vm_map_clip_start(src_map, src_entry, src_start);
+
+		if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) &&
+		     !use_maxprot) ||
+		    ((src_entry->max_protection & VM_PROT_READ) == 0))
+			goto VerificationFailed;
+
+		if (src_entry->vme_end < new_entry->vme_end) {
+			assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end,
+						   VM_MAP_COPY_PAGE_MASK(copy)));
+			new_entry->vme_end = src_entry->vme_end;
+			src_size = new_entry->vme_end - src_start;
+		}
+
+		if ((src_entry->object.vm_object != src_object) ||
+		    (src_entry->offset != src_offset) ) {
+
+			/*
+			 *	Verification failed.
+			 *
+			 *	Start over with this top-level entry.
+			 */
+
+		VerificationFailed: ;
+
+			vm_object_deallocate(new_entry->object.vm_object);
+			tmp_entry = src_entry;
+			continue;
+		}
+
+		/*
+		 *	Verification succeeded.
+		 */
+
+	VerificationSuccessful: ;
+
+		if (result == KERN_MEMORY_RESTART_COPY)
+			goto RestartCopy;
+
+		/*
+		 *	Copy succeeded.
+		 */
+
+	CopySuccessful: ;
+
+		/*
+		 *	Link in the new copy entry.
+		 */
+
+		vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
+				       new_entry);
+
+		/*
+		 *	Determine whether the entire region
+		 *	has been copied.
+		 */
+		src_base = src_start;
+		src_start = new_entry->vme_end;
+		new_entry = VM_MAP_ENTRY_NULL;
+		/*
+		 * If we have consumed this level's range, pop back up the
+		 * submap stack, fixing up any clipping damage on the way.
+		 */
+		while ((src_start >= src_end) && (src_end != 0)) {
+			if (src_map != base_map) {
+				submap_map_t	*ptr;
+
+				ptr = parent_maps;
+				assert(ptr != NULL);
+				parent_maps = parent_maps->next;
+
+				/* fix up the damage we did in that submap */
+				vm_map_simplify_range(src_map,
+						      src_base,
+						      src_end);
+
+				vm_map_unlock(src_map);
+				vm_map_deallocate(src_map);
+				vm_map_lock(ptr->parent_map);
+				src_map = ptr->parent_map;
+				src_base = ptr->base_start;
+				src_start = ptr->base_start + ptr->base_len;
+				src_end = ptr->base_end;
+				if ((src_end > src_start) &&
+				    !vm_map_lookup_entry(
+					    src_map, src_start, &tmp_entry))
+					RETURN(KERN_INVALID_ADDRESS);
+				kfree(ptr, sizeof(submap_map_t));
+				if(parent_maps == NULL)
+					map_share = FALSE;
+				src_entry = tmp_entry->vme_prev;
+			} else
+				break;
+		}
+		if ((src_start >= src_end) && (src_end != 0))
+			break;
+
+		/*
+		 *	Verify that there are no gaps in the region
+		 */
+
+		tmp_entry = src_entry->vme_next;
+		if ((tmp_entry->vme_start != src_start) ||
+		    (tmp_entry == vm_map_to_entry(src_map))) {
+
+			if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT &&
+			    (vm_map_round_page(src_entry->vme_end,
+					       VM_MAP_PAGE_MASK(src_map)) ==
+			     src_end)) {
+				vm_map_entry_t last_copy_entry;
+				vm_map_offset_t adjustment;
+
+				/*
+				 * This is the last entry in the range we
+				 * want and it happens to miss a few pages
+				 * because it is not map-aligned (must have
+				 * been imported from a differently-aligned
+				 * map).
+				 * Let's say we're done, but first we have
+				 * to compensate for the alignment adjustment
+				 * we're about to do before returning.
+				 */
+
+				last_copy_entry = vm_map_copy_last_entry(copy);
+				assert(last_copy_entry !=
+				       vm_map_copy_to_entry(copy));
+				adjustment =
+					(vm_map_round_page((copy->offset +
+							    copy->size),
+							   VM_MAP_PAGE_MASK(src_map)) -
+					 vm_map_round_page((copy->offset +
+							    copy->size),
+							   PAGE_MASK));
+				last_copy_entry->vme_end += adjustment;
+				last_copy_entry->map_aligned = FALSE;
+				/* ... and we're done */
+				break;
+			}
+
+			RETURN(KERN_INVALID_ADDRESS);
+		}
+	}
+
+	/*
+	 * If the source should be destroyed, do it now, since the
+	 * copy was successful.
+	 */
+	if (src_destroy) {
+		(void) vm_map_delete(
+			src_map,
+			vm_map_trunc_page(src_addr,
+					  VM_MAP_PAGE_MASK(src_map)),
+			src_end,
+			((src_map == kernel_map) ?
+			 VM_MAP_REMOVE_KUNWIRE :
+			 VM_MAP_NO_FLAGS),
+			VM_MAP_NULL);
+	} else {
+		/* fix up the damage we did in the base map */
+		vm_map_simplify_range(
+			src_map,
+			vm_map_trunc_page(src_addr,
+					  VM_MAP_PAGE_MASK(src_map)),
+			vm_map_round_page(src_end,
+					  VM_MAP_PAGE_MASK(src_map)));
+	}
+
+	vm_map_unlock(src_map);
+
+	if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) {
+		assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK);
+
+		/* adjust alignment of first copy_entry's "vme_start" */
+		tmp_entry = vm_map_copy_first_entry(copy);
+		if (tmp_entry != vm_map_copy_to_entry(copy)) {
+			vm_map_offset_t adjustment;
+			adjustment =
+				(vm_map_trunc_page(copy->offset,
+						   PAGE_MASK) -
+				 vm_map_trunc_page(copy->offset,
+						   VM_MAP_PAGE_MASK(src_map)));
+			if (adjustment) {
+				assert(page_aligned(adjustment));
+				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
+				tmp_entry->vme_start += adjustment;
+				tmp_entry->offset += adjustment;
+				copy_addr += adjustment;
+				assert(tmp_entry->vme_start < tmp_entry->vme_end);
+			}
+		}
+
+		/* adjust alignment of last copy_entry's "vme_end" */
+		tmp_entry = vm_map_copy_last_entry(copy);
+		if (tmp_entry != vm_map_copy_to_entry(copy)) {
+			vm_map_offset_t adjustment;
+			adjustment =
+				(vm_map_round_page((copy->offset +
+						    copy->size),
+						   VM_MAP_PAGE_MASK(src_map)) -
+				 vm_map_round_page((copy->offset +
+						    copy->size),
+						   PAGE_MASK));
+			if (adjustment) {
+				assert(page_aligned(adjustment));
+				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
+				tmp_entry->vme_end -= adjustment;
+				assert(tmp_entry->vme_start < tmp_entry->vme_end);
+			}
+		}
+	}
+
+	/* Fix-up start and end points in copy.  This is necessary */
+	/* when the various entries in the copy object were picked */
+	/* up from different sub-maps */
+
+	tmp_entry = vm_map_copy_first_entry(copy);
+	while (tmp_entry != vm_map_copy_to_entry(copy)) {
+		assert(VM_MAP_PAGE_ALIGNED(
+			       copy_addr + (tmp_entry->vme_end -
+					    tmp_entry->vme_start),
+			       VM_MAP_COPY_PAGE_MASK(copy)));
+		assert(VM_MAP_PAGE_ALIGNED(
+			       copy_addr,
+			       VM_MAP_COPY_PAGE_MASK(copy)));
+
+		/*
+		 * The copy_entries will be injected directly into the
+		 * destination map and might not be "map aligned" there...
+		 */
+		tmp_entry->map_aligned = FALSE;
+
+		tmp_entry->vme_end = copy_addr +
+			(tmp_entry->vme_end - tmp_entry->vme_start);
+		tmp_entry->vme_start = copy_addr;
+		assert(tmp_entry->vme_start < tmp_entry->vme_end);
+		copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
+		tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
+	}
+
+	*copy_result = copy;
+	return(KERN_SUCCESS);
+
+#undef	RETURN
+}
+
+/*
+ * Routine:	vm_map_copy_extract
+ *
+ * Description:
+ *	Extract a copy object for the region (src_addr, len) of src_map
+ *	without copying the underlying memory: the entries are built by
+ *	vm_map_remap_extract() sharing the source objects.  Reports the
+ *	effective current/maximum protections of the range via cur_prot
+ *	and max_prot.
+ *
+ * Returns:
+ *	KERN_SUCCESS and a vm_map_copy_t in *copy_result, or an error
+ *	from vm_map_remap_extract (the partially built copy is discarded).
+ */
+kern_return_t
+vm_map_copy_extract(
+	vm_map_t		src_map,
+	vm_map_address_t	src_addr,
+	vm_map_size_t		len,
+	vm_map_copy_t		*copy_result,	/* OUT */
+	vm_prot_t		*cur_prot,	/* OUT */
+	vm_prot_t		*max_prot)
+{
+	vm_map_offset_t	src_start, src_end;
+	vm_map_copy_t	copy;
+	kern_return_t	kr;
+
+	/*
+	 *	Check for copies of zero bytes.
+	 */
+
+	if (len == 0) {
+		*copy_result = VM_MAP_COPY_NULL;
+		return(KERN_SUCCESS);
+	}
+
+	/*
+	 *	Check that the end address doesn't overflow
+	 */
+	src_end = src_addr + len;
+	if (src_end < src_addr)
+		return KERN_INVALID_ADDRESS;
+
+	/*
+	 *	Compute (page aligned) start and end of region
+	 *	NOTE(review): src_start/src_end are not used below;
+	 *	kept for symmetry with vm_map_copyin_common.
+	 */
+	src_start = vm_map_trunc_page(src_addr, PAGE_MASK);
+	src_end = vm_map_round_page(src_end, PAGE_MASK);
+
+	/*
+	 *	Allocate a header element for the list.
+	 *
+	 *	Use the start and end in the header to
+	 *	remember the endpoints prior to rounding.
+	 */
+
+	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	vm_map_copy_first_entry(copy) =
+		vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
+	copy->type = VM_MAP_COPY_ENTRY_LIST;
+	copy->cpy_hdr.nentries = 0;
+	copy->cpy_hdr.entries_pageable = TRUE;
+
+	vm_map_store_init(&copy->cpy_hdr);
+
+	copy->offset = 0;
+	copy->size = len;
+
+	kr = vm_map_remap_extract(src_map,
+				  src_addr,
+				  len,
+				  FALSE, /* copy */
+				  &copy->cpy_hdr,
+				  cur_prot,
+				  max_prot,
+				  VM_INHERIT_SHARE,
+				  TRUE); /* pageable */
+	if (kr != KERN_SUCCESS) {
+		vm_map_copy_discard(copy);
+		return kr;
+	}
+
+	*copy_result = copy;
+	return KERN_SUCCESS;
+}
+
+/*
+ * vm_map_copyin_object:
+ *
+ * Create a copy object from an object.
+ * Our caller donates an object reference.
+ */
+
+kern_return_t
+vm_map_copyin_object(
+	vm_object_t		object,
+	vm_object_offset_t	offset,		/* offset of region in object */
+	vm_object_size_t	size,		/* size of region in object */
+	vm_map_copy_t	*copy_result)	/* OUT */
+{
+	vm_map_copy_t	copy;		/* Resulting copy */
+
+	/*
+	 * Wrap the donated object reference directly in an
+	 * object-flavored vm_map_copy.
+	 */
+
+	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
+	copy->type = VM_MAP_COPY_OBJECT;
+	copy->offset = offset;
+	copy->size = size;
+	copy->cpy_object = object;
+
+	*copy_result = copy;
+	return KERN_SUCCESS;
+}
+
+/*
+ * vm_map_fork_share:
+ *
+ *	Share old_entry's mapping with new_map during fork.  Ensures the
+ *	backing object is suitable for sharing (allocating it, or shadowing
+ *	it, as needed), takes a reference for the clone, marks both entries
+ *	shared, links the clone at the end of new_map, and copies the pmap
+ *	mappings.  Called with old_map locked.
+ */
+static void
+vm_map_fork_share(
+	vm_map_t	old_map,
+	vm_map_entry_t	old_entry,
+	vm_map_t	new_map)
+{
+	vm_object_t	object;
+	vm_map_entry_t	new_entry;
+
+	/*
+	 *	New sharing code.  New map entry
+	 *	references original object.  Internal
+	 *	objects use asynchronous copy algorithm for
+	 *	future copies.  First make sure we have
+	 *	the right object.  If we need a shadow,
+	 *	or someone else already has one, then
+	 *	make a new shadow and share it.
+	 */
+
+	object = old_entry->object.vm_object;
+	if (old_entry->is_sub_map) {
+		assert(old_entry->wired_count == 0);
+#ifndef NO_NESTED_PMAP
+		if(old_entry->use_pmap) {
+			kern_return_t	result;
+
+			result = pmap_nest(new_map->pmap,
+					   (old_entry->object.sub_map)->pmap,
+					   (addr64_t)old_entry->vme_start,
+					   (addr64_t)old_entry->vme_start,
+					   (uint64_t)(old_entry->vme_end - old_entry->vme_start));
+			if(result)
+				panic("vm_map_fork_share: pmap_nest failed!");
+		}
+#endif	/* NO_NESTED_PMAP */
+	} else if (object == VM_OBJECT_NULL) {
+		object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end -
+							    old_entry->vme_start));
+		old_entry->offset = 0;
+		old_entry->object.vm_object = object;
+		assert(!old_entry->needs_copy);
+	} else if (object->copy_strategy !=
+		   MEMORY_OBJECT_COPY_SYMMETRIC) {
+
+		/*
+		 *	We are already using an asymmetric
+		 *	copy, and therefore we already have
+		 *	the right object.
+		 */
+
+		assert(! old_entry->needs_copy);
+	}
+	else if (old_entry->needs_copy ||	/* case 1 */
+		 object->shadowed ||		/* case 2 */
+		 (!object->true_share && 	/* case 3 */
+		  !old_entry->is_shared &&
+		  (object->vo_size >
+		   (vm_map_size_t)(old_entry->vme_end -
+				   old_entry->vme_start)))) {
+
+		/*
+		 *	We need to create a shadow.
+		 *	There are three cases here.
+		 *	In the first case, we need to
+		 *	complete a deferred symmetrical
+		 *	copy that we participated in.
+		 *	In the second and third cases,
+		 *	we need to create the shadow so
+		 *	that changes that we make to the
+		 *	object do not interfere with
+		 *	any symmetrical copies which
+		 *	have occurred (case 2) or which
+		 *	might occur (case 3).
+		 *
+		 *	The first case is when we had
+		 *	deferred shadow object creation
+		 *	via the entry->needs_copy mechanism.
+		 *	This mechanism only works when
+		 *	only one entry points to the source
+		 *	object, and we are about to create
+		 *	a second entry pointing to the
+		 *	same object. The problem is that
+		 *	there is no way of mapping from
+		 *	an object to the entries pointing
+		 *	to it. (Deferred shadow creation
+		 *	works with one entry because it occurs
+		 *	at fault time, and we walk from the
+		 *	entry to the object when handling
+		 *	the fault.)
+		 *
+		 *	The second case is when the object
+		 *	to be shared has already been copied
+		 *	with a symmetric copy, but we point
+		 *	directly to the object without
+		 *	needs_copy set in our entry. (This
+		 *	can happen because different ranges
+		 *	of an object can be pointed to by
+		 *	different entries. In particular,
+		 *	a single entry pointing to an object
+		 *	can be split by a call to vm_inherit,
+		 *	which, combined with task_create, can
+		 *	result in the different entries
+		 *	having different needs_copy values.)
+		 *	The shadowed flag in the object allows
+		 *	us to detect this case. The problem
+		 *	with this case is that if this object
+		 *	has or will have shadows, then we
+		 *	must not perform an asymmetric copy
+		 *	of this object, since such a copy
+		 *	allows the object to be changed, which
+		 *	will break the previous symmetrical
+		 *	copies (which rely upon the object
+		 *	not changing).  In a sense, the shadowed
+		 *	flag says "don't change this object".
+		 *	We fix this by creating a shadow
+		 *	object for this object, and sharing
+		 *	that. This works because we are free
+		 *	to change the shadow object (and thus
+		 *	to use an asymmetric copy strategy);
+		 *	this is also semantically correct,
+		 *	since this object is temporary, and
+		 *	therefore a copy of the object is
+		 *	as good as the object itself. (This
+		 *	is not true for permanent objects,
+		 *	since the pager needs to see changes,
+		 *	which won't happen if the changes
+		 *	are made to a copy.)
+		 *
+		 *	The third case is when the object
+		 *	to be shared has parts sticking
+		 *	outside of the entry we're working
+		 *	with, and thus may in the future
+		 *	be subject to a symmetrical copy.
+		 *	(This is a preemptive version of
+		 *	case 2.)
+		 */
+		vm_object_shadow(&old_entry->object.vm_object,
+				 &old_entry->offset,
+				 (vm_map_size_t) (old_entry->vme_end -
+						  old_entry->vme_start));
+
+		/*
+		 *	If we're making a shadow for other than
+		 *	copy on write reasons, then we have
+		 *	to remove write permission.
+		 */
+
+		if (!old_entry->needs_copy &&
+		    (old_entry->protection & VM_PROT_WRITE)) {
+			vm_prot_t prot;
+
+			prot = old_entry->protection & ~VM_PROT_WRITE;
+
+			if (override_nx(old_map, old_entry->alias) && prot)
+				prot |= VM_PROT_EXECUTE;
+
+			if (old_map->mapped_in_other_pmaps) {
+				vm_object_pmap_protect(
+					old_entry->object.vm_object,
+					old_entry->offset,
+					(old_entry->vme_end -
+					 old_entry->vme_start),
+					PMAP_NULL,
+					old_entry->vme_start,
+					prot);
+			} else {
+				pmap_protect(old_map->pmap,
+					     old_entry->vme_start,
+					     old_entry->vme_end,
+					     prot);
+			}
+		}
+
+		old_entry->needs_copy = FALSE;
+		object = old_entry->object.vm_object;
+	}
+
+
+	/*
+	 *	If object was using a symmetric copy strategy,
+	 *	change its copy strategy to the default
+	 *	asymmetric copy strategy, which is copy_delay
+	 *	in the non-norma case and copy_call in the
+	 *	norma case. Bump the reference count for the
+	 *	new entry.
+	 */
+
+	if(old_entry->is_sub_map) {
+		vm_map_lock(old_entry->object.sub_map);
+		vm_map_reference(old_entry->object.sub_map);
+		vm_map_unlock(old_entry->object.sub_map);
+	} else {
+		vm_object_lock(object);
+		vm_object_reference_locked(object);
+		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+		}
+		vm_object_unlock(object);
+	}
+
+	/*
+	 *	Clone the entry, using object ref from above.
+	 *	Mark both entries as shared.
+	 */
+
+	new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel
+							  * map or descendants */
+	vm_map_entry_copy(new_entry, old_entry);
+	old_entry->is_shared = TRUE;
+	new_entry->is_shared = TRUE;
+
+	/*
+	 *	Insert the entry into the new map -- we
+	 *	know we're inserting at the end of the new
+	 *	map.
+	 */
+
+	vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry);
+
+	/*
+	 *	Update the physical map
+	 */
+
+	if (old_entry->is_sub_map) {
+		/* Bill Angell pmap support goes here */
+	} else {
+		pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
+			  old_entry->vme_end - old_entry->vme_start,
+			  old_entry->vme_start);
+	}
+}
+
+/*
+ * vm_map_fork_copy:
+ *
+ *	Copy old_entry's region into new_map during fork, via
+ *	vm_map_copyin_maxprot + vm_map_copy_insert.  Advances *old_entry_p
+ *	to the entry where traversal should resume.  old_map is temporarily
+ *	unlocked around the copyin and re-locked before returning.
+ *
+ * Returns:
+ *	TRUE if the region was copied and inserted; FALSE if the copyin
+ *	failed (the caller should continue from the updated *old_entry_p).
+ */
+static boolean_t
+vm_map_fork_copy(
+	vm_map_t	old_map,
+	vm_map_entry_t	*old_entry_p,
+	vm_map_t	new_map)
+{
+	vm_map_entry_t old_entry = *old_entry_p;
+	vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
+	vm_map_offset_t start = old_entry->vme_start;
+	vm_map_copy_t copy;
+	vm_map_entry_t last = vm_map_last_entry(new_map);
+
+	vm_map_unlock(old_map);
+	/*
+	 *	Use maxprot version of copyin because we
+	 *	care about whether this memory can ever
+	 *	be accessed, not just whether it's accessible
+	 *	right now.
+	 */
+	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
+	    != KERN_SUCCESS) {
+		/*
+		 *	The map might have changed while it
+		 *	was unlocked, check it again.  Skip
+		 *	any blank space or permanently
+		 *	unreadable region.
+		 */
+		vm_map_lock(old_map);
+		if (!vm_map_lookup_entry(old_map, start, &last) ||
+		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
+			last = last->vme_next;
+		}
+		*old_entry_p = last;
+
+		/*
+		 * XXX	For some error returns, want to
+		 * XXX	skip to the next element.  Note
+		 *	that INVALID_ADDRESS and
+		 *	PROTECTION_FAILURE are handled above.
+		 */
+
+		return FALSE;
+	}
+
+	/*
+	 *	Insert the copy into the new map
+	 */
+
+	vm_map_copy_insert(new_map, last, copy);
+
+	/*
+	 *	Pick up the traversal at the end of
+	 *	the copied region.
+	 */
+
+	vm_map_lock(old_map);
+	start += entry_size;
+	if (! vm_map_lookup_entry(old_map, start, &last)) {
+		last = last->vme_next;
+	} else {
+		if (last->vme_start == start) {
+			/*
+			 * No need to clip here and we don't
+			 * want to cause any unnecessary
+			 * unnesting...
+			 */
+		} else {
+			vm_map_clip_start(old_map, last, start);
+		}
+	}
+	*old_entry_p = last;
+
+	return TRUE;
+}
+
+/*
+ * vm_map_fork:
+ *
+ * Create and return a new map based on the old
+ * map, according to the inheritance values on the
+ * regions in that map.
+ *
+ * "ledger" is handed through to pmap_create() for the
+ * child's physical map.
+ *
+ * The source map must not be locked.
+ */
+vm_map_t
+vm_map_fork(
+ ledger_t ledger,
+ vm_map_t old_map)
+{
+ pmap_t new_pmap;
+ vm_map_t new_map;
+ vm_map_entry_t old_entry;
+ vm_map_size_t new_size = 0, entry_size;
+ vm_map_entry_t new_entry;
+ boolean_t src_needs_copy;
+ boolean_t new_entry_needs_copy;
+
+ /* create the child's pmap, matching the parent's 32/64-bit-ness on x86 */
+ new_pmap = pmap_create(ledger, (vm_map_size_t) 0,
+#if defined(__i386__) || defined(__x86_64__)
+ old_map->pmap->pm_task_map != TASK_MAP_32BIT
+#else
+#error Unknown architecture.
+#endif
+ );
+
+ vm_map_reference_swap(old_map);
+ vm_map_lock(old_map);
+
+ new_map = vm_map_create(new_pmap,
+ old_map->min_offset,
+ old_map->max_offset,
+ old_map->hdr.entries_pageable);
+ /* inherit the parent map's page size */
+ vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(old_map));
+ /*
+ * Walk every entry of the parent map; the loop increment is at the
+ * bottom because the VM_INHERIT_COPY slow path ("continue" below)
+ * advances old_entry itself via vm_map_fork_copy().
+ */
+ for (
+ old_entry = vm_map_first_entry(old_map);
+ old_entry != vm_map_to_entry(old_map);
+ ) {
+
+ entry_size = old_entry->vme_end - old_entry->vme_start;
+
+ switch (old_entry->inheritance) {
+ case VM_INHERIT_NONE:
+ /* not inherited: child gets nothing for this range */
+ break;
+
+ case VM_INHERIT_SHARE:
+ vm_map_fork_share(old_map, old_entry, new_map);
+ new_size += entry_size;
+ break;
+
+ case VM_INHERIT_COPY:
+
+ /*
+ * Inline the copy_quickly case;
+ * upon failure, fall back on call
+ * to vm_map_fork_copy.
+ */
+
+ if(old_entry->is_sub_map)
+ break;
+ if ((old_entry->wired_count != 0) ||
+ ((old_entry->object.vm_object != NULL) &&
+ (old_entry->object.vm_object->true_share))) {
+ /* wired or truly-shared object: can't copy quickly */
+ goto slow_vm_map_fork_copy;
+ }
+
+ new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */
+ vm_map_entry_copy(new_entry, old_entry);
+ /* clear address space specifics */
+ new_entry->use_pmap = FALSE;
+
+ if (! vm_object_copy_quickly(
+ &new_entry->object.vm_object,
+ old_entry->offset,
+ (old_entry->vme_end -
+ old_entry->vme_start),
+ &src_needs_copy,
+ &new_entry_needs_copy)) {
+ vm_map_entry_dispose(new_map, new_entry);
+ goto slow_vm_map_fork_copy;
+ }
+
+ /*
+ * Handle copy-on-write obligations
+ */
+
+ if (src_needs_copy && !old_entry->needs_copy) {
+ vm_prot_t prot;
+
+ /* write-protect the parent's mapping so its next write faults */
+ prot = old_entry->protection & ~VM_PROT_WRITE;
+
+ if (override_nx(old_map, old_entry->alias) && prot)
+ prot |= VM_PROT_EXECUTE;
+
+ vm_object_pmap_protect(
+ old_entry->object.vm_object,
+ old_entry->offset,
+ (old_entry->vme_end -
+ old_entry->vme_start),
+ ((old_entry->is_shared
+ || old_map->mapped_in_other_pmaps)
+ ? PMAP_NULL :
+ old_map->pmap),
+ old_entry->vme_start,
+ prot);
+
+ old_entry->needs_copy = TRUE;
+ }
+ new_entry->needs_copy = new_entry_needs_copy;
+
+ /*
+ * Insert the entry at the end
+ * of the map.
+ */
+
+ vm_map_store_entry_link(new_map, vm_map_last_entry(new_map),
+ new_entry);
+ new_size += entry_size;
+ break;
+
+ slow_vm_map_fork_copy:
+ if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
+ new_size += entry_size;
+ }
+ /* vm_map_fork_copy() already advanced old_entry */
+ continue;
+ }
+ old_entry = old_entry->vme_next;
+ }
+
+ new_map->size = new_size;
+ vm_map_unlock(old_map);
+ vm_map_deallocate(old_map);
+
+ return(new_map);
+}
+
+/*
+ * vm_map_exec:
+ *
+ * Set up "new_map" with the proper execution environment for
+ * the given kind of executable (platform, 64bit, chroot
+ * environment): map in the comm page and the shared region.
+ */
+kern_return_t
+vm_map_exec(
+ vm_map_t new_map,
+ task_t task,
+ void *fsroot,
+ cpu_type_t cpu)
+{
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): ->\n",
+ current_task(), new_map, task, fsroot, cpu));
+
+ /* map the comm page into the new address space */
+ (void) vm_commpage_enter(new_map, task);
+
+ /* map the shared region appropriate for (fsroot, cpu) */
+ (void) vm_shared_region_enter(new_map, task, fsroot, cpu);
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): <-\n",
+ current_task(), new_map, task, fsroot, cpu));
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_map_lookup_locked:
+ *
+ * Finds the VM object, offset, and
+ * protection for a given virtual address in the
+ * specified map, assuming a page fault of the
+ * type specified.
+ *
+ * Returns the (object, offset, protection) for
+ * this address, whether it is wired down, and whether
+ * this map has the only reference to the data in question.
+ * In order to later verify this lookup, a "version"
+ * is returned.
+ *
+ * The map MUST be locked by the caller and WILL be
+ * locked on exit. In order to guarantee the
+ * existence of the returned object, it is returned
+ * locked.
+ *
+ * If a lookup is requested with "write protection"
+ * specified, the map may be changed to perform virtual
+ * copying operations, although the data referenced will
+ * remain the same.
+ */
+kern_return_t
+vm_map_lookup_locked(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_map_offset_t vaddr,
+ vm_prot_t fault_type,
+ int object_lock_type,
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_object_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired, /* OUT */
+ vm_object_fault_info_t fault_info, /* OUT */
+ vm_map_t *real_map)
+{
+ vm_map_entry_t entry;
+ register vm_map_t map = *var_map;
+ vm_map_t old_map = *var_map;
+ vm_map_t cow_sub_map_parent = VM_MAP_NULL;
+ vm_map_offset_t cow_parent_vaddr = 0;
+ vm_map_offset_t old_start = 0;
+ vm_map_offset_t old_end = 0;
+ register vm_prot_t prot;
+ boolean_t mask_protections;
+ vm_prot_t original_fault_type;
+
+ /*
+ * VM_PROT_MASK means that the caller wants us to use "fault_type"
+ * as a mask against the mapping's actual protections, not as an
+ * absolute value.
+ */
+ mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE;
+ fault_type &= ~VM_PROT_IS_MASK;
+ original_fault_type = fault_type;
+
+ *real_map = map;
+
+RetryLookup:
+ fault_type = original_fault_type;
+
+ /*
+ * If the map has an interesting hint, try it before calling
+ * full blown lookup routine.
+ */
+ entry = map->hint;
+
+ if ((entry == vm_map_to_entry(map)) ||
+ (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
+ vm_map_entry_t tmp_entry;
+
+ /*
+ * Entry was either not a valid hint, or the vaddr
+ * was not contained in the entry, so do a full lookup.
+ */
+ if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
+ if((cow_sub_map_parent) && (cow_sub_map_parent != map))
+ vm_map_unlock(cow_sub_map_parent);
+ if((*real_map != map)
+ && (*real_map != cow_sub_map_parent))
+ vm_map_unlock(*real_map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ entry = tmp_entry;
+ }
+ if(map == old_map) {
+ old_start = entry->vme_start;
+ old_end = entry->vme_end;
+ }
+
+ /*
+ * Handle submaps. Drop lock on upper map, submap is
+ * returned locked.
+ */
+
+submap_recurse:
+ if (entry->is_sub_map) {
+ vm_map_offset_t local_vaddr;
+ vm_map_offset_t end_delta;
+ vm_map_offset_t start_delta;
+ vm_map_entry_t submap_entry;
+ boolean_t mapped_needs_copy=FALSE;
+
+ local_vaddr = vaddr;
+
+ if ((entry->use_pmap && !(fault_type & VM_PROT_WRITE))) {
+ /* if real_map equals map we unlock below */
+ if ((*real_map != map) &&
+ (*real_map != cow_sub_map_parent))
+ vm_map_unlock(*real_map);
+ *real_map = entry->object.sub_map;
+ }
+
+ if(entry->needs_copy && (fault_type & VM_PROT_WRITE)) {
+ /*
+ * Write fault on a copy-on-write submap entry:
+ * remember the parent map ("cow_sub_map_parent")
+ * so the COW can be resolved against it below.
+ */
+ if (!mapped_needs_copy) {
+ if (vm_map_lock_read_to_write(map)) {
+ vm_map_lock_read(map);
+ *real_map = map;
+ goto RetryLookup;
+ }
+ vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
+ cow_sub_map_parent = map;
+ /* reset base to map before cow object */
+ /* this is the map which will accept */
+ /* the new cow object */
+ old_start = entry->vme_start;
+ old_end = entry->vme_end;
+ cow_parent_vaddr = vaddr;
+ mapped_needs_copy = TRUE;
+ } else {
+ vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
+ if((cow_sub_map_parent != map) &&
+ (*real_map != map))
+ vm_map_unlock(map);
+ }
+ } else {
+ vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
+ /* leave map locked if it is a target */
+ /* cow sub_map above otherwise, just */
+ /* follow the maps down to the object */
+ /* here we unlock knowing we are not */
+ /* revisiting the map. */
+ if((*real_map != map) && (map != cow_sub_map_parent))
+ vm_map_unlock_read(map);
+ }
+
+ map = *var_map;
+
+ /* calculate the offset in the submap for vaddr */
+ local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
+
+ RetrySubMap:
+ if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
+ if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
+ vm_map_unlock(cow_sub_map_parent);
+ }
+ if((*real_map != map)
+ && (*real_map != cow_sub_map_parent)) {
+ vm_map_unlock(*real_map);
+ }
+ *real_map = map;
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* find the attenuated shadow of the underlying object */
+ /* on our target map */
+
+ /* in english the submap object may extend beyond the */
+ /* region mapped by the entry or, may only fill a portion */
+ /* of it. For our purposes, we only care if the object */
+ /* doesn't fill. In this case the area which will */
+ /* ultimately be clipped in the top map will only need */
+ /* to be as big as the portion of the underlying entry */
+ /* which is mapped */
+ start_delta = submap_entry->vme_start > entry->offset ?
+ submap_entry->vme_start - entry->offset : 0;
+
+ end_delta =
+ (entry->offset + start_delta + (old_end - old_start)) <=
+ submap_entry->vme_end ?
+ 0 : (entry->offset +
+ (old_end - old_start))
+ - submap_entry->vme_end;
+
+ old_start += start_delta;
+ old_end -= end_delta;
+
+ if(submap_entry->is_sub_map) {
+ entry = submap_entry;
+ vaddr = local_vaddr;
+ goto submap_recurse;
+ }
+
+ if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {
+
+ vm_object_t sub_object, copy_object;
+ vm_object_offset_t copy_offset;
+ vm_map_offset_t local_start;
+ vm_map_offset_t local_end;
+ boolean_t copied_slowly = FALSE;
+
+ if (vm_map_lock_read_to_write(map)) {
+ vm_map_lock_read(map);
+ old_start -= start_delta;
+ old_end += end_delta;
+ goto RetrySubMap;
+ }
+
+
+ sub_object = submap_entry->object.vm_object;
+ if (sub_object == VM_OBJECT_NULL) {
+ sub_object =
+ vm_object_allocate(
+ (vm_map_size_t)
+ (submap_entry->vme_end -
+ submap_entry->vme_start));
+ submap_entry->object.vm_object = sub_object;
+ submap_entry->offset = 0;
+ }
+ local_start = local_vaddr -
+ (cow_parent_vaddr - old_start);
+ local_end = local_vaddr +
+ (old_end - cow_parent_vaddr);
+ vm_map_clip_start(map, submap_entry, local_start);
+ vm_map_clip_end(map, submap_entry, local_end);
+ /* unnesting was done in vm_map_clip_start/end() */
+ assert(!submap_entry->use_pmap);
+
+ /* This is the COW case, lets connect */
+ /* an entry in our space to the underlying */
+ /* object in the submap, bypassing the */
+ /* submap. */
+
+
+ if(submap_entry->wired_count != 0 ||
+ (sub_object->copy_strategy ==
+ MEMORY_OBJECT_COPY_NONE)) {
+ /* wired or COPY_NONE: copy eagerly instead of shadowing */
+ vm_object_lock(sub_object);
+ vm_object_copy_slowly(sub_object,
+ submap_entry->offset,
+ (submap_entry->vme_end -
+ submap_entry->vme_start),
+ FALSE,
+ &copy_object);
+ copied_slowly = TRUE;
+ } else {
+
+ /* set up shadow object */
+ copy_object = sub_object;
+ vm_object_reference(copy_object);
+ sub_object->shadowed = TRUE;
+ submap_entry->needs_copy = TRUE;
+
+ prot = submap_entry->protection & ~VM_PROT_WRITE;
+
+ if (override_nx(old_map, submap_entry->alias) && prot)
+ prot |= VM_PROT_EXECUTE;
+
+ vm_object_pmap_protect(
+ sub_object,
+ submap_entry->offset,
+ submap_entry->vme_end -
+ submap_entry->vme_start,
+ (submap_entry->is_shared
+ || map->mapped_in_other_pmaps) ?
+ PMAP_NULL : map->pmap,
+ submap_entry->vme_start,
+ prot);
+ }
+
+ /*
+ * Adjust the fault offset to the submap entry.
+ */
+ copy_offset = (local_vaddr -
+ submap_entry->vme_start +
+ submap_entry->offset);
+
+ /* This works diffently than the */
+ /* normal submap case. We go back */
+ /* to the parent of the cow map and*/
+ /* clip out the target portion of */
+ /* the sub_map, substituting the */
+ /* new copy object, */
+
+ vm_map_unlock(map);
+ local_start = old_start;
+ local_end = old_end;
+ map = cow_sub_map_parent;
+ *var_map = cow_sub_map_parent;
+ vaddr = cow_parent_vaddr;
+ cow_sub_map_parent = NULL;
+
+ if(!vm_map_lookup_entry(map,
+ vaddr, &entry)) {
+ vm_object_deallocate(
+ copy_object);
+ vm_map_lock_write_to_read(map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* clip out the portion of space */
+ /* mapped by the sub map which */
+ /* corresponds to the underlying */
+ /* object */
+
+ /*
+ * Clip (and unnest) the smallest nested chunk
+ * possible around the faulting address...
+ */
+ local_start = vaddr & ~(pmap_nesting_size_min - 1);
+ local_end = local_start + pmap_nesting_size_min;
+ /*
+ * ... but don't go beyond the "old_start" to "old_end"
+ * range, to avoid spanning over another VM region
+ * with a possibly different VM object and/or offset.
+ */
+ if (local_start < old_start) {
+ local_start = old_start;
+ }
+ if (local_end > old_end) {
+ local_end = old_end;
+ }
+ /*
+ * Adjust copy_offset to the start of the range.
+ */
+ copy_offset -= (vaddr - local_start);
+
+ vm_map_clip_start(map, entry, local_start);
+ vm_map_clip_end(map, entry, local_end);
+ /* unnesting was done in vm_map_clip_start/end() */
+ assert(!entry->use_pmap);
+
+ /* substitute copy object for */
+ /* shared map entry */
+ vm_map_deallocate(entry->object.sub_map);
+ entry->is_sub_map = FALSE;
+ entry->object.vm_object = copy_object;
+
+ /* propagate the submap entry's protections */
+ entry->protection |= submap_entry->protection;
+ entry->max_protection |= submap_entry->max_protection;
+
+ if(copied_slowly) {
+ entry->offset = local_start - old_start;
+ entry->needs_copy = FALSE;
+ entry->is_shared = FALSE;
+ } else {
+ entry->offset = copy_offset;
+ entry->needs_copy = TRUE;
+ if(entry->inheritance == VM_INHERIT_SHARE)
+ entry->inheritance = VM_INHERIT_COPY;
+ if (map != old_map)
+ entry->is_shared = TRUE;
+ }
+ /* NOTE(review): this repeats the check in the else branch above;
+ * looks redundant but harmless — confirm before removing. */
+ if(entry->inheritance == VM_INHERIT_SHARE)
+ entry->inheritance = VM_INHERIT_COPY;
+
+ vm_map_lock_write_to_read(map);
+ } else {
+ if((cow_sub_map_parent)
+ && (cow_sub_map_parent != *real_map)
+ && (cow_sub_map_parent != map)) {
+ vm_map_unlock(cow_sub_map_parent);
+ }
+ entry = submap_entry;
+ vaddr = local_vaddr;
+ }
+ }
+
+ /*
+ * Check whether this task is allowed to have
+ * this page.
+ */
+
+ prot = entry->protection;
+
+ if (override_nx(old_map, entry->alias) && prot) {
+ /*
+ * HACK -- if not a stack, then allow execution
+ */
+ prot |= VM_PROT_EXECUTE;
+ }
+
+ if (mask_protections) {
+ fault_type &= prot;
+ if (fault_type == VM_PROT_NONE) {
+ goto protection_failure;
+ }
+ }
+ if ((fault_type & (prot)) != fault_type) {
+ protection_failure:
+ if (*real_map != map) {
+ vm_map_unlock(*real_map);
+ }
+ *real_map = map;
+
+ if ((fault_type & VM_PROT_EXECUTE) && prot)
+ log_stack_execution_failure((addr64_t)vaddr, prot);
+
+ DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL);
+ return KERN_PROTECTION_FAILURE;
+ }
+
+ /*
+ * If this page is not pageable, we have to get
+ * it for all possible accesses.
+ */
+
+ *wired = (entry->wired_count != 0);
+ if (*wired)
+ fault_type = prot;
+
+ /*
+ * If the entry was copy-on-write, we either ...
+ */
+
+ if (entry->needs_copy) {
+ /*
+ * If we want to write the page, we may as well
+ * handle that now since we've got the map locked.
+ *
+ * If we don't need to write the page, we just
+ * demote the permissions allowed.
+ */
+
+ if ((fault_type & VM_PROT_WRITE) || *wired) {
+ /*
+ * Make a new object, and place it in the
+ * object chain. Note that no new references
+ * have appeared -- one just moved from the
+ * map to the new object.
+ */
+
+ if (vm_map_lock_read_to_write(map)) {
+ vm_map_lock_read(map);
+ goto RetryLookup;
+ }
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_map_size_t) (entry->vme_end -
+ entry->vme_start));
+
+ entry->object.vm_object->shadowed = TRUE;
+ entry->needs_copy = FALSE;
+ vm_map_lock_write_to_read(map);
+ }
+ else {
+ /*
+ * We're attempting to read a copy-on-write
+ * page -- don't allow writes.
+ */
+
+ prot &= (~VM_PROT_WRITE);
+ }
+ }
+
+ /*
+ * Create an object if necessary.
+ */
+ if (entry->object.vm_object == VM_OBJECT_NULL) {
+
+ if (vm_map_lock_read_to_write(map)) {
+ vm_map_lock_read(map);
+ goto RetryLookup;
+ }
+
+ entry->object.vm_object = vm_object_allocate(
+ (vm_map_size_t)(entry->vme_end - entry->vme_start));
+ entry->offset = 0;
+ vm_map_lock_write_to_read(map);
+ }
+
+ /*
+ * Return the object/offset from this entry. If the entry
+ * was copy-on-write or empty, it has been fixed up. Also
+ * return the protection.
+ */
+
+ *offset = (vaddr - entry->vme_start) + entry->offset;
+ *object = entry->object.vm_object;
+ *out_prot = prot;
+
+ if (fault_info) {
+ fault_info->interruptible = THREAD_UNINT; /* for now... */
+ /* ... the caller will change "interruptible" if needed */
+ fault_info->cluster_size = 0;
+ fault_info->user_tag = entry->alias;
+ fault_info->behavior = entry->behavior;
+ fault_info->lo_offset = entry->offset;
+ fault_info->hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;
+ fault_info->no_cache = entry->no_cache;
+ fault_info->stealth = FALSE;
+ fault_info->io_sync = FALSE;
+ fault_info->cs_bypass = (entry->used_for_jit)? TRUE : FALSE;
+ fault_info->mark_zf_absent = FALSE;
+ fault_info->batch_pmap_op = FALSE;
+ }
+
+ /*
+ * Lock the object to prevent it from disappearing
+ */
+ if (object_lock_type == OBJECT_LOCK_EXCLUSIVE)
+ vm_object_lock(*object);
+ else
+ vm_object_lock_shared(*object);
+
+ /*
+ * Save the version number
+ */
+
+ out_version->main_timestamp = map->timestamp;
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * vm_map_verify:
+ *
+ * Verifies that the map in question has not changed
+ * since the given version. On success the map is left
+ * read-locked and will not change until
+ * vm_map_verify_done() is called; on failure the read
+ * lock is dropped before returning.
+ */
+boolean_t
+vm_map_verify(
+ register vm_map_t map,
+ register vm_map_version_t *version) /* REF */
+{
+ boolean_t unchanged;
+
+ vm_map_lock_read(map);
+ unchanged = (map->timestamp == version->main_timestamp);
+ if (!unchanged) {
+ /* map was modified since the snapshot: don't keep the lock */
+ vm_map_unlock_read(map);
+ }
+ return unchanged;
+}
+
+/*
+ * vm_map_verify_done:
+ *
+ * Releases locks acquired by a vm_map_verify.
+ *
+ * This is now a macro in vm/vm_map.h. It does a
+ * vm_map_unlock_read on the map.
+ */
+
+
+/*
+ * TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
+ * Goes away after regular vm_region_recurse function migrates to
+ * 64 bits
+ * vm_region_recurse: A form of vm_region which follows the
+ * submaps in a target map
+ *
+ */
+
+kern_return_t
+vm_map_region_recurse_64(
+ vm_map_t map,
+ vm_map_offset_t *address, /* IN/OUT */
+ vm_map_size_t *size, /* OUT */
+ natural_t *nesting_depth, /* IN/OUT */
+ vm_region_submap_info_64_t submap_info, /* IN/OUT */
+ mach_msg_type_number_t *count) /* IN/OUT */
+{
+ mach_msg_type_number_t original_count;
+ vm_region_extended_info_data_t extended;
+ vm_map_entry_t tmp_entry;
+ vm_map_offset_t user_address;
+ unsigned int user_max_depth;
+
+ /*
+ * "curr_entry" is the VM map entry preceding or including the
+ * address we're looking for.
+ * "curr_map" is the map or sub-map containing "curr_entry".
+ * "curr_address" is the equivalent of the top map's "user_address"
+ * in the current map.
+ * "curr_offset" is the cumulated offset of "curr_map" in the
+ * target task's address space.
+ * "curr_depth" is the depth of "curr_map" in the chain of
+ * sub-maps.
+ *
+ * "curr_max_below" and "curr_max_above" limit the range (around
+ * "curr_address") we should take into account in the current (sub)map.
+ * They limit the range to what's visible through the map entries
+ * we've traversed from the top map to the current map.
+
+ */
+ vm_map_entry_t curr_entry;
+ vm_map_address_t curr_address;
+ vm_map_offset_t curr_offset;
+ vm_map_t curr_map;
+ unsigned int curr_depth;
+ vm_map_offset_t curr_max_below, curr_max_above;
+ vm_map_offset_t curr_skip;
+
+ /*
+ * "next_" is the same as "curr_" but for the VM region immediately
+ * after the address we're looking for. We need to keep track of this
+ * too because we want to return info about that region if the
+ * address we're looking for is not mapped.
+ */
+ vm_map_entry_t next_entry;
+ vm_map_offset_t next_offset;
+ vm_map_offset_t next_address;
+ vm_map_t next_map;
+ unsigned int next_depth;
+ vm_map_offset_t next_max_below, next_max_above;
+ vm_map_offset_t next_skip;
+
+ boolean_t look_for_pages;
+ vm_region_submap_short_info_64_t short_info;
+
+ if (map == VM_MAP_NULL) {
+ /* no address space to work on */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+
+ if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
+ /*
+ * "info" structure is not big enough and
+ * would overflow
+ */
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ original_count = *count;
+
+ if (original_count < VM_REGION_SUBMAP_INFO_V0_COUNT_64) {
+ /* caller's buffer only fits the short info flavor */
+ *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
+ look_for_pages = FALSE;
+ short_info = (vm_region_submap_short_info_64_t) submap_info;
+ submap_info = NULL;
+ } else {
+ look_for_pages = TRUE;
+ *count = VM_REGION_SUBMAP_INFO_V0_COUNT_64;
+ short_info = NULL;
+
+ if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
+ *count = VM_REGION_SUBMAP_INFO_V1_COUNT_64;
+ }
+ }
+
+ user_address = *address;
+ user_max_depth = *nesting_depth;
+
+ curr_entry = NULL;
+ curr_map = map;
+ curr_address = user_address;
+ curr_offset = 0;
+ curr_skip = 0;
+ curr_depth = 0;
+ curr_max_above = ((vm_map_offset_t) -1) - curr_address;
+ curr_max_below = curr_address;
+
+ next_entry = NULL;
+ next_map = NULL;
+ next_address = 0;
+ next_offset = 0;
+ next_skip = 0;
+ next_depth = 0;
+ next_max_above = (vm_map_offset_t) -1;
+ next_max_below = (vm_map_offset_t) -1;
+
+ if (not_in_kdp) {
+ vm_map_lock_read(curr_map);
+ }
+
+ /*
+ * Walk down the submap chain, tracking both the entry containing
+ * the address ("curr_") and the closest following entry ("next_"),
+ * until we hit a leaf entry or reach the requested nesting depth.
+ */
+ for (;;) {
+ if (vm_map_lookup_entry(curr_map,
+ curr_address,
+ &tmp_entry)) {
+ /* tmp_entry contains the address we're looking for */
+ curr_entry = tmp_entry;
+ } else {
+ vm_map_offset_t skip;
+ /*
+ * The address is not mapped. "tmp_entry" is the
+ * map entry preceding the address. We want the next
+ * one, if it exists.
+ */
+ curr_entry = tmp_entry->vme_next;
+
+ if (curr_entry == vm_map_to_entry(curr_map) ||
+ (curr_entry->vme_start >=
+ curr_address + curr_max_above)) {
+ /* no next entry at this level: stop looking */
+ if (not_in_kdp) {
+ vm_map_unlock_read(curr_map);
+ }
+ curr_entry = NULL;
+ curr_map = NULL;
+ curr_offset = 0;
+ curr_depth = 0;
+ curr_max_above = 0;
+ curr_max_below = 0;
+ break;
+ }
+
+ /* adjust current address and offset */
+ skip = curr_entry->vme_start - curr_address;
+ curr_address = curr_entry->vme_start;
+ curr_skip = skip;
+ curr_offset += skip;
+ curr_max_above -= skip;
+ curr_max_below = 0;
+ }
+
+ /*
+ * Is the next entry at this level closer to the address (or
+ * deeper in the submap chain) than the one we had
+ * so far ?
+ */
+ tmp_entry = curr_entry->vme_next;
+ if (tmp_entry == vm_map_to_entry(curr_map)) {
+ /* no next entry at this level */
+ } else if (tmp_entry->vme_start >=
+ curr_address + curr_max_above) {
+ /*
+ * tmp_entry is beyond the scope of what we mapped of
+ * this submap in the upper level: ignore it.
+ */
+ } else if ((next_entry == NULL) ||
+ (tmp_entry->vme_start + curr_offset <=
+ next_entry->vme_start + next_offset)) {
+ /*
+ * We didn't have a "next_entry" or this one is
+ * closer to the address we're looking for:
+ * use this "tmp_entry" as the new "next_entry".
+ */
+ if (next_entry != NULL) {
+ /* unlock the last "next_map" */
+ if (next_map != curr_map && not_in_kdp) {
+ vm_map_unlock_read(next_map);
+ }
+ }
+ next_entry = tmp_entry;
+ next_map = curr_map;
+ next_depth = curr_depth;
+ next_address = next_entry->vme_start;
+ next_skip = curr_skip;
+ next_offset = curr_offset;
+ next_offset += (next_address - curr_address);
+ next_max_above = MIN(next_max_above, curr_max_above);
+ next_max_above = MIN(next_max_above,
+ next_entry->vme_end - next_address);
+ next_max_below = MIN(next_max_below, curr_max_below);
+ next_max_below = MIN(next_max_below,
+ next_address - next_entry->vme_start);
+ }
+
+ /*
+ * "curr_max_{above,below}" allow us to keep track of the
+ * portion of the submap that is actually mapped at this level:
+ * the rest of that submap is irrelevant to us, since it's not
+ * mapped here.
+ * The relevant portion of the map starts at
+ * "curr_entry->offset" up to the size of "curr_entry".
+ */
+ curr_max_above = MIN(curr_max_above,
+ curr_entry->vme_end - curr_address);
+ curr_max_below = MIN(curr_max_below,
+ curr_address - curr_entry->vme_start);
+
+ if (!curr_entry->is_sub_map ||
+ curr_depth >= user_max_depth) {
+ /*
+ * We hit a leaf map or we reached the maximum depth
+ * we could, so stop looking. Keep the current map
+ * locked.
+ */
+ break;
+ }
+
+ /*
+ * Get down to the next submap level.
+ */
+
+ /*
+ * Lock the next level and unlock the current level,
+ * unless we need to keep it locked to access the "next_entry"
+ * later.
+ */
+ if (not_in_kdp) {
+ vm_map_lock_read(curr_entry->object.sub_map);
+ }
+ if (curr_map == next_map) {
+ /* keep "next_map" locked in case we need it */
+ } else {
+ /* release this map */
+ if (not_in_kdp)
+ vm_map_unlock_read(curr_map);
+ }
+
+ /*
+ * Adjust the offset. "curr_entry" maps the submap
+ * at relative address "curr_entry->vme_start" in the
+ * curr_map but skips the first "curr_entry->offset"
+ * bytes of the submap.
+ * "curr_offset" always represents the offset of a virtual
+ * address in the curr_map relative to the absolute address
+ * space (i.e. the top-level VM map).
+ */
+ curr_offset +=
+ (curr_entry->offset - curr_entry->vme_start);
+ curr_address = user_address + curr_offset;
+ /* switch to the submap */
+ curr_map = curr_entry->object.sub_map;
+ curr_depth++;
+ curr_entry = NULL;
+ }
+
+ if (curr_entry == NULL) {
+ /* no VM region contains the address... */
+ if (next_entry == NULL) {
+ /* ... and no VM region follows it either */
+ return KERN_INVALID_ADDRESS;
+ }
+ /* ... gather info about the next VM region */
+ curr_entry = next_entry;
+ curr_map = next_map; /* still locked ... */
+ curr_address = next_address;
+ curr_skip = next_skip;
+ curr_offset = next_offset;
+ curr_depth = next_depth;
+ curr_max_above = next_max_above;
+ curr_max_below = next_max_below;
+ if (curr_map == map) {
+ user_address = curr_address;
+ }
+ } else {
+ /* we won't need "next_entry" after all */
+ if (next_entry != NULL) {
+ /* release "next_map" */
+ if (next_map != curr_map && not_in_kdp) {
+ vm_map_unlock_read(next_map);
+ }
+ }
+ }
+ next_entry = NULL;
+ next_map = NULL;
+ next_offset = 0;
+ next_skip = 0;
+ next_depth = 0;
+ next_max_below = -1;
+ next_max_above = -1;
+
+ /* report where (and how deep) we actually ended up */
+ *nesting_depth = curr_depth;
+ *size = curr_max_above + curr_max_below;
+ *address = user_address + curr_skip - curr_max_below;
+
+// LP64todo: all the current tools are 32bit, obviously never worked for 64b
+// so probably should be a real 32b ID vs. ptr.
+// Current users just check for equality
+#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p))
+
+ if (look_for_pages) {
+ submap_info->user_tag = curr_entry->alias;
+ submap_info->offset = curr_entry->offset;
+ submap_info->protection = curr_entry->protection;
+ submap_info->inheritance = curr_entry->inheritance;
+ submap_info->max_protection = curr_entry->max_protection;
+ submap_info->behavior = curr_entry->behavior;
+ submap_info->user_wired_count = curr_entry->user_wired_count;
+ submap_info->is_submap = curr_entry->is_sub_map;
+ submap_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object);
+ } else {
+ short_info->user_tag = curr_entry->alias;
+ short_info->offset = curr_entry->offset;
+ short_info->protection = curr_entry->protection;
+ short_info->inheritance = curr_entry->inheritance;
+ short_info->max_protection = curr_entry->max_protection;
+ short_info->behavior = curr_entry->behavior;
+ short_info->user_wired_count = curr_entry->user_wired_count;
+ short_info->is_submap = curr_entry->is_sub_map;
+ short_info->object_id = INFO_MAKE_OBJECT_ID(curr_entry->object.vm_object);
+ }
+
+ /* reset the extended statistics before (possibly) computing them below */
+ extended.pages_resident = 0;
+ extended.pages_swapped_out = 0;
+ extended.pages_shared_now_private = 0;
+ extended.pages_dirtied = 0;
+ extended.pages_reusable = 0;
+ extended.external_pager = 0;
+ extended.shadow_depth = 0;
+
+ if (not_in_kdp) {
+ if (!curr_entry->is_sub_map) {
+ vm_map_offset_t range_start, range_end;
+ range_start = MAX((curr_address - curr_max_below),
+ curr_entry->vme_start);
+ range_end = MIN((curr_address + curr_max_above),
+ curr_entry->vme_end);
+ vm_map_region_walk(curr_map,
+ range_start,
+ curr_entry,
+ (curr_entry->offset +
+ (range_start -
+ curr_entry->vme_start)),
+ range_end - range_start,
+ &extended,
+ look_for_pages, VM_REGION_EXTENDED_INFO_COUNT);
+ if (extended.external_pager &&
+ extended.ref_count == 2 &&
+ extended.share_mode == SM_SHARED) {
+ extended.share_mode = SM_PRIVATE;
+ }
+ } else {
+ if (curr_entry->use_pmap) {
+ extended.share_mode = SM_TRUESHARED;
+ } else {
+ extended.share_mode = SM_PRIVATE;
+ }
+ extended.ref_count =
+ curr_entry->object.sub_map->ref_count;
+ }
+ }
+
+ if (look_for_pages) {
+ submap_info->pages_resident = extended.pages_resident;
+ submap_info->pages_swapped_out = extended.pages_swapped_out;
+ submap_info->pages_shared_now_private =
+ extended.pages_shared_now_private;
+ submap_info->pages_dirtied = extended.pages_dirtied;
+ submap_info->external_pager = extended.external_pager;
+ submap_info->shadow_depth = extended.shadow_depth;
+ submap_info->share_mode = extended.share_mode;
+ submap_info->ref_count = extended.ref_count;
+
+ if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
+ submap_info->pages_reusable = extended.pages_reusable;
+ }
+ } else {
+ short_info->external_pager = extended.external_pager;
+ short_info->shadow_depth = extended.shadow_depth;
+ short_info->share_mode = extended.share_mode;
+ short_info->ref_count = extended.ref_count;
+ }
+
+ if (not_in_kdp) {
+ vm_map_unlock_read(curr_map);
+ }
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_region:
+ *
+ * User call to obtain information about a region in
+ * a task's address map. Currently, only one flavor is
+ * supported.
+ *
+ * XXX The reserved and behavior fields cannot be filled
+ * in until the vm merge from the IK is completed, and
+ * vm_reserve is implemented.
+ */
+
+kern_return_t
+vm_map_region(
+ vm_map_t map,
+ vm_map_offset_t *address, /* IN/OUT */
+ vm_map_size_t *size, /* OUT */
+ vm_region_flavor_t flavor, /* IN */
+ vm_region_info_t info, /* OUT */
+ mach_msg_type_number_t *count, /* IN/OUT */
+ mach_port_t *object_name) /* OUT */
+{
+ vm_map_entry_t tmp_entry;
+ vm_map_entry_t entry;
+ vm_map_offset_t start;
+
+ if (map == VM_MAP_NULL)
+ return(KERN_INVALID_ARGUMENT);
+
+ switch (flavor) {
+
+ case VM_REGION_BASIC_INFO:
+ /* legacy for old 32-bit objects info */
+ {
+ vm_region_basic_info_t basic;
+
+ if (*count < VM_REGION_BASIC_INFO_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ basic = (vm_region_basic_info_t) info;
+ *count = VM_REGION_BASIC_INFO_COUNT;
+
+ vm_map_lock_read(map);
+
+ start = *address;
+ /*
+ * Find the entry containing "start"; if no entry contains
+ * it, take the next entry above it (or fail if none).
+ */
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ start = entry->vme_start;
+
+ /*
+ * NOTE: the object offset is deliberately truncated to
+ * 32 bits here -- this flavor predates 64-bit objects.
+ */
+ basic->offset = (uint32_t)entry->offset;
+ basic->protection = entry->protection;
+ basic->inheritance = entry->inheritance;
+ basic->max_protection = entry->max_protection;
+ basic->behavior = entry->behavior;
+ basic->user_wired_count = entry->user_wired_count;
+ /* "reserved" reports whether the entry maps a submap. */
+ basic->reserved = entry->is_sub_map;
+ *address = start;
+ *size = (entry->vme_end - start);
+
+ if (object_name) *object_name = IP_NULL;
+ if (entry->is_sub_map) {
+ basic->shared = FALSE;
+ } else {
+ basic->shared = entry->is_shared;
+ }
+
+ vm_map_unlock_read(map);
+ return(KERN_SUCCESS);
+ }
+
+ case VM_REGION_BASIC_INFO_64:
+ /* Same as VM_REGION_BASIC_INFO but reports the full 64-bit offset. */
+ {
+ vm_region_basic_info_64_t basic;
+
+ if (*count < VM_REGION_BASIC_INFO_COUNT_64)
+ return(KERN_INVALID_ARGUMENT);
+
+ basic = (vm_region_basic_info_64_t) info;
+ *count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ vm_map_lock_read(map);
+
+ start = *address;
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+
+ start = entry->vme_start;
+
+ basic->offset = entry->offset;
+ basic->protection = entry->protection;
+ basic->inheritance = entry->inheritance;
+ basic->max_protection = entry->max_protection;
+ basic->behavior = entry->behavior;
+ basic->user_wired_count = entry->user_wired_count;
+ basic->reserved = entry->is_sub_map;
+ *address = start;
+ *size = (entry->vme_end - start);
+
+ if (object_name) *object_name = IP_NULL;
+ if (entry->is_sub_map) {
+ basic->shared = FALSE;
+ } else {
+ basic->shared = entry->is_shared;
+ }
+
+ vm_map_unlock_read(map);
+ return(KERN_SUCCESS);
+ }
+ /*
+ * The newer extended flavor validates its (larger) count and then
+ * deliberately falls through to share the legacy code path; the
+ * legacy count check below is a no-op for the newer flavor since
+ * its count requirement is at least as large.
+ */
+ case VM_REGION_EXTENDED_INFO:
+ if (*count < VM_REGION_EXTENDED_INFO_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+ /*fallthru*/
+ case VM_REGION_EXTENDED_INFO__legacy:
+ if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy)
+ return KERN_INVALID_ARGUMENT;
+
+ {
+ vm_region_extended_info_t extended;
+ mach_msg_type_number_t original_count;
+
+ extended = (vm_region_extended_info_t) info;
+
+ vm_map_lock_read(map);
+
+ start = *address;
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ entry = tmp_entry;
+ }
+ start = entry->vme_start;
+
+ extended->protection = entry->protection;
+ extended->user_tag = entry->alias;
+ extended->pages_resident = 0;
+ extended->pages_swapped_out = 0;
+ extended->pages_shared_now_private = 0;
+ extended->pages_dirtied = 0;
+ extended->external_pager = 0;
+ extended->shadow_depth = 0;
+
+ /*
+ * NOTE(review): original_count is saved but never read in this
+ * function -- possibly vestigial; the flavor check below is
+ * what selects the returned count.
+ */
+ original_count = *count;
+ if (flavor == VM_REGION_EXTENDED_INFO__legacy) {
+ *count = VM_REGION_EXTENDED_INFO_COUNT__legacy;
+ } else {
+ /* pages_reusable only exists in the newer layout. */
+ extended->pages_reusable = 0;
+ *count = VM_REGION_EXTENDED_INFO_COUNT;
+ }
+
+ vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended, TRUE, *count);
+
+ /*
+ * An externally-pagered object with exactly two references
+ * (presumably the mapping plus the pager's own reference --
+ * TODO confirm) is effectively private to this map.
+ */
+ if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
+ extended->share_mode = SM_PRIVATE;
+
+ if (object_name)
+ *object_name = IP_NULL;
+ *address = start;
+ *size = (entry->vme_end - start);
+
+ vm_map_unlock_read(map);
+ return(KERN_SUCCESS);
+ }
+ case VM_REGION_TOP_INFO:
+ {
+ vm_region_top_info_t top;
+
+ if (*count < VM_REGION_TOP_INFO_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ top = (vm_region_top_info_t) info;
+ *count = VM_REGION_TOP_INFO_COUNT;
+
+ vm_map_lock_read(map);
+
+ start = *address;
+ if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
+ if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
+ vm_map_unlock_read(map);
+ return(KERN_INVALID_ADDRESS);
+ }
+ } else {
+ entry = tmp_entry;
+
+ }
+ start = entry->vme_start;
+
+ top->private_pages_resident = 0;
+ top->shared_pages_resident = 0;
+
+ /* Walk the entry's shadow chain to classify its pages. */
+ vm_map_region_top_walk(entry, top);
+
+ if (object_name)
+ *object_name = IP_NULL;
+ *address = start;
+ *size = (entry->vme_end - start);
+
+ vm_map_unlock_read(map);
+ return(KERN_SUCCESS);
+ }
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+}
+
+/*
+ * Resident page count attributable to (obj) for a mapping spanning
+ * (entry_size) pages, capped at the entry size.  When the object is
+ * marked all_reusable only wired pages are counted; otherwise reusable
+ * pages are subtracted from the resident count.
+ * NOTE(review): function-like macro evaluates (obj) and (entry_size)
+ * more than once -- pass only side-effect-free expressions.
+ */
+#define OBJ_RESIDENT_COUNT(obj, entry_size) \
+ MIN((entry_size), \
+ ((obj)->all_reusable ? \
+ (obj)->wired_page_count : \
+ (obj)->resident_page_count - (obj)->reusable_page_count))
+
+void
+vm_map_region_top_walk(
+ vm_map_entry_t entry,
+ vm_region_top_info_t top)
+{
+
+ if (entry->object.vm_object == 0 || entry->is_sub_map) {
+ top->share_mode = SM_EMPTY;
+ top->ref_count = 0;
+ top->obj_id = 0;
+ return;
+ }
+
+ {
+ struct vm_object *obj, *tmp_obj;
+ int ref_count;
+ uint32_t entry_size;
+
+ entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64);
+
+ obj = entry->object.vm_object;
+
+ vm_object_lock(obj);
+
+ if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
+ ref_count--;
+
+ assert(obj->reusable_page_count <= obj->resident_page_count);
+ if (obj->shadow) {
+ if (ref_count == 1)
+ top->private_pages_resident =
+ OBJ_RESIDENT_COUNT(obj, entry_size);
+ else
+ top->shared_pages_resident =
+ OBJ_RESIDENT_COUNT(obj, entry_size);
+ top->ref_count = ref_count;
+ top->share_mode = SM_COW;
+
+ while ((tmp_obj = obj->shadow)) {
+ vm_object_lock(tmp_obj);
+ vm_object_unlock(obj);
+ obj = tmp_obj;
+
+ if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
+ ref_count--;
+
+ assert(obj->reusable_page_count <= obj->resident_page_count);
+ top->shared_pages_resident +=
+ OBJ_RESIDENT_COUNT(obj, entry_size);
+ top->ref_count += ref_count - 1;
+ }
+ } else {
+ if (entry->superpage_size) {
+ top->share_mode = SM_LARGE_PAGE;
+ top->shared_pages_resident = 0;
+ top->private_pages_resident = entry_size;
+ } else if (entry->needs_copy) {
+ top->share_mode = SM_COW;
+ top->shared_pages_resident =
+ OBJ_RESIDENT_COUNT(obj, entry_size);
+ } else {
+ if (ref_count == 1 ||
+ (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
+ top->share_mode = SM_PRIVATE;
+ top->private_pages_resident =
+ OBJ_RESIDENT_COUNT(obj,
+ entry_size);
+ } else {
+ top->share_mode = SM_SHARED;
+ top->shared_pages_resident =
+ OBJ_RESIDENT_COUNT(obj,
+ entry_size);
+ }
+ }
+ top->ref_count = ref_count;