+bool
+vm_map_is_exotic(
+ vm_map_t map)
+{
+ return VM_MAP_IS_EXOTIC(map);
+}
+
+bool
+vm_map_is_alien(
+ vm_map_t map)
+{
+ return VM_MAP_IS_ALIEN(map);
+}
+
#if XNU_TARGET_OS_OSX
/*
 * vm_map_mark_alien:
 *
 * Mark "map" as "alien" by setting its "is_alien" flag.
 * The map lock is taken for the update; the flag is never cleared here.
 */
void
vm_map_mark_alien(
	vm_map_t map)
{
	vm_map_lock(map);
	map->is_alien = true;
	vm_map_unlock(map);
}

/*
 * vm_map_single_jit:
 *
 * Set the "single_jit" flag on "map", under the map lock.
 * NOTE(review): presumably restricts the map to a single JIT region --
 * the policy enforced by this flag lives elsewhere; confirm at callers.
 */
void
vm_map_single_jit(
	vm_map_t map)
{
	vm_map_lock(map);
	map->single_jit = true;
	vm_map_unlock(map);
}
#endif /* XNU_TARGET_OS_OSX */
+
+void vm_map_copy_to_physcopy(vm_map_copy_t copy_map, vm_map_t target_map);
+void
+vm_map_copy_to_physcopy(
+ vm_map_copy_t copy_map,
+ vm_map_t target_map)
+{
+ vm_map_size_t size;
+ vm_map_entry_t entry;
+ vm_map_entry_t new_entry;
+ vm_object_t new_object;
+ unsigned int pmap_flags;
+ pmap_t new_pmap;
+ vm_map_t new_map;
+ vm_map_address_t src_start, src_end, src_cur;
+ vm_map_address_t dst_start, dst_end, dst_cur;
+ kern_return_t kr;
+ void *kbuf;
+
+ /*
+ * Perform the equivalent of vm_allocate() and memcpy().
+ * Replace the mappings in "copy_map" with the newly allocated mapping.
+ */
+ DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) BEFORE\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size);
+
+ assert(copy_map->cpy_hdr.page_shift != VM_MAP_PAGE_MASK(target_map));
+
+ /* allocate new VM object */
+ size = VM_MAP_ROUND_PAGE(copy_map->size, PAGE_MASK);
+ new_object = vm_object_allocate(size);
+ assert(new_object);
+
+ /* allocate new VM map entry */
+ new_entry = vm_map_copy_entry_create(copy_map, FALSE);
+ assert(new_entry);
+
+ /* finish initializing new VM map entry */
+ new_entry->protection = VM_PROT_DEFAULT;
+ new_entry->max_protection = VM_PROT_DEFAULT;
+ new_entry->use_pmap = TRUE;
+
+ /* make new VM map entry point to new VM object */
+ new_entry->vme_start = 0;
+ new_entry->vme_end = size;
+ VME_OBJECT_SET(new_entry, new_object);
+ VME_OFFSET_SET(new_entry, 0);
+
+ /* create a new pmap to map "copy_map" */
+ pmap_flags = 0;
+ assert(copy_map->cpy_hdr.page_shift == FOURK_PAGE_SHIFT);
+#if PMAP_CREATE_FORCE_4K_PAGES
+ pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
+#endif /* PMAP_CREATE_FORCE_4K_PAGES */
+ pmap_flags |= PMAP_CREATE_64BIT;
+ new_pmap = pmap_create_options(NULL, (vm_map_size_t)0, pmap_flags);
+ assert(new_pmap);
+
+ /* create a new pageable VM map to map "copy_map" */
+ new_map = vm_map_create(new_pmap, 0, MACH_VM_MAX_ADDRESS, TRUE);
+ assert(new_map);
+ vm_map_set_page_shift(new_map, copy_map->cpy_hdr.page_shift);
+
+ /* map "copy_map" in the new VM map */
+ src_start = 0;
+ kr = vm_map_copyout_internal(
+ new_map,
+ &src_start,
+ copy_map,
+ copy_map->size,
+ FALSE, /* consume_on_success */
+ VM_PROT_DEFAULT,
+ VM_PROT_DEFAULT,
+ VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ src_end = src_start + copy_map->size;
+
+ /* map "new_object" in the new VM map */
+ vm_object_reference(new_object);
+ dst_start = 0;
+ kr = vm_map_enter(new_map,
+ &dst_start,
+ size,
+ 0, /* mask */
+ VM_FLAGS_ANYWHERE,
+ VM_MAP_KERNEL_FLAGS_NONE,
+ VM_KERN_MEMORY_OSFMK,
+ new_object,
+ 0, /* offset */
+ FALSE, /* needs copy */
+ VM_PROT_DEFAULT,
+ VM_PROT_DEFAULT,
+ VM_INHERIT_DEFAULT);
+ assert(kr == KERN_SUCCESS);
+ dst_end = dst_start + size;
+
+ /* get a kernel buffer */
+ kbuf = kheap_alloc(KHEAP_TEMP, PAGE_SIZE, Z_WAITOK);
+ assert(kbuf);
+
+ /* physically copy "copy_map" mappings to new VM object */
+ for (src_cur = src_start, dst_cur = dst_start;
+ src_cur < src_end;
+ src_cur += PAGE_SIZE, dst_cur += PAGE_SIZE) {
+ vm_size_t bytes;
+
+ bytes = PAGE_SIZE;
+ if (src_cur + PAGE_SIZE > src_end) {
+ /* partial copy for last page */
+ bytes = src_end - src_cur;
+ assert(bytes > 0 && bytes < PAGE_SIZE);
+ /* rest of dst page should be zero-filled */
+ }
+ /* get bytes from src mapping */
+ kr = copyinmap(new_map, src_cur, kbuf, bytes);
+ if (kr != KERN_SUCCESS) {
+ DEBUG4K_COPY("copyinmap(%p, 0x%llx, %p, 0x%llx) kr 0x%x\n", new_map, (uint64_t)src_cur, kbuf, (uint64_t)bytes, kr);
+ }
+ /* put bytes in dst mapping */
+ assert(dst_cur < dst_end);
+ assert(dst_cur + bytes <= dst_end);
+ kr = copyoutmap(new_map, kbuf, dst_cur, bytes);
+ if (kr != KERN_SUCCESS) {
+ DEBUG4K_COPY("copyoutmap(%p, %p, 0x%llx, 0x%llx) kr 0x%x\n", new_map, kbuf, (uint64_t)dst_cur, (uint64_t)bytes, kr);
+ }
+ }
+
+ /* free kernel buffer */
+ kheap_free(KHEAP_TEMP, kbuf, PAGE_SIZE);
+ kbuf = NULL;
+
+ /* destroy new map */
+ vm_map_destroy(new_map, VM_MAP_REMOVE_NO_FLAGS);
+ new_map = VM_MAP_NULL;
+
+ /* dispose of the old map entries in "copy_map" */
+ while (vm_map_copy_first_entry(copy_map) !=
+ vm_map_copy_to_entry(copy_map)) {
+ entry = vm_map_copy_first_entry(copy_map);
+ vm_map_copy_entry_unlink(copy_map, entry);
+ if (entry->is_sub_map) {
+ vm_map_deallocate(VME_SUBMAP(entry));
+ } else {
+ vm_object_deallocate(VME_OBJECT(entry));
+ }
+ vm_map_copy_entry_dispose(copy_map, entry);
+ }
+
+ /* change "copy_map"'s page_size to match "target_map" */
+ copy_map->cpy_hdr.page_shift = VM_MAP_PAGE_SHIFT(target_map);
+ copy_map->offset = 0;
+ copy_map->size = size;
+
+ /* insert new map entry in "copy_map" */
+ assert(vm_map_copy_last_entry(copy_map) == vm_map_copy_to_entry(copy_map));
+ vm_map_copy_entry_link(copy_map, vm_map_copy_last_entry(copy_map), new_entry);
+
+ DEBUG4K_COPY("copy_map %p (%d %d 0x%llx 0x%llx) AFTER\n", copy_map, copy_map->cpy_hdr.page_shift, copy_map->cpy_hdr.nentries, copy_map->offset, (uint64_t)copy_map->size);
+}
+
void
vm_map_copy_adjust_get_target_copy_map(
	vm_map_copy_t copy_map,
	vm_map_copy_t *target_copy_map_p);
/*
 * vm_map_copy_adjust_get_target_copy_map:
 *
 * Ensure the caller has a "target_copy_map" it is allowed to modify.
 * If *target_copy_map_p is already set, it is used as-is.  Otherwise a
 * new vm_map_copy_t is allocated and every entry of "copy_map" is
 * duplicated into it (taking an extra reference on each entry's submap
 * or VM object), leaving the original "copy_map" untouched.
 */
void
vm_map_copy_adjust_get_target_copy_map(
	vm_map_copy_t copy_map,
	vm_map_copy_t *target_copy_map_p)
{
	vm_map_copy_t target_copy_map;
	vm_map_entry_t entry, target_entry;

	if (*target_copy_map_p != VM_MAP_COPY_NULL) {
		/* the caller already has a "target_copy_map": use it */
		return;
	}

	/* the caller wants us to create a new copy of "copy_map" */
	target_copy_map = vm_map_copy_allocate();
	target_copy_map->type = copy_map->type;
	assert(target_copy_map->type == VM_MAP_COPY_ENTRY_LIST);
	target_copy_map->offset = copy_map->offset;
	target_copy_map->size = copy_map->size;
	target_copy_map->cpy_hdr.page_shift = copy_map->cpy_hdr.page_shift;
	vm_map_store_init(&target_copy_map->cpy_hdr);
	for (entry = vm_map_copy_first_entry(copy_map);
	    entry != vm_map_copy_to_entry(copy_map);
	    entry = entry->vme_next) {
		target_entry = vm_map_copy_entry_create(target_copy_map, FALSE);
		vm_map_entry_copy_full(target_entry, entry);
		/* the clone shares the backing store: take a reference */
		if (target_entry->is_sub_map) {
			vm_map_reference(VME_SUBMAP(target_entry));
		} else {
			vm_object_reference(VME_OBJECT(target_entry));
		}
		/* append at the tail to preserve the original entry order */
		vm_map_copy_entry_link(
			target_copy_map,
			vm_map_copy_last_entry(target_copy_map),
			target_entry);
	}
	entry = VM_MAP_ENTRY_NULL;
	*target_copy_map_p = target_copy_map;
}
+
void
vm_map_copy_trim(
	vm_map_copy_t copy_map,
	int new_page_shift,
	vm_map_offset_t trim_start,
	vm_map_offset_t trim_end);
/*
 * vm_map_copy_trim:
 *
 * Remove the [trim_start, trim_end) range from "copy_map".
 * "trim_start" and "trim_end" are given as offsets relative to the
 * start of the first entry; they are rebased to absolute map-copy
 * addresses below.  Entries are clipped at "new_page_shift"
 * granularity (temporarily installed as the copy's page shift for
 * the duration of the clipping) and the copy's "size" is reduced by
 * whatever is disposed of.  The original page shift is restored on
 * return; the caller is expected to adjust "offset"/"size" semantics
 * accordingly.
 */
void
vm_map_copy_trim(
	vm_map_copy_t copy_map,
	int new_page_shift,
	vm_map_offset_t trim_start,
	vm_map_offset_t trim_end)
{
	int copy_page_shift;
	vm_map_entry_t entry, next_entry;

	assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST);
	assert(copy_map->cpy_hdr.nentries > 0);

	/* rebase the trim range onto the first entry's start address */
	trim_start += vm_map_copy_first_entry(copy_map)->vme_start;
	trim_end += vm_map_copy_first_entry(copy_map)->vme_start;

	/* use the new page_shift to do the clipping */
	copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map);
	copy_map->cpy_hdr.page_shift = new_page_shift;

	for (entry = vm_map_copy_first_entry(copy_map);
	    entry != vm_map_copy_to_entry(copy_map);
	    entry = next_entry) {
		/* capture the successor now: "entry" may be unlinked below */
		next_entry = entry->vme_next;
		if (entry->vme_end <= trim_start) {
			/* entry fully before trim range: skip */
			continue;
		}
		if (entry->vme_start >= trim_end) {
			/* entry fully after trim range: done */
			break;
		}
		/* clip entry if needed */
		vm_map_copy_clip_start(copy_map, entry, trim_start);
		vm_map_copy_clip_end(copy_map, entry, trim_end);
		/* dispose of entry */
		copy_map->size -= entry->vme_end - entry->vme_start;
		vm_map_copy_entry_unlink(copy_map, entry);
		if (entry->is_sub_map) {
			vm_map_deallocate(VME_SUBMAP(entry));
		} else {
			vm_object_deallocate(VME_OBJECT(entry));
		}
		vm_map_copy_entry_dispose(copy_map, entry);
		entry = VM_MAP_ENTRY_NULL;
	}

	/* restore copy_map's original page_shift */
	copy_map->cpy_hdr.page_shift = copy_page_shift;
}
+
/*
 * vm_map_copy_adjust_to_target:
 *
 * Make any necessary adjustments to "copy_map" to allow it to be
 * mapped into "target_map" (which may use a different page size).
 * If no changes were necessary, "*target_copy_map_p" points to the
 * untouched "copy_map".
 * If changes are necessary, changes will be made to "*target_copy_map_p".
 * If "*target_copy_map_p" was NULL, we create a new "vm_map_copy_t" and
 * copy the original "copy_map" to it before applying the changes.
 * The caller should discard "*target_copy_map_p" if it's not the same as
 * the original "copy_map".
 *
 * The adjustments are, in order:
 * 1. trim the end and start of the copy down to the (page-rounded)
 *    [offset, offset+size) range requested by the caller,
 * 2. when going from a smaller to a larger page size, detect entries
 *    whose object offsets are mis-aligned for the target page size:
 *    - mis-alignment at the very start/end can be fixed by
 *      "over-mapping" (extending the mapping outward to the target
 *      page boundary); the extra amounts are returned via
 *      "*overmap_start_p"/"*overmap_end_p",
 *    - mis-alignment in the interior can only be handled by an
 *      immediate physical copy (if "copy" is TRUE), otherwise the
 *      request fails with KERN_NOT_SUPPORTED,
 * 3. rebase all entries so the copy starts at offset 0 and stamp the
 *    copy with the target page shift.
 *
 * Returns KERN_INVALID_ARGUMENT if [offset, offset+size) exceeds the
 * copy, KERN_NOT_SUPPORTED for unshareable mis-alignment, else
 * KERN_SUCCESS.
 */
/* TODO4K: also adjust to sub-range in the copy_map -> add start&end? */
kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t src_copy_map,
	vm_map_offset_t offset,
	vm_map_size_t size,
	vm_map_t target_map,
	boolean_t copy,
	vm_map_copy_t *target_copy_map_p,
	vm_map_offset_t *overmap_start_p,
	vm_map_offset_t *overmap_end_p,
	vm_map_offset_t *trimmed_start_p)
{
	vm_map_copy_t copy_map, target_copy_map;
	vm_map_size_t target_size;
	vm_map_size_t src_copy_map_size;
	vm_map_size_t overmap_start, overmap_end;
	int misalignments;
	vm_map_entry_t entry, target_entry;
	vm_map_offset_t addr_adjustment;
	vm_map_offset_t new_start, new_end;
	int copy_page_mask, target_page_mask;
	int copy_page_shift, target_page_shift;
	vm_map_offset_t trimmed_end;

	/*
	 * Assert that the vm_map_copy is coming from the right
	 * zone and hasn't been forged
	 */
	vm_map_copy_require(src_copy_map);
	assert(src_copy_map->type == VM_MAP_COPY_ENTRY_LIST);

	/*
	 * Start working with "src_copy_map" but we'll switch
	 * to "target_copy_map" as soon as we start making adjustments.
	 */
	copy_map = src_copy_map;
	src_copy_map_size = src_copy_map->size;

	copy_page_shift = VM_MAP_COPY_PAGE_SHIFT(copy_map);
	copy_page_mask = VM_MAP_COPY_PAGE_MASK(copy_map);
	target_page_shift = VM_MAP_PAGE_SHIFT(target_map);
	target_page_mask = VM_MAP_PAGE_MASK(target_map);

	DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p...\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, *target_copy_map_p);

	target_copy_map = *target_copy_map_p;
	if (target_copy_map != VM_MAP_COPY_NULL) {
		vm_map_copy_require(target_copy_map);
	}

	if (offset + size > copy_map->size) {
		DEBUG4K_ERROR("copy_map %p (%d->%d) copy_map->size 0x%llx offset 0x%llx size 0x%llx KERN_INVALID_ARGUMENT\n", copy_map, copy_page_shift, target_page_shift, (uint64_t)copy_map->size, (uint64_t)offset, (uint64_t)size, (uint64_t)offset, (uint64_t)size);
		return KERN_INVALID_ARGUMENT;
	}

	/* trim the end, rounded up to the target page size */
	trimmed_end = 0;
	new_end = VM_MAP_ROUND_PAGE(offset + size, target_page_mask);
	if (new_end < copy_map->size) {
		trimmed_end = src_copy_map_size - new_end;
		DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... trim end from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)new_end, (uint64_t)copy_map->size);
		/* get "target_copy_map" if needed and adjust it */
		vm_map_copy_adjust_get_target_copy_map(copy_map,
		    &target_copy_map);
		copy_map = target_copy_map;
		vm_map_copy_trim(target_copy_map, target_page_shift,
		    new_end, copy_map->size);
	}

	/* trim the start, truncated to the target page size */
	new_start = VM_MAP_TRUNC_PAGE(offset, target_page_mask);
	if (new_start != 0) {
		DEBUG4K_ADJUST("copy_map %p (%d->%d) copy %d offset 0x%llx size 0x%llx target_copy_map %p... trim start from 0x%llx to 0x%llx\n", copy_map, copy_page_shift, target_page_shift, copy, (uint64_t)offset, (uint64_t)size, target_copy_map, (uint64_t)0, (uint64_t)new_start);
		/* get "target_copy_map" if needed and adjust it */
		vm_map_copy_adjust_get_target_copy_map(copy_map,
		    &target_copy_map);
		copy_map = target_copy_map;
		vm_map_copy_trim(target_copy_map, target_page_shift,
		    0, new_start);
	}
	*trimmed_start_p = new_start;

	/* target_size starts with what's left after trimming */
	target_size = copy_map->size;
	assertf(target_size == src_copy_map_size - *trimmed_start_p - trimmed_end,
	    "target_size 0x%llx src_copy_map_size 0x%llx trimmed_start 0x%llx trimmed_end 0x%llx\n",
	    (uint64_t)target_size, (uint64_t)src_copy_map_size,
	    (uint64_t)*trimmed_start_p, (uint64_t)trimmed_end);

	/* check for misalignments but don't adjust yet */
	misalignments = 0;
	overmap_start = 0;
	overmap_end = 0;
	if (copy_page_shift < target_page_shift) {
		/*
		 * Remapping from 4K to 16K: check the VM object alignments
		 * throughout the range.
		 * If the start and end of the range are mis-aligned, we can
		 * over-map to re-align, and adjust the "overmap" start/end
		 * and "target_size" of the range accordingly.
		 * If there is any mis-alignment within the range:
		 * if "copy":
		 * we can do immediate-copy instead of copy-on-write,
		 * else:
		 * no way to remap and share; fail.
		 */
		for (entry = vm_map_copy_first_entry(copy_map);
		    entry != vm_map_copy_to_entry(copy_map);
		    entry = entry->vme_next) {
			vm_object_offset_t object_offset_start, object_offset_end;

			object_offset_start = VME_OFFSET(entry);
			object_offset_end = object_offset_start;
			object_offset_end += entry->vme_end - entry->vme_start;
			if (object_offset_start & target_page_mask) {
				/* only the first entry's start can be fixed by over-mapping */
				if (entry == vm_map_copy_first_entry(copy_map) && !copy) {
					overmap_start++;
				} else {
					misalignments++;
				}
			}
			if (object_offset_end & target_page_mask) {
				/* only the last entry's end can be fixed by over-mapping */
				if (entry->vme_next == vm_map_copy_to_entry(copy_map) && !copy) {
					overmap_end++;
				} else {
					misalignments++;
				}
			}
		}
	}
	entry = VM_MAP_ENTRY_NULL;

	/* decide how to deal with misalignments */
	assert(overmap_start <= 1);
	assert(overmap_end <= 1);
	if (!overmap_start && !overmap_end && !misalignments) {
		/* copy_map is properly aligned for target_map ... */
		if (*trimmed_start_p) {
			/* ... but we trimmed it, so still need to adjust */
		} else {
			/* ... and we didn't trim anything: we're done */
			if (target_copy_map == VM_MAP_COPY_NULL) {
				target_copy_map = copy_map;
			}
			*target_copy_map_p = target_copy_map;
			*overmap_start_p = 0;
			*overmap_end_p = 0;
			DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
			return KERN_SUCCESS;
		}
	} else if (misalignments && !copy) {
		/* can't "share" if misaligned */
		DEBUG4K_ADJUST("unsupported sharing\n");
#if MACH_ASSERT
		if (debug4k_panic_on_misaligned_sharing) {
			panic("DEBUG4k %s:%d unsupported sharing\n", __FUNCTION__, __LINE__);
		}
#endif /* MACH_ASSERT */
		DEBUG4K_ADJUST("copy_map %p (%d) target_map %p (%d) copy %d target_copy_map %p -> KERN_NOT_SUPPORTED\n", copy_map, copy_page_shift, target_map, target_page_shift, copy, *target_copy_map_p);
		return KERN_NOT_SUPPORTED;
	} else {
		/* can't virtual-copy if misaligned (but can physical-copy) */
		DEBUG4K_ADJUST("mis-aligned copying\n");
	}

	/* get a "target_copy_map" if needed and switch to it */
	vm_map_copy_adjust_get_target_copy_map(copy_map, &target_copy_map);
	copy_map = target_copy_map;

	if (misalignments && copy) {
		vm_map_size_t target_copy_map_size;

		/*
		 * Can't do copy-on-write with misaligned mappings.
		 * Replace the mappings with a physical copy of the original
		 * mappings' contents.
		 */
		target_copy_map_size = target_copy_map->size;
		vm_map_copy_to_physcopy(target_copy_map, target_map);
		*target_copy_map_p = target_copy_map;
		*overmap_start_p = 0;
		/* the physcopy rounded the size up to the target page size */
		*overmap_end_p = target_copy_map->size - target_copy_map_size;
		DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx)-> trimmed 0x%llx overmap start 0x%llx end 0x%llx PHYSCOPY\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
		return KERN_SUCCESS;
	}

	/* apply the adjustments */
	misalignments = 0;
	overmap_start = 0;
	overmap_end = 0;
	/* remove copy_map->offset, so that everything starts at offset 0 */
	addr_adjustment = copy_map->offset;
	/* also remove whatever we trimmed from the start */
	addr_adjustment += *trimmed_start_p;
	for (target_entry = vm_map_copy_first_entry(target_copy_map);
	    target_entry != vm_map_copy_to_entry(target_copy_map);
	    target_entry = target_entry->vme_next) {
		vm_object_offset_t object_offset_start, object_offset_end;

		DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx BEFORE\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
		object_offset_start = VME_OFFSET(target_entry);
		if (object_offset_start & target_page_mask) {
			DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at start\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
			if (target_entry == vm_map_copy_first_entry(target_copy_map)) {
				/*
				 * start of 1st entry is mis-aligned:
				 * re-adjust by over-mapping.
				 */
				overmap_start = object_offset_start - trunc_page_mask_64(object_offset_start, target_page_mask);
				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_start 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_start);
				VME_OFFSET_SET(target_entry, VME_OFFSET(target_entry) - overmap_start);
			} else {
				misalignments++;
				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments);
				assert(copy);
			}
		}

		/*
		 * Shift every entry (except the first entry's start, which
		 * grows the copy instead) by "overmap_start" so the
		 * over-mapped head stays contiguous with the rest.
		 */
		if (target_entry == vm_map_copy_first_entry(target_copy_map)) {
			target_size += overmap_start;
		} else {
			target_entry->vme_start += overmap_start;
		}
		target_entry->vme_end += overmap_start;

		object_offset_end = VME_OFFSET(target_entry) + target_entry->vme_end - target_entry->vme_start;
		if (object_offset_end & target_page_mask) {
			DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx misaligned at end\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
			if (target_entry->vme_next == vm_map_copy_to_entry(target_copy_map)) {
				/*
				 * end of last entry is mis-aligned: re-adjust by over-mapping.
				 */
				overmap_end = round_page_mask_64(object_offset_end, target_page_mask) - object_offset_end;
				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> overmap_end 0x%llx\n", target_entry, VME_OFFSET(target_entry), copy, (uint64_t)overmap_end);
				target_entry->vme_end += overmap_end;
				target_size += overmap_end;
			} else {
				misalignments++;
				DEBUG4K_ADJUST("entry %p offset 0x%llx copy %d -> misalignments %d\n", target_entry, VME_OFFSET(target_entry), copy, misalignments);
				assert(copy);
			}
		}
		/* rebase the entry so the copy starts at address 0 */
		target_entry->vme_start -= addr_adjustment;
		target_entry->vme_end -= addr_adjustment;
		DEBUG4K_ADJUST("copy %p (%d 0x%llx 0x%llx) entry %p [ 0x%llx 0x%llx ] object %p offset 0x%llx AFTER\n", target_copy_map, VM_MAP_COPY_PAGE_SHIFT(target_copy_map), target_copy_map->offset, (uint64_t)target_copy_map->size, target_entry, (uint64_t)target_entry->vme_start, (uint64_t)target_entry->vme_end, VME_OBJECT(target_entry), VME_OFFSET(target_entry));
	}

	target_copy_map->size = target_size;
	target_copy_map->offset += overmap_start;
	target_copy_map->offset -= addr_adjustment;
	target_copy_map->cpy_hdr.page_shift = target_page_shift;

//	assert(VM_MAP_PAGE_ALIGNED(target_copy_map->size, target_page_mask));
//	assert(VM_MAP_PAGE_ALIGNED(target_copy_map->offset, FOURK_PAGE_MASK));
	assert(overmap_start < VM_MAP_PAGE_SIZE(target_map));
	assert(overmap_end < VM_MAP_PAGE_SIZE(target_map));

	*target_copy_map_p = target_copy_map;
	*overmap_start_p = overmap_start;
	*overmap_end_p = overmap_end;

	DEBUG4K_ADJUST("copy_map %p (%d offset 0x%llx size 0x%llx) target_map %p (%d) copy %d target_copy_map %p (%d offset 0x%llx size 0x%llx) -> trimmed 0x%llx overmap start 0x%llx end 0x%llx KERN_SUCCESS\n", copy_map, copy_page_shift, (uint64_t)copy_map->offset, (uint64_t)copy_map->size, target_map, target_page_shift, copy, *target_copy_map_p, VM_MAP_COPY_PAGE_SHIFT(*target_copy_map_p), (uint64_t)(*target_copy_map_p)->offset, (uint64_t)(*target_copy_map_p)->size, (uint64_t)*trimmed_start_p, (uint64_t)*overmap_start_p, (uint64_t)*overmap_end_p);
	return KERN_SUCCESS;
}
+
+kern_return_t
+vm_map_range_physical_size(
+ vm_map_t map,
+ vm_map_address_t start,
+ mach_vm_size_t size,
+ mach_vm_size_t * phys_size)
+{
+ kern_return_t kr;
+ vm_map_copy_t copy_map, target_copy_map;
+ vm_map_offset_t adjusted_start, adjusted_end;
+ vm_map_size_t adjusted_size;
+ vm_prot_t cur_prot, max_prot;
+ vm_map_offset_t overmap_start, overmap_end, trimmed_start;
+ vm_map_kernel_flags_t vmk_flags;
+
+ adjusted_start = vm_map_trunc_page(start, VM_MAP_PAGE_MASK(map));
+ adjusted_end = vm_map_round_page(start + size, VM_MAP_PAGE_MASK(map));
+ adjusted_size = adjusted_end - adjusted_start;
+ *phys_size = adjusted_size;
+ if (VM_MAP_PAGE_SIZE(map) == PAGE_SIZE) {
+ return KERN_SUCCESS;
+ }
+ if (start == 0) {
+ adjusted_start = vm_map_trunc_page(start, PAGE_MASK);
+ adjusted_end = vm_map_round_page(start + size, PAGE_MASK);
+ adjusted_size = adjusted_end - adjusted_start;
+ *phys_size = adjusted_size;
+ return KERN_SUCCESS;
+ }
+ if (adjusted_size == 0) {
+ DEBUG4K_SHARE("map %p start 0x%llx size 0x%llx adjusted 0x%llx -> phys_size 0!\n", map, (uint64_t)start, (uint64_t)size, (uint64_t)adjusted_size);
+ *phys_size = 0;
+ return KERN_SUCCESS;
+ }
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_copy_pageable = TRUE;
+ vmk_flags.vmkf_copy_same_map = TRUE;
+ assert(adjusted_size != 0);
+ cur_prot = VM_PROT_NONE; /* legacy mode */
+ max_prot = VM_PROT_NONE; /* legacy mode */
+ kr = vm_map_copy_extract(map, adjusted_start, adjusted_size,
+ FALSE /* copy */,
+ ©_map,
+ &cur_prot, &max_prot, VM_INHERIT_DEFAULT,
+ vmk_flags);
+ if (kr != KERN_SUCCESS) {
+ DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr);
+ //assert(0);
+ *phys_size = 0;
+ return kr;
+ }
+ assert(copy_map != VM_MAP_COPY_NULL);
+ target_copy_map = copy_map;
+ DEBUG4K_ADJUST("adjusting...\n");
+ kr = vm_map_copy_adjust_to_target(
+ copy_map,
+ start - adjusted_start, /* offset */
+ size, /* size */
+ kernel_map,
+ FALSE, /* copy */
+ &target_copy_map,
+ &overmap_start,
+ &overmap_end,
+ &trimmed_start);
+ if (kr == KERN_SUCCESS) {
+ if (target_copy_map->size != *phys_size) {
+ DEBUG4K_ADJUST("map %p (%d) start 0x%llx size 0x%llx adjusted_start 0x%llx adjusted_end 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx phys_size 0x%llx -> 0x%llx\n", map, VM_MAP_PAGE_SHIFT(map), (uint64_t)start, (uint64_t)size, (uint64_t)adjusted_start, (uint64_t)adjusted_end, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)*phys_size, (uint64_t)target_copy_map->size);
+ }
+ *phys_size = target_copy_map->size;
+ } else {
+ DEBUG4K_ERROR("map %p start 0x%llx 0x%llx size 0x%llx 0x%llx kr 0x%x\n", map, (uint64_t)start, (uint64_t)adjusted_start, size, (uint64_t)adjusted_size, kr);
+ //assert(0);
+ *phys_size = 0;
+ }
+ vm_map_copy_discard(copy_map);
+ copy_map = VM_MAP_COPY_NULL;
+
+ return kr;
+}
+
+
/*
 * memory_entry_check_for_adjustment:
 *
 * Check whether the named-entry "port"'s backing vm_map_copy would need
 * over-mapping adjustments to be mapped into "src_map", and report the
 * amounts via "*overmap_start"/"*overmap_end".
 * Only performs the check when "src_map" uses a smaller page size than
 * the kernel's native one; otherwise returns KERN_SUCCESS untouched.
 * "port" must be a valid IKOT_NAMED_ENTRY kobject port.
 */
kern_return_t
memory_entry_check_for_adjustment(
	vm_map_t src_map,
	ipc_port_t port,
	vm_map_offset_t *overmap_start,
	vm_map_offset_t *overmap_end)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_copy_t copy_map = VM_MAP_COPY_NULL, target_copy_map = VM_MAP_COPY_NULL;

	assert(port);
	assertf(ip_kotype(port) == IKOT_NAMED_ENTRY, "Port Type expected: %d...received:%d\n", IKOT_NAMED_ENTRY, ip_kotype(port));

	vm_named_entry_t named_entry;

	named_entry = (vm_named_entry_t) ipc_kobject_get(port);
	/* hold the named entry locked while inspecting its backing copy */
	named_entry_lock(named_entry);
	copy_map = named_entry->backing.copy;
	target_copy_map = copy_map;

	if (src_map && VM_MAP_PAGE_SHIFT(src_map) < PAGE_SHIFT) {
		vm_map_offset_t trimmed_start;

		trimmed_start = 0;
		DEBUG4K_ADJUST("adjusting...\n");
		/*
		 * NOTE(review): "target_copy_map" is pre-set to "copy_map",
		 * so the adjustment is applied in place to the named entry's
		 * backing copy rather than to a fresh clone -- confirm this
		 * is the intended behavior at the call sites.
		 */
		kr = vm_map_copy_adjust_to_target(
			copy_map,
			0, /* offset */
			copy_map->size, /* size */
			src_map,
			FALSE, /* copy */
			&target_copy_map,
			overmap_start,
			overmap_end,
			&trimmed_start);
		/* whole copy is used, so nothing should have been trimmed */
		assert(trimmed_start == 0);
	}
	named_entry_unlock(named_entry);

	return kr;
}
+
+