+ while (shared_region->sr_mapping_in_progress) {
+ /* wait for our turn... */
+ vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
+ THREAD_UNINT);
+ }
+ assert(!shared_region->sr_mapping_in_progress);
+ assert(shared_region->sr_ref_count > 1);
+ /* let others know we're working in this shared region */
+ shared_region->sr_mapping_in_progress = TRUE;
+
+ vm_shared_region_unlock();
+
+ reset_shared_region_state = TRUE;
+
+ /* no need to lock because this data is never modified... */
+ sr_handle = shared_region->sr_mem_entry;
+ sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
+ sr_map = sr_mem_entry->backing.map;
+ sr_base_address = shared_region->sr_base_address;
+ }
+ /*
+ * Undo the mappings we've established so far.
+ */
+ for (srfmp = &srf_mappings[0];
+ srfmp <= srf_mappings_current;
+ srfmp++) {
+ mappings = srfmp->mappings;
+ mappings_count = srfmp->mappings_count;
+ if (srfmp == srf_mappings_current) {
+ mappings_count = srf_current_mappings_count;
+ }
+
+ for (j = 0; j < mappings_count; j++) {
+ kern_return_t kr2;
+
+ if (mappings[j].sms_size == 0) {
+ /*
+ * We didn't establish this
+ * mapping, so nothing to undo.
+ */
+ continue;
+ }
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx "
+ "size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x: "
+ "undoing...\n",
+ j,
+ (long long)mappings[j].sms_address,
+ (long long)mappings[j].sms_size,
+ (long long)mappings[j].sms_file_offset,
+ mappings[j].sms_max_prot,
+ mappings[j].sms_init_prot));
+ kr2 = mach_vm_deallocate(
+ sr_map,
+ (mappings[j].sms_address -
+ sr_base_address),
+ mappings[j].sms_size);
+ assert(kr2 == KERN_SUCCESS);
+ }
+ }
+
+ if (reset_shared_region_state) {
+ vm_shared_region_lock();
+ assert(shared_region->sr_ref_count > 1);
+ assert(shared_region->sr_mapping_in_progress);
+ /* we're done working on that shared region */
+ shared_region->sr_mapping_in_progress = FALSE;
+ thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
+ vm_shared_region_unlock();
+ reset_shared_region_state = FALSE;
+ }
+
+ vm_shared_region_deallocate(shared_region);
+}
+
+/*
+ * For now we only expect to see at most 2 regions to relocate/authenticate
+ * per file. One that's VM_PROT_SLIDE and one VM_PROT_SLIDE | VM_PROT_NOAUTH.
+ */
+#define VMSR_NUM_SLIDES 2
+
+/*
+ * First part of vm_shared_region_map_file(). Split out to
+ * avoid kernel stack overflow.
+ */
+__attribute__((noinline))
+static kern_return_t
+vm_shared_region_map_file_setup(
+ vm_shared_region_t shared_region,
+ int sr_file_mappings_count,
+ struct _sr_file_mappings *sr_file_mappings,
+ unsigned int *mappings_to_slide_cnt,
+ struct shared_file_mapping_slide_np **mappings_to_slide,
+ mach_vm_offset_t *slid_mappings,
+ memory_object_control_t *slid_file_controls,
+ mach_vm_offset_t *first_mapping,
+ mach_vm_offset_t *file_first_mappings,
+ mach_vm_offset_t *sfm_min_address,
+ mach_vm_offset_t *sfm_max_address,
+ vm_map_t *sr_map_ptr,
+ vm_map_offset_t *lowest_unnestable_addr_ptr)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ memory_object_control_t file_control;
+ vm_object_t file_object;
+ ipc_port_t sr_handle;
+ vm_named_entry_t sr_mem_entry;
+ vm_map_t sr_map;
+ mach_vm_offset_t sr_base_address;
+ unsigned int i = 0;
+ mach_port_t map_port;
+ vm_map_offset_t target_address;
+ vm_object_t object;
+ vm_object_size_t obj_size;
+ vm_map_offset_t lowest_unnestable_addr = 0;
+ vm_map_kernel_flags_t vmk_flags;
+ mach_vm_offset_t sfm_end;
+ uint32_t mappings_count;
+ struct shared_file_mapping_slide_np *mappings;
+ struct _sr_file_mappings *srfmp;
+ unsigned int current_file_index = 0;
+
+ vm_shared_region_lock();
+ assert(shared_region->sr_ref_count > 1);
+
+ /*
+ * Make sure we handle only one mapping at a time in a given
+ * shared region, to avoid race conditions. We should rarely
+ * have to wait here...
+ */
+ while (shared_region->sr_mapping_in_progress) {
+ /* wait for our turn... */
+ vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
+ THREAD_UNINT);
+ }
+ assert(!shared_region->sr_mapping_in_progress);
+ assert(shared_region->sr_ref_count > 1);
+ /* let others know we're working in this shared region */
+ shared_region->sr_mapping_in_progress = TRUE;
+
+ vm_shared_region_unlock();
+
+ /* no need to lock because this data is never modified... */
+ sr_handle = shared_region->sr_mem_entry;
+ sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
+ sr_map = sr_mem_entry->backing.map;
+ sr_base_address = shared_region->sr_base_address;
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: -> map(%p)\n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region)));
+
+ mappings_count = 0;
+ mappings = NULL;
+ srfmp = NULL;
+
+ /* process all the files to be mapped */
+ for (srfmp = &sr_file_mappings[0];
+ srfmp < &sr_file_mappings[sr_file_mappings_count];
+ srfmp++) {
+ mappings_count = srfmp->mappings_count;
+ mappings = srfmp->mappings;
+ file_control = srfmp->file_control;
+
+ if (mappings_count == 0) {
+ /* no mappings here... */
+ continue;
+ }
+
+ /*
+ * The code below can only correctly "slide" (perform relocations) for one
+ * value of the slide amount. So if a file has a non-zero slide, it has to
+ * match any previous value. A zero slide value is ok for things that are
+ * just directly mapped.
+ */
+ if (shared_region->sr_slide == 0 && srfmp->slide != 0) {
+ shared_region->sr_slide = srfmp->slide;
+ } else if (shared_region->sr_slide != 0 &&
+ srfmp->slide != 0 &&
+ shared_region->sr_slide != srfmp->slide) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: more than 1 non-zero slide value amount "
+ "slide 1:0x%x slide 2:0x%x\n ",
+ shared_region->sr_slide, srfmp->slide));
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+
+#if __arm64__
+ if ((shared_region->sr_64bit ||
+ page_shift_user32 == SIXTEENK_PAGE_SHIFT) &&
+ ((srfmp->slide & SIXTEENK_PAGE_MASK) != 0)) {
+ printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n",
+ __FUNCTION__, srfmp->slide);
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+#endif /* __arm64__ */
+
+ /* get the VM object associated with the file to be mapped */
+ file_object = memory_object_control_to_vm_object(file_control);
+ assert(file_object);
+
+ /* establish the mappings for that file */
+ for (i = 0; i < mappings_count; i++) {
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx size:0x%016llx offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x\n",
+ i,
+ (long long)mappings[i].sms_address,
+ (long long)mappings[i].sms_size,
+ (long long)mappings[i].sms_file_offset,
+ mappings[i].sms_max_prot,
+ mappings[i].sms_init_prot));
+
+ if (mappings[i].sms_address < *sfm_min_address) {
+ *sfm_min_address = mappings[i].sms_address;
+ }
+
+ if (os_add_overflow(mappings[i].sms_address,
+ mappings[i].sms_size,
+ &sfm_end) ||
+ (vm_map_round_page(sfm_end, VM_MAP_PAGE_MASK(sr_map)) <
+ mappings[i].sms_address)) {
+ /* overflow */
+ kr = KERN_INVALID_ARGUMENT;
+ break;
+ }
+ if (sfm_end > *sfm_max_address) {
+ *sfm_max_address = sfm_end;
+ }
+
+ if (mappings[i].sms_init_prot & VM_PROT_ZF) {
+ /* zero-filled memory */
+ map_port = MACH_PORT_NULL;
+ } else {
+ /* file-backed memory */
+ __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager);
+ }
+
+ /*
+ * Remember which mappings need sliding.
+ */
+ if (mappings[i].sms_max_prot & VM_PROT_SLIDE) {
+ if (*mappings_to_slide_cnt == VMSR_NUM_SLIDES) {
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x "
+ "too many mappings to slide...\n",
+ i,
+ (long long)mappings[i].sms_address,
+ (long long)mappings[i].sms_size,
+ (long long)mappings[i].sms_file_offset,
+ mappings[i].sms_max_prot,
+ mappings[i].sms_init_prot));
+ } else {
+ mappings_to_slide[*mappings_to_slide_cnt] = &mappings[i];
+ *mappings_to_slide_cnt += 1;
+ }
+ }
+
+ /* mapping's address is relative to the shared region base */
+ target_address = (vm_map_offset_t)(mappings[i].sms_address - sr_base_address);
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_already = TRUE;
+ /* no copy-on-read for mapped binaries */
+ vmk_flags.vmkf_no_copy_on_read = 1;
+
+
+ /* establish that mapping, OK if it's "already" there */
+ if (map_port == MACH_PORT_NULL) {
+ /*
+ * We want to map some anonymous memory in a shared region.
+ * We have to create the VM object now, so that it can be mapped "copy-on-write".
+ */
+ obj_size = vm_map_round_page(mappings[i].sms_size, VM_MAP_PAGE_MASK(sr_map));
+ object = vm_object_allocate(obj_size);
+ if (object == VM_OBJECT_NULL) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ } else {
+ kr = vm_map_enter(
+ sr_map,
+ &target_address,
+ vm_map_round_page(mappings[i].sms_size,
+ VM_MAP_PAGE_MASK(sr_map)),
+ 0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ object,
+ 0,
+ TRUE,
+ mappings[i].sms_init_prot & VM_PROT_ALL,
+ mappings[i].sms_max_prot & VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ }
+ } else {
+ object = VM_OBJECT_NULL; /* no anonymous memory here */
+ kr = vm_map_enter_mem_object(
+ sr_map,
+ &target_address,
+ vm_map_round_page(mappings[i].sms_size,
+ VM_MAP_PAGE_MASK(sr_map)),
+ 0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ map_port,
+ mappings[i].sms_file_offset,
+ TRUE,
+ mappings[i].sms_init_prot & VM_PROT_ALL,
+ mappings[i].sms_max_prot & VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
+ }
+
+ if (kr == KERN_SUCCESS) {
+ /*
+ * Record the first (chronologically) successful
+ * mapping in this shared region.
+ * We're protected by "sr_mapping_in_progress" here,
+ * so no need to lock "shared_region".
+ */
+ assert(current_file_index < VMSR_NUM_SLIDES);
+ if (file_first_mappings[current_file_index] == (mach_vm_offset_t) -1) {
+ file_first_mappings[current_file_index] = target_address;
+ }
+
+ if (*mappings_to_slide_cnt > 0 &&
+ mappings_to_slide[*mappings_to_slide_cnt - 1] == &mappings[i]) {
+ slid_mappings[*mappings_to_slide_cnt - 1] = target_address;
+ slid_file_controls[*mappings_to_slide_cnt - 1] = file_control;
+ }
+
+ /*
+ * Record the lowest writable address in this
+ * sub map, to log any unexpected unnesting below
+ * that address (see log_unnest_badness()).
+ */
+ if ((mappings[i].sms_init_prot & VM_PROT_WRITE) &&
+ sr_map->is_nested_map &&
+ (lowest_unnestable_addr == 0 ||
+ (target_address < lowest_unnestable_addr))) {
+ lowest_unnestable_addr = target_address;
+ }
+ } else {
+ if (map_port == MACH_PORT_NULL) {
+ /*
+ * Get rid of the VM object we just created
+ * but failed to map.
+ */
+ vm_object_deallocate(object);
+ object = VM_OBJECT_NULL;
+ }
+ if (kr == KERN_MEMORY_PRESENT) {
+ /*
+ * This exact mapping was already there:
+ * that's fine.
+ */
+ SHARED_REGION_TRACE_INFO(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x "
+ "already mapped...\n",
+ i,
+ (long long)mappings[i].sms_address,
+ (long long)mappings[i].sms_size,
+ (long long)mappings[i].sms_file_offset,
+ mappings[i].sms_max_prot,
+ mappings[i].sms_init_prot));
+ /*
+ * We didn't establish this mapping ourselves;
+ * let's reset its size, so that we do not
+ * attempt to undo it if an error occurs later.
+ */
+ mappings[i].sms_size = 0;
+ kr = KERN_SUCCESS;
+ } else {
+ break;
+ }
+ }
+ }
+
+ if (kr != KERN_SUCCESS) {
+ break;
+ }
+
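+ /* done with this file; move on to the next file's mappings */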
+ ++current_file_index;
+ }
+
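+ /* report the first file's first mapping back to the caller */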
+ if (file_first_mappings[0] != (mach_vm_offset_t)-1) {
+ *first_mapping = file_first_mappings[0];
+ }
+
+
+ if (kr != KERN_SUCCESS) {
+ /* the last mapping we tried (mappings[i]) failed ! */
+ assert(i < mappings_count);
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: mapping[%d]: "
+ "address:0x%016llx size:0x%016llx "
+ "offset:0x%016llx "
+ "maxprot:0x%x prot:0x%x failed 0x%x\n",
+ i,
+ (long long)mappings[i].sms_address,
+ (long long)mappings[i].sms_size,
+ (long long)mappings[i].sms_file_offset,
+ mappings[i].sms_max_prot,
+ mappings[i].sms_init_prot,
+ kr));
+
+ /*
+ * We are still holding sr_mapping_in_progress == TRUE here,
+ * so sr_map must not be NULL: with a NULL map,
+ * vm_shared_region_undo_mappings() would look up the shared
+ * region itself and block waiting for sr_mapping_in_progress
+ * to become FALSE, deadlocking against ourselves.
+ */
+ assert(sr_map != NULL);
+ /* undo all the previous mappings */
+ vm_shared_region_undo_mappings(sr_map, sr_base_address, sr_file_mappings, srfmp, i);
+ return kr;
+ }
+
+ *lowest_unnestable_addr_ptr = lowest_unnestable_addr;
+ *sr_map_ptr = sr_map;
+ return KERN_SUCCESS;
+}
+
+/* forward declaration */
+__attribute__((noinline))
+static void
+vm_shared_region_map_file_final(
+ vm_shared_region_t shared_region,
+ vm_map_t sr_map,
+ mach_vm_offset_t sfm_min_address,
+ mach_vm_offset_t sfm_max_address,
+ mach_vm_offset_t *file_first_mappings);
+
+/*
+ * Establish some mappings of a file in the shared region.
+ * This is used by "dyld" via the shared_region_map_np() system call
+ * to populate the shared region with the appropriate shared cache.
+ *
+ * One could also call it several times to incrementally load several
+ * libraries, as long as they do not overlap.
+ * It will return KERN_SUCCESS if the mappings were successfully established
+ * or if they were already established identically by another process.
+ */
+__attribute__((noinline))
+kern_return_t
+vm_shared_region_map_file(
+ vm_shared_region_t shared_region,
+ int sr_file_mappings_count,
+ struct _sr_file_mappings *sr_file_mappings)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ unsigned int i;
+ unsigned int mappings_to_slide_cnt = 0;
+ struct shared_file_mapping_slide_np *mappings_to_slide[VMSR_NUM_SLIDES] = {};
+ mach_vm_offset_t slid_mappings[VMSR_NUM_SLIDES];
+ memory_object_control_t slid_file_controls[VMSR_NUM_SLIDES];
+ mach_vm_offset_t first_mapping = (mach_vm_offset_t)-1;
+ mach_vm_offset_t sfm_min_address = (mach_vm_offset_t)-1;
+ mach_vm_offset_t sfm_max_address = 0;
+ vm_map_t sr_map = NULL;
+ vm_map_offset_t lowest_unnestable_addr = 0;
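+ /* first mapping established for each file; (mach_vm_offset_t)-1 means none yet */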
+ mach_vm_offset_t file_first_mappings[VMSR_NUM_SLIDES] = {(mach_vm_offset_t) -1, (mach_vm_offset_t) -1};
+
+ kr = vm_shared_region_map_file_setup(shared_region, sr_file_mappings_count, sr_file_mappings,
+ &mappings_to_slide_cnt, &mappings_to_slide[0], slid_mappings, slid_file_controls,
+ &first_mapping, &file_first_mappings[0],
+ &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr);
+ if (kr != KERN_SUCCESS) {
+ vm_shared_region_lock();
+ goto done;
+ }
+
+ /*
+ * The call above installed direct mappings to the shared cache file.
+ * Now we go back and overwrite the mappings that need relocation
+ * with a special shared region pager.
+ */
+ for (i = 0; i < mappings_to_slide_cnt; ++i) {
+ kr = vm_shared_region_slide(shared_region->sr_slide,
+ mappings_to_slide[i]->sms_file_offset,
+ mappings_to_slide[i]->sms_size,
+ mappings_to_slide[i]->sms_slide_start,
+ mappings_to_slide[i]->sms_slide_size,
+ slid_mappings[i],
+ slid_file_controls[i],
+ mappings_to_slide[i]->sms_max_prot);
+ if (kr != KERN_SUCCESS) {
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: region_slide("
+ "slide:0x%x start:0x%016llx "
+ "size:0x%016llx) failed 0x%x\n",
+ shared_region->sr_slide,
+ (long long)mappings_to_slide[i]->sms_slide_start,
+ (long long)mappings_to_slide[i]->sms_slide_size,
+ kr));
+ vm_shared_region_lock();
+ goto done;
+ }
+ }
+
+ assert(kr == KERN_SUCCESS);
+
+ /* adjust the map's "lowest_unnestable_start" */
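+ /* round down to the pmap's minimum shared region (nesting) size */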
+ lowest_unnestable_addr &= ~(pmap_shared_region_size_min(sr_map->pmap) - 1);
+ if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) {
+ vm_map_lock(sr_map);
+ sr_map->lowest_unnestable_start = lowest_unnestable_addr;
+ vm_map_unlock(sr_map);
+ }
+
+ vm_shared_region_lock();
+ assert(shared_region->sr_ref_count > 1);
+ assert(shared_region->sr_mapping_in_progress);
+
+ /* set "sr_first_mapping"; dyld uses it to validate the shared cache */
+ if (shared_region->sr_first_mapping == (mach_vm_offset_t) -1) {
+ shared_region->sr_first_mapping = first_mapping;
+ }
+
+ vm_shared_region_map_file_final(shared_region, sr_map, sfm_min_address, sfm_max_address,
+ &file_first_mappings[0]);
+
+done:
+ /*
+ * We're done working on that shared region.
+ * Wake up any waiting threads.
+ */
+ shared_region->sr_mapping_in_progress = FALSE;
+ thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
+ vm_shared_region_unlock();
+
+#if __has_feature(ptrauth_calls)
+ if (kr == KERN_SUCCESS) {
+ /*
+ * Since authenticated mappings were just added to the shared region,
+ * go back and remap them into private mappings for this task.
+ */
+ kr = vm_shared_region_auth_remap(shared_region);
+ }
+#endif /* __has_feature(ptrauth_calls) */
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: map(%p) <- 0x%x \n",
+ (void *)VM_KERNEL_ADDRPERM(shared_region), kr));
+ return kr;
+}
+
+/*
+ * Final part of vm_shared_region_map_file().
+ * Kept in a separate function to avoid blowing out the stack.
+ */
+__attribute__((noinline))
+static void
+vm_shared_region_map_file_final(
+ vm_shared_region_t shared_region,
+ vm_map_t sr_map,
+ mach_vm_offset_t sfm_min_address,
+ mach_vm_offset_t sfm_max_address,
+ __unused mach_vm_offset_t *file_first_mappings)
+{
+ struct _dyld_cache_header sr_cache_header;
+ int error;
+ size_t image_array_length;
+ struct _dyld_cache_image_text_info *sr_image_layout;
+
+
+ /*
+ * Copy the shared region UUID into the shared region structure.
+ * We do this indirectly: we first copy in the whole shared cache
+ * header and take the UUID from there, because we'll also need
+ * other fields from that header below.
+ */
+ if (!shared_region->sr_uuid_copied) {
+ error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping),
+ (char *)&sr_cache_header,
+ sizeof(sr_cache_header));
+ if (error == 0) {
+ memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
+ shared_region->sr_uuid_copied = TRUE;
+ } else {
+#if DEVELOPMENT || DEBUG
+ panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
+ "offset:0 size:0x%016llx) failed with %d\n",
+ (long long)shared_region->sr_base_address,
+ (long long)shared_region->sr_first_mapping,
+ (long long)sizeof(sr_cache_header),
+ error);
+#endif /* DEVELOPMENT || DEBUG */
+ shared_region->sr_uuid_copied = FALSE;
+ }
+ }
+
+ /*
+ * If the shared cache is associated with the init task (and is therefore the system shared cache),
+ * check whether it is a custom-built shared cache and copy in the shared cache layout accordingly.
+ */
+ boolean_t is_init_task = (task_pid(current_task()) == 1);
+ if (shared_region->sr_uuid_copied && is_init_task) {
+ /* Copy in the shared cache layout if we're running with a locally built shared cache */
+ if (sr_cache_header.locallyBuiltCache) {
+ KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
+ image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
+ sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, image_array_length, Z_WAITOK);
+ error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping +
+ sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
+ if (error == 0) {
+ shared_region->sr_images = kalloc((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)));
+ for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
+ memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
+ sizeof(shared_region->sr_images[index].imageUUID));
+ shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
+ }
+
+ assert(sr_cache_header.imagesTextCount < UINT32_MAX);
+ shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
+ } else {
+#if DEVELOPMENT || DEBUG
+ panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
+ "offset:0x%016llx size:0x%016llx) failed with %d\n",
+ (long long)shared_region->sr_base_address,
+ (long long)shared_region->sr_first_mapping,
+ (long long)sr_cache_header.imagesTextOffset,
+ (long long)image_array_length,
+ error);
+#endif /* DEVELOPMENT || DEBUG */
+ }
+ KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
+ kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, image_array_length);
+ sr_image_layout = NULL;
+ }
+ init_task_shared_region = shared_region;
+ }
+
+ /*
+ * If we succeeded, we know the bounds of the shared region.
+ * Trim our pmaps to only cover this range (if applicable to
+ * this platform).
+ */
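+ /* only trim when the task map and the shared region map use the same page size */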
+ if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) {
+ pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address);
+ }
+}
+
+/*
+ * Retrieve a task's shared region and grab an extra reference to
+ * make sure it doesn't disappear while the caller is using it.
+ * The caller is responsible for consuming that extra reference if
+ * necessary.
+ *
+ * This also tries to trim the pmap for the shared region.
+ */
+vm_shared_region_t
+vm_shared_region_trim_and_get(task_t task)
+{
+ vm_shared_region_t shared_region;
+ ipc_port_t sr_handle;
+ vm_named_entry_t sr_mem_entry;
+ vm_map_t sr_map;
+
+ /* Get the shared region and the map. */
+ shared_region = vm_shared_region_get(task);
+ if (shared_region == NULL) {
+ return NULL;
+ }
+
+ sr_handle = shared_region->sr_mem_entry;
+ sr_mem_entry = (vm_named_entry_t) ip_get_kobject(sr_handle);
+ sr_map = sr_mem_entry->backing.map;
+
+ /* Trim the pmap if possible. */
+ if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(sr_map)) {
+ pmap_trim(task->map->pmap, sr_map->pmap, 0, 0);
+ }
+
+ return shared_region;
+}
+
+/*
+ * Enter the appropriate shared region into "map" for "task".
+ * This involves looking up the shared region (and possibly creating a new
+ * one) for the desired environment, then mapping the VM sub map into the
+ * task's VM "map", with the appropriate level of pmap-nesting.
+ */
+kern_return_t
+vm_shared_region_enter(
+ struct _vm_map *map,
+ struct task *task,
+ boolean_t is_64bit,
+ void *fsroot,
+ cpu_type_t cpu,
+ cpu_subtype_t cpu_subtype,
+ boolean_t reslide)
+{
+ kern_return_t kr;
+ vm_shared_region_t shared_region;
+ vm_map_offset_t sr_address, sr_offset, target_address;
+ vm_map_size_t sr_size, mapping_size;
+ vm_map_offset_t sr_pmap_nesting_start;
+ vm_map_size_t sr_pmap_nesting_size;
+ ipc_port_t sr_handle;
+ vm_prot_t cur_prot, max_prot;
+
+ SHARED_REGION_TRACE_DEBUG(
+ ("shared_region: -> "
+ "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n",
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu, cpu_subtype, is_64bit));
+
+ /* lookup (create if needed) the shared region for this environment */
+ shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit, reslide);
+ if (shared_region == NULL) {
+ /* this should not happen ! */
+ SHARED_REGION_TRACE_ERROR(
+ ("shared_region: -> "
+ "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d,reslide=%d): "
+ "lookup failed !\n",
+ (void *)VM_KERNEL_ADDRPERM(map),
+ (void *)VM_KERNEL_ADDRPERM(task),
+ (void *)VM_KERNEL_ADDRPERM(fsroot),
+ cpu, cpu_subtype, is_64bit, reslide));