+/*
+ * Final part of vm_shared_region_map_file().
+ * Kept in separate function to avoid blowing out the stack.
+ */
+__attribute__((noinline))
+static void
+vm_shared_region_map_file_final(
+	vm_shared_region_t shared_region,
+	vm_map_t sr_map,
+	mach_vm_offset_t sfm_min_address,
+	mach_vm_offset_t sfm_max_address,
+	__unused mach_vm_offset_t *file_first_mappings)
+{
+	struct _dyld_cache_header sr_cache_header;
+	int error;
+	size_t image_array_length;
+	struct _dyld_cache_image_text_info *sr_image_layout;
+
+	/*
+	 * copy in the shared region UUID to the shared region structure.
+	 * we do this indirectly by first copying in the shared cache header
+	 * and then copying the UUID from there because we'll need to look
+	 * at other content from the shared cache header.
+	 */
+	if (!shared_region->sr_uuid_copied) {
+		error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping),
+		    (char *)&sr_cache_header,
+		    sizeof(sr_cache_header));
+		if (error == 0) {
+			memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid));
+			shared_region->sr_uuid_copied = TRUE;
+		} else {
+#if DEVELOPMENT || DEBUG
+			panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
+			    "offset:0 size:0x%016llx) failed with %d\n",
+			    (long long)shared_region->sr_base_address,
+			    (long long)shared_region->sr_first_mapping,
+			    (long long)sizeof(sr_cache_header),
+			    error);
+#endif /* DEVELOPMENT || DEBUG */
+			shared_region->sr_uuid_copied = FALSE;
+		}
+	}
+
+	/*
+	 * If the shared cache is associated with the init task (and is therefore the system shared cache),
+	 * check whether it is a custom built shared cache and copy in the shared cache layout accordingly.
+	 */
+	boolean_t is_init_task = (task_pid(current_task()) == 1);
+	if (shared_region->sr_uuid_copied && is_init_task) {
+		/* Copy in the shared cache layout if we're running with a locally built shared cache */
+		if (sr_cache_header.locallyBuiltCache) {
+			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_START);
+			/*
+			 * imagesTextCount was copied in from user-mapped memory and is
+			 * untrusted.  Bound it *before* sizing any allocation with it:
+			 * otherwise the multiplications below can wrap, producing an
+			 * undersized buffer that the copyin()/copy loop then overflow.
+			 * The UINT32_MAX bound also guarantees the later narrowing of
+			 * sr_images_count to uint32_t cannot truncate.  (The previous
+			 * assert() ran after the allocations and is compiled out in
+			 * release builds, so it provided no protection.)
+			 */
+			if (sr_cache_header.imagesTextCount == 0 ||
+			    sr_cache_header.imagesTextCount >= UINT32_MAX ||
+			    sr_cache_header.imagesTextCount > SIZE_MAX / sizeof(struct _dyld_cache_image_text_info) ||
+			    sr_cache_header.imagesTextCount > SIZE_MAX / sizeof(struct dyld_uuid_info_64)) {
+#if DEVELOPMENT || DEBUG
+				panic("shared_region: bogus imagesTextCount:0x%016llx in shared_cache_header\n",
+				    (long long)sr_cache_header.imagesTextCount);
+#endif /* DEVELOPMENT || DEBUG */
+			} else {
+				image_array_length = (size_t)(sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info));
+				sr_image_layout = kheap_alloc(KHEAP_DATA_BUFFERS, image_array_length, Z_WAITOK);
+				if (sr_image_layout != NULL) {
+					error = copyin((user_addr_t)(shared_region->sr_base_address + shared_region->sr_first_mapping +
+					    sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length);
+					if (error == 0) {
+						shared_region->sr_images = kalloc((vm_size_t)(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)));
+						if (shared_region->sr_images != NULL) {
+							for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) {
+								memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid,
+								    sizeof(shared_region->sr_images[index].imageUUID));
+								shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress;
+							}
+
+							/* bounded above: < UINT32_MAX, so the narrowing is safe */
+							shared_region->sr_images_count = (uint32_t) sr_cache_header.imagesTextCount;
+						}
+					} else {
+#if DEVELOPMENT || DEBUG
+						panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx "
+						    "offset:0x%016llx size:0x%016llx) failed with %d\n",
+						    (long long)shared_region->sr_base_address,
+						    (long long)shared_region->sr_first_mapping,
+						    (long long)sr_cache_header.imagesTextOffset,
+						    (long long)image_array_length,
+						    error);
+#endif /* DEVELOPMENT || DEBUG */
+					}
+					kheap_free(KHEAP_DATA_BUFFERS, sr_image_layout, image_array_length);
+					sr_image_layout = NULL;
+				}
+			}
+			KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count);
+		}
+		init_task_shared_region = shared_region;
+	}
+
+	/*
+	 * If we succeeded, we know the bounds of the shared region.
+	 * Trim our pmaps to only cover this range (if applicable to
+	 * this platform).
+	 */
+	if (VM_MAP_PAGE_SHIFT(current_map()) == VM_MAP_PAGE_SHIFT(sr_map)) {
+		pmap_trim(current_map()->pmap, sr_map->pmap, sfm_min_address, sfm_max_address - sfm_min_address);
+	}
+}
+
+/*
+ * Look up the shared region attached to "task" and return it with an
+ * extra reference held, so it cannot be torn down while the caller is
+ * using it.  The caller owns that reference and must release it when
+ * done.
+ *
+ * As a side effect, this trims the caller's pmap against the shared
+ * region's pmap where the platform supports it.
+ */
+vm_shared_region_t
+vm_shared_region_trim_and_get(task_t task)
+{
+	/* Take a reference on the task's shared region, if it has one. */
+	vm_shared_region_t region = vm_shared_region_get(task);
+	if (region == NULL) {
+		return NULL;
+	}
+
+	/* Dig the shared region's VM map out of its backing memory entry. */
+	ipc_port_t entry_port = region->sr_mem_entry;
+	vm_named_entry_t named_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+	vm_map_t region_map = named_entry->backing.map;
+
+	/*
+	 * Trimming is only meaningful when both maps use the same
+	 * page size.
+	 */
+	if (VM_MAP_PAGE_SHIFT(task->map) == VM_MAP_PAGE_SHIFT(region_map)) {
+		pmap_trim(task->map->pmap, region_map->pmap, 0, 0);
+	}
+
+	return region;
+}
+