+/*
+ * Look up a pre-existing mapping in the shared region, for replacement.
+ * Takes an extra reference on the mapping's VM object if found; the caller
+ * must drop it with vm_object_deallocate() when done.
+ */
+static kern_return_t
+find_mapping_to_slide(vm_map_t map, vm_map_address_t addr, vm_map_entry_t entry)
+{
+ vm_map_entry_t found;
+
+ /* find the shared region's map entry to slide */
+ vm_map_lock_read(map);
+ if (!vm_map_lookup_entry(map, addr, &found)) {
+ /* no mapping there */
+ vm_map_unlock_read(map);
+ return KERN_INVALID_ARGUMENT;
+ }
+
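+ /*
+ * Copy the entry contents out rather than returning a pointer to it:
+ * once the map lock is dropped, the entry itself could be clipped or freed.
+ */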
+ *entry = *found;
+ /* extra ref to keep object alive while map is unlocked */
+ vm_object_reference(VME_OBJECT(found));
+ vm_map_unlock_read(map);
+ return KERN_SUCCESS;
+}
+
+#if __has_feature(ptrauth_calls)
+
+/*
+ * Determine if this task is actually using pointer signing.
+ */
+static boolean_t
+task_sign_pointers(task_t task)
+{
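+ /*
+ * Pointer signing is in use unless the task's pmap has JOP
+ * (pointer authentication) explicitly disabled.
+ */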
+ if (task->map &&
+ task->map->pmap &&
+ !task->map->pmap->disable_jop) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * If the shared region contains mappings that are authenticated, then
+ * remap them into the task's private map.
+ *
+ * Failures are possible in this routine when jetsam kills a process
+ * just as dyld is trying to set it up. The vm_map and task shared region
+ * info get torn down without waiting for this thread to finish.
+ */
+__attribute__((noinline))
+kern_return_t
+vm_shared_region_auth_remap(vm_shared_region_t sr)
+{
+ memory_object_t sr_pager = MEMORY_OBJECT_NULL;
+ task_t task = current_task();
+ vm_shared_region_slide_info_t si;
+ unsigned int i;
+ vm_object_t object;
+ vm_map_t sr_map;
+ struct vm_map_entry tmp_entry_store = {0};
+ vm_map_entry_t tmp_entry = NULL;
+ int vm_flags;
+ vm_map_kernel_flags_t vmk_flags;
+ vm_map_offset_t map_addr;
+ kern_return_t kr = KERN_SUCCESS;
+ boolean_t use_ptr_auth = task_sign_pointers(task);
+
+ /*
+ * Don't do this more than once, and avoid any race conditions while finishing it.
+ */
+ vm_shared_region_lock();
+ while (sr->sr_mapping_in_progress) {
+ /* wait for our turn... */
+ vm_shared_region_sleep(&sr->sr_mapping_in_progress, THREAD_UNINT);
+ }
+ assert(!sr->sr_mapping_in_progress);
+ assert(sr->sr_ref_count > 1);
+
+ /* Just return if already done. */
+ if (task->shared_region_auth_remapped) {
+ vm_shared_region_unlock();
+ return KERN_SUCCESS;
+ }
+
+ /* let others know to wait while we're working in this shared region */
+ sr->sr_mapping_in_progress = TRUE;
+ vm_shared_region_unlock();
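+
+ /*
+ * With sr_mapping_in_progress set, any other thread that checks it
+ * will wait, so we can safely work on the region unlocked.
+ */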
+
+ /*
+ * Remap any sections with pointer authentications into the private map.
+ */
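+ /*
+ * Each sr_auth_section[] entry holds the slide info recorded for one
+ * authenticated section when the shared region was set up.
+ */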
+ for (i = 0; i < sr->sr_num_auth_section; ++i) {
+ si = sr->sr_auth_section[i];
+ assert(si != NULL);
+ assert(si->si_ptrauth);
+
+ /*
+ * We have a mapping that needs to be private.
+ * Look for an existing slid mapping's pager with matching
+ * object, offset, slide info and shared_region_id to reuse.
+ */
+ object = si->si_slide_object;
+ sr_pager = shared_region_pager_match(object, si->si_start, si,
+ use_ptr_auth ? task->jop_pid : 0);
+ if (sr_pager == MEMORY_OBJECT_NULL) {
+ kr = KERN_FAILURE;
+ goto done;
+ }
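+
+ /*
+ * The pager returned by shared_region_pager_match() comes with a
+ * reference that we drop below once it has been mapped, or in the
+ * error path if we bail out first.
+ */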
+
+ /*
+ * verify matching jop_pid for this task and this pager
+ */
+ if (use_ptr_auth) {
+ shared_region_pager_match_task_key(sr_pager, task);
+ }
+
+ sr_map = vm_shared_region_vm_map(sr);
+ tmp_entry = NULL;
+
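+ /*
+ * Addresses in the shared region's own map are relative to the
+ * region's base address, so convert the slid address to an offset
+ * before looking up the entry we're about to replace.
+ */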
+ kr = find_mapping_to_slide(sr_map, si->si_slid_address - sr->sr_base_address, &tmp_entry_store);
+ if (kr != KERN_SUCCESS) {
+ goto done;
+ }
+ tmp_entry = &tmp_entry_store;
+
+ /*
+ * Check that the object exactly covers the region to slide.
+ */
+ if (VME_OFFSET(tmp_entry) != si->si_start ||
+ tmp_entry->vme_end - tmp_entry->vme_start != si->si_end - si->si_start) {
+ kr = KERN_FAILURE;
+ goto done;
+ }
+
+ /*
+ * map the pager over the portion of the mapping that needs sliding
+ */
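+ /*
+ * VM_FLAGS_OVERWRITE together with vmkf_overwrite_immutable lets the
+ * new mapping replace the existing shared region mapping in place,
+ * even where that mapping has been marked immutable.
+ */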
+ vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_overwrite_immutable = TRUE;
+ map_addr = si->si_slid_address;
+ kr = vm_map_enter_mem_object(task->map,
+ &map_addr,
+ si->si_end - si->si_start,
+ (mach_vm_offset_t) 0, /* mask */
+ vm_flags,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ (ipc_port_t)(uintptr_t) sr_pager,
+ 0, /* offset in the pager */
+ TRUE, /* copy */
+ tmp_entry->protection,
+ tmp_entry->max_protection,
+ tmp_entry->inheritance);
+ memory_object_deallocate(sr_pager);
+ sr_pager = MEMORY_OBJECT_NULL;
+ if (kr != KERN_SUCCESS) {
+ goto done;
+ }
+ assertf(map_addr == si->si_slid_address,
+ "map_addr=0x%llx si_slid_address=0x%llx tmp_entry=%p\n",
+ (uint64_t)map_addr,
+ (uint64_t)si->si_slid_address,
+ tmp_entry);
+
+ /* Drop the extra object reference taken by find_mapping_to_slide() */
+ vm_object_deallocate(VME_OBJECT(tmp_entry));
+ tmp_entry = NULL;
+ }
+
+done:
+ if (tmp_entry) {
+ /* Drop the extra object reference taken by find_mapping_to_slide() */
+ vm_object_deallocate(VME_OBJECT(tmp_entry));
+ tmp_entry = NULL;
+ }
+
+ /*
+ * Drop any extra reference to the pager in case we're quitting due to an error above.
+ */
+ if (sr_pager != MEMORY_OBJECT_NULL) {
+ memory_object_deallocate(sr_pager);
+ }
+
+ /*
+ * Mark the task as having its shared region auth sections remapped
+ * and wake up anyone waiting on us.
+ */
+ vm_shared_region_lock();
+ task->shared_region_auth_remapped = TRUE;
+ sr->sr_mapping_in_progress = FALSE;
+ thread_wakeup((event_t)&sr->sr_mapping_in_progress);
+ vm_shared_region_unlock();
+ return kr;
+}
+#endif /* __has_feature(ptrauth_calls) */
+