+/*
+ * If the process is not signed, or if it contains entitlements, we
+ * need to communicate with taskgated through the task_access_port.
+ *
+ * taskgated will provide a detached code signature if one is present,
+ * and will enforce any restrictions on entitlements.
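+ *
+ * Returns TRUE when a callout to taskgated is needed.  *require_success
+ * is set to TRUE when a taskgated failure must be fatal, i.e. when the
+ * process carries entitlements.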
+ */
+
+static boolean_t
+taskgated_required(proc_t p, boolean_t *require_success)
+{
+ size_t length;
+ void *blob;
+ int error;
+
+ if ((p->p_csflags & CS_VALID) == 0) {
+ *require_success = FALSE;
+ return TRUE;
+ }
+
+ error = cs_entitlements_blob_get(p, &blob, &length);
+ if (error == 0 && blob != NULL) {
+ *require_success = TRUE; /* fatal on the desktop when entitlements are present */
+ return TRUE;
+ }
+
+ *require_success = FALSE;
+ return FALSE;
+}
+
+
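+/*
+ * Ask taskgated to validate the code signature of the image that was just
+ * loaded.  If validation fails (or taskgated cannot be reached when success
+ * is required), the process is killed and the exec fails.
+ */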
+static int
+check_for_signature(proc_t p, struct image_params *imgp)
+{
+ mach_port_t port = NULL;
+ kern_return_t kr = KERN_FAILURE;
+ int error = EACCES;
+ boolean_t unexpected_failure = FALSE;
+ unsigned char hash[SHA1_RESULTLEN];
+ boolean_t require_success = FALSE;
+
+ /*
+ * Override the inherited code signing flags with the
+ * ones for the image that is being loaded.
+ */
+ proc_lock(p);
+ p->p_csflags = imgp->ip_csflags;
+ proc_unlock(p);
+
+ /* Set the switch_protect flag on the map */
+ if (p->p_csflags & (CS_HARD|CS_KILL)) {
+ vm_map_switch_protect(get_task_map(p->task), TRUE);
+ }
+
+ /* check if callout to taskgated is needed */
+ if (!taskgated_required(p, &require_success)) {
+ error = 0;
+ goto done;
+ }
+
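+ /*
+ * If no task_access_port has been registered for this task (or the
+ * port is invalid), allow the exec to proceed unless taskgated
+ * success is required because entitlements are present.
+ */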
+ kr = task_get_task_access_port(p->task, &port);
+ if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) {
+ error = 0;
+ if (require_success)
+ error = EACCES;
+ goto done;
+ }
+
+ /*
+ * taskgated returns KERN_SUCCESS if it has completed its work
+ * and the exec should continue, KERN_FAILURE if the exec should
+ * fail, or some other error code in the event of a MIG failure
+ * (e.g. the process was signalled during the RPC call, taskgated
+ * died, the MIG server died, etc.).
+ */
+
+ kr = find_code_signature(port, p->p_pid);
+ switch (kr) {
+ case KERN_SUCCESS:
+ error = 0;
+ break;
+ case KERN_FAILURE:
+ error = EACCES;
+ goto done;
+ default:
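+ /*
+ * MIG-level failure: deny the exec, but note that it was not a
+ * code signing rejection, so CS_KILLED is not set below.
+ */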
+ error = EACCES;
+ unexpected_failure = TRUE;
+ goto done;
+ }
+
+ /* Only do this if exec_resettextvp() did not fail */
+ if (p->p_textvp != NULLVP) {
+ /*
+ * If there's a new code directory, mark this process
+ * as signed.
+ */
+ if (0 == ubc_cs_getcdhash(p->p_textvp, p->p_textoff, hash)) {
+ proc_lock(p);
+ p->p_csflags |= CS_VALID;
+ proc_unlock(p);
+ }
+ }
+
+done:
+ if (0 != error) {
+ if (!unexpected_failure)
+ p->p_csflags |= CS_KILLED;
+ /* make very sure execution fails */
+ psignal(p, SIGKILL);
+ }
+ return error;
+}
+
+/*
+ * Typically as soon as we start executing this process, the
+ * first instruction will trigger a VM fault to bring the text
+ * pages (as executable) into the address space, followed soon
+ * thereafter by dyld data structures (for dynamic executable).
+ * To optimize this, as well as improve support for hardware
+ * debuggers that can only access resident pages present
+ * in the process' page tables, we prefault some pages if
+ * possible. Errors are non-fatal.
+ */
+static void
+exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result)
+{
+ int ret;
+ size_t expected_all_image_infos_size;
+
+ /*
+ * Prefault executable or dyld entry point.
+ */
+ vm_fault(current_map(),
+ vm_map_trunc_page(load_result->entry_point,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_EXECUTE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos);
+ } else {
+ expected_all_image_infos_size = sizeof(struct user32_dyld_all_image_infos);
+ }
+
+ /* Decode dyld anchor structure from <mach-o/dyld_images.h> */
+ if (load_result->dynlinker &&
+ load_result->all_image_info_addr &&
+ load_result->all_image_info_size >= expected_all_image_infos_size) {
+ union {
+ struct user64_dyld_all_image_infos infos64;
+ struct user32_dyld_all_image_infos infos32;
+ } all_image_infos;
+
+ /*
+ * Pre-fault to avoid copyin() going through the trap handler
+ * and recovery path.
+ */
+ vm_fault(current_map(),
+ vm_map_trunc_page(load_result->all_image_info_addr,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+ if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) {
+ /* all_image_infos straddles a page */
+ vm_fault(current_map(),
+ vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+ }
+
+ ret = copyin(load_result->all_image_info_addr,
+ &all_image_infos,
+ expected_all_image_infos_size);
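+ /*
+ * Version 9 of dyld_all_image_infos is believed to be the first
+ * revision that carries every field read below (in particular
+ * dyldAllImageInfosAddress), hence the version check.
+ */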
+ if (ret == 0 && all_image_infos.infos32.version >= 9) {
+
+ user_addr_t notification_address;
+ user_addr_t dyld_image_address;
+ user_addr_t dyld_version_address;
+ user_addr_t dyld_all_image_infos_address;
+ user_addr_t dyld_slide_amount;
+
+ if (imgp->ip_flags & IMGPF_IS_64BIT) {
+ notification_address = all_image_infos.infos64.notification;
+ dyld_image_address = all_image_infos.infos64.dyldImageLoadAddress;
+ dyld_version_address = all_image_infos.infos64.dyldVersion;
+ dyld_all_image_infos_address = all_image_infos.infos64.dyldAllImageInfosAddress;
+ } else {
+ notification_address = all_image_infos.infos32.notification;
+ dyld_image_address = all_image_infos.infos32.dyldImageLoadAddress;
+ dyld_version_address = all_image_infos.infos32.dyldVersion;
+ dyld_all_image_infos_address = all_image_infos.infos32.dyldAllImageInfosAddress;
+ }
+
+ /*
+ * dyld sets up the all_image_infos structure in its Mach-O binary
+ * at static link time, with pointers relative to its default load
+ * address. Since ASLR may slide dyld before its first instruction
+ * executes, "dyld_slide_amount" tells us how far dyld was loaded
+ * from its default expected load address. All other pointers into
+ * dyld's image should be adjusted by this amount. Later, dyld will
+ * fix up the pointers to account for the slide, at which point the
+ * all_image_infos_address field in the structure will match the
+ * runtime load address and "dyld_slide_amount" would be 0 if we
+ * consulted it again.
+ */
+
+ dyld_slide_amount = load_result->all_image_info_addr - dyld_all_image_infos_address;
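+ /*
+ * Hypothetical example: if dyld's all_image_infos was linked at
+ * 0x20000000 but ASLR placed it at 0x20022000, the slide is
+ * 0x22000, and every pointer read from the structure below must
+ * be adjusted by that amount before it can be prefaulted.
+ */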
+
+#if 0
+ kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ (uint64_t)load_result->all_image_info_addr,
+ all_image_infos.infos32.version,
+ (uint64_t)notification_address,
+ (uint64_t)dyld_image_address,
+ (uint64_t)dyld_version_address,
+ (uint64_t)dyld_all_image_infos_address);
+#endif
+
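+ /* Prefault dyld's notification routine (called as images are added or removed). */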
+ vm_fault(current_map(),
+ vm_map_trunc_page(notification_address + dyld_slide_amount,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_EXECUTE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
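+ /* Prefault dyld's Mach-O header at its load address. */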
+ vm_fault(current_map(),
+ vm_map_trunc_page(dyld_image_address + dyld_slide_amount,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_EXECUTE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
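+ /* Prefault dyld's version string (read-only data). */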
+ vm_fault(current_map(),
+ vm_map_trunc_page(dyld_version_address + dyld_slide_amount,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
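+ /* Prefault the all_image_infos structure itself, which dyld updates at runtime. */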
+ vm_fault(current_map(),
+ vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount,
+ vm_map_page_mask(current_map())),
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+ }
+ }
+}