+#if __x86_64__
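+/*
+ * check_if_simulator_binary:
+ * Read the Mach-O header and load commands of the image being
+ * exec'ed and report whether it appears to be an iOS or watchOS
+ * simulator binary (x86_64 only).
+ */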
+static boolean_t
+check_if_simulator_binary(
+ struct image_params *imgp,
+ off_t file_offset,
+ off_t macho_size)
+{
+ struct mach_header *header;
+ char *ip_vdata = NULL;
+ kauth_cred_t cred = NULL;
+ uint32_t ncmds;
+ struct load_command *lcp;
+ boolean_t simulator_binary = FALSE;
+ void * addr = NULL;
+ vm_size_t alloc_size, cmds_size;
+ size_t offset;
+ proc_t p = current_proc(); /* XXXX */
+ int error;
+ int resid = 0;
+ size_t mach_header_sz = sizeof(struct mach_header);
+
+ cred = kauth_cred_proc_ref(p);
+
+ /* Allocate page to copyin mach header */
+ ip_vdata = kalloc(PAGE_SIZE);
+ if (ip_vdata == NULL) {
+ goto bad;
+ }
+
+ /* Read the Mach-O header */
+ error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
+ PAGE_SIZE, file_offset,
+ UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
+ cred, &resid, p);
+ if (error) {
+ goto bad;
+ }
+
+ header = (struct mach_header *)ip_vdata;
+
+ if (header->magic == MH_MAGIC_64 ||
+ header->magic == MH_CIGAM_64) {
+ mach_header_sz = sizeof(struct mach_header_64);
+ }
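+
+ /*
+ * Note: for the byte-swapped magic (MH_CIGAM_64) only the header
+ * size is adjusted; sizeofcmds and ncmds below are used
+ * un-swapped, so a foreign-endian image presumably just fails
+ * the size checks and reports FALSE, which is fail-safe here.
+ */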
+
+ /* ensure header + sizeofcmds falls within the file */
+ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
+ (off_t)cmds_size > macho_size ||
+ round_page_overflow(cmds_size, &alloc_size)) {
+ goto bad;
+ }
+
+ /*
+ * Read the load commands into kernel memory.
+ */
+ addr = kalloc(alloc_size);
+ if (addr == NULL) {
+ goto bad;
+ }
+
+ error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, alloc_size, file_offset,
+ UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
+ if (error) {
+ goto bad;
+ }
+
+ if (resid) {
+ /* We must be able to read in as much as the mach_header indicated */
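+ /* (alloc_size is page-rounded, so an image whose file ends
+ * inside that last partial page presumably lands here too,
+ * which is fail-safe for this heuristic) */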
+ goto bad;
+ }
+
+ /*
+ * Loop through each of the load_commands indicated by the
+ * Mach-O header; if an absurd value is provided, the bounds
+ * checks below fail and we break out of the loop, so we are
+ * implicitly fail-safe.
+ */
+ offset = mach_header_sz;
+ ncmds = header->ncmds;
+
+ while (ncmds--) {
+ /* ensure enough space for a minimal load command */
+ if (offset + sizeof(struct load_command) > cmds_size) {
+ break;
+ }
+
+ /*
+ * Get a pointer to the command.
+ */
+ lcp = (struct load_command *)(addr + offset);
+
+ /*
+ * Perform prevalidation of the struct load_command
+ * before we attempt to use its contents. Invalid
+ * values are ones which result in an overflow, or
+ * which can not possibly be valid commands, or which
+ * straddle or exist past the reserved section at the
+ * start of the image.
+ */
+ if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
+ lcp->cmdsize < sizeof(struct load_command) ||
+ offset > cmds_size) {
+ break;
+ }
+
+ /* Check if it's a simulator binary. */
+ switch (lcp->cmd) {
+ case LC_VERSION_MIN_WATCHOS:
+ simulator_binary = TRUE;
+ break;
+
+ case LC_BUILD_VERSION: {
+ struct build_version_command *bvc;
+
+ bvc = (struct build_version_command *) lcp;
+ if (bvc->cmdsize < sizeof(*bvc)) {
+ /* unsafe to use this command struct if the cmdsize
+ * validated above is too small for the full struct to fit */
+ break;
+ }
+ if (bvc->platform == PLATFORM_IOSSIMULATOR ||
+ bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
+ simulator_binary = TRUE;
+ }
+
+ break;
+ }
+
+ case LC_VERSION_MIN_IPHONEOS: {
+ simulator_binary = TRUE;
+ break;
+ }
+
+ default:
+ /* ignore other load commands */
+ break;
+ }
+
+ if (simulator_binary == TRUE) {
+ break;
+ }
+ }
+
+bad:
+ if (ip_vdata) {
+ kfree(ip_vdata, PAGE_SIZE);
+ }
+
+ if (cred) {
+ kauth_cred_unref(&cred);
+ }
+
+ if (addr) {
+ kfree(addr, alloc_size);
+ }
+
+ return simulator_binary;
+}
+#endif /* __x86_64__ */
+
+#if CONFIG_CODE_DECRYPTION
+
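+/*
+ * The first 3 "4KB" pages of a DSMOS-protected binary are left
+ * in the clear; see unprotect_dsmos_segment() below.
+ */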
+#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
+
+static load_return_t
+unprotect_dsmos_segment(
+ uint64_t file_off,
+ uint64_t file_size,
+ struct vnode *vp,
+ off_t macho_offset,
+ vm_map_t map,
+ vm_map_offset_t map_addr,
+ vm_map_size_t map_size)
+{
+ kern_return_t kr;
+
+ /*
+ * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
+ * this part of a Universal binary) are not protected...
+ * The rest needs to be "transformed".
+ */
+ if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
+ file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
+ /* it's all unprotected, nothing to do... */
+ kr = KERN_SUCCESS;
+ } else {
+ if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
+ /*
+ * We start mapping in the unprotected area.
+ * Skip the unprotected part...
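+ * (e.g. a hypothetical file_off of 0x1000 would skip
+ * delta = 0x3000 - 0x1000 = 0x2000 bytes).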
+ */
+ vm_map_offset_t delta;
+
+ delta = APPLE_UNPROTECTED_HEADER_SIZE;
+ delta -= file_off;
+ map_addr += delta;
+ map_size -= delta;
+ }
+ /* ... transform the rest of the mapping. */
+ struct pager_crypt_info crypt_info;
+ crypt_info.page_decrypt = dsmos_page_transform;
+ crypt_info.crypt_ops = NULL;
+ crypt_info.crypt_end = NULL;
+#pragma unused(vp, macho_offset)
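+ /*
+ * Presumably an arbitrary non-NULL cookie: the actual transform
+ * is done by dsmos_page_transform above, and crypt_ops only
+ * needs to look initialized.
+ */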
+ crypt_info.crypt_ops = (void *)0x2e69cf40;
+ vm_map_offset_t crypto_backing_offset;
+ crypto_backing_offset = -1; /* i.e. use map entry's offset */
+#if VM_MAP_DEBUG_APPLE_PROTECT
+ if (vm_map_debug_apple_protect) {
+ struct proc *p;
+ p = current_proc();
+ printf("APPLE_PROTECT: %d[%s] map %p "
+ "[0x%llx:0x%llx] %s(%s)\n",
+ p->p_pid, p->p_comm, map,
+ (uint64_t) map_addr,
+ (uint64_t) (map_addr + map_size),
+ __FUNCTION__, vp->v_name);
+ }
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+ /* The DSMOS pager can only be used by apple signed code */
+ struct cs_blob * blob = csvnode_get_blob(vp, file_off);
+ if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
+ return LOAD_FAILURE;
+ }
+
+ kr = vm_map_apple_protected(map,
+ map_addr,
+ map_addr + map_size,
+ crypto_backing_offset,
+ &crypt_info);
+ }
+
+ if (kr != KERN_SUCCESS) {
+ return LOAD_FAILURE;
+ }
+ return LOAD_SUCCESS;
+}
+#else /* CONFIG_CODE_DECRYPTION */
+static load_return_t
+unprotect_dsmos_segment(
+ __unused uint64_t file_off,
+ __unused uint64_t file_size,
+ __unused struct vnode *vp,
+ __unused off_t macho_offset,
+ __unused vm_map_t map,
+ __unused vm_map_offset_t map_addr,
+ __unused vm_map_size_t map_size)
+{
+ return LOAD_SUCCESS;
+}
+#endif /* CONFIG_CODE_DECRYPTION */
+
+
+/*
+ * map_segment:
+ * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
+ * page size) issues.
+ *
+ * The mapping might result in 1, 2 or 3 map entries:
+ * 1. for the first page, which could overlap with the previous
+ * mapping,
+ * 2. for the center (if applicable),
+ * 3. for the last page, which could overlap with the next mapping.
+ *
+ * For each of those map entries, we might have to interpose a
+ * "fourk_pager" to deal with mis-alignment wrt the system page size,
+ * either in the mapping address and/or size or the file offset and/or
+ * size.
+ * The "fourk_pager" itself would be mapped with proper alignment
+ * wrt the system page size and would then be populated with the
+ * information about the intended mapping, with a "4KB" granularity.
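+ *
+ * Hypothetical example (arm64, 16KB system pages): mapping
+ * vm [0x5000:0xD000) from file [0x1000:0x9000) yields a head
+ * entry [0x5000:0x8000) and a tail entry [0xC000:0xD000), each
+ * through a "fourk_pager", plus a regular middle entry
+ * [0x8000:0xC000), since the vm and file offsets are mis-aligned
+ * by the same amount within the page.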
+ */
+static kern_return_t
+map_segment(
+ vm_map_t map,
+ vm_map_offset_t vm_start,
+ vm_map_offset_t vm_end,
+ memory_object_control_t control,
+ vm_map_offset_t file_start,
+ vm_map_offset_t file_end,
+ vm_prot_t initprot,
+ vm_prot_t maxprot,
+ load_result_t *result)
+{
+ vm_map_offset_t cur_offset, cur_start, cur_end;
+ kern_return_t ret;
+ vm_map_offset_t effective_page_mask;
+ vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;
+
+ if (vm_end < vm_start ||
+ file_end < file_start) {
+ return LOAD_BADMACHO;
+ }
+ if (vm_end == vm_start ||
+ file_end == file_start) {
+ /* nothing to map... */
+ return LOAD_SUCCESS;
+ }
+
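+ /* use the coarser of the kernel's page mask and the target map's */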
+ effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ if (vm_map_page_aligned(vm_start, effective_page_mask) &&
+ vm_map_page_aligned(vm_end, effective_page_mask) &&
+ vm_map_page_aligned(file_start, effective_page_mask) &&
+ vm_map_page_aligned(file_end, effective_page_mask)) {
+ /* all page-aligned and map-aligned: proceed */
+ } else {
+#if __arm64__
+ /* use an intermediate "4K" pager */
+ vmk_flags.vmkf_fourk = TRUE;
+#else /* __arm64__ */
+ panic("map_segment: unexpected mis-alignment "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
+ (uint64_t) vm_start,
+ (uint64_t) vm_end,
+ (uint64_t) file_start,
+ (uint64_t) file_end);
+#endif /* __arm64__ */
+ }
+
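+ /*
+ * cur_offset tracks how much of the file range has been mapped
+ * so far; cur_start/cur_end delimit the map entry being built.
+ */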
+ cur_offset = 0;
+ cur_start = vm_start;
+ cur_end = vm_start;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
+ /* one 4K pager for the 1st page */
+ cur_end = vm_map_round_page(cur_start, effective_page_mask);
+ if (cur_end > vm_end) {
+ cur_end = vm_start + (file_end - file_start);
+ }
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ /* no copy-on-read for mapped binaries */
+ vmk_flags.vmkf_no_copy_on_read = 1;
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return LOAD_NOSPACE;
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ if (vm_map_round_page(cur_end, effective_page_mask) >=
+ vm_map_trunc_page(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* no middle */
+ } else {
+ cur_start = cur_end;
+ if ((vm_start & effective_page_mask) !=
+ (file_start & effective_page_mask)) {
+ /* one 4K pager for the middle */
+ cur_vmk_flags = vmk_flags;
+ } else {
+ /* regular mapping for the middle */
+ cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ }
+
+#if CONFIG_EMBEDDED
+ (void) result;
+#else /* CONFIG_EMBEDDED */
+ /*
+ * This process doesn't have its new csflags (from
+ * the image being loaded) yet, so tell VM to override the
+ * current process's CS_ENFORCEMENT for this mapping.
+ */
+ if (result->csflags & CS_ENFORCEMENT) {
+ cur_vmk_flags.vmkf_cs_enforcement = TRUE;
+ } else {
+ cur_vmk_flags.vmkf_cs_enforcement = FALSE;
+ }
+ cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
+#endif /* CONFIG_EMBEDDED */
+
+ cur_end = vm_map_trunc_page(vm_start + (file_end -
+ file_start),
+ effective_page_mask);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ /* no copy-on-read for mapped binaries */
+ cur_vmk_flags.vmkf_no_copy_on_read = 1;
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return LOAD_NOSPACE;
+ }
+ cur_offset += cur_end - cur_start;
+ }
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ cur_start = cur_end;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* one 4K pager for the last page */
+ cur_end = vm_start + (file_end - file_start);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ /* no copy-on-read for mapped binaries */
+ vmk_flags.vmkf_no_copy_on_read = 1;
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return LOAD_NOSPACE;
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+done:
+ assert(cur_end >= vm_start + (file_end - file_start));
+ return LOAD_SUCCESS;
+}
+
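+/*
+ * load_segment:
+ * Validate an LC_SEGMENT or LC_SEGMENT_64 load command against
+ * the Mach-O image bounds, then map the segment into the target
+ * map at its slid address via map_segment().
+ */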
+static
+load_return_t
+load_segment(
+ struct load_command *lcp,
+ uint32_t filetype,
+ void * control,
+ off_t pager_offset,
+ off_t macho_size,
+ struct vnode *vp,
+ vm_map_t map,
+ int64_t slide,
+ load_result_t *result)
+{
+ struct segment_command_64 segment_command, *scp;
+ kern_return_t ret;
+ vm_map_size_t delta_size;
+ vm_prot_t initprot;
+ vm_prot_t maxprot;
+ size_t segment_command_size, total_section_size,
+ single_section_size;
+ vm_map_offset_t file_offset, file_size;
+ vm_map_offset_t vm_offset, vm_size;
+ vm_map_offset_t vm_start, vm_end, vm_end_aligned;
+ vm_map_offset_t file_start, file_end;
+ kern_return_t kr;
+ boolean_t verbose;
+ vm_map_size_t effective_page_size;
+ vm_map_offset_t effective_page_mask;
+#if __arm64__
+ vm_map_kernel_flags_t vmk_flags;
+ boolean_t fourk_align;
+#endif /* __arm64__ */
+
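+ /* the target map's page geometry, never finer than the kernel's */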
+ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
+ effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+
+ verbose = FALSE;
+ if (LC_SEGMENT_64 == lcp->cmd) {
+ segment_command_size = sizeof(struct segment_command_64);
+ single_section_size = sizeof(struct section_64);
+#if __arm64__
+ /* 64-bit binary: should already be 16K-aligned */
+ fourk_align = FALSE;
+#endif /* __arm64__ */
+ } else {
+ segment_command_size = sizeof(struct segment_command);
+ single_section_size = sizeof(struct section);
+#if __arm64__
+ /* 32-bit binary: might need 4K-alignment */
+ if (effective_page_size != FOURK_PAGE_SIZE) {
+ /* not using 4K page size: need fourk_pager */
+ fourk_align = TRUE;
+ verbose = TRUE;
+ } else {
+ /* using 4K page size: no need for re-alignment */
+ fourk_align = FALSE;
+ }
+#endif /* __arm64__ */
+ }
+ if (lcp->cmdsize < segment_command_size) {
+ return LOAD_BADMACHO;
+ }
+ total_section_size = lcp->cmdsize - segment_command_size;
+
+ if (LC_SEGMENT_64 == lcp->cmd) {
+ scp = (struct segment_command_64 *)lcp;
+ } else {
+ scp = &segment_command;
+ widen_segment_command((struct segment_command *)lcp, scp);
+ }
+
+ if (verbose) {
+ MACHO_PRINTF(("+++ load_segment %s "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
+ "prot %d/%d flags 0x%x\n",
+ scp->segname,
+ (uint64_t)(slide + scp->vmaddr),
+ (uint64_t)(slide + scp->vmaddr + scp->vmsize),
+ pager_offset + scp->fileoff,
+ pager_offset + scp->fileoff + scp->filesize,
+ scp->initprot,
+ scp->maxprot,
+ scp->flags));
+ }
+
+ /*
+ * Make sure what we get from the file is really ours (as specified
+ * by macho_size).
+ */
+ if (scp->fileoff + scp->filesize < scp->fileoff ||
+ scp->fileoff + scp->filesize > (uint64_t)macho_size) {
+ return LOAD_BADMACHO;
+ }
+ /*
+ * Ensure that the number of sections specified would fit
+ * within the load command size.
+ */
+ if (total_section_size / single_section_size < scp->nsects) {
+ return LOAD_BADMACHO;
+ }