+ /*
+ * If the virtual size of the segment is greater
+ * than the size from the file, we need to allocate
+ * zero fill memory for the rest.
+ */
+ if ((vm_end - vm_start) > (file_end - file_start)) {
+ delta_size = (vm_end - vm_start) - (file_end - file_start);
+ } else {
+ delta_size = 0;
+ }
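+ /*
+ * Example: a segment with a virtual size of 0x3000 but only 0x1000
+ * bytes of file content leaves delta_size = 0x2000 of BSS-style
+ * memory that must be backed by zero-filled pages.
+ */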
+ if (delta_size > 0) {
+ vm_map_offset_t tmp_start;
+ vm_map_offset_t tmp_end;
+
+ if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) {
+ DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start));
+ return LOAD_NOSPACE;
+ }
+
+ if (os_add_overflow(tmp_start, delta_size, &tmp_end)) {
+ DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size);
+ return LOAD_NOSPACE;
+ }
+
+ if (verbose) {
+ MACHO_PRINTF(("++++++ load_segment: "
+ "delta mapping vm [0x%llx:0x%llx]\n",
+ (uint64_t) tmp_start,
+ (uint64_t) tmp_end));
+ }
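+ /*
+ * No pager backs this range (MEMORY_OBJECT_CONTROL_NULL), so
+ * map_segment() is expected to create an anonymous mapping whose
+ * pages the VM system supplies zero-filled on first touch.
+ */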
+ kr = map_segment(map,
+ tmp_start,
+ tmp_end,
+ MEMORY_OBJECT_CONTROL_NULL,
+ 0,
+ delta_size,
+ scp->initprot,
+ scp->maxprot,
+ result);
+ if (kr != KERN_SUCCESS) {
+ DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr);
+ return LOAD_NOSPACE;
+ }
+ }
+
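+ /*
+ * A segment that maps file offset 0 with a non-zero file size covers
+ * the Mach-O header itself; remember where it was mapped.
+ */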
+ if ((scp->fileoff == 0) && (scp->filesize != 0)) {
+ result->mach_header = vm_offset;
+ }
+
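+ /*
+ * SG_PROTECTED_VERSION_1 marks a segment encrypted with the legacy
+ * "DSMOS" (Don't Steal Mac OS X) scheme, which must be unprotected
+ * before its contents are usable.
+ */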
+ if (scp->flags & SG_PROTECTED_VERSION_1) {
+ ret = unprotect_dsmos_segment(file_start,
+ file_end - file_start,
+ vp,
+ pager_offset,
+ map,
+ vm_start,
+ vm_end - vm_start);
+ if (ret != LOAD_SUCCESS) {
+ DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret);
+ return ret;
+ }
+ } else {
+ ret = LOAD_SUCCESS;
+ }
+
+ if (LOAD_SUCCESS == ret &&
+ filetype == MH_DYLINKER &&
+ result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
+ note_all_image_info_section(scp,
+ LC_SEGMENT_64 == lcp->cmd,
+ single_section_size,
+ ((const char *)lcp +
+ segment_command_size),
+ slide,
+ result);
+ }
+
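+ /*
+ * Re-validate a previously recorded entry point: it only remains
+ * valid if it lands within this mapping with both read and execute
+ * permission.
+ */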
+ if (result->entry_point != MACH_VM_MIN_ADDRESS) {
+ if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
+ if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ result->validentry = 1;
+ } else {
+ /* right range but wrong protections, unset if previously validated */
+ result->validentry = 0;
+ }
+ }
+ }
+
+ if (ret != LOAD_SUCCESS && verbose) {
+ DEBUG4K_ERROR("ret %d\n", ret);
+ }
+ return ret;
+}
+
+static
+load_return_t
+load_uuid(
+ struct uuid_command *uulp,
+ char *command_end,
+ load_result_t *result
+ )
+{
+ /*
+ * We need to check the following for this command:
+ * - The command size should be at least the size of struct uuid_command
+ * - The UUID part of the command should be completely within the mach-o header
+ */
+
+ if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
+ (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
+ return LOAD_BADMACHO;
+ }
+
+ memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
+ return LOAD_SUCCESS;
+}
+
+static
+load_return_t
+load_version(
+ struct version_min_command *vmc,
+ boolean_t *found_version_cmd,
+ int ip_flags __unused,
+ load_result_t *result
+ )
+{
+ uint32_t platform = 0;
+ uint32_t sdk;
+ uint32_t min_sdk;
+
+ if (vmc->cmdsize < sizeof(*vmc)) {
+ return LOAD_BADMACHO;
+ }
+ if (*found_version_cmd == TRUE) {
+ return LOAD_BADMACHO;
+ }
+ *found_version_cmd = TRUE;
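+ /* version fields encode X.Y.Z as major << 16 | minor << 8 | patch */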
+ sdk = vmc->sdk;
+ min_sdk = vmc->version;
+ switch (vmc->cmd) {
+ case LC_VERSION_MIN_MACOSX:
+ platform = PLATFORM_MACOS;
+ break;
+#if __x86_64__
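+ /*
+ * On x86_64, binaries carrying embedded-platform version commands can
+ * only be simulator binaries, so report the simulator platform.
+ */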
+ case LC_VERSION_MIN_IPHONEOS:
+ platform = PLATFORM_IOSSIMULATOR;
+ break;
+ case LC_VERSION_MIN_WATCHOS:
+ platform = PLATFORM_WATCHOSSIMULATOR;
+ break;
+ case LC_VERSION_MIN_TVOS:
+ platform = PLATFORM_TVOSSIMULATOR;
+ break;
+#else /* __x86_64__ */
+ case LC_VERSION_MIN_IPHONEOS: {
+#if __arm64__
+ extern int legacy_footprint_entitlement_mode;
+ if (vmc->sdk < (12 << 16)) {
+ /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
+ result->legacy_footprint = TRUE;
+ }
+#endif /* __arm64__ */
+ platform = PLATFORM_IOS;
+ break;
+ }
+ case LC_VERSION_MIN_WATCHOS:
+ platform = PLATFORM_WATCHOS;
+ break;
+ case LC_VERSION_MIN_TVOS:
+ platform = PLATFORM_TVOS;
+ break;
+#endif /* __x86_64__ */
+ /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
+ default:
+ sdk = (uint32_t)-1;
+ min_sdk = (uint32_t)-1;
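+ /* expected to be unreachable: the caller dispatches only the LC_VERSION_MIN_* cases above */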
+ __builtin_unreachable();
+ }
+ result->ip_platform = platform;
+ result->lr_min_sdk = min_sdk;
+ result->lr_sdk = sdk;
+ return LOAD_SUCCESS;
+}
+
+static
+load_return_t
+load_main(
+ struct entry_point_command *epc,
+ thread_t thread,
+ int64_t slide,
+ load_result_t *result
+ )
+{
+ mach_vm_offset_t addr;
+ kern_return_t ret;
+
+ if (epc->cmdsize < sizeof(*epc)) {
+ return LOAD_BADMACHO;
+ }
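+ /* an LC_UNIXTHREAD (or a previous LC_MAIN) already provided entry thread state */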
+ if (result->thread_count != 0) {
+ return LOAD_FAILURE;
+ }
+
+ if (thread == THREAD_NULL) {
+ return LOAD_SUCCESS;
+ }
+
+ /*
+ * LC_MAIN specifies stack size but not location.
+ * Add guard page to allocation size (MAXSSIZ includes guard page).
+ */
+ if (epc->stacksize) {
+ if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
+ /*
+ * We are going to immediately throw away this result, but we want
+ * to make sure we aren't loading a value dangerously close to
+ * overflowing, since a guard page will be added to it and it will
+ * be rounded to page boundaries.
+ */
+ return LOAD_BADMACHO;
+ }
+ result->user_stack_size = epc->stacksize;
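+ /* the allocation itself reserves a single extra page for the stack guard */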
+ if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
+ return LOAD_BADMACHO;
+ }
+ result->custom_stack = TRUE;
+ } else {
+ result->user_stack_alloc_size = MAXSSIZ;
+ }
+
+ /* use default location for stack */
+ ret = thread_userstackdefault(&addr, result->is_64bit_addr);
+ if (ret != KERN_SUCCESS) {
+ return LOAD_FAILURE;