X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/bsd/kern/mach_loader.c?ds=sidebyside diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c index d51e05c70..28b5c4fd1 100644 --- a/bsd/kern/mach_loader.c +++ b/bsd/kern/mach_loader.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2010 Apple Inc. All rights reserved. + * Copyright (c) 2000-2020 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -47,12 +47,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include /* vm_allocate() */ @@ -84,7 +86,9 @@ #include #include #include +#include #include /* for kIOReturnNotPrivileged */ +#include /* for IOVnodeHasEntitlement */ #include @@ -94,6 +98,9 @@ */ extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size, unsigned int flags); +#if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX +extern void pmap_disable_user_jop(pmap_t pmap); +#endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */ /* XXX should have prototypes in a shared header file */ extern int get_map_nentries(vm_map_t); @@ -127,7 +134,10 @@ static const load_result_t load_result_null = { .max_vm_addr = MACH_VM_MIN_ADDRESS, .cs_end_offset = 0, .threadstate = NULL, - .threadstate_sz = 0 + .threadstate_sz = 0, + .is_cambria = 0, + .dynlinker_mach_header = MACH_VM_MIN_ADDRESS, + .dynlinker_fd = -1, }; /* @@ -159,7 +169,8 @@ load_segment( struct vnode *vp, vm_map_t map, int64_t slide, - load_result_t *result + load_result_t *result, + struct image_params *imgp ); static load_return_t @@ -173,6 +184,7 @@ static load_return_t load_version( struct version_min_command *vmc, boolean_t *found_version_cmd, + int ip_flags, load_result_t *result ); @@ -183,6 +195,7 @@ load_code_signature( off_t macho_offset, off_t macho_size, cpu_type_t cputype, + cpu_subtype_t cpusubtype, load_result_t *result, struct image_params *imgp); @@ -221,6 +234,7 @@ load_unixthread( struct thread_command *tcp, thread_t thread, int64_t slide, + boolean_t is_x86_64_compat_binary, load_result_t *result ); @@ -239,6 +253,7 @@ load_threadstack( uint32_t total_size, mach_vm_offset_t *user_stack, int *customstack, + boolean_t is_x86_64_compat_binary, load_result_t *result ); @@ -262,6 +277,7 @@ load_dylinker( struct image_params *imgp ); + #if __x86_64__ extern int bootarg_no32exec; static boolean_t @@ -281,7 +297,8 @@ get_macho_vnode( off_t *file_offset, off_t *macho_size, struct macho_data *macho_data, - struct vnode **vpp + struct vnode **vpp, + struct image_params *imgp ); static inline void @@ -313,7 +330,8 @@ note_all_image_info_section(const struct segment_command_64 *scp, unsigned int i; - if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { + if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 && + strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { return; } for (i = 0; i < scp->nsects; ++i) { @@ -341,6 +359,68 @@ const int fourk_binary_compatibility_unsafe = TRUE; const int fourk_binary_compatibility_allow_wx = FALSE; #endif /* __arm64__ */ +#if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX +/** + * Determines whether this is an arm64e process which may host in-process + * plugins. 
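+ * A process qualifies if it has the plugin-host override entitlement,
+ * disables library validation, or matches a known hardening exception;
+ * qualifying hosts run with user JOP keys disabled.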
+ */ +static inline bool +arm64e_plugin_host(struct image_params *imgp, load_result_t *result) +{ + if (imgp->ip_flags & IMGPF_NOJOP) { + return false; + } + + if (!result->platform_binary) { + return false; + } + + struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset); + const char *identity = csblob_get_identity(csblob); + if (!identity) { + return false; + } + + /* Check if override host plugin entitlement is present and posix spawn attribute to disable A keys is passed */ + if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) { + return imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS; + } + + /* Disabling library validation is a good signal that this process plans to host plugins */ + const char *const disable_lv_entitlements[] = { + "com.apple.security.cs.disable-library-validation", + "com.apple.private.cs.automator-plugins", + CLEAR_LV_ENTITLEMENT, + }; + for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) { + if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, disable_lv_entitlements[i])) { + return true; + } + } + + /* From /System/Library/Security/HardeningExceptions.plist */ + const char *const hardening_exceptions[] = { + "com.apple.perl5", /* Scripting engines may load third party code and jit*/ + "com.apple.perl", /* Scripting engines may load third party code and jit*/ + "org.python.python", /* Scripting engines may load third party code and jit*/ + "com.apple.expect", /* Scripting engines may load third party code and jit*/ + "com.tcltk.wish", /* Scripting engines may load third party code and jit*/ + "com.tcltk.tclsh", /* Scripting engines may load third party code and jit*/ + "com.apple.ruby", /* Scripting engines may load third party code and jit*/ + "com.apple.bash", /* Required for the 'enable' command */ + "com.apple.zsh", /* Required for the 'zmodload' command */ + "com.apple.ksh", /* Required for 'builtin' command */ + }; + for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) { + if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) { + return true; + } + } + + return false; +} +#endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */ + load_return_t load_machfile( struct image_params *imgp, @@ -353,6 +433,7 @@ load_machfile( struct vnode *vp = imgp->ip_vp; off_t file_offset = imgp->ip_arch_offset; off_t macho_size = imgp->ip_arch_size; + off_t total_size = 0; off_t file_size = imgp->ip_vattr->va_data_size; pmap_t pmap = 0; /* protected by create_map */ vm_map_t map; @@ -368,7 +449,8 @@ load_machfile( kern_return_t kret; unsigned int pmap_flags = 0; - if (macho_size > file_size) { + if (os_add_overflow(file_offset, macho_size, &total_size) || + total_size > file_size) { return LOAD_BADMACHO; } @@ -385,6 +467,16 @@ load_machfile( } else { ledger_task = task; } + +#if defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) + if (imgp->ip_px_sa != NULL) { + struct _posix_spawnattr* psa = (struct _posix_spawnattr *) imgp->ip_px_sa; + if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) { + pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES; + } + } +#endif /* defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) */ + pmap = pmap_create_options(get_task_ledger(ledger_task), (vm_map_size_t) 0, pmap_flags); @@ -408,6 +500,13 @@ load_machfile( vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT); #endif /* __arm64__ */ +#if PMAP_CREATE_FORCE_4K_PAGES + if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) { + DEBUG4K_LIFE("***** 
launching '%s' as 4k *****\n", vp->v_name); + vm_map_set_page_shift(map, FOURK_PAGE_SHIFT); + } +#endif /* PMAP_CREATE_FORCE_4K_PAGES */ + #ifndef CONFIG_ENFORCE_SIGNED_CODE /* This turns off faulting for executable pages, which allows * to circumvent Code Signing Enforcement. The per process @@ -443,6 +542,9 @@ load_machfile( aslr_page_offset += aslr_section_offset; } + if (vm_map_page_shift(map) < (int)PAGE_SHIFT) { + DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset); + } if (!result) { result = &myresult; @@ -498,12 +600,13 @@ load_machfile( if (enforce_hard_pagezero && (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) { #if __arm64__ - if (!result->is_64bit_addr && /* not 64-bit address space */ - !(header->flags & MH_PIE) && /* not PIE */ - (vm_map_page_shift(map) != FOURK_PAGE_SHIFT || - PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ - result->has_pagezero && /* has a "soft" page zero */ - fourk_binary_compatibility_unsafe) { + if ( + !result->is_64bit_addr && /* not 64-bit address space */ + !(header->flags & MH_PIE) && /* not PIE */ + (vm_map_page_shift(map) != FOURK_PAGE_SHIFT || + PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ + result->has_pagezero && /* has a "soft" page zero */ + fourk_binary_compatibility_unsafe) { /* * For backwards compatibility of "4K" apps on * a 16K system, do not enforce a hard page zero... @@ -516,6 +619,16 @@ load_machfile( } } +#if __arm64__ + if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) { + /* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */ + if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) { + vm_map_deallocate(map); /* will lose pmap reference too */ + return LOAD_BADMACHO; + } + } +#endif + vm_commit_pagezero_status(map); /* @@ -563,6 +676,17 @@ load_machfile( } *mapp = map; +#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) + /* + * arm64e plugin hosts currently run with JOP keys disabled, since they + * may need to run arm64 plugins. 
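+ * IMGPF_NOJOP is recorded on the image params and the new pmap's user
+ * keys are cleared before the process runs.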
+ */ + if (arm64e_plugin_host(imgp, result)) { + imgp->ip_flags |= IMGPF_NOJOP; + pmap_disable_user_jop(pmap); + } +#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */ + #ifdef CONFIG_32BIT_TELEMETRY if (!result->is_64bit_data) { /* @@ -586,6 +710,27 @@ int macho_printf = 0; } \ } while (0) + +static boolean_t +pie_required( + cpu_type_t exectype, + cpu_subtype_t execsubtype) +{ + switch (exectype) { + case CPU_TYPE_X86_64: + return FALSE; + case CPU_TYPE_ARM64: + return TRUE; + case CPU_TYPE_ARM: + switch (execsubtype) { + case CPU_SUBTYPE_ARM_V7K: + return TRUE; + } + break; + } + return FALSE; +} + /* * The file size of a mach-o file is limited to 32 bits; this is because * this is the limit on the kalloc() of enough bytes for a mach_header and @@ -617,7 +762,6 @@ parse_machfile( uint32_t ncmds; struct load_command *lcp; struct dylinker_command *dlp = 0; - integer_t dlarchbits = 0; void * control; load_return_t ret = LOAD_SUCCESS; void * addr; @@ -639,14 +783,15 @@ parse_machfile( int64_t slide = 0; boolean_t dyld_no_load_addr = FALSE; boolean_t is_dyld = FALSE; - vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + vm_map_offset_t effective_page_mask = PAGE_MASK; #if __arm64__ - uint32_t pagezero_end = 0; - uint32_t executable_end = 0; - uint32_t writable_start = 0; + uint64_t pagezero_end = 0; + uint64_t executable_end = 0; + uint64_t writable_start = 0; vm_map_size_t effective_page_size; - effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); + effective_page_mask = vm_map_page_mask(map); + effective_page_size = vm_map_page_size(map); #endif /* __arm64__ */ if (header->magic == MH_MAGIC_64 || @@ -657,7 +802,7 @@ parse_machfile( /* * Break infinite recursion */ - if (depth > 1) { + if (depth > 2) { return LOAD_FAILURE; } @@ -666,9 +811,14 @@ parse_machfile( /* * Check to see if right machine type. */ - if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) || - !grade_binary(header->cputype, - header->cpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) { + if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) + ) { + return LOAD_BADARCH; + } + + if (!grade_binary(header->cputype, + header->cpusubtype & ~CPU_SUBTYPE_MASK, + header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) { return LOAD_BADARCH; } @@ -676,24 +826,23 @@ parse_machfile( switch (header->filetype) { case MH_EXECUTE: - if (depth != 1) { + if (depth != 1 && depth != 3) { return LOAD_FAILURE; } -#if CONFIG_EMBEDDED if (header->flags & MH_DYLDLINK) { /* Check properties of dynamic executables */ if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) { return LOAD_FAILURE; } result->needs_dynlinker = TRUE; + } else if (header->cputype == CPU_TYPE_X86_64) { + /* x86_64 static binaries allowed */ } else { /* Check properties of static executables (disallowed except for development) */ #if !(DEVELOPMENT || DEBUG) return LOAD_FAILURE; #endif } -#endif /* CONFIG_EMBEDDED */ - break; case MH_DYLINKER: if (depth != 2) { @@ -706,6 +855,13 @@ parse_machfile( return LOAD_FAILURE; } + /* + * For PIE and dyld, slide everything by the ASLR offset. + */ + if ((header->flags & MH_PIE) || is_dyld) { + slide = aslr_offset; + } + /* * Get the pager for the file. 
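 * (ubc_getobject() returns the memory object control backing the vnode.)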
*/ @@ -714,7 +870,8 @@ parse_machfile( /* ensure header + sizeofcmds falls within the file */ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || (off_t)cmds_size > macho_size || - round_page_overflow(cmds_size, &alloc_size)) { + round_page_overflow(cmds_size, &alloc_size) || + alloc_size > INT_MAX) { return LOAD_BADMACHO; } @@ -726,7 +883,7 @@ parse_machfile( return LOAD_NOSPACE; } - error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset, + error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset, UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p); if (error) { kfree(addr, alloc_size); @@ -734,22 +891,17 @@ parse_machfile( } if (resid) { - /* We must be able to read in as much as the mach_header indicated */ - kfree(addr, alloc_size); - return LOAD_BADMACHO; - } - - /* - * For PIE and dyld, slide everything by the ASLR offset. - */ - if ((header->flags & MH_PIE) || is_dyld) { - slide = aslr_offset; + { + /* We must be able to read in as much as the mach_header indicated */ + kfree(addr, alloc_size); + return LOAD_BADMACHO; + } } /* * Scan through the commands, processing each one as necessary. * We parse in three passes through the headers: - * 0: determine if TEXT and DATA boundary can be page-aligned + * 0: determine if TEXT and DATA boundary can be page-aligned, load platform version * 1: thread state, uuid, code signature * 2: segments * 3: dyld, encryption, check entry point @@ -763,11 +915,7 @@ parse_machfile( #endif for (pass = 0; pass <= 3; pass++) { - if (pass == 0 && !slide_realign && !is_dyld) { - /* if we dont need to realign the slide or determine dyld's load - * address, pass 0 can be skipped */ - continue; - } else if (pass == 1) { + if (pass == 1) { #if __arm64__ boolean_t is_pie; int64_t adjust; @@ -829,14 +977,15 @@ parse_machfile( * it right after the main binary. If binresult == NULL, load * directly to the given slide. 
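 * (binresult describes the already-parsed main executable, so its
 * max_vm_addr is known at this point.)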
*/ - slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask); + mach_vm_address_t max_vm_addr = binresult->max_vm_addr; + slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask); } } /* - * Check that the entry point is contained in an executable segments + * Check that the entry point is contained in an executable segment */ - if (pass == 3) { + if ((pass == 3) && (thread != THREAD_NULL)) { if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) { /* Driver binaries must have driverkit platform */ if (result->ip_platform == PLATFORM_DRIVERKIT) { @@ -928,21 +1077,21 @@ parse_machfile( if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) { /* PAGEZERO */ - if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) { + if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) { ret = LOAD_BADMACHO; break; } } if (scp->initprot & VM_PROT_EXECUTE) { /* TEXT */ - if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) { + if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) { ret = LOAD_BADMACHO; break; } } if (scp->initprot & VM_PROT_WRITE) { /* DATA */ - if (os_add_overflow(scp->vmaddr, slide, &writable_start)) { + if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) { ret = LOAD_BADMACHO; break; } @@ -976,7 +1125,8 @@ parse_machfile( vp, map, slide, - result); + result, + imgp); if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X * protection. */ @@ -999,11 +1149,9 @@ parse_machfile( if (pass == 0) { if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) { dyld_no_load_addr = TRUE; - if (!slide_realign) { - /* got what we need, bail early on pass 0 */ - continue; - } } + /* got what we need, bail early on pass 0 */ + continue; } if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) { @@ -1031,7 +1179,8 @@ parse_machfile( vp, map, slide, - result); + result, + imgp); if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X @@ -1046,7 +1195,8 @@ parse_machfile( break; } - case LC_UNIXTHREAD: + case LC_UNIXTHREAD: { + boolean_t is_x86_64_compat_binary = FALSE; if (pass != 1) { break; } @@ -1054,8 +1204,10 @@ parse_machfile( (struct thread_command *) lcp, thread, slide, + is_x86_64_compat_binary, result); break; + } case LC_MAIN: if (pass != 1) { break; @@ -1075,7 +1227,6 @@ parse_machfile( } if ((depth == 1) && (dlp == 0)) { dlp = (struct dylinker_command *)lcp; - dlarchbits = (header->cputype & CPU_ARCH_MASK); } else { ret = LOAD_FAILURE; } @@ -1092,6 +1243,7 @@ parse_machfile( if (pass != 1) { break; } + /* pager -> uip -> * load signatures & store in uip * set VM object "signed_pages" @@ -1102,6 +1254,7 @@ parse_machfile( file_offset, macho_size, header->cputype, + header->cpusubtype, result, imgp); if (ret != LOAD_SUCCESS) { @@ -1135,7 +1288,7 @@ parse_machfile( NULL, file_offset + off, addr + off, - PAGE_SIZE, + MIN(PAGE_SIZE, cmds_size), &tainted); if (!valid || (tainted & CS_VALIDATE_TAINTED)) { if (cs_debug) { @@ -1215,15 +1368,24 @@ parse_machfile( case LC_VERSION_MIN_TVOS: { struct version_min_command *vmc; - if (depth != 1 || pass != 1) { + if (depth != 1 || pass != 0) { break; } vmc = (struct version_min_command *) lcp; - ret = load_version(vmc, &found_version_cmd, result); + ret = load_version(vmc, 
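/* version commands are now parsed in pass 0 (see the updated pass list above) */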
&found_version_cmd, imgp->ip_flags, result); +#if XNU_TARGET_OS_OSX + if (ret == LOAD_SUCCESS) { + if (result->ip_platform == PLATFORM_IOS) { + vm_map_mark_alien(map); + } else { + assert(!vm_map_is_alien(map)); + } + } +#endif /* XNU_TARGET_OS_OSX */ break; } case LC_BUILD_VERSION: { - if (depth != 1 || pass != 1) { + if (depth != 1 || pass != 0) { break; } struct build_version_command* bvc = (struct build_version_command*)lcp; @@ -1236,7 +1398,16 @@ parse_machfile( break; } result->ip_platform = bvc->platform; + result->lr_sdk = bvc->sdk; + result->lr_min_sdk = bvc->minos; found_version_cmd = TRUE; +#if XNU_TARGET_OS_OSX + if (result->ip_platform == PLATFORM_IOS) { + vm_map_mark_alien(map); + } else { + assert(!vm_map_is_alien(map)); + } +#endif /* XNU_TARGET_OS_OSX */ break; } default: @@ -1268,10 +1439,11 @@ parse_machfile( * load the dylinker, and slide it by the independent DYLD ASLR * offset regardless of the PIE-ness of the main binary. */ - ret = load_dylinker(dlp, dlarchbits, map, thread, depth, + ret = load_dylinker(dlp, header->cputype, map, thread, depth, dyld_aslr_offset, result, imgp); } + if ((ret == LOAD_SUCCESS) && (depth == 1)) { if (result->thread_count == 0) { ret = LOAD_FAILURE; @@ -1342,6 +1514,7 @@ check_if_simulator_binary( /* Allocate page to copyin mach header */ ip_vdata = kalloc(PAGE_SIZE); + bzero(ip_vdata, PAGE_SIZE); if (ip_vdata == NULL) { goto bad; } @@ -1365,7 +1538,8 @@ check_if_simulator_binary( /* ensure header + sizeofcmds falls within the file */ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || (off_t)cmds_size > macho_size || - round_page_overflow(cmds_size, &alloc_size)) { + round_page_overflow(cmds_size, &alloc_size) || + alloc_size > INT_MAX) { goto bad; } @@ -1377,7 +1551,7 @@ check_if_simulator_binary( goto bad; } - error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, alloc_size, file_offset, + error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset, UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p); if (error) { goto bad; @@ -1492,28 +1666,38 @@ unprotect_dsmos_segment( vm_map_size_t map_size) { kern_return_t kr; + uint64_t slice_off; /* * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of * this part of a Universal binary) are not protected... * The rest needs to be "transformed". */ - if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE && - file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) { + slice_off = file_off - macho_offset; + if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE && + slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) { /* it's all unprotected, nothing to do... */ kr = KERN_SUCCESS; } else { - if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) { + if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) { /* * We start mapping in the unprotected area. * Skip the unprotected part... */ - vm_map_offset_t delta; + uint64_t delta_file; + vm_map_offset_t delta_map; - delta = APPLE_UNPROTECTED_HEADER_SIZE; - delta -= file_off; - map_addr += delta; - map_size -= delta; + delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE; + delta_file -= slice_off; + if (os_convert_overflow(delta_file, &delta_map)) { + return LOAD_BADMACHO; + } + if (os_add_overflow(map_addr, delta_map, &map_addr)) { + return LOAD_BADMACHO; + } + if (os_sub_overflow(map_size, delta_map, &map_size)) { + return LOAD_BADMACHO; + } } /* ... transform the rest of the mapping. 
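 * (vm_map_apple_protected() below installs the decrypting pager over
 * this range.)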
*/ struct pager_crypt_info crypt_info; @@ -1547,7 +1731,8 @@ unprotect_dsmos_segment( map_addr, map_addr + map_size, crypto_backing_offset, - &crypt_info); + &crypt_info, + CRYPTID_APP_ENCRYPTION); } if (kr != KERN_SUCCESS) { @@ -1617,7 +1802,7 @@ map_segment( return LOAD_SUCCESS; } - effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + effective_page_mask = vm_map_page_mask(map); vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; if (vm_map_page_aligned(vm_start, effective_page_mask) && @@ -1705,9 +1890,9 @@ map_segment( cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; } -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) (void) result; -#else /* CONFIG_EMBEDDED */ +#else /* !defined(XNU_TARGET_OS_OSX) */ /* * This process doesn't have its new csflags (from * the image being loaded) yet, so tell VM to override the @@ -1719,7 +1904,11 @@ map_segment( cur_vmk_flags.vmkf_cs_enforcement = FALSE; } cur_vmk_flags.vmkf_cs_enforcement_override = TRUE; -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ + + if (result->is_cambria && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) { + cur_vmk_flags.vmkf_translated_allow_execute = TRUE; + } cur_end = vm_map_trunc_page(vm_start + (file_end - file_start), @@ -1823,7 +2012,8 @@ load_segment( struct vnode *vp, vm_map_t map, int64_t slide, - load_result_t *result) + load_result_t *result, + struct image_params *imgp) { struct segment_command_64 segment_command, *scp; kern_return_t ret; @@ -1832,8 +2022,9 @@ load_segment( vm_prot_t maxprot; size_t segment_command_size, total_section_size, single_section_size; - vm_map_offset_t file_offset, file_size; - vm_map_offset_t vm_offset, vm_size; + uint64_t file_offset, file_size; + vm_map_offset_t vm_offset; + size_t vm_size; vm_map_offset_t vm_start, vm_end, vm_end_aligned; vm_map_offset_t file_start, file_end; kern_return_t kr; @@ -1845,8 +2036,10 @@ load_segment( boolean_t fourk_align; #endif /* __arm64__ */ - effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); - effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + (void)imgp; + + effective_page_size = vm_map_page_size(map); + effective_page_mask = vm_map_page_mask(map); verbose = FALSE; if (LC_SEGMENT_64 == lcp->cmd) { @@ -1855,6 +2048,12 @@ load_segment( #if __arm64__ /* 64-bit binary: should already be 16K-aligned */ fourk_align = FALSE; + + if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT && + PAGE_SHIFT != FOURK_PAGE_SHIFT) { + fourk_align = TRUE; + verbose = TRUE; + } #endif /* __arm64__ */ } else { segment_command_size = sizeof(struct segment_command); @@ -1872,6 +2071,7 @@ load_segment( #endif /* __arm64__ */ } if (lcp->cmdsize < segment_command_size) { + DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size); return LOAD_BADMACHO; } total_section_size = lcp->cmdsize - segment_command_size; @@ -1903,6 +2103,7 @@ load_segment( */ if (scp->fileoff + scp->filesize < scp->fileoff || scp->fileoff + scp->filesize > (uint64_t)macho_size) { + DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size); return LOAD_BADMACHO; } /* @@ -1910,12 +2111,16 @@ load_segment( * within the load command size. */ if (total_section_size / single_section_size < scp->nsects) { + DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects); return LOAD_BADMACHO; } /* * Make sure the segment is page-aligned in the file. 
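 * The slice-relative fileoff is rebased by pager_offset with an explicit
 * overflow check before the alignment test.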
*/ - file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */ + if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) { + DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff); + return LOAD_BADMACHO; + } file_size = scp->filesize; #if __arm64__ if (fourk_align) { @@ -1924,6 +2129,7 @@ load_segment( * we can't mmap() it if it's not at least 4KB-aligned * in the file */ + DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); return LOAD_BADMACHO; } } else @@ -1936,6 +2142,7 @@ load_segment( * was what this process believe is the page size, so let's * fail here too for the sake of consistency. */ + DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); return LOAD_BADMACHO; } @@ -1950,6 +2157,7 @@ load_segment( if (cs_debug) { printf("section outside code signature\n"); } + DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize); return LOAD_BADMACHO; } @@ -1957,10 +2165,16 @@ load_segment( if (cs_debug) { printf("vmaddr too large\n"); } + DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset); + return LOAD_BADMACHO; + } + + if (scp->vmsize > SIZE_MAX) { + DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize); return LOAD_BADMACHO; } - vm_size = scp->vmsize; + vm_size = (size_t)scp->vmsize; if (vm_size == 0) { return LOAD_SUCCESS; @@ -1970,6 +2184,10 @@ load_segment( vm_size != 0 && (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE && (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) { + if (map == VM_MAP_NULL) { + return LOAD_SUCCESS; + } + /* * For PIE, extend page zero rather than moving it. Extending * page zero keeps early allocations from falling predictably @@ -1983,10 +2201,12 @@ load_segment( * make it completely off limits by raising the VM map's * minimum offset. 
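 * (Raising the map's minimum offset makes the range permanently
 * unmappable, unlike an ordinary VM_PROT_NONE entry.)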
*/ - vm_end = vm_offset + vm_size; + vm_end = (vm_map_offset_t)(vm_offset + vm_size); if (vm_end < vm_offset) { + DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size); return LOAD_BADMACHO; } + if (verbose) { MACHO_PRINTF(("++++++ load_segment: " "page_zero up to 0x%llx\n", @@ -2031,16 +2251,18 @@ load_segment( #endif /* __arm64__ */ if (ret != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret); return LOAD_FAILURE; } return LOAD_SUCCESS; } else { -#if CONFIG_EMBEDDED +#if !defined(XNU_TARGET_OS_OSX) /* not PAGEZERO: should not be mapped at address 0 */ if (filetype != MH_DYLINKER && scp->vmaddr == 0) { + DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr); return LOAD_BADMACHO; } -#endif /* CONFIG_EMBEDDED */ +#endif /* !defined(XNU_TARGET_OS_OSX) */ } #if __arm64__ @@ -2054,6 +2276,18 @@ load_segment( FOURK_PAGE_MASK); vm_end = vm_map_round_page(vm_offset + vm_size, FOURK_PAGE_MASK); + + if (file_offset - file_start > FOURK_PAGE_MASK || + file_end - file_offset - file_size > FOURK_PAGE_MASK) { + DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " + "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", + file_offset, + file_offset + file_size, + (uint64_t) file_start, + (uint64_t) file_end); + return LOAD_BADMACHO; + } + if (!strncmp(scp->segname, "__LINKEDIT", 11) && page_aligned(file_start) && vm_map_page_aligned(file_start, vm_map_page_mask(map)) && @@ -2076,6 +2310,17 @@ load_segment( effective_page_mask); vm_end = vm_map_round_page(vm_offset + vm_size, effective_page_mask); + + if (file_offset - file_start > effective_page_mask || + file_end - file_offset - file_size > effective_page_mask) { + DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " + "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", + file_offset, + file_offset + file_size, + (uint64_t) file_start, + (uint64_t) file_end); + return LOAD_BADMACHO; + } } if (vm_start < result->min_vm_addr) { @@ -2114,6 +2359,7 @@ load_segment( maxprot, result); if (ret) { + DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret); return LOAD_NOSPACE; } @@ -2128,6 +2374,7 @@ load_segment( ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD); if (ret != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret); return LOAD_RESOURCE; } @@ -2135,6 +2382,7 @@ load_segment( delta_size)) { (void) mach_vm_deallocate( kernel_map, tmp, delta_size); + DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size); return LOAD_FAILURE; } @@ -2154,18 +2402,28 @@ load_segment( delta_size = 0; } if (delta_size > 0) { - mach_vm_offset_t tmp; + vm_map_offset_t tmp_start; + vm_map_offset_t tmp_end; + + if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) { + DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start)); + return LOAD_NOSPACE; + } + + if (os_add_overflow(tmp_start, delta_size, &tmp_end)) { + DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size); + return LOAD_NOSPACE; + } - tmp = vm_start + (file_end - file_start); if (verbose) { MACHO_PRINTF(("++++++ load_segment: " "delta mapping vm [0x%llx:0x%llx]\n", - (uint64_t) tmp, - (uint64_t) (tmp + delta_size))); + (uint64_t) tmp_start, + (uint64_t) tmp_end)); } kr = map_segment(map, - tmp, - tmp + delta_size, + 
tmp_start, + tmp_end, MEMORY_OBJECT_CONTROL_NULL, 0, delta_size, @@ -2173,6 +2431,7 @@ load_segment( scp->maxprot, result); if (kr != KERN_SUCCESS) { + DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr); return LOAD_NOSPACE; } } @@ -2190,6 +2449,7 @@ load_segment( vm_start, vm_end - vm_start); if (ret != LOAD_SUCCESS) { + DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret); return ret; } } else { @@ -2219,6 +2479,9 @@ load_segment( } } + if (ret != LOAD_SUCCESS && verbose) { + DEBUG4K_ERROR("ret %d\n", ret); + } return ret; } @@ -2250,11 +2513,13 @@ load_return_t load_version( struct version_min_command *vmc, boolean_t *found_version_cmd, + int ip_flags __unused, load_result_t *result ) { uint32_t platform = 0; uint32_t sdk; + uint32_t min_sdk; if (vmc->cmdsize < sizeof(*vmc)) { return LOAD_BADMACHO; @@ -2264,6 +2529,7 @@ load_version( } *found_version_cmd = TRUE; sdk = vmc->sdk; + min_sdk = vmc->version; switch (vmc->cmd) { case LC_VERSION_MIN_MACOSX: platform = PLATFORM_MACOS; @@ -2300,9 +2566,11 @@ load_version( /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */ default: sdk = (uint32_t)-1; + min_sdk = (uint32_t)-1; __builtin_unreachable(); } result->ip_platform = platform; + result->lr_min_sdk = min_sdk; result->lr_sdk = sdk; return LOAD_SUCCESS; } @@ -2360,8 +2628,7 @@ load_main( } /* The stack slides down from the default location */ - result->user_stack = addr; - result->user_stack -= slide; + result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { /* Already processed LC_MAIN or LC_UNIXTHREAD */ @@ -2409,7 +2676,7 @@ setup_driver_main( } /* The stack slides down from the default location */ - result->user_stack = addr; + result->user_stack = (user_addr_t)addr; result->user_stack -= slide; if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { @@ -2436,6 +2703,7 @@ load_unixthread( struct thread_command *tcp, thread_t thread, int64_t slide, + boolean_t is_x86_64_compat_binary, load_result_t *result ) { @@ -2457,7 +2725,7 @@ load_unixthread( (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), tcp->cmdsize - sizeof(struct thread_command), - &addr, &customstack, result); + &addr, &customstack, is_x86_64_compat_binary, result); if (ret != LOAD_SUCCESS) { return ret; } @@ -2471,32 +2739,33 @@ load_unixthread( } /* The stack slides down from the default location */ - result->user_stack = addr; - result->user_stack -= slide; + result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); - ret = load_threadentry(thread, - (uint32_t *)(((vm_offset_t)tcp) + - sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - &addr); - if (ret != LOAD_SUCCESS) { - return ret; - } + { + ret = load_threadentry(thread, + (uint32_t *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &addr); + if (ret != LOAD_SUCCESS) { + return ret; + } - if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { - /* Already processed LC_MAIN or LC_UNIXTHREAD */ - return LOAD_FAILURE; - } + if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { + /* Already processed LC_MAIN or LC_UNIXTHREAD */ + return LOAD_FAILURE; + } - result->entry_point = addr; - result->entry_point += slide; + result->entry_point = (user_addr_t)addr; + 
result->entry_point += slide; - ret = load_threadstate(thread, - (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - result); - if (ret != LOAD_SUCCESS) { - return ret; + ret = load_threadstate(thread, + (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + result); + if (ret != LOAD_SUCCESS) { + return ret; + } } result->unixproc = TRUE; @@ -2540,6 +2809,10 @@ load_threadstate( * activation time where we can't bail out cleanly. */ while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; @@ -2563,6 +2836,7 @@ bad: return ret; } + static load_return_t load_threadstack( @@ -2571,6 +2845,7 @@ load_threadstack( uint32_t total_size, mach_vm_offset_t *user_stack, int *customstack, + __unused boolean_t is_x86_64_compat_binary, load_result_t *result ) { @@ -2579,7 +2854,15 @@ load_threadstack( int flavor; uint32_t stack_size; + if (total_size == 0) { + return LOAD_BADMACHO; + } + while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; if (UINT32_MAX - 2 < size || @@ -2597,10 +2880,13 @@ load_threadstack( * to the appropriate type in thread_userstack() based on * the value of flavor. */ - ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); - if (ret != KERN_SUCCESS) { - return LOAD_FAILURE; + { + ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); + if (ret != KERN_SUCCESS) { + return LOAD_FAILURE; + } } + ts += size; /* ts is a (uint32_t *) */ } return LOAD_SUCCESS; @@ -2625,6 +2911,10 @@ load_threadentry( */ *entry_point = MACH_VM_MIN_ADDRESS; while (total_size > 0) { + if (total_size < 2 * sizeof(uint32_t)) { + return LOAD_BADMACHO; + } + flavor = *ts++; size = *ts++; if (UINT32_MAX - 2 < size || @@ -2669,7 +2959,7 @@ extern int use_alt_dyld; static load_return_t load_dylinker( struct dylinker_command *lcp, - integer_t archbits, + cpu_type_t cputype, vm_map_t map, thread_t thread, int depth, @@ -2736,13 +3026,17 @@ load_dylinker( /* Allocate wad-of-data from heap to reduce excessively deep stacks */ - MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK); + dyld_data = kheap_alloc(KHEAP_TEMP, sizeof(*dyld_data), Z_WAITOK); header = &dyld_data->__header; myresult = &dyld_data->__myresult; macho_data = &dyld_data->__macho_data; - ret = get_macho_vnode(name, archbits, header, - &file_offset, &macho_size, macho_data, &vp); + { + cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK); + } + + ret = get_macho_vnode(name, cputype, header, + &file_offset, &macho_size, macho_data, &vp, imgp); if (ret) { goto novp_out; } @@ -2770,25 +3064,29 @@ load_dylinker( if (myresult->platform_binary) { result->csflags |= CS_DYLD_PLATFORM; } + } - struct vnode_attr va; - VATTR_INIT(&va); - VATTR_WANTED(&va, va_fsid64); - VATTR_WANTED(&va, va_fsid); - VATTR_WANTED(&va, va_fileid); - int error = vnode_getattr(vp, &va, imgp->ip_vfs_context); + struct vnode_attr *va; + va = kheap_alloc(KHEAP_TEMP, sizeof(*va), Z_WAITOK | Z_ZERO); + VATTR_INIT(va); + VATTR_WANTED(va, va_fsid64); + VATTR_WANTED(va, va_fsid); + VATTR_WANTED(va, va_fileid); + int error = vnode_getattr(vp, va, imgp->ip_vfs_context); if (error == 0) { - imgp->ip_dyld_fsid = vnode_get_va_fsid(&va); - imgp->ip_dyld_fsobjid = va.va_fileid; + 
imgp->ip_dyld_fsid = vnode_get_va_fsid(va); + imgp->ip_dyld_fsobjid = va->va_fileid; } vnode_put(vp); + kheap_free(KHEAP_TEMP, va, sizeof(*va)); novp_out: - FREE(dyld_data, M_TEMP); + kheap_free(KHEAP_TEMP, dyld_data, sizeof(*dyld_data)); return ret; } + static load_return_t load_code_signature( struct linkedit_data_command *lcp, @@ -2796,6 +3094,7 @@ load_code_signature( off_t macho_offset, off_t macho_size, cpu_type_t cputype, + cpu_subtype_t cpusubtype, load_result_t *result, struct image_params *imgp) { @@ -2807,10 +3106,13 @@ load_code_signature( int error; vm_size_t blob_size; uint32_t sum; + boolean_t anyCPU; addr = 0; blob = NULL; + cpusubtype &= ~CPU_SUBTYPE_MASK; + if (lcp->cmdsize != sizeof(struct linkedit_data_command)) { ret = LOAD_BADMACHO; goto out; @@ -2822,11 +3124,13 @@ load_code_signature( goto out; } - blob = ubc_cs_blob_get(vp, cputype, macho_offset); + blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset); if (blob != NULL) { - /* we already have a blob for this vnode and cputype */ - if (blob->csb_cpu_type != cputype || + /* we already have a blob for this vnode and cpu(sub)type */ + anyCPU = blob->csb_cpu_type == -1; + if ((blob->csb_cpu_type != cputype && + blob->csb_cpu_subtype != cpusubtype && !anyCPU) || blob->csb_base_offset != macho_offset) { /* the blob has changed for this vnode: fail ! */ ret = LOAD_BADMACHO; @@ -2834,16 +3138,23 @@ load_code_signature( } /* It matches the blob we want here, let's verify the version */ - if (ubc_cs_generation_check(vp) == 0) { + if (!anyCPU && ubc_cs_generation_check(vp) == 0) { /* No need to revalidate, we're good! */ ret = LOAD_SUCCESS; goto out; } /* That blob may be stale, let's revalidate. */ - error = ubc_cs_blob_revalidate(vp, blob, imgp, 0); + error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform); if (error == 0) { /* Revalidation succeeded, we're good! 
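 * (If the cached blob had been recorded as any-CPU, the real cputype and
 * cpusubtype are stamped onto it just below.)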
*/ + /* If we were revaliding a CS blob with any CPU arch we adjust it */ + if (anyCPU) { + vnode_lock_spin(vp); + blob->csb_cpu_type = cputype; + blob->csb_cpu_subtype = cpusubtype; + vnode_unlock(vp); + } ret = LOAD_SUCCESS; goto out; } @@ -2887,7 +3198,9 @@ load_code_signature( } if (ubc_cs_blob_add(vp, + result->ip_platform, cputype, + cpusubtype, macho_offset, &addr, lcp->datasize, @@ -2896,6 +3209,7 @@ load_code_signature( &blob)) { if (addr) { ubc_cs_blob_deallocate(addr, blob_size); + addr = 0; } ret = LOAD_FAILURE; goto out; @@ -2980,15 +3294,12 @@ set_code_unprotect( return LOAD_FAILURE; } - MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (vpath == NULL) { - return LOAD_FAILURE; - } + vpath = zalloc(ZV_NAMEI); len = MAXPATHLEN; error = vn_getpath(vp, vpath, &len); if (error) { - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); return LOAD_FAILURE; } @@ -3007,7 +3318,7 @@ set_code_unprotect( p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + zfree(ZV_NAMEI, vpath); if (kr) { printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n", @@ -3044,21 +3355,23 @@ set_code_unprotect( if ((seg64->fileoff <= eip->cryptoff) && (seg64->fileoff + seg64->filesize >= eip->cryptoff + eip->cryptsize)) { - map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide; + map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide); map_size = eip->cryptsize; crypto_backing_offset = macho_offset + eip->cryptoff; goto remap_now; } + break; case LC_SEGMENT: seg32 = (struct segment_command *)lcp; if ((seg32->fileoff <= eip->cryptoff) && (seg32->fileoff + seg32->filesize >= eip->cryptoff + eip->cryptsize)) { - map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide; + map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide); map_size = eip->cryptsize; crypto_backing_offset = macho_offset + eip->cryptoff; goto remap_now; } + break; } } @@ -3074,7 +3387,8 @@ remap_now: map_offset, map_offset + map_size, crypto_backing_offset, - &crypt_info); + &crypt_info, + CRYPTID_APP_ENCRYPTION); if (kr) { printf("set_code_unprotect(): mapping failed with %x\n", kr); return LOAD_PROTECT; @@ -3095,12 +3409,13 @@ static load_return_t get_macho_vnode( const char *path, - integer_t archbits, + cpu_type_t cputype, struct mach_header *mach_header, off_t *file_offset, off_t *macho_size, struct macho_data *data, - struct vnode **vpp + struct vnode **vpp, + struct image_params *imgp ) { struct vnode *vp; @@ -3198,8 +3513,8 @@ get_macho_vnode( } /* Look up our architecture in the fat file. */ - error = fatfile_getarch_with_bits(archbits, - (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch); + error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY, + (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch); if (error != LOAD_SUCCESS) { goto bad2; } @@ -3238,7 +3553,7 @@ get_macho_vnode( * required, since the dynamic linker might work, but we will * refuse to load it because of this check. */ - if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) { + if ((cpu_type_t)header->mach_header.cputype != cputype) { error = LOAD_BADARCH; goto bad2; }
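
Reviewer note: the recurring hardening pattern in this diff is replacing unchecked size arithmetic, which can wrap and defeat a later bounds test, with checked arithmetic (the new file_offset + macho_size, mach_header_sz + header->sizeofcmds, and the segment pagezero/executable/writable range computations all use the os_*_overflow() macros). A minimal standalone sketch of the idiom follows; it uses the real <os/overflow.h> macros, but the helper name slice_in_file is illustrative and not part of the patch.

#include <os/overflow.h>  /* os_add_overflow(): returns true if the sum wraps */
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative helper (not from the patch): mirrors the updated
 * load_machfile() check that the Mach-O slice
 * [file_offset, file_offset + macho_size) lies entirely within the file.
 */
static bool
slice_in_file(uint64_t file_offset, uint64_t macho_size, uint64_t file_size)
{
	uint64_t total_size;

	if (os_add_overflow(file_offset, macho_size, &total_size)) {
		return false;                   /* offset + size wrapped around */
	}
	return total_size <= file_size;         /* slice must end within the file */
}

The pre-patch check (macho_size > file_size) ignored file_offset, so a slice whose offset pushed it past end-of-file still passed; comparing the overflow-checked sum against file_size closes both that gap and the wraparound case.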