/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>
+#include <sys/codesign.h>
+#include <sys/proc_uuid_policy.h>
+#include <sys/reason.h>
+#include <sys/kdebug.h>
#include <mach/mach_types.h>
#include <mach/vm_map.h> /* vm_allocate() */
#include <machine/vmparam.h>
#include <machine/exec.h>
+#include <machine/pal_routines.h>
+#include <kern/ast.h>
#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
-#include <vm/vm_protos.h>
+#include <vm/vm_protos.h>
+#include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
+#include <os/overflow.h>
-/*
- * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
- * when KERNEL is defined.
- */
-extern pmap_t pmap_create(vm_map_size_t size, boolean_t is_64bit);
-extern void pmap_switch(pmap_t);
+#if __x86_64__
+extern int bootarg_no32exec; /* bsd_init.c */
+#endif
/*
- * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
+ * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
* when KERNEL is defined.
*/
-extern kern_return_t thread_setstatus(thread_t thread, int flavor,
- thread_state_t tstate,
- mach_msg_type_number_t count);
-
-extern kern_return_t thread_state_initialize(thread_t thread);
-
+extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size,
+ boolean_t is_64bit);
/* XXX should have prototypes in a shared header file */
extern int get_map_nentries(vm_map_t);
-extern kern_return_t thread_userstack(thread_t, int, thread_state_t,
- unsigned int, mach_vm_offset_t *, int *);
-extern kern_return_t thread_entrypoint(thread_t, int, thread_state_t,
- unsigned int, mach_vm_offset_t *);
extern kern_return_t memory_object_signed(memory_object_control_t control,
boolean_t is_signed);
/* An empty load_result_t */
-static load_result_t load_result_null = {
+static const load_result_t load_result_null = {
.mach_header = MACH_VM_MIN_ADDRESS,
.entry_point = MACH_VM_MIN_ADDRESS,
.user_stack = MACH_VM_MIN_ADDRESS,
+ .user_stack_size = 0,
+ .user_stack_alloc = MACH_VM_MIN_ADDRESS,
+ .user_stack_alloc_size = 0,
.all_image_info_addr = MACH_VM_MIN_ADDRESS,
.all_image_info_size = 0,
.thread_count = 0,
.unixproc = 0,
.dynlinker = 0,
- .customstack = 0,
+ .needs_dynlinker = 0,
+ .validentry = 0,
+ .using_lcmain = 0,
+ .is_64bit_addr = 0,
+ .is_64bit_data = 0,
.csflags = 0,
- .uuid = { 0 }
+ .has_pagezero = 0,
+ .uuid = { 0 },
+ .min_vm_addr = MACH_VM_MAX_ADDRESS,
+ .max_vm_addr = MACH_VM_MIN_ADDRESS,
+ .cs_end_offset = 0,
+ .threadstate = NULL,
+ .threadstate_sz = 0
};
/*
off_t file_offset,
off_t macho_size,
int depth,
- load_result_t *result
+ int64_t slide,
+ int64_t dyld_slide,
+ load_result_t *result,
+ load_result_t *binresult,
+ struct image_params *imgp
);
static load_return_t
off_t macho_size,
struct vnode *vp,
vm_map_t map,
+ int64_t slide,
+ load_result_t *result
+);
+
+static load_return_t
+load_uuid(
+ struct uuid_command *uulp,
+ char *command_end,
load_result_t *result
);
-int load_code_signature(
+static load_return_t
+load_code_signature(
struct linkedit_data_command *lcp,
struct vnode *vp,
off_t macho_offset,
off_t macho_size,
cpu_type_t cputype,
- load_result_t *result);
+ load_result_t *result,
+ struct image_params *imgp);
#if CONFIG_CODE_DECRYPTION
static load_return_t
struct encryption_info_command *lcp,
caddr_t addr,
vm_map_t map,
- struct vnode *vp);
+ int64_t slide,
+ struct vnode *vp,
+ off_t macho_offset,
+ cpu_type_t cputype,
+ cpu_subtype_t cpusubtype);
#endif
-static load_return_t
-load_unixthread(
- struct thread_command *tcp,
- thread_t thread,
- load_result_t *result
+static
+load_return_t
+load_main(
+ struct entry_point_command *epc,
+ thread_t thread,
+ int64_t slide,
+ load_result_t *result
);
static load_return_t
-load_thread(
+load_unixthread(
struct thread_command *tcp,
thread_t thread,
+ int64_t slide,
load_result_t *result
);
load_threadstate(
thread_t thread,
uint32_t *ts,
- uint32_t total_size
+ uint32_t total_size,
+ load_result_t *
);
static load_return_t
load_threadstack(
thread_t thread,
- uint32_t *ts,
- uint32_t total_size,
- user_addr_t *user_stack,
- int *customstack
+ uint32_t *ts,
+ uint32_t total_size,
+ mach_vm_offset_t *user_stack,
+ int *customstack,
+ load_result_t *result
);
static load_return_t
load_dylinker(
struct dylinker_command *lcp,
integer_t archbits,
- vm_map_t map,
- thread_t thread,
- int depth,
- load_result_t *result,
- boolean_t is_64bit
+ vm_map_t map,
+ thread_t thread,
+ int depth,
+ int64_t slide,
+ load_result_t *result,
+ struct image_params *imgp
);
+struct macho_data;
+
static load_return_t
get_macho_vnode(
- char *path,
+ const char *path,
integer_t archbits,
struct mach_header *mach_header,
off_t *file_offset,
off_t *macho_size,
+ struct macho_data *macho_data,
struct vnode **vpp
);
static void
note_all_image_info_section(const struct segment_command_64 *scp,
boolean_t is64, size_t section_size, const void *sections,
- load_result_t *result)
+ int64_t slide, load_result_t *result)
{
const union {
struct section s32;
struct section_64 s64;
} *sectionp;
unsigned int i;
+
if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
return;
for (i = 0; i < scp->nsects; ++i) {
sizeof(sectionp->s64.sectname))) {
result->all_image_info_addr =
is64 ? sectionp->s64.addr : sectionp->s32.addr;
+ result->all_image_info_addr += slide;
result->all_image_info_size =
is64 ? sectionp->s64.size : sectionp->s32.size;
return;
}
}
+#if __arm64__
+/*
+ * Allow bypassing some security rules (hard pagezero, no write+execute)
+ * in exchange for better binary compatibility for legacy apps built
+ * before 16KB-alignment was enforced.
+ */
+const int fourk_binary_compatibility_unsafe = TRUE;
+const int fourk_binary_compatibility_allow_wx = FALSE;
+#endif /* __arm64__ */
load_return_t
load_machfile(
struct image_params *imgp,
struct mach_header *header,
thread_t thread,
- vm_map_t new_map,
+ vm_map_t *mapp,
load_result_t *result
)
{
struct vnode *vp = imgp->ip_vp;
off_t file_offset = imgp->ip_arch_offset;
off_t macho_size = imgp->ip_arch_size;
-
+ off_t file_size = imgp->ip_vattr->va_data_size;
pmap_t pmap = 0; /* protected by create_map */
vm_map_t map;
- vm_map_t old_map;
- task_t old_task = TASK_NULL; /* protected by create_map */
load_result_t myresult;
load_return_t lret;
- boolean_t create_map = FALSE;
- int spawn = (imgp->ip_flags & IMGPF_SPAWN);
+ boolean_t enforce_hard_pagezero = TRUE;
+ int in_exec = (imgp->ip_flags & IMGPF_EXEC);
task_t task = current_task();
+ proc_t p = current_proc();
+ int64_t aslr_page_offset = 0;
+ int64_t dyld_aslr_page_offset = 0;
+ int64_t aslr_section_size = 0;
+ int64_t aslr_section_offset = 0;
+ kern_return_t kret;
+
+ if (macho_size > file_size) {
+ return(LOAD_BADMACHO);
+ }
+
+ result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
+ result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
- if (new_map == VM_MAP_NULL) {
- create_map = TRUE;
- old_task = current_task();
+ task_t ledger_task;
+ if (imgp->ip_new_thread) {
+ ledger_task = get_threadtask(imgp->ip_new_thread);
+ } else {
+ ledger_task = task;
}
+ pmap = pmap_create(get_task_ledger(ledger_task),
+ (vm_map_size_t) 0,
+ result->is_64bit_addr);
+ map = vm_map_create(pmap,
+ 0,
+ vm_compute_max_offset(result->is_64bit_addr),
+ TRUE);
- /*
- * If we are spawning, we have created backing objects for the process
- * already, which include non-lazily creating the task map. So we
- * are going to switch out the task map with one appropriate for the
- * bitness of the image being loaded.
+#if defined(__arm64__)
+ if (result->is_64bit_addr) {
+ /* enforce 16KB alignment of VM map entries */
+ vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
+ } else {
+ vm_map_set_page_shift(map, page_shift_user32);
+ }
+#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
+ /* enforce 16KB alignment for watch targets with new ABI */
+ vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
+#endif /* __arm64__ */
+
+#ifndef CONFIG_ENFORCE_SIGNED_CODE
+ /* This turns off faulting for executable pages, which allows
+ * Code Signing Enforcement to be circumvented. The per-process
+ * flag (CS_ENFORCEMENT) is not set yet, but we can use the
+ * global flag.
*/
- if (spawn) {
- create_map = TRUE;
- old_task = get_threadtask(thread);
+ if ( !cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION) ) {
+ vm_map_disable_NX(map);
+ // TODO: Message Trace or log that this is happening
}
+#endif
- if (create_map) {
- pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
- map = vm_map_create(pmap,
- 0,
- vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
- TRUE);
- } else
- map = new_map;
+ /* Forcibly disallow execution from data pages even if the arch
+ * normally permits it. */
+ if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
+ vm_map_disallow_data_exec(map);
+
+ /*
+ * Compute a random offset for ASLR, and an independent random offset for dyld.
+ */
+ if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
+ vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
+ aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
+
+ aslr_page_offset = random();
+ aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
+ aslr_page_offset <<= vm_map_page_shift(map);
+
+ dyld_aslr_page_offset = random();
+ dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
+ dyld_aslr_page_offset <<= vm_map_page_shift(map);
+
+ aslr_page_offset += aslr_section_offset;
+ }
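+ /*
+ * Illustrative arithmetic (a sketch; the actual bounds come from
+ * vm_map_get_max_aslr_slide_pages() and the map's page shift):
+ * with 16KB pages (shift 14) and, say, 256 candidate slide pages,
+ *
+ * aslr_page_offset = (random() % 256) << 14; // 0 .. 0x3FC000
+ *
+ * so both the main binary and dyld slide by a page-aligned amount;
+ * the main binary's slide additionally includes the section offset
+ * computed above, while dyld's slide is drawn independently.
+ */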
- if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
- vm_map_disable_NX(map);
-
if (!result)
result = &myresult;
*result = load_result_null;
+ /*
+ * re-set the bitness on the load result since we cleared the load result above.
+ */
+ result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
+ result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
+
lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
- 0, result);
+ 0, aslr_page_offset, dyld_aslr_page_offset, result,
+ NULL, imgp);
if (lret != LOAD_SUCCESS) {
- if (create_map) {
- vm_map_deallocate(map); /* will lose pmap reference too */
- }
+ vm_map_deallocate(map); /* will lose pmap reference too */
return(lret);
}
+#if __x86_64__
+ /*
+ * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
+ */
+ if (!result->is_64bit_addr) {
+ enforce_hard_pagezero = FALSE;
+ }
+
+ /*
+ * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
+ * to the start address for "anywhere" memory allocations.
+ */
+#define VM_MAP_HIGH_START_BITS_COUNT 8
+#define VM_MAP_HIGH_START_BITS_SHIFT 27
+ if (result->is_64bit_addr &&
+ (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
+ int random_bits;
+ vm_map_offset_t high_start;
+
+ random_bits = random();
+ random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT)-1;
+ high_start = (((vm_map_offset_t)random_bits)
+ << VM_MAP_HIGH_START_BITS_SHIFT);
+ vm_map_set_high_start(map, high_start);
+ }
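+ /*
+ * Worked example: 8 random bits shifted left by 27 give one of 256
+ * values {0, 0x8000000, 0x10000000, ..., 0x7F8000000}, so "anywhere"
+ * VM allocations start at a randomized base in 128MB steps, up to
+ * roughly 32GB into the address space.
+ */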
+#endif /* __x86_64__ */
+
/*
- * For 64-bit users, check for presence of a 4GB page zero
- * which will enable the kernel to share the user's address space
- * and hence avoid TLB flushes on kernel entry/exit
+ * Check to see if the page zero is enforced by the map->min_offset.
*/
+ if (enforce_hard_pagezero &&
+ (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
+#if __arm64__
+ if (!result->is_64bit_addr && /* not 64-bit address space */
+ !(header->flags & MH_PIE) && /* not PIE */
+ (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
+ PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
+ result->has_pagezero && /* has a "soft" page zero */
+ fourk_binary_compatibility_unsafe) {
+ /*
+ * For backwards compatibility of "4K" apps on
+ * a 16K system, do not enforce a hard page zero...
+ */
+ } else
+#endif /* __arm64__ */
+ {
+ vm_map_deallocate(map); /* will lose pmap reference too */
+ return (LOAD_BADMACHO);
+ }
+ }
- if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
- vm_map_has_4GB_pagezero(map))
- vm_map_set_4GB_pagezero(map);
+ vm_commit_pagezero_status(map);
/*
- * Commit to new map.
- *
- * Swap the new map for the old, which consumes our new map
- * reference but each leaves us responsible for the old_map reference.
- * That lets us get off the pmap associated with it, and
- * then we can release it.
+ * If this is an exec, then we are going to destroy the old
+ * task, and it's correct to halt it; if it's spawn, the
+ * task is not yet running, and it makes no sense.
*/
-
- if (create_map) {
+ if (in_exec) {
/*
- * If this is an exec, then we are going to destory the old
- * task, and it's correct to halt it; if it's spawn, the
- * task is not yet running, and it makes no sense.
+ * Mark the task as halting and start the other
+ * threads towards terminating themselves. Then
+ * make sure any threads waiting for a process
+ * transition get informed that we are committed to
+ * this transition, and then finally complete the
+ * task halting (wait for threads and then cleanup
+ * task resources).
+ *
+ * NOTE: task_start_halt() makes sure that no new
+ * threads are created in the task during the transition.
+ * We need to mark the workqueue as exiting before we
+ * wait for threads to terminate (at the end of which
+ * we no longer have a prohibition on thread creation).
+ *
+ * Finally, clean up any lingering workqueue data structures
+ * that may have been left behind by the workqueue threads
+ * as they exited (and then clean up the work queue itself).
*/
- if (!spawn) {
- /*
- * Mark the task as halting and start the other
- * threads towards terminating themselves. Then
- * make sure any threads waiting for a process
- * transition get informed that we are committed to
- * this transition, and then finally complete the
- * task halting (wait for threads and then cleanup
- * task resources).
- */
- task_start_halt(task);
- proc_transcommit(current_proc(), 0);
- task_complete_halt(task);
+ kret = task_start_halt(task);
+ if (kret != KERN_SUCCESS) {
+ vm_map_deallocate(map); /* will lose pmap reference too */
+ return (LOAD_FAILURE);
}
- old_map = swap_task_map(old_task, thread, map);
- vm_map_clear_4GB_pagezero(old_map);
- /* XXX L4 : For spawn the current task isn't running... */
- if (!spawn)
- pmap_switch(pmap); /* Make sure we are using the new pmap */
- vm_map_deallocate(old_map);
+ proc_transcommit(p, 0);
+ workq_mark_exiting(p);
+ task_complete_halt(task);
+ workq_exit(p);
+
+ /*
+ * Roll up accounting info to new task. The roll up is done after
+ * task_complete_halt to make sure the thread accounting info is
+ * rolled up to current_task.
+ */
+ task_rollup_accounting_info(get_threadtask(thread), task);
}
+ *mapp = map;
+
+#ifdef CONFIG_32BIT_TELEMETRY
+ if (!result->is_64bit_data) {
+ /*
+ * This may not need to be an AST; we merely need to ensure that
+ * we gather telemetry at the point where all of the information
+ * that we want has been added to the process.
+ */
+ task_set_32bit_log_flag(get_threadtask(thread));
+ act_set_astbsd(thread);
+ }
+#endif /* CONFIG_32BIT_TELEMETRY */
+
return(LOAD_SUCCESS);
}
+int macho_printf = 0;
+#define MACHO_PRINTF(args) \
+ do { \
+ if (macho_printf) { \
+ printf args; \
+ } \
+ } while (0)
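+/*
+ * Usage sketch: the double parentheses pass an entire printf argument
+ * list through the macro's single parameter, e.g.
+ *
+ * MACHO_PRINTF(("load_segment: mapping at 0x%llx\n", (uint64_t) addr));
+ *
+ * prints only when the macho_printf global is non-zero (e.g. toggled
+ * from a kernel debugger).
+ */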
+
/*
* The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself. We read into the kernel buffer the
* commands section, and then parse it in order to parse the mach-o file
* format load_command segment(s). We are only interested in a subset of
- * the total set of possible commands.
+ * the total set of possible commands. If "map"==VM_MAP_NULL or
+ * "thread"==THREAD_NULL, do not make permament VM modifications,
+ * just preflight the parse.
*/
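/*
 * A minimal sketch of that preflight convention (argument order per the
 * declaration earlier in this file; "preflight_result" is a hypothetical
 * local):
 *
 *	load_result_t preflight_result = load_result_null;
 *	lret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
 *	    file_offset, macho_size, 0, 0, 0, &preflight_result,
 *	    NULL, imgp);
 *
 * This walks the load commands, filling in min/max VM addresses and
 * other metadata, without entering mappings into any map.
 */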
static
load_return_t
off_t file_offset,
off_t macho_size,
int depth,
- load_result_t *result
+ int64_t aslr_offset,
+ int64_t dyld_aslr_offset,
+ load_result_t *result,
+ load_result_t *binresult,
+ struct image_params *imgp
)
{
uint32_t ncmds;
struct load_command *lcp;
struct dylinker_command *dlp = 0;
- struct uuid_command *uulp = 0;
integer_t dlarchbits = 0;
void * control;
load_return_t ret = LOAD_SUCCESS;
- caddr_t addr;
- void * kl_addr;
- vm_size_t size,kl_size;
+ void * addr;
+ vm_size_t alloc_size, cmds_size;
size_t offset;
size_t oldoffset; /* for overflow check */
int pass;
proc_t p = current_proc(); /* XXXX */
int error;
- int resid=0;
- task_t task;
+ int resid = 0;
size_t mach_header_sz = sizeof(struct mach_header);
boolean_t abi64;
boolean_t got_code_signatures = FALSE;
+ boolean_t found_header_segment = FALSE;
+ boolean_t found_xhdr = FALSE;
+ int64_t slide = 0;
+ boolean_t dyld_no_load_addr = FALSE;
+ boolean_t is_dyld = FALSE;
+ vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+#if __arm64__
+ uint32_t pagezero_end = 0;
+ uint32_t executable_end = 0;
+ uint32_t writable_start = 0;
+ vm_map_size_t effective_page_size;
+
+ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
+#endif /* __arm64__ */
if (header->magic == MH_MAGIC_64 ||
header->magic == MH_CIGAM_64) {
/*
* Break infinite recursion
*/
- if (depth > 6) {
+ if (depth > 1) {
return(LOAD_FAILURE);
}
- task = (task_t)get_threadtask(thread);
-
depth++;
/*
* Check to see if right machine type.
*/
- if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
- !grade_binary(header->cputype,
+ if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
+ !grade_binary(header->cputype,
header->cpusubtype & ~CPU_SUBTYPE_MASK))
return(LOAD_BADARCH);
-
+
+#if __x86_64__
+ if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) {
+ return(LOAD_BADARCH_X86);
+ }
+#endif
+
abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
-
+
switch (header->filetype) {
-
- case MH_OBJECT:
+
case MH_EXECUTE:
- case MH_PRELOAD:
if (depth != 1) {
return (LOAD_FAILURE);
}
- break;
-
- case MH_FVMLIB:
- case MH_DYLIB:
- if (depth == 1) {
+#if CONFIG_EMBEDDED
+ if (header->flags & MH_DYLDLINK) {
+ /* Check properties of dynamic executables */
+ if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
+ return (LOAD_FAILURE);
+ }
+ result->needs_dynlinker = TRUE;
+ } else {
+ /* Check properties of static executables (disallowed except for development) */
+#if !(DEVELOPMENT || DEBUG)
return (LOAD_FAILURE);
+#endif
}
- break;
+#endif /* CONFIG_EMBEDDED */
+ break;
case MH_DYLINKER:
if (depth != 2) {
return (LOAD_FAILURE);
}
+ is_dyld = TRUE;
break;
default:
*/
control = ubc_getobject(vp, UBC_FLAGS_NONE);
- /*
- * Map portion that must be accessible directly into
- * kernel's map.
- */
- if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
- return(LOAD_BADMACHO);
-
- /*
- * Round size of Mach-O commands up to page boundry.
- */
- size = round_page(mach_header_sz + header->sizeofcmds);
- if (size <= 0)
- return(LOAD_BADMACHO);
+ /* ensure header + sizeofcmds falls within the file */
+ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
+ (off_t)cmds_size > macho_size ||
+ round_page_overflow(cmds_size, &alloc_size)) {
+ return LOAD_BADMACHO;
+ }
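+ /*
+ * os_add_overflow(a, b, &r), from <os/overflow.h>, stores a + b in r
+ * and returns true iff the addition wrapped. Combined with the
+ * macho_size bound and round_page_overflow(), a crafted sizeofcmds is
+ * rejected here instead of under-allocating the command buffer below.
+ */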
/*
* Map the load commands into kernel memory.
*/
- addr = 0;
- kl_size = size;
- kl_addr = kalloc(size);
- addr = (caddr_t)kl_addr;
- if (addr == NULL)
- return(LOAD_NOSPACE);
+ addr = kalloc(alloc_size);
+ if (addr == NULL) {
+ return LOAD_NOSPACE;
+ }
- error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
+ error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
if (error) {
- if (kl_addr )
- kfree(kl_addr, kl_size);
- return(LOAD_IOERROR);
+ kfree(addr, alloc_size);
+ return LOAD_IOERROR;
}
-
+
+ if (resid) {
+ /* We must be able to read in as much as the mach_header indicated */
+ kfree(addr, alloc_size);
+ return LOAD_BADMACHO;
+ }
+
+ /*
+ * For PIE and dyld, slide everything by the ASLR offset.
+ */
+ if ((header->flags & MH_PIE) || is_dyld) {
+ slide = aslr_offset;
+ }
+
/*
- * Scan through the commands, processing each one as necessary.
+ * Scan through the commands, processing each one as necessary.
+ * We parse in four passes through the headers:
+ * 0: determine if TEXT and DATA boundary can be page-aligned
+ * 1: thread state, uuid, code signature
+ * 2: segments
+ * 3: dyld, encryption, check entry point
*/
- for (pass = 1; pass <= 2; pass++) {
+
+ boolean_t slide_realign = FALSE;
+#if __arm64__
+ if (!abi64) {
+ slide_realign = TRUE;
+ }
+#endif
+
+ for (pass = 0; pass <= 3; pass++) {
+
+ if (pass == 0 && !slide_realign && !is_dyld) {
+ /* if we don't need to realign the slide or determine dyld's load
+ * address, pass 0 can be skipped */
+ continue;
+ } else if (pass == 1) {
+#if __arm64__
+ boolean_t is_pie;
+ int64_t adjust;
+
+ is_pie = ((header->flags & MH_PIE) != 0);
+ if (pagezero_end != 0 &&
+ pagezero_end < effective_page_size) {
+ /* need at least 1 page for PAGEZERO */
+ adjust = effective_page_size;
+ MACHO_PRINTF(("pagezero boundary at "
+ "0x%llx; adjust slide from "
+ "0x%llx to 0x%llx%s\n",
+ (uint64_t) pagezero_end,
+ slide,
+ slide + adjust,
+ (is_pie
+ ? ""
+ : " BUT NO PIE ****** :-(")));
+ if (is_pie) {
+ slide += adjust;
+ pagezero_end += adjust;
+ executable_end += adjust;
+ writable_start += adjust;
+ }
+ }
+ if (pagezero_end != 0) {
+ result->has_pagezero = TRUE;
+ }
+ if (executable_end == writable_start &&
+ (executable_end & effective_page_mask) != 0 &&
+ (executable_end & FOURK_PAGE_MASK) == 0) {
+
+ /*
+ * The TEXT/DATA boundary is 4K-aligned but
+ * not page-aligned. Adjust the slide to make
+ * it page-aligned and avoid having a page
+ * with both write and execute permissions.
+ */
+ adjust =
+ (effective_page_size -
+ (executable_end & effective_page_mask));
+ MACHO_PRINTF(("page-unaligned X-W boundary at "
+ "0x%llx; adjust slide from "
+ "0x%llx to 0x%llx%s\n",
+ (uint64_t) executable_end,
+ slide,
+ slide + adjust,
+ (is_pie
+ ? ""
+ : " BUT NO PIE ****** :-(")));
+ if (is_pie)
+ slide += adjust;
+ }
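+ /*
+ * Worked example: with 16KB pages (mask 0x3fff), an X/W boundary
+ * at 0x5000 is 4KB- but not 16KB-aligned, so
+ * adjust = 0x4000 - (0x5000 & 0x3fff) = 0x3000; sliding a PIE by
+ * that extra 0x3000 moves the boundary to 0x8000, a 16KB page
+ * boundary, leaving no page both writable and executable.
+ */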
+#endif /* __arm64__ */
+
+ if (dyld_no_load_addr && binresult) {
+ /*
+ * The dyld Mach-O does not specify a load address. Try to locate
+ * it right after the main binary. If binresult == NULL, load
+ * directly to the given slide.
+ */
+ slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask);
+ }
+ }
+
+ /*
+ * Check that the entry point is contained in an executable segment.
+ */
+ if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) {
+ thread_state_initialize(thread);
+ ret = LOAD_FAILURE;
+ break;
+ }
+
+ /*
+ * Check that some segment maps the start of the mach-o file, which is
+ * needed by the dynamic loader to read the mach headers, etc.
+ */
+ if ((pass == 3) && (found_header_segment == FALSE)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+
/*
* Loop through each of the load_commands indicated by the
* Mach-O header; if an absurd value is provided, we just
*/
offset = mach_header_sz;
ncmds = header->ncmds;
+
while (ncmds--) {
+
+ /* ensure enough space for a minimal load command */
+ if (offset + sizeof(struct load_command) > cmds_size) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+
/*
* Get a pointer to the command.
*/
lcp = (struct load_command *)(addr + offset);
oldoffset = offset;
- offset += lcp->cmdsize;
/*
 * Perform prevalidation of the struct load_command: reject
 * values which straddle or exist past the reserved section at the
* start of the image.
*/
- if (oldoffset > offset ||
- lcp->cmdsize < sizeof(struct load_command) ||
- offset > header->sizeofcmds + mach_header_sz) {
+ if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
+ lcp->cmdsize < sizeof(struct load_command) ||
+ offset > cmds_size) {
ret = LOAD_BADMACHO;
break;
}
* intervention is required.
*/
switch(lcp->cmd) {
- case LC_SEGMENT:
- case LC_SEGMENT_64:
- if (pass != 1)
+ case LC_SEGMENT: {
+ struct segment_command *scp = (struct segment_command *) lcp;
+ if (pass == 0) {
+ if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
+ dyld_no_load_addr = TRUE;
+ if (!slide_realign) {
+ /* got what we need, bail early on pass 0 */
+ continue;
+ }
+ }
+
+#if __arm64__
+ assert(!abi64);
+
+ if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
+ /* PAGEZERO */
+ if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+ if (scp->initprot & VM_PROT_EXECUTE) {
+ /* TEXT */
+ if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+ if (scp->initprot & VM_PROT_WRITE) {
+ /* DATA */
+ if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+#endif /* __arm64__ */
+ break;
+ }
+
+ if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
+ found_xhdr = TRUE;
+ }
+
+ if (pass != 2)
+ break;
+
+ if (abi64) {
+ /*
+ * Having an LC_SEGMENT command for the
+ * wrong ABI is invalid <rdar://problem/11021230>
+ */
+ ret = LOAD_BADMACHO;
break;
+ }
+
ret = load_segment(lcp,
- header->filetype,
- control,
- file_offset,
- macho_size,
- vp,
- map,
- result);
+ header->filetype,
+ control,
+ file_offset,
+ macho_size,
+ vp,
+ map,
+ slide,
+ result);
+ if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
+ /* Enforce a single segment mapping offset zero, with R+X
+ * protection. */
+ if (found_header_segment ||
+ ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ found_header_segment = TRUE;
+ }
+
break;
- case LC_THREAD:
+ }
+ case LC_SEGMENT_64: {
+ struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
+
+ if (pass == 0) {
+ if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
+ dyld_no_load_addr = TRUE;
+ if (!slide_realign) {
+ /* got what we need, bail early on pass 0 */
+ continue;
+ }
+ }
+ }
+
+ if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
+ found_xhdr = TRUE;
+ }
+
if (pass != 2)
break;
- ret = load_thread((struct thread_command *)lcp,
- thread,
- result);
+
+ if (!abi64) {
+ /*
+ * Having an LC_SEGMENT_64 command for the
+ * wrong ABI is invalid <rdar://problem/11021230>
+ */
+ ret = LOAD_BADMACHO;
+ break;
+ }
+
+ ret = load_segment(lcp,
+ header->filetype,
+ control,
+ file_offset,
+ macho_size,
+ vp,
+ map,
+ slide,
+ result);
+
+ if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
+ /* Enforce a single segment mapping offset zero, with R+X
+ * protection. */
+ if (found_header_segment ||
+ ((scp64->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ found_header_segment = TRUE;
+ }
+
break;
+ }
case LC_UNIXTHREAD:
- if (pass != 2)
+ if (pass != 1)
break;
ret = load_unixthread(
(struct thread_command *) lcp,
- thread,
+ thread,
+ slide,
+ result);
+ break;
+ case LC_MAIN:
+ if (pass != 1)
+ break;
+ if (depth != 1)
+ break;
+ ret = load_main(
+ (struct entry_point_command *) lcp,
+ thread,
+ slide,
result);
break;
case LC_LOAD_DYLINKER:
- if (pass != 2)
+ if (pass != 3)
break;
if ((depth == 1) && (dlp == 0)) {
dlp = (struct dylinker_command *)lcp;
}
break;
case LC_UUID:
- if (pass == 2 && depth == 1) {
- uulp = (struct uuid_command *)lcp;
- memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
+ if (pass == 1 && depth == 1) {
+ ret = load_uuid((struct uuid_command *) lcp,
+ (char *)addr + cmds_size,
+ result);
}
break;
case LC_CODE_SIGNATURE:
/* CODE SIGNING */
- if (pass != 2)
+ if (pass != 1)
break;
/* pager -> uip ->
load signatures & store in uip
file_offset,
macho_size,
header->cputype,
- (depth == 1) ? result : NULL);
+ result,
+ imgp);
if (ret != LOAD_SUCCESS) {
printf("proc %d: load code signature error %d "
"for file \"%s\"\n",
p->p_pid, ret, vp->v_name);
- ret = LOAD_SUCCESS; /* ignore error */
+ /*
+ * Allow injections to be ignored on devices w/o enforcement enabled
+ */
+ if (!cs_process_global_enforcement())
+ ret = LOAD_SUCCESS; /* ignore error */
+
} else {
got_code_signatures = TRUE;
}
+
+ if (got_code_signatures) {
+ unsigned tainted = CS_VALIDATE_TAINTED;
+ boolean_t valid = FALSE;
+ vm_size_t off = 0;
+
+
+ if (cs_debug > 10)
+ printf("validating initial pages of %s\n", vp->v_name);
+
+ while (off < alloc_size && ret == LOAD_SUCCESS) {
+ tainted = CS_VALIDATE_TAINTED;
+
+ valid = cs_validate_range(vp,
+ NULL,
+ file_offset + off,
+ addr + off,
+ PAGE_SIZE,
+ &tainted);
+ if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
+ if (cs_debug)
+ printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
+ vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
+ if (cs_process_global_enforcement() ||
+ (result->csflags & (CS_HARD|CS_KILL|CS_ENFORCEMENT))) {
+ ret = LOAD_FAILURE;
+ }
+ result->csflags &= ~CS_VALID;
+ }
+ off += PAGE_SIZE;
+ }
+ }
+
break;
#if CONFIG_CODE_DECRYPTION
case LC_ENCRYPTION_INFO:
- if (pass != 2)
+ case LC_ENCRYPTION_INFO_64:
+ if (pass != 3)
break;
ret = set_code_unprotect(
(struct encryption_info_command *) lcp,
- addr, map, vp);
+ addr, map, slide, vp, file_offset,
+ header->cputype, header->cpusubtype);
if (ret != LOAD_SUCCESS) {
+ os_reason_t load_failure_reason = OS_REASON_NULL;
printf("proc %d: set_code_unprotect() error %d "
"for file \"%s\"\n",
p->p_pid, ret, vp->v_name);
- /* Don't let the app run if it's
+ /*
+ * Don't let the app run if it's
* encrypted but we failed to set up the
- * decrypter */
- psignal(p, SIGKILL);
+ * decrypter. If the keys are missing it will
+ * return LOAD_DECRYPTFAIL.
+ */
+ if (ret == LOAD_DECRYPTFAIL) {
+ /* failed to load due to missing FP keys */
+ proc_lock(p);
+ p->p_lflag |= P_LTERM_DECRYPTFAIL;
+ proc_unlock(p);
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
+ p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
+ load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
+ } else {
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
+ p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
+ load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
+ }
+
+ assert(load_failure_reason != OS_REASON_NULL);
+ psignal_with_reason(p, SIGKILL, load_failure_reason);
}
break;
#endif
+#if __arm64__
+ case LC_VERSION_MIN_IPHONEOS: {
+ struct version_min_command *vmc;
+
+ if (pass != 1) {
+ break;
+ }
+ vmc = (struct version_min_command *) lcp;
+ if (vmc->sdk < (12 << 16)) {
+ /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
+ result->legacy_footprint = TRUE;
+ }
+// printf("FBDP %s:%d vp %p (%s) sdk %d.%d.%d -> legacy_footprint=%d\n", __FUNCTION__, __LINE__, vp, vp->v_name, (vmc->sdk >> 16), ((vmc->sdk & 0xFF00) >> 8), (vmc->sdk & 0xFF), result->legacy_footprint);
+ break;
+ }
+#endif /* __arm64__ */
default:
/* Other commands are ignored by the kernel */
ret = LOAD_SUCCESS;
if (ret != LOAD_SUCCESS)
break;
}
- if (ret == LOAD_SUCCESS) {
- if (! got_code_signatures) {
- struct cs_blob *blob;
- /* no embedded signatures: look for detached ones */
- blob = ubc_cs_blob_get(vp, -1, file_offset);
- if (blob != NULL) {
- /* get flags to be applied to the process */
- result->csflags |= blob->csb_flags;
- }
- }
- if (dlp != 0)
- ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);
+ if (ret == LOAD_SUCCESS) {
+ if(!got_code_signatures && cs_process_global_enforcement()) {
+ ret = LOAD_FAILURE;
+ }
- if(depth == 1) {
- if (result->thread_count == 0) {
+ /* Make sure if we need dyld, we got it */
+ if (result->needs_dynlinker && !dlp) {
ret = LOAD_FAILURE;
- } else if ( abi64 ) {
-#ifdef __ppc__
- /* Map in 64-bit commpage */
+ }
+
+ if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
/*
- * PPC51: ppc64 is limited to 51-bit addresses.
- * Memory above that limit is handled specially
- * at the pmap level.
- *
- * <rdar://6640492> -- wrong task for vfork()/spawn()
+ * load the dylinker, and slide it by the independent DYLD ASLR
+ * offset regardless of the PIE-ness of the main binary.
*/
- pmap_map_sharedpage(current_task(), get_map_pmap(map));
-#endif /* __ppc__ */
+ ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
+ dyld_aslr_offset, result, imgp);
}
+
+ if ((ret == LOAD_SUCCESS) && (depth == 1)) {
+ if (result->thread_count == 0) {
+ ret = LOAD_FAILURE;
+ }
+#if CONFIG_ENFORCE_SIGNED_CODE
+ if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
+ ret = LOAD_FAILURE;
+ }
+#endif
}
}
- if (kl_addr )
- kfree(kl_addr, kl_size);
+ if (ret == LOAD_BADMACHO && found_xhdr) {
+ ret = LOAD_BADMACHO_UPX;
+ }
+
+ kfree(addr, alloc_size);
- return(ret);
+ return ret;
}
#if CONFIG_CODE_DECRYPTION
-#define APPLE_UNPROTECTED_HEADER_SIZE (3 * PAGE_SIZE_64)
+#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
static load_return_t
-unprotect_segment(
+unprotect_dsmos_segment(
uint64_t file_off,
uint64_t file_size,
struct vnode *vp,
crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
crypt_info.crypt_ops = (void *)0x2e69cf40;
+ vm_map_offset_t crypto_backing_offset;
+ crypto_backing_offset = -1; /* i.e. use map entry's offset */
+#if VM_MAP_DEBUG_APPLE_PROTECT
+ if (vm_map_debug_apple_protect) {
+ struct proc *p;
+ p = current_proc();
+ printf("APPLE_PROTECT: %d[%s] map %p "
+ "[0x%llx:0x%llx] %s(%s)\n",
+ p->p_pid, p->p_comm, map,
+ (uint64_t) map_addr,
+ (uint64_t) (map_addr + map_size),
+ __FUNCTION__, vp->v_name);
+ }
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+
+ /* The DSMOS pager can only be used by apple signed code */
+ struct cs_blob * blob = csvnode_get_blob(vp, file_off);
+ if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
+ return LOAD_FAILURE;
+ }
+
kr = vm_map_apple_protected(map,
map_addr,
map_addr + map_size,
+ crypto_backing_offset,
&crypt_info);
}
}
#else /* CONFIG_CODE_DECRYPTION */
static load_return_t
-unprotect_segment(
+unprotect_dsmos_segment(
__unused uint64_t file_off,
__unused uint64_t file_size,
__unused struct vnode *vp,
}
#endif /* CONFIG_CODE_DECRYPTION */
+
+/*
+ * map_segment:
+ * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
+ * page size) issues.
+ *
+ * The mapping might result in 1, 2 or 3 map entries:
+ * 1. for the first page, which could overlap with the previous
+ * mapping,
+ * 2. for the center (if applicable),
+ * 3. for the last page, which could overlap with the next mapping.
+ *
+ * For each of those map entries, we might have to interpose a
+ * "fourk_pager" to deal with mis-alignment wrt the system page size,
+ * either in the mapping address and/or size or the file offset and/or
+ * size.
+ * The "fourk_pager" itself would be mapped with proper alignment
+ * wrt the system page size and would then be populated with the
+ * information about the intended mapping, with a "4KB" granularity.
+ */
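+/*
+ * Worked example (assuming file_end - file_start == vm_end - vm_start):
+ * on a 16KB-page kernel, a 4KB-aligned 32-bit segment with
+ * vm_start = 0x7000 and vm_end = 0x15000 yields:
+ * 1. head [0x7000:0x8000) via the fourk_pager (0x8000 is the next
+ * 16KB boundary),
+ * 2. middle [0x8000:0x14000) as a regular mapping if vm_start and
+ * file_start agree modulo the page mask, else via the fourk_pager,
+ * 3. tail [0x14000:0x15000) via the fourk_pager.
+ */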
+static kern_return_t
+map_segment(
+ vm_map_t map,
+ vm_map_offset_t vm_start,
+ vm_map_offset_t vm_end,
+ memory_object_control_t control,
+ vm_map_offset_t file_start,
+ vm_map_offset_t file_end,
+ vm_prot_t initprot,
+ vm_prot_t maxprot,
+ load_result_t *result)
+{
+ vm_map_offset_t cur_offset, cur_start, cur_end;
+ kern_return_t ret;
+ vm_map_offset_t effective_page_mask;
+ vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;
+
+ if (vm_end < vm_start ||
+ file_end < file_start) {
+ return LOAD_BADMACHO;
+ }
+ if (vm_end == vm_start ||
+ file_end == file_start) {
+ /* nothing to map... */
+ return LOAD_SUCCESS;
+ }
+
+ effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ if (vm_map_page_aligned(vm_start, effective_page_mask) &&
+ vm_map_page_aligned(vm_end, effective_page_mask) &&
+ vm_map_page_aligned(file_start, effective_page_mask) &&
+ vm_map_page_aligned(file_end, effective_page_mask)) {
+ /* all page-aligned and map-aligned: proceed */
+ } else {
+#if __arm64__
+ /* use an intermediate "4K" pager */
+ vmk_flags.vmkf_fourk = TRUE;
+#else /* __arm64__ */
+ panic("map_segment: unexpected mis-alignment "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
+ (uint64_t) vm_start,
+ (uint64_t) vm_end,
+ (uint64_t) file_start,
+ (uint64_t) file_end);
+#endif /* __arm64__ */
+ }
+
+ cur_offset = 0;
+ cur_start = vm_start;
+ cur_end = vm_start;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
+ /* one 4K pager for the 1st page */
+ cur_end = vm_map_round_page(cur_start, effective_page_mask);
+ if (cur_end > vm_end) {
+ cur_end = vm_start + (file_end - file_start);
+ }
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ if (vm_map_round_page(cur_end, effective_page_mask) >=
+ vm_map_trunc_page(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* no middle */
+ } else {
+ cur_start = cur_end;
+ if ((vm_start & effective_page_mask) !=
+ (file_start & effective_page_mask)) {
+ /* one 4K pager for the middle */
+ cur_vmk_flags = vmk_flags;
+ } else {
+ /* regular mapping for the middle */
+ cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ }
+
+#if CONFIG_EMBEDDED
+ (void) result;
+#else /* CONFIG_EMBEDDED */
+ /*
+ * This process doesn't have its new csflags (from
+ * the image being loaded) yet, so tell VM to override the
+ * current process's CS_ENFORCEMENT for this mapping.
+ */
+ if (result->csflags & CS_ENFORCEMENT) {
+ cur_vmk_flags.vmkf_cs_enforcement = TRUE;
+ } else {
+ cur_vmk_flags.vmkf_cs_enforcement = FALSE;
+ }
+ cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
+#endif /* CONFIG_EMBEDDED */
+
+ cur_end = vm_map_trunc_page(vm_start + (file_end -
+ file_start),
+ effective_page_mask);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ cur_start = cur_end;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* one 4K pager for the last page */
+ cur_end = vm_start + (file_end - file_start);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+done:
+ assert(cur_end >= vm_start + (file_end - file_start));
+ return LOAD_SUCCESS;
+}
+
static
load_return_t
load_segment(
- struct load_command *lcp,
- uint32_t filetype,
- void * control,
- off_t pager_offset,
- off_t macho_size,
- struct vnode *vp,
- vm_map_t map,
- load_result_t *result
-)
+ struct load_command *lcp,
+ uint32_t filetype,
+ void * control,
+ off_t pager_offset,
+ off_t macho_size,
+ struct vnode *vp,
+ vm_map_t map,
+ int64_t slide,
+ load_result_t *result)
{
struct segment_command_64 segment_command, *scp;
kern_return_t ret;
- mach_vm_offset_t map_addr, map_offset;
- mach_vm_size_t map_size, seg_size, delta_size;
+ vm_map_size_t delta_size;
vm_prot_t initprot;
vm_prot_t maxprot;
size_t segment_command_size, total_section_size,
single_section_size;
-
+ vm_map_offset_t file_offset, file_size;
+ vm_map_offset_t vm_offset, vm_size;
+ vm_map_offset_t vm_start, vm_end, vm_end_aligned;
+ vm_map_offset_t file_start, file_end;
+ kern_return_t kr;
+ boolean_t verbose;
+ vm_map_size_t effective_page_size;
+ vm_map_offset_t effective_page_mask;
+#if __arm64__
+ vm_map_kernel_flags_t vmk_flags;
+ boolean_t fourk_align;
+#endif /* __arm64__ */
+
+ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
+ effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+
+ verbose = FALSE;
if (LC_SEGMENT_64 == lcp->cmd) {
segment_command_size = sizeof(struct segment_command_64);
single_section_size = sizeof(struct section_64);
- scp = (struct segment_command_64 *)lcp;
+#if __arm64__
+ /* 64-bit binary: should already be 16K-aligned */
+ fourk_align = FALSE;
+#endif /* __arm64__ */
} else {
segment_command_size = sizeof(struct segment_command);
single_section_size = sizeof(struct section);
- scp = &segment_command;
- widen_segment_command((struct segment_command *)lcp, scp);
+#if __arm64__
+ /* 32-bit binary: might need 4K-alignment */
+ if (effective_page_size != FOURK_PAGE_SIZE) {
+ /* not using 4K page size: need fourk_pager */
+ fourk_align = TRUE;
+ verbose = TRUE;
+ } else {
+ /* using 4K page size: no need for re-alignment */
+ fourk_align = FALSE;
+ }
+#endif /* __arm64__ */
}
if (lcp->cmdsize < segment_command_size)
return (LOAD_BADMACHO);
total_section_size = lcp->cmdsize - segment_command_size;
+ if (LC_SEGMENT_64 == lcp->cmd) {
+ scp = (struct segment_command_64 *)lcp;
+ } else {
+ scp = &segment_command;
+ widen_segment_command((struct segment_command *)lcp, scp);
+ }
+
+ if (verbose) {
+ MACHO_PRINTF(("+++ load_segment %s "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
+ "prot %d/%d flags 0x%x\n",
+ scp->segname,
+ (uint64_t)(slide + scp->vmaddr),
+ (uint64_t)(slide + scp->vmaddr + scp->vmsize),
+ pager_offset + scp->fileoff,
+ pager_offset + scp->fileoff + scp->filesize,
+ scp->initprot,
+ scp->maxprot,
+ scp->flags));
+ }
+
/*
* Make sure what we get from the file is really ours (as specified
* by macho_size).
*/
if (scp->fileoff + scp->filesize < scp->fileoff ||
- scp->fileoff + scp->filesize > (uint64_t)macho_size)
+ scp->fileoff + scp->filesize > (uint64_t)macho_size) {
return (LOAD_BADMACHO);
+ }
/*
* Ensure that the number of sections specified would fit
* within the load command size.
*/
- if (total_section_size / single_section_size < scp->nsects)
+ if (total_section_size / single_section_size < scp->nsects) {
return (LOAD_BADMACHO);
+ }
/*
* Make sure the segment is page-aligned in the file.
*/
- if ((scp->fileoff & PAGE_MASK_64) != 0)
+ file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
+ file_size = scp->filesize;
+#if __arm64__
+ if (fourk_align) {
+ if ((file_offset & FOURK_PAGE_MASK) != 0) {
+ /*
+ * we can't mmap() it if it's not at least 4KB-aligned
+ * in the file
+ */
+ return LOAD_BADMACHO;
+ }
+ } else
+#endif /* __arm64__ */
+ if ((file_offset & PAGE_MASK_64) != 0 ||
+ /* we can't mmap() it if it's not page-aligned in the file */
+ (file_offset & vm_map_page_mask(map)) != 0) {
+ /*
+ * The 1st test would have failed if the system's page size
+ * was what this process believes is the page size, so let's
+ * fail here too for the sake of consistency.
+ */
return (LOAD_BADMACHO);
+ }
/*
- * Round sizes to page size.
+ * If we have a code signature attached for this slice,
+ * require that the segments are within the signed part
+ * of the file.
*/
- seg_size = round_page_64(scp->vmsize);
- map_size = round_page_64(scp->filesize);
- map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
- if (seg_size == 0)
- return (KERN_SUCCESS);
- /* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
- if (map_addr == 0 &&
- map_size == 0 &&
- seg_size != 0 &&
- scp->cmd == LC_SEGMENT_64 &&
+ if (result->cs_end_offset &&
+ result->cs_end_offset < (off_t)scp->fileoff &&
+ result->cs_end_offset - scp->fileoff < scp->filesize)
+ {
+ if (cs_debug)
+ printf("section outside code signature\n");
+ return LOAD_BADMACHO;
+ }
+
+ vm_offset = scp->vmaddr + slide;
+ vm_size = scp->vmsize;
+
+ if (vm_size == 0)
+ return (LOAD_SUCCESS);
+ if (scp->vmaddr == 0 &&
+ file_size == 0 &&
+ vm_size != 0 &&
(scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
(scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
+ /*
+ * For PIE, extend page zero rather than moving it. Extending
+ * page zero keeps early allocations from falling predictably
+ * between the end of page zero and the beginning of the first
+ * slid segment.
+ */
/*
* This is a "page zero" segment: it starts at address 0,
 * is not mapped from the binary file and is not accessible.
 * User space should never be able to access that memory, so
 * make it completely off limits by raising the VM map's
* minimum offset.
*/
- ret = vm_map_raise_min_offset(map, seg_size);
+ vm_end = vm_offset + vm_size;
+ if (vm_end < vm_offset) {
+ return (LOAD_BADMACHO);
+ }
+ if (verbose) {
+ MACHO_PRINTF(("++++++ load_segment: "
+ "page_zero up to 0x%llx\n",
+ (uint64_t) vm_end));
+ }
+#if __arm64__
+ if (fourk_align) {
+ /* raise min_offset as much as page-alignment allows */
+ vm_end_aligned = vm_map_trunc_page(vm_end,
+ effective_page_mask);
+ } else
+#endif /* __arm64__ */
+ {
+ vm_end = vm_map_round_page(vm_end,
+ PAGE_MASK_64);
+ vm_end_aligned = vm_end;
+ }
+ ret = vm_map_raise_min_offset(map,
+ vm_end_aligned);
+#if __arm64__
+ if (ret == 0 &&
+ vm_end > vm_end_aligned) {
+ /* use fourk_pager to map the rest of pagezero */
+ assert(fourk_align);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_fourk = TRUE;
+ ret = vm_map_enter_mem_object(
+ map,
+ &vm_end_aligned,
+ vm_end - vm_end_aligned,
+ (mach_vm_offset_t) 0, /* mask */
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0,
+ FALSE, /* copy */
+ (scp->initprot & VM_PROT_ALL),
+ (scp->maxprot & VM_PROT_ALL),
+ VM_INHERIT_DEFAULT);
+ }
+#endif /* __arm64__ */
+
if (ret != KERN_SUCCESS) {
return (LOAD_FAILURE);
}
return (LOAD_SUCCESS);
+ } else {
+#if CONFIG_EMBEDDED
+ /* not PAGEZERO: should not be mapped at address 0 */
+ if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
+ return LOAD_BADMACHO;
+ }
+#endif /* CONFIG_EMBEDDED */
+ }
+
+#if __arm64__
+ if (fourk_align) {
+ /* 4K-align */
+ file_start = vm_map_trunc_page(file_offset,
+ FOURK_PAGE_MASK);
+ file_end = vm_map_round_page(file_offset + file_size,
+ FOURK_PAGE_MASK);
+ vm_start = vm_map_trunc_page(vm_offset,
+ FOURK_PAGE_MASK);
+ vm_end = vm_map_round_page(vm_offset + vm_size,
+ FOURK_PAGE_MASK);
+ if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
+ page_aligned(file_start) &&
+ vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
+ page_aligned(vm_start) &&
+ vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
+ /* XXX last segment: ignore mis-aligned tail */
+ file_end = vm_map_round_page(file_end,
+ effective_page_mask);
+ vm_end = vm_map_round_page(vm_end,
+ effective_page_mask);
+ }
+ } else
+#endif /* __arm64__ */
+ {
+ file_start = vm_map_trunc_page(file_offset,
+ effective_page_mask);
+ file_end = vm_map_round_page(file_offset + file_size,
+ effective_page_mask);
+ vm_start = vm_map_trunc_page(vm_offset,
+ effective_page_mask);
+ vm_end = vm_map_round_page(vm_offset + vm_size,
+ effective_page_mask);
}
- map_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
+ if (vm_start < result->min_vm_addr)
+ result->min_vm_addr = vm_start;
+ if (vm_end > result->max_vm_addr)
+ result->max_vm_addr = vm_end;
+
+ if (map == VM_MAP_NULL)
+ return (LOAD_SUCCESS);
- if (map_size > 0) {
+ if (vm_size > 0) {
initprot = (scp->initprot) & VM_PROT_ALL;
maxprot = (scp->maxprot) & VM_PROT_ALL;
/*
* Map a copy of the file into the address space.
*/
- ret = vm_map_enter_mem_object_control(map,
- &map_addr, map_size, (mach_vm_offset_t)0,
- VM_FLAGS_FIXED, control, map_offset, TRUE,
- initprot, maxprot,
- VM_INHERIT_DEFAULT);
- if (ret != KERN_SUCCESS)
- return (LOAD_NOSPACE);
-
+ if (verbose) {
+ MACHO_PRINTF(("++++++ load_segment: "
+ "mapping at vm [0x%llx:0x%llx] of "
+ "file [0x%llx:0x%llx]\n",
+ (uint64_t) vm_start,
+ (uint64_t) vm_end,
+ (uint64_t) file_start,
+ (uint64_t) file_end));
+ }
+ ret = map_segment(map,
+ vm_start,
+ vm_end,
+ control,
+ file_start,
+ file_end,
+ initprot,
+ maxprot,
+ result);
+ if (ret) {
+ return LOAD_NOSPACE;
+ }
+
+#if FIXME
/*
* If the file didn't end on a page boundary,
* we need to zero the leftover.
*/
delta_size = map_size - scp->filesize;
-#if FIXME
if (delta_size > 0) {
mach_vm_offset_t tmp;
- ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
- if (ret != KERN_SUCCESS)
+ ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
+ if (ret != KERN_SUCCESS) {
return(LOAD_RESOURCE);
+ }
if (copyout(tmp, map_addr + scp->filesize,
delta_size)) {
* than the size from the file, we need to allocate
* zero fill memory for the rest.
*/
- delta_size = seg_size - map_size;
+ if ((vm_end - vm_start) > (file_end - file_start)) {
+ delta_size = (vm_end - vm_start) - (file_end - file_start);
+ } else {
+ delta_size = 0;
+ }
if (delta_size > 0) {
- mach_vm_offset_t tmp = map_addr + map_size;
-
- ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
- NULL, 0, FALSE,
- scp->initprot, scp->maxprot,
- VM_INHERIT_DEFAULT);
- if (ret != KERN_SUCCESS)
+ mach_vm_offset_t tmp;
+
+ tmp = vm_start + (file_end - file_start);
+ if (verbose) {
+ MACHO_PRINTF(("++++++ load_segment: "
+ "delta mapping vm [0x%llx:0x%llx]\n",
+ (uint64_t) tmp,
+ (uint64_t) (tmp + delta_size)));
+ }
+ kr = map_segment(map,
+ tmp,
+ tmp + delta_size,
+ MEMORY_OBJECT_CONTROL_NULL,
+ 0,
+ delta_size,
+ scp->initprot,
+ scp->maxprot,
+ result);
+ if (kr != KERN_SUCCESS) {
return(LOAD_NOSPACE);
+ }
}
if ( (scp->fileoff == 0) && (scp->filesize != 0) )
- result->mach_header = map_addr;
+ result->mach_header = vm_offset;
if (scp->flags & SG_PROTECTED_VERSION_1) {
- ret = unprotect_segment(scp->fileoff,
- scp->filesize,
- vp,
- pager_offset,
- map,
- map_addr,
- map_size);
+ ret = unprotect_dsmos_segment(file_start,
+ file_end - file_start,
+ vp,
+ pager_offset,
+ map,
+ vm_start,
+ vm_end - vm_start);
+ if (ret != LOAD_SUCCESS) {
+ return ret;
+ }
} else {
ret = LOAD_SUCCESS;
}
- if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
- result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
+
+ if (LOAD_SUCCESS == ret &&
+ filetype == MH_DYLINKER &&
+ result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
note_all_image_info_section(scp,
- LC_SEGMENT_64 == lcp->cmd, single_section_size,
- (const char *)lcp + segment_command_size, result);
+ LC_SEGMENT_64 == lcp->cmd,
+ single_section_size,
+ ((const char *)lcp +
+ segment_command_size),
+ slide,
+ result);
+ }
+
+ if (result->entry_point != MACH_VM_MIN_ADDRESS) {
+ if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
+ if ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) == (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ result->validentry = 1;
+ } else {
+ /* right range but wrong protections, unset if previously validated */
+ result->validentry = 0;
+ }
+ }
+ }
return ret;
}
static
load_return_t
-load_thread(
- struct thread_command *tcp,
- thread_t thread,
+load_uuid(
+ struct uuid_command *uulp,
+ char *command_end,
load_result_t *result
)
{
- kern_return_t kret;
- load_return_t lret;
- task_t task;
- int customstack=0;
+ /*
+ * We need to check the following for this command:
+ * - The command size should be at least the size of struct uuid_command
+ * - The UUID part of the command should be completely within the mach-o header
+ */
- if (tcp->cmdsize < sizeof(*tcp))
- return (LOAD_BADMACHO);
- task = get_threadtask(thread);
+ if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
+ (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
+ return (LOAD_BADMACHO);
+ }
+
+ memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
+ return (LOAD_SUCCESS);
+}
- /* if count is 0; same as thread */
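+/*
+ * For reference, the LC_MAIN payload parsed below, as declared in
+ * <mach-o/loader.h>:
+ *
+ * struct entry_point_command {
+ * uint32_t cmd; // LC_MAIN
+ * uint32_t cmdsize; // 24
+ * uint64_t entryoff; // file (__TEXT) offset of main()
+ * uint64_t stacksize; // if not zero, initial stack size
+ * };
+ *
+ * The kernel consumes only stacksize here; entryoff is interpreted by
+ * dyld, which is why load_main() forces needs_dynlinker.
+ */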
+static
+load_return_t
+load_main(
+ struct entry_point_command *epc,
+ thread_t thread,
+ int64_t slide,
+ load_result_t *result
+)
+{
+ mach_vm_offset_t addr;
+ kern_return_t ret;
+
+ if (epc->cmdsize < sizeof(*epc))
+ return (LOAD_BADMACHO);
if (result->thread_count != 0) {
- kret = thread_create(task, &thread);
- if (kret != KERN_SUCCESS)
- return(LOAD_RESOURCE);
- thread_deallocate(thread);
- }
-
- lret = load_threadstate(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command));
- if (lret != LOAD_SUCCESS)
- return (lret);
-
- if (result->thread_count == 0) {
- lret = load_threadstack(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command),
- &result->user_stack,
- &customstack);
- if (customstack)
- result->customstack = 1;
- else
- result->customstack = 0;
-
- if (lret != LOAD_SUCCESS)
- return(lret);
-
- lret = load_threadentry(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command),
- &result->entry_point);
- if (lret != LOAD_SUCCESS)
- return(lret);
+ return (LOAD_FAILURE);
}
+
+ if (thread == THREAD_NULL)
+ return (LOAD_SUCCESS);
+
/*
- * Resume thread now, note that this means that the thread
- * commands should appear after all the load commands to
- * be sure they don't reference anything not yet mapped.
+ * LC_MAIN specifies stack size but not location.
+ * Add guard page to allocation size (MAXSSIZ includes guard page).
*/
- else
- thread_resume(thread);
-
+ if (epc->stacksize) {
+ if (os_add_overflow(epc->stacksize, 4*PAGE_SIZE, &result->user_stack_size)) {
+ /*
+ * We are going to immediately throw away this result, but we want
+ * to make sure we aren't loading a value dangerously close to
+ * overflowing, since this will have a guard page added to it
+ * and be rounded to page boundaries.
+ */
+ return LOAD_BADMACHO;
+ }
+ result->user_stack_size = epc->stacksize;
+ if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
+ return LOAD_BADMACHO;
+ }
+ } else {
+ result->user_stack_alloc_size = MAXSSIZ;
+ }
+
+ /* use default location for stack */
+ ret = thread_userstackdefault(&addr, result->is_64bit_addr);
+ if (ret != KERN_SUCCESS)
+ return(LOAD_FAILURE);
+
+ /* The stack slides down from the default location */
+ result->user_stack = addr;
+ result->user_stack -= slide;
+
+ if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
+ /* Already processed LC_MAIN or LC_UNIXTHREAD */
+ return (LOAD_FAILURE);
+ }
+
+ /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
+ result->needs_dynlinker = TRUE;
+ result->using_lcmain = TRUE;
+
+ ret = thread_state_initialize( thread );
+ if (ret != KERN_SUCCESS) {
+ return(LOAD_FAILURE);
+ }
+
+ result->unixproc = TRUE;
result->thread_count++;
return(LOAD_SUCCESS);
}
+
static
load_return_t
load_unixthread(
struct thread_command *tcp,
thread_t thread,
+ int64_t slide,
load_result_t *result
)
{
load_return_t ret;
int customstack = 0;
-
+ mach_vm_offset_t addr;
if (tcp->cmdsize < sizeof(*tcp))
return (LOAD_BADMACHO);
if (result->thread_count != 0) {
-printf("load_unixthread: already have a thread!");
return (LOAD_FAILURE);
}
+
+ if (thread == THREAD_NULL)
+ return (LOAD_SUCCESS);
ret = load_threadstack(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command),
- &result->user_stack,
- &customstack);
+ (uint32_t *)(((vm_offset_t)tcp) +
+ sizeof(struct thread_command)),
+ tcp->cmdsize - sizeof(struct thread_command),
+ &addr, &customstack, result);
if (ret != LOAD_SUCCESS)
return(ret);
- if (customstack)
- result->customstack = 1;
- else
- result->customstack = 0;
+ /* LC_UNIXTHREAD optionally specifies stack size and location */
+
+ if (!customstack) {
+ result->user_stack_alloc_size = MAXSSIZ;
+ }
+
+ /* The stack slides down from the default location */
+ result->user_stack = addr;
+ result->user_stack -= slide;
+
ret = load_threadentry(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command),
- &result->entry_point);
+ (uint32_t *)(((vm_offset_t)tcp) +
+ sizeof(struct thread_command)),
+ tcp->cmdsize - sizeof(struct thread_command),
+ &addr);
if (ret != LOAD_SUCCESS)
return(ret);
+ if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
+ /* Already processed LC_MAIN or LC_UNIXTHREAD */
+ return (LOAD_FAILURE);
+ }
+
+ result->entry_point = addr;
+ result->entry_point += slide;
+
ret = load_threadstate(thread,
- (uint32_t *)(((vm_offset_t)tcp) +
- sizeof(struct thread_command)),
- tcp->cmdsize - sizeof(struct thread_command));
+ (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
+ tcp->cmdsize - sizeof(struct thread_command),
+ result);
if (ret != LOAD_SUCCESS)
return (ret);
load_threadstate(
thread_t thread,
uint32_t *ts,
- uint32_t total_size
+ uint32_t total_size,
+ load_result_t *result
)
{
- kern_return_t ret;
uint32_t size;
int flavor;
uint32_t thread_size;
+ uint32_t *local_ts = NULL;
+ uint32_t local_ts_size = 0;
+	load_return_t ret;
+
+ (void)thread;
+
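+	/*
+	 * Snapshot the thread state into kernel memory: result->threadstate
+	 * must outlive the load-command buffer that "ts" points into, since
+	 * the state is applied to the thread only after parsing completes.
+	 */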
+ if (total_size > 0) {
+ local_ts_size = total_size;
+ local_ts = kalloc(local_ts_size);
+ if (local_ts == NULL) {
+ return LOAD_FAILURE;
+ }
+ memcpy(local_ts, ts, local_ts_size);
+ ts = local_ts;
+ }
- ret = thread_state_initialize( thread );
- if (ret != KERN_SUCCESS) {
- return(LOAD_FAILURE);
- }
-
/*
- * Set the new thread state; iterate through the state flavors in
- * the mach-o file.
+ * Validate the new thread state; iterate through the state flavors in
+ * the Mach-O file.
+ * XXX: we should validate the machine state here, to avoid failing at
+ * activation time where we can't bail out cleanly.
*/
while (total_size > 0) {
flavor = *ts++;
size = *ts++;
- if (UINT32_MAX-2 < size ||
- UINT32_MAX/sizeof(uint32_t) < size+2)
- return (LOAD_BADMACHO);
- thread_size = (size+2)*sizeof(uint32_t);
- if (thread_size > total_size)
- return(LOAD_BADMACHO);
- total_size -= thread_size;
- /*
- * Third argument is a kernel space pointer; it gets cast
- * to the appropriate type in machine_thread_set_state()
- * based on the value of flavor.
- */
- ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
- if (ret != KERN_SUCCESS) {
- return(LOAD_FAILURE);
+
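+		/*
+		 * thread_size = (size + 2) * sizeof(uint32_t), computed with
+		 * overflow checks; the "+ 2" covers the flavor and size words
+		 * consumed above.
+		 */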
+ if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
+ os_sub_overflow(total_size, thread_size, &total_size)) {
+ ret = LOAD_BADMACHO;
+ goto bad;
}
+
ts += size; /* ts is a (uint32_t *) */
}
- return(LOAD_SUCCESS);
+
+ result->threadstate = local_ts;
+ result->threadstate_sz = local_ts_size;
+ return LOAD_SUCCESS;
+
+bad:
+ if (local_ts) {
+ kfree(local_ts, local_ts_size);
+ }
+ return ret;
}
static
load_return_t
load_threadstack(
- thread_t thread,
- uint32_t *ts,
- uint32_t total_size,
- user_addr_t *user_stack,
- int *customstack
+ thread_t thread,
+ uint32_t *ts,
+ uint32_t total_size,
+ mach_vm_offset_t *user_stack,
+ int *customstack,
+ load_result_t *result
)
{
kern_return_t ret;
* to the appropriate type in thread_userstack() based on
* the value of flavor.
*/
- ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
+ ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
if (ret != KERN_SUCCESS) {
return(LOAD_FAILURE);
}
return(LOAD_SUCCESS);
}
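+
+/*
+ * Scratch buffers for get_macho_vnode(): grouped so callers can
+ * heap-allocate them instead of putting a 512-byte header union and a
+ * nameidata on the kernel stack.
+ */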
+struct macho_data {
+ struct nameidata __nid;
+ union macho_vnode_header {
+ struct mach_header mach_header;
+ struct fat_header fat_header;
+ char __pad[512];
+ } __header;
+};
-static
-load_return_t
+#define DEFAULT_DYLD_PATH "/usr/lib/dyld"
+
+#if (DEVELOPMENT || DEBUG)
+extern char dyld_alt_path[];
+extern int use_alt_dyld;
+#endif
+
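+/* Prefer the 64-bit fsid when the filesystem supplies one. */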
+static uint64_t get_va_fsid(struct vnode_attr *vap)
+{
+ if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
+ return *(uint64_t *)&vap->va_fsid64;
+ } else {
+ return vap->va_fsid;
+ }
+}
+
+static load_return_t
load_dylinker(
struct dylinker_command *lcp,
integer_t archbits,
vm_map_t map,
thread_t thread,
int depth,
+ int64_t slide,
load_result_t *result,
- boolean_t is_64bit
+ struct image_params *imgp
)
{
- char *name;
- char *p;
+ const char *name;
struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
- struct mach_header header;
+ struct mach_header *header;
off_t file_offset = 0; /* set by get_macho_vnode() */
off_t macho_size = 0; /* set by get_macho_vnode() */
- vm_map_t copy_map;
- load_result_t myresult;
+ load_result_t *myresult;
kern_return_t ret;
- vm_map_copy_t tmp;
- mach_vm_offset_t dyl_start, map_addr;
- mach_vm_size_t dyl_length;
+ struct macho_data *macho_data;
+ struct {
+ struct mach_header __header;
+ load_result_t __myresult;
+ struct macho_data __macho_data;
+ } *dyld_data;
+
+ if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize)
+ return LOAD_BADMACHO;
- if (lcp->cmdsize < sizeof(*lcp))
- return (LOAD_BADMACHO);
+ name = (const char *)lcp + lcp->name.offset;
- name = (char *)lcp + lcp->name.offset;
- /*
- * Check for a proper null terminated string.
- */
- p = name;
- do {
- if (p >= (char *)lcp + lcp->cmdsize)
- return(LOAD_BADMACHO);
- } while (*p++);
+ /* Check for a proper null terminated string. */
+ size_t maxsz = lcp->cmdsize - lcp->name.offset;
+ size_t namelen = strnlen(name, maxsz);
+ if (namelen >= maxsz) {
+ return LOAD_BADMACHO;
+ }
- ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
- if (ret)
- return (ret);
-
- myresult = load_result_null;
+#if (DEVELOPMENT || DEBUG)
+
+ /*
+ * rdar://23680808
+ * If an alternate dyld has been specified via boot args, check
+ * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
+ * executable and redirect the kernel to load that linker.
+ */
+
+ if (use_alt_dyld) {
+ int policy_error;
+ uint32_t policy_flags = 0;
+ int32_t policy_gencount = 0;
+
+ policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
+ if (policy_error == 0) {
+ if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
+ name = dyld_alt_path;
+ }
+ }
+ }
+#endif
- /*
- * First try to map dyld in directly. This should work most of
- * the time since there shouldn't normally be something already
- * mapped to its address.
- */
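+	/* RELEASE kernels accept only the default dyld path. */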
+#if !(DEVELOPMENT || DEBUG)
+ if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
+ return (LOAD_BADMACHO);
+ }
+#endif
- ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
- depth, &myresult);
+ /* Allocate wad-of-data from heap to reduce excessively deep stacks */
- /*
- * If it turned out something was in the way, then we'll take
- * take this longer path to map dyld into a temporary map and
- * copy it into destination map at a different address.
- */
+ MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
+ header = &dyld_data->__header;
+ myresult = &dyld_data->__myresult;
+ macho_data = &dyld_data->__macho_data;
- if (ret == LOAD_NOSPACE) {
+ ret = get_macho_vnode(name, archbits, header,
+ &file_offset, &macho_size, macho_data, &vp);
+ if (ret)
+ goto novp_out;
- /*
- * Load the Mach-O.
- * Use a temporary map to do the work.
- */
- copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
- is_64bit),
- get_map_min(map), get_map_max(map), TRUE);
- if (VM_MAP_NULL == copy_map) {
- ret = LOAD_RESOURCE;
- goto out;
- }
-
- myresult = load_result_null;
+ *myresult = load_result_null;
+ myresult->is_64bit_addr = result->is_64bit_addr;
+ myresult->is_64bit_data = result->is_64bit_data;
- ret = parse_machfile(vp, copy_map, thread, &header,
- file_offset, macho_size,
- depth, &myresult);
-
- if (ret) {
- vm_map_deallocate(copy_map);
- goto out;
- }
-
- if (get_map_nentries(copy_map) > 0) {
-
- dyl_start = mach_get_vm_start(copy_map);
- dyl_length = mach_get_vm_end(copy_map) - dyl_start;
-
- map_addr = dyl_start;
- ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
-
- if (ret != KERN_SUCCESS) {
- vm_map_deallocate(copy_map);
- ret = LOAD_NOSPACE;
- goto out;
-
- }
+ ret = parse_machfile(vp, map, thread, header, file_offset,
+ macho_size, depth, slide, 0, myresult, result, imgp);
- ret = vm_map_copyin(copy_map,
- (vm_map_address_t)dyl_start,
- (vm_map_size_t)dyl_length,
- TRUE, &tmp);
- if (ret != KERN_SUCCESS) {
- (void) vm_map_remove(map,
- vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + dyl_length),
- VM_MAP_NO_FLAGS);
- vm_map_deallocate(copy_map);
- goto out;
- }
-
- ret = vm_map_copy_overwrite(map,
- (vm_map_address_t)map_addr,
- tmp, FALSE);
- if (ret != KERN_SUCCESS) {
- vm_map_copy_discard(tmp);
- (void) vm_map_remove(map,
- vm_map_trunc_page(map_addr),
- vm_map_round_page(map_addr + dyl_length),
- VM_MAP_NO_FLAGS);
- vm_map_deallocate(copy_map);
- goto out;
- }
-
- if (map_addr != dyl_start) {
- myresult.entry_point += (map_addr - dyl_start);
- myresult.all_image_info_addr +=
- (map_addr - dyl_start);
- }
- } else {
- ret = LOAD_FAILURE;
+ if (ret == LOAD_SUCCESS) {
+ if (result->threadstate) {
+ /* don't use the app's threadstate if we have a dyld */
+ kfree(result->threadstate, result->threadstate_sz);
}
+ result->threadstate = myresult->threadstate;
+ result->threadstate_sz = myresult->threadstate_sz;
- vm_map_deallocate(copy_map);
- }
-
- if (ret == LOAD_SUCCESS) {
result->dynlinker = TRUE;
- result->entry_point = myresult.entry_point;
- result->all_image_info_addr = myresult.all_image_info_addr;
- result->all_image_info_size = myresult.all_image_info_size;
+ result->entry_point = myresult->entry_point;
+ result->validentry = myresult->validentry;
+ result->all_image_info_addr = myresult->all_image_info_addr;
+ result->all_image_info_size = myresult->all_image_info_size;
+ if (myresult->platform_binary) {
+ result->csflags |= CS_DYLD_PLATFORM;
+ }
}
-out:
+
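+	/*
+	 * Capture dyld's filesystem identity for the exec layer
+	 * (assumption: consumed later for tracing/diagnostics).
+	 */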
+ struct vnode_attr va;
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_fsid64);
+ VATTR_WANTED(&va, va_fsid);
+ VATTR_WANTED(&va, va_fileid);
+ int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
+ if (error == 0) {
+ imgp->ip_dyld_fsid = get_va_fsid(&va);
+ imgp->ip_dyld_fsobjid = va.va_fileid;
+ }
+
vnode_put(vp);
+novp_out:
+ FREE(dyld_data, M_TEMP);
return (ret);
}
-int
+static load_return_t
load_code_signature(
struct linkedit_data_command *lcp,
struct vnode *vp,
off_t macho_offset,
off_t macho_size,
cpu_type_t cputype,
- load_result_t *result)
+ load_result_t *result,
+ struct image_params *imgp)
{
int ret;
kern_return_t kr;
goto out;
}
- blob = ubc_cs_blob_get(vp, cputype, -1);
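+	/*
+	 * Blobs are keyed by the slice's file offset, so each architecture
+	 * in a fat binary gets its own signature blob.
+	 */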
+ blob = ubc_cs_blob_get(vp, cputype, macho_offset);
+
if (blob != NULL) {
/* we already have a blob for this vnode and cputype */
- if (blob->csb_cpu_type == cputype &&
- blob->csb_base_offset == macho_offset &&
- blob->csb_mem_size == lcp->datasize) {
- /* it matches the blob we want here: we're done */
- ret = LOAD_SUCCESS;
- } else {
+ if (blob->csb_cpu_type != cputype ||
+ blob->csb_base_offset != macho_offset) {
/* the blob has changed for this vnode: fail ! */
ret = LOAD_BADMACHO;
+ goto out;
}
- goto out;
+
+	/* It matches the blob we want here; verify that it is still current. */
+ if (ubc_cs_generation_check(vp) == 0) {
+ /* No need to revalidate, we're good! */
+ ret = LOAD_SUCCESS;
+ goto out;
+ }
+
+ /* That blob may be stale, let's revalidate. */
+ error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
+ if (error == 0) {
+ /* Revalidation succeeded, we're good! */
+ ret = LOAD_SUCCESS;
+ goto out;
+ }
+
+ if (error != EAGAIN) {
+ printf("load_code_signature: revalidation failed: %d\n", error);
+ ret = LOAD_FAILURE;
+ goto out;
+ }
+
+ assert(error == EAGAIN);
+
+	/*
+	 * Revalidation was not possible for this blob. Continue as if
+	 * there were no blob: reread the signature from the file and
+	 * let ubc_cs_blob_add() do the right thing.
+	 */
+ blob = NULL;
}
blob_size = lcp->datasize;
if (ubc_cs_blob_add(vp,
cputype,
macho_offset,
- addr,
- lcp->datasize)) {
+ &addr,
+ lcp->datasize,
+ imgp,
+ 0,
+ &blob)) {
+ if (addr) {
+ ubc_cs_blob_deallocate(addr, blob_size);
+ }
ret = LOAD_FAILURE;
goto out;
} else {
/* ubc_cs_blob_add() has consumed "addr" */
addr = 0;
}
-
- blob = ubc_cs_blob_get(vp, cputype, -1);
+#if CHECK_CS_VALIDATION_BITMAP
+ ubc_cs_validation_bitmap_allocate( vp );
+#endif
+
ret = LOAD_SUCCESS;
out:
- if (result && ret == LOAD_SUCCESS) {
+ if (ret == LOAD_SUCCESS) {
+ if (blob == NULL)
+ panic("success, but no blob!");
+
result->csflags |= blob->csb_flags;
+ result->platform_binary = blob->csb_platform_binary;
+ result->cs_end_offset = blob->csb_end_offset;
}
if (addr != 0) {
ubc_cs_blob_deallocate(addr, blob_size);
static load_return_t
set_code_unprotect(
- struct encryption_info_command *eip,
- caddr_t addr,
- vm_map_t map,
- struct vnode *vp)
+ struct encryption_info_command *eip,
+ caddr_t addr,
+ vm_map_t map,
+ int64_t slide,
+ struct vnode *vp,
+ off_t macho_offset,
+ cpu_type_t cputype,
+ cpu_subtype_t cpusubtype)
{
- int result, len;
- char vpath[MAXPATHLEN];
+ int error, len;
pager_crypt_info_t crypt_info;
const char * cryptname = 0;
+ char *vpath;
size_t offset;
struct segment_command_64 *seg64;
struct segment_command *seg32;
vm_map_offset_t map_offset, map_size;
+ vm_object_offset_t crypto_backing_offset;
kern_return_t kr;
- if (eip->cmdsize < sizeof(*eip))
- return LOAD_BADMACHO;
+ if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
switch(eip->cryptid) {
case 0:
return LOAD_BADMACHO;
}
+ if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
+ if (NULL == text_crypter_create) return LOAD_FAILURE;
+
+ MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+	if (vpath == NULL) return LOAD_FAILURE;
+
len = MAXPATHLEN;
- result = vn_getpath(vp, vpath, &len);
- if(result) return result;
+ error = vn_getpath(vp, vpath, &len);
+ if (error) {
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
+ return LOAD_FAILURE;
+ }
/* set up decrypter first */
- if(NULL==text_crypter_create) return LOAD_FAILURE;
- kr=text_crypter_create(&crypt_info, cryptname, (void*)vpath);
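+	/*
+	 * Hand the decrypter the path plus the CPU slice (assumption: this
+	 * lets it select per-architecture keys inside fat binaries).
+	 */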
+	crypt_file_data_t crypt_data = {
+		.filename = vpath,
+		.cputype = cputype,
+		.cpusubtype = cpusubtype,
+	};
+	kr = text_crypter_create(&crypt_info, cryptname, (void *)&crypt_data);
+#if VM_MAP_DEBUG_APPLE_PROTECT
+ if (vm_map_debug_apple_protect) {
+ struct proc *p;
+ p = current_proc();
+ printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
+ p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
+ }
+#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
+ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
if(kr) {
printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
cryptname, kr);
- return LOAD_RESOURCE;
+ if (kr == kIOReturnNotPrivileged) {
+ /* text encryption returned decryption failure */
+ return(LOAD_DECRYPTFAIL);
+		} else {
+			return LOAD_RESOURCE;
+		}
}
/* this is terrible, but we have to rescan the load commands to find the
if ((seg64->fileoff <= eip->cryptoff) &&
(seg64->fileoff+seg64->filesize >=
eip->cryptoff+eip->cryptsize)) {
- map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff;
+ map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
map_size = eip->cryptsize;
+ crypto_backing_offset = macho_offset + eip->cryptoff;
goto remap_now;
}
case LC_SEGMENT:
if ((seg32->fileoff <= eip->cryptoff) &&
(seg32->fileoff+seg32->filesize >=
eip->cryptoff+eip->cryptsize)) {
- map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff;
+ map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
map_size = eip->cryptsize;
+ crypto_backing_offset = macho_offset + eip->cryptoff;
goto remap_now;
}
}
remap_now:
/* now remap using the decrypter */
- kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
- if(kr) {
+ MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
+ (uint64_t) map_offset,
+ (uint64_t) (map_offset+map_size)));
+ kr = vm_map_apple_protected(map,
+ map_offset,
+ map_offset+map_size,
+ crypto_backing_offset,
+ &crypt_info);
+ if (kr) {
printf("set_code_unprotect(): mapping failed with %x\n", kr);
- crypt_info.crypt_end(crypt_info.crypt_ops);
return LOAD_PROTECT;
}
static
load_return_t
get_macho_vnode(
- char *path,
+ const char *path,
integer_t archbits,
struct mach_header *mach_header,
off_t *file_offset,
off_t *macho_size,
+ struct macho_data *data,
struct vnode **vpp
)
{
vfs_context_t ctx = vfs_context_current();
proc_t p = vfs_context_proc(ctx);
kauth_cred_t kerncred;
- struct nameidata nid, *ndp;
+ struct nameidata *ndp = &data->__nid;
boolean_t is_fat;
struct fat_arch fat_arch;
- int error = LOAD_SUCCESS;
+ int error;
int resid;
- union {
- struct mach_header mach_header;
- struct fat_header fat_header;
- char pad[512];
- } header;
+ union macho_vnode_header *header = &data->__header;
off_t fsize = (off_t)0;
- int err2;
-
+
/*
* Capture the kernel credential for use in the actual read of the
* file, since the user doing the execution may have execute rights
*/
kerncred = vfs_context_ucred(vfs_context_kernel());
- ndp = &nid;
-
/* init the namei data to point the file user's program name */
- NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
+ NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
if ((error = namei(ndp)) != 0) {
if (error == ENOENT) {
}
nameidone(ndp);
vp = ndp->ni_vp;
-
+
/* check for regular file */
if (vp->v_type != VREG) {
error = LOAD_PROTECT;
}
/* check access */
- if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
+ if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
error = LOAD_PROTECT;
goto bad1;
}
goto bad1;
}
- if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
+ if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
error = LOAD_IOERROR;
goto bad2;
}
-
- if (header.mach_header.magic == MH_MAGIC ||
- header.mach_header.magic == MH_MAGIC_64)
- is_fat = FALSE;
- else if (header.fat_header.magic == FAT_MAGIC ||
- header.fat_header.magic == FAT_CIGAM)
+
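+	/* A short read (resid != 0) means the file cannot hold a header. */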
+ if (resid) {
+ error = LOAD_BADMACHO;
+ goto bad2;
+ }
+
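+	/* Fat headers are stored big-endian on disk, hence the swap below. */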
+ if (header->mach_header.magic == MH_MAGIC ||
+ header->mach_header.magic == MH_MAGIC_64) {
+ is_fat = FALSE;
+ } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
is_fat = TRUE;
- else {
- error = LOAD_BADMACHO;
- goto bad2;
+ } else {
+ error = LOAD_BADMACHO;
+ goto bad2;
}
if (is_fat) {
+
+ error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
+ sizeof(*header));
+ if (error != LOAD_SUCCESS) {
+ goto bad2;
+ }
+
/* Look up our architecture in the fat file. */
- error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
+ error = fatfile_getarch_with_bits(archbits,
+ (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
if (error != LOAD_SUCCESS)
goto bad2;
/* Read the Mach-O header out of it */
- error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
- sizeof(header.mach_header), fat_arch.offset,
- UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
+ error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
+ sizeof (header->mach_header), fat_arch.offset,
+ UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
if (error) {
error = LOAD_IOERROR;
goto bad2;
}
+ if (resid) {
+ error = LOAD_BADMACHO;
+ goto bad2;
+ }
+
/* Is this really a Mach-O? */
- if (header.mach_header.magic != MH_MAGIC &&
- header.mach_header.magic != MH_MAGIC_64) {
+ if (header->mach_header.magic != MH_MAGIC &&
+ header->mach_header.magic != MH_MAGIC_64) {
error = LOAD_BADMACHO;
goto bad2;
}
* required, since the dynamic linker might work, but we will
* refuse to load it because of this check.
*/
- if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
- return(LOAD_BADARCH);
+ if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
+ error = LOAD_BADARCH;
+ goto bad2;
+ }
*file_offset = 0;
*macho_size = fsize;
}
- *mach_header = header.mach_header;
+ *mach_header = header->mach_header;
*vpp = vp;
ubc_setsize(vp, fsize);
-
return (error);
bad2:
- err2 = VNOP_CLOSE(vp, FREAD, ctx);
- vnode_put(vp);
- return (error);
-
+ (void) VNOP_CLOSE(vp, FREAD, ctx);
bad1:
vnode_put(vp);
return(error);