#include <machine/exec.h>
#include <machine/pal_routines.h>
+#include <kern/ast.h>
#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
#include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
#include <os/overflow.h>
+#if __x86_64__
+extern int bootarg_no32exec; /* bsd_init.c */
+#endif
+
/*
* XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
* when KERNEL is defined.
.needs_dynlinker = 0,
.validentry = 0,
.using_lcmain = 0,
- .is64bit = 0,
+ .is_64bit_addr = 0,
+ .is_64bit_data = 0,
.csflags = 0,
.has_pagezero = 0,
.uuid = { 0 },
static load_return_t
get_macho_vnode(
- char *path,
+ const char *path,
integer_t archbits,
struct mach_header *mach_header,
off_t *file_offset,
} *sectionp;
unsigned int i;
+
if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
return;
for (i = 0; i < scp->nsects; ++i) {
}
}
+#if __arm64__
+/*
+ * Allow bypassing some security rules (hard pagezero, no write+execute)
+ * in exchange for better binary compatibility for legacy apps built
+ * before 16KB-alignment was enforced.
+ */
+const int fourk_binary_compatibility_unsafe = TRUE;
+const int fourk_binary_compatibility_allow_wx = FALSE;
+#endif /* __arm64__ */
load_return_t
load_machfile(
int in_exec = (imgp->ip_flags & IMGPF_EXEC);
task_t task = current_task();
proc_t p = current_proc();
- mach_vm_offset_t aslr_offset = 0;
- mach_vm_offset_t dyld_aslr_offset = 0;
+ int64_t aslr_page_offset = 0;
+ int64_t dyld_aslr_page_offset = 0;
+ int64_t aslr_section_size = 0;
+ int64_t aslr_section_offset = 0;
kern_return_t kret;
if (macho_size > file_size) {
return(LOAD_BADMACHO);
}
- result->is64bit = ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT);
+ result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
+ result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
task_t ledger_task;
if (imgp->ip_new_thread) {
}
pmap = pmap_create(get_task_ledger(ledger_task),
(vm_map_size_t) 0,
- result->is64bit);
+ result->is_64bit_addr);
map = vm_map_create(pmap,
0,
- vm_compute_max_offset(result->is64bit),
+ vm_compute_max_offset(result->is_64bit_addr),
TRUE);
-#if (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
+#if defined(__arm64__)
+ if (result->is_64bit_addr) {
+ /* enforce 16KB alignment of VM map entries */
+ vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
+ } else {
+ vm_map_set_page_shift(map, page_shift_user32);
+ }
+#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
/* enforce 16KB alignment for watch targets with new ABI */
vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */
* flag (CS_ENFORCEMENT) is not set yet, but we can use the
* global flag.
*/
- if ( !cs_enforcement(NULL) && (header->flags & MH_ALLOW_STACK_EXECUTION) )
+ if ( !cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION) ) {
vm_map_disable_NX(map);
+ // TODO: Message Trace or log that this is happening
+ }
#endif
/* Forcibly disallow execution from data pages even if the arch
* normally permits it. */
if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
vm_map_disallow_data_exec(map);
/*
* Compute a random offset for ASLR, and an independent random offset for dyld.
*/
if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
- uint64_t max_slide_pages;
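+ /* pick a random multiple of the ASLR section size as a coarse slide; a page-level slide is added below */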
+ vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
+ aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
- max_slide_pages = vm_map_get_max_aslr_slide_pages(map);
+ aslr_page_offset = random();
+ aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
+ aslr_page_offset <<= vm_map_page_shift(map);
- aslr_offset = random();
- aslr_offset %= max_slide_pages;
- aslr_offset <<= vm_map_page_shift(map);
+ dyld_aslr_page_offset = random();
+ dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
+ dyld_aslr_page_offset <<= vm_map_page_shift(map);
- dyld_aslr_offset = random();
- dyld_aslr_offset %= max_slide_pages;
- dyld_aslr_offset <<= vm_map_page_shift(map);
+ aslr_page_offset += aslr_section_offset;
}
if (!result)
result = &myresult;
/*
* re-set the bitness on the load result since we cleared the load result above.
*/
- result->is64bit = ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT);
+ result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
+ result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
- 0, (int64_t)aslr_offset, (int64_t)dyld_aslr_offset, result,
+ 0, aslr_page_offset, dyld_aslr_page_offset, result,
NULL, imgp);
if (lret != LOAD_SUCCESS) {
/*
* On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
*/
- if (!result->is64bit) {
+ if (!result->is_64bit_addr) {
enforce_hard_pagezero = FALSE;
}
-#endif
+
+ /*
+ * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
+ * to the start address for "anywhere" memory allocations.
+ */
+#define VM_MAP_HIGH_START_BITS_COUNT 8
+#define VM_MAP_HIGH_START_BITS_SHIFT 27
+ if (result->is_64bit_addr &&
+ (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
+ int random_bits;
+ vm_map_offset_t high_start;
+
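+ /* 8 random bits placed at bit 27: high_start can reach 0xFF << 27, just under 32GB */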
+ random_bits = random();
+ random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT)-1;
+ high_start = (((vm_map_offset_t)random_bits)
+ << VM_MAP_HIGH_START_BITS_SHIFT);
+ vm_map_set_high_start(map, high_start);
+ }
+#endif /* __x86_64__ */
+
/*
* Check to see if the page zero is enforced by the map->min_offset.
*/
if (enforce_hard_pagezero &&
(vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
+#if __arm64__
+ if (!result->is_64bit_addr && /* not 64-bit address space */
+ !(header->flags & MH_PIE) && /* not PIE */
+ (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
+ PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
+ result->has_pagezero && /* has a "soft" page zero */
+ fourk_binary_compatibility_unsafe) {
+ /*
+ * For backwards compatibility of "4K" apps on
+ * a 16K system, do not enforce a hard page zero...
+ */
+ } else
+#endif /* __arm64__ */
{
vm_map_deallocate(map); /* will lose pmap reference too */
return (LOAD_BADMACHO);
return (LOAD_FAILURE);
}
proc_transcommit(p, 0);
- workqueue_mark_exiting(p);
+ workq_mark_exiting(p);
task_complete_halt(task);
- workqueue_exit(p);
- kqueue_dealloc(p->p_wqkqueue);
- p->p_wqkqueue = NULL;
+ workq_exit(p);
+
/*
* Roll up accounting info to new task. The roll up is done after
* task_complete_halt to make sure the thread accounting info is
task_rollup_accounting_info(get_threadtask(thread), task);
}
*mapp = map;
+
+#ifdef CONFIG_32BIT_TELEMETRY
+ if (!result->is_64bit_data) {
+ /*
+ * This may not need to be an AST; we merely need to ensure that
+ * we gather telemetry at the point where all of the information
+ * that we want has been added to the process.
+ */
+ task_set_32bit_log_flag(get_threadtask(thread));
+ act_set_astbsd(thread);
+ }
+#endif /* CONFIG_32BIT_TELEMETRY */
+
return(LOAD_SUCCESS);
}
integer_t dlarchbits = 0;
void * control;
load_return_t ret = LOAD_SUCCESS;
- caddr_t addr;
- void * kl_addr;
- vm_size_t size,kl_size;
+ void * addr;
+ vm_size_t alloc_size, cmds_size;
size_t offset;
size_t oldoffset; /* for overflow check */
int pass;
boolean_t dyld_no_load_addr = FALSE;
boolean_t is_dyld = FALSE;
vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+#if __arm64__
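+ /* segment boundaries of 32-bit binaries, gathered while scanning LC_SEGMENTs so pass 1 can re-align the slide */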
+ uint32_t pagezero_end = 0;
+ uint32_t executable_end = 0;
+ uint32_t writable_start = 0;
+ vm_map_size_t effective_page_size;
+
+ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
+#endif /* __arm64__ */
if (header->magic == MH_MAGIC_64 ||
header->magic == MH_CIGAM_64) {
* Check to see if right machine type.
*/
if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
!grade_binary(header->cputype,
header->cpusubtype & ~CPU_SUBTYPE_MASK))
return(LOAD_BADARCH);
+#if __x86_64__
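+ /* the no32exec boot-arg refuses execution of 32-bit (i386) binaries */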
+ if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) {
+ return(LOAD_BADARCH_X86);
+ }
+#endif
+
abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
switch (header->filetype) {
case MH_EXECUTE:
if (depth != 1) {
return (LOAD_FAILURE);
}
+#if CONFIG_EMBEDDED
+ if (header->flags & MH_DYLDLINK) {
+ /* Check properties of dynamic executables */
+ if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
+ return (LOAD_FAILURE);
+ }
+ result->needs_dynlinker = TRUE;
+ } else {
+ /* Check properties of static executables (disallowed except for development) */
+#if !(DEVELOPMENT || DEBUG)
+ return (LOAD_FAILURE);
+#endif
+ }
+#endif /* CONFIG_EMBEDDED */
break;
case MH_DYLINKER:
*/
control = ubc_getobject(vp, UBC_FLAGS_NONE);
- /*
- * Map portion that must be accessible directly into
- * kernel's map.
- */
- if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
- return(LOAD_BADMACHO);
-
- /*
- * Round size of Mach-O commands up to page boundry.
- */
- size = round_page(mach_header_sz + header->sizeofcmds);
- if (size <= 0)
- return(LOAD_BADMACHO);
+ /* ensure header + sizeofcmds falls within the file */
+ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
+ (off_t)cmds_size > macho_size ||
+ round_page_overflow(cmds_size, &alloc_size)) {
+ return LOAD_BADMACHO;
+ }
/*
* Map the load commands into kernel memory.
*/
- addr = 0;
- kl_size = size;
- kl_addr = kalloc(size);
- addr = (caddr_t)kl_addr;
- if (addr == NULL)
- return(LOAD_NOSPACE);
+ addr = kalloc(alloc_size);
+ if (addr == NULL) {
+ return LOAD_NOSPACE;
+ }
- error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
+ error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
if (error) {
- if (kl_addr)
- kfree(kl_addr, kl_size);
- return(LOAD_IOERROR);
+ kfree(addr, alloc_size);
+ return LOAD_IOERROR;
}
if (resid) {
/* We must be able to read in as much as the mach_header indicated */
- if (kl_addr)
- kfree(kl_addr, kl_size);
- return(LOAD_BADMACHO);
+ kfree(addr, alloc_size);
+ return LOAD_BADMACHO;
}
/*
*/
boolean_t slide_realign = FALSE;
+#if __arm64__
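+ /* 32-bit binaries may need their slide re-aligned (see pass 1) to stay mappable on 16KB pages */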
+ if (!abi64) {
+ slide_realign = TRUE;
+ }
+#endif
for (pass = 0; pass <= 3; pass++) {
* address, pass 0 can be skipped */
continue;
} else if (pass == 1) {
+#if __arm64__
+ boolean_t is_pie;
+ int64_t adjust;
+
+ is_pie = ((header->flags & MH_PIE) != 0);
+ if (pagezero_end != 0 &&
+ pagezero_end < effective_page_size) {
+ /* need at least 1 page for PAGEZERO */
+ adjust = effective_page_size;
+ MACHO_PRINTF(("pagezero boundary at "
+ "0x%llx; adjust slide from "
+ "0x%llx to 0x%llx%s\n",
+ (uint64_t) pagezero_end,
+ slide,
+ slide + adjust,
+ (is_pie
+ ? ""
+ : " BUT NO PIE ****** :-(")));
+ if (is_pie) {
+ slide += adjust;
+ pagezero_end += adjust;
+ executable_end += adjust;
+ writable_start += adjust;
+ }
+ }
+ if (pagezero_end != 0) {
+ result->has_pagezero = TRUE;
+ }
+ if (executable_end == writable_start &&
+ (executable_end & effective_page_mask) != 0 &&
+ (executable_end & FOURK_PAGE_MASK) == 0) {
+
+ /*
+ * The TEXT/DATA boundary is 4K-aligned but
+ * not page-aligned. Adjust the slide to make
+ * it page-aligned and avoid having a page
+ * with both write and execute permissions.
+ */
+ adjust =
+ (effective_page_size -
+ (executable_end & effective_page_mask));
+ MACHO_PRINTF(("page-unaligned X-W boundary at "
+ "0x%llx; adjust slide from "
+ "0x%llx to 0x%llx%s\n",
+ (uint64_t) executable_end,
+ slide,
+ slide + adjust,
+ (is_pie
+ ? ""
+ : " BUT NO PIE ****** :-(")));
+ if (is_pie)
+ slide += adjust;
+ }
+#endif /* __arm64__ */
if (dyld_no_load_addr && binresult) {
/*
ncmds = header->ncmds;
while (ncmds--) {
+
+ /* ensure enough space for a minimal load command */
+ if (offset + sizeof(struct load_command) > cmds_size) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+
/*
* Get a pointer to the command.
*/
lcp = (struct load_command *)(addr + offset);
oldoffset = offset;
- offset += lcp->cmdsize;
/*
* Perform prevalidation of the struct load_command before we
* attempt to use its contents: reject a cmdsize that overflows
* the offset, and commands that would straddle or exist past
* the reserved section at the start of the image.
*/
- if (oldoffset > offset ||
- lcp->cmdsize < sizeof(struct load_command) ||
- offset > header->sizeofcmds + mach_header_sz) {
+ if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
+ lcp->cmdsize < sizeof(struct load_command) ||
+ offset > cmds_size) {
ret = LOAD_BADMACHO;
break;
}
switch(lcp->cmd) {
case LC_SEGMENT: {
struct segment_command *scp = (struct segment_command *) lcp;
-
if (pass == 0) {
if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
dyld_no_load_addr = TRUE;
}
+#if __arm64__
+ assert(!abi64);
+
+ if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
+ /* PAGEZERO */
+ if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+ if (scp->initprot & VM_PROT_EXECUTE) {
+ /* TEXT */
+ if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+ if (scp->initprot & VM_PROT_WRITE) {
+ /* DATA */
+ if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
+ ret = LOAD_BADMACHO;
+ break;
+ }
+ }
+#endif /* __arm64__ */
break;
}
map,
slide,
result);
-
if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
/* Enforce a single segment mapping offset zero, with R+X
* protection. */
case LC_UUID:
if (pass == 1 && depth == 1) {
ret = load_uuid((struct uuid_command *) lcp,
- (char *)addr + mach_header_sz + header->sizeofcmds,
+ (char *)addr + cmds_size,
result);
}
break;
/*
* Allow injections to be ignored on devices w/o enforcement enabled
*/
- if (!cs_enforcement(NULL))
+ if (!cs_process_global_enforcement())
ret = LOAD_SUCCESS; /* ignore error */
} else {
if (cs_debug > 10)
printf("validating initial pages of %s\n", vp->v_name);
- while (off < size && ret == LOAD_SUCCESS) {
+ while (off < alloc_size && ret == LOAD_SUCCESS) {
tainted = CS_VALIDATE_TAINTED;
valid = cs_validate_range(vp,
if (cs_debug)
printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
- if (cs_enforcement(NULL) ||
+ if (cs_process_global_enforcement() ||
(result->csflags & (CS_HARD|CS_KILL|CS_ENFORCEMENT))) {
ret = LOAD_FAILURE;
}
}
break;
#endif
+#if __arm64__
+ case LC_VERSION_MIN_IPHONEOS: {
+ struct version_min_command *vmc;
+
+ if (pass != 1) {
+ break;
+ }
+ vmc = (struct version_min_command *) lcp;
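+ /* sdk encodes the version as (major << 16) | (minor << 8) | patch, so (12 << 16) is "12.0.0" */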
+ if (vmc->sdk < (12 << 16)) {
+ /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
+ result->legacy_footprint = TRUE;
+ }
+// printf("FBDP %s:%d vp %p (%s) sdk %d.%d.%d -> legacy_footprint=%d\n", __FUNCTION__, __LINE__, vp, vp->v_name, (vmc->sdk >> 16), ((vmc->sdk & 0xFF00) >> 8), (vmc->sdk & 0xFF), result->legacy_footprint);
+ break;
+ }
+#endif /* __arm64__ */
default:
/* Other commands are ignored by the kernel */
ret = LOAD_SUCCESS;
break;
}
- if (ret == LOAD_SUCCESS) {
- if (! got_code_signatures) {
- if (cs_enforcement(NULL)) {
- ret = LOAD_FAILURE;
- } else {
- /*
- * No embedded signatures: look for detached by taskgated,
- * this is only done on OSX, on embedded platforms we expect everything
- * to be have embedded signatures.
- */
- struct cs_blob *blob;
-
- blob = ubc_cs_blob_get(vp, -1, file_offset);
- if (blob != NULL) {
- unsigned int cs_flag_data = blob->csb_flags;
- if(0 != ubc_cs_generation_check(vp)) {
- if (0 != ubc_cs_blob_revalidate(vp, blob, imgp, 0)) {
- /* clear out the flag data if revalidation fails */
- cs_flag_data = 0;
- result->csflags &= ~CS_VALID;
- }
- }
- /* get flags to be applied to the process */
- result->csflags |= cs_flag_data;
- }
- }
+ if (ret == LOAD_SUCCESS) {
+ if (!got_code_signatures && cs_process_global_enforcement()) {
+ ret = LOAD_FAILURE;
}
/* Make sure if we need dyld, we got it */
if (result->thread_count == 0) {
ret = LOAD_FAILURE;
}
+#if CONFIG_ENFORCE_SIGNED_CODE
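+ /* when signed code is enforced, the dynamic linker itself must be a platform binary */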
+ if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
+ ret = LOAD_FAILURE;
+ }
+#endif
}
}
ret = LOAD_BADMACHO_UPX;
}
- if (kl_addr) {
- kfree(kl_addr, kl_size);
- }
+ kfree(addr, alloc_size);
- return(ret);
+ return ret;
}
#if CONFIG_CODE_DECRYPTION
vm_map_offset_t file_start,
vm_map_offset_t file_end,
vm_prot_t initprot,
- vm_prot_t maxprot)
+ vm_prot_t maxprot,
+ load_result_t *result)
{
- int extra_vm_flags, cur_extra_vm_flags;
vm_map_offset_t cur_offset, cur_start, cur_end;
kern_return_t ret;
vm_map_offset_t effective_page_mask;
+ vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;
if (vm_end < vm_start ||
file_end < file_start) {
effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
- extra_vm_flags = 0;
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
if (vm_map_page_aligned(vm_start, effective_page_mask) &&
vm_map_page_aligned(vm_end, effective_page_mask) &&
vm_map_page_aligned(file_start, effective_page_mask) &&
vm_map_page_aligned(file_end, effective_page_mask)) {
/* all page-aligned and map-aligned: proceed */
} else {
+#if __arm64__
+ /* use an intermediate "4K" pager */
+ vmk_flags.vmkf_fourk = TRUE;
+#else /* __arm64__ */
panic("map_segment: unexpected mis-alignment "
"vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
(uint64_t) vm_start,
(uint64_t) vm_end,
(uint64_t) file_start,
(uint64_t) file_end);
+#endif /* __arm64__ */
}
cur_offset = 0;
cur_start = vm_start;
cur_end = vm_start;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
+ /* one 4K pager for the 1st page */
+ cur_end = vm_map_round_page(cur_start, effective_page_mask);
+ if (cur_end > vm_start + (file_end - file_start)) {
+ cur_end = vm_start + (file_end - file_start);
+ }
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
if (cur_end >= vm_start + (file_end - file_start)) {
/* all mapped: done */
goto done;
if ((vm_start & effective_page_mask) !=
(file_start & effective_page_mask)) {
/* one 4K pager for the middle */
- cur_extra_vm_flags = extra_vm_flags;
+ cur_vmk_flags = vmk_flags;
} else {
/* regular mapping for the middle */
- cur_extra_vm_flags = 0;
+ cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
}
+
+#if CONFIG_EMBEDDED
+ (void) result;
+#else /* CONFIG_EMBEDDED */
+ /*
+ * This process doesn't have its new csflags (from
+ * the image being loaded) yet, so tell VM to override the
+ * current process's CS_ENFORCEMENT for this mapping.
+ */
+ if (result->csflags & CS_ENFORCEMENT) {
+ cur_vmk_flags.vmkf_cs_enforcement = TRUE;
+ } else {
+ cur_vmk_flags.vmkf_cs_enforcement = FALSE;
+ }
+ cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
+#endif /* CONFIG_EMBEDDED */
+
cur_end = vm_map_trunc_page(vm_start + (file_end -
file_start),
effective_page_mask);
&cur_start,
cur_end - cur_start,
(mach_vm_offset_t)0,
- VM_FLAGS_FIXED | cur_extra_vm_flags,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
control,
file_start + cur_offset,
TRUE, /* copy */
&cur_start,
cur_end - cur_start,
(mach_vm_offset_t)0,
- VM_FLAGS_FIXED | cur_extra_vm_flags,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
IPC_PORT_NULL,
0, /* offset */
TRUE, /* copy */
goto done;
}
cur_start = cur_end;
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* one 4K pager for the last page */
+ cur_end = vm_start + (file_end - file_start);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
done:
assert(cur_end >= vm_start + (file_end - file_start));
return LOAD_SUCCESS;
boolean_t verbose;
vm_map_size_t effective_page_size;
vm_map_offset_t effective_page_mask;
+#if __arm64__
+ vm_map_kernel_flags_t vmk_flags;
+ boolean_t fourk_align;
+#endif /* __arm64__ */
effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
verbose = FALSE;
if (LC_SEGMENT_64 == lcp->cmd) {
segment_command_size = sizeof(struct segment_command_64);
single_section_size = sizeof(struct section_64);
+#if __arm64__
+ /* 64-bit binary: should already be 16K-aligned */
+ fourk_align = FALSE;
+#endif /* __arm64__ */
} else {
segment_command_size = sizeof(struct segment_command);
single_section_size = sizeof(struct section);
+#if __arm64__
+ /* 32-bit binary: might need 4K-alignment */
+ if (effective_page_size != FOURK_PAGE_SIZE) {
+ /* not using 4K page size: need fourk_pager */
+ fourk_align = TRUE;
+ verbose = TRUE;
+ } else {
+ /* using 4K page size: no need for re-alignment */
+ fourk_align = FALSE;
+ }
+#endif /* __arm64__ */
}
if (lcp->cmdsize < segment_command_size)
return (LOAD_BADMACHO);
*/
file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
file_size = scp->filesize;
+#if __arm64__
+ if (fourk_align) {
+ if ((file_offset & FOURK_PAGE_MASK) != 0) {
+ /*
+ * we can't mmap() it if it's not at least 4KB-aligned
+ * in the file
+ */
+ return LOAD_BADMACHO;
+ }
+ } else
+#endif /* __arm64__ */
if ((file_offset & PAGE_MASK_64) != 0 ||
/* we can't mmap() it if it's not page-aligned in the file */
(file_offset & vm_map_page_mask(map)) != 0) {
"page_zero up to 0x%llx\n",
(uint64_t) vm_end));
}
+#if __arm64__
+ if (fourk_align) {
+ /* raise min_offset as much as page-alignment allows */
+ vm_end_aligned = vm_map_trunc_page(vm_end,
+ effective_page_mask);
+ } else
+#endif /* __arm64__ */
{
vm_end = vm_map_round_page(vm_end,
PAGE_MASK_64);
vm_end_aligned = vm_end;
}
ret = vm_map_raise_min_offset(map,
vm_end_aligned);
+#if __arm64__
+ if (ret == 0 &&
+ vm_end > vm_end_aligned) {
+ /* use fourk_pager to map the rest of pagezero */
+ assert(fourk_align);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_fourk = TRUE;
+ ret = vm_map_enter_mem_object(
+ map,
+ &vm_end_aligned,
+ vm_end - vm_end_aligned,
+ (mach_vm_offset_t) 0, /* mask */
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0,
+ FALSE, /* copy */
+ (scp->initprot & VM_PROT_ALL),
+ (scp->maxprot & VM_PROT_ALL),
+ VM_INHERIT_DEFAULT);
+ }
+#endif /* __arm64__ */
if (ret != KERN_SUCCESS) {
return (LOAD_FAILURE);
}
return (LOAD_SUCCESS);
} else {
+#if CONFIG_EMBEDDED
+ /* not PAGEZERO: should not be mapped at address 0 */
+ if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
+ return LOAD_BADMACHO;
+ }
+#endif /* CONFIG_EMBEDDED */
}
+#if __arm64__
+ if (fourk_align) {
+ /* 4K-align */
+ file_start = vm_map_trunc_page(file_offset,
+ FOURK_PAGE_MASK);
+ file_end = vm_map_round_page(file_offset + file_size,
+ FOURK_PAGE_MASK);
+ vm_start = vm_map_trunc_page(vm_offset,
+ FOURK_PAGE_MASK);
+ vm_end = vm_map_round_page(vm_offset + vm_size,
+ FOURK_PAGE_MASK);
+ if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
+ page_aligned(file_start) &&
+ vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
+ page_aligned(vm_start) &&
+ vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
+ /* XXX last segment: ignore mis-aligned tail */
+ file_end = vm_map_round_page(file_end,
+ effective_page_mask);
+ vm_end = vm_map_round_page(vm_end,
+ effective_page_mask);
+ }
+ } else
+#endif /* __arm64__ */
{
file_start = vm_map_trunc_page(file_offset,
effective_page_mask);
file_start,
file_end,
initprot,
- maxprot);
+ maxprot,
+ result);
if (ret) {
return LOAD_NOSPACE;
}
if (delta_size > 0) {
mach_vm_offset_t tmp;
- ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE| VM_MAKE_TAG(VM_KERN_MEMORY_BSD));
+ ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
if (ret != KERN_SUCCESS) {
return(LOAD_RESOURCE);
}
0,
delta_size,
scp->initprot,
- scp->maxprot);
+ scp->maxprot,
+ result);
if (kr != KERN_SUCCESS) {
return(LOAD_NOSPACE);
}
}
/* use default location for stack */
- ret = thread_userstackdefault(&addr, result->is64bit);
+ ret = thread_userstackdefault(&addr, result->is_64bit_addr);
if (ret != KERN_SUCCESS)
return(LOAD_FAILURE);
load_return_t ret;
int customstack =0;
mach_vm_offset_t addr;
-
if (tcp->cmdsize < sizeof(*tcp))
return (LOAD_BADMACHO);
if (result->thread_count != 0) {
return (LOAD_SUCCESS);
ret = load_threadstack(thread,
(uint32_t *)(((vm_offset_t)tcp) +
sizeof(struct thread_command)),
tcp->cmdsize - sizeof(struct thread_command),
&addr, &customstack, result);
if (ret != LOAD_SUCCESS)
return(ret);
/* LC_UNIXTHREAD optionally specifies stack size and location */
if (!customstack) {
result->user_stack_alloc_size = MAXSSIZ;
}
result->user_stack -= slide;
ret = load_threadentry(thread,
(uint32_t *)(((vm_offset_t)tcp) +
sizeof(struct thread_command)),
tcp->cmdsize - sizeof(struct thread_command),
&addr);
if (ret != LOAD_SUCCESS)
return(ret);
result->entry_point += slide;
ret = load_threadstate(thread,
(uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
tcp->cmdsize - sizeof(struct thread_command),
result);
if (ret != LOAD_SUCCESS)
return (ret);
* to the appropriate type in thread_userstack() based on
* the value of flavor.
*/
- ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is64bit);
+ ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
if (ret != KERN_SUCCESS) {
return(LOAD_FAILURE);
}
extern int use_alt_dyld;
#endif
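+
+/* Return the vnode's filesystem ID, preferring the 64-bit fsid when the filesystem reports one. */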
+static uint64_t get_va_fsid(struct vnode_attr *vap)
+{
+ if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
+ return *(uint64_t *)&vap->va_fsid64;
+ } else {
+ return vap->va_fsid;
+ }
+}
+
static load_return_t
load_dylinker(
struct dylinker_command *lcp,
struct image_params *imgp
)
{
- char *name;
- char *p;
+ const char *name;
struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
struct mach_header *header;
off_t file_offset = 0; /* set by get_macho_vnode() */
struct macho_data __macho_data;
} *dyld_data;
- if (lcp->cmdsize < sizeof(*lcp))
- return (LOAD_BADMACHO);
+ if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize)
+ return LOAD_BADMACHO;
- name = (char *)lcp + lcp->name.offset;
+ name = (const char *)lcp + lcp->name.offset;
- /*
- * Check for a proper null terminated string.
- */
- p = name;
- do {
- if (p >= (char *)lcp + lcp->cmdsize)
- return(LOAD_BADMACHO);
- } while (*p++);
+ /* Check for a proper null terminated string. */
+ size_t maxsz = lcp->cmdsize - lcp->name.offset;
+ size_t namelen = strnlen(name, maxsz);
+ if (namelen >= maxsz) {
+ return LOAD_BADMACHO;
+ }
#if (DEVELOPMENT || DEBUG)
goto novp_out;
*myresult = load_result_null;
- myresult->is64bit = result->is64bit;
+ myresult->is_64bit_addr = result->is_64bit_addr;
+ myresult->is_64bit_data = result->is_64bit_data;
ret = parse_machfile(vp, map, thread, header, file_offset,
macho_size, depth, slide, 0, myresult, result, imgp);
}
}
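+ /* look up dyld's fsid and fileid so the loaded dyld can be identified later */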
+ struct vnode_attr va;
+ VATTR_INIT(&va);
+ VATTR_WANTED(&va, va_fsid64);
+ VATTR_WANTED(&va, va_fsid);
+ VATTR_WANTED(&va, va_fileid);
+ int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
+ if (error == 0) {
+ imgp->ip_dyld_fsid = get_va_fsid(&va);
+ imgp->ip_dyld_fsobjid = va.va_fileid;
+ }
+
vnode_put(vp);
novp_out:
FREE(dyld_data, M_TEMP);
}
blob = ubc_cs_blob_get(vp, cputype, macho_offset);
+
if (blob != NULL) {
/* we already have a blob for this vnode and cputype */
- if (blob->csb_cpu_type == cputype &&
- blob->csb_base_offset == macho_offset) {
- /* it matches the blob we want here, lets verify the version */
- if(0 != ubc_cs_generation_check(vp)) {
- if (0 != ubc_cs_blob_revalidate(vp, blob, imgp, 0)) {
- ret = LOAD_FAILURE; /* set error same as from ubc_cs_blob_add */
- goto out;
- }
- }
- ret = LOAD_SUCCESS;
- } else {
+ if (blob->csb_cpu_type != cputype ||
+ blob->csb_base_offset != macho_offset) {
/* the blob has changed for this vnode: fail ! */
ret = LOAD_BADMACHO;
+ goto out;
}
- goto out;
+
+ /* It matches the blob we want here, let's verify the version */
+ if (ubc_cs_generation_check(vp) == 0) {
+ /* No need to revalidate, we're good! */
+ ret = LOAD_SUCCESS;
+ goto out;
+ }
+
+ /* That blob may be stale, let's revalidate. */
+ error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
+ if (error == 0) {
+ /* Revalidation succeeded, we're good! */
+ ret = LOAD_SUCCESS;
+ goto out;
+ }
+
+ if (error != EAGAIN) {
+ printf("load_code_signature: revalidation failed: %d\n", error);
+ ret = LOAD_FAILURE;
+ goto out;
+ }
+
+ assert(error == EAGAIN);
+
+ /*
+ * Revalidation was not possible for this blob. We just continue as if there was no blob,
+ * rereading the signature, and ubc_cs_blob_add will do the right thing.
+ */
+ blob = NULL;
}
blob_size = lcp->datasize;
out:
if (ret == LOAD_SUCCESS) {
if (blob == NULL)
- panic("sucess, but no blob!");
+ panic("success, but no blob!");
result->csflags |= blob->csb_flags;
result->platform_binary = blob->csb_platform_binary;
static
load_return_t
get_macho_vnode(
- char *path,
+ const char *path,
integer_t archbits,
struct mach_header *mach_header,
off_t *file_offset,