+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ if (vm_map_page_aligned(vm_start, effective_page_mask) &&
+ vm_map_page_aligned(vm_end, effective_page_mask) &&
+ vm_map_page_aligned(file_start, effective_page_mask) &&
+ vm_map_page_aligned(file_end, effective_page_mask)) {
+ /* all page-aligned and map-aligned: proceed */
+ } else {
+#if __arm64__
+ /* use an intermediate "4K" pager */
+ vmk_flags.vmkf_fourk = TRUE;
+#else /* __arm64__ */
+ panic("map_segment: unexpected mis-alignment "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
+ (uint64_t) vm_start,
+ (uint64_t) vm_end,
+ (uint64_t) file_start,
+ (uint64_t) file_end);
+#endif /* __arm64__ */
+ }
+
+ cur_offset = 0;
+ cur_start = vm_start;
+ cur_end = vm_start;
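+ /*
+ * Map the segment in up to three chunks: a possibly mis-aligned
+ * first page, a page-aligned middle, and a possibly mis-aligned
+ * last page. On arm64, the mis-aligned chunks go through the
+ * intermediate 4K pager.
+ */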
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
+ /* one 4K pager for the 1st page */
+ cur_end = vm_map_round_page(cur_start, effective_page_mask);
+ if (cur_end > vm_end) {
+ cur_end = vm_start + (file_end - file_start);
+ }
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ if (vm_map_round_page(cur_end, effective_page_mask) >=
+ vm_map_trunc_page(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* no page-aligned middle chunk to map */
+ } else {
+ cur_start = cur_end;
+ if ((vm_start & effective_page_mask) !=
+ (file_start & effective_page_mask)) {
+ /* one 4K pager for the middle */
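+ /* (vm_start and file_start are mis-aligned relative to each other) */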
+ cur_vmk_flags = vmk_flags;
+ } else {
+ /* regular mapping for the middle */
+ cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ }
+
+#if CONFIG_EMBEDDED
+ (void) result; /* unused in this configuration */
+#else /* CONFIG_EMBEDDED */
+ /*
+ * This process doesn't have its new csflags (from
+ * the image being loaded) yet, so tell VM to override the
+ * current process's CS_ENFORCEMENT for this mapping.
+ */
+ if (result->csflags & CS_ENFORCEMENT) {
+ cur_vmk_flags.vmkf_cs_enforcement = TRUE;
+ } else {
+ cur_vmk_flags.vmkf_cs_enforcement = FALSE;
+ }
+ cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
+#endif /* CONFIG_EMBEDDED */
+
+ cur_end = vm_map_trunc_page(vm_start + (file_end -
+ file_start),
+ effective_page_mask);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ cur_vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+ if (cur_end >= vm_start + (file_end - file_start)) {
+ /* all mapped: done */
+ goto done;
+ }
+ cur_start = cur_end;
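+ /* whatever remains is the mis-aligned tail of the segment */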
+#if __arm64__
+ if (!vm_map_page_aligned(vm_start + (file_end - file_start),
+ effective_page_mask)) {
+ /* one 4K pager for the last page */
+ cur_end = vm_start + (file_end - file_start);
+ if (control != MEMORY_OBJECT_CONTROL_NULL) {
+ ret = vm_map_enter_mem_object_control(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ control,
+ file_start + cur_offset,
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ } else {
+ ret = vm_map_enter_mem_object(
+ map,
+ &cur_start,
+ cur_end - cur_start,
+ (mach_vm_offset_t)0,
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0, /* offset */
+ TRUE, /* copy */
+ initprot, maxprot,
+ VM_INHERIT_DEFAULT);
+ }
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_NOSPACE);
+ }
+ cur_offset += cur_end - cur_start;
+ }
+#endif /* __arm64__ */
+done:
+ assert(cur_end >= vm_start + (file_end - file_start));
+ return LOAD_SUCCESS;
+}
+
+static
+load_return_t
+load_segment(
+ struct load_command *lcp,
+ uint32_t filetype,
+ void * control,
+ off_t pager_offset,
+ off_t macho_size,
+ struct vnode *vp,
+ vm_map_t map,
+ int64_t slide,
+ load_result_t *result)
+{
+ struct segment_command_64 segment_command, *scp;
+ kern_return_t ret;
+ vm_map_size_t delta_size;
+ vm_prot_t initprot;
+ vm_prot_t maxprot;
+ size_t segment_command_size, total_section_size,
+ single_section_size;
+ vm_map_offset_t file_offset, file_size;
+ vm_map_offset_t vm_offset, vm_size;
+ vm_map_offset_t vm_start, vm_end, vm_end_aligned;
+ vm_map_offset_t file_start, file_end;
+ kern_return_t kr;
+ boolean_t verbose;
+ vm_map_size_t effective_page_size;
+ vm_map_offset_t effective_page_mask;
+#if __arm64__
+ vm_map_kernel_flags_t vmk_flags;
+ boolean_t fourk_align;
+#endif /* __arm64__ */
+
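+ /*
+ * Alignment decisions below are based on the larger of the
+ * kernel's page size and the target map's page size.
+ */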
+ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
+ effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
+
+ verbose = FALSE;
+ if (LC_SEGMENT_64 == lcp->cmd) {
+ segment_command_size = sizeof(struct segment_command_64);
+ single_section_size = sizeof(struct section_64);
+#if __arm64__
+ /* 64-bit binary: should already be 16K-aligned */
+ fourk_align = FALSE;
+#endif /* __arm64__ */
+ } else {
+ segment_command_size = sizeof(struct segment_command);
+ single_section_size = sizeof(struct section);
+#if __arm64__
+ /* 32-bit binary: might need 4K-alignment */
+ if (effective_page_size != FOURK_PAGE_SIZE) {
+ /* not using 4K page size: need fourk_pager */
+ fourk_align = TRUE;
+ verbose = TRUE;
+ } else {
+ /* using 4K page size: no need for re-alignment */
+ fourk_align = FALSE;
+ }
+#endif /* __arm64__ */
+ }
+ if (lcp->cmdsize < segment_command_size)
+ return (LOAD_BADMACHO);
+ total_section_size = lcp->cmdsize - segment_command_size;
+
+ if (LC_SEGMENT_64 == lcp->cmd) {
+ scp = (struct segment_command_64 *)lcp;
+ } else {
+ scp = &segment_command;
+ widen_segment_command((struct segment_command *)lcp, scp);
+ }
+
+ if (verbose) {
+ MACHO_PRINTF(("+++ load_segment %s "
+ "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
+ "prot %d/%d flags 0x%x\n",
+ scp->segname,
+ (uint64_t)(slide + scp->vmaddr),
+ (uint64_t)(slide + scp->vmaddr + scp->vmsize),
+ pager_offset + scp->fileoff,
+ pager_offset + scp->fileoff + scp->filesize,
+ scp->initprot,
+ scp->maxprot,
+ scp->flags));
+ }
+
+ /*
+ * Make sure what we get from the file is really ours (as specified
+ * by macho_size).
+ */
+ if (scp->fileoff + scp->filesize < scp->fileoff ||
+ scp->fileoff + scp->filesize > (uint64_t)macho_size) {
+ return (LOAD_BADMACHO);
+ }
+ /*
+ * Ensure that the number of sections specified would fit
+ * within the load command size.
+ */
+ if (total_section_size / single_section_size < scp->nsects) {
+ return (LOAD_BADMACHO);
+ }
+ /*
+ * Make sure the segment is page-aligned in the file.
+ */
+ file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
+ file_size = scp->filesize;
+#if __arm64__
+ if (fourk_align) {
+ if ((file_offset & FOURK_PAGE_MASK) != 0) {
+ /*
+ * we can't mmap() it if it's not at least 4KB-aligned
+ * in the file
+ */
+ return LOAD_BADMACHO;
+ }
+ } else
+#endif /* __arm64__ */
+ if ((file_offset & PAGE_MASK_64) != 0 ||
+ /* we can't mmap() it if it's not page-aligned in the file */
+ (file_offset & vm_map_page_mask(map)) != 0) {
+ /*
+ * The 1st test would have failed if the system's page size
+ * were what this process believes the page size to be, so
+ * let's fail here too for the sake of consistency.
+ */
+ return (LOAD_BADMACHO);
+ }
+
+ /*
+ * If we have a code signature attached for this slice,
+ * require that the segments are within the signed part
+ * of the file.
+ */
+ if (result->cs_end_offset &&
+ result->cs_end_offset < (off_t)scp->fileoff &&
+ result->cs_end_offset - scp->fileoff < scp->filesize)
+ {
+ if (cs_debug)
+ printf("section outside code signature\n");
+ return LOAD_BADMACHO;
+ }
+
+ vm_offset = scp->vmaddr + slide;
+ vm_size = scp->vmsize;
+
+ if (vm_size == 0)
+ return (LOAD_SUCCESS);
+ if (scp->vmaddr == 0 &&
+ file_size == 0 &&
+ vm_size != 0 &&
+ (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
+ (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
+ /*
+ * For PIE, extend page zero rather than moving it. Extending
+ * page zero keeps early allocations from falling predictably
+ * between the end of page zero and the beginning of the first
+ * slid segment.
+ */
+ /*
+ * This is a "page zero" segment: it starts at address 0,
+ * is not mapped from the binary file and is not accessible.
+ * User-space should never be able to access that memory, so
+ * make it completely off limits by raising the VM map's
+ * minimum offset.
+ */
+ vm_end = vm_offset + vm_size;
+ if (vm_end < vm_offset) {
+ return (LOAD_BADMACHO);
+ }
+ if (verbose) {
+ MACHO_PRINTF(("++++++ load_segment: "
+ "page_zero up to 0x%llx\n",
+ (uint64_t) vm_end));
+ }
+#if __arm64__
+ if (fourk_align) {
+ /* raise min_offset as much as page-alignment allows */
+ vm_end_aligned = vm_map_trunc_page(vm_end,
+ effective_page_mask);
+ } else
+#endif /* __arm64__ */
+ {
+ vm_end = vm_map_round_page(vm_end,
+ PAGE_MASK_64);
+ vm_end_aligned = vm_end;
+ }
+ ret = vm_map_raise_min_offset(map,
+ vm_end_aligned);
+#if __arm64__
+ if (ret == 0 &&
+ vm_end > vm_end_aligned) {
+ /* use fourk_pager to map the rest of pagezero */
+ assert(fourk_align);
+ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
+ vmk_flags.vmkf_fourk = TRUE;
+ ret = vm_map_enter_mem_object(
+ map,
+ &vm_end_aligned,
+ vm_end - vm_end_aligned,
+ (mach_vm_offset_t) 0, /* mask */
+ VM_FLAGS_FIXED,
+ vmk_flags,
+ VM_KERN_MEMORY_NONE,
+ IPC_PORT_NULL,
+ 0,
+ FALSE, /* copy */
+ (scp->initprot & VM_PROT_ALL),
+ (scp->maxprot & VM_PROT_ALL),
+ VM_INHERIT_DEFAULT);
+ }
+#endif /* __arm64__ */
+
+ if (ret != KERN_SUCCESS) {
+ return (LOAD_FAILURE);
+ }
+ return (LOAD_SUCCESS);
+ } else {
+#if CONFIG_EMBEDDED
+ /* not PAGEZERO: should not be mapped at address 0 */
+ if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
+ return LOAD_BADMACHO;
+ }
+#endif /* CONFIG_EMBEDDED */
+ }
+
+#if __arm64__
+ if (fourk_align) {
+ /* 4K-align */
+ file_start = vm_map_trunc_page(file_offset,
+ FOURK_PAGE_MASK);
+ file_end = vm_map_round_page(file_offset + file_size,
+ FOURK_PAGE_MASK);
+ vm_start = vm_map_trunc_page(vm_offset,
+ FOURK_PAGE_MASK);
+ vm_end = vm_map_round_page(vm_offset + vm_size,
+ FOURK_PAGE_MASK);
+ if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
+ page_aligned(file_start) &&
+ vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
+ page_aligned(vm_start) &&
+ vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
+ /* XXX last segment: ignore mis-aligned tail */
+ file_end = vm_map_round_page(file_end,
+ effective_page_mask);
+ vm_end = vm_map_round_page(vm_end,
+ effective_page_mask);
+ }
+ } else
+#endif /* __arm64__ */
+ {
+ file_start = vm_map_trunc_page(file_offset,
+ effective_page_mask);
+ file_end = vm_map_round_page(file_offset + file_size,
+ effective_page_mask);
+ vm_start = vm_map_trunc_page(vm_offset,
+ effective_page_mask);
+ vm_end = vm_map_round_page(vm_offset + vm_size,
+ effective_page_mask);
+ }
+
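+ /* track the overall VM range covered by the segments loaded so far */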
+ if (vm_start < result->min_vm_addr)
+ result->min_vm_addr = vm_start;
+ if (vm_end > result->max_vm_addr)
+ result->max_vm_addr = vm_end;
+
+ if (map == VM_MAP_NULL)
+ return (LOAD_SUCCESS);
+
+ if (vm_size > 0) {