/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File:   kern/mach_loader.c
 * Author: Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *      Started.
 */

#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>
#include <sys/codesign.h>
#include <sys/proc_uuid_policy.h>
#include <sys/reason.h>
#include <sys/kdebug.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>        /* vm_allocate() */
#include <mach/mach_vm.h>       /* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>
#include <machine/exec.h>
#include <machine/pal_routines.h>

#include <kern/ast.h>
#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/mach_fat.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/page_decrypt.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
#include <IOKit/IOReturn.h>     /* for kIOReturnNotPrivileged */

#include <os/overflow.h>

/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size,
    unsigned int flags);

/* XXX should have prototypes in a shared header file */
extern int get_map_nentries(vm_map_t);

extern kern_return_t memory_object_signed(memory_object_control_t control,
    boolean_t is_signed);

/* An empty load_result_t */
static const load_result_t load_result_null = {
    .mach_header = MACH_VM_MIN_ADDRESS,
    .entry_point = MACH_VM_MIN_ADDRESS,
    .user_stack = MACH_VM_MIN_ADDRESS,
    .user_stack_size = 0,
    .user_stack_alloc = MACH_VM_MIN_ADDRESS,
    .user_stack_alloc_size = 0,
    .all_image_info_addr = MACH_VM_MIN_ADDRESS,
    .all_image_info_size = 0,
    .thread_count = 0,
    .unixproc = 0,
    .dynlinker = 0,
    .needs_dynlinker = 0,
    .validentry = 0,
    .using_lcmain = 0,
    .is_64bit_addr = 0,
    .is_64bit_data = 0,
    .custom_stack = 0,
    .csflags = 0,
    .has_pagezero = 0,
    .uuid = { 0 },
    .min_vm_addr = MACH_VM_MAX_ADDRESS,
    .max_vm_addr = MACH_VM_MIN_ADDRESS,
    .cs_end_offset = 0,
    .threadstate = NULL,
    .threadstate_sz = 0
};
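/*
 * Callers copy this value to reset a load_result_t to known defaults
 * before parsing, e.g. "*result = load_result_null;" in load_machfile().
 */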

/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_t thread,
    struct mach_header *header,
    off_t file_offset,
    off_t macho_size,
    int depth,
    int64_t slide,
    int64_t dyld_slide,
    load_result_t *result,
    load_result_t *binresult,
    struct image_params *imgp
    );

static load_return_t
load_segment(
    struct load_command *lcp,
    uint32_t filetype,
    void *control,
    off_t pager_offset,
    off_t macho_size,
    struct vnode *vp,
    vm_map_t map,
    int64_t slide,
    load_result_t *result
    );

static load_return_t
load_uuid(
    struct uuid_command *uulp,
    char *command_end,
    load_result_t *result
    );

static load_return_t
load_version(
    struct version_min_command *vmc,
    boolean_t *found_version_cmd,
    load_result_t *result
    );

static load_return_t
load_code_signature(
    struct linkedit_data_command *lcp,
    struct vnode *vp,
    off_t macho_offset,
    off_t macho_size,
    cpu_type_t cputype,
    load_result_t *result,
    struct image_params *imgp);

#if CONFIG_CODE_DECRYPTION
static load_return_t
set_code_unprotect(
    struct encryption_info_command *lcp,
    caddr_t addr,
    vm_map_t map,
    int64_t slide,
    struct vnode *vp,
    off_t macho_offset,
    cpu_type_t cputype,
    cpu_subtype_t cpusubtype);
#endif

static
load_return_t
load_main(
    struct entry_point_command *epc,
    thread_t thread,
    int64_t slide,
    load_result_t *result
    );

static
load_return_t
setup_driver_main(
    thread_t thread,
    int64_t slide,
    load_result_t *result
    );

static load_return_t
load_unixthread(
    struct thread_command *tcp,
    thread_t thread,
    int64_t slide,
    load_result_t *result
    );

static load_return_t
load_threadstate(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    load_result_t *
    );

static load_return_t
load_threadstack(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    mach_vm_offset_t *user_stack,
    int *customstack,
    load_result_t *result
    );

static load_return_t
load_threadentry(
    thread_t thread,
    uint32_t *ts,
    uint32_t total_size,
    mach_vm_offset_t *entry_point
    );

static load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    integer_t archbits,
    vm_map_t map,
    thread_t thread,
    int depth,
    int64_t slide,
    load_result_t *result,
    struct image_params *imgp
    );

#if __x86_64__
extern int bootarg_no32exec;
static boolean_t
check_if_simulator_binary(
    struct image_params *imgp,
    off_t file_offset,
    off_t macho_size);
#endif

struct macho_data;

static load_return_t
get_macho_vnode(
    const char *path,
    integer_t archbits,
    struct mach_header *mach_header,
    off_t *file_offset,
    off_t *macho_size,
    struct macho_data *macho_data,
    struct vnode **vpp
    );

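/*
 * Zero-extend a 32-bit LC_SEGMENT command into the 64-bit form, so that
 * the rest of the loader can handle both ABIs with one code path.
 */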
static inline void
widen_segment_command(const struct segment_command *scp32,
    struct segment_command_64 *scp)
{
    scp->cmd = scp32->cmd;
    scp->cmdsize = scp32->cmdsize;
    bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
    scp->vmaddr = scp32->vmaddr;
    scp->vmsize = scp32->vmsize;
    scp->fileoff = scp32->fileoff;
    scp->filesize = scp32->filesize;
    scp->maxprot = scp32->maxprot;
    scp->initprot = scp32->initprot;
    scp->nsects = scp32->nsects;
    scp->flags = scp32->flags;
}

static void
note_all_image_info_section(const struct segment_command_64 *scp,
    boolean_t is64, size_t section_size, const void *sections,
    int64_t slide, load_result_t *result)
{
    const union {
        struct section s32;
        struct section_64 s64;
    } *sectionp;
    unsigned int i;

    if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) {
        return;
    }
    for (i = 0; i < scp->nsects; ++i) {
        sectionp = (const void *)
            ((const char *)sections + section_size * i);
        if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
            sizeof(sectionp->s64.sectname))) {
            result->all_image_info_addr =
                is64 ? sectionp->s64.addr : sectionp->s32.addr;
            result->all_image_info_addr += slide;
            result->all_image_info_size =
                is64 ? sectionp->s64.size : sectionp->s32.size;
            return;
        }
    }
}

#if __arm64__
/*
 * Allow bypassing some security rules (hard pagezero, no write+execute)
 * in exchange for better binary compatibility for legacy apps built
 * before 16KB-alignment was enforced.
 */
const int fourk_binary_compatibility_unsafe = TRUE;
const int fourk_binary_compatibility_allow_wx = FALSE;
#endif /* __arm64__ */

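/*
 * load_machfile() is the entry point into this file: the Mach-O image
 * activator (exec_mach_imgact() in bsd/kern/kern_exec.c) calls it with
 * the slice selected from the (possibly fat) executable. It creates a
 * fresh pmap and vm_map, picks the ASLR slides, and delegates the actual
 * parsing and mapping to parse_machfile().
 */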
load_return_t
load_machfile(
    struct image_params *imgp,
    struct mach_header *header,
    thread_t thread,
    vm_map_t *mapp,
    load_result_t *result
    )
{
    struct vnode *vp = imgp->ip_vp;
    off_t file_offset = imgp->ip_arch_offset;
    off_t macho_size = imgp->ip_arch_size;
    off_t file_size = imgp->ip_vattr->va_data_size;
    pmap_t pmap = 0; /* protected by create_map */
    vm_map_t map;
    load_result_t myresult;
    load_return_t lret;
    boolean_t enforce_hard_pagezero = TRUE;
    int in_exec = (imgp->ip_flags & IMGPF_EXEC);
    task_t task = current_task();
    int64_t aslr_page_offset = 0;
    int64_t dyld_aslr_page_offset = 0;
    int64_t aslr_section_size = 0;
    int64_t aslr_section_offset = 0;
    kern_return_t kret;
    unsigned int pmap_flags = 0;

    if (macho_size > file_size) {
        return LOAD_BADMACHO;
    }

    result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
    result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
#if defined(HAS_APPLE_PAC)
    pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
#endif /* defined(HAS_APPLE_PAC) */
    pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;

    task_t ledger_task;
    if (imgp->ip_new_thread) {
        ledger_task = get_threadtask(imgp->ip_new_thread);
    } else {
        ledger_task = task;
    }
    pmap = pmap_create_options(get_task_ledger(ledger_task),
        (vm_map_size_t) 0,
        pmap_flags);
    if (pmap == NULL) {
        return LOAD_RESOURCE;
    }
    map = vm_map_create(pmap,
        0,
        vm_compute_max_offset(result->is_64bit_addr),
        TRUE);

#if defined(__arm64__)
    if (result->is_64bit_addr) {
        /* enforce 16KB alignment of VM map entries */
        vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
    } else {
        vm_map_set_page_shift(map, page_shift_user32);
    }
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
    /* enforce 16KB alignment for watch targets with new ABI */
    vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */

#ifndef CONFIG_ENFORCE_SIGNED_CODE
    /* This turns off faulting for executable pages, which makes it
     * possible to circumvent Code Signing Enforcement. The per process
     * flag (CS_ENFORCEMENT) is not set yet, but we can use the
     * global flag.
     */
    if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
        vm_map_disable_NX(map);
        // TODO: Message Trace or log that this is happening
    }
#endif

    /* Forcibly disallow execution from data pages even if the arch
     * normally permits it. */
    if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
        vm_map_disallow_data_exec(map);
    }

    /*
     * Compute a random offset for ASLR, and an independent random offset for dyld.
     */
    if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
        vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
        aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;

        aslr_page_offset = random();
        aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
        aslr_page_offset <<= vm_map_page_shift(map);

        dyld_aslr_page_offset = random();
        dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
        dyld_aslr_page_offset <<= vm_map_page_shift(map);

        aslr_page_offset += aslr_section_offset;
    }
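
    /*
     * For illustration (hypothetical values): on a map with 16KB pages
     * (vm_map_page_shift(map) == 14), if vm_map_get_max_aslr_slide_pages()
     * returned 0x1000, the main-binary slide above works out to
     *
     *     aslr_page_offset = (random() % 0x1000) << 14;
     *
     * i.e. a page-aligned slide of up to 64MB. dyld's slide is drawn
     * independently so its load address does not correlate with the
     * main binary's.
     */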

    if (!result) {
        result = &myresult;
    }

    *result = load_result_null;

    /*
     * re-set the bitness on the load result since we cleared the load result above.
     */
    result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
    result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);

    lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
        0, aslr_page_offset, dyld_aslr_page_offset, result,
        NULL, imgp);

    if (lret != LOAD_SUCCESS) {
        vm_map_deallocate(map); /* will lose pmap reference too */
        return lret;
    }

#if __x86_64__
    /*
     * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
     */
    if (!result->is_64bit_addr) {
        enforce_hard_pagezero = FALSE;
    }

    /*
     * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
     * to the start address for "anywhere" memory allocations.
     */
#define VM_MAP_HIGH_START_BITS_COUNT 8
#define VM_MAP_HIGH_START_BITS_SHIFT 27
    if (result->is_64bit_addr &&
        (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
        int random_bits;
        vm_map_offset_t high_start;

        random_bits = random();
        random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
        high_start = (((vm_map_offset_t)random_bits)
            << VM_MAP_HIGH_START_BITS_SHIFT);
        vm_map_set_high_start(map, high_start);
    }
#endif /* __x86_64__ */

    /*
     * Check to see if the page zero is enforced by the map->min_offset.
     */
    if (enforce_hard_pagezero &&
        (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
#if __arm64__
        if (!result->is_64bit_addr && /* not 64-bit address space */
            !(header->flags & MH_PIE) && /* not PIE */
            (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
            PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
            result->has_pagezero && /* has a "soft" page zero */
            fourk_binary_compatibility_unsafe) {
            /*
             * For backwards compatibility of "4K" apps on
             * a 16K system, do not enforce a hard page zero...
             */
        } else
#endif /* __arm64__ */
        {
            vm_map_deallocate(map); /* will lose pmap reference too */
            return LOAD_BADMACHO;
        }
    }

    vm_commit_pagezero_status(map);

    /*
     * If this is an exec, then we are going to destroy the old
     * task, and it's correct to halt it; if it's spawn, the
     * task is not yet running, and it makes no sense.
     */
    if (in_exec) {
        proc_t p = vfs_context_proc(imgp->ip_vfs_context);
        /*
         * Mark the task as halting and start the other
         * threads towards terminating themselves. Then
         * make sure any threads waiting for a process
         * transition get informed that we are committed to
         * this transition, and then finally complete the
         * task halting (wait for threads and then cleanup
         * task resources).
         *
         * NOTE: task_start_halt() makes sure that no new
         * threads are created in the task during the transition.
         * We need to mark the workqueue as exiting before we
         * wait for threads to terminate (at the end of which
         * we no longer have a prohibition on thread creation).
         *
         * Finally, clean up any lingering workqueue data structures
         * that may have been left behind by the workqueue threads
         * as they exited (and then clean up the work queue itself).
         */
        kret = task_start_halt(task);
        if (kret != KERN_SUCCESS) {
            vm_map_deallocate(map); /* will lose pmap reference too */
            return LOAD_FAILURE;
        }
        proc_transcommit(p, 0);
        workq_mark_exiting(p);
        task_complete_halt(task);
        workq_exit(p);

        /*
         * Roll up accounting info to new task. The roll up is done after
         * task_complete_halt to make sure the thread accounting info is
         * rolled up to current_task.
         */
        task_rollup_accounting_info(get_threadtask(thread), task);
    }
    *mapp = map;

#ifdef CONFIG_32BIT_TELEMETRY
    if (!result->is_64bit_data) {
        /*
         * This may not need to be an AST; we merely need to ensure that
         * we gather telemetry at the point where all of the information
         * that we want has been added to the process.
         */
        task_set_32bit_log_flag(get_threadtask(thread));
        act_set_astbsd(thread);
    }
#endif /* CONFIG_32BIT_TELEMETRY */

    return LOAD_SUCCESS;
}

int macho_printf = 0;
#define MACHO_PRINTF(args)              \
    do {                                \
        if (macho_printf) {             \
            printf args;                \
        }                               \
    } while (0)

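/*
 * Usage sketch (example call only): the doubled parentheses pass an
 * entire printf() argument list through the single macro parameter:
 *
 *     MACHO_PRINTF(("load_segment at 0x%llx\n", (uint64_t)vm_start));
 *
 * Output is gated on the macho_printf global above.
 */
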
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * 32 bits is the limit on the kalloc() of enough bytes for a mach_header
 * and the contents of its sizeofcmds, which is currently constrained to
 * 32 bits in the file format itself. We read the commands section into
 * a kernel buffer, and then parse it in order to process the mach-o
 * load_command segment(s). We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_t thread,
    struct mach_header *header,
    off_t file_offset,
    off_t macho_size,
    int depth,
    int64_t aslr_offset,
    int64_t dyld_aslr_offset,
    load_result_t *result,
    load_result_t *binresult,
    struct image_params *imgp
    )
{
    uint32_t ncmds;
    struct load_command *lcp;
    struct dylinker_command *dlp = 0;
    integer_t dlarchbits = 0;
    void * control;
    load_return_t ret = LOAD_SUCCESS;
    void * addr;
    vm_size_t alloc_size, cmds_size;
    size_t offset;
    size_t oldoffset;       /* for overflow check */
    int pass;
    proc_t p = vfs_context_proc(imgp->ip_vfs_context);
    int error;
    int resid = 0;
    int spawn = (imgp->ip_flags & IMGPF_SPAWN);
    int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
    size_t mach_header_sz = sizeof(struct mach_header);
    boolean_t abi64;
    boolean_t got_code_signatures = FALSE;
    boolean_t found_header_segment = FALSE;
    boolean_t found_xhdr = FALSE;
    boolean_t found_version_cmd = FALSE;
    int64_t slide = 0;
    boolean_t dyld_no_load_addr = FALSE;
    boolean_t is_dyld = FALSE;
    vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
#if __arm64__
    uint32_t pagezero_end = 0;
    uint32_t executable_end = 0;
    uint32_t writable_start = 0;
    vm_map_size_t effective_page_size;

    effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
#endif /* __arm64__ */

    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     * Break infinite recursion
     */
    if (depth > 1) {
        return LOAD_FAILURE;
    }

    depth++;

    /*
     * Check to see if this is the right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
        !grade_binary(header->cputype,
        header->cpusubtype & ~CPU_SUBTYPE_MASK, TRUE)) {
        return LOAD_BADARCH;
    }

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {
    case MH_EXECUTE:
        if (depth != 1) {
            return LOAD_FAILURE;
        }
#if CONFIG_EMBEDDED
        if (header->flags & MH_DYLDLINK) {
            /* Check properties of dynamic executables */
            if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
                return LOAD_FAILURE;
            }
            result->needs_dynlinker = TRUE;
        } else {
            /* Check properties of static executables (disallowed except for development) */
#if !(DEVELOPMENT || DEBUG)
            return LOAD_FAILURE;
#endif
        }
#endif /* CONFIG_EMBEDDED */

        break;
    case MH_DYLINKER:
        if (depth != 2) {
            return LOAD_FAILURE;
        }
        is_dyld = TRUE;
        break;

    default:
        return LOAD_FAILURE;
    }

    /*
     * Get the pager for the file.
     */
    control = ubc_getobject(vp, UBC_FLAGS_NONE);

    /* ensure header + sizeofcmds falls within the file */
    if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
        (off_t)cmds_size > macho_size ||
        round_page_overflow(cmds_size, &alloc_size)) {
        return LOAD_BADMACHO;
    }

    /*
     * Map the load commands into kernel memory.
     */
    addr = kalloc(alloc_size);
    if (addr == NULL) {
        return LOAD_NOSPACE;
    }

    error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
        UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p);
    if (error) {
        kfree(addr, alloc_size);
        return LOAD_IOERROR;
    }

    if (resid) {
        /* We must be able to read in as much as the mach_header indicated */
        kfree(addr, alloc_size);
        return LOAD_BADMACHO;
    }

    /*
     * For PIE and dyld, slide everything by the ASLR offset.
     */
    if ((header->flags & MH_PIE) || is_dyld) {
        slide = aslr_offset;
    }

    /*
     * Scan through the commands, processing each one as necessary.
     * We parse in four passes through the headers:
     * 0: determine if TEXT and DATA boundary can be page-aligned
     * 1: thread state, uuid, code signature
     * 2: segments
     * 3: dyld, encryption, check entry point
     */

    boolean_t slide_realign = FALSE;
#if __arm64__
    if (!abi64) {
        slide_realign = TRUE;
    }
#endif

    for (pass = 0; pass <= 3; pass++) {
        if (pass == 0 && !slide_realign && !is_dyld) {
            /* if we don't need to realign the slide or determine dyld's load
             * address, pass 0 can be skipped */
            continue;
        } else if (pass == 1) {
#if __arm64__
            boolean_t is_pie;
            int64_t adjust;

            is_pie = ((header->flags & MH_PIE) != 0);
            if (pagezero_end != 0 &&
                pagezero_end < effective_page_size) {
                /* need at least 1 page for PAGEZERO */
                adjust = effective_page_size;
                MACHO_PRINTF(("pagezero boundary at "
                    "0x%llx; adjust slide from "
                    "0x%llx to 0x%llx%s\n",
                    (uint64_t) pagezero_end,
                    slide,
                    slide + adjust,
                    (is_pie
                    ? ""
                    : " BUT NO PIE ****** :-(")));
                if (is_pie) {
                    slide += adjust;
                    pagezero_end += adjust;
                    executable_end += adjust;
                    writable_start += adjust;
                }
            }
            if (pagezero_end != 0) {
                result->has_pagezero = TRUE;
            }
            if (executable_end == writable_start &&
                (executable_end & effective_page_mask) != 0 &&
                (executable_end & FOURK_PAGE_MASK) == 0) {
                /*
                 * The TEXT/DATA boundary is 4K-aligned but
                 * not page-aligned. Adjust the slide to make
                 * it page-aligned and avoid having a page
                 * with both write and execute permissions.
                 */
                adjust =
                    (effective_page_size -
                    (executable_end & effective_page_mask));
                MACHO_PRINTF(("page-unaligned X-W boundary at "
                    "0x%llx; adjust slide from "
                    "0x%llx to 0x%llx%s\n",
                    (uint64_t) executable_end,
                    slide,
                    slide + adjust,
                    (is_pie
                    ? ""
                    : " BUT NO PIE ****** :-(")));
                if (is_pie) {
                    slide += adjust;
                }
            }
#endif /* __arm64__ */

            if (dyld_no_load_addr && binresult) {
                /*
                 * The dyld Mach-O does not specify a load address. Try to locate
                 * it right after the main binary. If binresult == NULL, load
                 * directly to the given slide.
                 */
                slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask);
            }
        }

        /*
         * Check that the entry point is contained in an executable segment
         */
        if (pass == 3) {
            if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) {
                /* Driver binaries must have driverkit platform */
                if (result->ip_platform == PLATFORM_DRIVERKIT) {
                    /* Driver binaries have no entry point */
                    ret = setup_driver_main(thread, slide, result);
                } else {
                    ret = LOAD_FAILURE;
                }
            } else if (!result->using_lcmain && result->validentry == 0) {
                ret = LOAD_FAILURE;
            }
            if (ret != KERN_SUCCESS) {
                thread_state_initialize(thread);
                break;
            }
        }

        /*
         * Check that some segment maps the start of the mach-o file, which is
         * needed by the dynamic loader to read the mach headers, etc.
         */
        if ((pass == 3) && (found_header_segment == FALSE)) {
            ret = LOAD_BADMACHO;
            break;
        }

        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;

        while (ncmds--) {
            /* ensure enough space for a minimal load command */
            if (offset + sizeof(struct load_command) > cmds_size) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents. Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
                lcp->cmdsize < sizeof(struct load_command) ||
                offset > cmds_size) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             * Note that each load command implementation is expected to validate
             * that lcp->cmdsize is large enough to fit its specific struct type
             * before dereferencing fields not covered by struct load_command.
             */
            switch (lcp->cmd) {
            case LC_SEGMENT: {
                struct segment_command *scp = (struct segment_command *) lcp;
                if (scp->cmdsize < sizeof(*scp)) {
                    ret = LOAD_BADMACHO;
                    break;
                }
                if (pass == 0) {
                    if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
                        dyld_no_load_addr = TRUE;
                        if (!slide_realign) {
                            /* got what we need, bail early on pass 0 */
                            continue;
                        }
                    }

#if __arm64__
                    assert(!abi64);

                    if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
                        /* PAGEZERO */
                        if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
                    if (scp->initprot & VM_PROT_EXECUTE) {
                        /* TEXT */
                        if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
                    if (scp->initprot & VM_PROT_WRITE) {
                        /* DATA */
                        if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
                            ret = LOAD_BADMACHO;
                            break;
                        }
                    }
#endif /* __arm64__ */

                    break;
                }

                if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
                    found_xhdr = TRUE;
                }

                if (pass != 2) {
                    break;
                }

                if (abi64) {
                    /*
                     * Having an LC_SEGMENT command for the
                     * wrong ABI is invalid <rdar://problem/11021230>
                     */
                    ret = LOAD_BADMACHO;
                    break;
                }

                ret = load_segment(lcp,
                    header->filetype,
                    control,
                    file_offset,
                    macho_size,
                    vp,
                    map,
                    slide,
                    result);
                if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
                    /* Enforce a single segment mapping offset zero, with R+X
                     * protection. */
                    if (found_header_segment ||
                        ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
                        ret = LOAD_BADMACHO;
                        break;
                    }
                    found_header_segment = TRUE;
                }

                break;
            }
            case LC_SEGMENT_64: {
                struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
                if (scp64->cmdsize < sizeof(*scp64)) {
                    ret = LOAD_BADMACHO;
                    break;
                }
                if (pass == 0) {
                    if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
                        dyld_no_load_addr = TRUE;
                        if (!slide_realign) {
                            /* got what we need, bail early on pass 0 */
                            continue;
                        }
                    }
                }

                if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
                    found_xhdr = TRUE;
                }

                if (pass != 2) {
                    break;
                }

                if (!abi64) {
                    /*
                     * Having an LC_SEGMENT_64 command for the
                     * wrong ABI is invalid <rdar://problem/11021230>
                     */
                    ret = LOAD_BADMACHO;
                    break;
                }

                ret = load_segment(lcp,
                    header->filetype,
                    control,
                    file_offset,
                    macho_size,
                    vp,
                    map,
                    slide,
                    result);

                if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
                    /* Enforce a single segment mapping offset zero, with R+X
                     * protection. */
                    if (found_header_segment ||
                        ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
                        ret = LOAD_BADMACHO;
                        break;
                    }
                    found_header_segment = TRUE;
                }

                break;
            }
            case LC_UNIXTHREAD:
                if (pass != 1) {
                    break;
                }
                ret = load_unixthread(
                    (struct thread_command *) lcp,
                    thread,
                    slide,
                    result);
                break;
            case LC_MAIN:
                if (pass != 1) {
                    break;
                }
                if (depth != 1) {
                    break;
                }
                ret = load_main(
                    (struct entry_point_command *) lcp,
                    thread,
                    slide,
                    result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 3) {
                    break;
                }
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_UUID:
                if (pass == 1 && depth == 1) {
                    ret = load_uuid((struct uuid_command *) lcp,
                        (char *)addr + cmds_size,
                        result);
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 1) {
                    break;
                }
                /* pager -> uip ->
                 * load signatures & store in uip
                 * set VM object "signed_pages"
                 */
                ret = load_code_signature(
                    (struct linkedit_data_command *) lcp,
                    vp,
                    file_offset,
                    macho_size,
                    header->cputype,
                    result,
                    imgp);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                        "for file \"%s\"\n",
                        p->p_pid, ret, vp->v_name);
                    /*
                     * Allow injections to be ignored on devices w/o enforcement enabled
                     */
                    if (!cs_process_global_enforcement()) {
                        ret = LOAD_SUCCESS; /* ignore error */
                    }
                } else {
                    got_code_signatures = TRUE;
                }

                if (got_code_signatures) {
                    unsigned tainted = CS_VALIDATE_TAINTED;
                    boolean_t valid = FALSE;
                    vm_size_t off = 0;

                    if (cs_debug > 10) {
                        printf("validating initial pages of %s\n", vp->v_name);
                    }

                    while (off < alloc_size && ret == LOAD_SUCCESS) {
                        tainted = CS_VALIDATE_TAINTED;

                        valid = cs_validate_range(vp,
                            NULL,
                            file_offset + off,
                            addr + off,
                            PAGE_SIZE,
                            &tainted);
                        if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
                            if (cs_debug) {
                                printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
                                    vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
                            }
                            if (cs_process_global_enforcement() ||
                                (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
                                ret = LOAD_FAILURE;
                            }
                            result->csflags &= ~CS_VALID;
                        }
                        off += PAGE_SIZE;
                    }
                }

                break;
#if CONFIG_CODE_DECRYPTION
            case LC_ENCRYPTION_INFO:
            case LC_ENCRYPTION_INFO_64:
                if (pass != 3) {
                    break;
                }
                ret = set_code_unprotect(
                    (struct encryption_info_command *) lcp,
                    addr, map, slide, vp, file_offset,
                    header->cputype, header->cpusubtype);
                if (ret != LOAD_SUCCESS) {
                    os_reason_t load_failure_reason = OS_REASON_NULL;
                    printf("proc %d: set_code_unprotect() error %d "
                        "for file \"%s\"\n",
                        p->p_pid, ret, vp->v_name);
                    /*
                     * Don't let the app run if it's
                     * encrypted but we failed to set up the
                     * decrypter. If the keys are missing it will
                     * return LOAD_DECRYPTFAIL.
                     */
                    if (ret == LOAD_DECRYPTFAIL) {
                        /* failed to load due to missing FP keys */
                        proc_lock(p);
                        p->p_lflag |= P_LTERM_DECRYPTFAIL;
                        proc_unlock(p);

                        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                            p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
                        load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
                    } else {
                        KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
                            p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
                        load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
                    }

                    /*
                     * Don't signal the process if it was forked and in a partially constructed
                     * state as part of a spawn -- it will just be torn down when the exec fails.
                     */
                    if (!spawn) {
                        assert(load_failure_reason != OS_REASON_NULL);
                        if (vfexec) {
                            psignal_vfork_with_reason(p, get_threadtask(imgp->ip_new_thread), imgp->ip_new_thread, SIGKILL, load_failure_reason);
                            load_failure_reason = OS_REASON_NULL;
                        } else {
                            psignal_with_reason(p, SIGKILL, load_failure_reason);
                            load_failure_reason = OS_REASON_NULL;
                        }
                    } else {
                        os_reason_free(load_failure_reason);
                        load_failure_reason = OS_REASON_NULL;
                    }
                }
                break;
#endif
            case LC_VERSION_MIN_IPHONEOS:
            case LC_VERSION_MIN_MACOSX:
            case LC_VERSION_MIN_WATCHOS:
            case LC_VERSION_MIN_TVOS: {
                struct version_min_command *vmc;

                if (depth != 1 || pass != 1) {
                    break;
                }
                vmc = (struct version_min_command *) lcp;
                ret = load_version(vmc, &found_version_cmd, result);
                break;
            }
            case LC_BUILD_VERSION: {
                if (depth != 1 || pass != 1) {
                    break;
                }
                struct build_version_command* bvc = (struct build_version_command*)lcp;
                if (bvc->cmdsize < sizeof(*bvc)) {
                    ret = LOAD_BADMACHO;
                    break;
                }
                if (found_version_cmd == TRUE) {
                    ret = LOAD_BADMACHO;
                    break;
                }
                result->ip_platform = bvc->platform;
                found_version_cmd = TRUE;
                break;
            }
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS) {
                break;
            }
        }
        if (ret != LOAD_SUCCESS) {
            break;
        }
    }

    if (ret == LOAD_SUCCESS) {
        if (!got_code_signatures && cs_process_global_enforcement()) {
            ret = LOAD_FAILURE;
        }

        /* Make sure if we need dyld, we got it */
        if (result->needs_dynlinker && !dlp) {
            ret = LOAD_FAILURE;
        }

        if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
            /*
             * load the dylinker, and slide it by the independent DYLD ASLR
             * offset regardless of the PIE-ness of the main binary.
             */
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
                dyld_aslr_offset, result, imgp);
        }

        if ((ret == LOAD_SUCCESS) && (depth == 1)) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            }
#if CONFIG_ENFORCE_SIGNED_CODE
            if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
                ret = LOAD_FAILURE;
            }
#endif
        }
    }

    if (ret == LOAD_BADMACHO && found_xhdr) {
        ret = LOAD_BADMACHO_UPX;
    }

    kfree(addr, alloc_size);

    return ret;
}

load_return_t
validate_potential_simulator_binary(
    cpu_type_t exectype __unused,
    struct image_params *imgp __unused,
    off_t file_offset __unused,
    off_t macho_size __unused)
{
#if __x86_64__
    /* Allow 32 bit exec only for simulator binaries */
    if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
        if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
            boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
            imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
        }

        if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
            return LOAD_BADARCH;
        }
    }
#endif
    return LOAD_SUCCESS;
}

#if __x86_64__
static boolean_t
check_if_simulator_binary(
    struct image_params *imgp,
    off_t file_offset,
    off_t macho_size)
{
    struct mach_header *header;
    char *ip_vdata = NULL;
    kauth_cred_t cred = NULL;
    uint32_t ncmds;
    struct load_command *lcp;
    boolean_t simulator_binary = FALSE;
    void * addr = NULL;
    vm_size_t alloc_size, cmds_size;
    size_t offset;
    proc_t p = current_proc(); /* XXXX */
    int error;
    int resid = 0;
    size_t mach_header_sz = sizeof(struct mach_header);

    cred = kauth_cred_proc_ref(p);

    /* Allocate page to copyin mach header */
    ip_vdata = kalloc(PAGE_SIZE);
    if (ip_vdata == NULL) {
        goto bad;
    }

    /* Read the Mach-O header */
    error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata,
        PAGE_SIZE, file_offset,
        UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED),
        cred, &resid, p);
    if (error) {
        goto bad;
    }

    header = (struct mach_header *)ip_vdata;

    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /* ensure header + sizeofcmds falls within the file */
    if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
        (off_t)cmds_size > macho_size ||
        round_page_overflow(cmds_size, &alloc_size)) {
        goto bad;
    }

    /*
     * Map the load commands into kernel memory.
     */
    addr = kalloc(alloc_size);
    if (addr == NULL) {
        goto bad;
    }

    error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, alloc_size, file_offset,
        UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
    if (error) {
        goto bad;
    }

    if (resid) {
        /* We must be able to read in as much as the mach_header indicated */
        goto bad;
    }

    /*
     * Loop through each of the load_commands indicated by the
     * Mach-O header; if an absurd value is provided, we just
     * run off the end of the reserved section by incrementing
     * the offset too far, so we are implicitly fail-safe.
     */
    offset = mach_header_sz;
    ncmds = header->ncmds;

    while (ncmds--) {
        /* ensure enough space for a minimal load command */
        if (offset + sizeof(struct load_command) > cmds_size) {
            break;
        }

        /*
         * Get a pointer to the command.
         */
        lcp = (struct load_command *)(addr + offset);

        /*
         * Perform prevalidation of the struct load_command
         * before we attempt to use its contents. Invalid
         * values are ones which result in an overflow, or
         * which can not possibly be valid commands, or which
         * straddle or exist past the reserved section at the
         * start of the image.
         */
        if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
            lcp->cmdsize < sizeof(struct load_command) ||
            offset > cmds_size) {
            break;
        }

        /* Check if it's a simulator binary. */
        switch (lcp->cmd) {
        case LC_VERSION_MIN_WATCHOS:
            simulator_binary = TRUE;
            break;

        case LC_BUILD_VERSION: {
            struct build_version_command *bvc;

            bvc = (struct build_version_command *) lcp;
            if (bvc->cmdsize < sizeof(*bvc)) {
                /* unsafe to use this command struct if cmdsize
                 * validated above is too small for it to fit */
                break;
            }
            if (bvc->platform == PLATFORM_IOSSIMULATOR ||
                bvc->platform == PLATFORM_WATCHOSSIMULATOR) {
                simulator_binary = TRUE;
            }

            break;
        }

        case LC_VERSION_MIN_IPHONEOS: {
            simulator_binary = TRUE;
            break;
        }

        default:
            /* ignore other load commands */
            break;
        }

        if (simulator_binary == TRUE) {
            break;
        }
    }

bad:
    if (ip_vdata) {
        kfree(ip_vdata, PAGE_SIZE);
    }

    if (cred) {
        kauth_cred_unref(&cred);
    }

    if (addr) {
        kfree(addr, alloc_size);
    }

    return simulator_binary;
}
#endif /* __x86_64__ */
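
/*
 * Note: bootarg_no32exec is set from the "no32exec" boot-arg at startup;
 * when enabled, the only 32-bit x86 images allowed to exec are simulator
 * binaries, identified by the version/build-version load commands above.
 */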

#if CONFIG_CODE_DECRYPTION

#define APPLE_UNPROTECTED_HEADER_SIZE   (3 * 4096)
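/* i.e. the first 12KB (three 4KB pages) of the slice stay in the clear */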

static load_return_t
unprotect_dsmos_segment(
    uint64_t file_off,
    uint64_t file_size,
    struct vnode *vp,
    off_t macho_offset,
    vm_map_t map,
    vm_map_offset_t map_addr,
    vm_map_size_t map_size)
{
    kern_return_t kr;

    /*
     * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
     * this part of a Universal binary) are not protected...
     * The rest needs to be "transformed".
     */
    if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
        file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
        /* it's all unprotected, nothing to do... */
        kr = KERN_SUCCESS;
    } else {
        if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
            /*
             * We start mapping in the unprotected area.
             * Skip the unprotected part...
             */
            vm_map_offset_t delta;

            delta = APPLE_UNPROTECTED_HEADER_SIZE;
            delta -= file_off;
            map_addr += delta;
            map_size -= delta;
        }
        /* ... transform the rest of the mapping. */
        struct pager_crypt_info crypt_info;
        crypt_info.page_decrypt = dsmos_page_transform;
        crypt_info.crypt_ops = NULL;
        crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
        crypt_info.crypt_ops = (void *)0x2e69cf40;
        vm_map_offset_t crypto_backing_offset;
        crypto_backing_offset = -1; /* i.e. use map entry's offset */
#if VM_MAP_DEBUG_APPLE_PROTECT
        if (vm_map_debug_apple_protect) {
            struct proc *p;
            p = current_proc();
            printf("APPLE_PROTECT: %d[%s] map %p "
                "[0x%llx:0x%llx] %s(%s)\n",
                p->p_pid, p->p_comm, map,
                (uint64_t) map_addr,
                (uint64_t) (map_addr + map_size),
                __FUNCTION__, vp->v_name);
        }
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

        /* The DSMOS pager can only be used by apple signed code */
        struct cs_blob * blob = csvnode_get_blob(vp, file_off);
        if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) {
            return LOAD_FAILURE;
        }

        kr = vm_map_apple_protected(map,
            map_addr,
            map_addr + map_size,
            crypto_backing_offset,
            &crypt_info);
    }

    if (kr != KERN_SUCCESS) {
        return LOAD_FAILURE;
    }
    return LOAD_SUCCESS;
}
#else /* CONFIG_CODE_DECRYPTION */
static load_return_t
unprotect_dsmos_segment(
    __unused uint64_t file_off,
    __unused uint64_t file_size,
    __unused struct vnode *vp,
    __unused off_t macho_offset,
    __unused vm_map_t map,
    __unused vm_map_offset_t map_addr,
    __unused vm_map_size_t map_size)
{
    return LOAD_SUCCESS;
}
#endif /* CONFIG_CODE_DECRYPTION */


/*
 * map_segment:
 * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
 * page size) issues.
 *
 * The mapping might result in 1, 2 or 3 map entries:
 * 1. for the first page, which could overlap with the previous
 *    mapping,
 * 2. for the center (if applicable),
 * 3. for the last page, which could overlap with the next mapping.
 *
 * For each of those map entries, we might have to interpose a
 * "fourk_pager" to deal with mis-alignment wrt the system page size,
 * either in the mapping address and/or size or the file offset and/or
 * size.
 * The "fourk_pager" itself would be mapped with proper alignment
 * wrt the system page size and would then be populated with the
 * information about the intended mapping, with a "4KB" granularity.
 */
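/*
 * Illustrative example (hypothetical addresses): with 16KB system pages,
 * mapping vm [0x5000, 0xD000) produces up to three entries:
 *
 *     [0x5000, 0x8000)  first page, through a "fourk_pager",
 *     [0x8000, 0xC000)  center, as a regular page-aligned mapping,
 *     [0xC000, 0xD000)  last page, through a "fourk_pager".
 */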
static kern_return_t
map_segment(
    vm_map_t map,
    vm_map_offset_t vm_start,
    vm_map_offset_t vm_end,
    memory_object_control_t control,
    vm_map_offset_t file_start,
    vm_map_offset_t file_end,
    vm_prot_t initprot,
    vm_prot_t maxprot,
    load_result_t *result)
{
    vm_map_offset_t cur_offset, cur_start, cur_end;
    kern_return_t ret;
    vm_map_offset_t effective_page_mask;
    vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;

    if (vm_end < vm_start ||
        file_end < file_start) {
        return LOAD_BADMACHO;
    }
    if (vm_end == vm_start ||
        file_end == file_start) {
        /* nothing to map... */
        return LOAD_SUCCESS;
    }

    effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));

    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    if (vm_map_page_aligned(vm_start, effective_page_mask) &&
        vm_map_page_aligned(vm_end, effective_page_mask) &&
        vm_map_page_aligned(file_start, effective_page_mask) &&
        vm_map_page_aligned(file_end, effective_page_mask)) {
        /* all page-aligned and map-aligned: proceed */
    } else {
#if __arm64__
        /* use an intermediate "4K" pager */
        vmk_flags.vmkf_fourk = TRUE;
#else /* __arm64__ */
        panic("map_segment: unexpected mis-alignment "
            "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
            (uint64_t) vm_start,
            (uint64_t) vm_end,
            (uint64_t) file_start,
            (uint64_t) file_end);
#endif /* __arm64__ */
    }

    cur_offset = 0;
    cur_start = vm_start;
    cur_end = vm_start;
#if __arm64__
    if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
        /* one 4K pager for the 1st page */
        cur_end = vm_map_round_page(cur_start, effective_page_mask);
        if (cur_end > vm_end) {
            cur_end = vm_start + (file_end - file_start);
        }
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            /* no copy-on-read for mapped binaries */
            vmk_flags.vmkf_no_copy_on_read = 1;
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
#endif /* __arm64__ */
    if (cur_end >= vm_start + (file_end - file_start)) {
        /* all mapped: done */
        goto done;
    }
    if (vm_map_round_page(cur_end, effective_page_mask) >=
        vm_map_trunc_page(vm_start + (file_end - file_start),
        effective_page_mask)) {
        /* no middle */
    } else {
        cur_start = cur_end;
        if ((vm_start & effective_page_mask) !=
            (file_start & effective_page_mask)) {
            /* one 4K pager for the middle */
            cur_vmk_flags = vmk_flags;
        } else {
            /* regular mapping for the middle */
            cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
        }

#if CONFIG_EMBEDDED
        (void) result;
#else /* CONFIG_EMBEDDED */
        /*
         * This process doesn't have its new csflags (from
         * the image being loaded) yet, so tell VM to override the
         * current process's CS_ENFORCEMENT for this mapping.
         */
        if (result->csflags & CS_ENFORCEMENT) {
            cur_vmk_flags.vmkf_cs_enforcement = TRUE;
        } else {
            cur_vmk_flags.vmkf_cs_enforcement = FALSE;
        }
        cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
#endif /* CONFIG_EMBEDDED */

        cur_end = vm_map_trunc_page(vm_start + (file_end -
            file_start),
            effective_page_mask);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            /* no copy-on-read for mapped binaries */
            cur_vmk_flags.vmkf_no_copy_on_read = 1;
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                cur_vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                cur_vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
    if (cur_end >= vm_start + (file_end - file_start)) {
        /* all mapped: done */
        goto done;
    }
    cur_start = cur_end;
#if __arm64__
    if (!vm_map_page_aligned(vm_start + (file_end - file_start),
        effective_page_mask)) {
        /* one 4K pager for the last page */
        cur_end = vm_start + (file_end - file_start);
        if (control != MEMORY_OBJECT_CONTROL_NULL) {
            /* no copy-on-read for mapped binaries */
            vmk_flags.vmkf_no_copy_on_read = 1;
            ret = vm_map_enter_mem_object_control(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                control,
                file_start + cur_offset,
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        } else {
            ret = vm_map_enter_mem_object(
                map,
                &cur_start,
                cur_end - cur_start,
                (mach_vm_offset_t)0,
                VM_FLAGS_FIXED,
                vmk_flags,
                VM_KERN_MEMORY_NONE,
                IPC_PORT_NULL,
                0, /* offset */
                TRUE, /* copy */
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        }
        if (ret != KERN_SUCCESS) {
            return LOAD_NOSPACE;
        }
        cur_offset += cur_end - cur_start;
    }
#endif /* __arm64__ */
done:
    assert(cur_end >= vm_start + (file_end - file_start));
    return LOAD_SUCCESS;
}
1814
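
/*
 * Illustrative userland sketch (not kernel code): the trunc/round arithmetic
 * behind the head/middle/tail split that map_segment() performs above when a
 * 4K-aligned 32-bit binary is mapped on a 16K-page system.  Addresses are
 * hypothetical; no pagers are involved.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FOURK_MASK    0x0fffULL         /* 4K - 1 */
#define SIXTEENK_MASK 0x3fffULL         /* 16K - 1 */

static uint64_t trunc_mask(uint64_t a, uint64_t m) { return a & ~m; }
static uint64_t round_mask(uint64_t a, uint64_t m) { return (a + m) & ~m; }

int
main(void)
{
	uint64_t start = 0x5000, end = 0xd000;  /* hypothetical 4K-aligned range */
	if ((start | end) & FOURK_MASK) {
		return 1;                       /* must at least be 4K-aligned */
	}
	uint64_t mid_start = round_mask(start, SIXTEENK_MASK);
	uint64_t mid_end = trunc_mask(end, SIXTEENK_MASK);

	if (mid_start >= mid_end) {
		/* never reaches a full 16K page: the 4K head covers everything */
		printf("head only [0x%" PRIx64 ":0x%" PRIx64 "]\n", start, end);
		return 0;
	}
	printf("head   [0x%" PRIx64 ":0x%" PRIx64 "] -> 4K pager\n", start, mid_start);
	printf("middle [0x%" PRIx64 ":0x%" PRIx64 "] -> native mapping\n", mid_start, mid_end);
	printf("tail   [0x%" PRIx64 ":0x%" PRIx64 "] -> 4K pager\n", mid_end, end);
	return 0;
}
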
1c79356b
A
1815static
1816load_return_t
b0d623f7 1817load_segment(
0a7de745
A
1818 struct load_command *lcp,
1819 uint32_t filetype,
1820 void * control,
1821 off_t pager_offset,
1822 off_t macho_size,
1823 struct vnode *vp,
1824 vm_map_t map,
1825 int64_t slide,
1826 load_result_t *result)
1c79356b 1827{
b0d623f7 1828 struct segment_command_64 segment_command, *scp;
0a7de745
A
1829 kern_return_t ret;
1830 vm_map_size_t delta_size;
1831 vm_prot_t initprot;
1832 vm_prot_t maxprot;
1833 size_t segment_command_size, total_section_size,
1834 single_section_size;
1835 vm_map_offset_t file_offset, file_size;
1836 vm_map_offset_t vm_offset, vm_size;
1837 vm_map_offset_t vm_start, vm_end, vm_end_aligned;
1838 vm_map_offset_t file_start, file_end;
1839 kern_return_t kr;
1840 boolean_t verbose;
1841 vm_map_size_t effective_page_size;
1842 vm_map_offset_t effective_page_mask;
5ba3f43e 1843#if __arm64__
0a7de745
A
1844 vm_map_kernel_flags_t vmk_flags;
1845 boolean_t fourk_align;
5ba3f43e 1846#endif /* __arm64__ */
3e170ce0
A
1847
1848 effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
1849 effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
1850
1851 verbose = FALSE;
b0d623f7
A
1852 if (LC_SEGMENT_64 == lcp->cmd) {
1853 segment_command_size = sizeof(struct segment_command_64);
1854 single_section_size = sizeof(struct section_64);
5ba3f43e
A
1855#if __arm64__
1856 /* 64-bit binary: should already be 16K-aligned */
1857 fourk_align = FALSE;
1858#endif /* __arm64__ */
b0d623f7
A
1859 } else {
1860 segment_command_size = sizeof(struct segment_command);
1861 single_section_size = sizeof(struct section);
5ba3f43e
A
1862#if __arm64__
1863 /* 32-bit binary: might need 4K-alignment */
1864 if (effective_page_size != FOURK_PAGE_SIZE) {
1865 /* not using 4K page size: need fourk_pager */
1866 fourk_align = TRUE;
1867 verbose = TRUE;
1868 } else {
1869 /* using 4K page size: no need for re-alignment */
1870 fourk_align = FALSE;
1871 }
1872#endif /* __arm64__ */
b0d623f7 1873 }
0a7de745
A
1874 if (lcp->cmdsize < segment_command_size) {
1875 return LOAD_BADMACHO;
1876 }
b0d623f7
A
1877 total_section_size = lcp->cmdsize - segment_command_size;
1878
3e170ce0 1879 if (LC_SEGMENT_64 == lcp->cmd) {
6d2010ae 1880 scp = (struct segment_command_64 *)lcp;
3e170ce0 1881 } else {
6d2010ae
A
1882 scp = &segment_command;
1883 widen_segment_command((struct segment_command *)lcp, scp);
1884 }
1885
3e170ce0
A
1886 if (verbose) {
1887 MACHO_PRINTF(("+++ load_segment %s "
0a7de745
A
1888 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
1889 "prot %d/%d flags 0x%x\n",
1890 scp->segname,
1891 (uint64_t)(slide + scp->vmaddr),
1892 (uint64_t)(slide + scp->vmaddr + scp->vmsize),
1893 pager_offset + scp->fileoff,
1894 pager_offset + scp->fileoff + scp->filesize,
1895 scp->initprot,
1896 scp->maxprot,
1897 scp->flags));
3e170ce0
A
1898 }
1899
91447636
A
1900 /*
1901 * Make sure what we get from the file is really ours (as specified
1902 * by macho_size).
1903 */
b0d623f7 1904 if (scp->fileoff + scp->filesize < scp->fileoff ||
3e170ce0 1905 scp->fileoff + scp->filesize > (uint64_t)macho_size) {
0a7de745 1906 return LOAD_BADMACHO;
3e170ce0 1907 }
b0d623f7
A
1908 /*
1909 * Ensure that the number of sections specified would fit
1910 * within the load command size.
1911 */
3e170ce0 1912 if (total_section_size / single_section_size < scp->nsects) {
0a7de745 1913 return LOAD_BADMACHO;
3e170ce0 1914 }
2d21ac55
A
1915 /*
1916 * Make sure the segment is page-aligned in the file.
1917 */
0a7de745 1918 file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
3e170ce0 1919 file_size = scp->filesize;
5ba3f43e
A
1920#if __arm64__
1921 if (fourk_align) {
1922 if ((file_offset & FOURK_PAGE_MASK) != 0) {
1923 /*
1924 * we can't mmap() it if it's not at least 4KB-aligned
1925 * in the file
1926 */
1927 return LOAD_BADMACHO;
1928 }
1929 } else
1930#endif /* __arm64__ */
3e170ce0 1931 if ((file_offset & PAGE_MASK_64) != 0 ||
0a7de745 1932 /* we can't mmap() it if it's not page-aligned in the file */
3e170ce0
A
1933 (file_offset & vm_map_page_mask(map)) != 0) {
1934 /*
1935 * The 1st test would have failed if the system's page size
1936 * was what this process believes is the page size, so let's
1937 * fail here too for the sake of consistency.
1938 */
0a7de745 1939 return LOAD_BADMACHO;
3e170ce0 1940 }
91447636 1941
04b8595b
A
1942 /*
1943 * If we have a code signature attached for this slice
1944 * require that the segments are within the signed part
1945 * of the file.
1946 */
1947 if (result->cs_end_offset &&
1948 result->cs_end_offset < (off_t)scp->fileoff &&
0a7de745
A
1949 result->cs_end_offset - scp->fileoff < scp->filesize) {
1950 if (cs_debug) {
04b8595b 1951 printf("section outside code signature\n");
0a7de745 1952 }
04b8595b
A
1953 return LOAD_BADMACHO;
1954 }
1955
cb323159
A
1956 if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) {
1957 if (cs_debug) {
1958 printf("vmaddr too large\n");
1959 }
1960 return LOAD_BADMACHO;
1961 }
1962
3e170ce0
A
1963 vm_size = scp->vmsize;
1964
0a7de745
A
1965 if (vm_size == 0) {
1966 return LOAD_SUCCESS;
1967 }
3e170ce0
A
1968 if (scp->vmaddr == 0 &&
1969 file_size == 0 &&
1970 vm_size != 0 &&
b0d623f7
A
1971 (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
1972 (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
0c530ab8 1973 /*
6d2010ae
A
1974 * For PIE, extend page zero rather than moving it. Extending
1975 * page zero keeps early allocations from falling predictably
1976 * between the end of page zero and the beginning of the first
1977 * slid segment.
0c530ab8 1978 */
fe8ab488
A
1979 /*
1980 * This is a "page zero" segment: it starts at address 0,
1981 * is not mapped from the binary file and is not accessible.
1982 * User-space should never be able to access that memory, so
1983 * make it completely off limits by raising the VM map's
1984 * minimum offset.
1985 */
3e170ce0
A
1986 vm_end = vm_offset + vm_size;
1987 if (vm_end < vm_offset) {
0a7de745 1988 return LOAD_BADMACHO;
3e170ce0
A
1989 }
1990 if (verbose) {
1991 MACHO_PRINTF(("++++++ load_segment: "
0a7de745
A
1992 "page_zero up to 0x%llx\n",
1993 (uint64_t) vm_end));
3e170ce0 1994 }
5ba3f43e
A
1995#if __arm64__
1996 if (fourk_align) {
1997 /* raise min_offset as much as page-alignment allows */
1998 vm_end_aligned = vm_map_trunc_page(vm_end,
0a7de745 1999 effective_page_mask);
5ba3f43e
A
2000 } else
2001#endif /* __arm64__ */
3e170ce0
A
2002 {
2003 vm_end = vm_map_round_page(vm_end,
0a7de745 2004 PAGE_MASK_64);
3e170ce0
A
2005 vm_end_aligned = vm_end;
2006 }
2007 ret = vm_map_raise_min_offset(map,
0a7de745 2008 vm_end_aligned);
5ba3f43e
A
2009#if __arm64__
2010 if (ret == 0 &&
2011 vm_end > vm_end_aligned) {
2012 /* use fourk_pager to map the rest of pagezero */
2013 assert(fourk_align);
2014 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
2015 vmk_flags.vmkf_fourk = TRUE;
2016 ret = vm_map_enter_mem_object(
2017 map,
2018 &vm_end_aligned,
2019 vm_end - vm_end_aligned,
0a7de745 2020 (mach_vm_offset_t) 0, /* mask */
5ba3f43e
A
2021 VM_FLAGS_FIXED,
2022 vmk_flags,
2023 VM_KERN_MEMORY_NONE,
2024 IPC_PORT_NULL,
2025 0,
0a7de745 2026 FALSE, /* copy */
5ba3f43e
A
2027 (scp->initprot & VM_PROT_ALL),
2028 (scp->maxprot & VM_PROT_ALL),
2029 VM_INHERIT_DEFAULT);
2030 }
2031#endif /* __arm64__ */
0a7de745 2032
fe8ab488 2033 if (ret != KERN_SUCCESS) {
0a7de745 2034 return LOAD_FAILURE;
0c530ab8 2035 }
0a7de745 2036 return LOAD_SUCCESS;
3e170ce0 2037 } else {
5ba3f43e
A
2038#if CONFIG_EMBEDDED
2039 /* not PAGEZERO: should not be mapped at address 0 */
2040 if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
2041 return LOAD_BADMACHO;
2042 }
2043#endif /* CONFIG_EMBEDDED */
0c530ab8
A
2044 }
2045
5ba3f43e
A
2046#if __arm64__
2047 if (fourk_align) {
2048 /* 4K-align */
2049 file_start = vm_map_trunc_page(file_offset,
0a7de745 2050 FOURK_PAGE_MASK);
5ba3f43e 2051 file_end = vm_map_round_page(file_offset + file_size,
0a7de745 2052 FOURK_PAGE_MASK);
5ba3f43e 2053 vm_start = vm_map_trunc_page(vm_offset,
0a7de745 2054 FOURK_PAGE_MASK);
5ba3f43e 2055 vm_end = vm_map_round_page(vm_offset + vm_size,
0a7de745 2056 FOURK_PAGE_MASK);
5ba3f43e
A
2057 if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
2058 page_aligned(file_start) &&
2059 vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
2060 page_aligned(vm_start) &&
2061 vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
2062 /* XXX last segment: ignore mis-aligned tail */
2063 file_end = vm_map_round_page(file_end,
0a7de745 2064 effective_page_mask);
5ba3f43e 2065 vm_end = vm_map_round_page(vm_end,
0a7de745 2066 effective_page_mask);
5ba3f43e
A
2067 }
2068 } else
2069#endif /* __arm64__ */
3e170ce0
A
2070 {
2071 file_start = vm_map_trunc_page(file_offset,
0a7de745 2072 effective_page_mask);
3e170ce0 2073 file_end = vm_map_round_page(file_offset + file_size,
0a7de745 2074 effective_page_mask);
3e170ce0 2075 vm_start = vm_map_trunc_page(vm_offset,
0a7de745 2076 effective_page_mask);
3e170ce0 2077 vm_end = vm_map_round_page(vm_offset + vm_size,
0a7de745 2078 effective_page_mask);
3e170ce0 2079 }
6d2010ae 2080
0a7de745 2081 if (vm_start < result->min_vm_addr) {
3e170ce0 2082 result->min_vm_addr = vm_start;
0a7de745
A
2083 }
2084 if (vm_end > result->max_vm_addr) {
3e170ce0 2085 result->max_vm_addr = vm_end;
0a7de745 2086 }
6d2010ae 2087
0a7de745
A
2088 if (map == VM_MAP_NULL) {
2089 return LOAD_SUCCESS;
2090 }
6d2010ae 2091
3e170ce0 2092 if (vm_size > 0) {
b0d623f7
A
2093 initprot = (scp->initprot) & VM_PROT_ALL;
2094 maxprot = (scp->maxprot) & VM_PROT_ALL;
91447636
A
2095 /*
2096 * Map a copy of the file into the address space.
2097 */
3e170ce0
A
2098 if (verbose) {
2099 MACHO_PRINTF(("++++++ load_segment: "
0a7de745
A
2100 "mapping at vm [0x%llx:0x%llx] of "
2101 "file [0x%llx:0x%llx]\n",
2102 (uint64_t) vm_start,
2103 (uint64_t) vm_end,
2104 (uint64_t) file_start,
2105 (uint64_t) file_end));
39236c6e 2106 }
3e170ce0 2107 ret = map_segment(map,
0a7de745
A
2108 vm_start,
2109 vm_end,
2110 control,
2111 file_start,
2112 file_end,
2113 initprot,
2114 maxprot,
2115 result);
3e170ce0
A
2116 if (ret) {
2117 return LOAD_NOSPACE;
2118 }
2119
2120#if FIXME
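 /*
  * NB: this disabled block predates the vm_start/vm_end rework above;
  * map_size and map_addr are no longer defined in this function.
  */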
91447636
A
2121 /*
2122 * If the file didn't end on a page boundary,
2123 * we need to zero the leftover.
2124 */
b0d623f7 2125 delta_size = map_size - scp->filesize;
91447636 2126 if (delta_size > 0) {
0a7de745
A
2127 mach_vm_offset_t tmp;
2128
5ba3f43e 2129 ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
3e170ce0 2130 if (ret != KERN_SUCCESS) {
0a7de745 2131 return LOAD_RESOURCE;
3e170ce0 2132 }
0a7de745 2133
b0d623f7 2134 if (copyout(tmp, map_addr + scp->filesize,
0a7de745 2135 delta_size)) {
b0d623f7 2136 (void) mach_vm_deallocate(
0a7de745
A
2137 kernel_map, tmp, delta_size);
2138 return LOAD_FAILURE;
91447636 2139 }
0a7de745 2140
b0d623f7 2141 (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
91447636
A
2142 }
2143#endif /* FIXME */
2144 }
1c79356b 2145
91447636
A
2146 /*
2147 * If the virtual size of the segment is greater
2148 * than the size from the file, we need to allocate
2149 * zero fill memory for the rest.
2150 */
3e170ce0
A
2151 if ((vm_end - vm_start) > (file_end - file_start)) {
2152 delta_size = (vm_end - vm_start) - (file_end - file_start);
2153 } else {
2154 delta_size = 0;
2155 }
91447636 2156 if (delta_size > 0) {
3e170ce0
A
2157 mach_vm_offset_t tmp;
2158
2159 tmp = vm_start + (file_end - file_start);
2160 if (verbose) {
2161 MACHO_PRINTF(("++++++ load_segment: "
0a7de745
A
2162 "delta mapping vm [0x%llx:0x%llx]\n",
2163 (uint64_t) tmp,
2164 (uint64_t) (tmp + delta_size)));
3e170ce0
A
2165 }
2166 kr = map_segment(map,
0a7de745
A
2167 tmp,
2168 tmp + delta_size,
2169 MEMORY_OBJECT_CONTROL_NULL,
2170 0,
2171 delta_size,
2172 scp->initprot,
2173 scp->maxprot,
2174 result);
3e170ce0 2175 if (kr != KERN_SUCCESS) {
0a7de745 2176 return LOAD_NOSPACE;
3e170ce0 2177 }
91447636 2178 }
1c79356b 2179
0a7de745 2180 if ((scp->fileoff == 0) && (scp->filesize != 0)) {
3e170ce0 2181 result->mach_header = vm_offset;
0a7de745 2182 }
0c530ab8 2183
b0d623f7 2184 if (scp->flags & SG_PROTECTED_VERSION_1) {
3e170ce0 2185 ret = unprotect_dsmos_segment(file_start,
0a7de745
A
2186 file_end - file_start,
2187 vp,
2188 pager_offset,
2189 map,
2190 vm_start,
2191 vm_end - vm_start);
3e170ce0
A
2192 if (ret != LOAD_SUCCESS) {
2193 return ret;
2194 }
0c530ab8
A
2195 } else {
2196 ret = LOAD_SUCCESS;
2197 }
3e170ce0
A
2198
2199 if (LOAD_SUCCESS == ret &&
2200 filetype == MH_DYLINKER &&
2201 result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
b0d623f7 2202 note_all_image_info_section(scp,
0a7de745
A
2203 LC_SEGMENT_64 == lcp->cmd,
2204 single_section_size,
2205 ((const char *)lcp +
2206 segment_command_size),
2207 slide,
2208 result);
3e170ce0 2209 }
1c79356b 2210
c18c124e 2211 if (result->entry_point != MACH_VM_MIN_ADDRESS) {
3e170ce0 2212 if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
0a7de745 2213 if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) {
c18c124e
A
2214 result->validentry = 1;
2215 } else {
2216 /* right range but wrong protections, unset if previously validated */
2217 result->validentry = 0;
2218 }
2219 }
2220 }
0b4e3aa0 2221
6d2010ae 2222 return ret;
1c79356b
A
2223}
2224
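
/*
 * Illustrative userland sketch: the overflow-safe bounds checks that
 * load_segment() applies to an LC_SEGMENT_64 above, written with the
 * compiler builtins that os_add_overflow() wraps.  "macho_size" is assumed
 * to be the on-disk size of this slice.
 */
#include <mach-o/loader.h>
#include <stdbool.h>
#include <stdint.h>

static bool
segment_command_sane(const struct segment_command_64 *scp, uint64_t macho_size)
{
	uint64_t file_end;

	/* the file range must not wrap and must stay inside the slice */
	if (__builtin_add_overflow(scp->fileoff, scp->filesize, &file_end) ||
	    file_end > macho_size) {
		return false;
	}
	/* every declared section must fit inside the command itself */
	if (scp->cmdsize < sizeof(*scp)) {
		return false;
	}
	if ((scp->cmdsize - sizeof(*scp)) / sizeof(struct section_64) < scp->nsects) {
		return false;
	}
	return true;
}
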
39236c6e
A
2225static
2226load_return_t
2227load_uuid(
0a7de745
A
2228 struct uuid_command *uulp,
2229 char *command_end,
2230 load_result_t *result
2231 )
39236c6e 2232{
0a7de745
A
2233 /*
2234 * We need to check the following for this command:
2235 * - The command size should be at least the size of struct uuid_command
2236 * - The UUID part of the command should be completely within the mach-o header
2237 */
316670eb 2238
0a7de745
A
2239 if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
2240 (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
2241 return LOAD_BADMACHO;
2242 }
2243
2244 memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
2245 return LOAD_SUCCESS;
39236c6e 2246}
316670eb 2247
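
/*
 * Illustrative userland sketch (assumes a 64-bit process): walking the
 * running image's load commands to find LC_UUID, with the same two checks
 * load_uuid() makes above (command at least sizeof(struct uuid_command) and
 * the UUID fully inside the command area).
 */
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const struct mach_header_64 *mh =
	    (const struct mach_header_64 *)_dyld_get_image_header(0);
	const char *lc = (const char *)(mh + 1);
	const char *end = lc + mh->sizeofcmds;

	for (uint32_t i = 0; i < mh->ncmds; i++) {
		const struct load_command *cmd = (const struct load_command *)lc;
		if (cmd->cmdsize == 0) {
			break;          /* malformed; stop walking */
		}
		if (cmd->cmd == LC_UUID &&
		    cmd->cmdsize >= sizeof(struct uuid_command) &&
		    lc + sizeof(struct uuid_command) <= end) {
			const struct uuid_command *uc = (const struct uuid_command *)lc;
			for (int j = 0; j < 16; j++) {
				printf("%02x", uc->uuid[j]);
			}
			printf("\n");
			return 0;
		}
		lc += cmd->cmdsize;
	}
	return 1;
}
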
cb323159
A
2248static
2249load_return_t
2250load_version(
2251 struct version_min_command *vmc,
2252 boolean_t *found_version_cmd,
2253 load_result_t *result
2254 )
2255{
2256 uint32_t platform = 0;
2257 uint32_t sdk;
2258
2259 if (vmc->cmdsize < sizeof(*vmc)) {
2260 return LOAD_BADMACHO;
2261 }
2262 if (*found_version_cmd == TRUE) {
2263 return LOAD_BADMACHO;
2264 }
2265 *found_version_cmd = TRUE;
2266 sdk = vmc->sdk;
2267 switch (vmc->cmd) {
2268 case LC_VERSION_MIN_MACOSX:
2269 platform = PLATFORM_MACOS;
2270 break;
2271#if __x86_64__ /* __x86_64__ */
2272 case LC_VERSION_MIN_IPHONEOS:
2273 platform = PLATFORM_IOSSIMULATOR;
2274 break;
2275 case LC_VERSION_MIN_WATCHOS:
2276 platform = PLATFORM_WATCHOSSIMULATOR;
2277 break;
2278 case LC_VERSION_MIN_TVOS:
2279 platform = PLATFORM_TVOSSIMULATOR;
2280 break;
2281#else
2282 case LC_VERSION_MIN_IPHONEOS: {
2283#if __arm64__
2284 extern int legacy_footprint_entitlement_mode;
2285 if (vmc->sdk < (12 << 16)) {
2286 /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
2287 result->legacy_footprint = TRUE;
2288 }
2289#endif /* __arm64__ */
2290 platform = PLATFORM_IOS;
2291 break;
2292 }
2293 case LC_VERSION_MIN_WATCHOS:
2294 platform = PLATFORM_WATCHOS;
2295 break;
2296 case LC_VERSION_MIN_TVOS:
2297 platform = PLATFORM_TVOS;
2298 break;
2299#endif /* __x86_64__ */
2300 /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */
2301 default:
2302 sdk = (uint32_t)-1;
2303 __builtin_unreachable();
2304 }
2305 result->ip_platform = platform;
2306 result->lr_sdk = sdk;
2307 return LOAD_SUCCESS;
2308}
2309
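
/*
 * Illustrative sketch: LC_VERSION_MIN_* packs an X.Y.Z version as
 * (X << 16) | (Y << 8) | Z, which is why the pre-iOS12 test above is a
 * plain comparison against (12 << 16).
 */
#include <stdint.h>
#include <stdio.h>

static void
decode_packed_version(uint32_t v)
{
	printf("%u.%u.%u\n", v >> 16, (v >> 8) & 0xff, v & 0xff);
}

int
main(void)
{
	decode_packed_version(12 << 16);                   /* 12.0.0 */
	decode_packed_version((11 << 16) | (4 << 8) | 1);  /* 11.4.1 */
	return 0;
}
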
316670eb
A
2310static
2311load_return_t
2312load_main(
0a7de745
A
2313 struct entry_point_command *epc,
2314 thread_t thread,
2315 int64_t slide,
2316 load_result_t *result
2317 )
316670eb
A
2318{
2319 mach_vm_offset_t addr;
0a7de745
A
2320 kern_return_t ret;
2321
2322 if (epc->cmdsize < sizeof(*epc)) {
2323 return LOAD_BADMACHO;
2324 }
316670eb 2325 if (result->thread_count != 0) {
0a7de745
A
2326 return LOAD_FAILURE;
2327 }
2328
2329 if (thread == THREAD_NULL) {
2330 return LOAD_SUCCESS;
316670eb
A
2331 }
2332
39037602
A
2333 /*
2334 * LC_MAIN specifies stack size but not location.
2335 * Add guard page to allocation size (MAXSSIZ includes guard page).
2336 */
316670eb 2337 if (epc->stacksize) {
0a7de745 2338 if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) {
39037602
A
2339 /*
2340 * We are going to immediately throw away this result, but we want
2341 * to make sure we aren't loading a value dangerously close to
2342 * overflowing, since a guard page will be added and the size will
2343 * be rounded to page boundaries.
2344 */
2345 return LOAD_BADMACHO;
2346 }
316670eb 2347 result->user_stack_size = epc->stacksize;
39037602
A
2348 if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
2349 return LOAD_BADMACHO;
2350 }
0a7de745 2351 result->custom_stack = TRUE;
316670eb 2352 } else {
39037602 2353 result->user_stack_alloc_size = MAXSSIZ;
316670eb 2354 }
316670eb
A
2355
2356 /* use default location for stack */
d9a64523 2357 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
0a7de745
A
2358 if (ret != KERN_SUCCESS) {
2359 return LOAD_FAILURE;
2360 }
316670eb
A
2361
2362 /* The stack slides down from the default location */
2363 result->user_stack = addr;
2364 result->user_stack -= slide;
2365
c18c124e
A
2366 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2367 /* Already processed LC_MAIN or LC_UNIXTHREAD */
0a7de745 2368 return LOAD_FAILURE;
c18c124e
A
2369 }
2370
316670eb
A
2371 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
2372 result->needs_dynlinker = TRUE;
c18c124e 2373 result->using_lcmain = TRUE;
39037602
A
2374
2375 ret = thread_state_initialize( thread );
2376 if (ret != KERN_SUCCESS) {
0a7de745 2377 return LOAD_FAILURE;
39037602
A
2378 }
2379
316670eb
A
2380 result->unixproc = TRUE;
2381 result->thread_count++;
2382
0a7de745 2383 return LOAD_SUCCESS;
316670eb
A
2384}
2385
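
/*
 * Illustrative userland sketch (assumes a 64-bit process): locating LC_MAIN
 * in the running image and reading entryoff/stacksize.  As the comment in
 * load_main() notes, the kernel ignores entryoff; dyld adds it to the image
 * base (plus slide) to compute the entry point.
 */
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const struct mach_header_64 *mh =
	    (const struct mach_header_64 *)_dyld_get_image_header(0);
	const char *lc = (const char *)(mh + 1);

	for (uint32_t i = 0; i < mh->ncmds; i++) {
		const struct load_command *cmd = (const struct load_command *)lc;
		if (cmd->cmdsize == 0) {
			break;
		}
		if (cmd->cmd == LC_MAIN &&
		    cmd->cmdsize >= sizeof(struct entry_point_command)) {
			const struct entry_point_command *epc =
			    (const struct entry_point_command *)lc;
			printf("entryoff 0x%llx stacksize 0x%llx\n",
			    (unsigned long long)epc->entryoff,
			    (unsigned long long)epc->stacksize);
			break;
		}
		lc += cmd->cmdsize;
	}
	return 0;
}
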
cb323159
A
2386static
2387load_return_t
2388setup_driver_main(
2389 thread_t thread,
2390 int64_t slide,
2391 load_result_t *result
2392 )
2393{
2394 mach_vm_offset_t addr;
2395 kern_return_t ret;
2396
2397 /* Driver binaries have no LC_MAIN, use defaults */
2398
2399 if (thread == THREAD_NULL) {
2400 return LOAD_SUCCESS;
2401 }
2402
2403 result->user_stack_alloc_size = MAXSSIZ;
2404
2405 /* use default location for stack */
2406 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2407 if (ret != KERN_SUCCESS) {
2408 return LOAD_FAILURE;
2409 }
2410
2411 /* The stack slides down from the default location */
2412 result->user_stack = addr;
2413 result->user_stack -= slide;
2414
2415 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2416 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2417 return LOAD_FAILURE;
2418 }
2419
2420 result->needs_dynlinker = TRUE;
2421
2422 ret = thread_state_initialize( thread );
2423 if (ret != KERN_SUCCESS) {
2424 return LOAD_FAILURE;
2425 }
2426
2427 result->unixproc = TRUE;
2428 result->thread_count++;
2429
2430 return LOAD_SUCCESS;
2431}
316670eb 2432
91447636
A
2433static
2434load_return_t
2435load_unixthread(
0a7de745
A
2436 struct thread_command *tcp,
2437 thread_t thread,
2438 int64_t slide,
2439 load_result_t *result
2440 )
91447636 2441{
0a7de745
A
2442 load_return_t ret;
2443 int customstack = 0;
316670eb 2444 mach_vm_offset_t addr;
0a7de745
A
2445 if (tcp->cmdsize < sizeof(*tcp)) {
2446 return LOAD_BADMACHO;
2447 }
2d21ac55 2448 if (result->thread_count != 0) {
0a7de745
A
2449 return LOAD_FAILURE;
2450 }
2451
2452 if (thread == THREAD_NULL) {
2453 return LOAD_SUCCESS;
2d21ac55 2454 }
6d2010ae 2455
91447636 2456 ret = load_threadstack(thread,
0a7de745
A
2457 (uint32_t *)(((vm_offset_t)tcp) +
2458 sizeof(struct thread_command)),
2459 tcp->cmdsize - sizeof(struct thread_command),
2460 &addr, &customstack, result);
2461 if (ret != LOAD_SUCCESS) {
2462 return ret;
2463 }
91447636 2464
316670eb 2465 /* LC_UNIXTHREAD optionally specifies stack size and location */
d9a64523 2466
0a7de745
A
2467 if (customstack) {
2468 result->custom_stack = TRUE;
2469 } else {
39037602 2470 result->user_stack_alloc_size = MAXSSIZ;
316670eb 2471 }
6d2010ae 2472
316670eb
A
2473 /* The stack slides down from the default location */
2474 result->user_stack = addr;
2475 result->user_stack -= slide;
6d2010ae 2476
91447636 2477 ret = load_threadentry(thread,
0a7de745
A
2478 (uint32_t *)(((vm_offset_t)tcp) +
2479 sizeof(struct thread_command)),
2480 tcp->cmdsize - sizeof(struct thread_command),
2481 &addr);
2482 if (ret != LOAD_SUCCESS) {
2483 return ret;
2484 }
91447636 2485
c18c124e
A
2486 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2487 /* Already processed LC_MAIN or LC_UNIXTHREAD */
0a7de745 2488 return LOAD_FAILURE;
c18c124e
A
2489 }
2490
316670eb 2491 result->entry_point = addr;
6d2010ae
A
2492 result->entry_point += slide;
2493
91447636 2494 ret = load_threadstate(thread,
0a7de745
A
2495 (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
2496 tcp->cmdsize - sizeof(struct thread_command),
2497 result);
2498 if (ret != LOAD_SUCCESS) {
2499 return ret;
2500 }
91447636
A
2501
2502 result->unixproc = TRUE;
2503 result->thread_count++;
2504
0a7de745 2505 return LOAD_SUCCESS;
91447636
A
2506}
2507
1c79356b
A
2508static
2509load_return_t
2510load_threadstate(
0a7de745
A
2511 thread_t thread,
2512 uint32_t *ts,
2513 uint32_t total_size,
2514 load_result_t *result
2515 )
1c79356b 2516{
0a7de745
A
2517 uint32_t size;
2518 int flavor;
2519 uint32_t thread_size;
490019cf
A
2520 uint32_t *local_ts = NULL;
2521 uint32_t local_ts_size = 0;
0a7de745 2522 int ret;
1c79356b 2523
490019cf 2524 (void)thread;
04b8595b 2525
04b8595b
A
2526 if (total_size > 0) {
2527 local_ts_size = total_size;
2528 local_ts = kalloc(local_ts_size);
2529 if (local_ts == NULL) {
490019cf 2530 return LOAD_FAILURE;
04b8595b
A
2531 }
2532 memcpy(local_ts, ts, local_ts_size);
2533 ts = local_ts;
2534 }
2535
1c79356b 2536 /*
490019cf
A
2537 * Validate the new thread state; iterate through the state flavors in
2538 * the Mach-O file.
2539 * XXX: we should validate the machine state here, to avoid failing at
2540 * activation time where we can't bail out cleanly.
1c79356b 2541 */
1c79356b
A
2542 while (total_size > 0) {
2543 flavor = *ts++;
2544 size = *ts++;
490019cf 2545
39037602 2546 if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
490019cf 2547 os_sub_overflow(total_size, thread_size, &total_size)) {
04b8595b 2548 ret = LOAD_BADMACHO;
490019cf 2549 goto bad;
2d21ac55 2550 }
490019cf 2551
0a7de745 2552 ts += size; /* ts is a (uint32_t *) */
1c79356b 2553 }
04b8595b 2554
490019cf
A
2555 result->threadstate = local_ts;
2556 result->threadstate_sz = local_ts_size;
2557 return LOAD_SUCCESS;
2558
2559bad:
2560 if (local_ts) {
04b8595b 2561 kfree(local_ts, local_ts_size);
04b8595b
A
2562 }
2563 return ret;
1c79356b
A
2564}
2565
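
/*
 * Illustrative sketch: the (flavor, count, data...) stream that
 * load_threadstate(), load_threadstack() and load_threadentry() all walk,
 * with the same overflow-checked size accounting.  The stream and flavor
 * numbers below are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
walk_thread_state(const uint32_t *ts, uint32_t total_size)
{
	while (total_size > 0) {
		if (total_size < 2 * sizeof(uint32_t)) {
			return false;           /* truncated flavor/count header */
		}
		uint32_t flavor = *ts++;
		uint32_t count = *ts++;
		uint32_t entry_size;

		/* entry_size = (count + 2) * sizeof(uint32_t), overflow-checked */
		if (__builtin_add_overflow(count, 2u, &entry_size) ||
		    __builtin_mul_overflow(entry_size, (uint32_t)sizeof(uint32_t),
		    &entry_size) ||
		    entry_size > total_size) {
			return false;
		}
		total_size -= entry_size;
		printf("flavor %u: %u words\n", flavor, count);
		ts += count;
	}
	return true;
}

int
main(void)
{
	uint32_t stream[] = { 7, 2, 0xaaaa, 0xbbbb, 9, 1, 0xcccc };
	return walk_thread_state(stream, sizeof(stream)) ? 0 : 1;
}
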
2566static
2567load_return_t
2568load_threadstack(
0a7de745
A
2569 thread_t thread,
2570 uint32_t *ts,
2571 uint32_t total_size,
2572 mach_vm_offset_t *user_stack,
2573 int *customstack,
2574 load_result_t *result
2575 )
1c79356b 2576{
0a7de745
A
2577 kern_return_t ret;
2578 uint32_t size;
2579 int flavor;
2580 uint32_t stack_size;
1c79356b 2581
1c79356b
A
2582 while (total_size > 0) {
2583 flavor = *ts++;
2584 size = *ts++;
0a7de745
A
2585 if (UINT32_MAX - 2 < size ||
2586 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2587 return LOAD_BADMACHO;
2588 }
2589 stack_size = (size + 2) * sizeof(uint32_t);
2590 if (stack_size > total_size) {
2591 return LOAD_BADMACHO;
2592 }
91447636
A
2593 total_size -= stack_size;
2594
2595 /*
2596 * Third argument is a kernel space pointer; it gets cast
2597 * to the appropriate type in thread_userstack() based on
2598 * the value of flavor.
2599 */
d9a64523 2600 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
2d21ac55 2601 if (ret != KERN_SUCCESS) {
0a7de745 2602 return LOAD_FAILURE;
2d21ac55 2603 }
0a7de745 2604 ts += size; /* ts is a (uint32_t *) */
1c79356b 2605 }
0a7de745 2606 return LOAD_SUCCESS;
1c79356b
A
2607}
2608
2609static
2610load_return_t
2611load_threadentry(
0a7de745
A
2612 thread_t thread,
2613 uint32_t *ts,
2614 uint32_t total_size,
2615 mach_vm_offset_t *entry_point
2616 )
1c79356b 2617{
0a7de745
A
2618 kern_return_t ret;
2619 uint32_t size;
2620 int flavor;
2621 uint32_t entry_size;
1c79356b
A
2622
2623 /*
2624 * Set the thread state.
2625 */
91447636 2626 *entry_point = MACH_VM_MIN_ADDRESS;
1c79356b
A
2627 while (total_size > 0) {
2628 flavor = *ts++;
2629 size = *ts++;
0a7de745
A
2630 if (UINT32_MAX - 2 < size ||
2631 UINT32_MAX / sizeof(uint32_t) < size + 2) {
2632 return LOAD_BADMACHO;
2633 }
2634 entry_size = (size + 2) * sizeof(uint32_t);
2635 if (entry_size > total_size) {
2636 return LOAD_BADMACHO;
2637 }
91447636
A
2638 total_size -= entry_size;
2639 /*
2640 * Third argument is a kernel space pointer; it gets cast
2641 * to the appropriate type in thread_entrypoint() based on
2642 * the value of flavor.
2643 */
2644 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2d21ac55 2645 if (ret != KERN_SUCCESS) {
0a7de745 2646 return LOAD_FAILURE;
2d21ac55 2647 }
0a7de745 2648 ts += size; /* ts is a (uint32_t *) */
1c79356b 2649 }
0a7de745 2650 return LOAD_SUCCESS;
1c79356b
A
2651}
2652
6d2010ae 2653struct macho_data {
0a7de745 2654 struct nameidata __nid;
6d2010ae 2655 union macho_vnode_header {
0a7de745
A
2656 struct mach_header mach_header;
2657 struct fat_header fat_header;
2658 char __pad[512];
6d2010ae
A
2659 } __header;
2660};
1c79356b 2661
c18c124e
A
2662#define DEFAULT_DYLD_PATH "/usr/lib/dyld"
2663
39037602
A
2664#if (DEVELOPMENT || DEBUG)
2665extern char dyld_alt_path[];
2666extern int use_alt_dyld;
2667#endif
2668
6d2010ae 2669static load_return_t
1c79356b 2670load_dylinker(
0a7de745
A
2671 struct dylinker_command *lcp,
2672 integer_t archbits,
2673 vm_map_t map,
2674 thread_t thread,
2675 int depth,
2676 int64_t slide,
2677 load_result_t *result,
2678 struct image_params *imgp
2679 )
1c79356b 2680{
0a7de745
A
2681 const char *name;
2682 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
2683 struct mach_header *header;
2684 off_t file_offset = 0; /* set by get_macho_vnode() */
2685 off_t macho_size = 0; /* set by get_macho_vnode() */
2686 load_result_t *myresult;
2687 kern_return_t ret;
2688 struct macho_data *macho_data;
6d2010ae 2689 struct {
0a7de745
A
2690 struct mach_header __header;
2691 load_result_t __myresult;
2692 struct macho_data __macho_data;
6d2010ae 2693 } *dyld_data;
1c79356b 2694
0a7de745 2695 if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
5ba3f43e 2696 return LOAD_BADMACHO;
0a7de745 2697 }
b0d623f7 2698
5ba3f43e 2699 name = (const char *)lcp + lcp->name.offset;
39037602 2700
5ba3f43e
A
2701 /* Check for a proper null terminated string. */
2702 size_t maxsz = lcp->cmdsize - lcp->name.offset;
2703 size_t namelen = strnlen(name, maxsz);
2704 if (namelen >= maxsz) {
2705 return LOAD_BADMACHO;
2706 }
1c79356b 2707
39037602
A
2708#if (DEVELOPMENT || DEBUG)
2709
0a7de745
A
2710 /*
2711 * rdar://23680808
2712 * If an alternate dyld has been specified via boot args, check
2713 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
2714 * executable and redirect the kernel to load that linker.
2715 */
2716
2717 if (use_alt_dyld) {
2718 int policy_error;
2719 uint32_t policy_flags = 0;
2720 int32_t policy_gencount = 0;
2721
2722 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
2723 if (policy_error == 0) {
2724 if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
2725 name = dyld_alt_path;
2726 }
2727 }
2728 }
39037602
A
2729#endif
2730
c18c124e
A
2731#if !(DEVELOPMENT || DEBUG)
2732 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
0a7de745 2733 return LOAD_BADMACHO;
c18c124e
A
2734 }
2735#endif
2736
6d2010ae
A
2737 /* Allocate wad-of-data from heap to reduce excessively deep stacks */
2738
0a7de745 2739 MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK);
6d2010ae
A
2740 header = &dyld_data->__header;
2741 myresult = &dyld_data->__myresult;
2742 macho_data = &dyld_data->__macho_data;
2743
2744 ret = get_macho_vnode(name, archbits, header,
2745 &file_offset, &macho_size, macho_data, &vp);
0a7de745 2746 if (ret) {
6d2010ae 2747 goto novp_out;
0a7de745 2748 }
6d2010ae
A
2749
2750 *myresult = load_result_null;
d9a64523
A
2751 myresult->is_64bit_addr = result->is_64bit_addr;
2752 myresult->is_64bit_data = result->is_64bit_data;
1c79356b 2753
6d2010ae 2754 ret = parse_machfile(vp, map, thread, header, file_offset,
0a7de745 2755 macho_size, depth, slide, 0, myresult, result, imgp);
6d2010ae 2756
490019cf
A
2757 if (ret == LOAD_SUCCESS) {
2758 if (result->threadstate) {
2759 /* don't use the app's threadstate if we have a dyld */
2760 kfree(result->threadstate, result->threadstate_sz);
2761 }
2762 result->threadstate = myresult->threadstate;
2763 result->threadstate_sz = myresult->threadstate_sz;
2764
1c79356b 2765 result->dynlinker = TRUE;
6d2010ae 2766 result->entry_point = myresult->entry_point;
316670eb 2767 result->validentry = myresult->validentry;
6d2010ae
A
2768 result->all_image_info_addr = myresult->all_image_info_addr;
2769 result->all_image_info_size = myresult->all_image_info_size;
fe8ab488
A
2770 if (myresult->platform_binary) {
2771 result->csflags |= CS_DYLD_PLATFORM;
2772 }
1c79356b 2773 }
39037602 2774
5ba3f43e
A
2775 struct vnode_attr va;
2776 VATTR_INIT(&va);
2777 VATTR_WANTED(&va, va_fsid64);
2778 VATTR_WANTED(&va, va_fsid);
2779 VATTR_WANTED(&va, va_fileid);
2780 int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
2781 if (error == 0) {
0a7de745 2782 imgp->ip_dyld_fsid = vnode_get_va_fsid(&va);
5ba3f43e
A
2783 imgp->ip_dyld_fsobjid = va.va_fileid;
2784 }
2785
91447636 2786 vnode_put(vp);
6d2010ae
A
2787novp_out:
2788 FREE(dyld_data, M_TEMP);
0a7de745 2789 return ret;
1c79356b
A
2790}
2791
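
/*
 * Illustrative sketch: the strnlen()-based validation load_dylinker() uses
 * above to make sure the LC_LOAD_DYLINKER name is a NUL-terminated string
 * that lives entirely inside the load command.
 */
#include <mach-o/loader.h>
#include <stddef.h>
#include <string.h>

static const char *
dylinker_name(const struct dylinker_command *lcp)
{
	if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) {
		return NULL;
	}
	const char *name = (const char *)lcp + lcp->name.offset;
	size_t maxsz = lcp->cmdsize - lcp->name.offset;
	if (strnlen(name, maxsz) >= maxsz) {
		return NULL;    /* no NUL before the end of the command */
	}
	return name;
}
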
6d2010ae 2792static load_return_t
2d21ac55 2793load_code_signature(
0a7de745
A
2794 struct linkedit_data_command *lcp,
2795 struct vnode *vp,
2796 off_t macho_offset,
2797 off_t macho_size,
2798 cpu_type_t cputype,
2799 load_result_t *result,
2800 struct image_params *imgp)
2d21ac55 2801{
0a7de745
A
2802 int ret;
2803 kern_return_t kr;
2804 vm_offset_t addr;
2805 int resid;
2806 struct cs_blob *blob;
2807 int error;
2808 vm_size_t blob_size;
cb323159 2809 uint32_t sum;
2d21ac55
A
2810
2811 addr = 0;
2812 blob = NULL;
2813
cb323159
A
2814 if (lcp->cmdsize != sizeof(struct linkedit_data_command)) {
2815 ret = LOAD_BADMACHO;
2816 goto out;
2817 }
2818
2819 sum = 0;
2820 if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) {
2d21ac55
A
2821 ret = LOAD_BADMACHO;
2822 goto out;
2823 }
2824
04b8595b 2825 blob = ubc_cs_blob_get(vp, cputype, macho_offset);
d9a64523 2826
fe8ab488
A
2827 if (blob != NULL) {
2828 /* we already have a blob for this vnode and cputype */
d9a64523
A
2829 if (blob->csb_cpu_type != cputype ||
2830 blob->csb_base_offset != macho_offset) {
fe8ab488
A
2831 /* the blob has changed for this vnode: fail ! */
2832 ret = LOAD_BADMACHO;
d9a64523 2833 goto out;
fe8ab488 2834 }
d9a64523
A
2835
2836 /* It matches the blob we want here, let's verify the version */
2837 if (ubc_cs_generation_check(vp) == 0) {
2838 /* No need to revalidate, we're good! */
2839 ret = LOAD_SUCCESS;
2840 goto out;
2841 }
2842
2843 /* That blob may be stale, let's revalidate. */
2844 error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
2845 if (error == 0) {
2846 /* Revalidation succeeded, we're good! */
2847 ret = LOAD_SUCCESS;
2848 goto out;
2849 }
2850
2851 if (error != EAGAIN) {
2852 printf("load_code_signature: revalidation failed: %d\n", error);
2853 ret = LOAD_FAILURE;
2854 goto out;
2855 }
2856
2857 assert(error == EAGAIN);
2858
2859 /*
2860 * Revalidation was not possible for this blob. We just continue as if there was no blob,
2861 * rereading the signature, and ubc_cs_blob_add will do the right thing.
2862 */
2863 blob = NULL;
2d21ac55
A
2864 }
2865
593a1d5f
A
2866 blob_size = lcp->datasize;
2867 kr = ubc_cs_blob_allocate(&addr, &blob_size);
2d21ac55
A
2868 if (kr != KERN_SUCCESS) {
2869 ret = LOAD_NOSPACE;
2870 goto out;
2871 }
0a7de745 2872
2d21ac55
A
2873 resid = 0;
2874 error = vn_rdwr(UIO_READ,
0a7de745
A
2875 vp,
2876 (caddr_t) addr,
2877 lcp->datasize,
2878 macho_offset + lcp->dataoff,
2879 UIO_SYSSPACE,
2880 0,
2881 kauth_cred_get(),
2882 &resid,
2883 current_proc());
2d21ac55
A
2884 if (error || resid != 0) {
2885 ret = LOAD_IOERROR;
2886 goto out;
2887 }
2888
2889 if (ubc_cs_blob_add(vp,
0a7de745
A
2890 cputype,
2891 macho_offset,
2892 &addr,
2893 lcp->datasize,
2894 imgp,
2895 0,
2896 &blob)) {
39037602
A
2897 if (addr) {
2898 ubc_cs_blob_deallocate(addr, blob_size);
2899 }
2d21ac55
A
2900 ret = LOAD_FAILURE;
2901 goto out;
2902 } else {
2903 /* ubc_cs_blob_add() has consumed "addr" */
2904 addr = 0;
2905 }
6d2010ae
A
2906
2907#if CHECK_CS_VALIDATION_BITMAP
2908 ubc_cs_validation_bitmap_allocate( vp );
2909#endif
0a7de745 2910
2d21ac55
A
2911 ret = LOAD_SUCCESS;
2912out:
fe8ab488 2913 if (ret == LOAD_SUCCESS) {
0a7de745 2914 if (blob == NULL) {
5ba3f43e 2915 panic("success, but no blob!");
0a7de745 2916 }
3e170ce0 2917
2d21ac55 2918 result->csflags |= blob->csb_flags;
fe8ab488 2919 result->platform_binary = blob->csb_platform_binary;
04b8595b 2920 result->cs_end_offset = blob->csb_end_offset;
2d21ac55
A
2921 }
2922 if (addr != 0) {
593a1d5f 2923 ubc_cs_blob_deallocate(addr, blob_size);
2d21ac55
A
2924 addr = 0;
2925 }
2926
2927 return ret;
2928}
2929
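
/*
 * Illustrative sketch: the overflow-checked bounds test at the top of
 * load_code_signature(), using the builtin that os_add_overflow() wraps.
 */
#include <mach-o/loader.h>
#include <stdbool.h>
#include <stdint.h>

static bool
code_signature_in_bounds(const struct linkedit_data_command *lcp,
    uint64_t macho_size)
{
	uint32_t sum;

	if (lcp->cmdsize != sizeof(*lcp)) {
		return false;
	}
	return !__builtin_add_overflow(lcp->dataoff, lcp->datasize, &sum) &&
	       sum <= macho_size;
}
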
593a1d5f
A
2930
2931#if CONFIG_CODE_DECRYPTION
2932
2933static load_return_t
2934set_code_unprotect(
3e170ce0 2935 struct encryption_info_command *eip,
0a7de745 2936 caddr_t addr,
3e170ce0
A
2937 vm_map_t map,
2938 int64_t slide,
2939 struct vnode *vp,
2940 off_t macho_offset,
2941 cpu_type_t cputype,
2942 cpu_subtype_t cpusubtype)
593a1d5f 2943{
3e170ce0 2944 int error, len;
593a1d5f
A
2945 pager_crypt_info_t crypt_info;
2946 const char * cryptname = 0;
6d2010ae 2947 char *vpath;
0a7de745 2948
593a1d5f
A
2949 size_t offset;
2950 struct segment_command_64 *seg64;
2951 struct segment_command *seg32;
2952 vm_map_offset_t map_offset, map_size;
3e170ce0 2953 vm_object_offset_t crypto_backing_offset;
593a1d5f 2954 kern_return_t kr;
b0d623f7 2955
0a7de745
A
2956 if (eip->cmdsize < sizeof(*eip)) {
2957 return LOAD_BADMACHO;
2958 }
2959
2960 switch (eip->cryptid) {
2961 case 0:
2962 /* not encrypted, just an empty load command */
2963 return LOAD_SUCCESS;
2964 case 1:
2965 cryptname = "com.apple.unfree";
2966 break;
2967 case 0x10:
2968 /* some random cryptid that you could manually put into
2969 * your binary if you want NULL */
2970 cryptname = "com.apple.null";
2971 break;
2972 default:
2973 return LOAD_BADMACHO;
2974 }
2975
2976 if (map == VM_MAP_NULL) {
2977 return LOAD_SUCCESS;
2978 }
2979 if (NULL == text_crypter_create) {
2980 return LOAD_FAILURE;
593a1d5f 2981 }
6d2010ae
A
2982
2983 MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
0a7de745
A
2984 if (vpath == NULL) {
2985 return LOAD_FAILURE;
2986 }
2987
593a1d5f 2988 len = MAXPATHLEN;
3e170ce0
A
2989 error = vn_getpath(vp, vpath, &len);
2990 if (error) {
6d2010ae
A
2991 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2992 return LOAD_FAILURE;
2993 }
0a7de745 2994
593a1d5f 2995 /* set up decrypter first */
39236c6e
A
2996 crypt_file_data_t crypt_data = {
2997 .filename = vpath,
2998 .cputype = cputype,
0a7de745
A
2999 .cpusubtype = cpusubtype
3000 };
3001 kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
39037602
A
3002#if VM_MAP_DEBUG_APPLE_PROTECT
3003 if (vm_map_debug_apple_protect) {
3004 struct proc *p;
3005 p = current_proc();
3006 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
0a7de745 3007 p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
39037602
A
3008 }
3009#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
6d2010ae 3010 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
0a7de745
A
3011
3012 if (kr) {
c910b4d9 3013 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
0a7de745 3014 cryptname, kr);
39236c6e
A
3015 if (kr == kIOReturnNotPrivileged) {
3016 /* text encryption returned decryption failure */
0a7de745
A
3017 return LOAD_DECRYPTFAIL;
3018 } else {
39236c6e 3019 return LOAD_RESOURCE;
0a7de745 3020 }
593a1d5f 3021 }
0a7de745 3022
593a1d5f
A
3023 /* this is terrible, but we have to rescan the load commands to find the
3024 * virtual address of this encrypted stuff. This code is gonna look like
3025 * the dyld source one day... */
3026 struct mach_header *header = (struct mach_header *)addr;
3027 size_t mach_header_sz = sizeof(struct mach_header);
3028 if (header->magic == MH_MAGIC_64 ||
3029 header->magic == MH_CIGAM_64) {
0a7de745 3030 mach_header_sz = sizeof(struct mach_header_64);
593a1d5f
A
3031 }
3032 offset = mach_header_sz;
3033 uint32_t ncmds = header->ncmds;
3034 while (ncmds--) {
3035 /*
3036 * Get a pointer to the command.
3037 */
3038 struct load_command *lcp = (struct load_command *)(addr + offset);
3039 offset += lcp->cmdsize;
0a7de745
A
3040
3041 switch (lcp->cmd) {
3042 case LC_SEGMENT_64:
3043 seg64 = (struct segment_command_64 *)lcp;
3044 if ((seg64->fileoff <= eip->cryptoff) &&
3045 (seg64->fileoff + seg64->filesize >=
3046 eip->cryptoff + eip->cryptsize)) {
3047 map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
3048 map_size = eip->cryptsize;
3049 crypto_backing_offset = macho_offset + eip->cryptoff;
3050 goto remap_now;
3051 }
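 /* no break: a non-matching LC_SEGMENT_64 falls through and is re-read with the 32-bit layout below */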
3052 case LC_SEGMENT:
3053 seg32 = (struct segment_command *)lcp;
3054 if ((seg32->fileoff <= eip->cryptoff) &&
3055 (seg32->fileoff + seg32->filesize >=
3056 eip->cryptoff + eip->cryptsize)) {
3057 map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
3058 map_size = eip->cryptsize;
3059 crypto_backing_offset = macho_offset + eip->cryptoff;
3060 goto remap_now;
3061 }
593a1d5f
A
3062 }
3063 }
0a7de745 3064
593a1d5f 3065 /* if we get here, did not find anything */
c910b4d9 3066 return LOAD_BADMACHO;
0a7de745 3067
593a1d5f
A
3068remap_now:
3069 /* now remap using the decrypter */
3e170ce0 3070 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
0a7de745
A
3071 (uint64_t) map_offset,
3072 (uint64_t) (map_offset + map_size)));
3e170ce0 3073 kr = vm_map_apple_protected(map,
0a7de745
A
3074 map_offset,
3075 map_offset + map_size,
3076 crypto_backing_offset,
3077 &crypt_info);
3e170ce0 3078 if (kr) {
c910b4d9 3079 printf("set_code_unprotect(): mapping failed with %x\n", kr);
c910b4d9
A
3080 return LOAD_PROTECT;
3081 }
0a7de745 3082
593a1d5f
A
3083 return LOAD_SUCCESS;
3084}
3085
3086#endif
3087
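
/*
 * Illustrative userland sketch of the rescan loop in set_code_unprotect():
 * find the LC_SEGMENT_64 whose file range contains [off, off + size) and
 * translate that file offset to a slid virtual address.  64-bit only; the
 * kernel version above also handles LC_SEGMENT.
 */
#include <mach-o/loader.h>
#include <stdbool.h>
#include <stdint.h>

static bool
file_range_to_vmaddr(const struct mach_header_64 *mh, uint64_t off,
    uint64_t size, uint64_t slide, uint64_t *vmaddr_out)
{
	const char *lc = (const char *)(mh + 1);

	for (uint32_t i = 0; i < mh->ncmds; i++) {
		const struct load_command *cmd = (const struct load_command *)lc;
		if (cmd->cmdsize == 0) {
			break;
		}
		if (cmd->cmd == LC_SEGMENT_64) {
			const struct segment_command_64 *seg =
			    (const struct segment_command_64 *)lc;
			if (seg->fileoff <= off &&
			    seg->fileoff + seg->filesize >= off + size) {
				*vmaddr_out = seg->vmaddr + off - seg->fileoff + slide;
				return true;
			}
		}
		lc += cmd->cmdsize;
	}
	return false;
}
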
91447636
A
3088/*
3089 * This routine exists to support the load_dylinker().
3090 *
3091 * This routine has its own, separate, understanding of the FAT file format,
3092 * which is terrifically unfortunate.
3093 */
1c79356b
A
3094static
3095load_return_t
3096get_macho_vnode(
0a7de745
A
3097 const char *path,
3098 integer_t archbits,
3099 struct mach_header *mach_header,
3100 off_t *file_offset,
3101 off_t *macho_size,
3102 struct macho_data *data,
3103 struct vnode **vpp
3104 )
1c79356b 3105{
0a7de745
A
3106 struct vnode *vp;
3107 vfs_context_t ctx = vfs_context_current();
3108 proc_t p = vfs_context_proc(ctx);
3109 kauth_cred_t kerncred;
3110 struct nameidata *ndp = &data->__nid;
3111 boolean_t is_fat;
3112 struct fat_arch fat_arch;
3113 int error;
1c79356b 3114 int resid;
6d2010ae 3115 union macho_vnode_header *header = &data->__header;
0b4e3aa0 3116 off_t fsize = (off_t)0;
6d2010ae 3117
2d21ac55
A
3118 /*
3119 * Capture the kernel credential for use in the actual read of the
3120 * file, since the user doing the execution may have execute rights
3121 * but not read rights, but to exec something, we have to either map
3122 * or read it into the new process address space, which requires
3123 * read rights. This is to deal with lack of common credential
3124 * serialization code which would treat NOCRED as "serialize 'root'".
3125 */
3126 kerncred = vfs_context_ucred(vfs_context_kernel());
91447636 3127
1c79356b 3128 /* init the namei data to point at the user's program name */
6d2010ae 3129 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
1c79356b 3130
91447636 3131 if ((error = namei(ndp)) != 0) {
2d21ac55 3132 if (error == ENOENT) {
55e303ae 3133 error = LOAD_ENOENT;
2d21ac55 3134 } else {
55e303ae 3135 error = LOAD_FAILURE;
2d21ac55 3136 }
0a7de745 3137 return error;
55e303ae 3138 }
91447636 3139 nameidone(ndp);
1c79356b 3140 vp = ndp->ni_vp;
6d2010ae 3141
1c79356b
A
3142 /* check for regular file */
3143 if (vp->v_type != VREG) {
55e303ae 3144 error = LOAD_PROTECT;
1c79356b
A
3145 goto bad1;
3146 }
3147
91447636 3148 /* get size */
2d21ac55 3149 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
55e303ae 3150 error = LOAD_FAILURE;
1c79356b 3151 goto bad1;
55e303ae 3152 }
1c79356b
A
3153
3154 /* Check mount point */
3155 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
55e303ae 3156 error = LOAD_PROTECT;
1c79356b
A
3157 goto bad1;
3158 }
3159
91447636 3160 /* check access */
39236c6e 3161 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
55e303ae 3162 error = LOAD_PROTECT;
1c79356b 3163 goto bad1;
55e303ae 3164 }
0b4e3aa0 3165
1c79356b 3166 /* try to open it */
2d21ac55 3167 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
55e303ae 3168 error = LOAD_PROTECT;
1c79356b 3169 goto bad1;
0b4e3aa0
A
3170 }
3171
0a7de745 3172 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0,
b0d623f7 3173 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
55e303ae 3174 error = LOAD_IOERROR;
1c79356b 3175 goto bad2;
55e303ae 3176 }
6d2010ae 3177
c18c124e
A
3178 if (resid) {
3179 error = LOAD_BADMACHO;
3180 goto bad2;
3181 }
3182
6d2010ae
A
3183 if (header->mach_header.magic == MH_MAGIC ||
3184 header->mach_header.magic == MH_MAGIC_64) {
3185 is_fat = FALSE;
c18c124e 3186 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
0a7de745 3187 is_fat = TRUE;
6d2010ae
A
3188 } else {
3189 error = LOAD_BADMACHO;
3190 goto bad2;
1c79356b
A
3191 }
3192
3193 if (is_fat) {
c18c124e 3194 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
0a7de745 3195 sizeof(*header));
c18c124e
A
3196 if (error != LOAD_SUCCESS) {
3197 goto bad2;
3198 }
3199
0b4e3aa0 3200 /* Look up our architecture in the fat file. */
c18c124e 3201 error = fatfile_getarch_with_bits(archbits,
0a7de745
A
3202 (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
3203 if (error != LOAD_SUCCESS) {
1c79356b 3204 goto bad2;
0a7de745 3205 }
0b4e3aa0
A
3206
3207 /* Read the Mach-O header out of it */
6d2010ae 3208 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
0a7de745 3209 sizeof(header->mach_header), fat_arch.offset,
6d2010ae 3210 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
1c79356b 3211 if (error) {
55e303ae 3212 error = LOAD_IOERROR;
1c79356b
A
3213 goto bad2;
3214 }
3215
c18c124e
A
3216 if (resid) {
3217 error = LOAD_BADMACHO;
3218 goto bad2;
3219 }
3220
0b4e3aa0 3221 /* Is this really a Mach-O? */
6d2010ae
A
3222 if (header->mach_header.magic != MH_MAGIC &&
3223 header->mach_header.magic != MH_MAGIC_64) {
1c79356b
A
3224 error = LOAD_BADMACHO;
3225 goto bad2;
3226 }
0b4e3aa0 3227
1c79356b 3228 *file_offset = fat_arch.offset;
2d21ac55 3229 *macho_size = fat_arch.size;
1c79356b 3230 } else {
91447636
A
3231 /*
3232 * Force get_macho_vnode() to fail if the architecture bits
3233 * do not match the expected architecture bits. This in
3234 * turn causes load_dylinker() to fail for the same reason,
3235 * so it ensures the dynamic linker and the binary are in
3236 * lock-step. This is potentially bad, if we ever add to
3237 * the CPU_ARCH_* bits any bits that are desirable but not
3238 * required, since the dynamic linker might work, but we will
3239 * refuse to load it because of this check.
3240 */
6d2010ae
A
3241 if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
3242 error = LOAD_BADARCH;
3243 goto bad2;
3244 }
0b4e3aa0 3245
1c79356b 3246 *file_offset = 0;
91447636 3247 *macho_size = fsize;
1c79356b
A
3248 }
3249
6d2010ae 3250 *mach_header = header->mach_header;
0b4e3aa0 3251 *vpp = vp;
91447636
A
3252
3253 ubc_setsize(vp, fsize);
0a7de745 3254 return error;
0b4e3aa0 3255
1c79356b 3256bad2:
6d2010ae 3257 (void) VNOP_CLOSE(vp, FREAD, ctx);
1c79356b 3258bad1:
91447636 3259 vnode_put(vp);
0a7de745 3260 return error;
1c79356b 3261}
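
/*
 * Illustrative userland sketch of the header sniffing in get_macho_vnode():
 * read the first 512 bytes, decide thin vs. fat from the magic, and list the
 * fat slices.  Fat headers are big-endian on disk, hence the byte swaps.
 * Files shorter than 512 bytes are rejected, as the resid check above does.
 */
#include <fcntl.h>
#include <libkern/OSByteOrder.h>
#include <mach-o/fat.h>
#include <mach-o/loader.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	union {
		struct mach_header mh;
		struct fat_header  fh;
		char               pad[512];
	} hdr;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
		return 1;
	}
	if (read(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
		close(fd);
		return 1;
	}
	close(fd);

	if (hdr.mh.magic == MH_MAGIC || hdr.mh.magic == MH_MAGIC_64) {
		printf("thin Mach-O, cputype 0x%x\n", hdr.mh.cputype);
	} else if (OSSwapBigToHostInt32(hdr.fh.magic) == FAT_MAGIC) {
		uint32_t n = OSSwapBigToHostInt32(hdr.fh.nfat_arch);
		const struct fat_arch *fa = (const struct fat_arch *)(&hdr.fh + 1);
		for (uint32_t i = 0; i < n &&
		    (const char *)(fa + i + 1) <= hdr.pad + sizeof(hdr.pad); i++) {
			printf("slice %u: cputype 0x%x offset 0x%x size 0x%x\n", i,
			    OSSwapBigToHostInt32((uint32_t)fa[i].cputype),
			    OSSwapBigToHostInt32(fa[i].offset),
			    OSSwapBigToHostInt32(fa[i].size));
		}
	} else {
		printf("not a Mach-O\n");
	}
	return 0;
}
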