]> git.saurik.com Git - apple/xnu.git/blame - bsd/kern/mach_loader.c
xnu-1486.2.11.tar.gz
[apple/xnu.git] / bsd / kern / mach_loader.c
CommitLineData
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
5d5c5d0d 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (C) 1988, 1989, NeXT, Inc.
30 *
31 * File: kern/mach_loader.c
32 * Author: Avadis Tevanian, Jr.
33 *
34 * Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
37 * Started.
38 */
91447636 39
1c79356b 40#include <sys/param.h>
91447636 41#include <sys/vnode_internal.h>
1c79356b
A
42#include <sys/uio.h>
43#include <sys/namei.h>
91447636
A
44#include <sys/proc_internal.h>
45#include <sys/kauth.h>
1c79356b
A
46#include <sys/stat.h>
47#include <sys/malloc.h>
91447636 48#include <sys/mount_internal.h>
1c79356b 49#include <sys/fcntl.h>
91447636
A
50#include <sys/ubc_internal.h>
51#include <sys/imgact.h>
1c79356b 52
1c79356b 53#include <mach/mach_types.h>
91447636
A
54#include <mach/vm_map.h> /* vm_allocate() */
55#include <mach/mach_vm.h> /* mach_vm_allocate() */
56#include <mach/vm_statistics.h>
91447636
A
57#include <mach/task.h>
58#include <mach/thread_act.h>
59
60#include <machine/vmparam.h>
2d21ac55 61#include <machine/exec.h>
1c79356b 62
91447636
A
63#include <kern/kern_types.h>
64#include <kern/cpu_number.h>
1c79356b 65#include <kern/mach_loader.h>
b0d623f7 66#include <kern/mach_fat.h>
91447636 67#include <kern/kalloc.h>
55e303ae 68#include <kern/task.h>
91447636 69#include <kern/thread.h>
593a1d5f 70#include <kern/page_decrypt.h>
1c79356b
A
71
72#include <mach-o/fat.h>
73#include <mach-o/loader.h>
74
91447636 75#include <vm/pmap.h>
1c79356b
A
76#include <vm/vm_map.h>
77#include <vm/vm_kern.h>
78#include <vm/vm_pager.h>
79#include <vm/vnode_pager.h>
91447636 80#include <vm/vm_protos.h>
9bccf70c 81
b0d623f7 82
91447636
A
83/*
84 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
85 * when KERNEL is defined.
86 */
0c530ab8 87extern pmap_t pmap_create(vm_map_size_t size, boolean_t is_64bit);
91447636 88extern void pmap_switch(pmap_t);
91447636
A
89
90/*
91 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
92 * when KERNEL is defined.
93 */
94extern kern_return_t thread_setstatus(thread_t thread, int flavor,
95 thread_state_t tstate,
96 mach_msg_type_number_t count);
97
98extern kern_return_t thread_state_initialize(thread_t thread);
99
100
101/* XXX should have prototypes in a shared header file */
91447636
A
102extern int get_map_nentries(vm_map_t);
103extern kern_return_t thread_userstack(thread_t, int, thread_state_t,
104 unsigned int, mach_vm_offset_t *, int *);
105extern kern_return_t thread_entrypoint(thread_t, int, thread_state_t,
106 unsigned int, mach_vm_offset_t *);
107
2d21ac55
A
108extern kern_return_t memory_object_signed(memory_object_control_t control,
109 boolean_t is_signed);
91447636
A
110
111/* An empty load_result_t */
112static load_result_t load_result_null = {
2d21ac55
A
113 .mach_header = MACH_VM_MIN_ADDRESS,
114 .entry_point = MACH_VM_MIN_ADDRESS,
115 .user_stack = MACH_VM_MIN_ADDRESS,
b0d623f7
A
116 .all_image_info_addr = MACH_VM_MIN_ADDRESS,
117 .all_image_info_size = 0,
2d21ac55
A
118 .thread_count = 0,
119 .unixproc = 0,
120 .dynlinker = 0,
121 .customstack = 0,
b0d623f7
A
122 .csflags = 0,
123 .uuid = { 0 }
91447636 124};
9bccf70c 125
1c79356b
A
126/*
127 * Prototypes of static functions.
128 */
91447636 129static load_return_t
1c79356b
A
130parse_machfile(
131 struct vnode *vp,
91447636 132 vm_map_t map,
2d21ac55 133 thread_t thread,
1c79356b 134 struct mach_header *header,
91447636
A
135 off_t file_offset,
136 off_t macho_size,
91447636
A
137 int depth,
138 load_result_t *result
139);
140
141static load_return_t
1c79356b 142load_segment(
b0d623f7
A
143 struct load_command *lcp,
144 uint32_t filetype,
145 void *control,
91447636
A
146 off_t pager_offset,
147 off_t macho_size,
b0d623f7 148 struct vnode *vp,
91447636
A
149 vm_map_t map,
150 load_result_t *result
151);
152
2d21ac55
A
153int load_code_signature(
154 struct linkedit_data_command *lcp,
155 struct vnode *vp,
156 off_t macho_offset,
157 off_t macho_size,
158 cpu_type_t cputype,
159 load_result_t *result);
160
593a1d5f
A
161#if CONFIG_CODE_DECRYPTION
162static load_return_t
163set_code_unprotect(
164 struct encryption_info_command *lcp,
165 caddr_t addr,
166 vm_map_t map,
167 struct vnode *vp);
168#endif
169
91447636 170static load_return_t
1c79356b
A
171load_unixthread(
172 struct thread_command *tcp,
2d21ac55 173 thread_t thread,
0b4e3aa0 174 load_result_t *result
91447636
A
175);
176
177static load_return_t
1c79356b
A
178load_thread(
179 struct thread_command *tcp,
2d21ac55 180 thread_t thread,
0b4e3aa0 181 load_result_t *result
91447636
A
182);
183
184static load_return_t
1c79356b 185load_threadstate(
0b4e3aa0 186 thread_t thread,
b0d623f7
A
187 uint32_t *ts,
188 uint32_t total_size
91447636
A
189);
190
191static load_return_t
1c79356b 192load_threadstack(
0b4e3aa0 193 thread_t thread,
b0d623f7
A
194 uint32_t *ts,
195 uint32_t total_size,
2d21ac55 196 user_addr_t *user_stack,
0b4e3aa0 197 int *customstack
91447636
A
198);
199
200static load_return_t
1c79356b 201load_threadentry(
0b4e3aa0 202 thread_t thread,
b0d623f7
A
203 uint32_t *ts,
204 uint32_t total_size,
91447636
A
205 mach_vm_offset_t *entry_point
206);
207
208static load_return_t
1c79356b
A
209load_dylinker(
210 struct dylinker_command *lcp,
91447636 211 integer_t archbits,
0b4e3aa0 212 vm_map_t map,
2d21ac55 213 thread_t thread,
0b4e3aa0 214 int depth,
55e303ae 215 load_result_t *result,
0c530ab8 216 boolean_t is_64bit
91447636
A
217);
218
219static load_return_t
1c79356b 220get_macho_vnode(
0b4e3aa0 221 char *path,
91447636 222 integer_t archbits,
1c79356b 223 struct mach_header *mach_header,
91447636
A
224 off_t *file_offset,
225 off_t *macho_size,
1c79356b
A
226 struct vnode **vpp
227);
228
b0d623f7
A
229static inline void
230widen_segment_command(const struct segment_command *scp32,
231 struct segment_command_64 *scp)
232{
233 scp->cmd = scp32->cmd;
234 scp->cmdsize = scp32->cmdsize;
235 bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
236 scp->vmaddr = scp32->vmaddr;
237 scp->vmsize = scp32->vmsize;
238 scp->fileoff = scp32->fileoff;
239 scp->filesize = scp32->filesize;
240 scp->maxprot = scp32->maxprot;
241 scp->initprot = scp32->initprot;
242 scp->nsects = scp32->nsects;
243 scp->flags = scp32->flags;
244}
245
246static void
247note_all_image_info_section(const struct segment_command_64 *scp,
248 boolean_t is64, size_t section_size, const void *sections,
249 load_result_t *result)
250{
251 const union {
252 struct section s32;
253 struct section_64 s64;
254 } *sectionp;
255 unsigned int i;
256
257 if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
258 return;
259 for (i = 0; i < scp->nsects; ++i) {
260 sectionp = (const void *)
261 ((const char *)sections + section_size * i);
262 if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
263 sizeof(sectionp->s64.sectname))) {
264 result->all_image_info_addr =
265 is64 ? sectionp->s64.addr : sectionp->s32.addr;
266 result->all_image_info_size =
267 is64 ? sectionp->s64.size : sectionp->s32.size;
268 return;
269 }
270 }
271}
272
273
1c79356b
A
/*
 * load_machfile
 *
 * Top-level entry for loading a Mach-O image into a task.  Optionally
 * creates a fresh VM map/pmap (exec and posix_spawn paths), parses the
 * Mach-O via parse_machfile(), and on success commits the new map by
 * swapping it into the task.
 *
 * Returns a LOAD_* status; on failure any map created here is released.
 */
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thread,
	vm_map_t		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	task_t			old_task = TASK_NULL; /* protected by create_map */
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = FALSE;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	task_t task = current_task();

	/* No caller-supplied map: exec path; build a new map below. */
	if (new_map == VM_MAP_NULL) {
		create_map = TRUE;
		old_task = current_task();
	}

	/*
	 * If we are spawning, we have created backing objects for the process
	 * already, which include non-lazily creating the task map.  So we
	 * are going to switch out the task map with one appropriate for the
	 * bitness of the image being loaded.
	 */
	if (spawn) {
		create_map = TRUE;
		old_task = get_threadtask(thread);
	}

	if (create_map) {
		/* Size the pmap/map for the image's 32/64-bit address space. */
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

#ifndef CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which allows to
	 * circumvent Code Signing Enforcement */
	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);
#endif

	/* Caller may not care about the result; use a scratch one. */
	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */

	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 *	Commit to new map.
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */

	 if (create_map) {
		/*
		 * If this is an exec, then we are going to destroy the old
		 * task, and it's correct to halt it; if it's spawn, the
		 * task is not yet running, and it makes no sense.
		 */
	 	if (!spawn) {
			/*
			 * Mark the task as halting and start the other
			 * threads towards terminating themselves.  Then
			 * make sure any threads waiting for a process
			 * transition get informed that we are committed to
			 * this transition, and then finally complete the
			 * task halting (wait for threads and then cleanup
			 * task resources).
			 */
			task_start_halt(task);
			proc_transcommit(current_proc(), 0);
			task_complete_halt(task);
		}
		old_map = swap_task_map(old_task, thread, map);
		vm_map_clear_4GB_pagezero(old_map);
		/* XXX L4 : For spawn the current task isn't running... */
		if (!spawn)
			pmap_switch(pmap); /* Make sure we are using the new pmap */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
392
91447636
A
393/*
394 * The file size of a mach-o file is limited to 32 bits; this is because
395 * this is the limit on the kalloc() of enough bytes for a mach_header and
396 * the contents of its sizeofcmds, which is currently constrained to 32
397 * bits in the file format itself. We read into the kernel buffer the
398 * commands section, and then parse it in order to parse the mach-o file
399 * format load_command segment(s). We are only interested in a subset of
400 * the total set of possible commands.
401 */
1c79356b
A
/*
 * parse_machfile
 *
 * Read the Mach-O load commands into a kernel buffer and process them
 * in two passes: pass 1 handles LC_SEGMENT/LC_SEGMENT_64 (so memory is
 * mapped first), pass 2 handles threads, the dylinker, UUID, code
 * signatures and encryption info.  'depth' tracks recursion when the
 * dylinker itself is loaded (depth 2) and caps runaway recursion.
 *
 * Returns a LOAD_* status; the kalloc'd command buffer is always freed.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	struct uuid_command	*uulp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int resid=0;
	task_t task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;

	/* 64-bit images carry the wider mach_header_64. */
	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	task = (task_t)get_threadtask(thread);

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype,
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	/*
	 * Constrain file types to the recursion depth at which they are
	 * legal: executables only at the top level, dylibs only when
	 * recursed into, the dylinker only at depth 2.
	 */
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundry.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
			case LC_SEGMENT_64:
				/* segments are mapped on pass 1 only */
				if (pass != 1)
					break;
				ret = load_segment(lcp,
				    		   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   map,
						   result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						 thread,
						 result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				/* only one dylinker, and only in the main image */
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 2 && depth == 1) {
					uulp = (struct uuid_command *)lcp;
					memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 2)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
				if (pass != 2)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, vp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter */
					 psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) {
		if (! got_code_signatures) {
			struct cs_blob *blob;
			/* no embedded signatures: look for detached ones */
			blob = ubc_cs_blob_get(vp, -1, file_offset);
			if (blob != NULL) {
				/* get flags to be applied to the process */
				result->csflags |= blob->csb_flags;
			}
		}

		/* load the dylinker (depth 2 recursion) last */
		if (dlp != 0)
			ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

		if(depth == 1) {
			/* a valid executable must have supplied a thread */
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			} else if ( abi64 ) {
#ifdef __ppc__
				/* Map in 64-bit commpage */
				/*
				 * PPC51: ppc64 is limited to 51-bit addresses.
				 * Memory above that limit is handled specially
				 * at the pmap level.
				 *
				 * <rdar://6640492> -- wrong task for vfork()/spawn()
				 */
				pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
			}
		}
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	return(ret);
}
701
#if CONFIG_CODE_DECRYPTION

#define	APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

/*
 * unprotect_segment
 *
 * Arrange for decryption of a "protected" (SG_PROTECTED_VERSION_1)
 * segment mapping.  The first APPLE_UNPROTECTED_HEADER_SIZE bytes of
 * the slice are left alone; the remainder of the mapping is handed to
 * vm_map_apple_protected() with a dsmos page-transform hook.
 */
static load_return_t
unprotect_segment(
	uint64_t	file_off,
	uint64_t	file_size,
	struct vnode	*vp,
	off_t		macho_offset,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		struct pager_crypt_info crypt_info;
		crypt_info.page_decrypt = dsmos_page_transform;
		crypt_info.crypt_ops = NULL;
		crypt_info.crypt_end = NULL;
#pragma unused(vp, macho_offset)
		/* NOTE(review): crypt_ops is immediately overwritten with the
		 * magic constant 0x2e69cf40 after being set to NULL above --
		 * presumably a sentinel recognized by vm_map_apple_protected();
		 * confirm against that function before touching this. */
		crypt_info.crypt_ops = (void *)0x2e69cf40;
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size,
					    &crypt_info);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* CONFIG_CODE_DECRYPTION */
/* Stub used when the kernel is built without code decryption support:
 * protected segments are simply accepted unchanged. */
static load_return_t
unprotect_segment(
	__unused	uint64_t	file_off,
	__unused	uint64_t	file_size,
	__unused	struct vnode	*vp,
	__unused	off_t		macho_offset,
	__unused	vm_map_t	map,
	__unused	vm_map_offset_t	map_addr,
	__unused	vm_map_size_t	map_size)
{
	return LOAD_SUCCESS;
}
#endif	/* CONFIG_CODE_DECRYPTION */
1c79356b
A
772
/*
 * load_segment
 *
 * Process an LC_SEGMENT or LC_SEGMENT_64 command: validate its extents
 * against the file, map the file-backed portion into the task's map,
 * allocate zero-fill for any remaining virtual size, and handle the
 * special "page zero" and protected-segment cases.
 *
 * Returns a LOAD_* status.
 */
static
load_return_t
load_segment(
	struct load_command		*lcp,
	uint32_t			filetype,
	void *				control,
	off_t				pager_offset,
	off_t				macho_size,
	struct vnode			*vp,
	vm_map_t			map,
	load_result_t		*result
)
{
	struct segment_command_64 segment_command, *scp;
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;
	size_t			segment_command_size, total_section_size,
				single_section_size;

	/* Normalize: 32-bit commands are widened into a local 64-bit copy
	 * so the rest of the function handles one layout. */
	if (LC_SEGMENT_64 == lcp->cmd) {
		segment_command_size = sizeof(struct segment_command_64);
		single_section_size  = sizeof(struct section_64);
		scp = (struct segment_command_64 *)lcp;
	} else {
		segment_command_size = sizeof(struct segment_command);
		single_section_size  = sizeof(struct section);
		scp = &segment_command;
		widen_segment_command((struct segment_command *)lcp, scp);
	}
	if (lcp->cmdsize < segment_command_size)
		return (LOAD_BADMACHO);
	total_section_size = lcp->cmdsize - segment_command_size;

	/*
	 *	Make sure what we get from the file is really ours (as specified
	 *	by macho_size).  The first test also catches fileoff+filesize
	 *	wrapping around 64 bits.
	 */
	if (scp->fileoff + scp->filesize < scp->fileoff ||
	    scp->fileoff + scp->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Ensure that the number of sections specified would fit
	 * within the load command size.
	 */
	if (total_section_size / single_section_size < scp->nsects)
		return (LOAD_BADMACHO);
	/*
	 *	Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK_64) != 0)
		return (LOAD_BADMACHO);

	/*
	 *	Round sizes to page size.
	 */
	seg_size = round_page_64(scp->vmsize);
	map_size = round_page_64(scp->filesize);
	map_addr = trunc_page_64(scp->vmaddr); /* JVXXX note that in XNU TOT this is round instead of trunc for 64 bits */
	if (seg_size == 0)
		return (KERN_SUCCESS);
	/* XXX (4596982) this interferes with Rosetta, so limit to 64-bit tasks */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    scp->cmd == LC_SEGMENT_64 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return (LOAD_FAILURE);
		}
		return (LOAD_SUCCESS);
	}

	map_offset = pager_offset + scp->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map_enter_mem_object_control(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
			        VM_FLAGS_FIXED,	control, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = mach_vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) mach_vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) mach_vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp->initprot, scp->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	/* The segment mapped from file offset 0 holds the mach header. */
	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment(scp->fileoff,
					scp->filesize,
					vp,
					pager_offset,
					map,
					map_addr,
					map_size);
	} else {
		ret = LOAD_SUCCESS;
	}
	/* For the dylinker, record dyld's all_image_info section once. */
	if (LOAD_SUCCESS == ret && filetype == MH_DYLINKER &&
	    result->all_image_info_addr == MACH_VM_MIN_ADDRESS)
		note_all_image_info_section(scp,
		    LC_SEGMENT_64 == lcp->cmd, single_section_size,
		    (const char *)lcp + segment_command_size, result);

	return ret;
}
937
/*
 * load_thread
 *
 * Process an LC_THREAD command.  The first thread reuses the caller's
 * thread and records stack/entry point in 'result'; subsequent
 * LC_THREAD commands create additional threads which are resumed
 * immediately.
 *
 * Returns a LOAD_* status.
 */
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t			thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t			task;
	int customstack=0;

	/* The command must at least hold its own fixed header. */
	if (tcp->cmdsize < sizeof(*tcp))
		return (LOAD_BADMACHO);
	task = get_threadtask(thread);

	/* if count is 0; same as thread */
	if (result->thread_count != 0) {
		/* extra threads get a fresh kernel thread in this task */
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	/* register state flavors follow the fixed command header */
	lret = load_threadstate(thread,
		       (uint32_t *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		/* first thread: record its stack and entry point */
		lret = load_threadstack(thread,
				(uint32_t *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
				result->customstack = 1;
		else
				result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(uint32_t *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
1005
91447636
A
1006static
1007load_return_t
1008load_unixthread(
1009 struct thread_command *tcp,
1010 thread_t thread,
1011 load_result_t *result
1012)
1013{
1014 load_return_t ret;
1015 int customstack =0;
1016
b0d623f7
A
1017 if (tcp->cmdsize < sizeof(*tcp))
1018 return (LOAD_BADMACHO);
2d21ac55
A
1019 if (result->thread_count != 0) {
1020printf("load_unixthread: already have a thread!");
91447636 1021 return (LOAD_FAILURE);
2d21ac55 1022 }
91447636
A
1023
1024 ret = load_threadstack(thread,
b0d623f7 1025 (uint32_t *)(((vm_offset_t)tcp) +
91447636
A
1026 sizeof(struct thread_command)),
1027 tcp->cmdsize - sizeof(struct thread_command),
1028 &result->user_stack,
1029 &customstack);
1030 if (ret != LOAD_SUCCESS)
1031 return(ret);
1032
1033 if (customstack)
1034 result->customstack = 1;
1035 else
1036 result->customstack = 0;
1037 ret = load_threadentry(thread,
b0d623f7 1038 (uint32_t *)(((vm_offset_t)tcp) +
91447636
A
1039 sizeof(struct thread_command)),
1040 tcp->cmdsize - sizeof(struct thread_command),
1041 &result->entry_point);
1042 if (ret != LOAD_SUCCESS)
1043 return(ret);
1044
1045 ret = load_threadstate(thread,
b0d623f7 1046 (uint32_t *)(((vm_offset_t)tcp) +
91447636
A
1047 sizeof(struct thread_command)),
1048 tcp->cmdsize - sizeof(struct thread_command));
1049 if (ret != LOAD_SUCCESS)
1050 return (ret);
1051
1052 result->unixproc = TRUE;
1053 result->thread_count++;
1054
1055 return(LOAD_SUCCESS);
1056}
1057
1c79356b
A
1058static
1059load_return_t
1060load_threadstate(
1061 thread_t thread,
b0d623f7
A
1062 uint32_t *ts,
1063 uint32_t total_size
1c79356b
A
1064)
1065{
1066 kern_return_t ret;
b0d623f7 1067 uint32_t size;
1c79356b 1068 int flavor;
b0d623f7 1069 uint32_t thread_size;
1c79356b 1070
91447636 1071 ret = thread_state_initialize( thread );
2d21ac55 1072 if (ret != KERN_SUCCESS) {
91447636 1073 return(LOAD_FAILURE);
2d21ac55 1074 }
91447636 1075
1c79356b 1076 /*
91447636
A
1077 * Set the new thread state; iterate through the state flavors in
1078 * the mach-o file.
1c79356b 1079 */
1c79356b
A
1080 while (total_size > 0) {
1081 flavor = *ts++;
1082 size = *ts++;
b0d623f7
A
1083 if (UINT32_MAX-2 < size ||
1084 UINT32_MAX/sizeof(uint32_t) < size+2)
1085 return (LOAD_BADMACHO);
1086 thread_size = (size+2)*sizeof(uint32_t);
91447636 1087 if (thread_size > total_size)
1c79356b 1088 return(LOAD_BADMACHO);
91447636
A
1089 total_size -= thread_size;
1090 /*
1091 * Third argument is a kernel space pointer; it gets cast
1092 * to the appropriate type in machine_thread_set_state()
1093 * based on the value of flavor.
1094 */
1095 ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
2d21ac55 1096 if (ret != KERN_SUCCESS) {
1c79356b 1097 return(LOAD_FAILURE);
2d21ac55 1098 }
b0d623f7 1099 ts += size; /* ts is a (uint32_t *) */
1c79356b
A
1100 }
1101 return(LOAD_SUCCESS);
1102}
1103
1104static
1105load_return_t
1106load_threadstack(
1107 thread_t thread,
b0d623f7
A
1108 uint32_t *ts,
1109 uint32_t total_size,
91447636 1110 user_addr_t *user_stack,
0b4e3aa0 1111 int *customstack
1c79356b
A
1112)
1113{
1114 kern_return_t ret;
b0d623f7 1115 uint32_t size;
1c79356b 1116 int flavor;
b0d623f7 1117 uint32_t stack_size;
1c79356b 1118
1c79356b
A
1119 while (total_size > 0) {
1120 flavor = *ts++;
1121 size = *ts++;
b0d623f7
A
1122 if (UINT32_MAX-2 < size ||
1123 UINT32_MAX/sizeof(uint32_t) < size+2)
1124 return (LOAD_BADMACHO);
1125 stack_size = (size+2)*sizeof(uint32_t);
91447636 1126 if (stack_size > total_size)
1c79356b 1127 return(LOAD_BADMACHO);
91447636
A
1128 total_size -= stack_size;
1129
1130 /*
1131 * Third argument is a kernel space pointer; it gets cast
1132 * to the appropriate type in thread_userstack() based on
1133 * the value of flavor.
1134 */
1135 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
2d21ac55 1136 if (ret != KERN_SUCCESS) {
1c79356b 1137 return(LOAD_FAILURE);
2d21ac55 1138 }
b0d623f7 1139 ts += size; /* ts is a (uint32_t *) */
1c79356b
A
1140 }
1141 return(LOAD_SUCCESS);
1142}
1143
1144static
1145load_return_t
1146load_threadentry(
1147 thread_t thread,
b0d623f7
A
1148 uint32_t *ts,
1149 uint32_t total_size,
91447636 1150 mach_vm_offset_t *entry_point
1c79356b
A
1151)
1152{
1153 kern_return_t ret;
b0d623f7 1154 uint32_t size;
1c79356b 1155 int flavor;
b0d623f7 1156 uint32_t entry_size;
1c79356b
A
1157
1158 /*
1159 * Set the thread state.
1160 */
91447636 1161 *entry_point = MACH_VM_MIN_ADDRESS;
1c79356b
A
1162 while (total_size > 0) {
1163 flavor = *ts++;
1164 size = *ts++;
b0d623f7
A
1165 if (UINT32_MAX-2 < size ||
1166 UINT32_MAX/sizeof(uint32_t) < size+2)
1167 return (LOAD_BADMACHO);
1168 entry_size = (size+2)*sizeof(uint32_t);
91447636 1169 if (entry_size > total_size)
1c79356b 1170 return(LOAD_BADMACHO);
91447636
A
1171 total_size -= entry_size;
1172 /*
1173 * Third argument is a kernel space pointer; it gets cast
1174 * to the appropriate type in thread_entrypoint() based on
1175 * the value of flavor.
1176 */
1177 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2d21ac55 1178 if (ret != KERN_SUCCESS) {
1c79356b 1179 return(LOAD_FAILURE);
2d21ac55 1180 }
b0d623f7 1181 ts += size; /* ts is a (uint32_t *) */
1c79356b
A
1182 }
1183 return(LOAD_SUCCESS);
1184}
1185
1c79356b
A
1186
/*
 * load_dylinker
 *
 * Load the dynamic linker named by an LC_LOAD_DYLINKER command into
 * "map", recursing into parse_machfile() at the given depth.  On
 * success, records dyld's entry point and all-image-info address/size
 * in *result so exec can hand control to dyld instead of the binary.
 *
 * "archbits" forces the dylinker's CPU_ARCH_* bits to match the main
 * binary's (see get_macho_vnode()); "is_64bit" selects the pmap type
 * for the fallback temporary map.
 */
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t	thread,
	int			depth,
	load_result_t		*result,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
	struct mach_header	header;
	off_t			file_offset = 0; /* set by get_macho_vnode() */
	off_t			macho_size = 0;	/* set by get_macho_vnode() */
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t	tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	if (lcp->cmdsize < sizeof(*lcp))
		return (LOAD_BADMACHO);

	/* The pathname lives inside the command, at name.offset. */
	name = (char *)lcp + lcp->name.offset;
	/*
	 * Check for a proper null terminated string; the scan must
	 * terminate inside the command's own cmdsize bytes.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	/* Resolve the dylinker path to a vnode + Mach-O header/extent. */
	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = load_result_null;

	/*
	 *	First try to map dyld in directly.  This should work most of
	 *	the time since there shouldn't normally be something already
	 *	mapped to its address.
	 */

	ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
				depth, &myresult);

	/*
	 *	If it turned out something was in the way, then we'll take
	 *	take this longer path to map dyld into a temporary map and
	 *	copy it into destination map at a different address.
	 */

	if (ret == LOAD_NOSPACE) {

		/*
		 *	Load the Mach-O.
		 *	Use a temporary map to do the work.
		 */
		copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
						     is_64bit),
					 get_map_min(map), get_map_max(map), TRUE);
		if (VM_MAP_NULL == copy_map) {
			ret = LOAD_RESOURCE;
			goto out;
		}

		/* Reset the result; the failed direct attempt polluted it. */
		myresult = load_result_null;

		ret = parse_machfile(vp, copy_map, thread, &header,
					file_offset, macho_size,
					depth, &myresult);

		if (ret) {
			vm_map_deallocate(copy_map);
			goto out;
		}

		if (get_map_nentries(copy_map) > 0) {

			/* Extent of everything mapped in the temporary map. */
			dyl_start = mach_get_vm_start(copy_map);
			dyl_length = mach_get_vm_end(copy_map) - dyl_start;

			/*
			 * Reserve a same-sized range anywhere in the real
			 * map (ANYWHERE: the preferred address was busy).
			 */
			map_addr = dyl_start;
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

			if (ret != KERN_SUCCESS) {
				vm_map_deallocate(copy_map);
				ret = LOAD_NOSPACE;
				goto out;
			
			}

			/* Capture dyld's mappings from the temporary map... */
			ret = vm_map_copyin(copy_map,
					    (vm_map_address_t)dyl_start,
					    (vm_map_size_t)dyl_length,
					    TRUE, &tmp);
			if (ret != KERN_SUCCESS) {
				/* Unwind the reservation made above. */
				(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
				vm_map_deallocate(copy_map);
				goto out;
			}

			/* ...and overwrite the reserved range with them. */
			ret = vm_map_copy_overwrite(map,
					     (vm_map_address_t)map_addr,
					     tmp, FALSE);
			if (ret != KERN_SUCCESS) {
				vm_map_copy_discard(tmp);
				(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
				vm_map_deallocate(copy_map);
				goto out;
			}

			/*
			 * dyld landed at a different address than it was
			 * linked for: slide the recorded entry point and
			 * all-image-info address by the same delta.
			 */
			if (map_addr != dyl_start) {
				myresult.entry_point += (map_addr - dyl_start);
				myresult.all_image_info_addr +=
					(map_addr - dyl_start);
			}
		} else {
			/* parse_machfile mapped nothing at all: give up. */
			ret = LOAD_FAILURE;
		}

		vm_map_deallocate(copy_map);
	}

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		result->all_image_info_addr = myresult.all_image_info_addr;
		result->all_image_info_size = myresult.all_image_info_size;
	}
out:
	vnode_put(vp);
	return (ret);

}
1335
2d21ac55
A
1336int
1337load_code_signature(
1338 struct linkedit_data_command *lcp,
1339 struct vnode *vp,
1340 off_t macho_offset,
1341 off_t macho_size,
1342 cpu_type_t cputype,
1343 load_result_t *result)
1344{
1345 int ret;
1346 kern_return_t kr;
1347 vm_offset_t addr;
1348 int resid;
1349 struct cs_blob *blob;
1350 int error;
593a1d5f 1351 vm_size_t blob_size;
2d21ac55
A
1352
1353 addr = 0;
1354 blob = NULL;
1355
1356 if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
1357 lcp->dataoff + lcp->datasize > macho_size) {
1358 ret = LOAD_BADMACHO;
1359 goto out;
1360 }
1361
1362 blob = ubc_cs_blob_get(vp, cputype, -1);
1363 if (blob != NULL) {
1364 /* we already have a blob for this vnode and cputype */
1365 if (blob->csb_cpu_type == cputype &&
1366 blob->csb_base_offset == macho_offset &&
1367 blob->csb_mem_size == lcp->datasize) {
1368 /* it matches the blob we want here: we're done */
1369 ret = LOAD_SUCCESS;
1370 } else {
1371 /* the blob has changed for this vnode: fail ! */
1372 ret = LOAD_BADMACHO;
1373 }
1374 goto out;
1375 }
1376
593a1d5f
A
1377 blob_size = lcp->datasize;
1378 kr = ubc_cs_blob_allocate(&addr, &blob_size);
2d21ac55
A
1379 if (kr != KERN_SUCCESS) {
1380 ret = LOAD_NOSPACE;
1381 goto out;
1382 }
1383
1384 resid = 0;
1385 error = vn_rdwr(UIO_READ,
1386 vp,
1387 (caddr_t) addr,
1388 lcp->datasize,
1389 macho_offset + lcp->dataoff,
b0d623f7 1390 UIO_SYSSPACE,
2d21ac55
A
1391 0,
1392 kauth_cred_get(),
1393 &resid,
1394 current_proc());
1395 if (error || resid != 0) {
1396 ret = LOAD_IOERROR;
1397 goto out;
1398 }
1399
1400 if (ubc_cs_blob_add(vp,
1401 cputype,
1402 macho_offset,
1403 addr,
1404 lcp->datasize)) {
1405 ret = LOAD_FAILURE;
1406 goto out;
1407 } else {
1408 /* ubc_cs_blob_add() has consumed "addr" */
1409 addr = 0;
1410 }
1411
1412 blob = ubc_cs_blob_get(vp, cputype, -1);
1413
1414 ret = LOAD_SUCCESS;
1415out:
1416 if (result && ret == LOAD_SUCCESS) {
1417 result->csflags |= blob->csb_flags;
1418 }
1419 if (addr != 0) {
593a1d5f 1420 ubc_cs_blob_deallocate(addr, blob_size);
2d21ac55
A
1421 addr = 0;
1422 }
1423
1424 return ret;
1425}
1426
593a1d5f
A
1427
1428#if CONFIG_CODE_DECRYPTION
1429
1430static load_return_t
1431set_code_unprotect(
1432 struct encryption_info_command *eip,
1433 caddr_t addr,
1434 vm_map_t map,
1435 struct vnode *vp)
1436{
1437 int result, len;
1438 char vpath[MAXPATHLEN];
1439 pager_crypt_info_t crypt_info;
1440 const char * cryptname = 0;
1441
1442 size_t offset;
1443 struct segment_command_64 *seg64;
1444 struct segment_command *seg32;
1445 vm_map_offset_t map_offset, map_size;
1446 kern_return_t kr;
b0d623f7
A
1447
1448 if (eip->cmdsize < sizeof(*eip))
1449 return LOAD_BADMACHO;
593a1d5f
A
1450
1451 switch(eip->cryptid) {
1452 case 0:
1453 /* not encrypted, just an empty load command */
1454 return LOAD_SUCCESS;
1455 case 1:
1456 cryptname="com.apple.unfree";
1457 break;
1458 case 0x10:
1459 /* some random cryptid that you could manually put into
1460 * your binary if you want NULL */
1461 cryptname="com.apple.null";
1462 break;
1463 default:
c910b4d9 1464 return LOAD_BADMACHO;
593a1d5f
A
1465 }
1466
1467 len = MAXPATHLEN;
1468 result = vn_getpath(vp, vpath, &len);
1469 if(result) return result;
1470
1471 /* set up decrypter first */
1472 if(NULL==text_crypter_create) return LOAD_FAILURE;
1473 kr=text_crypter_create(&crypt_info, cryptname, (void*)vpath);
1474
1475 if(kr) {
c910b4d9 1476 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
593a1d5f 1477 cryptname, kr);
c910b4d9 1478 return LOAD_RESOURCE;
593a1d5f
A
1479 }
1480
1481 /* this is terrible, but we have to rescan the load commands to find the
1482 * virtual address of this encrypted stuff. This code is gonna look like
1483 * the dyld source one day... */
1484 struct mach_header *header = (struct mach_header *)addr;
1485 size_t mach_header_sz = sizeof(struct mach_header);
1486 if (header->magic == MH_MAGIC_64 ||
1487 header->magic == MH_CIGAM_64) {
1488 mach_header_sz = sizeof(struct mach_header_64);
1489 }
1490 offset = mach_header_sz;
1491 uint32_t ncmds = header->ncmds;
1492 while (ncmds--) {
1493 /*
1494 * Get a pointer to the command.
1495 */
1496 struct load_command *lcp = (struct load_command *)(addr + offset);
1497 offset += lcp->cmdsize;
1498
1499 switch(lcp->cmd) {
1500 case LC_SEGMENT_64:
1501 seg64 = (struct segment_command_64 *)lcp;
1502 if ((seg64->fileoff <= eip->cryptoff) &&
1503 (seg64->fileoff+seg64->filesize >=
1504 eip->cryptoff+eip->cryptsize)) {
1505 map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff;
1506 map_size = eip->cryptsize;
1507 goto remap_now;
1508 }
1509 case LC_SEGMENT:
1510 seg32 = (struct segment_command *)lcp;
1511 if ((seg32->fileoff <= eip->cryptoff) &&
1512 (seg32->fileoff+seg32->filesize >=
1513 eip->cryptoff+eip->cryptsize)) {
1514 map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff;
1515 map_size = eip->cryptsize;
1516 goto remap_now;
1517 }
1518 }
1519 }
1520
1521 /* if we get here, did not find anything */
c910b4d9 1522 return LOAD_BADMACHO;
593a1d5f
A
1523
1524remap_now:
1525 /* now remap using the decrypter */
1526 kr = vm_map_apple_protected(map, map_offset, map_offset+map_size, &crypt_info);
c910b4d9
A
1527 if(kr) {
1528 printf("set_code_unprotect(): mapping failed with %x\n", kr);
1529 crypt_info.crypt_end(crypt_info.crypt_ops);
1530 return LOAD_PROTECT;
1531 }
593a1d5f
A
1532
1533 return LOAD_SUCCESS;
1534}
1535
1536#endif
1537
91447636
A
1538/*
1539 * This routine exists to support the load_dylinker().
1540 *
1541 * This routine has its own, separate, understanding of the FAT file format,
1542 * which is terrifically unfortunate.
1543 */
1c79356b
A
1544static
1545load_return_t
1546get_macho_vnode(
1547 char *path,
91447636 1548 integer_t archbits,
1c79356b 1549 struct mach_header *mach_header,
91447636
A
1550 off_t *file_offset,
1551 off_t *macho_size,
1c79356b
A
1552 struct vnode **vpp
1553)
1554{
1555 struct vnode *vp;
2d21ac55
A
1556 vfs_context_t ctx = vfs_context_current();
1557 proc_t p = vfs_context_proc(ctx);
1558 kauth_cred_t kerncred;
1c79356b 1559 struct nameidata nid, *ndp;
1c79356b
A
1560 boolean_t is_fat;
1561 struct fat_arch fat_arch;
55e303ae 1562 int error = LOAD_SUCCESS;
1c79356b
A
1563 int resid;
1564 union {
1565 struct mach_header mach_header;
1566 struct fat_header fat_header;
1567 char pad[512];
1568 } header;
0b4e3aa0 1569 off_t fsize = (off_t)0;
55e303ae 1570 int err2;
1c79356b 1571
2d21ac55
A
1572 /*
1573 * Capture the kernel credential for use in the actual read of the
1574 * file, since the user doing the execution may have execute rights
1575 * but not read rights, but to exec something, we have to either map
1576 * or read it into the new process address space, which requires
1577 * read rights. This is to deal with lack of common credential
1578 * serialization code which would treat NOCRED as "serialize 'root'".
1579 */
1580 kerncred = vfs_context_ucred(vfs_context_kernel());
91447636 1581
1c79356b 1582 ndp = &nid;
1c79356b
A
1583
1584 /* init the namei data to point the file user's program name */
b0d623f7 1585 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
1c79356b 1586
91447636 1587 if ((error = namei(ndp)) != 0) {
2d21ac55 1588 if (error == ENOENT) {
55e303ae 1589 error = LOAD_ENOENT;
2d21ac55 1590 } else {
55e303ae 1591 error = LOAD_FAILURE;
2d21ac55 1592 }
1c79356b 1593 return(error);
55e303ae 1594 }
91447636 1595 nameidone(ndp);
1c79356b
A
1596 vp = ndp->ni_vp;
1597
1598 /* check for regular file */
1599 if (vp->v_type != VREG) {
55e303ae 1600 error = LOAD_PROTECT;
1c79356b
A
1601 goto bad1;
1602 }
1603
91447636 1604 /* get size */
2d21ac55 1605 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
55e303ae 1606 error = LOAD_FAILURE;
1c79356b 1607 goto bad1;
55e303ae 1608 }
1c79356b
A
1609
1610 /* Check mount point */
1611 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
55e303ae 1612 error = LOAD_PROTECT;
1c79356b
A
1613 goto bad1;
1614 }
1615
91447636 1616 /* check access */
2d21ac55 1617 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
55e303ae 1618 error = LOAD_PROTECT;
1c79356b 1619 goto bad1;
55e303ae 1620 }
0b4e3aa0 1621
1c79356b 1622 /* try to open it */
2d21ac55 1623 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
55e303ae 1624 error = LOAD_PROTECT;
1c79356b 1625 goto bad1;
0b4e3aa0
A
1626 }
1627
91447636 1628 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
b0d623f7 1629 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
55e303ae 1630 error = LOAD_IOERROR;
1c79356b 1631 goto bad2;
55e303ae 1632 }
1c79356b 1633
91447636
A
1634 if (header.mach_header.magic == MH_MAGIC ||
1635 header.mach_header.magic == MH_MAGIC_64)
1c79356b
A
1636 is_fat = FALSE;
1637 else if (header.fat_header.magic == FAT_MAGIC ||
1638 header.fat_header.magic == FAT_CIGAM)
1639 is_fat = TRUE;
1640 else {
1641 error = LOAD_BADMACHO;
1642 goto bad2;
1643 }
1644
1645 if (is_fat) {
0b4e3aa0 1646 /* Look up our architecture in the fat file. */
91447636 1647 error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
0b4e3aa0 1648 if (error != LOAD_SUCCESS)
1c79356b 1649 goto bad2;
0b4e3aa0
A
1650
1651 /* Read the Mach-O header out of it */
55e303ae 1652 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
1c79356b 1653 sizeof(header.mach_header), fat_arch.offset,
b0d623f7 1654 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
1c79356b 1655 if (error) {
55e303ae 1656 error = LOAD_IOERROR;
1c79356b
A
1657 goto bad2;
1658 }
1659
0b4e3aa0 1660 /* Is this really a Mach-O? */
91447636
A
1661 if (header.mach_header.magic != MH_MAGIC &&
1662 header.mach_header.magic != MH_MAGIC_64) {
1c79356b
A
1663 error = LOAD_BADMACHO;
1664 goto bad2;
1665 }
0b4e3aa0 1666
1c79356b 1667 *file_offset = fat_arch.offset;
2d21ac55 1668 *macho_size = fat_arch.size;
1c79356b 1669 } else {
91447636
A
1670 /*
1671 * Force get_macho_vnode() to fail if the architecture bits
1672 * do not match the expected architecture bits. This in
1673 * turn causes load_dylinker() to fail for the same reason,
1674 * so it ensures the dynamic linker and the binary are in
1675 * lock-step. This is potentially bad, if we ever add to
1676 * the CPU_ARCH_* bits any bits that are desirable but not
1677 * required, since the dynamic linker might work, but we will
1678 * refuse to load it because of this check.
1679 */
1680 if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
1681 return(LOAD_BADARCH);
0b4e3aa0 1682
1c79356b 1683 *file_offset = 0;
91447636 1684 *macho_size = fsize;
1c79356b
A
1685 }
1686
0b4e3aa0
A
1687 *mach_header = header.mach_header;
1688 *vpp = vp;
91447636
A
1689
1690 ubc_setsize(vp, fsize);
0b4e3aa0 1691
0b4e3aa0
A
1692 return (error);
1693
1c79356b 1694bad2:
2d21ac55 1695 err2 = VNOP_CLOSE(vp, FREAD, ctx);
91447636 1696 vnode_put(vp);
1c79356b 1697 return (error);
0b4e3aa0 1698
1c79356b 1699bad1:
91447636 1700 vnode_put(vp);
1c79356b
A
1701 return(error);
1702}