/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */

#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/shared_memory_server.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>

/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void pmap_switch(pmap_t);
extern void pmap_map_sharedpage(task_t task, pmap_t pmap);

/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t thread_state_initialize(thread_t thread);


/* XXX should have prototypes in a shared header file */
extern int get_map_nentries(vm_map_t);
extern kern_return_t thread_userstack(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t thread_entrypoint(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *);


/* An empty load_result_t */
static load_result_t load_result_null = {
	MACH_VM_MIN_ADDRESS,
	MACH_VM_MIN_ADDRESS,
	MACH_VM_MIN_ADDRESS,
	0,
	0,
	0,
	0
};

/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	off_t			end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	off_t				end_of_file,
	vm_map_t			map,
	load_result_t			*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
);

static load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
);

load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef NO_NESTED_PMAP
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else	/* NO_NESTED_PMAP */
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
#endif	/* NO_NESTED_PMAP */
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      ((imgp->ip_flags & IMGPF_IS_64BIT) == 0), /* shared regions? */
			      clean_regions, 0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return (lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero,
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit.
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	    vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 * Commit to new map.  First make sure that the current
	 * users of the task get done with it, and that we clean
	 * up the old contents of IPC and memory.  The task is
	 * guaranteed to be single threaded upon return (us).
	 *
	 * Swapping the new map for the old consumes our new map
	 * reference but leaves us responsible for the old_map reference.
	 * That lets us get off the pmap associated with it, and
	 * then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_clear_4GB_pagezero(old_map);
#ifndef NO_NESTED_PMAP
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif	/* !NO_NESTED_PMAP */
		vm_map_deallocate(old_map);
	}
	return (LOAD_SUCCESS);
}

int dylink_test = 1;

/*
 * The file size of a mach-o file is limited to 32 bits; this is the
 * limit on the kalloc() of enough bytes for a mach_header plus the
 * contents of its sizeofcmds, which is itself constrained to 32 bits
 * in the file format.  We read the commands section into a kernel
 * buffer and then parse it, in order to process the mach-o file format
 * load_command segment(s).  We are only interested in a subset of the
 * total set of possible commands.
 */
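/*
 * For reference (annotation; from <mach-o/loader.h>), the fixed-size
 * header that precedes the command area looks like this; mach_header_64
 * adds a trailing uint32_t reserved field:
 *
 *	struct mach_header {
 *		uint32_t	magic;		// MH_MAGIC / MH_MAGIC_64
 *		cpu_type_t	cputype;
 *		cpu_subtype_t	cpusubtype;
 *		uint32_t	filetype;	// MH_EXECUTE, MH_DYLINKER, ...
 *		uint32_t	ncmds;		// number of load commands
 *		uint32_t	sizeofcmds;	// total size of the command area
 *		uint32_t	flags;
 *	};
 *
 * The sizeofcmds bytes of load commands follow the header immediately,
 * which is why parse_machfile() below reads mach_header_sz +
 * header->sizeofcmds bytes of the file in one go.
 */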
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void			*kl_addr;
	vm_size_t		size, kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 * Break infinite recursion
	 */
	if (depth > 6)
		return (LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;

	/*
	 * Check to see if this is the right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, header->cpusubtype))
		return (LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}
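
	/*
	 * Note (annotation): depth 1 is the image passed to load_machfile()
	 * itself; depth 2 is reached only via load_dylinker().  The checks
	 * above therefore ensure that an executable can only be the
	 * top-level image, and that an MH_DYLINKER can only be loaded as
	 * the dynamic linker, never executed directly.
	 */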

	/*
	 * Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 * Map portion that must be accessible directly into
	 * kernel's map.
	 */
	if ((mach_header_sz + header->sizeofcmds) > macho_size)
		return (LOAD_BADMACHO);

	/*
	 * Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return (LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return (LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return (LOAD_IOERROR);
	}
	/* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

	/*
	 * Scan through the commands, processing each one as necessary.
	 */
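	/*
	 * Note (annotation): two passes are made over the commands.  Pass 1
	 * processes only LC_SEGMENT/LC_SEGMENT_64, so that all of the
	 * image's memory is mapped first; pass 2 processes the thread and
	 * dylinker commands, whose contents (register state, stack, entry
	 * point) may refer to addresses established by those mappings.
	 */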
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 * Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which cannot possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}
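
			/*
			 * Note (annotation): every load command begins with
			 * the common header (see <mach-o/loader.h>):
			 *
			 *	struct load_command {
			 *		uint32_t cmd;		// command type
			 *		uint32_t cmdsize;	// total size in bytes
			 *	};
			 *
			 * The checks above reject cmdsize values that wrap
			 * the offset around (oldoffset > offset), that are
			 * smaller than the header itself (e.g. cmdsize == 0
			 * would otherwise spin on the same command forever),
			 * or that run past the sizeofcmds region read above.
			 */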

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch (lcp->cmd) {
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment_64(
					(struct segment_command_64 *)lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						thr_act,
						result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thr_act,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) {

		if (shared_regions) {
			vm_offset_t vmaddr;
			shared_region_mapping_t shared_region;
			struct shared_region_task_mappings	map_info;
			shared_region_mapping_t next;

RedoLookup:
			vm_get_shared_region(task, &shared_region);
			map_info.self = (vm_offset_t)shared_region;
			shared_region_mapping_info(shared_region,
				&(map_info.text_region),
				&(map_info.text_size),
				&(map_info.data_region),
				&(map_info.data_size),
				&(map_info.region_mappings),
				&(map_info.client_base),
				&(map_info.alternate_base),
				&(map_info.alternate_next),
				&(map_info.fs_base),
				&(map_info.system),
				&(map_info.flags), &next);

			if ((map_info.flags & SHARED_REGION_FULL) ||
			    (map_info.flags & SHARED_REGION_STALE)) {
				shared_region_mapping_t system_region;
				system_region = lookup_default_shared_region(
					map_info.fs_base, map_info.system);
				if ((map_info.self != (vm_offset_t)system_region) &&
				    (map_info.flags & SHARED_REGION_SYSTEM)) {
					if (system_region == NULL) {
						shared_file_boot_time_init(
							map_info.fs_base, map_info.system);
					} else {
						vm_set_shared_region(task, system_region);
					}
					shared_region_mapping_dealloc(
						(shared_region_mapping_t)map_info.self);
					goto RedoLookup;
				} else if (map_info.flags & SHARED_REGION_SYSTEM) {
					shared_region_mapping_dealloc(system_region);
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
					shared_region_mapping_dealloc(
						(shared_region_mapping_t)map_info.self);
				} else {
					shared_region_mapping_dealloc(system_region);
				}
			}

			if (dylink_test) {
				p->p_flag |= P_NOSHLIB; /* no shlibs in use */
				vmaddr = map_info.client_base;
				if (clean_regions) {
					vm_map(map, &vmaddr, map_info.text_size,
						0, SHARED_LIB_ALIAS|VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				} else {
					vm_map(map, &vmaddr, map_info.text_size, 0,
						(VM_MEMORY_SHARED_PMAP << 24)
						  | SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				}
				vmaddr = map_info.client_base + map_info.text_size;
				vm_map(map, &vmaddr, map_info.data_size,
					0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
					map_info.data_region, 0, TRUE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

				while (next) {
					/*
					 * This should be fleshed out for the general
					 * case, but it is not necessary for now.
					 * Indeed, we are handling the comm page inside
					 * the shared_region mapping create calls for
					 * now, for simplicity's sake.  If more general
					 * support is needed, the code to manipulate
					 * the shared range chain can be pulled out
					 * and moved to the callers.
					 */
					shared_region_mapping_info(next,
						&(map_info.text_region),
						&(map_info.text_size),
						&(map_info.data_region),
						&(map_info.data_size),
						&(map_info.region_mappings),
						&(map_info.client_base),
						&(map_info.alternate_base),
						&(map_info.alternate_next),
						&(map_info.fs_base),
						&(map_info.system),
						&(map_info.flags), &next);

					vmaddr = map_info.client_base;
					vm_map(map, &vmaddr, map_info.text_size,
						0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				}
			}
		}
		if (dlp != 0)
			ret = load_dylinker(dlp, dlarchbits, map, thr_act, depth, result, clean_regions, abi64);

		if (depth == 1) {
			if (result->thread_count == 0)
				ret = LOAD_FAILURE;
			else if (abi64) {
				/* Map in 64-bit commpage */
				/* LP64todo - make this clean */
				pmap_map_sharedpage(current_task(), get_map_pmap(map));
				vm_map_commpage64(map);
			} else {
#ifdef __i386__
				/*
				 * On Intel, the comm page doesn't get mapped
				 * automatically because it goes beyond the current end
				 * of the VM map in the current 3GB/1GB address space
				 * model.
				 * XXX This will probably become unnecessary when we
				 * switch to the 4GB/4GB address space model.
				 */
				vm_map_commpage32(map);
#endif /* __i386__ */
			}
		}
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if (ret == LOAD_SUCCESS)
		(void)ubc_map(vp, PROT_EXEC);

	return (ret);
}

#ifndef SG_PROTECTED_VERSION_1
#define SG_PROTECTED_VERSION_1	0x8
#endif /* SG_PROTECTED_VERSION_1 */
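
/*
 * Note (annotation): SG_PROTECTED_VERSION_1 marks a segment of a
 * "protected" (obfuscated) binary.  On i386, unprotect_segment_64()
 * below skips the first APPLE_UNPROTECTED_HEADER_SIZE bytes of the
 * slice (so the Mach-O headers stay readable) and hands the remainder
 * of the mapping to vm_map_apple_protected(), which arranges for the
 * pages to be transformed back to plain text as they are paged in.
 */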

#ifdef __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment_64(
	uint64_t	file_off,
	uint64_t	file_size,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else /* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
	LOAD_SUCCESS
#endif /* __i386__ */

static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	__unused off_t		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t	ret;
	vm_offset_t	map_addr, map_offset;
	vm_size_t	map_size, seg_size, delta_size;
	vm_prot_t	initprot;
	vm_prot_t	maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return (LOAD_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

#if 0	/* XXX (4596982) this interferes with Rosetta */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
#endif

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_FIXED, pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);

		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
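		/*
		 * Note (annotation): e.g. with 4KB pages, a filesize of
		 * 0x1800 yields a map_size of 0x2000, so delta_size is the
		 * trailing 0x800 bytes of the last mapped page, which must
		 * read as zeroes.  (The zeroing code below is currently
		 * compiled out.)
		 */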
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return (LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
			     NULL, 0, FALSE,
			     scp->initprot, scp->maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64((uint64_t) scp->fileoff,
					   (uint64_t) scp->filesize,
					   map,
					   (vm_map_offset_t) map_addr,
					   (vm_map_size_t) map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}

static
load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	__unused off_t			end_of_file,
	vm_map_t			map,
	load_result_t			*result
)
{
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_64(scp64->vmsize);
	if (seg_size == 0)
		return (LOAD_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
	map_addr = round_page_64(scp64->vmaddr);

	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}

	map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp64->initprot) & VM_PROT_ALL;
		maxprot = (scp64->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = mach_vm_map(map,
			&map_addr, map_size, (mach_vm_offset_t)0,
			VM_FLAGS_FIXED, pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);

		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
		delta_size = map_size - scp64->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return (LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp64->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp64->initprot, scp64->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return (LOAD_NOSPACE);
	}

	if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
		result->mach_header = map_addr;

	if (scp64->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64(scp64->fileoff,
					   scp64->filesize,
					   map,
					   map_addr,
					   map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}

static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/*
	 * If this is the first thread (count == 0), operate on the thread
	 * we were passed; otherwise create a new one.
	 */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return (LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return (lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return (lret);
	}
	/*
	 * Resume thread now; note that this means that the thread
	 * commands should appear after all the load commands to
	 * be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return (LOAD_SUCCESS);
}

static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
		       &customstack);
	if (ret != LOAD_SUCCESS)
		return (ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return (ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return (LOAD_SUCCESS);
}

static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS)
		return (LOAD_FAILURE);

	/*
	 * Set the new thread state; iterate through the state flavors in
	 * the mach-o file.
	 */
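	/*
	 * Note (annotation): the payload of an LC_THREAD/LC_UNIXTHREAD
	 * command is a sequence of (flavor, count, state...) records:
	 *
	 *	uint32_t flavor;	// e.g. a machine thread-state flavor
	 *	uint32_t count;		// size of the state in 32-bit words
	 *	uint32_t state[count];	// machine-specific register state
	 *
	 * Each iteration below consumes one such record; the "+2" in the
	 * size check accounts for the flavor and count words themselves.
	 */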
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		thread_size = (size+2)*sizeof(unsigned long);
		if (thread_size > total_size)
			return (LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS)
			return (LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return (LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		stack_size = (size+2)*sizeof(unsigned long);
		if (stack_size > total_size)
			return (LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return (LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return (LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	entry_size;

	/*
	 * Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		entry_size = (size+2)*sizeof(unsigned long);
		if (entry_size > total_size)
			return (LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return (LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return (LOAD_SUCCESS);
}


static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	off_t			file_offset;
	off_t			macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 * Check for a proper null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return (LOAD_BADMACHO);
	} while (*p++);
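	/*
	 * Note (annotation): the dylinker path is stored inline in the
	 * command, at lcp->name.offset bytes from its start (a union
	 * lc_str), so the loop above both finds the terminating NUL and
	 * verifies that it lies within cmdsize.
	 */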

	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	/*
	 * Load the Mach-O.
	 * Use a temporary map to do the work.
	 */
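	/*
	 * Note (annotation): the dynamic linker is parsed into a scratch
	 * map at its preferred address, then block-copied into the target
	 * map: at the same address if it is free, otherwise anywhere it
	 * fits, in which case the recorded entry point is slid by the same
	 * delta (see below).
	 */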
	copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
					     is_64bit),
				 get_map_min(map), get_map_max(map), TRUE);
	if (VM_MAP_NULL == copy_map) {
		ret = LOAD_RESOURCE;
		goto out;
	}

	myresult = load_result_null;

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				FALSE, clean_regions, depth, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = mach_get_vm_start(copy_map);
		dyl_length = mach_get_vm_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_FIXED);
		if (ret != KERN_SUCCESS) {
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map,
				    (vm_map_address_t)dyl_start,
				    (vm_map_size_t)dyl_length,
				    TRUE, &tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map,
					    (vm_map_address_t)map_addr,
					    tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		(void)ubc_map(vp, PROT_EXEC);
	}
out:
	vm_map_deallocate(copy_map);

	vnode_put(vp);
	return (ret);

}

/*
 * This routine exists to support load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vfs_context	context;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;
	off_t			fsize = (off_t)0;
	struct ucred		*cred = kauth_cred_get();
	int			err2;

	context.vc_proc = p;
	context.vc_ucred = cred;

	ndp = &nid;

	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), &context);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return (error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, &context)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header.mach_header.magic == MH_MAGIC ||
	    header.mach_header.magic == MH_MAGIC_64)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

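	/*
	 * For reference (annotation): a fat (Universal) file begins with
	 * the following structures from <mach-o/fat.h>, all fields
	 * big-endian on disk:
	 *
	 *	struct fat_header { uint32_t magic; uint32_t nfat_arch; };
	 *	struct fat_arch   { cpu_type_t cputype; cpu_subtype_t cpusubtype;
	 *			    uint32_t offset; uint32_t size; uint32_t align; };
	 *
	 * with one fat_arch entry per embedded slice; the 512-byte pad in
	 * the header union above presumably makes the single read large
	 * enough to cover the arch table of typical fat files.
	 */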
	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC &&
		    header.mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits) {
			/* close and release the vnode rather than leaking it */
			error = LOAD_BADARCH;
			goto bad2;
		}

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header.mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);

	return (error);

bad2:
	err2 = VNOP_CLOSE(vp, FREAD, &context);
	vnode_put(vp);
	return (error);

bad1:
	vnode_put(vp);
	return (error);
}