/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File: kern/mach_loader.c
 * Author: Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *     Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>
/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_act_t thr_act,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    int depth,
    load_result_t *result,
    boolean_t clean_regions
),
load_segment(
    struct segment_command *scp,
    void *pager,
    unsigned long pager_offset,
    unsigned long macho_size,
    unsigned long end_of_file,
    vm_map_t map,
    load_result_t *result
),
load_unixthread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
),
load_thread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
),
load_threadstate(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size
),
load_threadstack(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *user_stack,
    int *customstack
),
load_threadentry(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *entry_point
),
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t map,
    thread_act_t thr_act,
    int depth,
    load_result_t *result,
    boolean_t clean_regions
),
get_macho_vnode(
    char *path,
    struct mach_header *mach_header,
    unsigned long *file_offset,
    unsigned long *macho_size,
    struct vnode **vpp
);

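/*
 * load_machfile:
 *
 * Load a Mach-O image from vnode `vp' into a VM map.  If no map is
 * supplied, a fresh map (and, except on i386, a fresh pmap) is
 * created; on success that map is swapped into the current task and
 * activation, replacing the old one.  The load commands themselves
 * are processed by parse_machfile() below.
 */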
load_return_t
load_machfile(
    struct vnode *vp,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    load_result_t *result,
    thread_act_t thr_act,
    vm_map_t new_map,
    boolean_t clean_regions
)
{
    pmap_t pmap;
    vm_map_t map;
    vm_map_t old_map;
    load_result_t myresult;
    kern_return_t kret;
    load_return_t lret;
    boolean_t create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
#ifdef i386
        pmap = get_task_pmap(current_task());
        pmap_reference(pmap);
#else
        pmap = pmap_create((vm_size_t) 0);
#endif
        map = vm_map_create(pmap,
                get_map_min(old_map),
                get_map_max(old_map),
                TRUE); /**** FIXME ****/
    } else
        map = new_map;

    if (!result)
        result = &myresult;

    *result = (load_result_t) { 0 };

    lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
                0, result, clean_regions);

    if (lret != LOAD_SUCCESS) {
        if (create_map) {
            vm_map_deallocate(map); /* will lose pmap reference too */
        }
        return(lret);
    }

    /*
     * Commit to new map.  First make sure that the current
     * users of the task get done with it, and that we clean
     * up the old contents of IPC and memory.  The task is
     * guaranteed to be single threaded upon return (us).
     *
     * Swap the new map for the old at the task level and at
     * our activation.  The latter consumes our new map reference
     * but each leaves us responsible for the old_map reference.
     * That lets us get off the pmap associated with it, and
     * then we can release it.
     */
    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_deallocate(old_map);

        old_map = swap_act_map(current_act(), map);

#ifndef i386
        pmap_switch(pmap); /* Make sure we are using the new pmap */
#endif
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}

int dylink_test = 1;

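/*
 * parse_machfile:
 *
 * Read the image's load commands into kernel memory and scan them in
 * two passes: pass 1 maps the segments (LC_SEGMENT), pass 2 sets up
 * threads (LC_THREAD, LC_UNIXTHREAD) and records an LC_LOAD_DYLINKER
 * command, which is acted on only at depth 1 once everything else has
 * succeeded.  `depth' counts recursive loads (executable -> dynamic
 * linker) and is capped to break infinite recursion.
 */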
static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_act_t thr_act,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    int depth,
    load_result_t *result,
    boolean_t clean_regions
)
{
    struct machine_slot *ms;
    int ncmds;
    struct load_command *lcp, *next;
    struct dylinker_command *dlp = 0;
    void *pager;
    load_return_t ret = LOAD_SUCCESS;
    vm_offset_t addr, kl_addr;
    vm_size_t size, kl_size;
    int offset;
    int pass;
    struct proc *p = current_proc(); /* XXXX */
    int error;
    int resid = 0;
    task_t task;

    /*
     * Break infinite recursion
     */
    if (depth > 6)
        return(LOAD_FAILURE);

    task = (task_t)get_threadtask(thr_act);

    depth++;
    /*
     * Check to see if the machine type is right.
     */
    ms = &machine_slot[cpu_number()];
    if ((header->cputype != ms->cpu_type) ||
        !check_cpu_subtype(header->cpusubtype))
        return(LOAD_BADARCH);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1)
            return (LOAD_FAILURE);
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1)
            return (LOAD_FAILURE);
        break;

    case MH_DYLINKER:
        if (depth != 2)
            return (LOAD_FAILURE);
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     * Get the pager for the file.
     */
    UBCINFOCHECK("parse_machfile", vp);
    pager = (void *) ubc_getpager(vp);

    /*
     * Map portion that must be accessible directly into
     * kernel's map.
     */
    if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     * Round size of Mach-O commands up to page boundary.
     */
    size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
    if (size == 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    if (error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
            UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);   /* a load_return_t, not the errno EIO */
    }
    /* ubc_map(vp); */ /* NOT HERE */

    /*
     * Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        offset = sizeof(struct mach_header);
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             * Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            offset += lcp->cmdsize;

            /*
             * Check for valid lcp pointer by checking
             * next offset.
             */
            if (offset > header->sizeofcmds
                    + sizeof(struct mach_header)) {
                if (kl_addr)
                    kfree(kl_addr, kl_size);
                return(LOAD_BADMACHO);
            }

            /*
             * Check for valid command.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                        (struct segment_command *) lcp,
                        pager, file_offset,
                        macho_size,
                        (unsigned long)ubc_getsize(vp),
                        map,
                        result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp, thr_act,
                        result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                        (struct thread_command *) lcp, thr_act,
                        result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0))
                    dlp = (struct dylinker_command *)lcp;
                else
                    ret = LOAD_FAILURE;
                break;
            default:
                ret = LOAD_SUCCESS; /* ignore other stuff */
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if ((ret == LOAD_SUCCESS) && (depth == 1)) {
        vm_offset_t addr;
        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings map_info;
        shared_region_mapping_t next;

RedoLookup:
        vm_get_shared_region(task, &shared_region);
        map_info.self = (vm_offset_t)shared_region;
        shared_region_mapping_info(shared_region,
            &(map_info.text_region),
            &(map_info.text_size),
            &(map_info.data_region),
            &(map_info.data_size),
            &(map_info.region_mappings),
            &(map_info.client_base),
            &(map_info.alternate_base),
            &(map_info.alternate_next),
            &(map_info.fs_base),
            &(map_info.system),
            &(map_info.flags), &next);

        if ((map_info.flags & SHARED_REGION_FULL) ||
            (map_info.flags & SHARED_REGION_STALE)) {
            shared_region_mapping_t system_region;
            system_region = lookup_default_shared_region(
                map_info.fs_base, map_info.system);
            if ((map_info.self != (vm_offset_t)system_region) &&
                (map_info.flags & SHARED_REGION_SYSTEM)) {
                if (system_region == NULL) {
                    shared_file_boot_time_init(
                        map_info.fs_base, map_info.system);
                } else {
                    vm_set_shared_region(task, system_region);
                }
                shared_region_mapping_dealloc(
                    (shared_region_mapping_t)map_info.self);
                goto RedoLookup;
            } else if (map_info.flags & SHARED_REGION_SYSTEM) {
                shared_region_mapping_dealloc(system_region);
                shared_file_boot_time_init(
                    map_info.fs_base, map_info.system);
                shared_region_mapping_dealloc(
                    (shared_region_mapping_t)map_info.self);
            } else {
                shared_region_mapping_dealloc(system_region);
            }
        }

        if (dylink_test) {
            p->p_flag |= P_NOSHLIB; /* no shlibs in use */
            addr = map_info.client_base;
            if (clean_regions) {
                vm_map(map, &addr, map_info.text_size,
                    0, SHARED_LIB_ALIAS,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            } else {
                vm_map(map, &addr, map_info.text_size, 0,
                    (VM_MEMORY_SHARED_PMAP << 24)
                        | SHARED_LIB_ALIAS,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            }
            addr = map_info.client_base + map_info.text_size;
            vm_map(map, &addr, map_info.data_size,
                0, SHARED_LIB_ALIAS,
                map_info.data_region, 0, TRUE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

            while (next) {
                /*
                 * This should be fleshed out for the general case, but
                 * it is not necessary for now.  Indeed we are handling
                 * the com page inside of the shared_region mapping
                 * create calls for now for simplicity's sake.  If more
                 * general support is needed the code to manipulate the
                 * shared range chain can be pulled out and moved to the
                 * callers.
                 */
                shared_region_mapping_info(next,
                    &(map_info.text_region),
                    &(map_info.text_size),
                    &(map_info.data_region),
                    &(map_info.data_size),
                    &(map_info.region_mappings),
                    &(map_info.client_base),
                    &(map_info.alternate_base),
                    &(map_info.alternate_next),
                    &(map_info.fs_base),
                    &(map_info.system),
                    &(map_info.flags), &next);

                addr = map_info.client_base;
                vm_map(map, &addr, map_info.text_size,
                    0, SHARED_LIB_ALIAS,
                    map_info.text_region, 0, FALSE,
                    VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            }
        }
        if (dlp != 0) {
            ret = load_dylinker(dlp, map, thr_act,
                    depth, result, clean_regions);
        }
    }

    if (kl_addr)
        kfree(kl_addr, kl_size);

    if ((ret == LOAD_SUCCESS) && (depth == 1) &&
        (result->thread_count == 0))
        ret = LOAD_FAILURE;
    if (ret == LOAD_SUCCESS)
        ubc_map(vp);

    return(ret);
}

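/*
 * load_segment:
 *
 * Map one LC_SEGMENT command into the target map: the file-backed
 * portion is mapped copy-on-write from the vnode pager, any remainder
 * of the segment's vmsize beyond its filesize is zero-filled, and the
 * segment's initial and maximum protections are applied.
 */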
static
load_return_t
load_segment(
    struct segment_command *scp,
    void *pager,
    unsigned long pager_offset,
    unsigned long macho_size,
    unsigned long end_of_file,
    vm_map_t map,
    load_result_t *result
)
{
    kern_return_t ret;
    vm_offset_t map_addr, map_offset;
    vm_size_t map_size, seg_size, delta_size;
    caddr_t tmp;
    vm_prot_t initprot;
    vm_prot_t maxprot;
#if 1
    extern int print_map_addr;
#endif /* 1 */

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize > macho_size)
        return (LOAD_BADMACHO);

    seg_size = round_page_32(scp->vmsize);
    if (seg_size == 0)
        return (LOAD_SUCCESS);

    /*
     * Round sizes to page size.
     */
    map_size = round_page_32(scp->filesize);
    map_addr = trunc_page_32(scp->vmaddr);

    map_offset = pager_offset + scp->fileoff;

    if (map_size > 0) {
        initprot = (scp->initprot) & VM_PROT_ALL;
        maxprot = (scp->maxprot) & VM_PROT_ALL;
        /*
         * Map a copy of the file into the address space.
         */
        ret = vm_map(map,
                &map_addr, map_size, (vm_offset_t)0, FALSE,
                pager, map_offset, TRUE,
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

#if 1
        if (print_map_addr)
            printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
#endif /* 1 */
        /*
         * If the file didn't end on a page boundary,
         * we need to zero the leftover.
         */
        delta_size = map_size - scp->filesize;
#if FIXME
        if (delta_size > 0) {
            vm_offset_t tmp;

            ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp->filesize,
                    delta_size)) {
                (void) vm_deallocate(
                        kernel_map, tmp, delta_size);
                return(LOAD_FAILURE);
            }

            (void) vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     * If the virtual size of the segment is greater
     * than the size from the file, we need to allocate
     * zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        vm_offset_t tmp = map_addr + map_size;

        ret = vm_allocate(map, &tmp, delta_size, FALSE);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    /*
     * Set protection values.  (Note: ignore errors!)
     */

    if (scp->maxprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                TRUE, scp->maxprot);
    }
    if (scp->initprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                FALSE, scp->initprot);
    }
    if ((scp->fileoff == 0) && (scp->filesize != 0))
        result->mach_header = map_addr;
    return(LOAD_SUCCESS);
}

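/*
 * load_unixthread:
 *
 * Process an LC_UNIXTHREAD command: set up the user stack (noting
 * whether the binary supplied a custom one), the entry point, and the
 * register state of the current activation.  Only one such command
 * is accepted per image.
 */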
static
load_return_t
load_unixthread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
)
{
    thread_t thread = current_thread();
    load_return_t ret;
    int customstack = 0;

    if (result->thread_count != 0)
        return (LOAD_FAILURE);

    thread = getshuttle_thread(thr_act);
    ret = load_threadstack(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->user_stack,
            &customstack);
    if (ret != LOAD_SUCCESS)
        return(ret);

    if (customstack)
        result->customstack = 1;
    else
        result->customstack = 0;
    ret = load_threadentry(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->entry_point);
    if (ret != LOAD_SUCCESS)
        return(ret);

    ret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (ret != LOAD_SUCCESS)
        return (ret);

    result->unixproc = TRUE;
    result->thread_count++;

    return(LOAD_SUCCESS);
}

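/*
 * load_thread:
 *
 * Process an LC_THREAD command.  The first thread command reuses the
 * current activation; each later one creates a new thread in the task
 * and resumes it.  The user stack and entry point are recorded only
 * for the first thread; later threads get register state alone.
 */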
static
load_return_t
load_thread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
)
{
    thread_t thread;
    kern_return_t kret;
    load_return_t lret;
    task_t task;
    int customstack = 0;

    task = get_threadtask(thr_act);
    thread = getshuttle_thread(thr_act);

    /* if count is 0; same as thr_act */
    if (result->thread_count != 0) {
        kret = thread_create(task, &thread);
        if (kret != KERN_SUCCESS)
            return(LOAD_RESOURCE);
        thread_deallocate(thread);
    }

    lret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (lret != LOAD_SUCCESS)
        return (lret);

    if (result->thread_count == 0) {
        lret = load_threadstack(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->user_stack,
                &customstack);
        if (customstack)
            result->customstack = 1;
        else
            result->customstack = 0;

        if (lret != LOAD_SUCCESS)
            return(lret);

        lret = load_threadentry(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->entry_point);
        if (lret != LOAD_SUCCESS)
            return(lret);
    }
    /*
     * Resume thread now, note that this means that the thread
     * commands should appear after all the load commands to
     * be sure they don't reference anything not yet mapped.
     */
    else
        thread_resume(thread);

    result->thread_count++;

    return(LOAD_SUCCESS);
}

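/*
 * load_threadstate:
 *
 * Walk the (flavor, count, state...) records that follow a thread
 * command and apply each one to the thread's register state via
 * thread_setstatus().
 */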
static
load_return_t
load_threadstate(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    /*
     * Set the thread state.
     */

    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        /* total_size is unsigned: check for underflow before subtracting */
        if ((size + 2) * sizeof(unsigned long) > total_size)
            return(LOAD_BADMACHO);
        total_size -= (size + 2) * sizeof(unsigned long);
        ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}

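/*
 * load_threadstack:
 *
 * Walk the same (flavor, count, state...) records and derive the
 * initial user stack pointer via thread_userstack(), noting whether
 * the binary requests a custom stack.
 */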
static
load_return_t
load_threadstack(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *user_stack,
    int *customstack
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        /* total_size is unsigned: check for underflow before subtracting */
        if ((size + 2) * sizeof(unsigned long) > total_size)
            return(LOAD_BADMACHO);
        total_size -= (size + 2) * sizeof(unsigned long);
        *user_stack = USRSTACK;
        ret = thread_userstack(thread, flavor, ts, size,
                user_stack, customstack);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}

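/*
 * load_threadentry:
 *
 * Walk the (flavor, count, state...) records and extract the initial
 * program counter via thread_entrypoint().
 */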
static
load_return_t
load_threadentry(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *entry_point
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    /*
     * Set the thread state.
     */
    *entry_point = 0;
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        /* total_size is unsigned: check for underflow before subtracting */
        if ((size + 2) * sizeof(unsigned long) > total_size)
            return(LOAD_BADMACHO);
        total_size -= (size + 2) * sizeof(unsigned long);
        ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size; /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}

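/*
 * load_dylinker:
 *
 * Load the dynamic linker named by an LC_LOAD_DYLINKER command.  The
 * dylinker is first parsed into a scratch map at its preferred
 * address; if that address range is unavailable in the target map,
 * vm_allocate() is allowed to slide it, the mappings are copied
 * across, and the recorded entry point is adjusted by the slide.
 */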
static
load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t map,
    thread_act_t thr_act,
    int depth,
    load_result_t *result,
    boolean_t clean_regions
)
{
    char *name;
    char *p;
    struct vnode *vp;
    struct mach_header header;
    unsigned long file_offset;
    unsigned long macho_size;
    vm_map_t copy_map;
    load_result_t myresult;
    kern_return_t ret;
    vm_map_copy_t tmp;
    vm_offset_t dyl_start, map_addr;
    vm_size_t dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     * Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = (load_result_t) { 0 };

    /*
     * Load the Mach-O.
     */

    copy_map = vm_map_create(pmap_create(macho_size),
            get_map_min(map), get_map_max(map), TRUE);

    ret = parse_machfile(vp, copy_map, thr_act, &header,
            file_offset, macho_size,
            depth, &myresult, clean_regions);

    if (ret)
        goto out;

    if (get_map_nentries(copy_map) > 0) {

        dyl_start = get_map_start(copy_map);
        dyl_length = get_map_end(copy_map) - dyl_start;

        map_addr = dyl_start;
        ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
        if (ret != KERN_SUCCESS) {
            ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
        }

        if (ret != KERN_SUCCESS) {
            ret = LOAD_NOSPACE;
            goto out;
        }

        ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
                &tmp);
        if (ret != KERN_SUCCESS) {
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
        if (ret != KERN_SUCCESS) {
            vm_map_copy_discard(tmp);
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        if (map_addr != dyl_start)
            myresult.entry_point += (map_addr - dyl_start);
    } else
        ret = LOAD_FAILURE;

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        ubc_map(vp);
    }
out:
    vm_map_deallocate(copy_map);

    vrele(vp);
    return (ret);
}

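/*
 * get_macho_vnode:
 *
 * Translate a pathname (e.g. the dylinker name) into an opened,
 * execute-checked vnode, peeking at the on-disk header to locate the
 * Mach-O image: for a fat binary the slice matching this machine is
 * found first.  Returns the Mach-O header, its file offset and size,
 * and the vnode itself.
 */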
static
load_return_t
get_macho_vnode(
    char *path,
    struct mach_header *mach_header,
    unsigned long *file_offset,
    unsigned long *macho_size,
    struct vnode **vpp
)
{
    struct vnode *vp;
    struct vattr attr, *atp;
    struct nameidata nid, *ndp;
    struct proc *p = current_proc(); /* XXXX */
    boolean_t is_fat;
    struct fat_arch fat_arch;
    int error = KERN_SUCCESS;
    int resid;
    union {
        struct mach_header mach_header;
        struct fat_header fat_header;
        char pad[512];
    } header;
    off_t fsize = (off_t)0;
    struct ucred *cred = p->p_ucred;

    ndp = &nid;
    atp = &attr;

    /* init the namei data to point at the program's file name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

    if (error = namei(ndp))
        return(error);

    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = EACCES;
        goto bad1;
    }

    /* get attributes */
    if (error = VOP_GETATTR(vp, &attr, cred, p))
        goto bad1;

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = EACCES;
        goto bad1;
    }

    if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
        atp->va_mode &= ~(VSUID | VSGID);

    /* check access; for root we have to see if any exec bit is on */
    if (error = VOP_ACCESS(vp, VEXEC, cred, p))
        goto bad1;
    if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
        error = EACCES;
        goto bad1;
    }

    /* hold the vnode for the IO */
    if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
        error = ENOENT;
        goto bad1;
    }

    /* try to open it */
    if (error = VOP_OPEN(vp, FREAD, cred, p)) {
        ubc_rele(vp);
        goto bad1;
    }

    if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
            UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
        goto bad2;

    if (header.mach_header.magic == MH_MAGIC)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
             header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
                sizeof(header.mach_header), fat_arch.offset,
                UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
        if (error) {
            error = LOAD_FAILURE;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fsize = fat_arch.size;
    } else {

        *file_offset = 0;
        *macho_size = fsize = attr.va_size;
    }

    *mach_header = header.mach_header;
    *vpp = vp;
    if (UBCISVALID(vp))
        ubc_setsize(vp, fsize); /* XXX why? */

    VOP_UNLOCK(vp, 0, p);
    ubc_rele(vp);
    return (error);

bad2:
    VOP_UNLOCK(vp, 0, p);
    error = VOP_CLOSE(vp, FREAD, cred, p);
    ubc_rele(vp);
    vrele(vp);
    return (error);

bad1:
    vput(vp);
    return(error);
}