/*
 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File:	kern/mach_loader.c
 * Author:	Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>
#include <kern/task.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/vm_statistics.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <machine/vmparam.h>

/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
	struct vnode *vp,
	vm_map_t map,
	thread_act_t thr_act,
	struct mach_header *header,
	unsigned long file_offset,
	unsigned long macho_size,
	int depth,
	load_result_t *result,
	boolean_t clean_regions
),
load_segment(
	struct segment_command *scp,
	void *pager,
	unsigned long pager_offset,
	unsigned long macho_size,
	unsigned long end_of_file,
	vm_map_t map,
	load_result_t *result
),
load_unixthread(
	struct thread_command *tcp,
	thread_act_t thr_act,
	load_result_t *result
),
load_thread(
	struct thread_command *tcp,
	thread_act_t thr_act,
	load_result_t *result
),
load_threadstate(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size
),
load_threadstack(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size,
	vm_offset_t *user_stack,
	int *customstack
),
load_threadentry(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size,
	vm_offset_t *entry_point
),
load_dylinker(
	struct dylinker_command *lcp,
	vm_map_t map,
	thread_act_t thr_act,
	int depth,
	load_result_t *result,
	boolean_t clean_regions
),
get_macho_vnode(
	char *path,
	struct mach_header *mach_header,
	unsigned long *file_offset,
	unsigned long *macho_size,
	struct vnode **vpp
);

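/*
 * load_machfile() is the top-level entry point: given a vnode and a
 * previously read mach_header, it builds (or reuses) a VM map, parses
 * the load commands into it, and, when it created the map itself,
 * commits the result by swapping the new map into the current task.
 * In this kernel it is reached from the exec path.
 */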
load_return_t
load_machfile(
	struct vnode *vp,
	struct mach_header *header,
	unsigned long file_offset,
	unsigned long macho_size,
	load_result_t *result,
	thread_act_t thr_act,
	vm_map_t new_map,
	boolean_t clean_regions
)
{
	pmap_t pmap;
	vm_map_t map;
	vm_map_t old_map;
	load_result_t myresult;
	kern_return_t kret;
	load_return_t lret;
	boolean_t create_map = TRUE;
#ifndef i386
	extern pmap_t pmap_create(vm_size_t size);	/* XXX */
#endif

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE);	/**** FIXME ****/
	} else
		map = new_map;

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			      0, result, clean_regions);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * Commit to new map.  First make sure that the current
	 * users of the task get done with it, and that we clean
	 * up the old contents of IPC and memory.  The task is
	 * guaranteed to be single threaded upon return (us).
	 *
	 * Swapping the new map for the old consumes our new map
	 * reference and leaves us responsible for the old_map
	 * reference.  That lets us get off the pmap associated
	 * with it, and then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
#ifndef i386
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}

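/*
 * When non-zero (the default), parse_machfile() premaps the split
 * shared library regions (text and data) into the new task at depth 1;
 * see the dylink_test checks below.
 */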
int dylink_test = 1;

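/*
 * The file is parsed at most twice: depth 1 is the executable itself
 * (MH_OBJECT, MH_EXECUTE or MH_PRELOAD) and depth 2 is the dynamic
 * linker named by an LC_LOAD_DYLINKER command (MH_DYLINKER).  Each
 * parse makes two passes over the load commands: segments are mapped
 * on pass 1 and thread commands are processed on pass 2, so that no
 * thread state can refer to memory that has not been mapped yet.
 */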
static
load_return_t
parse_machfile(
	struct vnode *vp,
	vm_map_t map,
	thread_act_t thr_act,
	struct mach_header *header,
	unsigned long file_offset,
	unsigned long macho_size,
	int depth,
	load_result_t *result,
	boolean_t clean_regions
)
{
	struct machine_slot *ms;
	int ncmds;
	struct load_command *lcp, *next;
	struct dylinker_command *dlp = 0;
	void *pager;
	load_return_t ret = LOAD_SUCCESS;
	vm_offset_t addr, kl_addr;
	vm_size_t size, kl_size;
	int offset;
	int pass;
	struct proc *p = current_proc();	/* XXXX */
	int error;
	int resid = 0;
	task_t task;

	/*
	 * Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;

	/*
	 * Check to see if right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 * Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 * Map portion that must be accessible directly into
	 * kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 * Round size of Mach-O commands up to page boundary.
	 */
	size = round_page_32(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p))) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* ubc_map(vp); */ /* NOT HERE */

	/*
	 * Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 * Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 * Check for valid lcp pointer by checking
			 * next offset.
			 */
			if (offset > header->sizeofcmds
			    + sizeof(struct mach_header)) {
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}

			/*
			 * Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager, file_offset,
					macho_size,
					(unsigned long)ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp, thr_act,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp, thr_act,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0))
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = LOAD_SUCCESS;	/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if ((ret == LOAD_SUCCESS) && (depth == 1)) {
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;

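		/*
		 * Look up the task's shared region.  If it is full or
		 * stale, retry against the default system region for
		 * this fs_base/system pair, falling back to
		 * shared_file_boot_time_init() when no such region
		 * exists yet.
		 */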
RedoLookup:
		vm_get_shared_region(task, &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.fs_base),
			&(map_info.system),
			&(map_info.flags), &next);

		if ((map_info.flags & SHARED_REGION_FULL) ||
		    (map_info.flags & SHARED_REGION_STALE)) {
			shared_region_mapping_t system_region;
			system_region = lookup_default_shared_region(
				map_info.fs_base, map_info.system);
			if ((map_info.self != (vm_offset_t)system_region) &&
			    (map_info.flags & SHARED_REGION_SYSTEM)) {
				if (system_region == NULL) {
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
				} else {
					vm_set_shared_region(task, system_region);
				}
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
				goto RedoLookup;
			} else if (map_info.flags & SHARED_REGION_SYSTEM) {
				shared_region_mapping_dealloc(system_region);
				shared_file_boot_time_init(
					map_info.fs_base, map_info.system);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
			} else {
				shared_region_mapping_dealloc(system_region);
			}
		}

		if (dylink_test) {
			p->p_flag |= P_NOSHLIB;	/* no shlibs in use */
			addr = map_info.client_base;
			if (clean_regions) {
				vm_map(map, &addr, map_info.text_size,
					0, SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			} else {
				vm_map(map, &addr, map_info.text_size, 0,
					(VM_MEMORY_SHARED_PMAP << 24)
						| SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			}
			addr = map_info.client_base + map_info.text_size;
			vm_map(map, &addr, map_info.data_size,
				0, SHARED_LIB_ALIAS,
				map_info.data_region, 0, TRUE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

			while (next) {
				/*
				 * This should be fleshed out for the general
				 * case, but it is not necessary for now.
				 * Indeed, we are handling the commpage inside
				 * the shared_region mapping create calls for
				 * simplicity's sake.  If more general support
				 * is needed, the code to manipulate the shared
				 * range chain can be pulled out and moved to
				 * the callers.
				 */
				shared_region_mapping_info(next,
					&(map_info.text_region),
					&(map_info.text_size),
					&(map_info.data_region),
					&(map_info.data_size),
					&(map_info.region_mappings),
					&(map_info.client_base),
					&(map_info.alternate_base),
					&(map_info.alternate_next),
					&(map_info.fs_base),
					&(map_info.system),
					&(map_info.flags), &next);

				addr = map_info.client_base;
				vm_map(map, &addr, map_info.text_size,
					0, SHARED_LIB_ALIAS,
					map_info.text_region, 0, FALSE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			}
		}
		if (dlp != 0) {
			ret = load_dylinker(dlp, map, thr_act,
					    depth, result, clean_regions);
		}
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
	    (result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}

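/*
 * load_segment() maps one LC_SEGMENT into the target map: a copy of the
 * file-backed portion is mapped through the vnode pager, any remaining
 * vmsize beyond filesize is satisfied with zero-fill memory, and the
 * segment's initial and maximum protections are then applied.
 */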
static
load_return_t
load_segment(
	struct segment_command *scp,
	void *pager,
	unsigned long pager_offset,
	unsigned long macho_size,
	unsigned long end_of_file,
	vm_map_t map,
	load_result_t *result
)
{
	kern_return_t ret;
	vm_offset_t map_addr, map_offset;
	vm_size_t map_size, seg_size, delta_size;
	caddr_t tmp;
	vm_prot_t initprot;
	vm_prot_t maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_32(scp->vmsize);
	if (seg_size == 0)
		return (LOAD_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page_32(scp->filesize);
	map_addr = trunc_page_32(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 * Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
			     &map_addr, map_size, (vm_offset_t)0, FALSE,
			     pager, map_offset, TRUE,
			     initprot, maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
				    delta_size)) {
				(void) vm_deallocate(
					kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	/*
	 * Set protection values. (Note: ignore errors!)
	 */

	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  FALSE, scp->initprot);
	}
	if ((scp->fileoff == 0) && (scp->filesize != 0))
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}

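/*
 * load_unixthread() handles LC_UNIXTHREAD, which describes the initial
 * thread of a Unix process: its register state, its stack (USRSTACK by
 * default, or a custom stack supplied in the thread state), and its
 * entry point.  Only one such command is accepted per executable.
 */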
static
load_return_t
load_unixthread(
	struct thread_command *tcp,
	thread_act_t thread,
	load_result_t *result
)
{
	load_return_t ret;
	int customstack = 0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command),
		&result->user_stack,
		&customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
		result->customstack = 1;
	else
		result->customstack = 0;
	ret = load_threadentry(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command),
		&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}

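/*
 * load_thread() handles LC_THREAD.  The first thread command reuses the
 * activation we were passed; later ones create fresh threads in the
 * task, and those are resumed immediately, which is one reason thread
 * commands are only processed on pass 2, after everything is mapped.
 */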
static
load_return_t
load_thread(
	struct thread_command *tcp,
	thread_act_t thread,
	load_result_t *result
)
{
	kern_return_t kret;
	load_return_t lret;
	task_t task;
	int customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		act_deallocate(thread);
	}

	lret = load_threadstate(thread,
		(unsigned long *)(((vm_offset_t)tcp) +
			sizeof(struct thread_command)),
		tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack,
			&customstack);
		if (customstack)
			result->customstack = 1;
		else
			result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 * Resume thread now, note that this means that the thread
	 * commands should appear after all the load commands to
	 * be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}

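/*
 * The payload of a thread command is a sequence of variable-length
 * records, each consisting of a flavor word, a count word, and then
 * `count' longs of machine-dependent register state:
 *
 *	flavor, count, state[count], flavor, count, state[count], ...
 *
 * The three walkers below (register state, stack, entry point) all
 * parse this same layout, handing each record to a machine-dependent
 * routine and validating the running size against the command size.
 */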
static
load_return_t
load_threadstate(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size
)
{
	kern_return_t ret;
	unsigned long size;
	int flavor;

	/*
	 * Set the thread state.
	 */

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		ret = thread_setstatus(thread, flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size,
	vm_offset_t *user_stack,
	int *customstack
)
{
	kern_return_t ret;
	unsigned long size;
	int flavor;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		*user_stack = USRSTACK;
		ret = thread_userstack(thread, flavor, ts, size,
				user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
	thread_t thread,
	unsigned long *ts,
	unsigned long total_size,
	vm_offset_t *entry_point
)
{
	kern_return_t ret;
	unsigned long size;
	int flavor;

	/*
	 * Set the thread state.
	 */
	*entry_point = 0;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

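/*
 * load_dylinker() loads the dynamic linker named by LC_LOAD_DYLINKER.
 * The linker is first parsed into a throwaway map built on a private
 * pmap; the resulting range is then copied into the target map, at the
 * linker's preferred address if that range is free, or anywhere
 * otherwise, with the recorded entry point slid by the same amount.
 */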
static
load_return_t
load_dylinker(
	struct dylinker_command *lcp,
	vm_map_t map,
	thread_act_t thr_act,
	int depth,
	load_result_t *result,
	boolean_t clean_regions
)
{
	char *name;
	char *p;
	struct vnode *vp;
	struct mach_header header;
	unsigned long file_offset;
	unsigned long macho_size;
	vm_map_t copy_map;
	load_result_t myresult;
	kern_return_t ret;
	vm_map_copy_t tmp;
	vm_offset_t dyl_start, map_addr;
	vm_size_t dyl_length;
	extern pmap_t pmap_create(vm_size_t size);	/* XXX */

	name = (char *)lcp + lcp->name.offset;
	/*
	 * Check for a properly null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 * Load the Mach-O.
	 */

	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
			file_offset, macho_size,
			depth, &myresult, clean_regions);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				    &tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);
}

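/*
 * get_macho_vnode() translates a pathname (here, the dylinker path)
 * into a vnode plus the offset and size of the Mach-O image inside it,
 * performing the usual exec permission checks and, for fat binaries,
 * selecting the slice for this machine via fatfile_getarch().
 */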
static
load_return_t
get_macho_vnode(
	char *path,
	struct mach_header *mach_header,
	unsigned long *file_offset,
	unsigned long *macho_size,
	struct vnode **vpp
)
{
	struct vnode *vp;
	struct vattr attr, *atp;
	struct nameidata nid, *ndp;
	struct proc *p = current_proc();	/* XXXX */
	boolean_t is_fat;
	struct fat_arch fat_arch;
	int error = LOAD_SUCCESS;
	int resid;
	union {
		struct mach_header mach_header;
		struct fat_header fat_header;
		char pad[512];
	} header;
	off_t fsize = (off_t)0;
	struct ucred *cred = p->p_ucred;
	int err2;

	ndp = &nid;
	atp = &attr;

	/* init the namei data to point at the file to be loaded */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if ((error = namei(ndp))) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, cred, p))) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access; for root we have to see if any exec bit is on */
	if ((error = VOP_ACCESS(vp, VEXEC, cred, p))) {
		error = LOAD_PROTECT;
		goto bad1;
	}
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = LOAD_ENOENT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, cred, p))) {
		error = LOAD_PROTECT;
		ubc_rele(vp);
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	VOP_UNLOCK(vp, 0, p);
	err2 = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	vput(vp);
	return(error);
}