/*
 * Provenance note: bsd/kern/mach_loader.c from Apple xnu-344.49
 * (scraped gitweb listing header converted to a comment).
 */
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (C) 1988, 1989, NeXT, Inc.
27 *
28 * File: kern/mach_loader.c
29 * Author: Avadis Tevanian, Jr.
30 *
31 * Mach object file loader (kernel version, for now).
32 *
33 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
34 * Started.
35 */
36 #include <sys/param.h>
37 #include <sys/vnode.h>
38 #include <sys/uio.h>
39 #include <sys/namei.h>
40 #include <sys/proc.h>
41 #include <sys/stat.h>
42 #include <sys/malloc.h>
43 #include <sys/mount.h>
44 #include <sys/fcntl.h>
45 #include <sys/ubc.h>
46
47 #include <mach/mach_types.h>
48
49 #include <kern/mach_loader.h>
50
51 #include <mach-o/fat.h>
52 #include <mach-o/loader.h>
53
54 #include <kern/cpu_number.h>
55
56 #include <vm/vm_map.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_pager.h>
59 #include <vm/vnode_pager.h>
60 #include <mach/vm_statistics.h>
61
62 #include <mach/shared_memory_server.h>
63 #include <vm/vm_shared_memory_server.h>
64
65 #include <machine/vmparam.h>
66
67 /*
68 * Prototypes of static functions.
69 */
70 static
71 load_return_t
72 parse_machfile(
73 struct vnode *vp,
74 vm_map_t map,
75 thread_act_t thr_act,
76 struct mach_header *header,
77 unsigned long file_offset,
78 unsigned long macho_size,
79 int depth,
80 load_result_t *result
81 ),
82 load_segment(
83 struct segment_command *scp,
84 void * pager,
85 unsigned long pager_offset,
86 unsigned long macho_size,
87 unsigned long end_of_file,
88 vm_map_t map,
89 load_result_t *result
90 ),
91 load_unixthread(
92 struct thread_command *tcp,
93 thread_act_t thr_act,
94 load_result_t *result
95 ),
96 load_thread(
97 struct thread_command *tcp,
98 thread_act_t thr_act,
99 load_result_t *result
100 ),
101 load_threadstate(
102 thread_t thread,
103 unsigned long *ts,
104 unsigned long total_size
105 ),
106 load_threadstack(
107 thread_t thread,
108 unsigned long *ts,
109 unsigned long total_size,
110 vm_offset_t *user_stack,
111 int *customstack
112 ),
113 load_threadentry(
114 thread_t thread,
115 unsigned long *ts,
116 unsigned long total_size,
117 vm_offset_t *entry_point
118 ),
119 load_dylinker(
120 struct dylinker_command *lcp,
121 vm_map_t map,
122 thread_act_t thr_act,
123 int depth,
124 load_result_t *result
125 ),
126 get_macho_vnode(
127 char *path,
128 struct mach_header *mach_header,
129 unsigned long *file_offset,
130 unsigned long *macho_size,
131 struct vnode **vpp
132 );
133
134 load_return_t
135 load_machfile(
136 struct vnode *vp,
137 struct mach_header *header,
138 unsigned long file_offset,
139 unsigned long macho_size,
140 load_result_t *result,
141 thread_act_t thr_act,
142 vm_map_t new_map
143 )
144 {
145 pmap_t pmap;
146 vm_map_t map;
147 vm_map_t old_map;
148 load_result_t myresult;
149 kern_return_t kret;
150 load_return_t lret;
151 boolean_t create_map = TRUE;
152
153 if (new_map != VM_MAP_NULL) {
154 create_map = FALSE;
155 }
156
157 if (create_map) {
158 old_map = current_map();
159 #ifdef i386
160 pmap = get_task_pmap(current_task());
161 pmap_reference(pmap);
162 #else
163 pmap = pmap_create((vm_size_t) 0);
164 #endif
165 map = vm_map_create(pmap,
166 get_map_min(old_map),
167 get_map_max(old_map),
168 TRUE); /**** FIXME ****/
169 } else
170 map = new_map;
171
172 if (!result)
173 result = &myresult;
174
175 *result = (load_result_t) { 0 };
176
177 lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
178 0, result);
179
180 if (lret != LOAD_SUCCESS) {
181 if (create_map)
182 vm_map_deallocate(map); /* will lose pmap reference too */
183 return(lret);
184 }
185 /*
186 * Commit to new map. First make sure that the current
187 * users of the task get done with it, and that we clean
188 * up the old contents of IPC and memory. The task is
189 * guaranteed to be single threaded upon return (us).
190 *
191 * Swap the new map for the old at the task level and at
192 * our activation. The latter consumes our new map reference
193 * but each leaves us responsible for the old_map reference.
194 * That lets us get off the pmap associated with it, and
195 * then we can release it.
196 */
197 if (create_map) {
198 task_halt(current_task());
199
200 old_map = swap_task_map(current_task(), map);
201 vm_map_deallocate(old_map);
202
203 old_map = swap_act_map(current_act(), map);
204
205 #ifndef i386
206 pmap_switch(pmap); /* Make sure we are using the new pmap */
207 #endif
208 vm_map_deallocate(old_map);
209 }
210 return(LOAD_SUCCESS);
211 }
212
/*
 * When non-zero, parse_machfile() pre-maps the system shared region
 * (text read-only, data copy-on-write) into any task that uses the
 * dynamic linker.
 */
int dylink_test = 1;
/* Boot-time default shared region; defined in the VM shared-memory server. */
extern vm_offset_t system_shared_region;
215
216 static
217 load_return_t
218 parse_machfile(
219 struct vnode *vp,
220 vm_map_t map,
221 thread_act_t thr_act,
222 struct mach_header *header,
223 unsigned long file_offset,
224 unsigned long macho_size,
225 int depth,
226 load_result_t *result
227 )
228 {
229 struct machine_slot *ms;
230 int ncmds;
231 struct load_command *lcp, *next;
232 struct dylinker_command *dlp = 0;
233 void * pager;
234 load_return_t ret;
235 vm_offset_t addr, kl_addr;
236 vm_size_t size,kl_size;
237 int offset;
238 int pass;
239 struct proc *p = current_proc(); /* XXXX */
240 int error;
241 int resid=0;
242 task_t task;
243
244 /*
245 * Break infinite recursion
246 */
247 if (depth > 6)
248 return(LOAD_FAILURE);
249
250 task = (task_t)get_threadtask(thr_act);
251
252 depth++;
253
254 /*
255 * Check to see if right machine type.
256 */
257 ms = &machine_slot[cpu_number()];
258 if ((header->cputype != ms->cpu_type) ||
259 !check_cpu_subtype(header->cpusubtype))
260 return(LOAD_BADARCH);
261
262 switch (header->filetype) {
263
264 case MH_OBJECT:
265 case MH_EXECUTE:
266 case MH_PRELOAD:
267 if (depth != 1)
268 return (LOAD_FAILURE);
269 break;
270
271 case MH_FVMLIB:
272 case MH_DYLIB:
273 if (depth == 1)
274 return (LOAD_FAILURE);
275 break;
276
277 case MH_DYLINKER:
278 if (depth != 2)
279 return (LOAD_FAILURE);
280 break;
281
282 default:
283 return (LOAD_FAILURE);
284 }
285
286 /*
287 * Get the pager for the file.
288 */
289 UBCINFOCHECK("parse_machfile", vp);
290 pager = (void *) ubc_getpager(vp);
291
292 /*
293 * Map portion that must be accessible directly into
294 * kernel's map.
295 */
296 if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
297 return(LOAD_BADMACHO);
298
299 /*
300 * Round size of Mach-O commands up to page boundry.
301 */
302 size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
303 if (size <= 0)
304 return(LOAD_BADMACHO);
305
306 /*
307 * Map the load commands into kernel memory.
308 */
309 addr = 0;
310 kl_size = size;
311 kl_addr = kalloc(size);
312 addr = kl_addr;
313 if (addr == NULL)
314 return(LOAD_NOSPACE);
315
316 if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
317 UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
318 if (kl_addr )
319 kfree(kl_addr, kl_size);
320 return(EIO);
321 }
322 /* ubc_map(vp); */ /* NOT HERE */
323
324 /*
325 * Scan through the commands, processing each one as necessary.
326 */
327 for (pass = 1; pass <= 2; pass++) {
328 offset = sizeof(struct mach_header);
329 ncmds = header->ncmds;
330 while (ncmds--) {
331 /*
332 * Get a pointer to the command.
333 */
334 lcp = (struct load_command *)(addr + offset);
335 offset += lcp->cmdsize;
336
337 /*
338 * Check for valid lcp pointer by checking
339 * next offset.
340 */
341 if (offset > header->sizeofcmds
342 + sizeof(struct mach_header)) {
343 if (kl_addr )
344 kfree(kl_addr, kl_size);
345 return(LOAD_BADMACHO);
346 }
347
348 /*
349 * Check for valid command.
350 */
351 switch(lcp->cmd) {
352 case LC_SEGMENT:
353 if (pass != 1)
354 break;
355 ret = load_segment(
356 (struct segment_command *) lcp,
357 pager, file_offset,
358 macho_size,
359 (unsigned long)ubc_getsize(vp),
360 map,
361 result);
362 break;
363 case LC_THREAD:
364 if (pass != 2)
365 break;
366 ret = load_thread((struct thread_command *)lcp, thr_act,
367 result);
368 break;
369 case LC_UNIXTHREAD:
370 if (pass != 2)
371 break;
372 ret = load_unixthread(
373 (struct thread_command *) lcp, thr_act,
374 result);
375 break;
376 case LC_LOAD_DYLINKER:
377 if (pass != 2)
378 break;
379 if (depth == 1 || dlp == 0)
380 dlp = (struct dylinker_command *)lcp;
381 else
382 ret = LOAD_FAILURE;
383 break;
384 default:
385 ret = KERN_SUCCESS;/* ignore other stuff */
386 }
387 if (ret != LOAD_SUCCESS)
388 break;
389 }
390 if (ret != LOAD_SUCCESS)
391 break;
392 }
393 if (ret == LOAD_SUCCESS && dlp != 0) {
394 vm_offset_t addr;
395 shared_region_mapping_t shared_region;
396 struct shared_region_task_mappings map_info;
397 shared_region_mapping_t next;
398
399 RedoLookup:
400 vm_get_shared_region(task, &shared_region);
401 map_info.self = (vm_offset_t)shared_region;
402 shared_region_mapping_info(shared_region,
403 &(map_info.text_region),
404 &(map_info.text_size),
405 &(map_info.data_region),
406 &(map_info.data_size),
407 &(map_info.region_mappings),
408 &(map_info.client_base),
409 &(map_info.alternate_base),
410 &(map_info.alternate_next),
411 &(map_info.flags), &next);
412
413 if((map_info.self != (vm_offset_t)system_shared_region) &&
414 (map_info.flags & SHARED_REGION_SYSTEM)) {
415 shared_region_mapping_ref(system_shared_region);
416 vm_set_shared_region(task, system_shared_region);
417 shared_region_mapping_dealloc(
418 (shared_region_mapping_t)map_info.self);
419 goto RedoLookup;
420 }
421
422
423 if (dylink_test) {
424 p->p_flag |= P_NOSHLIB; /* no shlibs in use */
425 addr = map_info.client_base;
426 vm_map(map, &addr, map_info.text_size, 0,
427 (VM_MEMORY_SHARED_PMAP << 24)
428 | SHARED_LIB_ALIAS,
429 map_info.text_region, 0, FALSE,
430 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
431 addr = map_info.client_base + map_info.text_size;
432 vm_map(map, &addr, map_info.data_size,
433 0, SHARED_LIB_ALIAS,
434 map_info.data_region, 0, TRUE,
435 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
436 }
437 ret = load_dylinker(dlp, map, thr_act, depth, result);
438 }
439
440 if (kl_addr )
441 kfree(kl_addr, kl_size);
442
443 if ((ret == LOAD_SUCCESS) && (depth == 1) &&
444 (result->thread_count == 0))
445 ret = LOAD_FAILURE;
446 if (ret == LOAD_SUCCESS)
447 ubc_map(vp);
448
449 return(ret);
450 }
451
452 static
453 load_return_t
454 load_segment(
455 struct segment_command *scp,
456 void * pager,
457 unsigned long pager_offset,
458 unsigned long macho_size,
459 unsigned long end_of_file,
460 vm_map_t map,
461 load_result_t *result
462 )
463 {
464 kern_return_t ret;
465 vm_offset_t map_addr, map_offset;
466 vm_size_t map_size, seg_size, delta_size;
467 caddr_t tmp;
468 vm_prot_t initprot;
469 vm_prot_t maxprot;
470 #if 1
471 extern int print_map_addr;
472 #endif /* 1 */
473
474 /*
475 * Make sure what we get from the file is really ours (as specified
476 * by macho_size).
477 */
478 if (scp->fileoff + scp->filesize > macho_size)
479 return (LOAD_BADMACHO);
480
481 seg_size = round_page(scp->vmsize);
482 if (seg_size == 0)
483 return(KERN_SUCCESS);
484
485 /*
486 * Round sizes to page size.
487 */
488 map_size = round_page(scp->filesize);
489 map_addr = trunc_page(scp->vmaddr);
490
491 map_offset = pager_offset + scp->fileoff;
492
493 if (map_size > 0) {
494 initprot = (scp->initprot) & VM_PROT_ALL;
495 maxprot = (scp->maxprot) & VM_PROT_ALL;
496 /*
497 * Map a copy of the file into the address space.
498 */
499 ret = vm_map(map,
500 &map_addr, map_size, (vm_offset_t)0, FALSE,
501 pager, map_offset, TRUE,
502 initprot, maxprot,
503 VM_INHERIT_DEFAULT);
504 if (ret != KERN_SUCCESS)
505 return(LOAD_NOSPACE);
506
507 #if 1
508 if (print_map_addr)
509 printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
510 #endif /* 1 */
511 /*
512 * If the file didn't end on a page boundary,
513 * we need to zero the leftover.
514 */
515 delta_size = map_size - scp->filesize;
516 #if FIXME
517 if (delta_size > 0) {
518 vm_offset_t tmp;
519
520 ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
521 if (ret != KERN_SUCCESS)
522 return(LOAD_RESOURCE);
523
524 if (copyout(tmp, map_addr + scp->filesize,
525 delta_size)) {
526 (void) vm_deallocate(
527 kernel_map, tmp, delta_size);
528 return(LOAD_FAILURE);
529 }
530
531 (void) vm_deallocate(kernel_map, tmp, delta_size);
532 }
533 #endif /* FIXME */
534 }
535
536 /*
537 * If the virtual size of the segment is greater
538 * than the size from the file, we need to allocate
539 * zero fill memory for the rest.
540 */
541 delta_size = seg_size - map_size;
542 if (delta_size > 0) {
543 vm_offset_t tmp = map_addr + map_size;
544
545 ret = vm_allocate(map, &tmp, delta_size, FALSE);
546 if (ret != KERN_SUCCESS)
547 return(LOAD_NOSPACE);
548 }
549
550 /*
551 * Set protection values. (Note: ignore errors!)
552 */
553
554 if (scp->maxprot != VM_PROT_DEFAULT) {
555 (void) vm_protect(map,
556 map_addr, seg_size,
557 TRUE, scp->maxprot);
558 }
559 if (scp->initprot != VM_PROT_DEFAULT) {
560 (void) vm_protect(map,
561 map_addr, seg_size,
562 FALSE, scp->initprot);
563 }
564 if ( (scp->fileoff == 0) && (scp->filesize != 0) )
565 result->mach_header = map_addr;
566 return(LOAD_SUCCESS);
567 }
568
569 static
570 load_return_t
571 load_unixthread(
572 struct thread_command *tcp,
573 thread_act_t thr_act,
574 load_result_t *result
575 )
576 {
577 thread_t thread = current_thread();
578 load_return_t ret;
579 int customstack =0;
580
581 if (result->thread_count != 0)
582 return (LOAD_FAILURE);
583
584 thread = getshuttle_thread(thr_act);
585 ret = load_threadstack(thread,
586 (unsigned long *)(((vm_offset_t)tcp) +
587 sizeof(struct thread_command)),
588 tcp->cmdsize - sizeof(struct thread_command),
589 &result->user_stack,
590 &customstack);
591 if (ret != LOAD_SUCCESS)
592 return(ret);
593
594 if (customstack)
595 result->customstack = 1;
596 else
597 result->customstack = 0;
598 ret = load_threadentry(thread,
599 (unsigned long *)(((vm_offset_t)tcp) +
600 sizeof(struct thread_command)),
601 tcp->cmdsize - sizeof(struct thread_command),
602 &result->entry_point);
603 if (ret != LOAD_SUCCESS)
604 return(ret);
605
606 ret = load_threadstate(thread,
607 (unsigned long *)(((vm_offset_t)tcp) +
608 sizeof(struct thread_command)),
609 tcp->cmdsize - sizeof(struct thread_command));
610 if (ret != LOAD_SUCCESS)
611 return (ret);
612
613 result->unixproc = TRUE;
614 result->thread_count++;
615
616 return(LOAD_SUCCESS);
617 }
618
/*
 * load_thread()
 *
 * Handle an LC_THREAD command.  The first thread command reuses the
 * activation we were handed (and also gets stack and entry-point
 * state); each subsequent one creates a new thread in the task and is
 * resumed immediately, which is why thread commands must follow all
 * segment commands in the file.
 *
 * Returns LOAD_SUCCESS, LOAD_RESOURCE if thread creation fails, or a
 * LOAD_* error from the state loaders.
 */
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
)
{
	thread_t	thread;
	kern_return_t	kret;
	load_return_t	lret;
	task_t			task;
	int customstack=0;

	task = get_threadtask(thr_act);
	thread = getshuttle_thread(thr_act);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		/*
		 * NOTE(review): drops the reference thread_create returned
		 * while continuing to use "thread" below — presumably the
		 * task's own reference keeps it alive; confirm refcounting.
		 */
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		/* First thread: also establish stack and entry point. */
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		/*
		 * NOTE(review): customstack is consumed before lret is
		 * checked; harmless when lret != LOAD_SUCCESS since we
		 * return immediately after, but the ordering is odd.
		 */
		if (customstack)
				result->customstack = 1;
		else
				result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
686
687 static
688 load_return_t
689 load_threadstate(
690 thread_t thread,
691 unsigned long *ts,
692 unsigned long total_size
693 )
694 {
695 kern_return_t ret;
696 unsigned long size;
697 int flavor;
698
699 /*
700 * Set the thread state.
701 */
702
703 while (total_size > 0) {
704 flavor = *ts++;
705 size = *ts++;
706 total_size -= (size+2)*sizeof(unsigned long);
707 if (total_size < 0)
708 return(LOAD_BADMACHO);
709 ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
710 if (ret != KERN_SUCCESS)
711 return(LOAD_FAILURE);
712 ts += size; /* ts is a (unsigned long *) */
713 }
714 return(LOAD_SUCCESS);
715 }
716
717 static
718 load_return_t
719 load_threadstack(
720 thread_t thread,
721 unsigned long *ts,
722 unsigned long total_size,
723 vm_offset_t *user_stack,
724 int *customstack
725 )
726 {
727 kern_return_t ret;
728 unsigned long size;
729 int flavor;
730
731 while (total_size > 0) {
732 flavor = *ts++;
733 size = *ts++;
734 total_size -= (size+2)*sizeof(unsigned long);
735 if (total_size < 0)
736 return(LOAD_BADMACHO);
737 *user_stack = USRSTACK;
738 ret = thread_userstack(thread, flavor, ts, size,
739 user_stack, customstack);
740 if (ret != KERN_SUCCESS)
741 return(LOAD_FAILURE);
742 ts += size; /* ts is a (unsigned long *) */
743 }
744 return(LOAD_SUCCESS);
745 }
746
747 static
748 load_return_t
749 load_threadentry(
750 thread_t thread,
751 unsigned long *ts,
752 unsigned long total_size,
753 vm_offset_t *entry_point
754 )
755 {
756 kern_return_t ret;
757 unsigned long size;
758 int flavor;
759
760 /*
761 * Set the thread state.
762 */
763 *entry_point = 0;
764 while (total_size > 0) {
765 flavor = *ts++;
766 size = *ts++;
767 total_size -= (size+2)*sizeof(unsigned long);
768 if (total_size < 0)
769 return(LOAD_BADMACHO);
770 ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
771 if (ret != KERN_SUCCESS)
772 return(LOAD_FAILURE);
773 ts += size; /* ts is a (unsigned long *) */
774 }
775 return(LOAD_SUCCESS);
776 }
777
778
/*
 * load_dylinker()
 *
 * Load the dynamic linker named by an LC_LOAD_DYLINKER command.  The
 * dylinker image is first parsed into a throwaway "copy_map" (its own
 * map and pmap) so its preferred addresses are known; the populated
 * range is then copied into the target map, at the preferred address
 * if free, otherwise wherever vm_allocate finds room (sliding the
 * entry point by the same delta).
 *
 * On success, records the dylinker's entry point in *result and sets
 * result->dynlinker.  Returns a LOAD_* code; the copy_map and the
 * vnode reference from get_macho_vnode() are released on every path.
 */
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t	tmp;
	vm_offset_t	dyl_start, map_addr;
	vm_size_t	dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 *	Load the Mach-O.
	 */

	/* Scratch map: parse the dylinker at its preferred addresses first. */
	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max( map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		/* Try the preferred address; fall back to "anywhere". */
		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;

		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			/* Undo the target-map allocation on failure. */
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
				vm_map_copy_discard(tmp);
				(void) vm_map_remove(map,
						     map_addr,
						     map_addr + dyl_length,
						     VM_MAP_NO_FLAGS);
				goto out;	}

		/* Slid from the preferred address: adjust the entry point too. */
		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);

}
884
/*
 * get_macho_vnode()
 *
 * Look up "path" (a kernel-space string), verify it is an executable
 * regular file on an exec-permitted mount, open it, and read enough
 * of it to locate a Mach-O image: either the file is a thin Mach-O
 * (MH_MAGIC at offset 0) or a fat binary, in which case the slice for
 * this architecture is found via fatfile_getarch().
 *
 * On success: *mach_header, *file_offset and *macho_size describe the
 * image, *vpp returns the vnode unlocked but still referenced (the
 * caller must vrele() it — see load_dylinker()), and the file is left
 * open.
 *
 * NOTE(review): the return value mixes errno codes (from namei/VOP_*)
 * with LOAD_* codes (LOAD_BADMACHO etc.); callers treat any nonzero
 * value as failure.  fatfile_getarch() has no prototype visible in
 * this file — presumably declared in a fat-file header; confirm.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr attr, *atp;
	struct nameidata nid, *ndp;
	struct proc *p = current_proc();		/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = KERN_SUCCESS;
	int resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char	pad[512];
	} header;
	off_t fsize = (off_t)0;
	struct	ucred *cred = p->p_ucred;

	ndp = &nid;
	atp = &attr;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, cred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* Strip set-id bits on nosuid mounts or when being traced. */
	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access. for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = ENOENT;
		goto bad1;
	}

	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		ubc_rele(vp);
		goto bad1;
	}

	/* Read the first 512 bytes: enough for either header type. */
	if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
		goto bad2;

	if (header.mach_header.magic == MH_MAGIC)
	    is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
	    header.fat_header.magic == FAT_CIGAM)
	    is_fat = TRUE;
	else {
	    error = LOAD_BADMACHO;
	    goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, &header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		/* Thin file: the whole file is the image. */
		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	/* Success: unlock but keep the reference and the open file. */
	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	/* Opened: unlock, close, drop hold and reference. */
	VOP_UNLOCK(vp, 0, p);
	error = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	/* Not yet opened: vput unlocks and releases in one step. */
	vput(vp);
	return(error);
}