/* bsd/kern/mach_loader.c — Mach object file loader (apple/xnu, xnu-344) */
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (C) 1988, 1989, NeXT, Inc.
24 *
25 * File: kern/mach_loader.c
26 * Author: Avadis Tevanian, Jr.
27 *
28 * Mach object file loader (kernel version, for now).
29 *
30 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
31 * Started.
32 */
33 #include <sys/param.h>
34 #include <sys/vnode.h>
35 #include <sys/uio.h>
36 #include <sys/namei.h>
37 #include <sys/proc.h>
38 #include <sys/stat.h>
39 #include <sys/malloc.h>
40 #include <sys/mount.h>
41 #include <sys/fcntl.h>
42 #include <sys/ubc.h>
43
44 #include <mach/mach_types.h>
45
46 #include <kern/mach_loader.h>
47
48 #include <mach-o/fat.h>
49 #include <mach-o/loader.h>
50
51 #include <kern/cpu_number.h>
52
53 #include <vm/vm_map.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_pager.h>
56 #include <vm/vnode_pager.h>
57 #include <mach/vm_statistics.h>
58
59 #include <mach/shared_memory_server.h>
60 #include <vm/vm_shared_memory_server.h>
61
62 #include <machine/vmparam.h>
63
/*
 * Prototypes of static functions.
 */
static
load_return_t
/* Walk an image's load commands; recurses (via load_dylinker) with depth. */
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_act_t		thr_act,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	load_result_t		*result
),
/* Map one LC_SEGMENT into the target map and set its protections. */
load_segment(
	struct segment_command	*scp,
	void *			pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
),
/* Set up the single BSD-style initial thread (LC_UNIXTHREAD). */
load_unixthread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
),
/* Set up a Mach-style thread (LC_THREAD); may create extra threads. */
load_thread(
	struct thread_command	*tcp,
	thread_act_t		thr_act,
	load_result_t		*result
),
/* Apply flavor/size-prefixed register-state blocks to a thread. */
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
),
/* Derive the user stack (and custom-stack flag) from thread state. */
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack,
	int		*customstack
),
/* Derive the user-mode entry point from thread state. */
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
),
/* Load the dynamic linker named by an LC_LOAD_DYLINKER command. */
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t		thr_act,
	int			depth,
	load_result_t		*result
),
/* Resolve a path to an opened vnode plus the contained Mach-O's
 * offset and size (handles both thin and fat files). */
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);
130
/*
 * load_machfile
 *
 * Top-level entry point: load the Mach-O image backed by `vp' into a
 * task address space and, when we created the map ourselves, commit it
 * to the current task.
 *
 * If `new_map' is VM_MAP_NULL a fresh map (and, except on i386, a
 * fresh pmap) is created; on success it is swapped in as the current
 * task's map.  Otherwise the image is loaded into the supplied map and
 * no commit is performed.
 *
 * `result' may be NULL, in which case a local result structure is used.
 * Returns LOAD_SUCCESS or a LOAD_* failure code; on failure a locally
 * created map is deallocated (dropping its pmap reference with it).
 */
load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result,
	thread_act_t		thr_act,
	vm_map_t		new_map
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	kern_return_t		kret;	/* NOTE(review): unused in this function */
	load_return_t		lret;
	boolean_t create_map = TRUE;

	/* Caller supplied a map: load in place, do not swap maps below. */
	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef i386
		/* i386: reuse (and take a reference on) the task's pmap. */
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else
		pmap = pmap_create((vm_size_t) 0);
#endif
		map = vm_map_create(pmap,
				get_map_min(old_map),
				get_map_max(old_map),
				TRUE); /**** FIXME ****/
	} else
		map = new_map;

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			     0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map)
			vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}
	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old at the task level and at
	 *	our activation.  The latter consumes our new map reference
	 *	but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_deallocate(old_map);

		old_map = swap_act_map(current_act(), map);

#ifndef i386
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
209
/* When non-zero (the default), map the system shared region into a
 * dynamically linked task in parse_machfile() — presumably a debug
 * switch; confirm before changing. */
int dylink_test = 1;
/* System-wide default shared region; defined elsewhere
 * (vm_shared_memory_server). */
extern vm_offset_t system_shared_region;
212
213 static
214 load_return_t
215 parse_machfile(
216 struct vnode *vp,
217 vm_map_t map,
218 thread_act_t thr_act,
219 struct mach_header *header,
220 unsigned long file_offset,
221 unsigned long macho_size,
222 int depth,
223 load_result_t *result
224 )
225 {
226 struct machine_slot *ms;
227 int ncmds;
228 struct load_command *lcp, *next;
229 struct dylinker_command *dlp = 0;
230 void * pager;
231 load_return_t ret;
232 vm_offset_t addr, kl_addr;
233 vm_size_t size,kl_size;
234 int offset;
235 int pass;
236 struct proc *p = current_proc(); /* XXXX */
237 int error;
238 int resid=0;
239 task_t task;
240
241 /*
242 * Break infinite recursion
243 */
244 if (depth > 6)
245 return(LOAD_FAILURE);
246
247 task = (task_t)get_threadtask(thr_act);
248
249 depth++;
250
251 /*
252 * Check to see if right machine type.
253 */
254 ms = &machine_slot[cpu_number()];
255 if ((header->cputype != ms->cpu_type) ||
256 !check_cpu_subtype(header->cpusubtype))
257 return(LOAD_BADARCH);
258
259 switch (header->filetype) {
260
261 case MH_OBJECT:
262 case MH_EXECUTE:
263 case MH_PRELOAD:
264 if (depth != 1)
265 return (LOAD_FAILURE);
266 break;
267
268 case MH_FVMLIB:
269 case MH_DYLIB:
270 if (depth == 1)
271 return (LOAD_FAILURE);
272 break;
273
274 case MH_DYLINKER:
275 if (depth != 2)
276 return (LOAD_FAILURE);
277 break;
278
279 default:
280 return (LOAD_FAILURE);
281 }
282
283 /*
284 * Get the pager for the file.
285 */
286 UBCINFOCHECK("parse_machfile", vp);
287 pager = (void *) ubc_getpager(vp);
288
289 /*
290 * Map portion that must be accessible directly into
291 * kernel's map.
292 */
293 if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
294 return(LOAD_BADMACHO);
295
296 /*
297 * Round size of Mach-O commands up to page boundry.
298 */
299 size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
300 if (size <= 0)
301 return(LOAD_BADMACHO);
302
303 /*
304 * Map the load commands into kernel memory.
305 */
306 addr = 0;
307 kl_size = size;
308 kl_addr = kalloc(size);
309 addr = kl_addr;
310 if (addr == NULL)
311 return(LOAD_NOSPACE);
312
313 if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
314 UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
315 if (kl_addr )
316 kfree(kl_addr, kl_size);
317 return(EIO);
318 }
319 /* ubc_map(vp); */ /* NOT HERE */
320
321 /*
322 * Scan through the commands, processing each one as necessary.
323 */
324 for (pass = 1; pass <= 2; pass++) {
325 offset = sizeof(struct mach_header);
326 ncmds = header->ncmds;
327 while (ncmds--) {
328 /*
329 * Get a pointer to the command.
330 */
331 lcp = (struct load_command *)(addr + offset);
332 offset += lcp->cmdsize;
333
334 /*
335 * Check for valid lcp pointer by checking
336 * next offset.
337 */
338 if (offset > header->sizeofcmds
339 + sizeof(struct mach_header)) {
340 if (kl_addr )
341 kfree(kl_addr, kl_size);
342 return(LOAD_BADMACHO);
343 }
344
345 /*
346 * Check for valid command.
347 */
348 switch(lcp->cmd) {
349 case LC_SEGMENT:
350 if (pass != 1)
351 break;
352 ret = load_segment(
353 (struct segment_command *) lcp,
354 pager, file_offset,
355 macho_size,
356 (unsigned long)ubc_getsize(vp),
357 map,
358 result);
359 break;
360 case LC_THREAD:
361 if (pass != 2)
362 break;
363 ret = load_thread((struct thread_command *)lcp, thr_act,
364 result);
365 break;
366 case LC_UNIXTHREAD:
367 if (pass != 2)
368 break;
369 ret = load_unixthread(
370 (struct thread_command *) lcp, thr_act,
371 result);
372 break;
373 case LC_LOAD_DYLINKER:
374 if (pass != 2)
375 break;
376 if (depth == 1 || dlp == 0)
377 dlp = (struct dylinker_command *)lcp;
378 else
379 ret = LOAD_FAILURE;
380 break;
381 default:
382 ret = KERN_SUCCESS;/* ignore other stuff */
383 }
384 if (ret != LOAD_SUCCESS)
385 break;
386 }
387 if (ret != LOAD_SUCCESS)
388 break;
389 }
390 if (ret == LOAD_SUCCESS && dlp != 0) {
391 vm_offset_t addr;
392 shared_region_mapping_t shared_region;
393 struct shared_region_task_mappings map_info;
394 shared_region_mapping_t next;
395
396 RedoLookup:
397 vm_get_shared_region(task, &shared_region);
398 map_info.self = (vm_offset_t)shared_region;
399 shared_region_mapping_info(shared_region,
400 &(map_info.text_region),
401 &(map_info.text_size),
402 &(map_info.data_region),
403 &(map_info.data_size),
404 &(map_info.region_mappings),
405 &(map_info.client_base),
406 &(map_info.alternate_base),
407 &(map_info.alternate_next),
408 &(map_info.flags), &next);
409
410 if((map_info.self != (vm_offset_t)system_shared_region) &&
411 (map_info.flags & SHARED_REGION_SYSTEM)) {
412 shared_region_mapping_ref(system_shared_region);
413 vm_set_shared_region(task, system_shared_region);
414 shared_region_mapping_dealloc(
415 (shared_region_mapping_t)map_info.self);
416 goto RedoLookup;
417 }
418
419
420 if (dylink_test) {
421 p->p_flag |= P_NOSHLIB; /* no shlibs in use */
422 addr = map_info.client_base;
423 vm_map(map, &addr, map_info.text_size, 0,
424 (VM_MEMORY_SHARED_PMAP << 24)
425 | SHARED_LIB_ALIAS,
426 map_info.text_region, 0, FALSE,
427 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
428 addr = map_info.client_base + map_info.text_size;
429 vm_map(map, &addr, map_info.data_size,
430 0, SHARED_LIB_ALIAS,
431 map_info.data_region, 0, TRUE,
432 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
433 }
434 ret = load_dylinker(dlp, map, thr_act, depth, result);
435 }
436
437 if (kl_addr )
438 kfree(kl_addr, kl_size);
439
440 if ((ret == LOAD_SUCCESS) && (depth == 1) &&
441 (result->thread_count == 0))
442 ret = LOAD_FAILURE;
443 if (ret == LOAD_SUCCESS)
444 ubc_map(vp);
445
446 return(ret);
447 }
448
449 static
450 load_return_t
451 load_segment(
452 struct segment_command *scp,
453 void * pager,
454 unsigned long pager_offset,
455 unsigned long macho_size,
456 unsigned long end_of_file,
457 vm_map_t map,
458 load_result_t *result
459 )
460 {
461 kern_return_t ret;
462 vm_offset_t map_addr, map_offset;
463 vm_size_t map_size, seg_size, delta_size;
464 caddr_t tmp;
465 vm_prot_t initprot;
466 vm_prot_t maxprot;
467 #if 1
468 extern int print_map_addr;
469 #endif /* 1 */
470
471 /*
472 * Make sure what we get from the file is really ours (as specified
473 * by macho_size).
474 */
475 if (scp->fileoff + scp->filesize > macho_size)
476 return (LOAD_BADMACHO);
477
478 seg_size = round_page(scp->vmsize);
479 if (seg_size == 0)
480 return(KERN_SUCCESS);
481
482 /*
483 * Round sizes to page size.
484 */
485 map_size = round_page(scp->filesize);
486 map_addr = trunc_page(scp->vmaddr);
487
488 map_offset = pager_offset + scp->fileoff;
489
490 if (map_size > 0) {
491 initprot = (scp->initprot) & VM_PROT_ALL;
492 maxprot = (scp->maxprot) & VM_PROT_ALL;
493 /*
494 * Map a copy of the file into the address space.
495 */
496 ret = vm_map(map,
497 &map_addr, map_size, (vm_offset_t)0, FALSE,
498 pager, map_offset, TRUE,
499 initprot, maxprot,
500 VM_INHERIT_DEFAULT);
501 if (ret != KERN_SUCCESS)
502 return(LOAD_NOSPACE);
503
504 #if 1
505 if (print_map_addr)
506 printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
507 #endif /* 1 */
508 /*
509 * If the file didn't end on a page boundary,
510 * we need to zero the leftover.
511 */
512 delta_size = map_size - scp->filesize;
513 #if FIXME
514 if (delta_size > 0) {
515 vm_offset_t tmp;
516
517 ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
518 if (ret != KERN_SUCCESS)
519 return(LOAD_RESOURCE);
520
521 if (copyout(tmp, map_addr + scp->filesize,
522 delta_size)) {
523 (void) vm_deallocate(
524 kernel_map, tmp, delta_size);
525 return(LOAD_FAILURE);
526 }
527
528 (void) vm_deallocate(kernel_map, tmp, delta_size);
529 }
530 #endif /* FIXME */
531 }
532
533 /*
534 * If the virtual size of the segment is greater
535 * than the size from the file, we need to allocate
536 * zero fill memory for the rest.
537 */
538 delta_size = seg_size - map_size;
539 if (delta_size > 0) {
540 vm_offset_t tmp = map_addr + map_size;
541
542 ret = vm_allocate(map, &tmp, delta_size, FALSE);
543 if (ret != KERN_SUCCESS)
544 return(LOAD_NOSPACE);
545 }
546
547 /*
548 * Set protection values. (Note: ignore errors!)
549 */
550
551 if (scp->maxprot != VM_PROT_DEFAULT) {
552 (void) vm_protect(map,
553 map_addr, seg_size,
554 TRUE, scp->maxprot);
555 }
556 if (scp->initprot != VM_PROT_DEFAULT) {
557 (void) vm_protect(map,
558 map_addr, seg_size,
559 FALSE, scp->initprot);
560 }
561 if ( (scp->fileoff == 0) && (scp->filesize != 0) )
562 result->mach_header = map_addr;
563 return(LOAD_SUCCESS);
564 }
565
566 static
567 load_return_t
568 load_unixthread(
569 struct thread_command *tcp,
570 thread_act_t thr_act,
571 load_result_t *result
572 )
573 {
574 thread_t thread = current_thread();
575 load_return_t ret;
576 int customstack =0;
577
578 if (result->thread_count != 0)
579 return (LOAD_FAILURE);
580
581 thread = getshuttle_thread(thr_act);
582 ret = load_threadstack(thread,
583 (unsigned long *)(((vm_offset_t)tcp) +
584 sizeof(struct thread_command)),
585 tcp->cmdsize - sizeof(struct thread_command),
586 &result->user_stack,
587 &customstack);
588 if (ret != LOAD_SUCCESS)
589 return(ret);
590
591 if (customstack)
592 result->customstack = 1;
593 else
594 result->customstack = 0;
595 ret = load_threadentry(thread,
596 (unsigned long *)(((vm_offset_t)tcp) +
597 sizeof(struct thread_command)),
598 tcp->cmdsize - sizeof(struct thread_command),
599 &result->entry_point);
600 if (ret != LOAD_SUCCESS)
601 return(ret);
602
603 ret = load_threadstate(thread,
604 (unsigned long *)(((vm_offset_t)tcp) +
605 sizeof(struct thread_command)),
606 tcp->cmdsize - sizeof(struct thread_command));
607 if (ret != LOAD_SUCCESS)
608 return (ret);
609
610 result->unixproc = TRUE;
611 result->thread_count++;
612
613 return(LOAD_SUCCESS);
614 }
615
616 static
617 load_return_t
618 load_thread(
619 struct thread_command *tcp,
620 thread_act_t thr_act,
621 load_result_t *result
622 )
623 {
624 thread_t thread;
625 kern_return_t kret;
626 load_return_t lret;
627 task_t task;
628 int customstack=0;
629
630 task = get_threadtask(thr_act);
631 thread = getshuttle_thread(thr_act);
632
633 /* if count is 0; same as thr_act */
634 if (result->thread_count != 0) {
635 kret = thread_create(task, &thread);
636 if (kret != KERN_SUCCESS)
637 return(LOAD_RESOURCE);
638 thread_deallocate(thread);
639 }
640
641 lret = load_threadstate(thread,
642 (unsigned long *)(((vm_offset_t)tcp) +
643 sizeof(struct thread_command)),
644 tcp->cmdsize - sizeof(struct thread_command));
645 if (lret != LOAD_SUCCESS)
646 return (lret);
647
648 if (result->thread_count == 0) {
649 lret = load_threadstack(thread,
650 (unsigned long *)(((vm_offset_t)tcp) +
651 sizeof(struct thread_command)),
652 tcp->cmdsize - sizeof(struct thread_command),
653 &result->user_stack,
654 &customstack);
655 if (customstack)
656 result->customstack = 1;
657 else
658 result->customstack = 0;
659
660 if (lret != LOAD_SUCCESS)
661 return(lret);
662
663 lret = load_threadentry(thread,
664 (unsigned long *)(((vm_offset_t)tcp) +
665 sizeof(struct thread_command)),
666 tcp->cmdsize - sizeof(struct thread_command),
667 &result->entry_point);
668 if (lret != LOAD_SUCCESS)
669 return(lret);
670 }
671 /*
672 * Resume thread now, note that this means that the thread
673 * commands should appear after all the load commands to
674 * be sure they don't reference anything not yet mapped.
675 */
676 else
677 thread_resume(thread);
678
679 result->thread_count++;
680
681 return(LOAD_SUCCESS);
682 }
683
684 static
685 load_return_t
686 load_threadstate(
687 thread_t thread,
688 unsigned long *ts,
689 unsigned long total_size
690 )
691 {
692 kern_return_t ret;
693 unsigned long size;
694 int flavor;
695
696 /*
697 * Set the thread state.
698 */
699
700 while (total_size > 0) {
701 flavor = *ts++;
702 size = *ts++;
703 total_size -= (size+2)*sizeof(unsigned long);
704 if (total_size < 0)
705 return(LOAD_BADMACHO);
706 ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
707 if (ret != KERN_SUCCESS)
708 return(LOAD_FAILURE);
709 ts += size; /* ts is a (unsigned long *) */
710 }
711 return(LOAD_SUCCESS);
712 }
713
714 static
715 load_return_t
716 load_threadstack(
717 thread_t thread,
718 unsigned long *ts,
719 unsigned long total_size,
720 vm_offset_t *user_stack,
721 int *customstack
722 )
723 {
724 kern_return_t ret;
725 unsigned long size;
726 int flavor;
727
728 while (total_size > 0) {
729 flavor = *ts++;
730 size = *ts++;
731 total_size -= (size+2)*sizeof(unsigned long);
732 if (total_size < 0)
733 return(LOAD_BADMACHO);
734 *user_stack = USRSTACK;
735 ret = thread_userstack(thread, flavor, ts, size,
736 user_stack, customstack);
737 if (ret != KERN_SUCCESS)
738 return(LOAD_FAILURE);
739 ts += size; /* ts is a (unsigned long *) */
740 }
741 return(LOAD_SUCCESS);
742 }
743
744 static
745 load_return_t
746 load_threadentry(
747 thread_t thread,
748 unsigned long *ts,
749 unsigned long total_size,
750 vm_offset_t *entry_point
751 )
752 {
753 kern_return_t ret;
754 unsigned long size;
755 int flavor;
756
757 /*
758 * Set the thread state.
759 */
760 *entry_point = 0;
761 while (total_size > 0) {
762 flavor = *ts++;
763 size = *ts++;
764 total_size -= (size+2)*sizeof(unsigned long);
765 if (total_size < 0)
766 return(LOAD_BADMACHO);
767 ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
768 if (ret != KERN_SUCCESS)
769 return(LOAD_FAILURE);
770 ts += size; /* ts is a (unsigned long *) */
771 }
772 return(LOAD_SUCCESS);
773 }
774
775
/*
 * load_dylinker
 *
 * Handle an LC_LOAD_DYLINKER command: look up the dynamic linker named
 * in the command, parse it into a scratch map, then copy its segments
 * into the target map — sliding them (and the recorded entry point)
 * if the linker's preferred address range is unavailable.  On success
 * the linker's entry point becomes the image's entry point.
 */
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	thread_act_t	thr_act,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t	tmp;
	vm_offset_t	dyl_start, map_addr;
	vm_size_t	dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null terminated string.
	 *	(The name must terminate within the command's cmdsize.)
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 *	Load the Mach-O.
	 *	Parse into a private scratch map first so the linker's
	 *	segments can be relocated as a unit below.
	 */

	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max( map), TRUE);

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				depth, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		/* Try the linker's preferred address first; fall back to
		 * letting vm_allocate choose (anywhere == TRUE). */
		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS)  {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;

		}
		/* Move the parsed segments from the scratch map into the
		 * reserved range of the target map. */
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
				vm_map_copy_discard(tmp);
				(void) vm_map_remove(map,
						     map_addr,
						     map_addr + dyl_length,
						     VM_MAP_NO_FLAGS);
				goto out;	}

		/* If the linker slid, slide its entry point too. */
		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);

}
881
/*
 * get_macho_vnode
 *
 * Translate a pathname (here: the dynamic linker path from an
 * LC_LOAD_DYLINKER command) into an opened, unlocked, referenced vnode
 * plus the offset and size of the Mach-O image within it, handling
 * both thin and fat files.  Performs the usual exec permission checks
 * (regular file, MNT_NOEXEC, exec bits).
 *
 * On success *vpp holds a referenced vnode (caller must vrele) and
 * *mach_header / *file_offset / *macho_size describe the selected
 * slice.
 *
 * NOTE(review): the return value mixes errno codes (EACCES, ENOENT,
 * namei/VOP errors) with load_return_t codes (LOAD_BADMACHO, ...);
 * callers appear to treat any non-zero value as failure.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr attr, *atp;
	struct nameidata nid, *ndp;
	struct proc *p = current_proc();  /* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = KERN_SUCCESS;
	int resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char	pad[512];
	} header;
	off_t fsize = (off_t)0;
	struct	ucred *cred = p->p_ucred;

	ndp = &nid;
	atp = &attr;

	/* init the namei data to point the file user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, cred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	/* strip set-id bits when mounted nosuid or the process is traced */
	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, cred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* hold the vnode for the IO */
	if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
		error = ENOENT;
		goto bad1;
	}

	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, cred, p)) {
		ubc_rele(vp);
		goto bad1;
	}

	/* read enough to distinguish a thin Mach-O from a fat file */
	if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
		goto bad2;

	if (header.mach_header.magic == MH_MAGIC)
	    is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
	    header.fat_header.magic == FAT_CIGAM)
	    is_fat = TRUE;
	else {
	    error = LOAD_BADMACHO;
	    goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, &header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {

		*file_offset = 0;
		*macho_size = fsize = attr.va_size;
	}

	*mach_header = header.mach_header;
	*vpp = vp;
	if (UBCISVALID(vp))
		ubc_setsize(vp, fsize);	/* XXX why? */

	/* success: return the vnode unlocked but still referenced */
	VOP_UNLOCK(vp, 0, p);
	ubc_rele(vp);
	return (error);

bad2:
	/* file was opened: unlock, close, drop UBC hold and reference.
	 * NOTE(review): this overwrites the original error with
	 * VOP_CLOSE's result, losing the real failure code — confirm
	 * intent. */
	VOP_UNLOCK(vp, 0, p);
	error = VOP_CLOSE(vp, FREAD, cred, p);
	ubc_rele(vp);
	vrele(vp);
	return (error);

bad1:
	/* looked up but not opened: vput drops both lock and reference */
	vput(vp);
	return(error);
}