/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <ufs/ufs/lockf.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_statistics.h>

/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	unsigned long		*lib_version,
	load_result_t		*result
),
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
),
load_unixthread(
	struct thread_command	*tcp,
	load_result_t		*result
),
load_thread(
	struct thread_command	*tcp,
	load_result_t		*result
),
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
),
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*user_stack
),
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	vm_offset_t		*entry_point
),
load_fvmlib(
	struct fvmlib_command	*lcp,
	vm_map_t		map,
	int			depth
),
load_idfvmlib(
	struct fvmlib_command	*lcp,
	unsigned long		*version
),
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	int			depth,
	load_result_t		*result
),
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
);

load_return_t
load_machfile(
	struct vnode		*vp,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	load_result_t		*result
)
{
	pmap_t			pmap;
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	kern_return_t		kret;
	load_return_t		lret;

	old_map = current_map();
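	/*
	 * On i386 the replacement map shares (and takes a reference
	 * on) the current task's physical map; on other architectures
	 * a fresh pmap is created for the new address space.
	 */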
#ifdef i386
	pmap = get_task_pmap(current_task());
	pmap_reference(pmap);
#else
	pmap = pmap_create((vm_size_t) 0);
#endif
	map = vm_map_create(pmap,
			get_map_min(old_map),
			get_map_max(old_map),
			TRUE); /**** FIXME ****/

	if (!result)
		result = &myresult;

	*result = (load_result_t) { 0 };

	lret = parse_machfile(vp, map, header, file_offset, macho_size,
			      0, (unsigned long *)0, result);

	if (lret != LOAD_SUCCESS) {
		vm_map_deallocate(map);	/* will lose pmap reference too */
		return(lret);
	}
	/*
	 * Commit to new map.  First make sure that the current
	 * users of the task get done with it, and that we clean
	 * up the old contents of IPC and memory.  The task is
	 * guaranteed to be single threaded upon return (us).
	 *
	 * Swap the new map for the old at the task level and at
	 * our activation.  The latter consumes our new map reference
	 * but each leaves us responsible for the old_map reference.
	 * That lets us get off the pmap associated with it, and
	 * then we can release it.
	 */
	task_halt(current_task());

	old_map = swap_task_map(current_task(), map);
	vm_map_deallocate(old_map);

	old_map = swap_act_map(current_act(), map);

#ifndef i386
	pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif

	vm_map_deallocate(old_map);
	return(LOAD_SUCCESS);
}

int	dylink_test = 1;
extern vm_offset_t	system_shared_region;

static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	struct mach_header	*header,
	unsigned long		file_offset,
	unsigned long		macho_size,
	int			depth,
	unsigned long		*lib_version,
	load_result_t		*result
)
{
	struct machine_slot	*ms;
	int			ncmds;
	struct load_command	*lcp, *next;
	struct dylinker_command	*dlp = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;	/* ncmds may be zero */
	vm_offset_t		addr, kl_addr;
	vm_size_t		size, kl_size;
	int			offset;
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid = 0;

	/*
	 * Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);
	depth++;

	/*
	 * Check to see if right machine type.
	 */
	ms = &machine_slot[cpu_number()];
	if ((header->cputype != ms->cpu_type) ||
	    !check_cpu_subtype(header->cpusubtype))
		return(LOAD_BADARCH);

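	/*
	 * The accepted file types depend on the recursion depth: the
	 * executable (or object/preload image) itself must be at
	 * depth 1, libraries may only appear below it, and the
	 * dynamic linker is only accepted at depth 2.
	 */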
	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 * Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 * Map portion that must be accessible directly into
	 * kernel's map.
	 */
	if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 * Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
#if 0 /* [
#if FIXME
	ret = vm_allocate_with_pager(kernel_map, &addr, size, TRUE, pager,
				     file_offset);
#else
	ret = vm_map(kernel_map, &addr, size, 0, TRUE, pager, file_offset, FALSE,
			VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
#endif /* FIXME */
	if (ret != KERN_SUCCESS) {
		return(LOAD_NOSPACE);
	}
	ubc_map(vp);
#else /* 0 ][ */
	kl_size = size;
	kl_addr = kalloc(size);
	addr = kl_addr;
	if (addr == NULL) {
		printf("No space to read in load commands\n");
		return(LOAD_NOSPACE);
	}
	if (error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
		printf("Load command read failed\n");
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_FAILURE);
	}
	/* ubc_map(vp); */ /* NOT HERE */

#endif /* 0 ] */
	/*
	 * Scan through the commands, processing each one as necessary.
	 */
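	/*
	 * Pass 1 builds the memory image (segments and fixed VM
	 * libraries); pass 2 sets up thread state and notes the
	 * dynamic linker, so thread commands never reference memory
	 * that has not yet been mapped.
	 */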
	for (pass = 1; pass <= 2; pass++) {
		offset = sizeof(struct mach_header);
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 * Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			offset += lcp->cmdsize;

			/*
			 * Check for valid lcp pointer by checking
			 * next offset.
			 */
			if (offset > header->sizeofcmds
					+ sizeof(struct mach_header)) {
#if 0
				vm_map_remove(kernel_map, addr, addr + size);
#endif
				if (kl_addr)
					kfree(kl_addr, kl_size);
				return(LOAD_BADMACHO);
			}

			/*
			 * Check for valid command.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager, file_offset,
					macho_size,
					(unsigned long)ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp,
					result);
				break;
			case LC_LOADFVMLIB:
				if (pass != 1)
					break;
				ret = load_fvmlib((struct fvmlib_command *)lcp,
						  map, depth);
				break;
			case LC_IDFVMLIB:
				if (pass != 1)
					break;
				if (lib_version) {
					ret = load_idfvmlib(
						(struct fvmlib_command *)lcp,
						lib_version);
				}
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if (depth == 1 || dlp == 0)
					dlp = (struct dylinker_command *)lcp;
				else
					ret = LOAD_FAILURE;
				break;
			default:
				ret = LOAD_SUCCESS;	/* ignore other stuff */
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
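	/*
	 * If a dynamic linker was requested: look up the task's
	 * shared region (falling back to the system shared region if
	 * the current one is a full system region), map the shared
	 * text and data regions read-only into the new map, and then
	 * load the dynamic linker itself.
	 */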
	if (ret == LOAD_SUCCESS && dlp != 0) {
		vm_offset_t addr;
		shared_region_mapping_t shared_region;
		struct shared_region_task_mappings map_info;
		shared_region_mapping_t next;

RedoLookup:
		vm_get_shared_region(current_task(), &shared_region);
		map_info.self = (vm_offset_t)shared_region;
		shared_region_mapping_info(shared_region,
			&(map_info.text_region),
			&(map_info.text_size),
			&(map_info.data_region),
			&(map_info.data_size),
			&(map_info.region_mappings),
			&(map_info.client_base),
			&(map_info.alternate_base),
			&(map_info.alternate_next),
			&(map_info.flags), &next);

		if ((map_info.flags & SHARED_REGION_FULL) &&
		    (map_info.flags & SHARED_REGION_SYSTEM)) {
			if (map_info.self != (vm_offset_t)system_shared_region) {
				shared_region_mapping_ref(system_shared_region);
				vm_set_shared_region(current_task(),
						system_shared_region);
				shared_region_mapping_dealloc(
					(shared_region_mapping_t)map_info.self);
				goto RedoLookup;
			}
		}

		if (dylink_test) {
			addr = map_info.client_base;
			vm_map(map, &addr, map_info.text_size, 0,
				(VM_MEMORY_SHARED_PMAP << 24)
						| SHARED_LIB_ALIAS,
				map_info.text_region, 0, FALSE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
			addr = map_info.client_base + map_info.text_size;
			vm_map(map, &addr, map_info.data_size,
				0, SHARED_LIB_ALIAS,
				map_info.data_region, 0, TRUE,
				VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
		}
		ret = load_dylinker(dlp, map, depth, result);
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);
#if 0
	vm_map_remove(kernel_map, addr, addr + size);
#endif
	if ((ret == LOAD_SUCCESS) && (depth == 1) &&
	    (result->thread_count == 0))
		ret = LOAD_FAILURE;
	if (ret == LOAD_SUCCESS)
		ubc_map(vp);

	return(ret);
}

static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	unsigned long		pager_offset,
	unsigned long		macho_size,
	unsigned long		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t	ret;
	vm_offset_t	map_addr, map_offset;
	vm_size_t	map_size, seg_size, delta_size;
	caddr_t		tmp;
	vm_prot_t	initprot;
	vm_prot_t	maxprot;
#if 1
	extern int print_map_addr;
#endif /* 1 */

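	/*
	 * A segment is established in up to three steps: map the
	 * file-backed portion, zero the tail of its last page, and
	 * allocate zero-fill memory for any part of vmsize beyond
	 * filesize.
	 */
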
	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(LOAD_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 * Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0, FALSE,
				pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

#if 1
		if (print_map_addr)
			printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
#endif /* 1 */
		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_allocate(map, &tmp, delta_size, FALSE);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	/*
	 * Set protection values. (Note: ignore errors!)
	 */

	if (scp->maxprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  TRUE, scp->maxprot);
	}
	if (scp->initprot != VM_PROT_DEFAULT) {
		(void) vm_protect(map,
				  map_addr, seg_size,
				  FALSE, scp->initprot);
	}
	if ((scp->fileoff == 0) && (scp->filesize != 0))
		result->mach_header = map_addr;
	return(LOAD_SUCCESS);
}

static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	load_result_t		*result
)
{
	thread_t	thread = current_thread();
	load_return_t	ret;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->user_stack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadentry(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command),
			&result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}

static
load_return_t
load_thread(
	struct thread_command	*tcp,
	load_result_t		*result
)
{
	thread_t	thread;
	kern_return_t	kret;
	load_return_t	lret;

	if (result->thread_count == 0)
		thread = current_thread();
	else {
		kret = thread_create(current_task(), &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
			(unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
			tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(current_thread(),
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack);
		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(current_thread(),
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	} else {
		/*
		 * Resume the thread now; note that this means that
		 * thread commands should appear after all other load
		 * commands, to be sure they don't reference anything
		 * not yet mapped.
		 */
		thread_resume(thread);
	}

	result->thread_count++;

	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 * Set the thread state.
	 */

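	/*
	 * The thread command payload is a sequence of
	 * (flavor, count, state) tuples of unsigned longs:
	 *
	 *	ts[0]		flavor of the state that follows
	 *	ts[1]		count of unsigned longs of state
	 *	ts[2..count+1]	machine-dependent register state
	 */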
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		/* total_size is unsigned; check before subtracting */
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*user_stack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 * Get the user stack pointer out of the thread state.
	 */
	*user_stack = 0;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		/* total_size is unsigned; check before subtracting */
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		ret = thread_userstack(thread, flavor, ts, size, user_stack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;

	/*
	 * Get the entry point out of the thread state.
	 */
	*entry_point = 0;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		/* total_size is unsigned; check before subtracting */
		if ((size + 2) * sizeof(unsigned long) > total_size)
			return(LOAD_BADMACHO);
		total_size -= (size + 2) * sizeof(unsigned long);
		ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_fvmlib(
	struct fvmlib_command	*lcp,
	vm_map_t		map,
	int			depth
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	unsigned long		lib_version;
	load_result_t		myresult;
	kern_return_t		ret;

	name = (char *)lcp + lcp->fvmlib.name.offset;
	/*
	 * Check for a proper null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 * Load the Mach-O.
	 */
	ret = parse_machfile(vp, map, &header,
			file_offset, macho_size,
			depth, &lib_version, &myresult);

	if ((ret == LOAD_SUCCESS) &&
	    (lib_version < lcp->fvmlib.minor_version))
		ret = LOAD_SHLIB;

	vrele(vp);
	return(ret);
}

static
load_return_t
load_idfvmlib(
	struct fvmlib_command	*lcp,
	unsigned long		*version
)
{
	*version = lcp->fvmlib.minor_version;
	return(LOAD_SUCCESS);
}

static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	vm_map_t		map,
	int			depth,
	load_result_t		*result
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	unsigned long		file_offset;
	unsigned long		macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	vm_offset_t		dyl_start, map_addr;
	vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 * Check for a proper null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = (load_result_t) { 0 };

	/*
	 * Load the Mach-O.
	 */

	copy_map = vm_map_create(pmap_create(macho_size),
			get_map_min(map), get_map_max(map), TRUE);

	ret = parse_machfile(vp, copy_map, &header,
			file_offset, macho_size,
			depth, 0, &myresult);

	if (ret)
		goto out;

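	/*
	 * The dynamic linker was parsed into a scratch map.  Copy its
	 * image into the target map, preferably at its linked address
	 * but anywhere if that is taken, and slide the recorded entry
	 * point by however far the image moved.
	 */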
	if (get_map_nentries(copy_map) > 0) {

		dyl_start = get_map_start(copy_map);
		dyl_length = get_map_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
		if (ret != KERN_SUCCESS) {
			ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;
		}
		ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
				&tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     map_addr,
					     map_addr + dyl_length,
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		ubc_map(vp);
	}
out:
	vm_map_deallocate(copy_map);

	vrele(vp);
	return (ret);
}

static
load_return_t
get_macho_vnode(
	char			*path,
	struct mach_header	*mach_header,
	unsigned long		*file_offset,
	unsigned long		*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vattr		attr, *atp;
	struct nameidata	nid, *ndp;
	struct proc		*p = current_proc();	/* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error;
	int			resid;
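	/*
	 * The first 512 bytes of the file are read into this union so
	 * the loader can tell a thin Mach-O (MH_MAGIC) from a fat
	 * binary (FAT_MAGIC / FAT_CIGAM) before deciding on a file
	 * offset.
	 */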
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char			pad[512];
	} header;

	error = KERN_SUCCESS;

	ndp = &nid;
	atp = &attr;

	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, UIO_SYSSPACE, path, p);

	if (error = namei(ndp))
		return(error);

	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if (error = VOP_GETATTR(vp, &attr, p->p_ucred, p))
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
		atp->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if (error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p))
		goto bad1;
	if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if (error = VOP_OPEN(vp, FREAD, p->p_ucred, p))
		goto bad1;
	if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p))
		goto bad2;

/* XXXX WMG - we should check for a short read of the header here */

	if (header.mach_header.magic == MH_MAGIC)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/*
		 * Look up our architecture in the fat file.
		 */
		error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS) {
			goto bad2;
		}
		/*
		 * Read the Mach-O header out of it
		 */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if (error) {
			error = LOAD_FAILURE;
			goto bad2;
		}

		/*
		 * Is this really a Mach-O?
		 */
		if (header.mach_header.magic != MH_MAGIC) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*mach_header = header.mach_header;
		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
		*vpp = vp;
		/* leaks otherwise - A.R */
		FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);

		/* i_lock exclusive panics, otherwise during pageins */
		VOP_UNLOCK(vp, 0, p);
		return (error);
	} else {

		*mach_header = header.mach_header;
		*file_offset = 0;
		if (UBCISVALID(vp))
			ubc_setsize(vp, attr.va_size);	/* XXX why? */
		*macho_size = attr.va_size;
		*vpp = vp;
		/* leaks otherwise - A.R */
		FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);

		/* i_lock exclusive panics, otherwise during pageins */
		VOP_UNLOCK(vp, 0, p);
		return (error);
	}

bad2:
	/*
	 * unlock and close the vnode, restore the old one, free the
	 * pathname buf, and punt.
	 */
	VOP_UNLOCK(vp, 0, p);
	vn_close(vp, FREAD, p->p_ucred, p);
	FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);
	return (error);
bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI);
	vput(vp);
	return(error);
}