/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	Copyright (C) 1988, 1989,  NeXT, Inc.
 *
 *	File:	kern/mach_loader.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>
#include <machine/exec.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_protos.h>
/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t	pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void	pmap_switch(pmap_t);
/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t	thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t	thread_state_initialize(thread_t thread);
/* XXX should have prototypes in a shared header file */
extern int	get_map_nentries(vm_map_t);
extern kern_return_t	thread_userstack(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t	thread_entrypoint(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *);

extern kern_return_t	memory_object_signed(memory_object_control_t control,
					     boolean_t is_signed);
/* An empty load_result_t */
static load_result_t load_result_null = {
	.mach_header = MACH_VM_MIN_ADDRESS,
	.entry_point = MACH_VM_MIN_ADDRESS,
	.user_stack = MACH_VM_MIN_ADDRESS
};
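/*
 * Fields not named above (thread_count, unixproc, dynlinker, customstack,
 * csflags, ...) are zero-filled by C99 designated-initializer rules, so
 * the template above really is an all-zero "empty" result.
 */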
/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	off_t			end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	off_t				end_of_file,
	vm_map_t			map,
	load_result_t			*result
);

int load_code_signature(
	struct linkedit_data_command	*lcp,
	struct vnode			*vp,
	off_t				macho_offset,
	off_t				macho_size,
	cpu_type_t			cputype,
	load_result_t			*result);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
);

static load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thread,
	int			depth,
	load_result_t		*result,
	boolean_t		is_64bit
);

static load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
);
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thread,
	vm_map_t		new_map,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
			      0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	     vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);

	/*
	 *	Commit to new map.  First make sure that the current
	 *	users of the task get done with it, and that we clean
	 *	up the old contents of IPC and memory.  The task is
	 *	guaranteed to be single threaded upon return (us).
	 *
	 *	Swap the new map for the old, which consumes our new map
	 *	reference but each leaves us responsible for the old_map reference.
	 *	That lets us get off the pmap associated with it, and
	 *	then we can release it.
	 */
	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_clear_4GB_pagezero(old_map);
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}
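/*
 * A rough sketch of the calling side, for reference only (the real call
 * site is the Mach-O image activator in the exec path; its surrounding
 * setup is omitted here):
 *
 *	load_result_t	lr;
 *	load_return_t	lret;
 *
 *	lret = load_machfile(imgp, header, thread, VM_MAP_NULL, &lr);
 *	if (lret == LOAD_SUCCESS) {
 *		// lr.entry_point and lr.user_stack seed the initial
 *		// thread state of the new image.
 *	}
 *
 * Passing VM_MAP_NULL asks load_machfile() to create and commit a fresh
 * map; passing an existing map loads into it without the task-map swap.
 */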
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
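/*
 * For reference, the region kalloc()ed and read in below covers the
 * mach_header (or mach_header_64) plus the load commands that follow it
 * contiguously in the file:
 *
 *	| mach_header[_64]   | load commands ...           |
 *	|<- mach_header_sz ->|<--- header->sizeofcmds ---->|
 */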
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void			*pager;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void			*kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int			resid = 0;
	task_t			task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}
	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	task = (task_t)get_threadtask(thread);

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype,
		header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;

	default:
		return (LOAD_FAILURE);
	}
	/*
	 *	Get the pager for the file.
	 */
	pager = (void *) ubc_getpager(vp);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr)
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */
	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
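		/*
		 * Two passes: segment commands are processed on pass 1 so
		 * the address space is fully mapped before thread, dylinker
		 * and code-signature commands run on pass 2; each case below
		 * guards itself with a pass check.
		 */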
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}
			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment_64(
					       (struct segment_command_64 *)lcp,
					       pager,
					       file_offset,
					       macho_size,
					       ubc_getsize(vp),
					       map,
					       result);
				break;
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					       (struct segment_command *) lcp,
					       pager,
					       file_offset,
					       macho_size,
					       ubc_getsize(vp),
					       map,
					       result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						  thread,
						  result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_CODE_SIGNATURE:
				if (pass != 2)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages" */
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
			default:
				/* Other commands are ignored by the kernel */
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) {
		if (! got_code_signatures) {
			struct cs_blob *blob;
			/* no embedded signatures: look for detached ones */
			blob = ubc_cs_blob_get(vp, -1, file_offset);
			if (blob != NULL) {
				/* get flags to be applied to the process */
				result->csflags |= blob->csb_flags;
			}
		}

		if (dlp != 0)
			ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

		if (ret == LOAD_SUCCESS) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			} else if ( abi64 ) {
#ifdef __ppc__
				/* Map in 64-bit commpage */
				/* LP64todo - make this clean */
				/*
				 * PPC51: ppc64 is limited to 51-bit addresses.
				 * Memory above that limit is handled specially
				 * at the pmap level.
				 */
				pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
			}
		}
	}

	if (kl_addr)
		kfree(kl_addr, kl_size);

	if (ret == LOAD_SUCCESS)
		(void)ubc_map(vp, PROT_READ | PROT_EXEC);

	return(ret);
}
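/*
 * Background note: segments flagged SG_PROTECTED_VERSION_1 are stored
 * obfuscated in the binary ("binary protection"), except for the first
 * APPLE_UNPROTECTED_HEADER_SIZE bytes of that slice of the file.
 * unprotect_segment_64() hands the protected portion of the mapping to
 * vm_map_apple_protected(), which arranges for pages to be transformed
 * back as they are faulted in; on non-i386 builds it collapses to a
 * no-op macro.
 */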
#if __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment_64(
	uint64_t	file_off,
	uint64_t	file_size,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
	LOAD_SUCCESS
#endif	/* __i386__ */
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	__unused off_t		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp->fileoff & PAGE_MASK) != 0)
		return LOAD_BADMACHO;

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

#if 0	/* XXX (4596982) this interferes with Rosetta */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
#endif

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
				&map_addr, map_size, (vm_offset_t)0,
				VM_FLAGS_FIXED,	pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
			     NULL, 0, FALSE,
			     scp->initprot, scp->maxprot,
			     VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64((uint64_t) scp->fileoff,
					   (uint64_t) scp->filesize,
					   map,
					   (vm_map_offset_t) map_addr,
					   (vm_map_size_t) map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	__unused off_t			end_of_file,
	vm_map_t			map,
	load_result_t			*result
)
{
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);
	/*
	 * Make sure the segment is page-aligned in the file.
	 */
	if ((scp64->fileoff & PAGE_MASK_64) != 0)
		return LOAD_BADMACHO;

	seg_size = round_page_64(scp64->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 *	Round sizes to page size.
	 */
	map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
	map_addr = round_page_64(scp64->vmaddr);

	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment:  it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}

	map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */
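	/*
	 * Illustration of what the page-zero check above matches: the
	 * linker emits a __PAGEZERO segment with vmaddr 0, fileoff 0,
	 * filesize 0 and initprot == maxprot == VM_PROT_NONE; for a
	 * 64-bit executable it conventionally spans the low 4GB.
	 */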
	if (map_size > 0) {
		initprot = (scp64->initprot) & VM_PROT_ALL;
		maxprot = (scp64->maxprot) & VM_PROT_ALL;
		/*
		 *	Map a copy of the file into the address space.
		 */
		ret = mach_vm_map(map,
				&map_addr, map_size, (mach_vm_offset_t)0,
				VM_FLAGS_FIXED,	pager, map_offset, TRUE,
				initprot, maxprot,
				VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 *	If the file didn't end on a page boundary,
		 *	we need to zero the leftover.
		 */
		delta_size = map_size - scp64->filesize;
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp64->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
	}

	/*
	 *	If the virtual size of the segment is greater
	 *	than the size from the file, we need to allocate
	 *	zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp64->initprot, scp64->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
		result->mach_header = map_addr;

	if (scp64->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64(scp64->fileoff,
					   scp64->filesize,
					   map,
					   map_addr,
					   map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack = 0;

	task = get_threadtask(thread);

	/* if count is 0; same as thread */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
				result->customstack = 1;
		else
				result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 *	Resume thread now, note that this means that the thread
	 *	commands should appear after all the load commands to
	 *	be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int		customstack = 0;

	if (result->thread_count != 0) {
printf("load_unixthread: already have a thread!");
		return (LOAD_FAILURE);
	}

	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
		       &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
			result->customstack = 1;
	else
			result->customstack = 0;

	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
				sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS) {
		return(LOAD_FAILURE);
	}

	/*
	 *	Set the new thread state; iterate through the state flavors in
	 *	the mach-o file.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		thread_size = (size+2)*sizeof(unsigned long);
		if (thread_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
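/*
 * For reference, the state blob walked above (and in the two routines
 * below) is laid out in the LC_THREAD/LC_UNIXTHREAD payload as repeated
 * (flavor, count, state) records of 32-bit words:
 *
 *	unsigned long	flavor;		// e.g. x86_THREAD_STATE32
 *	unsigned long	count;		// length of state[] in words
 *	unsigned long	state[count];	// flavor-specific register file
 *
 * which is why each record consumes (size + 2) * sizeof(unsigned long)
 * bytes of the command.
 */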
static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		stack_size = (size+2)*sizeof(unsigned long);
		if (stack_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	entry_size;

	/*
	 *	Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		entry_size = (size+2)*sizeof(unsigned long);
		if (entry_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS) {
			return(LOAD_FAILURE);
		}
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thread,
	int			depth,
	load_result_t		*result,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
	struct mach_header	header;
	off_t			file_offset = 0; /* set by get_macho_vnode() */
	off_t			macho_size = 0;	/* set by get_macho_vnode() */
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 *	Check for a proper null-terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	myresult = load_result_null;

	/*
	 *	First try to map dyld in directly.  This should work most of
	 *	the time since there shouldn't normally be something already
	 *	mapped to its address.
	 */
	ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
				depth, &myresult);

	/*
	 *	If it turned out something was in the way, then we'll take
	 *	this longer path to map dyld into a temporary map and
	 *	copy it into destination map at a different address.
	 */
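	/*
	 * Roughly: (1) parse dyld into a throwaway copy_map, (2) allocate
	 * an equally sized hole anywhere in the target map, (3) move the
	 * pages across with vm_map_copyin()/vm_map_copy_overwrite(), and
	 * (4) slide the recorded entry point by (map_addr - dyl_start).
	 */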
	if (ret == LOAD_NOSPACE) {

		/*
		 *	Use a temporary map to do the work.
		 */
		copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
						     is_64bit),
					 get_map_min(map), get_map_max(map), TRUE);
		if (VM_MAP_NULL == copy_map) {
			ret = LOAD_RESOURCE;
			goto out;
		}

		myresult = load_result_null;

		ret = parse_machfile(vp, copy_map, thread, &header,
					file_offset, macho_size,
					depth, &myresult);

		if (ret) {
			vm_map_deallocate(copy_map);
			goto out;
		}

		if (get_map_nentries(copy_map) > 0) {

			dyl_start = mach_get_vm_start(copy_map);
			dyl_length = mach_get_vm_end(copy_map) - dyl_start;

			map_addr = dyl_start;
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

			if (ret != KERN_SUCCESS) {
				vm_map_deallocate(copy_map);
				ret = LOAD_NOSPACE;
				goto out;
			}

			ret = vm_map_copyin(copy_map,
					    (vm_map_address_t)dyl_start,
					    (vm_map_size_t)dyl_length,
					    TRUE, &tmp);
			if (ret != KERN_SUCCESS) {
				(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
				vm_map_deallocate(copy_map);
				goto out;
			}

			ret = vm_map_copy_overwrite(map,
					     (vm_map_address_t)map_addr,
					     tmp, FALSE);
			if (ret != KERN_SUCCESS) {
				vm_map_copy_discard(tmp);
				(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
				vm_map_deallocate(copy_map);
				goto out;
			}

			if (map_addr != dyl_start)
				myresult.entry_point += (map_addr - dyl_start);
		} else {
			ret = LOAD_FAILURE;
		}

		vm_map_deallocate(copy_map);
	}

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		(void)ubc_map(vp, PROT_READ | PROT_EXEC);
	}
out:
	vnode_put(vp);
	return (ret);
}
int
load_code_signature(
	struct linkedit_data_command	*lcp,
	struct vnode			*vp,
	off_t				macho_offset,
	off_t				macho_size,
	cpu_type_t			cputype,
	load_result_t			*result)
{
	int		ret;
	kern_return_t	kr;
	vm_offset_t	addr;
	int		resid;
	struct cs_blob	*blob;
	int		error;

	addr = 0;
	blob = NULL;

	if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
	    lcp->dataoff + lcp->datasize > macho_size) {
		ret = LOAD_BADMACHO;
		goto out;
	}

	blob = ubc_cs_blob_get(vp, cputype, -1);
	if (blob != NULL) {
		/* we already have a blob for this vnode and cputype */
		if (blob->csb_cpu_type == cputype &&
		    blob->csb_base_offset == macho_offset &&
		    blob->csb_mem_size == lcp->datasize) {
			/* it matches the blob we want here: we're done */
			ret = LOAD_SUCCESS;
		} else {
			/* the blob has changed for this vnode: fail ! */
			ret = LOAD_BADMACHO;
		}
		goto out;
	}

	kr = kmem_alloc(kernel_map, &addr, round_page(lcp->datasize));
	if (kr != KERN_SUCCESS) {
		ret = LOAD_NOSPACE;
		goto out;
	}

	resid = 0;
	error = vn_rdwr(UIO_READ,
			vp,
			(caddr_t) addr,
			lcp->datasize,
			macho_offset + lcp->dataoff,
			UIO_SYSSPACE32,
			0,
			kauth_cred_get(),
			&resid,
			current_proc());
	if (error || resid != 0) {
		ret = LOAD_IOERROR;
		goto out;
	}

	if (ubc_cs_blob_add(vp,
			    cputype,
			    macho_offset,
			    addr,
			    lcp->datasize)) {
		ret = LOAD_FAILURE;
		goto out;
	} else {
		/* ubc_cs_blob_add() has consumed "addr" */
		addr = 0;
	}

	blob = ubc_cs_blob_get(vp, cputype, -1);

	ret = LOAD_SUCCESS;
out:
	if (result && ret == LOAD_SUCCESS) {
		result->csflags |= blob->csb_flags;
	}
	if (addr != 0) {
		kmem_free(kernel_map, addr, round_page(lcp->datasize));
		addr = 0;
	}

	return ret;
}
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
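/*
 * For reference, a fat (Universal) file begins with a big-endian
 * fat_header followed by nfat_arch fat_arch descriptors:
 *
 *	struct fat_header { uint32_t magic; uint32_t nfat_arch; };
 *	struct fat_arch   { cpu_type_t cputype; cpu_subtype_t cpusubtype;
 *	                    uint32_t offset; uint32_t size; uint32_t align; };
 *
 * fatfile_getarch_with_bits() picks the fat_arch whose cputype (masked
 * with CPU_ARCH_MASK) matches, and hands back its offset and size.
 */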
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	vfs_context_t		ctx = vfs_context_current();
	proc_t			p = vfs_context_proc(ctx);
	kauth_cred_t		kerncred;
	struct nameidata nid, *ndp;
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int			resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char	pad[512];
	} header;
	off_t fsize = (off_t)0;
	int err2;

	/*
	 * Capture the kernel credential for use in the actual read of the
	 * file, since the user doing the execution may have execute rights
	 * but not read rights, but to exec something, we have to either map
	 * or read it into the new process address space, which requires
	 * read rights.  This is to deal with lack of common credential
	 * serialization code which would treat NOCRED as "serialize 'root'".
	 */
	kerncred = vfs_context_ucred(vfs_context_kernel());

	ndp = &nid;

	/* init the namei data to point at the user's program name */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), ctx);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT) {
			error = LOAD_ENOENT;
		} else {
			error = LOAD_FAILURE;
		}
		return(error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header.mach_header.magic == MH_MAGIC ||
	    header.mach_header.magic == MH_MAGIC_64)
		is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
		is_fat = TRUE;
	else {
		error = LOAD_BADMACHO;
		goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC &&
		    header.mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
			return(LOAD_BADARCH);

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header.mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);

	return(error);

bad2:
	err2 = VNOP_CLOSE(vp, FREAD, ctx);