/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <uuid/uuid.h>
#include <mach/mach.h>
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/sysctl.h>
#include <sys/dtrace.h>
#include <sys/errno.h>
#include <System/sys/mman.h>
#include <System/sys/csr.h>
#include <System/machine/cpu_capabilities.h>
#include <bootstrap.h>
#include <CommonCrypto/CommonDigest.h>
#include <sandbox/private.h>
#include <dispatch/dispatch.h>

#include "LaunchCache.h"
#include "LaunchCacheFormat.h"
#include "Loading.h"
#include "Logging.h"
#include "Diagnostics.h"
#include "MachOParser.h"
#include "dyld_cache_format.h"
#include "closuredProtocol.h"
namespace dyld3 {
namespace loader {

#if DYLD_IN_PROCESS

void log(const char* m, ...);
static bool sandboxBlocked(const char* path, const char* kind)
{
#if BUILDING_LIBDYLD || !TARGET_IPHONE_SIMULATOR
    sandbox_filter_type filter = (sandbox_filter_type)(SANDBOX_FILTER_PATH | SANDBOX_CHECK_NO_REPORT);
    return ( sandbox_check(getpid(), kind, filter, path) > 0 );
#else
    // sandbox calls not yet supported in dyld_sim
    return false;
#endif
}

static bool sandboxBlockedMmap(const char* path)
{
    return sandboxBlocked(path, "file-map-executable");
}

static bool sandboxBlockedOpen(const char* path)
{
    return sandboxBlocked(path, "file-read-data");
}

static bool sandboxBlockedStat(const char* path)
{
    return sandboxBlocked(path, "file-read-metadata");
}
#if TARGET_OS_WATCH || TARGET_OS_BRIDGE
// align to the device's mach-o page size: 16KB pages on arm64 devices, 4KB otherwise
static uint64_t pageAlign(uint64_t value)
{
#if __arm64__
    return (value + 0x3FFF) & (-0x4000);
#else
    return (value + 0xFFF) & (-0x1000);
#endif
}
#endif
static void updateSliceOffset(uint64_t& sliceOffset, uint64_t codeSignEndOffset, size_t fileLen)
{
#if TARGET_OS_WATCH || TARGET_OS_BRIDGE
    if ( sliceOffset != 0 ) {
        if ( pageAlign(codeSignEndOffset) == pageAlign(fileLen) ) {
            // cache builder saw fat file, but file is now thin
            sliceOffset = 0;
        }
    }
#endif
}
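// Worked example for updateSliceOffset() (numbers are illustrative, not taken from any
// real image): suppose the closure was built from a fat file in which the slice began at
// sliceOffset=0x8000 and the slice's code signature ended at offset 0x23C40 within the
// slice. On a 16KB-page device, pageAlign(0x23C40) == 0x24000. If the installed file was
// later thinned to just that slice, its length is now about 0x23C40 bytes, so
// pageAlign(fileLen) is also 0x24000; the two page-aligned sizes match and sliceOffset is
// reset to 0, meaning the mach-o is mapped from the start of the now-thin file.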
static const mach_header* mapImage(const dyld3::launch_cache::Image image, Diagnostics& diag, LogFunc log_loads, LogFunc log_segments)
{
    uint64_t       sliceOffset        = image.sliceOffsetInFile();
    const uint64_t totalVMSize        = image.vmSizeToMap();
    const uint32_t codeSignFileOffset = image.asDiskImage()->codeSignFileOffset;
    const uint32_t codeSignFileSize   = image.asDiskImage()->codeSignFileSize;

    // open file
    int fd = ::open(image.path(), O_RDONLY, 0);
    if ( fd == -1 ) {
        int openErr = errno;
        if ( (openErr == EPERM) && sandboxBlockedOpen(image.path()) )
            diag.error("file system sandbox blocked open(\"%s\", O_RDONLY)", image.path());
        else
            diag.error("open(\"%s\", O_RDONLY) failed with errno=%d", image.path(), openErr);
        return nullptr;
    }

    // get file info
    struct stat statBuf;
#if TARGET_IPHONE_SIMULATOR
    if ( stat(image.path(), &statBuf) != 0 ) {
#else
    if ( fstat(fd, &statBuf) != 0 ) {
#endif
        int statErr = errno;
        if ( (statErr == EPERM) && sandboxBlockedStat(image.path()) )
            diag.error("file system sandbox blocked stat(\"%s\")", image.path());
        else
            diag.error("stat(\"%s\") failed with errno=%d", image.path(), statErr);
        close(fd);
        return nullptr;
    }

    // verify file has not changed since closure was built
    if ( image.validateUsingModTimeAndInode() ) {
        if ( (statBuf.st_mtime != image.fileModTime()) || (statBuf.st_ino != image.fileINode()) ) {
            diag.error("file mtime/inode changed since closure was built for '%s'", image.path());
            close(fd);
            return nullptr;
        }
    }

    // handle OS dylibs being thinned after closure was built
    if ( image.group().groupNum() == 1 )
        updateSliceOffset(sliceOffset, codeSignFileOffset+codeSignFileSize, (size_t)statBuf.st_size);

    // register code signature
    uint64_t coveredCodeLength = UINT64_MAX;
    if ( codeSignFileOffset != 0 ) {
        fsignatures_t siginfo;
        siginfo.fs_file_start = sliceOffset;                           // start of mach-o slice in fat file
        siginfo.fs_blob_start = (void*)(long)(codeSignFileOffset);     // start of CD in mach-o file
        siginfo.fs_blob_size  = codeSignFileSize;                      // size of CD
        int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
        if ( result == -1 ) {
            int errnoCopy = errno;
            if ( (errnoCopy == EPERM) || (errnoCopy == EBADEXEC) ) {
                diag.error("code signature invalid (errno=%d) sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image.path());
            }
            else {
                diag.error("fcntl(fd, F_ADDFILESIGS_RETURN) failed with errno=%d, sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
                           errnoCopy, sliceOffset, codeSignFileOffset, codeSignFileSize, image.path());
            }
            close(fd);
            return nullptr;
        }
        // on return, fs_file_start holds how much of the file the signature covers
        coveredCodeLength = siginfo.fs_file_start;
        if ( coveredCodeLength < image.asDiskImage()->codeSignFileOffset ) {
            diag.error("code signature does not cover entire file up to signature");
            close(fd);
            return nullptr;
        }

        // <rdar://problem/32684903> always call F_CHECK_LV to preflight
        fchecklv checkInfo;
        char     messageBuffer[512];
        messageBuffer[0]                = '\0';
        checkInfo.lv_file_start         = sliceOffset;
        checkInfo.lv_error_message_size = sizeof(messageBuffer);
        checkInfo.lv_error_message      = messageBuffer;
        int res = fcntl(fd, F_CHECK_LV, &checkInfo);
        if ( res == -1 ) {
            diag.error("code signature in (%s) not valid for use in process: %s", image.path(), messageBuffer);
            close(fd);
            return nullptr;
        }
    }

    // reserve address range
    vm_address_t loadAddress = 0;
    kern_return_t r = vm_allocate(mach_task_self(), &loadAddress, (vm_size_t)totalVMSize, VM_FLAGS_ANYWHERE);
    if ( r != KERN_SUCCESS ) {
        diag.error("vm_allocate(size=0x%0llX) failed with result=%d", totalVMSize, r);
        close(fd);
        return nullptr;
    }

    if ( sliceOffset != 0 )
        log_segments("dyld: Mapping %s (slice offset=%llu)\n", image.path(), sliceOffset);
    else
        log_segments("dyld: Mapping %s\n", image.path());

    // map each segment of the file into the reserved address range
    __block bool           mmapFailure               = false;
    __block const uint8_t* codeSignatureStartAddress = nullptr;
    __block const uint8_t* linkeditEndAddress        = nullptr;
    __block bool           mappedFirstSegment        = false;
    image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
        // <rdar://problem/32363581> Mapping zero filled segments fails with mmap of size 0
        if ( fileSize == 0 )
            return;
        void* segAddress = mmap((void*)(loadAddress+vmOffset), fileSize, permissions, MAP_FIXED | MAP_PRIVATE, fd, sliceOffset+fileOffset);
        int mmapErr = errno;
        if ( segAddress == MAP_FAILED ) {
            if ( mmapErr == EPERM ) {
                if ( sandboxBlockedMmap(image.path()) )
                    diag.error("file system sandbox blocked mmap() of '%s'", image.path());
                else
                    diag.error("code signing blocked mmap() of '%s'", image.path());
            }
            else {
                diag.error("mmap(addr=0x%0llX, size=0x%08X) failed with errno=%d for %s", loadAddress+vmOffset, fileSize, mmapErr, image.path());
            }
            mmapFailure = true;
            stop = true;
        }
        else if ( codeSignFileOffset > fileOffset ) {
            // remember where the code signature is mapped (it lives in __LINKEDIT)
            codeSignatureStartAddress = (uint8_t*)segAddress + (codeSignFileOffset-fileOffset);
            linkeditEndAddress        = (uint8_t*)segAddress + vmSize;
        }
        // sanity check first segment is mach-o header
        if ( (segAddress != MAP_FAILED) && !mappedFirstSegment ) {
            mappedFirstSegment = true;
            if ( !MachOParser::isMachO(diag, segAddress, fileSize) ) {
                mmapFailure = true;
                stop = true;
            }
        }
        if ( !mmapFailure ) {
            MachOParser parser((mach_header*)loadAddress);
            log_segments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", parser.segmentName(segIndex),
                         (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                         (long)segAddress, (long)segAddress+vmSize-1);
        }
    });
    if ( mmapFailure ) {
        vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
        close(fd);
        return nullptr;
    }
    close(fd);

    // verify file has not changed since closure was built by checking code signature has not changed
    if ( image.validateUsingCdHash() ) {
        uint8_t cdHash[20];
        if ( codeSignatureStartAddress == nullptr ) {
            diag.error("code signature missing");
        }
        else if ( codeSignatureStartAddress+codeSignFileSize > linkeditEndAddress ) {
            diag.error("code signature extends beyond end of __LINKEDIT");
        }
        else if ( MachOParser::cdHashOfCodeSignature(codeSignatureStartAddress, codeSignFileSize, cdHash) ) {
            if ( memcmp(image.cdHash16(), cdHash, 16) != 0 )
                diag.error("code signature changed since closure was built");
        }
        else {
            diag.error("code signature format invalid");
        }
        if ( diag.hasError() ) {
            vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return nullptr;
        }
    }

#if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
    // tell kernel about fairplay encrypted regions
    uint32_t fpTextOffset;
    uint32_t fpSize;
    if ( image.isFairPlayEncrypted(fpTextOffset, fpSize) ) {
        const mach_header* mh = (mach_header*)loadAddress;
        int result = mremap_encrypted(((uint8_t*)mh) + fpTextOffset, fpSize, 1, mh->cputype, mh->cpusubtype);
        if ( result != 0 ) {
            diag.error("could not register fairplay decryption, mremap_encrypted() => %d", result);
            vm_deallocate(mach_task_self(), loadAddress, (vm_size_t)totalVMSize);
            return nullptr;
        }
    }
#endif

    log_loads("dyld: load %s\n", image.path());

    return (mach_header*)loadAddress;
}
void unmapImage(const launch_cache::binary_format::Image* binImage, const mach_header* loadAddress)
{
    assert(loadAddress != nullptr);
    launch_cache::Image image(binImage);
    vm_deallocate(mach_task_self(), (vm_address_t)loadAddress, (vm_size_t)(image.vmSizeToMap()));
}
static void applyFixupsToImage(Diagnostics& diag, const mach_header* imageMH, const launch_cache::binary_format::Image* imageData,
                               launch_cache::TargetSymbolValue::LoadedImages& imageResolver, LogFunc log_fixups)
{
    launch_cache::Image image(imageData);
    MachOParser         imageParser(imageMH);
    // Note, these are cached here to avoid recalculating them on every loop iteration
    const launch_cache::ImageGroup& imageGroup = image.group();
    const char*                     leafName   = image.leafName();
    intptr_t                        slide      = imageParser.getSlide();
    image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t protections, bool& segStop) {
        if ( !image.segmentHasFixups(segIndex) )
            return;
        const launch_cache::MemoryRange segContent = { (char*)imageMH + vmOffset, vmSize };
#if __i386__
        // i386 may have text relocs (e.g. in the __IMPORT segment), so temporarily make the segment writable
        bool textRelocs = ((protections & VM_PROT_WRITE) == 0);
        if ( textRelocs ) {
            kern_return_t r = vm_protect(mach_task_self(), (vm_address_t)segContent.address, (vm_size_t)segContent.size, false, VM_PROT_WRITE | VM_PROT_READ);
            if ( r != KERN_SUCCESS ) {
                diag.error("vm_protect() failed trying to make text segment writable, result=%d", r);
                segStop = true;
                return;
            }
        }
#else
        if ( (protections & VM_PROT_WRITE) == 0 ) {
            diag.error("fixups found in non-writable segment of %s", image.path());
            segStop = true;
            return;
        }
#endif
        image.forEachFixup(segIndex, segContent, ^(uint64_t segOffset, launch_cache::Image::FixupKind kind, launch_cache::TargetSymbolValue targetValue, bool& stop) {
            if ( segOffset > segContent.size ) {
                diag.error("fixup is past end of segment. segOffset=0x%0llX, segSize=0x%0llX, segIndex=%d", segOffset, segContent.size, segIndex);
                stop = true;
                return;
            }
            uintptr_t* fixUpLoc = (uintptr_t*)((char*)(segContent.address) + segOffset);
            uintptr_t  value;
            uint32_t   rel32;
            uint8_t*   jumpSlot;
            //dyld::log("fixup loc=%p\n", fixUpLoc);
            switch ( kind ) {
                case launch_cache::Image::FixupKind::rebase64:
                case launch_cache::Image::FixupKind::rebase32:
                    log_fixups("dyld: fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
                    *fixUpLoc += slide;
                    break;
                case launch_cache::Image::FixupKind::bind64:
                case launch_cache::Image::FixupKind::bind32:
                    value = targetValue.resolveTarget(diag, imageGroup, imageResolver);
                    log_fixups("dyld: fixup: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
                    *fixUpLoc = value;
                    break;
                case launch_cache::Image::FixupKind::rebaseText32:
                    log_fixups("dyld: text fixup: %s:%p += %p\n", leafName, fixUpLoc, (void*)slide);
                    *fixUpLoc += slide;
                    break;
                case launch_cache::Image::FixupKind::bindText32:
                    value = targetValue.resolveTarget(diag, imageGroup, imageResolver);
                    log_fixups("dyld: text fixup: %s:%p = %p\n", leafName, fixUpLoc, (void*)value);
                    *fixUpLoc = value;
                    break;
                case launch_cache::Image::FixupKind::bindTextRel32:
                    // CALL instruction uses pc-rel value
                    value = targetValue.resolveTarget(diag, imageGroup, imageResolver);
                    log_fixups("dyld: CALL fixup: %s:%p = %p (pc+0x%08X)\n", leafName, fixUpLoc, (void*)value, (value - (uintptr_t)(fixUpLoc)));
                    *fixUpLoc = (value - (uintptr_t)(fixUpLoc));
                    break;
                case launch_cache::Image::FixupKind::bindImportJmp32:
                    // JMP instruction in __IMPORT segment uses pc-rel value
                    jumpSlot = (uint8_t*)fixUpLoc;
                    value    = targetValue.resolveTarget(diag, imageGroup, imageResolver);
                    rel32    = (value - ((uintptr_t)(fixUpLoc)+5));
                    log_fixups("dyld: JMP fixup: %s:%p = %p (pc+0x%08X)\n", leafName, fixUpLoc, (void*)value, rel32);
                    jumpSlot[0] = 0xE9; // JMP rel32
                    jumpSlot[1] = rel32 & 0xFF;
                    jumpSlot[2] = (rel32 >> 8)  & 0xFF;
                    jumpSlot[3] = (rel32 >> 16) & 0xFF;
                    jumpSlot[4] = (rel32 >> 24) & 0xFF;
                    break;
                default:
                    diag.error("unknown fixup kind %d", kind);
                    stop = true;
            }
        });
        if ( diag.hasError() ) {
            segStop = true;
            return;
        }
#if __i386__
        if ( textRelocs ) {
            // restore the segment's original protections
            kern_return_t r = vm_protect(mach_task_self(), (vm_address_t)segContent.address, (vm_size_t)segContent.size, false, protections);
            if ( r != KERN_SUCCESS ) {
                diag.error("vm_protect() failed trying to make text segment non-writable, result=%d", r);
                segStop = true;
                return;
            }
        }
#endif
    });
}
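// Note on the bindImportJmp32 case above: the five bytes written form an i386 near jump,
// opcode 0xE9 followed by a little-endian 32-bit displacement measured from the end of
// the 5-byte instruction. Illustrative arithmetic (addresses are made up): if the jump
// slot is at 0x1000 and the resolved target is at 0x5010, then
// rel32 = 0x5010 - (0x1000 + 5) = 0x400B, and the bytes written are E9 0B 40 00 00.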
class VIS_HIDDEN CurrentLoadImages : public launch_cache::TargetSymbolValue::LoadedImages
{
public:
                                CurrentLoadImages(launch_cache::DynArray<ImageInfo>& images, const uint8_t* cacheAddr)
                                    : _dyldCacheLoadAddress(cacheAddr), _images(images) { }

    virtual const uint8_t*      dyldCacheLoadAddressForImage();
    virtual const mach_header*  loadAddressFromGroupAndIndex(uint32_t groupNum, uint32_t indexInGroup);
    virtual void                forEachImage(void (^handler)(uint32_t anIndex, const launch_cache::binary_format::Image*, const mach_header*, bool& stop));
    virtual void                setAsNeverUnload(uint32_t anIndex) { _images[anIndex].neverUnload = true; }

private:
    const uint8_t*                      _dyldCacheLoadAddress;
    launch_cache::DynArray<ImageInfo>&  _images;
};

const uint8_t* CurrentLoadImages::dyldCacheLoadAddressForImage()
{
    return _dyldCacheLoadAddress;
}

const mach_header* CurrentLoadImages::loadAddressFromGroupAndIndex(uint32_t groupNum, uint32_t indexInGroup)
{
    __block const mach_header* result = nullptr;
    forEachImage(^(uint32_t anIndex, const launch_cache::binary_format::Image* imageData, const mach_header* mh, bool& stop) {
        launch_cache::Image image(imageData);
        launch_cache::ImageGroup imageGroup = image.group();
        if ( imageGroup.groupNum() != groupNum )
            return;
        if ( imageGroup.indexInGroup(imageData) == indexInGroup ) {
            result = mh;
            stop = true;
        }
    });
    return result;
}

void CurrentLoadImages::forEachImage(void (^handler)(uint32_t anIndex, const launch_cache::binary_format::Image*, const mach_header*, bool& stop))
{
    bool stop = false;
    for (int i=0; i < _images.count(); ++i) {
        ImageInfo& info = _images[i];
        handler(i, info.imageData, info.loadAddress, stop);
        if ( stop )
            break;
    }
}

struct DOFInfo {
    const void*        dof;
    const mach_header* imageHeader;
    const char*        imageShortName;
};
static void registerDOFs(const DOFInfo* dofs, uint32_t dofSectionCount, LogFunc log_dofs)
{
    if ( dofSectionCount != 0 ) {
        int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
        if ( fd < 0 ) {
            log_dofs("can't open /dev/" DTRACEMNR_HELPER " to register dtrace DOF sections\n");
        }
        else {
            // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
            uint8_t buffer[sizeof(dof_ioctl_data_t) + dofSectionCount*sizeof(dof_helper_t)];
            dof_ioctl_data_t* ioctlData = (dof_ioctl_data_t*)buffer;

            // fill in buffer with one dof_helper_t per DOF section
            ioctlData->dofiod_count = dofSectionCount;
            for (unsigned int i=0; i < dofSectionCount; ++i) {
                strlcpy(ioctlData->dofiod_helpers[i].dofhp_mod, dofs[i].imageShortName, DTRACE_MODNAMELEN);
                ioctlData->dofiod_helpers[i].dofhp_dof  = (uintptr_t)(dofs[i].dof);
                ioctlData->dofiod_helpers[i].dofhp_addr = (uintptr_t)(dofs[i].dof);
            }

            // tell kernel about all DOF sections en masse
            // pass pointer to ioctlData because ioctl() only copies a fixed size amount of data into kernel
            user_addr_t val = (user_addr_t)(unsigned long)ioctlData;
            if ( ioctl(fd, DTRACEHIOC_ADDDOF, &val) != -1 ) {
                // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
                // Note, the closure marked the image as being never unload, so we don't need to keep the ID around
                // or support unregistering it later.
                for (unsigned int i=0; i < dofSectionCount; ++i) {
                    log_dofs("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
                             dofs[i].dof, dofs[i].imageShortName, (int)(ioctlData->dofiod_helpers[i].dofhp_dof));
                }
            }
            else {
                //dyld::log( "dyld: ioctl to register dtrace DOF section failed\n");
            }
            close(fd);
        }
    }
}
void mapAndFixupImages(Diagnostics& diag, launch_cache::DynArray<ImageInfo>& images, const uint8_t* cacheLoadAddress,
                       LogFunc log_loads, LogFunc log_segments, LogFunc log_fixups, LogFunc log_dofs)
{
    // scan array and map images not already loaded
    for (int i=0; i < images.count(); ++i) {
        ImageInfo& info = images[i];
        const dyld3::launch_cache::Image image(info.imageData);
        if ( info.loadAddress != nullptr ) {
            // log main executable's segments
            if ( (info.groupNum == 2) && (info.loadAddress->filetype == MH_EXECUTE) && !info.previouslyFixedUp ) {
                if ( log_segments("dyld: mapped by kernel %s\n", image.path()) ) {
                    MachOParser parser(info.loadAddress);
                    image.forEachDiskSegment(^(uint32_t segIndex, uint32_t fileOffset, uint32_t fileSize, int64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool& stop) {
                        uint64_t start = (long)info.loadAddress + vmOffset;
                        uint64_t end   = start+vmSize-1;
                        if ( (segIndex == 0) && (permissions == 0) ) {
                            // __PAGEZERO spans from address zero up to the load address
                            start = 0;
                            end   = ((uint64_t)info.loadAddress)-1;
                        }
                        log_segments("%14s (%c%c%c) 0x%012llX->0x%012llX \n", parser.segmentName(segIndex),
                                     (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                     start, end);
                    });
                }
            }
            // skip over ones already loaded
            continue;
        }
        if ( image.isDiskImage() ) {
            //dyld::log("need to load image[%d] %s\n", i, image.path());
            info.loadAddress = mapImage(image, diag, log_loads, log_segments);
            if ( diag.hasError() )
                break; // out of for loop
            info.justMapped = true;
        }
        else {
            // image is in the dyld cache (already mapped); verify any on-disk copy still matches
            bool expectedOnDisk   = image.group().dylibsExpectedOnDisk();
            bool overridableDylib = image.overridableDylib();
            if ( expectedOnDisk || overridableDylib ) {
                struct stat statBuf;
                if ( ::stat(image.path(), &statBuf) == 0 ) {
                    if ( expectedOnDisk ) {
                        // macOS case: verify dylib file info matches what it was when cache was built
                        if ( image.fileModTime() != statBuf.st_mtime ) {
                            diag.error("cached dylib mod-time has changed, dylib cache has: 0x%08llX, file has: 0x%08lX, for: %s", image.fileModTime(), (long)statBuf.st_mtime, image.path());
                            break; // out of for loop
                        }
                        if ( image.fileINode() != statBuf.st_ino ) {
                            diag.error("cached dylib inode has changed, dylib cache has: 0x%08llX, file has: 0x%08llX, for: %s", image.fileINode(), statBuf.st_ino, image.path());
                            break; // out of for loop
                        }
                    }
                    else {
                        // iOS internal: dylib override installed
                        diag.error("cached dylib overridden: %s", image.path());
                        break; // out of for loop
                    }
                }
                else {
                    if ( expectedOnDisk ) {
                        // macOS case: dylib that existed when cache was built no longer exists
                        diag.error("missing cached dylib: %s", image.path());
                        break; // out of for loop
                    }
                }
            }
            info.loadAddress           = (mach_header*)(cacheLoadAddress + image.cacheOffset());
            info.justUsedFromDyldCache = true;
            if ( log_segments("dyld: Using from dyld cache %s\n", image.path()) ) {
                MachOParser parser(info.loadAddress);
                image.forEachCacheSegment(^(uint32_t segIndex, uint64_t vmOffset, uint64_t vmSize, uint8_t permissions, bool &stop) {
                    log_segments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", parser.segmentName(segIndex),
                                 (permissions & PROT_READ) ? 'r' : '.', (permissions & PROT_WRITE) ? 'w' : '.', (permissions & PROT_EXEC) ? 'x' : '.' ,
                                 (long)cacheLoadAddress+vmOffset, (long)cacheLoadAddress+vmOffset+vmSize-1);
                });
            }
        }
    }
    if ( diag.hasError() ) {
        // back out and unmap any images loaded so far
        for (uint32_t j=0; j < images.count(); ++j) {
            ImageInfo& anInfo = images[j];
            if ( anInfo.justMapped )
                unmapImage(anInfo.imageData, anInfo.loadAddress);
            anInfo.loadAddress = nullptr;
        }
        return;
    }

    // apply fixups to newly mapped images
    CurrentLoadImages fixupHelper(images, cacheLoadAddress);
    for (int i=0; i < images.count(); ++i) {
        ImageInfo& info = images[i];
        // images in shared cache do not need fixups applied
        launch_cache::Image image(info.imageData);
        if ( !image.isDiskImage() )
            continue;
        // previously loaded images were previously fixed up
        if ( info.previouslyFixedUp )
            continue;
        //dyld::log("apply fixups to mh=%p, path=%s\n", info.loadAddress, Image(info.imageData).path());
        dyld3::loader::applyFixupsToImage(diag, info.loadAddress, info.imageData, fixupHelper, log_fixups);
        if ( diag.hasError() )
            return;
    }

    // Record dtrace DOFs
    // if ( /* FIXME! register dofs */ )
    {
        // count how many DOF sections the newly processed images contain
        __block uint32_t dofCount = 0;
        for (int i=0; i < images.count(); ++i) {
            ImageInfo& info = images[i];
            launch_cache::Image image(info.imageData);
            // previously loaded images were previously fixed up
            if ( info.previouslyFixedUp )
                continue;
            image.forEachDOF(nullptr, ^(const void* section) {
                // DOFs cause the image to be never-unload
                assert(image.neverUnload());
                ++dofCount;
            });
        }

        // struct RegisteredDOF { const mach_header* mh; int registrationID; };
        DOFInfo dofImages[dofCount];
        __block DOFInfo* dofImagesBase = dofImages;
        dofCount = 0;
        for (int i=0; i < images.count(); ++i) {
            ImageInfo& info = images[i];
            launch_cache::Image image(info.imageData);
            // previously loaded images were previously fixed up
            if ( info.previouslyFixedUp )
                continue;
            image.forEachDOF(info.loadAddress, ^(const void* section) {
                DOFInfo dofInfo;
                dofInfo.dof            = section;
                dofInfo.imageHeader    = info.loadAddress;
                dofInfo.imageShortName = image.leafName();
                dofImagesBase[dofCount++] = dofInfo;
            });
        }
        registerDOFs(dofImages, dofCount, log_dofs);
    }
}
void forEachLineInFile(const char* path, void (^lineHandler)(const char* line, bool& stop))
{
    int fd = dyld::my_open(path, O_RDONLY, 0);
    if ( fd != -1 ) {
        struct stat statBuf;
        if ( fstat(fd, &statBuf) == 0 ) {
            const char* lines = (const char*)mmap(nullptr, (size_t)statBuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if ( lines != MAP_FAILED ) {
                bool stop = false;
                const char* const eof = &lines[statBuf.st_size];
                for (const char* s = lines; s < eof; ++s) {
                    char lineBuffer[MAXPATHLEN];
                    char* t    = lineBuffer;
                    char* tEnd = &lineBuffer[MAXPATHLEN];
                    // copy characters up to the newline (or end of buffer/file)
                    while ( (s < eof) && (t != tEnd) ) {
                        if ( *s == '\n' )
                            break;
                        *t++ = *s++;
                    }
                    *t = '\0';
                    lineHandler(lineBuffer, stop);
                    if ( stop )
                        break;
                }
                munmap((void*)lines, (size_t)statBuf.st_size);
            }
        }
        close(fd);
    }
}
bool internalInstall()
{
#if TARGET_IPHONE_SIMULATOR
    return false;
#elif __IPHONE_OS_VERSION_MIN_REQUIRED
    uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
    return ( (devFlags & 1) == 1 );
#else
    return ( csr_check(CSR_ALLOW_APPLE_INTERNAL) == 0 );
#endif
}
/* Checks to see if there are any args that impact dyld. These args
 * can be set several ways. They will only be honored on development
 * and Apple Internal builds.
 *
 * First the existence of a file is checked for:
 *    /S/L/C/com.apple.dyld/dyld-bootargs
 * If it exists it will be mapped and scanned line by line. If the executable
 * exists in the file then the arguments on its line will be applied. "*" may
 * be used as a wildcard to represent all apps. The first matching line will be
 * used, and the wildcard must be on the last line. Additionally, lines must end
 * with a newline.
 *
 * SAMPLE FILE:
 *
 * /bin/ls:force_dyld2=1
 * /usr/bin/sw_vers:force_dyld2=1
 *
 * If no file exists then the kernel boot-args will be scanned.
 */
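/* For illustration only (a hypothetical line, not from the sample above): a wildcard
 * entry such as
 *
 *    *:force_dyld3=1
 *
 * placed as the last line of dyld-bootargs would apply "force_dyld3=1" to every
 * executable not matched by an earlier line.
 */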
bool bootArgsContains(const char* arg)
{
    //FIXME: Use strnstr(). Unfortunately we are missing an imp libc.
#if TARGET_IPHONE_SIMULATOR
    return false;
#else
    // don't check for boot-args on customer installs
    if ( !internalInstall() )
        return false;

    // check the dyld-bootargs file first
    char pathBuffer[MAXPATHLEN+1];
#if __IPHONE_OS_VERSION_MIN_REQUIRED
    strlcpy(pathBuffer, IPHONE_DYLD_SHARED_CACHE_DIR, sizeof(IPHONE_DYLD_SHARED_CACHE_DIR));
#else
    strlcpy(pathBuffer, MACOSX_DYLD_SHARED_CACHE_DIR, sizeof(MACOSX_DYLD_SHARED_CACHE_DIR));
#endif
    strlcat(pathBuffer, "dyld-bootargs", MAXPATHLEN+1);
    __block bool result = false;
    forEachLineInFile(pathBuffer, ^(const char* line, bool& stop) {
        const char* delim = strchr(line, ':');
        if ( delim == nullptr )
            return;
        char binary[MAXPATHLEN];
        char options[MAXPATHLEN];
        strlcpy(binary, line, MAXPATHLEN);
        binary[delim-line] = '\0';
        strlcpy(options, delim+1, MAXPATHLEN);
        if ( (strcmp(dyld::getExecutablePath(), binary) == 0) || (strcmp("*", binary) == 0) ) {
            result = (strstr(options, arg) != nullptr);
            stop = true;
        }
    });
    if ( result )
        return true;

    // get length of full boot-args string
    size_t len;
    if ( sysctlbyname("kern.bootargs", NULL, &len, NULL, 0) != 0 )
        return false;

    // get copy of boot-args string
    char bootArgsBuffer[len];
    if ( sysctlbyname("kern.bootargs", bootArgsBuffer, &len, NULL, 0) != 0 )
        return false;

    // return true if 'arg' is a sub-string of boot-args
    return (strstr(bootArgsBuffer, arg) != nullptr);
#endif
}
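// Usage sketch (the argument string and the guarded call are hypothetical, for
// illustration only):
//
//     if ( dyld3::loader::bootArgsContains("enable_some_dyld_feature") )
//         useAlternateCodePath();
//
// On customer installs this always returns false, because internalInstall() fails
// before either the dyld-bootargs file or the kernel boot-args are consulted.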
// hack because libdyld.dylib should not link with libc++.dylib
extern "C" void __cxa_pure_virtual() __attribute__((visibility("hidden")));
void __cxa_pure_virtual()
{
    assert(0);
}

#endif // DYLD_IN_PROCESS

} // namespace loader
} // namespace dyld3