2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
30 #include <uuid/uuid.h>
31 #include <mach/mach.h>
33 #include <sys/types.h>
34 #include <sys/sysctl.h>
36 #include <sys/dtrace.h>
37 #include <sys/errno.h>
39 #include <System/sys/mman.h>
40 #include <System/sys/csr.h>
41 #include <System/machine/cpu_capabilities.h>
42 #if !TARGET_OS_SIMULATOR && !TARGET_OS_DRIVERKIT
44 #include <sandbox/private.h>
46 //#include <dispatch/dispatch.h>
47 #include <mach/vm_page_size.h>
49 #include "MachOFile.h"
50 #include "MachOLoaded.h"
51 #include "MachOAnalyzer.h"
56 #include "dyld_cache_format.h"
58 #include "objc-shared-cache.h"
61 void log(const char* m
, ...);
67 // utility to track a set of ImageNum's in use
68 class VIS_HIDDEN ImageNumSet
71 void add(dyld3::closure::ImageNum num
);
72 bool contains(dyld3::closure::ImageNum num
) const;
75 std::bitset
<5120> _bitmap
;
76 dyld3::OverflowSafeArray
<dyld3::closure::ImageNum
> _overflowArray
;
79 void ImageNumSet::add(dyld3::closure::ImageNum num
)
84 _overflowArray
.push_back(num
);
87 bool ImageNumSet::contains(dyld3::closure::ImageNum num
) const
90 return _bitmap
.test(num
);
92 for (dyld3::closure::ImageNum existingNum
: _overflowArray
) {
93 if ( existingNum
== num
)
98 } // namespace anonymous
103 Loader::Loader(const Array
<LoadedImage
>& existingImages
, Array
<LoadedImage
>& newImagesStorage
,
104 const void* cacheAddress
, const Array
<const dyld3::closure::ImageArray
*>& imagesArrays
,
105 const closure::ObjCSelectorOpt
* selOpt
, const Array
<closure::Image::ObjCSelectorImage
>& selImages
,
106 LogFunc logLoads
, LogFunc logSegments
, LogFunc logFixups
, LogFunc logDofs
)
107 : _existingImages(existingImages
), _newImages(newImagesStorage
),
108 _imagesArrays(imagesArrays
), _dyldCacheAddress(cacheAddress
), _dyldCacheSelectorOpt(nullptr),
109 _closureSelectorOpt(selOpt
), _closureSelectorImages(selImages
),
110 _logLoads(logLoads
), _logSegments(logSegments
), _logFixups(logFixups
), _logDofs(logDofs
)
113 // This is only needed for dyld and the launch closure, not the dlopen closures
114 if ( _dyldCacheAddress
!= nullptr ) {
115 _dyldCacheSelectorOpt
= ((const DyldSharedCache
*)_dyldCacheAddress
)->objcOpt()->selopt();
120 void Loader::addImage(const LoadedImage
& li
)
122 _newImages
.push_back(li
);
125 LoadedImage
* Loader::findImage(closure::ImageNum targetImageNum
)
128 // The launch images are different in dyld vs libdyld. In dyld, the new images are
129 // the launch images, while in libdyld, the existing images are the launch images
130 if (LoadedImage
* info
= _launchImagesCache
.findImage(targetImageNum
, _newImages
)) {
134 for (uint64_t index
= 0; index
!= _newImages
.count(); ++index
) {
135 LoadedImage
& info
= _newImages
[index
];
136 if ( info
.image()->representsImageNum(targetImageNum
) ) {
137 // Try cache this entry for next time
138 _launchImagesCache
.tryAddImage(targetImageNum
, index
);
142 #elif BUILDING_LIBDYLD
143 for (const LoadedImage
& info
: _existingImages
) {
144 if ( info
.image()->representsImageNum(targetImageNum
) )
145 return (LoadedImage
*)&info
;
147 for (LoadedImage
& info
: _newImages
) {
148 if ( info
.image()->representsImageNum(targetImageNum
) )
152 #error Must be building dyld or libdyld
157 uintptr_t Loader::resolveTarget(closure::Image::ResolvedSymbolTarget target
)
159 const LoadedImage
* info
;
160 switch ( target
.sharedCache
.kind
) {
161 case closure::Image::ResolvedSymbolTarget::kindSharedCache
:
162 assert(_dyldCacheAddress
!= nullptr);
163 return (uintptr_t)_dyldCacheAddress
+ (uintptr_t)target
.sharedCache
.offset
;
165 case closure::Image::ResolvedSymbolTarget::kindImage
:
166 info
= findImage(target
.image
.imageNum
);
167 assert(info
!= nullptr);
168 return (uintptr_t)(info
->loadedAddress()) + (uintptr_t)target
.image
.offset
;
170 case closure::Image::ResolvedSymbolTarget::kindAbsolute
:
171 if ( target
.absolute
.value
& (1ULL << 62) )
172 return (uintptr_t)(target
.absolute
.value
| 0xC000000000000000ULL
);
174 return (uintptr_t)target
.absolute
.value
;
176 assert(0 && "malformed ResolvedSymbolTarget");
181 void Loader::completeAllDependents(Diagnostics
& diag
, bool& someCacheImageOverridden
)
183 // accumulate all image overrides
184 STACK_ALLOC_ARRAY(ImageOverride
, overrides
, _existingImages
.maxCount() + _newImages
.maxCount());
185 for (const auto anArray
: _imagesArrays
) {
186 // ignore prebuilt Image* in dyld cache
187 if ( anArray
->startImageNum() < dyld3::closure::kFirstLaunchClosureImageNum
)
189 anArray
->forEachImage(^(const dyld3::closure::Image
* image
, bool& stop
) {
190 ImageOverride overrideEntry
;
191 if ( image
->isOverrideOfDyldCacheImage(overrideEntry
.inCache
) ) {
192 someCacheImageOverridden
= true;
193 overrideEntry
.replacement
= image
->imageNum();
194 overrides
.push_back(overrideEntry
);
199 // make cache for fast lookup of already loaded images
200 __block ImageNumSet alreadyLoaded
;
201 for (const LoadedImage
& info
: _existingImages
) {
202 alreadyLoaded
.add(info
.image()->imageNum());
204 alreadyLoaded
.add(_newImages
.begin()->image()->imageNum());
206 // for each image in _newImages, starting at the top image, make sure its dependents are in _allImages
208 while ( (index
< _newImages
.count()) && diag
.noError() ) {
209 const closure::Image
* image
= _newImages
[index
].image();
210 //fprintf(stderr, "completeAllDependents(): looking at dependents of %s\n", image->path());
211 image
->forEachDependentImage(^(uint32_t depIndex
, closure::Image::LinkKind kind
, closure::ImageNum depImageNum
, bool& stop
) {
212 // check if imageNum needs to be changed to an override
213 for (const ImageOverride
& entry
: overrides
) {
214 if ( entry
.inCache
== depImageNum
) {
215 depImageNum
= entry
.replacement
;
219 // check if this dependent is already loaded
220 if ( !alreadyLoaded
.contains(depImageNum
) ) {
221 // if not, look in imagesArrays
222 const closure::Image
* depImage
= closure::ImageArray::findImage(_imagesArrays
, depImageNum
);
223 if ( depImage
!= nullptr ) {
224 //dyld::log(" load imageNum=0x%05X, image path=%s\n", depImageNum, depImage->path());
225 if ( _newImages
.freeCount() == 0 ) {
226 diag
.error("too many initial images");
230 _newImages
.push_back(LoadedImage::make(depImage
));
232 alreadyLoaded
.add(depImageNum
);
235 diag
.error("unable to locate imageNum=0x%04X, depIndex=%d of %s", depImageNum
, depIndex
, image
->path());
244 void Loader::mapAndFixupAllImages(Diagnostics
& diag
, bool processDOFs
, bool fromOFI
)
246 // scan array and map images not already loaded
247 for (LoadedImage
& info
: _newImages
) {
248 if ( info
.loadedAddress() != nullptr ) {
249 // log main executable's segments
250 if ( (info
.loadedAddress()->filetype
== MH_EXECUTE
) && (info
.state() == LoadedImage::State::mapped
) ) {
251 if ( _logSegments("dyld: mapped by kernel %s\n", info
.image()->path()) ) {
252 info
.image()->forEachDiskSegment(^(uint32_t segIndex
, uint32_t fileOffset
, uint32_t fileSize
, int64_t vmOffset
, uint64_t vmSize
, uint8_t permissions
, bool laterReadOnly
, bool& stop
) {
253 uint64_t start
= (long)info
.loadedAddress() + vmOffset
;
254 uint64_t end
= start
+vmSize
-1;
255 if ( (segIndex
== 0) && (permissions
== 0) ) {
258 _logSegments("%14s (%c%c%c) 0x%012llX->0x%012llX \n", info
.loadedAddress()->segmentName(segIndex
),
259 (permissions
& PROT_READ
) ? 'r' : '.', (permissions
& PROT_WRITE
) ? 'w' : '.', (permissions
& PROT_EXEC
) ? 'x' : '.' ,
264 // skip over ones already loaded
267 if ( info
.image()->inDyldCache() ) {
268 if ( info
.image()->overridableDylib() ) {
270 if ( stat(info
.image()->path(), &statBuf
) == 0 ) {
271 // verify file has not changed since closure was built
274 if ( info
.image()->hasFileModTimeAndInode(inode
, mtime
) ) {
275 if ( (statBuf
.st_mtime
!= mtime
) || (statBuf
.st_ino
!= inode
) ) {
276 diag
.error("dylib file mtime/inode changed since closure was built for '%s'", info
.image()->path());
280 diag
.error("dylib file not expected on disk, must be a root '%s'", info
.image()->path());
283 else if ( (_dyldCacheAddress
!= nullptr) && ((dyld_cache_header
*)_dyldCacheAddress
)->dylibsExpectedOnDisk
) {
284 diag
.error("dylib file missing, was in dyld shared cache '%s'", info
.image()->path());
287 if ( diag
.noError() ) {
288 info
.setLoadedAddress((MachOLoaded
*)((uintptr_t)_dyldCacheAddress
+ info
.image()->cacheOffset()));
289 info
.setState(LoadedImage::State::fixedUp
);
290 if ( _logSegments("dyld: Using from dyld cache %s\n", info
.image()->path()) ) {
291 info
.image()->forEachCacheSegment(^(uint32_t segIndex
, uint64_t vmOffset
, uint64_t vmSize
, uint8_t permissions
, bool &stop
) {
292 _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", info
.loadedAddress()->segmentName(segIndex
),
293 (permissions
& PROT_READ
) ? 'r' : '.', (permissions
& PROT_WRITE
) ? 'w' : '.', (permissions
& PROT_EXEC
) ? 'x' : '.' ,
294 (long)info
.loadedAddress()+(long)vmOffset
, (long)info
.loadedAddress()+(long)vmOffset
+(long)vmSize
-1);
300 mapImage(diag
, info
, fromOFI
);
301 if ( diag
.hasError() )
302 break; // out of for loop
306 if ( diag
.hasError() ) {
307 // bummer, need to clean up by unmapping any images just mapped
308 for (LoadedImage
& info
: _newImages
) {
309 if ( (info
.state() == LoadedImage::State::mapped
) && !info
.image()->inDyldCache() && !info
.leaveMapped() ) {
310 _logSegments("dyld: unmapping %s\n", info
.image()->path());
318 for (LoadedImage
& info
: _newImages
) {
319 // images in shared cache do not need fixups applied
320 if ( info
.image()->inDyldCache() )
322 // previously loaded images were previously fixed up
323 if ( info
.state() < LoadedImage::State::fixedUp
) {
324 applyFixupsToImage(diag
, info
);
325 if ( diag
.hasError() )
327 info
.setState(LoadedImage::State::fixedUp
);
331 // find and register dtrace DOFs
333 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(DOFInfo
, dofImages
, _newImages
.count());
334 for (LoadedImage
& info
: _newImages
) {
335 info
.image()->forEachDOF(info
.loadedAddress(), ^(const void* section
) {
337 dofInfo
.dof
= section
;
338 dofInfo
.imageHeader
= info
.loadedAddress();
339 dofInfo
.imageShortName
= info
.image()->leafName();
340 dofImages
.push_back(dofInfo
);
343 registerDOFs(dofImages
);
347 bool Loader::sandboxBlocked(const char* path
, const char* kind
)
349 #if TARGET_OS_SIMULATOR || TARGET_OS_DRIVERKIT
350 // sandbox calls not yet supported in dyld_sim
353 sandbox_filter_type filter
= (sandbox_filter_type
)(SANDBOX_FILTER_PATH
| SANDBOX_CHECK_NO_REPORT
);
354 return ( sandbox_check(getpid(), kind
, filter
, path
) > 0 );
358 bool Loader::sandboxBlockedMmap(const char* path
)
360 return sandboxBlocked(path
, "file-map-executable");
363 bool Loader::sandboxBlockedOpen(const char* path
)
365 return sandboxBlocked(path
, "file-read-data");
368 bool Loader::sandboxBlockedStat(const char* path
)
370 return sandboxBlocked(path
, "file-read-metadata");
373 void Loader::mapImage(Diagnostics
& diag
, LoadedImage
& info
, bool fromOFI
)
375 dyld3::ScopedTimer
timer(DBG_DYLD_TIMING_MAP_IMAGE
, info
.image()->path(), 0, 0);
377 const closure::Image
* image
= info
.image();
378 uint64_t sliceOffset
= image
->sliceOffsetInFile();
379 const uint64_t totalVMSize
= image
->vmSizeToMap();
380 uint32_t codeSignFileOffset
;
381 uint32_t codeSignFileSize
;
382 bool isCodeSigned
= image
->hasCodeSignature(codeSignFileOffset
, codeSignFileSize
);
386 int fd
= dyld::my_open(info
.image()->path(), O_RDONLY
, 0);
388 int fd
= ::open(info
.image()->path(), O_RDONLY
, 0);
392 if ( (openErr
== EPERM
) && sandboxBlockedOpen(image
->path()) )
393 diag
.error("file system sandbox blocked open(\"%s\", O_RDONLY)", image
->path());
395 diag
.error("open(\"%s\", O_RDONLY) failed with errno=%d", image
->path(), openErr
);
401 #if TARGET_OS_SIMULATOR
402 if ( stat(image
->path(), &statBuf
) != 0 ) {
404 if ( fstat(fd
, &statBuf
) != 0 ) {
407 if ( (statErr
== EPERM
) && sandboxBlockedStat(image
->path()) )
408 diag
.error("file system sandbox blocked stat(\"%s\")", image
->path());
410 diag
.error("stat(\"%s\") failed with errno=%d", image
->path(), statErr
);
415 // verify file has not changed since closure was built
418 if ( image
->hasFileModTimeAndInode(inode
, mtime
) ) {
419 if ( (statBuf
.st_mtime
!= mtime
) || (statBuf
.st_ino
!= inode
) ) {
420 diag
.error("file mtime/inode changed since closure was built for '%s'", image
->path());
426 // handle case on iOS where sliceOffset in closure is wrong because file was thinned after cache was built
427 if ( (_dyldCacheAddress
!= nullptr) && !(((dyld_cache_header
*)_dyldCacheAddress
)->dylibsExpectedOnDisk
) ) {
428 if ( sliceOffset
!= 0 ) {
429 if ( round_page_kernel(codeSignFileOffset
+codeSignFileSize
) == round_page_kernel(statBuf
.st_size
) ) {
436 // register code signature
437 uint64_t coveredCodeLength
= UINT64_MAX
;
438 if ( isCodeSigned
) {
439 auto sigTimer
= ScopedTimer(DBG_DYLD_TIMING_ATTACH_CODESIGNATURE
, 0, 0, 0);
440 fsignatures_t siginfo
;
441 siginfo
.fs_file_start
= sliceOffset
; // start of mach-o slice in fat file
442 siginfo
.fs_blob_start
= (void*)(long)(codeSignFileOffset
); // start of CD in mach-o file
443 siginfo
.fs_blob_size
= codeSignFileSize
; // size of CD
444 int result
= fcntl(fd
, F_ADDFILESIGS_RETURN
, &siginfo
);
445 if ( result
== -1 ) {
446 int errnoCopy
= errno
;
447 if ( (errnoCopy
== EPERM
) || (errnoCopy
== EBADEXEC
) ) {
448 diag
.error("code signature invalid (errno=%d) sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
449 errnoCopy
, sliceOffset
, codeSignFileOffset
, codeSignFileSize
, image
->path());
452 diag
.error("fcntl(fd, F_ADDFILESIGS_RETURN) failed with errno=%d, sliceOffset=0x%08llX, codeBlobOffset=0x%08X, codeBlobSize=0x%08X for '%s'",
453 errnoCopy
, sliceOffset
, codeSignFileOffset
, codeSignFileSize
, image
->path());
458 coveredCodeLength
= siginfo
.fs_file_start
;
459 if ( coveredCodeLength
< codeSignFileOffset
) {
460 diag
.error("code signature does not cover entire file up to signature");
466 // <rdar://problem/41015217> dyld should use F_CHECK_LV even on unsigned binaries
468 // <rdar://problem/32684903> always call F_CHECK_LV to preflight
470 char messageBuffer
[512];
471 messageBuffer
[0] = '\0';
472 checkInfo
.lv_file_start
= sliceOffset
;
473 checkInfo
.lv_error_message_size
= sizeof(messageBuffer
);
474 checkInfo
.lv_error_message
= messageBuffer
;
475 int res
= fcntl(fd
, F_CHECK_LV
, &checkInfo
);
477 diag
.error("code signature in (%s) not valid for use in process: %s", image
->path(), messageBuffer
);
483 // reserve address range
484 vm_address_t loadAddress
= 0;
485 kern_return_t r
= vm_allocate(mach_task_self(), &loadAddress
, (vm_size_t
)totalVMSize
, VM_FLAGS_ANYWHERE
);
486 if ( r
!= KERN_SUCCESS
) {
487 diag
.error("vm_allocate(size=0x%0llX) failed with result=%d", totalVMSize
, r
);
492 if ( sliceOffset
!= 0 )
493 _logSegments("dyld: Mapping %s (slice offset=%llu)\n", image
->path(), sliceOffset
);
495 _logSegments("dyld: Mapping %s\n", image
->path());
498 __block
bool mmapFailure
= false;
499 __block
const uint8_t* codeSignatureStartAddress
= nullptr;
500 __block
const uint8_t* linkeditEndAddress
= nullptr;
501 __block
bool mappedFirstSegment
= false;
502 __block
uint64_t maxFileOffset
= 0;
503 image
->forEachDiskSegment(^(uint32_t segIndex
, uint32_t fileOffset
, uint32_t fileSize
, int64_t vmOffset
, uint64_t vmSize
, uint8_t permissions
, bool laterReadOnly
, bool& stop
) {
504 // <rdar://problem/32363581> Mapping zero filled segments fails with mmap of size 0
507 void* segAddress
= mmap((void*)(loadAddress
+vmOffset
), fileSize
, permissions
, MAP_FIXED
| MAP_PRIVATE
, fd
, sliceOffset
+fileOffset
);
509 if ( segAddress
== MAP_FAILED
) {
510 if ( mmapErr
== EPERM
) {
511 if ( sandboxBlockedMmap(image
->path()) )
512 diag
.error("file system sandbox blocked mmap() of '%s'", image
->path());
514 diag
.error("code signing blocked mmap() of '%s'", image
->path());
517 diag
.error("mmap(addr=0x%0llX, size=0x%08X) failed with errno=%d for %s", loadAddress
+vmOffset
, fileSize
, mmapErr
, image
->path());
522 else if ( codeSignFileOffset
> fileOffset
) {
523 codeSignatureStartAddress
= (uint8_t*)segAddress
+ (codeSignFileOffset
-fileOffset
);
524 linkeditEndAddress
= (uint8_t*)segAddress
+ vmSize
;
526 // sanity check first segment is mach-o header
527 if ( (segAddress
!= MAP_FAILED
) && !mappedFirstSegment
) {
528 mappedFirstSegment
= true;
529 const MachOFile
* mf
= (MachOFile
*)segAddress
;
530 if ( !mf
->isMachO(diag
, fileSize
) ) {
535 if ( !mmapFailure
) {
536 const MachOLoaded
* lmo
= (MachOLoaded
*)loadAddress
;
537 _logSegments("%14s (%c%c%c) 0x%012lX->0x%012lX \n", lmo
->segmentName(segIndex
),
538 (permissions
& PROT_READ
) ? 'r' : '.', (permissions
& PROT_WRITE
) ? 'w' : '.', (permissions
& PROT_EXEC
) ? 'x' : '.' ,
539 (long)segAddress
, (long)segAddress
+(long)vmSize
-1);
541 maxFileOffset
= fileOffset
+ fileSize
;
544 ::vm_deallocate(mach_task_self(), loadAddress
, (vm_size_t
)totalVMSize
);
549 // <rdar://problem/47163421> speculatively read whole slice
550 fspecread_t specread
= {} ;
551 specread
.fsr_offset
= sliceOffset
;
552 specread
.fsr_length
= maxFileOffset
;
553 specread
.fsr_flags
= 0;
554 fcntl(fd
, F_SPECULATIVE_READ
, &specread
);
555 _logSegments("dyld: Speculatively read offset=0x%08llX, len=0x%08llX, path=%s\n", sliceOffset
, maxFileOffset
, image
->path());
561 // verify file has not changed since closure was built by checking code signature has not changed
562 struct CDHashWrapper
{
566 // Get all the hashes for the image
567 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper
, expectedCDHashes
, 1);
568 image
->forEachCDHash(^(const uint8_t *cdHash
, bool &stop
) {
569 CDHashWrapper cdHashWrapper
;
570 memcpy(cdHashWrapper
.cdHash
, cdHash
, sizeof(CDHashWrapper::cdHash
));
571 expectedCDHashes
.push_back(cdHashWrapper
);
574 if (!expectedCDHashes
.empty()) {
575 if (expectedCDHashes
.count() != 1) {
576 // We should only see a single hash for dylibs
577 diag
.error("code signature count invalid");
578 } else if ( codeSignatureStartAddress
== nullptr ) {
579 diag
.error("code signature missing");
581 else if ( codeSignatureStartAddress
+codeSignFileSize
> linkeditEndAddress
) {
582 diag
.error("code signature extends beyond end of __LINKEDIT");
585 // Get all the cd hashes for the macho
586 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(CDHashWrapper
, foundCDHashes
, 1);
587 const MachOLoaded
* lmo
= (MachOLoaded
*)loadAddress
;
588 lmo
->forEachCDHashOfCodeSignature(codeSignatureStartAddress
, codeSignFileSize
,
589 ^(const uint8_t *cdHash
) {
590 CDHashWrapper cdHashWrapper
;
591 memcpy(cdHashWrapper
.cdHash
, cdHash
, sizeof(CDHashWrapper::cdHash
));
592 foundCDHashes
.push_back(cdHashWrapper
);
595 if (foundCDHashes
.empty()) {
596 diag
.error("code signature format invalid");
597 } else if (expectedCDHashes
.count() != foundCDHashes
.count()) {
598 diag
.error("code signature count invalid");
600 // We found a hash, so make sure its equal.
601 if ( ::memcmp(foundCDHashes
[0].cdHash
, expectedCDHashes
[0].cdHash
, 20) != 0 )
602 diag
.error("code signature changed since closure was built");
605 if ( diag
.hasError() ) {
606 ::vm_deallocate(mach_task_self(), loadAddress
, (vm_size_t
)totalVMSize
);
613 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_OS_SIMULATOR
614 // tell kernel about fairplay encrypted regions
615 uint32_t fpTextOffset
;
617 if ( image
->isFairPlayEncrypted(fpTextOffset
, fpSize
) ) {
618 const mach_header
* mh
= (mach_header
*)loadAddress
;
619 int result
= ::mremap_encrypted(((uint8_t*)mh
) + fpTextOffset
, fpSize
, 1, mh
->cputype
, mh
->cpusubtype
);
621 diag
.error("could not register fairplay decryption, mremap_encrypted() => %d", result
);
622 ::vm_deallocate(mach_task_self(), loadAddress
, (vm_size_t
)totalVMSize
);
628 _logLoads("dyld: load %s\n", image
->path());
630 timer
.setData4((uint64_t)loadAddress
);
631 info
.setLoadedAddress((MachOLoaded
*)loadAddress
);
632 info
.setState(LoadedImage::State::mapped
);
635 void Loader::unmapImage(LoadedImage
& info
)
637 assert(info
.loadedAddress() != nullptr);
638 ::vm_deallocate(mach_task_self(), (vm_address_t
)info
.loadedAddress(), (vm_size_t
)(info
.image()->vmSizeToMap()));
639 info
.setLoadedAddress(nullptr);
642 void Loader::registerDOFs(const Array
<DOFInfo
>& dofs
)
647 int fd
= open("/dev/" DTRACEMNR_HELPER
, O_RDWR
);
649 _logDofs("can't open /dev/" DTRACEMNR_HELPER
" to register dtrace DOF sections\n");
652 // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
653 uint8_t buffer
[sizeof(dof_ioctl_data_t
) + dofs
.count()*sizeof(dof_helper_t
)];
654 dof_ioctl_data_t
* ioctlData
= (dof_ioctl_data_t
*)buffer
;
656 // fill in buffer with one dof_helper_t per DOF section
657 ioctlData
->dofiod_count
= dofs
.count();
658 for (unsigned int i
=0; i
< dofs
.count(); ++i
) {
659 strlcpy(ioctlData
->dofiod_helpers
[i
].dofhp_mod
, dofs
[i
].imageShortName
, DTRACE_MODNAMELEN
);
660 ioctlData
->dofiod_helpers
[i
].dofhp_dof
= (uintptr_t)(dofs
[i
].dof
);
661 ioctlData
->dofiod_helpers
[i
].dofhp_addr
= (uintptr_t)(dofs
[i
].dof
);
664 // tell kernel about all DOF sections en mas
665 // pass pointer to ioctlData because ioctl() only copies a fixed size amount of data into kernel
666 user_addr_t val
= (user_addr_t
)(unsigned long)ioctlData
;
667 if ( ioctl(fd
, DTRACEHIOC_ADDDOF
, &val
) != -1 ) {
668 // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
669 // Note, the closure marked the image as being never unload, so we don't need to keep the ID around
670 // or support unregistering it later.
671 for (unsigned int i
=0; i
< dofs
.count(); ++i
) {
672 _logDofs("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
673 dofs
[i
].dof
, dofs
[i
].imageShortName
, (int)(ioctlData
->dofiod_helpers
[i
].dofhp_dof
));
677 _logDofs("dyld: ioctl to register dtrace DOF section failed\n");
683 bool Loader::dtraceUserProbesEnabled()
685 uint8_t dofEnabled
= *((uint8_t*)_COMM_PAGE_DTRACE_DOF_ENABLED
);
686 return ( (dofEnabled
& 1) );
690 void Loader::vmAccountingSetSuspended(bool suspend
, LogFunc logger
)
692 #if __arm__ || __arm64__
693 // <rdar://problem/29099600> dyld should tell the kernel when it is doing fix-ups caused by roots
694 logger("vm.footprint_suspend=%d\n", suspend
);
695 int newValue
= suspend
? 1 : 0;
697 size_t newlen
= sizeof(newValue
);
698 size_t oldlen
= sizeof(oldValue
);
699 sysctlbyname("vm.footprint_suspend", &oldValue
, &oldlen
, &newValue
, newlen
);
703 void Loader::applyFixupsToImage(Diagnostics
& diag
, LoadedImage
& info
)
705 dyld3::ScopedTimer
timer(DBG_DYLD_TIMING_APPLY_FIXUPS
, (uint64_t)info
.loadedAddress(), 0, 0);
706 closure::ImageNum cacheImageNum
;
707 const char* leafName
= info
.image()->leafName();
708 const closure::Image
* image
= info
.image();
709 const uint8_t* imageLoadAddress
= (uint8_t*)info
.loadedAddress();
710 uintptr_t slide
= info
.loadedAddress()->getSlide();
711 bool overrideOfCache
= info
.image()->isOverrideOfDyldCacheImage(cacheImageNum
);
713 if ( overrideOfCache
)
714 vmAccountingSetSuspended(true, _logFixups
);
715 image
->forEachFixup(^(uint64_t imageOffsetToRebase
, bool& stop
) {
716 // this is a rebase, add slide
717 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToRebase
);
719 _logFixups("dyld: fixup: %s:%p += %p\n", leafName
, fixUpLoc
, (void*)slide
);
721 ^(uint64_t imageOffsetToBind
, closure::Image::ResolvedSymbolTarget bindTarget
, bool& stop
) {
722 // this is a bind, set to target
723 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToBind
);
724 uintptr_t value
= resolveTarget(bindTarget
);
725 _logFixups("dyld: fixup: %s:%p = %p\n", leafName
, fixUpLoc
, (void*)value
);
728 ^(uint64_t imageOffsetToStartsInfo
, const Array
<closure::Image::ResolvedSymbolTarget
>& targets
, bool& stop
) {
729 // this is a chain of fixups, fix up all
730 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const void*, targetAddrs
, 128);
731 targetAddrs
.reserve(targets
.count());
732 for (uint32_t i
=0; i
< targets
.count(); ++i
)
733 targetAddrs
.push_back((void*)resolveTarget(targets
[i
]));
734 ((dyld3::MachOAnalyzer
*)(info
.loadedAddress()))->withChainStarts(diag
, imageOffsetToStartsInfo
, ^(const dyld_chained_starts_in_image
* starts
) {
735 info
.loadedAddress()->fixupAllChainedFixups(diag
, starts
, slide
, targetAddrs
, ^(void* loc
, void* newValue
) {
736 _logFixups("dyld: fixup: %s:%p = %p\n", leafName
, loc
, newValue
);
740 ^(uint64_t imageOffsetToFixup
) {
741 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToFixup
);
742 _logFixups("dyld: fixup objc image info: %s Setting objc image info for precomputed objc\n", leafName
);
744 MachOAnalyzer::ObjCImageInfo
*imageInfo
= (MachOAnalyzer::ObjCImageInfo
*)fixUpLoc
;
745 ((MachOAnalyzer::ObjCImageInfo
*)imageInfo
)->flags
|= MachOAnalyzer::ObjCImageInfo::dyldPreoptimized
;
747 ^(uint64_t imageOffsetToBind
, closure::Image::ResolvedSymbolTarget bindTarget
, bool& stop
) {
748 // this is a bind, set to target
749 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToBind
);
750 uintptr_t value
= resolveTarget(bindTarget
);
751 _logFixups("dyld: fixup objc protocol: %s:%p = %p\n", leafName
, fixUpLoc
, (void*)value
);
754 ^(uint64_t imageOffsetToFixup
, uint32_t selectorIndex
, bool inSharedCache
, bool &stop
) {
756 closure::Image::ResolvedSymbolTarget fixupTarget
;
757 if ( inSharedCache
) {
758 const char* selectorString
= _dyldCacheSelectorOpt
->getEntryForIndex(selectorIndex
);
759 fixupTarget
.sharedCache
.kind
= closure::Image::ResolvedSymbolTarget::kindSharedCache
;
760 fixupTarget
.sharedCache
.offset
= (uint64_t)selectorString
- (uint64_t)_dyldCacheAddress
;
762 closure::ImageNum imageNum
;
764 bool gotLocation
= _closureSelectorOpt
->getStringLocation(selectorIndex
, _closureSelectorImages
, imageNum
, vmOffset
);
766 fixupTarget
.image
.kind
= closure::Image::ResolvedSymbolTarget::kindImage
;
767 fixupTarget
.image
.imageNum
= imageNum
;
768 fixupTarget
.image
.offset
= vmOffset
;
771 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToFixup
);
772 uintptr_t value
= resolveTarget(fixupTarget
);
773 _logFixups("dyld: fixup objc selector: %s:%p(was '%s') = %p(now '%s')\n", leafName
, fixUpLoc
, (const char*)*fixUpLoc
, (void*)value
, (const char*)value
);
775 }, ^(uint64_t imageOffsetToFixup
, bool &stop
) {
776 // fixupObjCStableSwift
777 // Class really is stable Swift, pretending to be pre-stable.
779 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToFixup
);
780 uintptr_t value
= ((*fixUpLoc
) | MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_STABLE
) & ~MachOAnalyzer::ObjCClassInfo::FAST_IS_SWIFT_LEGACY
;
781 _logFixups("dyld: fixup objc stable Swift: %s:%p = %p\n", leafName
, fixUpLoc
, (void*)value
);
783 }, ^(uint64_t imageOffsetToFixup
, bool &stop
) {
784 // TODO: Implement this
788 __block
bool segmentsMadeWritable
= false;
789 image
->forEachTextReloc(^(uint32_t imageOffsetToRebase
, bool& stop
) {
790 if ( !segmentsMadeWritable
)
791 setSegmentProtects(info
, true);
792 uintptr_t* fixUpLoc
= (uintptr_t*)(imageLoadAddress
+ imageOffsetToRebase
);
794 _logFixups("dyld: fixup: %s:%p += %p\n", leafName
, fixUpLoc
, (void*)slide
);
796 ^(uint32_t imageOffsetToBind
, closure::Image::ResolvedSymbolTarget bindTarget
, bool& stop
) {
799 if ( segmentsMadeWritable
)
800 setSegmentProtects(info
, false);
803 // make any read-only data segments read-only
804 if ( image
->hasReadOnlyData() && !image
->inDyldCache() ) {
805 image
->forEachDiskSegment(^(uint32_t segIndex
, uint32_t fileOffset
, uint32_t fileSize
, int64_t vmOffset
, uint64_t vmSize
, uint8_t permissions
, bool laterReadOnly
, bool& segStop
) {
806 if ( laterReadOnly
) {
807 ::mprotect((void*)(imageLoadAddress
+vmOffset
), (size_t)vmSize
, VM_PROT_READ
);
812 if ( overrideOfCache
)
813 vmAccountingSetSuspended(false, _logFixups
);
817 void Loader::setSegmentProtects(const LoadedImage
& info
, bool write
)
819 info
.image()->forEachDiskSegment(^(uint32_t segIndex
, uint32_t fileOffset
, uint32_t fileSize
, int64_t vmOffset
, uint64_t vmSize
, uint8_t protections
, bool laterReadOnly
, bool& segStop
) {
820 if ( protections
& VM_PROT_WRITE
)
822 uint32_t regionProt
= protections
;
824 regionProt
= VM_PROT_WRITE
| VM_PROT_READ
;
825 kern_return_t r
= vm_protect(mach_task_self(), ((uintptr_t)info
.loadedAddress())+(uintptr_t)vmOffset
, (uintptr_t)vmSize
, false, regionProt
);
826 assert( r
== KERN_SUCCESS
);
832 LoadedImage
* Loader::LaunchImagesCache::findImage(closure::ImageNum imageNum
,
833 Array
<LoadedImage
>& images
) const {
834 if ( (imageNum
< _firstImageNum
) || (imageNum
>= _lastImageNum
) )
837 uint64_t cacheIndex
= imageNum
- _firstImageNum
;
838 uint32_t imagesIndex
= _imageIndices
[cacheIndex
];
839 if ( imagesIndex
== 0 )
842 // Note the index is offset by 1 so that 0's are not yet set
843 return &images
[imagesIndex
- 1];
846 void Loader::LaunchImagesCache::tryAddImage(closure::ImageNum imageNum
,
847 uint64_t allImagesIndex
) {
848 if ( (imageNum
< _firstImageNum
) || (imageNum
>= _lastImageNum
) )
851 uint64_t cacheIndex
= imageNum
- _firstImageNum
;
853 // Note the index is offset by 1 so that 0's are not yet set
854 _imageIndices
[cacheIndex
] = (uint32_t)allImagesIndex
+ 1;
857 void forEachLineInFile(const char* buffer
, size_t bufferLen
, void (^lineHandler
)(const char* line
, bool& stop
))
860 const char* const eof
= &buffer
[bufferLen
];
861 for (const char* s
= buffer
; s
< eof
; ++s
) {
862 char lineBuffer
[MAXPATHLEN
];
863 char* t
= lineBuffer
;
864 char* tEnd
= &lineBuffer
[MAXPATHLEN
];
865 while ( (s
< eof
) && (t
!= tEnd
) ) {
871 lineHandler(lineBuffer
, stop
);
877 void forEachLineInFile(const char* path
, void (^lineHandler
)(const char* line
, bool& stop
))
879 int fd
= dyld::my_open(path
, O_RDONLY
, 0);
882 if ( fstat(fd
, &statBuf
) == 0 ) {
883 const char* lines
= (const char*)mmap(nullptr, (size_t)statBuf
.st_size
, PROT_READ
, MAP_PRIVATE
, fd
, 0);
884 if ( lines
!= MAP_FAILED
) {
885 forEachLineInFile(lines
, (size_t)statBuf
.st_size
, lineHandler
);
886 munmap((void*)lines
, (size_t)statBuf
.st_size
);
#if (BUILDING_LIBDYLD || BUILDING_DYLD)
// Returns true when running on an Apple-internal install: never in the
// simulator; via the comm-page dev-firm flag on embedded OSes; via
// csr_check(CSR_ALLOW_APPLE_INTERNAL) on macOS.
bool internalInstall()
{
#if TARGET_OS_SIMULATOR
    return false;
#elif __IPHONE_OS_VERSION_MIN_REQUIRED
    uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
    return ( (devFlags & 1) == 1 );
#else
    return ( csr_check(CSR_ALLOW_APPLE_INTERNAL) == 0 );
#endif
}
#endif
910 // hack because libdyld.dylib should not link with libc++.dylib
911 extern "C" void __cxa_pure_virtual() __attribute__((visibility("hidden")));
912 void __cxa_pure_virtual()