2 * Copyright (c) 2017 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 #include <sys/types.h>
26 #include <sys/errno.h>
29 #include <sys/param.h>
33 #include <sys/types.h>
34 #include <sys/sysctl.h>
36 #include "mach-o/dyld_priv.h"
38 #include "ClosureWriter.h"
39 #include "ClosureBuilder.h"
40 #include "MachOAnalyzer.h"
41 #include "libdyldEntryVector.h"
// Sentinel "closure" pointer meaning "retry the dlopen" rather than a real
// DlopenClosure: an all-ones address that can never be a valid allocation.
// NOTE(review): this capture is garbled — statements are split across lines
// and the original file's line numbers remain embedded in the text.
47 const DlopenClosure * ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure *)(-1);
// Constructor: stores all collaborators (file system, shared cache, path
// overrides, diagnostics sinks) in member fields via the init list, then
// caches the shared cache's dylib ImageArray. NOTE(review): this capture is
// lossy — the body's opening/closing braces (original lines 55, 60+) were
// dropped by extraction; tokens below are reproduced as captured.
49 ClosureBuilder::ClosureBuilder(uint32_t startImageNum , const FileSystem & fileSystem , const DyldSharedCache * dyldCache , bool dyldCacheIsLive ,
50 const PathOverrides & pathOverrides , AtPath atPathHandling , LaunchErrorInfo * errorInfo ,
51 const char* archName , Platform platform ,
52 const CacheDylibsBindingHandlers * handlers )
53 : _fileSystem(fileSystem ), _dyldCache(dyldCache ), _pathOverrides(pathOverrides ), _archName(archName ), _platform(platform ), _startImageNum(startImageNum ),
54 _handlers(handlers ), _atPathHandling(atPathHandling ), _launchErrorInfo(errorInfo ), _dyldCacheIsLive(dyldCacheIsLive )
// Grab the pre-built dylib image array from the shared cache, if one exists.
56 if ( dyldCache != nullptr ) {
57 _dyldImageArray = dyldCache ->cachedDylibsImageArray();
// A cache with an other-image array but no prebuilt program closures means
// we are generating closures to be stored in the cache itself.
58 if ( (dyldCache ->header .otherImageArrayAddr != 0) && (dyldCache ->header .progClosuresSize == 0) )
59 _makingClosuresInCache = true;
// Destructor: releases the two lazily-allocated PathPool side allocations
// (see strdup_temp() and addMustBeMissingPath()). NOTE(review): closing
// brace dropped by the lossy capture.
64 ClosureBuilder::~ClosureBuilder() {
65 if ( _tempPaths != nullptr )
66 PathPool::deallocate(_tempPaths );
67 if ( _mustBeMissingPaths != nullptr )
68 PathPool::deallocate(_mustBeMissingPaths );
// Locates (or loads) the image for `loadPath`, trying each path variant that
// PathOverrides produces (DYLD_LIBRARY_PATH, @rpath expansion, fallbacks...).
// On success, appends a BuilderLoadedImage to _loadedImages and points
// `foundImage` at it. Search order visible below: already-loaded images
// (exact path, then @rpath leaf/installName match, then inode/mtime symlink
// match), then the dyld shared cache (including pre-built "other" dlopen
// images and symlinks into the cache), then mmap from disk.
// NOTE(review): this capture is lossy — embedded original line numbers skip
// (102→108, 114→125, 176→190, ...), so many statements, returns, and braces
// are missing; treat this text as a partial record, not compilable code.
71 bool ClosureBuilder::findImage(const char* loadPath , const LoadedImageChain & forImageChain , BuilderLoadedImage *& foundImage , bool staticLinkage , bool allowOther )
73 __block bool result = false;
75 _pathOverrides .forEachPathVariant(loadPath , ^(const char* possiblePath , bool isFallbackPath , bool& stop ) {
// Per-variant state: how the image was obtained and how it must be treated.
76 bool unmapWhenDone = false;
77 bool contentRebased = false;
78 bool hasInits = false;
79 bool fileFound = false;
// Children of a never-unload image are themselves never-unload.
80 bool markNeverUnload = staticLinkage ? forImageChain .image .markNeverUnload : false;
81 ImageNum overrideImageNum = 0;
82 ImageNum foundImageNum = 0;
83 const MachOAnalyzer * mh = nullptr;
84 const char* filePath = nullptr;
85 LoadedFileInfo loadedFileInfo ;
87 // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
88 bool isRPath = (strncmp(possiblePath , "@rpath/", 7) == 0);
90 // passing a leaf name to dlopen() allows rpath searching for it
91 bool implictRPath = !staticLinkage && (loadPath [0] != '/') && (loadPath == possiblePath ) && (_atPathHandling != AtPath::none )
;
// Expand @loader_path/@executable_path/@rpath before searching.
94 const char* prePathVarExpansion = possiblePath ;
95 possiblePath = resolvePathVar(possiblePath , forImageChain , implictRPath );
96 if ( prePathVarExpansion != possiblePath )
99 // look at already loaded images
100 const char* leafName = strrchr(possiblePath , '/');
101 for (BuilderLoadedImage & li : _loadedImages ) {
102 if ( strcmp(li .path(), possiblePath ) == 0 ) {
108 else if ( isRPath ) {
109 // Special case @rpath/ because name in li.fileInfo.path is full path.
110 // Getting installName is expensive, so first see if an already loaded image
111 // has same leaf name and if so see if its installName matches request @rpath
112 if (const char* aLeaf = strrchr(li .path(), '/')) {
113 if ( strcmp(aLeaf , leafName ) == 0 ) {
114 if ( li .loadAddress()->isDylib() && (strcmp(loadPath , li .loadAddress()->installName()) == 0) ) {
125 // look to see if image already loaded via a different symlink
126 if ( _fileSystem .fileExists(possiblePath , &loadedFileInfo .inode , &loadedFileInfo .mtime ) ) {
128 for (BuilderLoadedImage & li : _loadedImages ) {
129 if ( (li .loadedFileInfo .inode == loadedFileInfo .inode ) && (li .loadedFileInfo .mtime == loadedFileInfo .mtime ) ) {
138 // look in dyld cache
139 filePath = possiblePath ;
140 char realPath [MAXPATHLEN ];
// Only trust the cache's image array when its closure format matches ours.
141 if ( _dyldImageArray != nullptr && (_dyldCache ->header .formatVersion == dyld3::closure::kFormatVersion ) ) {
142 uint32_t dyldCacheImageIndex ;
143 bool foundInCache = _dyldCache ->hasImagePath(possiblePath , dyldCacheImageIndex );
144 if ( !foundInCache && fileFound ) {
145 // see if this is an OS dylib/bundle with a pre-built dlopen closure
147 if (const dyld3::closure::Image * otherImage = _dyldCache ->findDlopenOtherImage(possiblePath ) ) {
148 uint64_t expectedInode ;
149 uint64_t expectedModTime ;
150 if ( !otherImage ->isInvalid() ) {
151 bool hasInodeInfo = otherImage ->hasFileModTimeAndInode(expectedInode , expectedModTime );
152 // use pre-built Image if it does not have mtime/inode or it does and it has matches current file info
153 if ( !hasInodeInfo || ((expectedInode == loadedFileInfo .inode ) && (expectedModTime == loadedFileInfo .mtime )) ) {
154 loadedFileInfo = MachOAnalyzer::load(_diag , _fileSystem , possiblePath , _archName , _platform );
155 if ( _diag .noError() ) {
156 mh = (const MachOAnalyzer *)loadedFileInfo .fileContent ;
157 foundImageNum = otherImage ->imageNum();
158 unmapWhenDone = true;
159 contentRebased = false;
160 hasInits = otherImage ->hasInitializers() || otherImage ->mayHavePlusLoads();
166 // if not found in cache, may be a symlink to something in cache
167 if ( mh == nullptr ) {
168 if ( _fileSystem .getRealPath(possiblePath , realPath ) ) {
169 foundInCache = _dyldCache ->hasImagePath(realPath , dyldCacheImageIndex );
170 if ( foundInCache ) {
173 // handle case where OS dylib was updated after this process launched
174 if ( foundInCache ) {
175 for (BuilderLoadedImage & li : _loadedImages ) {
176 if ( strcmp(li .path(), realPath ) == 0 ) {
190 // if using a cached dylib, look to see if there is an override
191 if ( foundInCache ) {
192 ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
193 bool useCache = true;
194 markNeverUnload = true; // dylibs in cache, or dylibs that override cache should not be unloaded at runtime
195 const Image * image = _dyldImageArray ->imageForNum(dyldCacheImageNum );
196 if ( image ->overridableDylib() ) {
197 if ( fileFound && (_platform == MachOFile::currentPlatform()) ) {
198 uint64_t expectedInode ;
199 uint64_t expectedModTime ;
200 if ( image ->hasFileModTimeAndInode(expectedInode , expectedModTime ) ) {
201 // macOS where dylibs remain on disk. only use cache if mtime and inode have not changed
202 useCache = ( (loadedFileInfo .inode == expectedInode ) && (loadedFileInfo .mtime == expectedModTime ) );
204 else if ( _makingClosuresInCache ) {
205 // during iOS cache build, don't look at files on disk, use ones in cache
209 // iOS internal build. Any disk on cache overrides cache
214 overrideImageNum = dyldCacheImageNum ;
// NOTE(review): the `useCache` branch body appears truncated here (original
// lines 206-216 missing) — presumably the !useCache path sets overrideImageNum.
217 foundImageNum = dyldCacheImageNum ;
218 mh = (MachOAnalyzer *)_dyldCache ->getIndexedImageEntry(foundImageNum -1, loadedFileInfo .mtime , loadedFileInfo .inode );
219 unmapWhenDone = false;
220 // if we are building ImageArray in dyld cache, content is not rebased
221 contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive ;
222 hasInits = image ->hasInitializers() || image ->mayHavePlusLoads();
227 // If we are building the cache, and don't find an image, then it might be weak so just return
228 if (_makingDyldCacheImages ) {
229 addMustBeMissingPath(possiblePath );
233 // if not found yet, mmap file
234 if ( mh == nullptr ) {
235 loadedFileInfo = MachOAnalyzer::load(_diag , _fileSystem , filePath , _archName , _platform );
236 mh = (const MachOAnalyzer *)loadedFileInfo .fileContent ;
237 if ( mh == nullptr ) {
238 // Don't add must be missing paths for dlopen as we don't cache dlopen closures
239 if (_isLaunchClosure ) {
240 addMustBeMissingPath(possiblePath );
// Validate that the file kind matches how it is being linked/loaded.
244 if ( staticLinkage ) {
245 // LC_LOAD_DYLIB can only link with dylibs
246 if ( !mh ->isDylib() ) {
247 _diag .error("not a dylib");
251 else if ( mh ->isMainExecutable() ) {
252 // when dlopen()ing a main executable, it must be dynamic Position Independent Executable
253 if ( !mh ->isPIE() || !mh ->isDynamicExecutable() ) {
254 _diag .error("not PIE");
// Allocate the next ImageNum for a non-cache image.
258 foundImageNum = _startImageNum + _nextIndex ++;
259 unmapWhenDone = true;
261 loadedFileInfo .fileContent = mh ;
264 // if path is not original path
265 if ( filePath != loadPath ) {
266 // possiblePath may be a temporary (stack) string, since we found file at that path, make it permanent
267 filePath = strdup_temp(filePath );
268 // check if this overrides what would have been found in cache
269 if ( overrideImageNum == 0 ) {
270 if ( _dyldImageArray != nullptr ) {
271 uint32_t dyldCacheImageIndex ;
272 if ( _dyldCache ->hasImagePath(loadPath , dyldCacheImageIndex ) ) {
273 ImageNum possibleOverrideNum = dyldCacheImageIndex +1;
274 if ( possibleOverrideNum != foundImageNum )
275 overrideImageNum = possibleOverrideNum ;
281 if ( !markNeverUnload ) {
282 // If the parent didn't force us to be never unload, other conditions still may
283 if ( mh ->hasThreadLocalVariables() ) {
284 markNeverUnload = true;
285 } else if ( mh ->hasObjC() && mh ->isDylib() ) {
286 markNeverUnload = true;
288 // record if image has DOF sections
289 __block bool hasDOFs = false;
290 mh ->forEachDOFSection(_diag , ^(uint32_t offset ) {
// NOTE(review): the block body setting hasDOFs (original lines 291-293) is
// missing from this capture.
294 markNeverUnload = true;
298 // Set the path again just in case it was strdup'ed.
299 loadedFileInfo .path = filePath ;
// Record everything learned about this image in the builder's image list.
302 BuilderLoadedImage entry ;
303 entry .loadedFileInfo = loadedFileInfo ;
304 entry .imageNum = foundImageNum ;
305 entry .unmapWhenDone = unmapWhenDone ;
306 entry .contentRebased = contentRebased ;
307 entry .hasInits = hasInits ;
308 entry .markNeverUnload = markNeverUnload ;
309 entry .rtldLocal = false;
310 entry .isBadImage = false;
311 entry .overrideImageNum = overrideImageNum ;
312 _loadedImages .push_back(entry );
313 foundImage = &_loadedImages .back();
314 if ( isFallbackPath )
315 _fallbackPathUsed = true;
// Expands a "@loader_path/..." prefix into `fixedPath` by substituting the
// directory of the image that issued the load (`loadedImage`), subject to the
// _atPathHandling policy. NOTE(review): lossy capture — several switch cases
// (original lines 326-327, 332-337) and returns are missing.
323 bool ClosureBuilder::expandAtLoaderPath(const char* loadPath , bool fromLCRPATH , const BuilderLoadedImage & loadedImage , char fixedPath [])
325 switch ( _atPathHandling ) {
328 case AtPath::onlyInRPaths :
329 if ( !fromLCRPATH ) {
330 // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
331 if ( _isLaunchClosure )
// Only strings actually starting with "@loader_path/" are expanded.
338 if ( strncmp(loadPath , "@loader_path/", 13) != 0 )
// Copy loader's path, then overwrite its leaf with the requested tail.
341 strlcpy(fixedPath , loadedImage .path(), PATH_MAX );
342 char* lastSlash = strrchr(fixedPath , '/');
343 if ( lastSlash != nullptr ) {
344 strcpy(lastSlash +1, &loadPath [13]);
// Expands an "@executable_path/..." prefix into `fixedPath` using the main
// program's path (_loadedImages[_mainProgLoadIndex]), subject to the
// _atPathHandling policy. NOTE(review): lossy capture — switch cases and
// returns (original lines 353-354, 356-361, 363-364, ...) are missing.
350 bool ClosureBuilder::expandAtExecutablePath(const char* loadPath , bool fromLCRPATH , char fixedPath [])
352 switch ( _atPathHandling ) {
355 case AtPath::onlyInRPaths :
362 if ( strncmp(loadPath , "@executable_path/", 17) != 0 )
365 if ( _atPathHandling != AtPath::all )
// Copy the main executable's path, then replace its leaf with the tail.
368 strlcpy(fixedPath , _loadedImages [_mainProgLoadIndex ].path(), PATH_MAX );
369 char* lastSlash = strrchr(fixedPath , '/');
370 if ( lastSlash != nullptr ) {
371 strcpy(lastSlash +1, &loadPath [17]);
// Resolves dyld path variables (@loader_path, @executable_path, @rpath) in
// `loadPath`, returning either the original pointer (no expansion) or a
// pool-owned copy of the expanded path (see strdup_temp). For @rpath it walks
// the LoadedImageChain, trying each LC_RPATH of each image in the chain.
// NOTE(review): lossy capture — early `return loadPath;` statements and some
// closing braces (e.g. original lines 381-382, 385-386, 418-419, 431-440)
// are missing from this text.
377 const char* ClosureBuilder::resolvePathVar(const char* loadPath , const LoadedImageChain & forImageChain , bool implictRPath )
379 // don't expand @ path if disallowed
380 if ( (_atPathHandling == AtPath::none ) && (loadPath [0] == '@') )
383 // quick out if not @ path or not implicit rpath
384 if ( !implictRPath && (loadPath [0] != '@') )
387 // expand @loader_path
388 BLOCK_ACCCESSIBLE_ARRAY(char, tempPath , PATH_MAX ); // read as: char tempPath[PATH_MAX];
389 if ( expandAtLoaderPath(loadPath , false, forImageChain .image , tempPath ) )
390 return strdup_temp(tempPath );
392 // expand @executable_path
393 if ( expandAtExecutablePath(loadPath , false, tempPath ) )
394 return strdup_temp(tempPath );
// Build the "/leaf" tail appended to each rpath directory.
397 const char* rpathTail = nullptr;
398 char implicitRpathBuffer [PATH_MAX ];
399 if ( strncmp(loadPath , "@rpath/", 7) == 0 ) {
400 // note: rpathTail starts with '/'
401 rpathTail = &loadPath [6];
403 else if ( implictRPath ) {
404 // make rpathTail starts with '/'
405 strlcpy(implicitRpathBuffer , "/", PATH_MAX );
406 strlcat(implicitRpathBuffer , loadPath , PATH_MAX );
407 rpathTail = implicitRpathBuffer ;
409 if ( rpathTail != nullptr ) {
410 // rpath is expansion is technically a stack of rpath dirs built starting with main executable and pushing
411 // LC_RPATHS from each dylib as they are recursively loaded. Our imageChain represents that stack.
412 __block const char* result = nullptr;
413 for (const LoadedImageChain * link = &forImageChain ; (link != nullptr) && (result == nullptr); link = link ->previous ) {
414 link ->image .loadAddress()->forEachRPath(^(const char* rPath , bool& stop ) {
415 // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.fileInfo.path);
416 if ( expandAtLoaderPath(rPath , true, link ->image , tempPath ) || expandAtExecutablePath(rPath , true, tempPath ) ) {
417 strlcat(tempPath , rpathTail , PATH_MAX );
// Non-@-prefixed rpath entry: candidate is rPath + "/leaf".
420 strlcpy(tempPath , rPath , PATH_MAX );
421 strlcat(tempPath , rpathTail , PATH_MAX );
423 if ( _fileSystem .fileExists(tempPath ) ) {
425 result = strdup_temp(tempPath );
428 // Don't add must be missing paths for dlopen as we don't cache dlopen closures
429 if (_isLaunchClosure ) {
430 addMustBeMissingPath(tempPath );
435 if ( result != nullptr )
// Copies `path` into the builder's lazily-created _tempPaths pool and returns
// the pool-owned copy (lifetime = lifetime of the ClosureBuilder; freed in
// the destructor). NOTE(review): braces dropped by the lossy capture.
442 const char* ClosureBuilder::strdup_temp(const char* path )
444 if ( _tempPaths == nullptr )
445 _tempPaths = PathPool::allocate();
446 return _tempPaths ->add(path );
// Records a path that was searched and found absent, in the lazily-created
// _mustBeMissingPaths pool — presumably so the closure can later be
// invalidated if such a file appears (TODO confirm against closure users).
// NOTE(review): braces dropped by the lossy capture.
449 void ClosureBuilder::addMustBeMissingPath(const char* path )
451 //fprintf(stderr, "must be missing: %s\n", path);
452 if ( _mustBeMissingPaths == nullptr )
453 _mustBeMissingPaths = PathPool::allocate();
454 _mustBeMissingPaths ->add(path );
// Looks up the BuilderLoadedImage for `imageNum`: first by exact imageNum,
// then by overrideImageNum (an on-disk dylib overriding a cache dylib).
// Asserts if not found — callers must pass a known image number.
// NOTE(review): lossy capture — the `return li;` statements inside the loops
// (original lines 461-463, 466-468) are missing from this text.
457 ClosureBuilder::BuilderLoadedImage & ClosureBuilder::findLoadedImage(ImageNum imageNum )
459 for (BuilderLoadedImage & li : _loadedImages ) {
460 if ( li .imageNum == imageNum ) {
464 for (BuilderLoadedImage & li : _loadedImages ) {
465 if ( li .overrideImageNum == imageNum ) {
469 assert(0 && "LoadedImage not found");
// Looks up the BuilderLoadedImage whose mapped load address is `mh`.
// Asserts if not found. NOTE(review): the loop's `return li;` (original
// lines 476-478) is missing from this lossy capture.
472 ClosureBuilder::BuilderLoadedImage & ClosureBuilder::findLoadedImage(const MachOAnalyzer * mh )
474 for (BuilderLoadedImage & li : _loadedImages ) {
475 if ( li .loadAddress() == mh ) {
479 assert(0 && "LoadedImage not found");
// Convenience: maps an ImageNum to the mapped MachOAnalyzer of that image
// (asserts via findLoadedImage if the number is unknown).
482 const MachOAnalyzer * ClosureBuilder::machOForImageNum(ImageNum imageNum )
484 return findLoadedImage(imageNum ).loadAddress();
// Returns the mach-o header of dependent number `depIndex` of the image
// loaded at `mh`, by consulting that image's recorded dependents array.
// NOTE(review): lossy capture — the bad-image guard's body (original lines
// 491, 494-495) and the fall-through return (498-502) are missing.
487 const MachOAnalyzer * ClosureBuilder::findDependent(const MachOLoaded * mh , uint32_t depIndex )
489 for (const BuilderLoadedImage & li : _loadedImages ) {
490 if ( li .loadAddress() == mh ) {
492 // Bad image duting building group 1 closures, so the dependents array
493 // is potentially incomplete.
496 ImageNum childNum = li .dependents [depIndex ].imageNum();
497 return machOForImageNum(childNum );
// Reverse lookup: ImageNum assigned to the image mapped at `mh`; asserts if
// the pointer is not one of _loadedImages. NOTE(review): the loop's
// `return li.imageNum;` (original lines 507-509) is missing from this capture.
503 ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer * mh )
505 for (const BuilderLoadedImage & li : _loadedImages ) {
506 if ( li .loadAddress() == mh ) {
510 assert(0 && "unknown mach-o");
// Loads every LC_LOAD_DYLIB-style dependent of forImageChain.image via
// findImage(), records them as Image::LinkedImage entries in _dependencies,
// attaches the subarray to the image, then recurses breadth-first (skipping
// upward links and missing weak links). On failure, composes a detailed
// "dependent dylib not found" diagnostic including the must-be-missing paths
// tried, and fills _launchErrorInfo for the kernel exit-reason.
// NOTE(review): lossy capture — embedded line numbers skip (518-519, 525,
// 529, 543, 549-550, 553-557, ...), so guards, returns and braces are
// missing; `foundStr` used at line 545 is declared on a dropped line.
514 void ClosureBuilder::recursiveLoadDependents(LoadedImageChain & forImageChain )
516 // if dependents is set, then we have already loaded this
517 if ( forImageChain .image .dependents .begin() != nullptr )
520 uintptr_t startDepIndex = _dependencies .count();
// Pass 1: load all direct dependents and classify each link kind.
522 __block uint32_t depIndex = 0;
523 forImageChain .image .loadAddress()->forEachDependentDylib(^(const char* loadPath , bool isWeak , bool isReExport , bool isUpward , uint32_t compatVersion , uint32_t curVersion , bool &stop ) {
524 Image::LinkKind kind = Image::LinkKind::regular ;
526 kind = Image::LinkKind::weak ;
527 else if ( isReExport )
528 kind = Image::LinkKind::reExport ;
530 kind = Image::LinkKind::upward ;
531 BuilderLoadedImage * foundImage ;
532 if ( findImage(loadPath , forImageChain , foundImage , true, false) ) {
533 // verify this is compatable dylib version
534 if ( foundImage ->loadAddress()->filetype != MH_DYLIB ) {
535 _diag .error("found '%s' which is not a dylib. Needed by '%s'", foundImage ->path(), forImageChain .image .path());
538 const char* installName ;
539 uint32_t foundCompatVers ;
540 uint32_t foundCurrentVers ;
541 foundImage ->loadAddress()->getDylibInstallName(&installName , &foundCompatVers , &foundCurrentVers );
542 if ( (foundCompatVers < compatVersion ) && foundImage ->loadAddress()->enforceCompatVersion() ) {
544 char requiredStr [32];
545 MachOFile::packedVersionToString(foundCompatVers , foundStr );
546 MachOFile::packedVersionToString(compatVersion , requiredStr );
547 _diag .error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
548 foundImage ->path(), foundStr , requiredStr , forImageChain .image .path());
551 if ( _diag .noError() )
552 _dependencies .push_back(Image::LinkedImage(kind , foundImage ->imageNum ));
// Missing weak imports get a placeholder link instead of an error.
555 _dependencies .push_back(Image::LinkedImage(Image::LinkKind::weak , kMissingWeakLinkedImage ));
// Failure path: build the "tried: ..." suffix from must-be-missing paths
// that share the requested leaf name.
558 BLOCK_ACCCESSIBLE_ARRAY(char, extra , 4096);
560 const char* targetLeaf = strrchr(loadPath , '/');
561 if ( targetLeaf == nullptr )
562 targetLeaf = loadPath ;
563 if ( _mustBeMissingPaths != nullptr ) {
564 strcpy(extra , ", tried: ");
565 _mustBeMissingPaths ->forEachPath(^(const char* aPath ) {
566 const char* aLeaf = strrchr(aPath , '/');
567 if ( aLeaf == nullptr )
569 if ( strcmp(targetLeaf , aLeaf ) == 0 ) {
570 strlcat(extra , "'", 4096);
571 strlcat(extra , aPath , 4096);
572 strlcat(extra , "' ", 4096);
576 if ( _diag .hasError() ) {
// In the cache builder, errorMessage() returns a std::string; copy it
// before error() overwrites the diagnostic it came from.
577 #if BUILDING_CACHE_BUILDER
578 std::string errorMessageBuffer = _diag .errorMessage();
579 const char* msg = errorMessageBuffer .c_str();
581 const char* msg = _diag .errorMessage();
583 char msgCopy [strlen(msg )+4];
584 strcpy(msgCopy , msg );
585 _diag .error("dependent dylib '%s' not found for '%s'. %s", loadPath , forImageChain .image .path(), msgCopy );
588 _diag .error("dependent dylib '%s' not found for '%s'%s", loadPath , forImageChain .image .path(), extra );
590 if ( _launchErrorInfo != nullptr ) {
591 _launchErrorInfo ->kind = DYLD_EXIT_REASON_DYLIB_MISSING ;
592 _launchErrorInfo ->clientOfDylibPath = forImageChain .image .path();
593 _launchErrorInfo ->targetDylibPath = loadPath ;
594 _launchErrorInfo ->symbol = nullptr;
598 if ( _diag .hasError() )
601 if ( _diag .hasError() )
// Attach this image's slice of the shared _dependencies array.
603 forImageChain .image .dependents = _dependencies .subArray(startDepIndex , depIndex );
605 // breadth first recurse
606 for (Image::LinkedImage dep : forImageChain .image .dependents ) {
607 // don't recurse upwards
608 if ( dep .kind() == Image::LinkKind::upward )
610 // don't recurse down missing weak links
611 if ( (dep .kind() == Image::LinkKind::weak ) && (dep .imageNum() == kMissingWeakLinkedImage ) )
613 BuilderLoadedImage & depLoadedImage = findLoadedImage(dep .imageNum());
614 LoadedImageChain chain = { &forImageChain , depLoadedImage };
615 recursiveLoadDependents(chain );
616 if ( _diag .hasError() )
// Repeatedly scans _loadedImages for images whose dependents were never set
// (reachable only via upward links, which recursiveLoadDependents skips) and
// loads their dependents, until a full pass fixes nothing or an error occurs.
// NOTE(review): lossy capture — the `do {` / `bool danglingFixed;` opening
// (original lines 622-624) is missing; line 635 shows the matching while().
621 void ClosureBuilder::loadDanglingUpwardLinks()
625 danglingFixed = false;
626 for (BuilderLoadedImage & li : _loadedImages ) {
627 if ( li .dependents .begin() == nullptr ) {
628 // this image has not have dependents set (probably a dangling upward link or referenced by upward link)
629 LoadedImageChain chain = { nullptr, li };
630 recursiveLoadDependents(chain );
631 danglingFixed = true;
635 } while (danglingFixed && _diag .noError());
// Decides whether a cached dylib may be overridden by an on-disk copy at
// runtime; only meaningful while building dyld-cache images. Policy visible
// below: macOS always overridable; embedded Internal (non-customer) caches
// overridable; embedded customer caches not. NOTE(review): the `return`
// values of the first three guards (original lines 642-643, 646-647,
// 650-651) were dropped by the lossy capture — presumably `return true;`.
638 bool ClosureBuilder::overridableDylib(const BuilderLoadedImage & forImage )
640 // only set on dylibs in the dyld shared cache
641 if ( !_makingDyldCacheImages )
644 // on macOS dylibs always override cache
645 if ( _platform == Platform::macOS )
648 // on embedded platforms with Internal cache, allow overrides
649 if ( !_makingCustomerCache )
652 // embedded platform customer caches, no overrides
653 return false; // FIXME, allow libdispatch.dylib to be overridden
// Serializes one loaded image into the closure via `writer`: flags (objc,
// bundle/dylib/executable, 64-bit, page size, never-unload), file identity
// (inode/mtime where the platform keeps dylibs on disk), mapping and code
// signature / FairPlay ranges for non-cached images, paths and aliases,
// UUID, dependents, segments, override linkage, fixup info, initializer
// offsets, and DOF section offsets. NOTE(review): lossy capture — embedded
// line numbers skip (659, 661-662, 675, 677-678, ...); conditionals'
// else/endif lines, `uuid`/`cdHash` declarations, and several braces are
// missing from this text.
656 void ClosureBuilder::buildImage(ImageWriter & writer , BuilderLoadedImage & forImage )
658 const MachOAnalyzer * macho = forImage .loadAddress();
660 writer .setImageNum(forImage .imageNum );
// Basic flags derived straight from the mach-o header.
663 writer .setHasWeakDefs(macho ->hasWeakDefs());
664 writer .setIsBundle(macho ->isBundle());
665 writer .setIsDylib(macho ->isDylib());
666 writer .setIs64(macho ->is64());
667 writer .setIsExecutable(macho ->isMainExecutable());
668 writer .setUses16KPages(macho ->uses16KPages());
669 writer .setOverridableDylib(overridableDylib(forImage ));
670 writer .setInDyldCache(macho ->inDyldCache());
671 if ( macho ->hasObjC() ) {
672 writer .setHasObjC(true);
673 bool hasPlusLoads = macho ->hasPlusLoadMethod(_diag );
674 writer .setHasPlusLoads(hasPlusLoads );
676 forImage .hasInits = true;
679 writer .setHasObjC(false);
680 writer .setHasPlusLoads(false);
683 if ( forImage .markNeverUnload ) {
684 writer .setNeverUnload(true);
687 #if BUILDING_DYLD || BUILDING_LIBDYLD
688 // shared cache not built by dyld or libdyld.dylib, so must be real file
689 writer .setFileInfo(forImage .loadedFileInfo .inode , forImage .loadedFileInfo .mtime );
691 if ( _platform == Platform::macOS ) {
692 if ( macho ->inDyldCache() && !_dyldCache ->header .dylibsExpectedOnDisk ) {
693 // don't add file info for shared cache files mastered out of final file system
696 // file is either not in cache or is in cache but not mastered out
697 writer .setFileInfo(forImage .loadedFileInfo .inode , forImage .loadedFileInfo .mtime );
701 // all other platforms, cache is built off-device, so inodes are not known
705 // add info on how to load image
706 if ( !macho ->inDyldCache() ) {
707 writer .setMappingInfo(forImage .loadedFileInfo .sliceOffset , macho ->mappedSize());
708 // add code signature, if signed
709 uint32_t codeSigFileOffset ;
710 uint32_t codeSigSize ;
711 if ( macho ->hasCodeSignature(codeSigFileOffset , codeSigSize ) ) {
712 writer .setCodeSignatureLocation(codeSigFileOffset , codeSigSize );
// NOTE(review): `cdHash` declaration (original line 713) missing here.
714 if ( macho ->getCDHash(cdHash ) )
715 writer .setCDHash(cdHash );
717 // add FairPlay encryption range if encrypted
718 uint32_t fairPlayFileOffset ;
719 uint32_t fairPlaySize ;
720 if ( macho ->isFairPlayEncrypted(fairPlayFileOffset , fairPlaySize ) ) {
721 writer .setFairPlayEncryptionRange(fairPlayFileOffset , fairPlaySize );
// Record the primary path plus any shared-cache alias paths.
726 writer .addPath(forImage .path());
727 if ( _aliases != nullptr ) {
728 for (const CachedDylibAlias & alias : *_aliases ) {
729 if ( strcmp(alias .realPath , forImage .path()) == 0 )
730 writer .addPath(alias .aliasPath );
734 // set uuid, if has one
736 if ( macho ->getUuid(uuid ) )
737 writer .setUUID(uuid );
740 writer .setDependents(forImage .dependents );
743 addSegments(writer , macho );
745 // record if this dylib overrides something in the cache
746 if ( forImage .overrideImageNum != 0 ) {
747 writer .setAsOverrideOf(forImage .overrideImageNum );
748 const char* overridePath = _dyldImageArray ->imageForNum(forImage .overrideImageNum )->path();
749 writer .addPath(overridePath );
// Remember who overrides libdyld / libSystem — these get special handling.
750 if ( strcmp(overridePath , "/usr/lib/system/libdyld.dylib") == 0 )
751 _libDyldImageNum = forImage .imageNum ;
752 else if ( strcmp(overridePath , "/usr/lib/libSystem.B.dylib") == 0 )
753 _libSystemImageNum = forImage .imageNum ;
757 // do fix up info for non-cached, and cached if building cache
758 if ( !macho ->inDyldCache() || _makingDyldCacheImages ) {
759 if ( macho ->hasChainedFixups() ) {
760 addChainedFixupInfo(writer , forImage );
763 if ( _handlers != nullptr ) {
764 reportRebasesAndBinds(writer , forImage );
767 addRebaseInfo(writer , macho );
768 if ( _diag .noError() )
769 addBindInfo(writer , forImage );
773 if ( _diag .hasError() ) {
// Two-pass initializer collection: count, then fill a sized array.
779 bool contentRebased = forImage .contentRebased ;
780 __block unsigned initCount = 0;
781 macho ->forEachInitializer(_diag , contentRebased , ^(uint32_t offset ) {
784 if ( initCount != 0 ) {
785 BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets , initCount );
786 __block unsigned index = 0;
787 macho ->forEachInitializer(_diag , contentRebased , ^(uint32_t offset ) {
788 initOffsets [index ++] = offset ;
790 writer .setInitOffsets(initOffsets , initCount );
791 forImage .hasInits = true;
794 // record if image has DOF sections
795 STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets , 256);
796 macho ->forEachDOFSection(_diag , ^(uint32_t offset ) {
797 dofSectionOffsets .push_back(offset );
799 if ( !dofSectionOffsets .empty() ) {
800 writer .setDofOffsets(dofSectionOffsets );
// Writes segment layout into the closure. Cached images: compact
// (cache-offset, size, prot) tuples. Disk images: page-count encoded
// DiskSegment records, with synthetic "padding" records inserted wherever
// file offsets or vm addresses are discontiguous (hence segCount*3 room).
// NOTE(review): lossy capture — an early return / else between the two
// branches (original lines 813, 815-816) and trailing braces are missing.
805 void ClosureBuilder::addSegments(ImageWriter & writer , const MachOAnalyzer * mh )
807 const uint32_t segCount = mh ->segmentCount();
808 if ( mh ->inDyldCache() ) {
809 uint64_t cacheUnslideBaseAddress = _dyldCache ->unslidLoadAddress();
810 BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment , segs , segCount );
811 mh ->forEachSegment(^(const MachOAnalyzer::SegmentInfo & info , bool& stop ) {
812 segs [info .segIndex ] = { (uint32_t)(info .vmAddr -cacheUnslideBaseAddress ), (uint32_t)info .vmSize , info .protections };
814 writer .setCachedSegments(segs , segCount );
// Disk-image path: encode each segment (and any gaps) in pages.
817 const uint32_t pageSize = (mh ->uses16KPages() ? 0x4000 : 0x1000);
818 __block uint32_t diskSegIndex = 0;
819 __block uint32_t totalPageCount = 0;
820 __block uint32_t lastFileOffsetEnd = 0;
821 __block uint64_t lastVmAddrEnd = 0;
822 BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment , dsegs , segCount *3); // room for padding
823 mh ->forEachSegment(^(const MachOAnalyzer::SegmentInfo & info , bool& stop ) {
// Gap in the file layout → emit a file-padding pseudo-segment.
824 if ( (info .fileOffset != 0) && (info .fileOffset != lastFileOffsetEnd ) ) {
825 Image::DiskSegment filePadding ;
826 filePadding .filePageCount = (info .fileOffset - lastFileOffsetEnd )/pageSize ;
827 filePadding .vmPageCount = 0;
828 filePadding .permissions = 0;
829 filePadding .paddingNotSeg = 1;
830 dsegs [diskSegIndex ++] = filePadding ;
// Gap in the vm layout → emit a vm-padding pseudo-segment.
832 if ( (lastVmAddrEnd != 0) && (info .vmAddr != lastVmAddrEnd ) ) {
833 Image::DiskSegment vmPadding ;
834 vmPadding .filePageCount = 0;
835 vmPadding .vmPageCount = (info .vmAddr - lastVmAddrEnd )/pageSize ;
836 vmPadding .permissions = 0;
837 vmPadding .paddingNotSeg = 1;
838 dsegs [diskSegIndex ++] = vmPadding ;
839 totalPageCount += vmPadding .vmPageCount ;
// The real segment, rounded up to whole pages; prot masked to rwx bits.
842 Image::DiskSegment segInfo ;
843 segInfo .filePageCount = (info .fileSize +pageSize -1)/pageSize ;
844 segInfo .vmPageCount = (info .vmSize +pageSize -1)/pageSize ;
845 segInfo .permissions = info .protections & 7;
846 segInfo .paddingNotSeg = 0;
847 dsegs [diskSegIndex ++] = segInfo ;
848 totalPageCount += segInfo .vmPageCount ;
849 if ( info .fileSize != 0 )
850 lastFileOffsetEnd = (uint32_t)(info .fileOffset + info .fileSize );
851 if ( info .vmSize != 0 )
852 lastVmAddrEnd = info .vmAddr + info .vmSize ;
855 writer .setDiskSegments(dsegs , diskSegIndex );
// Extracts __interpose-style tuples (pairs of {replacement, original}
// pointers) from each interposing section of `mh`, resolving each side via
// the image's rebase/bind/chained-fixup records: rebases fill in the new
// implementation (an image-relative target), binds fill in the stock
// implementation. Tuples left fully "absolute 0" (unset or weak-import NULL)
// are filtered out; surviving tuples are written to the closure, and any
// stock implementation living in the dyld shared cache additionally gets a
// Closure::PatchEntry so the cache is patched at launch.
// NOTE(review): lossy capture — embedded line numbers skip throughout
// (continue statements, #else/#endif lines, `imageIndex` declaration at
// original line 978, closing braces), so this text is not compilable.
859 void ClosureBuilder::addInterposingTuples(LaunchClosureWriter & writer , const Image * image , const MachOAnalyzer * mh )
861 const unsigned pointerSize = mh ->pointerSize();
862 mh ->forEachInterposingSection(_diag , ^(uint64_t sectVmOffset , uint64_t sectVmSize , bool &stop ) {
863 const uint32_t entrySize = 2*pointerSize ;
864 const uint32_t tupleCount = (uint32_t)(sectVmSize /entrySize );
865 BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple , resolvedTuples , tupleCount );
// Initialize every tuple to "absolute 0" so unset sides are detectable.
866 for (uint32_t i =0; i < tupleCount ; ++i ) {
867 resolvedTuples [i ].stockImplementation .absolute .kind = Image::ResolvedSymbolTarget::kindAbsolute ;
868 resolvedTuples [i ].stockImplementation .absolute .value = 0;
869 resolvedTuples [i ].newImplementation .absolute .kind = Image::ResolvedSymbolTarget::kindAbsolute ;
870 resolvedTuples [i ].newImplementation .absolute .value = 0;
// Fixup walk: first block handles classic rebases (first pointer of a
// tuple = new implementation), second handles binds (second pointer =
// stock implementation), third handles chained fixups.
872 image ->forEachFixup(^(uint64_t imageOffsetToRebase , bool &rebaseStop ) {
873 if ( imageOffsetToRebase < sectVmOffset )
875 if ( imageOffsetToRebase > sectVmOffset +sectVmSize )
877 uint64_t offsetIntoSection = imageOffsetToRebase - sectVmOffset ;
878 uint64_t rebaseIndex = offsetIntoSection /entrySize ;
879 if ( rebaseIndex *entrySize != offsetIntoSection )
881 const void* content = (uint8_t*)mh + imageOffsetToRebase ;
882 uint64_t unslidTargetAddress = mh ->is64() ? *(uint64_t*)content : *(uint32_t*)content ;
883 resolvedTuples [rebaseIndex ].newImplementation .image .kind = Image::ResolvedSymbolTarget::kindImage ;
884 resolvedTuples [rebaseIndex ].newImplementation .image .imageNum = image ->imageNum();
885 resolvedTuples [rebaseIndex ].newImplementation .image .offset = unslidTargetAddress - mh ->preferredLoadAddress();
886 }, ^(uint64_t imageOffsetToBind , Image::ResolvedSymbolTarget bindTarget , bool &bindStop ) {
887 if ( imageOffsetToBind < sectVmOffset )
889 if ( imageOffsetToBind > sectVmOffset +sectVmSize )
891 uint64_t offsetIntoSection = imageOffsetToBind - sectVmOffset ;
892 uint64_t bindIndex = offsetIntoSection /entrySize ;
893 if ( bindIndex *entrySize + pointerSize != offsetIntoSection )
895 resolvedTuples [bindIndex ].stockImplementation = bindTarget ;
896 }, ^(uint64_t imageOffsetStart , const Array <Image::ResolvedSymbolTarget >& targets , bool& chainStop ) {
897 // walk each fixup in the chain
898 image ->forEachChainedFixup((void*)mh , imageOffsetStart , ^(uint64_t* fixupLoc , MachOLoaded::ChainedFixupPointerOnDisk fixupInfo , bool& stopChain ) {
899 uint64_t imageOffsetToFixup = (uint64_t)fixupLoc - (uint64_t)mh ;
// Authenticated (arm64e) pointer chain entries.
900 if ( fixupInfo .authRebase .auth ) {
901 #if SUPPORT_ARCH_arm64e
902 if ( fixupInfo .authBind .bind ) {
903 closure::Image::ResolvedSymbolTarget bindTarget = targets [fixupInfo .authBind .ordinal ];
904 if ( imageOffsetToFixup < sectVmOffset )
906 if ( imageOffsetToFixup > sectVmOffset +sectVmSize )
908 uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset ;
909 uint64_t bindIndex = offsetIntoSection /entrySize ;
910 if ( bindIndex *entrySize + pointerSize != offsetIntoSection )
912 resolvedTuples [bindIndex ].stockImplementation = bindTarget ;
915 if ( imageOffsetToFixup < sectVmOffset )
917 if ( imageOffsetToFixup > sectVmOffset +sectVmSize )
919 uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset ;
920 uint64_t rebaseIndex = offsetIntoSection /entrySize ;
921 if ( rebaseIndex *entrySize != offsetIntoSection )
923 uint64_t unslidTargetAddress = (uint64_t)mh ->preferredLoadAddress() + fixupInfo .authRebase .target ;
924 resolvedTuples [rebaseIndex ].newImplementation .image .kind = Image::ResolvedSymbolTarget::kindImage ;
925 resolvedTuples [rebaseIndex ].newImplementation .image .imageNum = image ->imageNum();
926 resolvedTuples [rebaseIndex ].newImplementation .image .offset = unslidTargetAddress - mh ->preferredLoadAddress();
// Without SUPPORT_ARCH_arm64e an auth pointer is a malformed chain.
929 _diag .error("malformed chained pointer");
// Plain (non-auth) chain entries: bind or rebase.
935 if ( fixupInfo .plainRebase .bind ) {
936 closure::Image::ResolvedSymbolTarget bindTarget = targets [fixupInfo .plainBind .ordinal ];
937 if ( imageOffsetToFixup < sectVmOffset )
939 if ( imageOffsetToFixup > sectVmOffset +sectVmSize )
941 uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset ;
942 uint64_t bindIndex = offsetIntoSection /entrySize ;
943 if ( bindIndex *entrySize + pointerSize != offsetIntoSection )
945 resolvedTuples [bindIndex ].stockImplementation = bindTarget ;
948 if ( imageOffsetToFixup < sectVmOffset )
950 if ( imageOffsetToFixup > sectVmOffset +sectVmSize )
952 uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset ;
953 uint64_t rebaseIndex = offsetIntoSection /entrySize ;
954 if ( rebaseIndex *entrySize != offsetIntoSection )
956 uint64_t unslidTargetAddress = fixupInfo .plainRebase .signExtendedTarget();
957 resolvedTuples [rebaseIndex ].newImplementation .image .kind = Image::ResolvedSymbolTarget::kindImage ;
958 resolvedTuples [rebaseIndex ].newImplementation .image .imageNum = image ->imageNum();
959 resolvedTuples [rebaseIndex ].newImplementation .image .offset = unslidTargetAddress - mh ->preferredLoadAddress();
965 // remove any tuples in which both sides are not set (or target is weak-import NULL)
966 STACK_ALLOC_ARRAY(InterposingTuple , goodTuples , tupleCount );
967 for (uint32_t i =0; i < tupleCount ; ++i ) {
968 if ( (resolvedTuples [i ].stockImplementation .image .kind != Image::ResolvedSymbolTarget::kindAbsolute )
969 && (resolvedTuples [i ].newImplementation .image .kind != Image::ResolvedSymbolTarget::kindAbsolute ) )
970 goodTuples .push_back(resolvedTuples [i ]);
972 writer .addInterposingTuples(goodTuples );
974 // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
975 STACK_ALLOC_ARRAY(Closure::PatchEntry , patches , goodTuples .count());
976 for (const InterposingTuple & aTuple : goodTuples ) {
977 if ( aTuple .stockImplementation .sharedCache .kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
979 assert(_dyldCache ->addressInText((uint32_t)aTuple .stockImplementation .sharedCache .offset , &imageIndex ));
980 ImageNum imageInCache = imageIndex +1;
981 Closure::PatchEntry patch ;
982 patch .exportCacheOffset = (uint32_t)aTuple .stockImplementation .sharedCache .offset ;
983 patch .overriddenDylibInCache = imageInCache ;
984 patch .replacement = aTuple .newImplementation ;
985 patches .push_back(patch );
988 writer .addCachePatches(patches );
// Scans every rebase location in 'mh' and run-length compresses them into
// Image::RebasePattern entries {repeatCount, contigCount, skipCount}, which are
// stored into the Image being built via 'writer'. For i386 images, text relocs
// are additionally encoded as Image::TextFixupPattern entries.
// NOTE(review): this chunk lost physical lines during extraction (closing
// braces / else / return lines are missing between the numbered fragments);
// comments describe only what the visible statements show — verify against the
// original file before relying on control-flow details.
992 void ClosureBuilder::addRebaseInfo(ImageWriter
& writer
, const MachOAnalyzer
* mh
)
994 const uint64_t ptrSize
= mh
->pointerSize();
// maxLeapPattern encodes the largest single-step advance: 0xFFFFF repeats of
// skip 0xF with no contiguous run (contigCount 0).
995 Image::RebasePattern maxLeapPattern
= { 0xFFFFF, 0, 0xF };
996 const uint64_t maxLeapCount
= maxLeapPattern
.repeatCount
* maxLeapPattern
.skipCount
;
997 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern
, rebaseEntries
, 1024);
// lastLocation starts one pointer-size before offset 0, so a rebase at
// runtime offset 0 produces delta == ptrSize and takes the contiguous path.
998 __block
uint64_t lastLocation
= -ptrSize
;
999 mh
->forEachRebase(_diag
, true, ^(uint64_t runtimeOffset
, bool& stop
) {
1000 const uint64_t delta
= runtimeOffset
- lastLocation
;
1001 const bool aligned
= ((delta
% ptrSize
) == 0);
1002 if ( delta
== ptrSize
) {
1003 // this rebase location is contiguous to previous
1004 if ( rebaseEntries
.back().contigCount
< 255 ) {
1005 // just bump previous's contigCount
1006 rebaseEntries
.back().contigCount
++;
1009 // previous contiguous run already has max 255, so start a new run
1010 rebaseEntries
.push_back({ 1, 1, 0 });
1013 else if ( aligned
&& (delta
<= (ptrSize
*15)) ) {
1014 // this rebase is within skip distance of last rebase
1015 rebaseEntries
.back().skipCount
= (uint8_t)((delta
-ptrSize
)/ptrSize
);
1016 int lastIndex
= (int)(rebaseEntries
.count() - 1);
1017 if ( lastIndex
> 1 ) {
1018 if ( (rebaseEntries
[lastIndex
].contigCount
== rebaseEntries
[lastIndex
-1].contigCount
)
1019 && (rebaseEntries
[lastIndex
].skipCount
== rebaseEntries
[lastIndex
-1].skipCount
) ) {
1020 // this entry as same contig and skip as prev, so remove it and bump repeat count of previous
1021 rebaseEntries
.pop_back();
1022 rebaseEntries
.back().repeatCount
+= 1;
1025 rebaseEntries
.push_back({ 1, 1, 0 });
// Fallback path: the gap to the next rebase is large or unaligned, so emit
// "advance" patterns (repeatCount*skipCount with contigCount 0) to cover it.
1028 uint64_t advanceCount
= (delta
-ptrSize
);
1029 if ( (runtimeOffset
< lastLocation
) && (lastLocation
!= -ptrSize
) ) {
1030 // out of rebases! handle this be resting rebase offset to zero
1031 rebaseEntries
.push_back({ 0, 0, 0 });
1032 advanceCount
= runtimeOffset
;
1034 // if next rebase is too far to reach with one pattern, use series
1035 while ( advanceCount
> maxLeapCount
) {
1036 rebaseEntries
.push_back(maxLeapPattern
);
1037 advanceCount
-= maxLeapCount
;
1039 // if next rebase is not reachable with skipCount==1 or skipCount==15, add intermediate
1040 while ( advanceCount
> maxLeapPattern
.repeatCount
) {
1041 uint64_t count
= advanceCount
/ maxLeapPattern
.skipCount
;
1042 rebaseEntries
.push_back({ (uint32_t)count
, 0, maxLeapPattern
.skipCount
});
1043 advanceCount
-= (count
*maxLeapPattern
.skipCount
);
1045 if ( advanceCount
!= 0 )
1046 rebaseEntries
.push_back({ (uint32_t)advanceCount
, 0, 1 });
1047 rebaseEntries
.push_back({ 1, 1, 0 });
1049 lastLocation
= runtimeOffset
;
1051 writer
.setRebaseInfo(rebaseEntries
);
1053 // i386 programs also use text relocs to rebase stubs
1054 if ( mh
->cputype
== CPU_TYPE_I386
) {
1055 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern
, textRebases
, 512);
1056 __block
uint64_t lastOffset
= -4;
1057 mh
->forEachTextRebase(_diag
, ^(uint64_t runtimeOffset
, bool& stop
) {
// Keep at least one free slot; otherwise record an error (the early-exit
// line after this error is among the lines lost in extraction).
1058 if ( textRebases
.freeCount() < 2 ) {
1059 _diag
.error("too many text rebase locations (%ld) in %s", textRebases
.maxCount(), writer
.currentImage()->path());
1062 bool mergedIntoPrevious
= false;
1063 if ( (runtimeOffset
> lastOffset
) && !textRebases
.empty() ) {
1064 uint32_t skipAmount
= (uint32_t)(runtimeOffset
- lastOffset
);
1065 if ( (textRebases
.back().repeatCount
== 1) && (textRebases
.back().skipCount
== 0) ) {
1066 textRebases
.back().repeatCount
= 2;
1067 textRebases
.back().skipCount
= skipAmount
;
1068 mergedIntoPrevious
= true;
1070 else if ( textRebases
.back().skipCount
== skipAmount
) {
1071 textRebases
.back().repeatCount
+= 1;
1072 mergedIntoPrevious
= true;
1075 if ( !mergedIntoPrevious
) {
1076 Image::TextFixupPattern pattern
;
1077 pattern
.target
.raw
= 0;
1078 pattern
.startVmOffset
= (uint32_t)runtimeOffset
;
1079 pattern
.repeatCount
= 1;
1080 pattern
.skipCount
= 0;
1081 textRebases
.push_back(pattern
);
1083 lastOffset
= runtimeOffset
;
1085 writer
.setTextRebaseInfo(textRebases
);
// Enumerates all bind fixups in 'forImage', invoking 'handler' with the
// resolved target for each location and 'strongHandler' for each strong-def
// symbol. Consecutive binds to the same (symbolName, libOrdinal, addend)
// reuse the previously resolved target/targetInfo instead of re-resolving.
// NOTE(review): lines between the numbered fragments (e.g. original 1108-1111,
// likely the findSymbol-failure path) were lost in extraction.
1090 void ClosureBuilder::forEachBind(BuilderLoadedImage
& forImage
, void (^handler
)(uint64_t runtimeOffset
, Image::ResolvedSymbolTarget target
, const ResolvedTargetInfo
& targetInfo
, bool& stop
),
1091 void (^strongHandler
)(const char* strongSymbolName
))
// lastLibOrdinal starts at 256 (outside the valid int8 ordinal range) so the
// first bind never matches the "same as last" fast path.
1093 __block
int lastLibOrdinal
= 256;
1094 __block
const char* lastSymbolName
= nullptr;
1095 __block
uint64_t lastAddend
= 0;
1096 __block
Image::ResolvedSymbolTarget target
;
1097 __block ResolvedTargetInfo targetInfo
;
1098 forImage
.loadAddress()->forEachBind(_diag
, ^(uint64_t runtimeOffset
, int libOrdinal
, const char* symbolName
, bool weakImport
, uint64_t addend
, bool& stop
) {
// NOTE(review): symbol names are compared by pointer here, not strcmp —
// this relies on the enumerator handing back the same string pointer for
// repeated uses of a symbol.
1099 if ( (symbolName
== lastSymbolName
) && (libOrdinal
== lastLibOrdinal
) && (addend
== lastAddend
) ) {
1100 // same symbol lookup as last location
1101 handler(runtimeOffset
, target
, targetInfo
, stop
);
1103 else if ( findSymbol(forImage
, libOrdinal
, symbolName
, weakImport
, addend
, target
, targetInfo
) ) {
1104 handler(runtimeOffset
, target
, targetInfo
, stop
);
1105 lastSymbolName
= symbolName
;
1106 lastLibOrdinal
= libOrdinal
;
1107 lastAddend
= addend
;
1112 }, ^(const char* symbolName
) {
1113 strongHandler(symbolName
);
// Compresses all bind fixups of 'forImage' into Image::BindPattern entries
// (merging runs with equal target and regular stride) and stores them via
// 'writer'. Strong symbol definitions found in the image are recorded as
// _weakDefCacheOverrides patches so weak impls in the dyld cache get fixed up.
// NOTE(review): physical lines were lost in extraction (closing braces /
// early-return lines between numbered fragments); control flow around the
// weak-bind-coalesce "update existing pattern" path cannot be fully verified.
1117 void ClosureBuilder::addBindInfo(ImageWriter
& writer
, BuilderLoadedImage
& forImage
)
1119 const uint32_t ptrSize
= forImage
.loadAddress()->pointerSize();
1120 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern
, binds
, 512);
// lastOffset starts one pointer before offset 0 so the first bind cannot
// accidentally merge with a non-existent previous entry.
1121 __block
uint64_t lastOffset
= -ptrSize
;
1122 __block
Image::ResolvedSymbolTarget lastTarget
= { {0, 0} };
1123 forEachBind(forImage
, ^(uint64_t runtimeOffset
, Image::ResolvedSymbolTarget target
, const ResolvedTargetInfo
& targetInfo
, bool& stop
) {
1124 if ( targetInfo
.weakBindCoalese
) {
1125 // may be previous bind to this location
1126 // if so, update that rather create new BindPattern
1127 for (Image::BindPattern
& aBind
: binds
) {
1128 if ( (aBind
.startVmOffset
== runtimeOffset
) && (aBind
.repeatCount
== 1) && (aBind
.skipCount
== 0) ) {
1129 aBind
.target
= target
;
1134 bool mergedIntoPrevious
= false;
// Try to merge this bind into the previous pattern when the target repeats
// at a regular, pointer-aligned stride.
1135 if ( !mergedIntoPrevious
&& (target
== lastTarget
) && (runtimeOffset
> lastOffset
) && !binds
.empty() ) {
1136 uint64_t skipAmount
= (runtimeOffset
- lastOffset
- ptrSize
)/ptrSize
;
1137 if ( skipAmount
*ptrSize
!= (runtimeOffset
- lastOffset
- ptrSize
) ) {
1138 // misaligned pointer means we cannot optimize
1141 if ( (binds
.back().repeatCount
== 1) && (binds
.back().skipCount
== 0) && (skipAmount
<= 255) ) {
1142 binds
.back().repeatCount
= 2;
1143 binds
.back().skipCount
= skipAmount
;
1144 assert(binds
.back().skipCount
== skipAmount
); // check overflow
1145 mergedIntoPrevious
= true;
1147 else if ( (binds
.back().skipCount
== skipAmount
) && (binds
.back().repeatCount
< 0xfff) ) {
1148 uint32_t prevRepeatCount
= binds
.back().repeatCount
;
1149 binds
.back().repeatCount
+= 1;
1150 assert(binds
.back().repeatCount
> prevRepeatCount
); // check overflow
1151 mergedIntoPrevious
= true;
1155 if ( (target
== lastTarget
) && (runtimeOffset
== lastOffset
) && !binds
.empty() ) {
1156 // duplicate bind for same location, ignore this one
1157 mergedIntoPrevious
= true;
1159 if ( !mergedIntoPrevious
) {
1160 Image::BindPattern pattern
;
1161 pattern
.target
= target
;
1162 pattern
.startVmOffset
= runtimeOffset
;
1163 pattern
.repeatCount
= 1;
1164 pattern
.skipCount
= 0;
1165 assert(pattern
.startVmOffset
== runtimeOffset
);
1166 binds
.push_back(pattern
);
1168 lastTarget
= target
;
1169 lastOffset
= runtimeOffset
;
1170 }, ^(const char* strongSymbolName
) {
1171 if ( !_makingDyldCacheImages
) {
1172 // something has a strong symbol definition that may override a weak impl in the dyld cache
1173 Image::ResolvedSymbolTarget strongOverride
;
1174 ResolvedTargetInfo strongTargetInfo
;
1175 if ( findSymbolInImage(forImage
.loadAddress(), strongSymbolName
, 0, false, strongOverride
, strongTargetInfo
) ) {
1176 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1177 if ( li
.loadAddress()->inDyldCache() && li
.loadAddress()->hasWeakDefs() ) {
1178 Image::ResolvedSymbolTarget implInCache
;
1179 ResolvedTargetInfo implInCacheInfo
;
1180 if ( findSymbolInImage(li
.loadAddress(), strongSymbolName
, 0, false, implInCache
, implInCacheInfo
) ) {
1181 // found another instance in some dylib in dyld cache, will need to patch it
1182 Closure::PatchEntry patch
;
1183 patch
.exportCacheOffset
= (uint32_t)implInCache
.sharedCache
.offset
;
1184 patch
.overriddenDylibInCache
= li
.imageNum
;
1185 patch
.replacement
= strongOverride
;
1186 _weakDefCacheOverrides
.push_back(patch
);
1193 writer
.setBindInfo(binds
);
// Reports every rebase and bind location of 'forImage' to the registered
// _handlers callbacks (rebase() / bind()), instead of encoding them into the
// closure. Callers must ensure _handlers is non-null — it is dereferenced
// unconditionally here.
// NOTE(review): the body of the i386 branch at the end (original lines after
// 1210) was lost in extraction.
1196 void ClosureBuilder::reportRebasesAndBinds(ImageWriter
& writer
, BuilderLoadedImage
& forImage
)
1198 // report all rebases
1199 forImage
.loadAddress()->forEachRebase(_diag
, true, ^(uint64_t runtimeOffset
, bool& stop
) {
1200 _handlers
->rebase(forImage
.imageNum
, forImage
.loadAddress(), (uint32_t)runtimeOffset
);
// report all binds (strong-def callback intentionally does nothing here)
1204 forEachBind(forImage
, ^(uint64_t runtimeOffset
, Image::ResolvedSymbolTarget target
, const ResolvedTargetInfo
& targetInfo
, bool& stop
) {
1205 _handlers
->bind(forImage
.imageNum
, forImage
.loadAddress(), (uint32_t)runtimeOffset
, target
, targetInfo
);
1207 ^(const char* strongSymbolName
) {});
1209 // i386 programs also use text relocs to rebase stubs
1210 if ( forImage
.loadAddress()->cputype
== CPU_TYPE_I386
) {
1215 // These are mangled symbols for all the variants of operator new and delete
1216 // which a main executable can define (non-weak) and override the
1217 // weak-def implementation in the OS.
// NOTE(review): each name carries the Mach-O leading underscore in front of
// the Itanium "_Z" mangling prefix (e.g. "__Znwm" == symbol for
// operator new(size_t)) — confirm against the symbol table convention used
// by findExportedSymbol().
1218 static const char* sTreatAsWeak
[] = {
1219 "__Znwm", "__ZnwmRKSt9nothrow_t",
1220 "__Znam", "__ZnamRKSt9nothrow_t",
1221 "__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
1222 "__ZdaPv", "__ZdaPvRKSt9nothrow_t", "__ZdaPvm",
1223 "__ZnwmSt11align_val_t", "__ZnwmSt11align_val_tRKSt9nothrow_t",
1224 "__ZnamSt11align_val_t", "__ZnamSt11align_val_tRKSt9nothrow_t",
1225 "__ZdlPvSt11align_val_t", "__ZdlPvSt11align_val_tRKSt9nothrow_t", "__ZdlPvmSt11align_val_t",
1226 "__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
// For an image using chained fixups: builds the array of fixup-chain start
// offsets and the array of resolved bind targets, stores both in the Image via
// writer.setChainedFixups(), and (when building cache images) reports them to
// _handlers->chainedBind(). Also collects weak-def symbol names that need
// later coalescing into _weakDefsFromChainedBinds.
// NOTE(review): physical lines were lost in extraction (early returns after
// _diag errors, closing braces); verify control flow against the original.
1230 void ClosureBuilder::addChainedFixupInfo(ImageWriter
& writer
, const BuilderLoadedImage
& forImage
)
1232 // calculate max page starts
1233 __block
uint32_t dataPageCount
= 1;
1234 forImage
.loadAddress()->forEachSegment(^(const dyld3::MachOFile::SegmentInfo
& info
, bool& stop
) {
// only writable segments contain chained-fixup pages; round fileSize up to
// whole 4KB pages
1235 if ( info
.protections
& VM_PROT_WRITE
) {
1236 dataPageCount
+= ((info
.fileSize
+4095) / 4096);
1240 // build array of starts
1241 STACK_ALLOC_ARRAY(uint64_t, starts
, dataPageCount
);
1242 forImage
.loadAddress()->forEachChainedFixupStart(_diag
, ^(uint64_t runtimeOffset
, bool& stop
) {
1243 starts
.push_back(runtimeOffset
);
1246 // build array of targets
1247 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget
, targets
, 1024);
1248 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ResolvedTargetInfo
, targetInfos
, 1024);
1249 forImage
.loadAddress()->forEachChainedFixupTarget(_diag
, ^(int libOrdinal
, const char* symbolName
, uint64_t addend
, bool weakImport
, bool& stop
) {
1250 Image::ResolvedSymbolTarget target
;
1251 ResolvedTargetInfo targetInfo
;
1252 if ( !findSymbol(forImage
, libOrdinal
, symbolName
, weakImport
, addend
, target
, targetInfo
) ) {
1253 const char* expectedInPath
= forImage
.loadAddress()->dependentDylibLoadPath(libOrdinal
-1);
1254 _diag
.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName
, expectedInPath
, forImage
.path());
1258 if ( libOrdinal
== BIND_SPECIAL_DYLIB_WEAK_DEF_COALESCE
) {
1259 // add if not already in array
1260 bool alreadyInArray
= false;
1261 for (const char* sym
: _weakDefsFromChainedBinds
) {
1262 if ( strcmp(sym
, symbolName
) == 0 ) {
1263 alreadyInArray
= true;
1267 if ( !alreadyInArray
)
1268 _weakDefsFromChainedBinds
.push_back(symbolName
);
1270 targets
.push_back(target
);
1271 targetInfos
.push_back(targetInfo
);
1273 if ( _diag
.hasError() )
1276 if ( _handlers
!= nullptr )
1277 _handlers
->chainedBind(forImage
.imageNum
, forImage
.loadAddress(), starts
, targets
, targetInfos
);
1279 writer
.setChainedFixups(starts
, targets
); // store results in Image object
1281 // with chained fixups, main executable may define symbol that overrides weak-defs but has no fixup
1282 if ( _isLaunchClosure
&& forImage
.loadAddress()->hasWeakDefs() && forImage
.loadAddress()->isMainExecutable() ) {
1283 for (const char* weakSymbolName
: sTreatAsWeak
) {
1284 Diagnostics exportDiag
;
1285 dyld3::MachOAnalyzer::FoundSymbol foundInfo
;
1286 if ( forImage
.loadAddress()->findExportedSymbol(exportDiag
, weakSymbolName
, foundInfo
, nullptr) ) {
1287 _weakDefsFromChainedBinds
.push_back(weakSymbolName
);
// Looks up an exported symbol in a single image (optionally following
// re-exported dylibs via findDependent()) and fills in 'target' with one of
// three encodings: absolute value, shared-cache offset, or image-num+offset.
// Also fills 'targetInfo' (foundInDylib, foundSymbolName, addend, isWeakDef).
// NOTE(review): the return statements (true/false) were among the physical
// lines lost in extraction; presumably returns whether the symbol was found.
1294 bool ClosureBuilder::findSymbolInImage(const MachOAnalyzer
* macho
, const char* symbolName
, uint64_t addend
, bool followReExports
,
1295 Image::ResolvedSymbolTarget
& target
, ResolvedTargetInfo
& targetInfo
)
1297 targetInfo
.foundInDylib
= nullptr;
1298 targetInfo
.requestedSymbolName
= symbolName
;
1299 targetInfo
.addend
= addend
;
1300 targetInfo
.isWeakDef
= false;
// reexportFinder maps a dependency index back to a loaded image so the
// export-trie walk can traverse re-exported dylibs
1301 MachOLoaded::DependentToMachOLoaded reexportFinder
= ^(const MachOLoaded
* mh
, uint32_t depIndex
) {
1302 return (const MachOLoaded
*)findDependent(mh
, depIndex
);
1304 MachOAnalyzer::DependentToMachOLoaded finder
= nullptr;
1305 if ( followReExports
)
1306 finder
= reexportFinder
;
1308 dyld3::MachOAnalyzer::FoundSymbol foundInfo
;
1309 if ( macho
->findExportedSymbol(_diag
, symbolName
, foundInfo
, finder
) ) {
1310 const MachOAnalyzer
* impDylib
= (const MachOAnalyzer
*)foundInfo
.foundInDylib
;
1311 targetInfo
.foundInDylib
= foundInfo
.foundInDylib
;
1312 targetInfo
.foundSymbolName
= foundInfo
.foundSymbolName
;
1313 if ( foundInfo
.isWeakDef
)
1314 targetInfo
.isWeakDef
= true;
1315 if ( foundInfo
.kind
== MachOAnalyzer::FoundSymbol::Kind::absolute
) {
1316 target
.absolute
.kind
= Image::ResolvedSymbolTarget::kindAbsolute
;
1317 target
.absolute
.value
= foundInfo
.value
+ addend
;
1319 else if ( impDylib
->inDyldCache() ) {
// encode as offset from the start of the shared cache
1320 target
.sharedCache
.kind
= Image::ResolvedSymbolTarget::kindSharedCache
;
1321 target
.sharedCache
.offset
= (uint8_t*)impDylib
- (uint8_t*)_dyldCache
+ foundInfo
.value
+ addend
;
// otherwise encode as (imageNum, offset) into a closure-loaded image
1324 target
.image
.kind
= Image::ResolvedSymbolTarget::kindImage
;
1325 target
.image
.imageNum
= findLoadedImage(impDylib
).imageNum
;
1326 target
.image
.offset
= foundInfo
.value
+ addend
;
// Resolves one bind (symbolName + libOrdinal) from 'fromImage' to a concrete
// target. Handles the special ordinals: flat-namespace lookup (search all
// non-RTLD_LOCAL images, weak-import resolves to absolute 0), weak-def
// coalescing (first weak impl wins unless a later strong impl overrides; any
// dyld-cache dylib using the symbol gets a patch entry), self, and
// main-executable. Positive ordinals index fromImage's dependents. On failure
// sets _diag and, for launch closures, fills _launchErrorInfo.
// NOTE(review): many physical lines (returns, else/brace lines, the weak-def
// "use first found" assignments around original 1372/1382) were lost in
// extraction; comments describe only the visible statements.
1333 bool ClosureBuilder::findSymbol(const BuilderLoadedImage
& fromImage
, int libOrdinal
, const char* symbolName
, bool weakImport
, uint64_t addend
,
1334 Image::ResolvedSymbolTarget
& target
, ResolvedTargetInfo
& targetInfo
)
1336 targetInfo
.weakBindCoalese
= false;
1337 targetInfo
.weakBindSameImage
= false;
1338 targetInfo
.requestedSymbolName
= symbolName
;
1339 targetInfo
.libOrdinal
= libOrdinal
;
1340 if ( libOrdinal
== BIND_SPECIAL_DYLIB_FLAT_LOOKUP
) {
1341 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1342 if ( !li
.rtldLocal
&& findSymbolInImage(li
.loadAddress(), symbolName
, addend
, true, target
, targetInfo
) )
// not found anywhere: weak-import flat lookups resolve to absolute 0
1346 target
.absolute
.kind
= Image::ResolvedSymbolTarget::kindAbsolute
;
1347 target
.absolute
.value
= 0;
1350 _diag
.error("symbol '%s' not found, expected in flat namespace by '%s'", symbolName
, fromImage
.path());
1352 else if ( libOrdinal
== BIND_SPECIAL_DYLIB_WEAK_DEF_COALESCE
) {
1353 // to resolve weakDef coalesing, we need to search all images in order and use first definition
1354 // but, if first found is a weakDef, a later non-weak def overrides that
1355 bool foundWeakDefImpl
= false;
1356 bool foundStrongDefImpl
= false;
1357 bool foundImpl
= false;
1358 Image::ResolvedSymbolTarget aTarget
;
1359 ResolvedTargetInfo aTargetInfo
;
1360 STACK_ALLOC_ARRAY(const BuilderLoadedImage
*, cachedDylibsUsingSymbol
, 1024);
1361 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1362 // only search images with weak-defs that were not loaded with RTLD_LOCAL
1363 if ( li
.loadAddress()->hasWeakDefs() && !li
.rtldLocal
) {
1364 if ( findSymbolInImage(li
.loadAddress(), symbolName
, addend
, false, aTarget
, aTargetInfo
) ) {
1366 // with non-chained images, weak-defs first have a rebase to their local impl, and a weak-bind which allows earlier impls to override
1367 if ( !li
.loadAddress()->hasChainedFixups() && (aTargetInfo
.foundInDylib
== fromImage
.loadAddress()) )
1368 targetInfo
.weakBindSameImage
= true;
1369 if ( aTargetInfo
.isWeakDef
) {
1370 // found a weakDef impl, if this is first found, set target to this
1371 if ( !foundWeakDefImpl
&& !foundStrongDefImpl
) {
1373 targetInfo
= aTargetInfo
;
1375 foundWeakDefImpl
= true;
1378 // found a non-weak impl, use this (unless early strong found)
1379 if ( !foundStrongDefImpl
) {
1381 targetInfo
= aTargetInfo
;
1383 foundStrongDefImpl
= true;
// remember dyld-cache dylibs that use this symbol so they can be patched
// to the final coalesced target below
1386 if ( foundImpl
&& !_makingDyldCacheImages
&& li
.loadAddress()->inDyldCache() )
1387 cachedDylibsUsingSymbol
.push_back(&li
);
1390 // now that final target found, if any dylib in dyld cache uses that symbol name, redirect it to new target
1391 if ( !cachedDylibsUsingSymbol
.empty() ) {
1392 for (const BuilderLoadedImage
* li
: cachedDylibsUsingSymbol
) {
1393 Image::ResolvedSymbolTarget implInCache
;
1394 ResolvedTargetInfo implInCacheInfo
;
1395 if ( findSymbolInImage(li
->loadAddress(), symbolName
, addend
, false, implInCache
, implInCacheInfo
) ) {
1396 if ( implInCache
!= target
) {
1397 // found another instance in some dylib in dyld cache, will need to patch it
1398 Closure::PatchEntry patch
;
1399 patch
.exportCacheOffset
= (uint32_t)implInCache
.sharedCache
.offset
;
1400 patch
.overriddenDylibInCache
= li
->imageNum
;
1401 patch
.replacement
= target
;
1402 _weakDefCacheOverrides
.push_back(patch
);
1407 targetInfo
.weakBindCoalese
= true;
1411 _diag
.error("symbol '%s' not found, expected to be weak-def coalesced", symbolName
);
// ordinal-based lookup: positive ordinals index into fromImage's dependents
1414 const BuilderLoadedImage
* targetLoadedImage
= nullptr;
1415 if ( (libOrdinal
> 0) && (libOrdinal
<= (int)fromImage
.dependents
.count()) ) {
1416 ImageNum childNum
= fromImage
.dependents
[libOrdinal
- 1].imageNum();
1417 if ( childNum
!= kMissingWeakLinkedImage
) {
1418 targetLoadedImage
= &findLoadedImage(childNum
);
1421 else if ( libOrdinal
== BIND_SPECIAL_DYLIB_SELF
) {
1422 targetLoadedImage
= &fromImage
;
1424 else if ( libOrdinal
== BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE
) {
1425 targetLoadedImage
= &_loadedImages
[_mainProgLoadIndex
];
1428 _diag
.error("unknown special ordinal %d in %s", libOrdinal
, fromImage
.path());
1432 if ( targetLoadedImage
!= nullptr ) {
1433 if ( findSymbolInImage(targetLoadedImage
->loadAddress(), symbolName
, addend
, true, target
, targetInfo
) )
// weak-import symbols that are missing resolve to absolute 0
1438 target
.absolute
.kind
= Image::ResolvedSymbolTarget::kindAbsolute
;
1439 target
.absolute
.value
= 0;
1442 const char* expectedInPath
= targetLoadedImage
? targetLoadedImage
->path() : "unknown";
1443 _diag
.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName
, expectedInPath
, fromImage
.path());
1444 if ( _launchErrorInfo
!= nullptr ) {
1445 _launchErrorInfo
->kind
= DYLD_EXIT_REASON_SYMBOL_MISSING
;
1446 _launchErrorInfo
->clientOfDylibPath
= fromImage
.path();
1447 _launchErrorInfo
->targetDylibPath
= expectedInPath
;
1448 _launchErrorInfo
->symbol
= symbolName
;
// Depth-first walk of the dependency graph starting at loadIndex, assigning
// bottom-up initializer order numbers (initInfos[i].initOrder). Upward edges
// are not recursed into; instead the target is marked danglingUpward so the
// caller (computeInitOrder) can process it in a later pass. Bad images set
// hasError (the assignment line itself was lost in extraction — verify).
1455 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex
, InitInfo initInfos
[], uint32_t& initOrder
, bool& hasError
)
// already visited => nothing to do (return line lost in extraction)
1457 if ( initInfos
[loadIndex
].visited
)
1459 initInfos
[loadIndex
].visited
= true;
1460 initInfos
[loadIndex
].danglingUpward
= false;
1462 if (_loadedImages
[loadIndex
].isBadImage
) {
1467 for (const Image::LinkedImage
& dep
: _loadedImages
[loadIndex
].dependents
) {
1468 if ( dep
.imageNum() == kMissingWeakLinkedImage
)
1470 ClosureBuilder::BuilderLoadedImage
& depLi
= findLoadedImage(dep
.imageNum());
1471 uint32_t depLoadIndex
= (uint32_t)_loadedImages
.index(depLi
);
1472 if ( dep
.kind() == Image::LinkKind::upward
) {
1473 if ( !initInfos
[depLoadIndex
].visited
)
1474 initInfos
[depLoadIndex
].danglingUpward
= true;
1477 depthFirstRecurseSetInitInfo(depLoadIndex
, initInfos
, initOrder
, hasError
);
// post-order: dependents get lower initOrder numbers than this image
1482 initInfos
[loadIndex
].initOrder
= initOrder
++;
// Computes the initializer order for the image at loadIndex: recurses the
// dependency graph bottom-up, sweeps up any dangling-upward images, filters to
// images that actually have initializers, sorts them by initOrder, and writes
// the resulting ImageNum list into imageWriter.
// NOTE(review): physical lines lost in extraction include the declaration of
// 'index' used in the loop at original line 1516 (presumably a counter
// incremented per _loadedImages element) — verify against the original file.
1485 void ClosureBuilder::computeInitOrder(ImageWriter
& imageWriter
, uint32_t loadIndex
)
1487 // allocate array to track initializers
1488 InitInfo initInfos
[_loadedImages
.count()];
1489 bzero(initInfos
, sizeof(initInfos
));
1491 // recurse all images and build initializer list from bottom up
1492 uint32_t initOrder
= 1;
1493 bool hasMissingDependent
= false;
1494 depthFirstRecurseSetInitInfo(loadIndex
, initInfos
, initOrder
, hasMissingDependent
);
1495 if (hasMissingDependent
) {
1496 imageWriter
.setInvalid();
1500 // any images not visited yet are are danging, force add them to end of init list
1501 for (uint32_t i
=0; i
< (uint32_t)_loadedImages
.count(); ++i
) {
1502 if ( !initInfos
[i
].visited
&& initInfos
[i
].danglingUpward
) {
1503 depthFirstRecurseSetInitInfo(i
, initInfos
, initOrder
, hasMissingDependent
);
1507 if (hasMissingDependent
) {
1508 imageWriter
.setInvalid();
1512 // build array of just images with initializer
1513 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits
, _loadedImages
.count());
1515 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1516 if ( initInfos
[index
].visited
&& li
.hasInits
) {
1517 indexOfImagesWithInits
.push_back(index
);
1522 // bubble sort (FIXME)
1523 if ( indexOfImagesWithInits
.count() > 1 ) {
1524 for (uint32_t i
=0; i
< indexOfImagesWithInits
.count()-1; ++i
) {
1525 for (uint32_t j
=0; j
< indexOfImagesWithInits
.count()-i
-1; ++j
) {
1526 if ( initInfos
[indexOfImagesWithInits
[j
]].initOrder
> initInfos
[indexOfImagesWithInits
[j
+1]].initOrder
) {
1527 uint32_t temp
= indexOfImagesWithInits
[j
];
1528 indexOfImagesWithInits
[j
] = indexOfImagesWithInits
[j
+1];
1529 indexOfImagesWithInits
[j
+1] = temp
;
1535 // copy ImageNum of each image with initializers into array
1536 ImageNum initNums
[indexOfImagesWithInits
.count()];
1537 for (uint32_t i
=0; i
< indexOfImagesWithInits
.count(); ++i
) {
1538 initNums
[i
] = _loadedImages
[indexOfImagesWithInits
[i
]].imageNum
;
1541 // add to closure info
1542 imageWriter
.setInitsOrder(initNums
, (uint32_t)indexOfImagesWithInits
.count());
// Forwards exports-patch information for 'forImage' from the _handlers
// callback into imageWriter. Only valid when _handlers is set (asserted);
// each PatchInfo must have at least one use (asserted).
1545 void ClosureBuilder::addCachePatchInfo(ImageWriter
& imageWriter
, const BuilderLoadedImage
& forImage
)
1547 assert(_handlers
!= nullptr);
1548 _handlers
->forEachExportsPatch(forImage
.imageNum
, ^(const CacheDylibsBindingHandlers::PatchInfo
& info
) {
1549 assert(info
.usesCount
!= 0);
1550 imageWriter
.addExportPatchInfo(info
.exportCacheOffset
, info
.exportSymbolName
, info
.usesCount
, info
.usesArray
);
// Records closure-wide launch information: which image is libSystem and
// libdyld, the libdyld entry vector (located via its exported symbol and
// validated against kFormatVersion), the main executable's ImageNum and entry
// point (CRT-style start vs modern main), the env vars that must match at
// launch, and the list of paths that must remain missing for the closure to
// stay valid.
// NOTE(review): some physical lines were lost in extraction (break statements
// in the switch, the declaration of 'usesCRT' used at original line 1590);
// verify control flow against the original file.
1554 void ClosureBuilder::addClosureInfo(LaunchClosureWriter
& closureWriter
)
1556 // record which is libSystem
1557 assert(_libSystemImageNum
!= 0);
1558 closureWriter
.setLibSystemImageNum(_libSystemImageNum
);
1560 // record which is libdyld
1561 assert(_libDyldImageNum
!= 0);
1562 Image::ResolvedSymbolTarget entryLocation
;
1563 ResolvedTargetInfo entryInfo
;
// "__ZN5dyld318entryVectorForDyldE" is the mangled name of
// dyld3::entryVectorForDyld exported by libdyld.dylib
1564 if ( findSymbolInImage(findLoadedImage(_libDyldImageNum
).loadAddress(), "__ZN5dyld318entryVectorForDyldE", 0, false, entryLocation
, entryInfo
) ) {
1565 const dyld3::LibDyldEntryVector
* libDyldEntry
= nullptr;
1566 switch ( entryLocation
.image
.kind
) {
1567 case Image::ResolvedSymbolTarget::kindSharedCache
:
1568 libDyldEntry
= (dyld3::LibDyldEntryVector
*)((uint8_t*)_dyldCache
+ entryLocation
.sharedCache
.offset
);
1570 case Image::ResolvedSymbolTarget::kindImage
:
1571 libDyldEntry
= (dyld3::LibDyldEntryVector
*)((uint8_t*)findLoadedImage(entryLocation
.image
.imageNum
).loadAddress() + entryLocation
.image
.offset
);
// only record the entry vector if its binary format matches what this
// builder was compiled against
1574 if ( (libDyldEntry
!= nullptr) && (libDyldEntry
->binaryFormatVersion
== dyld3::closure::kFormatVersion
) )
1575 closureWriter
.setLibDyldEntry(entryLocation
);
1577 _diag
.error("libdyld.dylib entry vector is incompatible");
1580 _diag
.error("libdyld.dylib is missing entry vector");
1583 // record which is main executable
1584 ImageNum mainProgImageNum
= _loadedImages
[_mainProgLoadIndex
].imageNum
;
1585 closureWriter
.setTopImageNum(mainProgImageNum
);
// record the program entry point (usesCRT declaration lost in extraction)
1588 uint32_t entryOffset
;
1590 if ( _loadedImages
[_mainProgLoadIndex
].loadAddress()->getEntry(entryOffset
, usesCRT
) ) {
1591 Image::ResolvedSymbolTarget location
;
1592 location
.image
.kind
= Image::ResolvedSymbolTarget::kindImage
;
1593 location
.image
.imageNum
= mainProgImageNum
;
1594 location
.image
.offset
= entryOffset
;
1596 closureWriter
.setStartEntry(location
);
1598 closureWriter
.setMainEntry(location
);
1601 // add env vars that must match at launch time
1602 _pathOverrides
.forEachEnvVar(^(const char* envVar
) {
1603 closureWriter
.addEnvVar(envVar
);
1606 // add list of files which must be missing
1607 STACK_ALLOC_ARRAY(const char*, paths
, 8192);
1608 if ( _mustBeMissingPaths
!= nullptr ) {
1609 _mustBeMissingPaths
->forEachPath(^(const char* aPath
) {
1610 paths
.push_back(aPath
);
1613 closureWriter
.setMustBeMissingFiles(paths
);
1617 // used at launch by dyld when kernel has already mapped main executable
1618 const LaunchClosure
* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo
& fileInfo
, bool allowInsertFailures
)
1620 dyld3::ScopedTimer
timer(DBG_DYLD_TIMING_BUILD_CLOSURE
, 0, 0, 0);
1621 const mach_header
* mainMH
= (const mach_header
*)fileInfo
.fileContent
;
1622 // set up stack based storage for all arrays
1623 BuilderLoadedImage loadImagesStorage
[512];
1624 Image::LinkedImage dependenciesStorage
[512*8];
1625 InterposingTuple tuplesStorage
[64];
1626 Closure::PatchEntry cachePatchStorage
[64];
1627 const char* weakDefNameStorage
[64];
1628 _loadedImages
.setInitialStorage(loadImagesStorage
, 512);
1629 _dependencies
.setInitialStorage(dependenciesStorage
, 512*8);
1630 _interposingTuples
.setInitialStorage(tuplesStorage
, 64);
1631 _weakDefCacheOverrides
.setInitialStorage(cachePatchStorage
, 64);
1632 _weakDefsFromChainedBinds
.setInitialStorage(weakDefNameStorage
, 64);
1633 ArrayFinalizer
<BuilderLoadedImage
> scopedCleanup(_loadedImages
, ^(BuilderLoadedImage
& li
) { if (li
.unmapWhenDone
) {_fileSystem
.unloadFile(li
.loadedFileInfo
); li
.unmapWhenDone
=false;} });
1635 const MachOAnalyzer
* mainExecutable
= MachOAnalyzer::validMainExecutable(_diag
, mainMH
, fileInfo
.path
, fileInfo
.sliceLen
, _archName
, _platform
);
1636 if ( mainExecutable
== nullptr )
1638 if ( !mainExecutable
->isDynamicExecutable() ) {
1639 _diag
.error("not a main executable");
1642 _isLaunchClosure
= true;
1644 // add any DYLD_INSERT_LIBRARIES
1646 _pathOverrides
.forEachInsertedDylib(^(const char* dylibPath
) {
1647 BuilderLoadedImage insertEntry
;
1648 insertEntry
.loadedFileInfo
.path
= strdup_temp(dylibPath
);
1649 insertEntry
.imageNum
= _startImageNum
+ _nextIndex
++;
1650 insertEntry
.unmapWhenDone
= true;
1651 insertEntry
.contentRebased
= false;
1652 insertEntry
.hasInits
= false;
1653 insertEntry
.markNeverUnload
= true;
1654 insertEntry
.rtldLocal
= false;
1655 insertEntry
.isBadImage
= false;
1656 insertEntry
.overrideImageNum
= 0;
1657 _loadedImages
.push_back(insertEntry
);
1659 _mainProgLoadIndex
= (uint32_t)_loadedImages
.count();
1661 // add main executable
1662 BuilderLoadedImage mainEntry
;
1663 mainEntry
.loadedFileInfo
= fileInfo
;
1664 mainEntry
.imageNum
= _startImageNum
+ _nextIndex
++;
1665 mainEntry
.unmapWhenDone
= false;
1666 mainEntry
.contentRebased
= false;
1667 mainEntry
.hasInits
= false;
1668 mainEntry
.markNeverUnload
= true;
1669 mainEntry
.rtldLocal
= false;
1670 mainEntry
.isBadImage
= false;
1671 mainEntry
.overrideImageNum
= 0;
1672 _loadedImages
.push_back(mainEntry
);
1674 // get mach_headers for all images needed to launch this main executable
1675 LoadedImageChain chainStart
= { nullptr, _loadedImages
[_mainProgLoadIndex
] };
1676 recursiveLoadDependents(chainStart
);
1677 if ( _diag
.hasError() )
1679 for (uint32_t i
=0; i
< _mainProgLoadIndex
; ++i
) {
1680 closure::LoadedFileInfo loadedFileInfo
= MachOAnalyzer::load(_diag
, _fileSystem
, _loadedImages
[i
].loadedFileInfo
.path
, _archName
, _platform
);
1681 const char* originalLoadPath
= _loadedImages
[i
].loadedFileInfo
.path
;
1682 _loadedImages
[i
].loadedFileInfo
= loadedFileInfo
;
1683 if ( _loadedImages
[i
].loadAddress() != nullptr ) {
1684 LoadedImageChain insertChainStart
= { nullptr, _loadedImages
[i
] };
1685 recursiveLoadDependents(insertChainStart
);
1687 if ( _diag
.hasError() || (_loadedImages
[i
].loadAddress() == nullptr) ) {
1688 if ( !allowInsertFailures
) {
1689 if ( _diag
.noError() )
1690 _diag
.error("could not load inserted dylib %s", originalLoadPath
);
1693 _diag
.clearError(); // FIXME add way to plumb back warning
1694 // remove slot for inserted image that could not loaded
1695 _loadedImages
.remove(i
);
1697 _mainProgLoadIndex
-= 1;
1699 // renumber images in this closure
1700 for (uint32_t j
=i
+1; j
< _loadedImages
.count(); ++j
) {
1701 if ( (_loadedImages
[j
].imageNum
>= _startImageNum
) && (_loadedImages
[j
].imageNum
<= _startImageNum
+_nextIndex
) )
1702 _loadedImages
[j
].imageNum
-= 1;
1706 loadDanglingUpwardLinks();
1708 // only some images need to go into closure (ones from dyld cache do not)
1709 STACK_ALLOC_ARRAY(ImageWriter
, writers
, _loadedImages
.count());
1710 for (BuilderLoadedImage
& li
: _loadedImages
) {
1711 if ( li
.imageNum
>= _startImageNum
) {
1712 writers
.push_back(ImageWriter());
1713 buildImage(writers
.back(), li
);
1714 if ( _diag
.hasError() )
1717 if ( li
.loadAddress()->isDylib() && (strcmp(li
.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
1718 _libDyldImageNum
= li
.imageNum
;
1719 else if ( strcmp(li
.path(), "/usr/lib/libSystem.B.dylib") == 0 )
1720 _libSystemImageNum
= li
.imageNum
;
1723 // add initializer order into top level Images (may be inserted dylibs before main executable)
1724 for (uint32_t i
=0; i
<= _mainProgLoadIndex
; ++i
)
1725 computeInitOrder(writers
[i
], i
);
1727 if ( _diag
.hasError() )
1730 // combine all Image objects into one ImageArray
1731 ImageArrayWriter
imageArrayWriter(_startImageNum
, (uint32_t)writers
.count());
1732 for (ImageWriter
& writer
: writers
) {
1733 imageArrayWriter
.appendImage(writer
.finalize());
1734 writer
.deallocate();
1736 const ImageArray
* imageArray
= imageArrayWriter
.finalize();
1738 // merge ImageArray object into LaunchClosure object
1739 __block LaunchClosureWriter
closureWriter(imageArray
);
1741 // record shared cache info
1742 if ( _dyldCache
!= nullptr ) {
1743 // record cache UUID
1745 _dyldCache
->getUUID(cacheUUID
);
1746 closureWriter
.setDyldCacheUUID(cacheUUID
);
1748 // record any cache patching needed because of dylib overriding cache
1749 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1750 if ( li
.overrideImageNum
!= 0 ) {
1751 const Image
* cacheImage
= _dyldImageArray
->imageForNum(li
.overrideImageNum
);
1752 STACK_ALLOC_ARRAY(Closure::PatchEntry
, patches
, cacheImage
->patchableExportCount());
1753 MachOLoaded::DependentToMachOLoaded reexportFinder
= ^(const MachOLoaded
* mh
, uint32_t depIndex
) {
1754 return (const MachOLoaded
*)findDependent(mh
, depIndex
);
1756 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
1757 cacheImage
->forEachPatchableExport(^(uint32_t cacheOffsetOfImpl
, const char* symbolName
) {
1758 dyld3::MachOAnalyzer::FoundSymbol foundInfo
;
1759 Diagnostics patchDiag
;
1760 Closure::PatchEntry patch
;
1761 patch
.overriddenDylibInCache
= li
.overrideImageNum
;
1762 patch
.exportCacheOffset
= cacheOffsetOfImpl
;
1763 if ( li
.loadAddress()->findExportedSymbol(patchDiag
, symbolName
, foundInfo
, reexportFinder
) ) {
1764 const MachOAnalyzer
* impDylib
= (const MachOAnalyzer
*)foundInfo
.foundInDylib
;
1765 patch
.replacement
.image
.kind
= Image::ResolvedSymbolTarget::kindImage
;
1766 patch
.replacement
.image
.imageNum
= findLoadedImage(impDylib
).imageNum
;
1767 patch
.replacement
.image
.offset
= foundInfo
.value
;
1770 // this means the symbol is missing in the cache override dylib, so set any uses to NULL
1771 patch
.replacement
.absolute
.kind
= Image::ResolvedSymbolTarget::kindAbsolute
;
1772 patch
.replacement
.absolute
.value
= 0;
1774 patches
.push_back(patch
);
1776 closureWriter
.addCachePatches(patches
);
1780 // handle any extra weak-def coalescing needed by chained fixups
1781 if ( !_weakDefsFromChainedBinds
.empty() ) {
1782 for (const char* symbolName
: _weakDefsFromChainedBinds
) {
1783 Image::ResolvedSymbolTarget cacheOverrideTarget
;
1784 bool haveCacheOverride
= false;
1785 bool foundCachOverrideIsWeakDef
= false;
1786 for (const BuilderLoadedImage
& li
: _loadedImages
) {
1787 if ( !li
.loadAddress()->hasWeakDefs() )
1789 Image::ResolvedSymbolTarget target
;
1790 ResolvedTargetInfo targetInfo
;
1791 if ( findSymbolInImage(li
.loadAddress(), symbolName
, 0, false, target
, targetInfo
) ) {
1792 if ( li
.loadAddress()->inDyldCache() ) {
1793 if ( haveCacheOverride
) {
1794 Closure::PatchEntry patch
;
1795 patch
.exportCacheOffset
= (uint32_t)target
.sharedCache
.offset
;
1796 patch
.overriddenDylibInCache
= li
.imageNum
;
1797 patch
.replacement
= cacheOverrideTarget
;
1798 _weakDefCacheOverrides
.push_back(patch
);
1801 // found first in cached dylib, so no need to patch cache for this symbol
1806 // found image that exports this symbol and is not in cache
1807 if ( !haveCacheOverride
|| (foundCachOverrideIsWeakDef
&& !targetInfo
.isWeakDef
) ) {
1808 // update cache to use this symbol if it if first found or it is first non-weak found
1809 cacheOverrideTarget
= target
;
1810 foundCachOverrideIsWeakDef
= targetInfo
.isWeakDef
;
1811 haveCacheOverride
= true;
1819 // record any cache patching needed because weak-def C++ symbols override dyld cache
1820 if ( !_weakDefCacheOverrides
.empty() )
1821 closureWriter
.addCachePatches(_weakDefCacheOverrides
);
1825 #if __IPHONE_OS_VERSION_MIN_REQUIRED
1826 // if closure is built on-device for iOS, then record boot UUID
1827 char bootSessionUUID
[256] = { 0 };
1828 size_t bootSize
= sizeof(bootSessionUUID
);
1829 if ( sysctlbyname("kern.bootsessionuuid", bootSessionUUID
, &bootSize
, NULL
, 0) == 0 )
1830 closureWriter
.setBootUUID(bootSessionUUID
);
1833 // record any interposing info
1834 imageArray
->forEachImage(^(const Image
* image
, bool &stop
) {
1835 if ( !image
->inDyldCache() )
1836 addInterposingTuples(closureWriter
, image
, findLoadedImage(image
->imageNum()).loadAddress());
1839 // modify fixups in contained Images by applying interposing tuples
1840 closureWriter
.applyInterposing();
1843 closureWriter
.setUsedAtPaths(_atPathUsed
);
1844 closureWriter
.setUsedFallbackPaths(_fallbackPathUsed
);
1845 closureWriter
.setInitImageCount((uint32_t)_loadedImages
.count());
1847 // add other closure attributes
1848 addClosureInfo(closureWriter
);
1851 const LaunchClosure
* result
= closureWriter
.finalize();
1852 imageArrayWriter
.deallocate();
// Builds a DlopenClosure describing everything that must be mapped/bound for a
// dlopen() of 'path', given the images already loaded in the process
// ('alreadyLoadedList').  On success, *topImageNum is set to the ImageNum of
// the dylib being opened.  Returns sRetryDlopenClosure when a pre-built
// shared-cache closure was used but on-disk roots were then discovered, so the
// caller must rebuild with canUseSharedCacheClosure=false.
// NOTE(review): this extraction is missing some original source lines
// (opening/closing braces, early returns); the surviving lines below are
// preserved byte-for-byte — confirm against the complete file.
1857 // used by libdyld for dlopen()
1858 const DlopenClosure
* ClosureBuilder::makeDlopenClosure(const char* path
, const LaunchClosure
* mainClosure
, const Array
<LoadedImage
>& alreadyLoadedList
,
1859 closure::ImageNum callerImageNum
, bool noLoad
, bool canUseSharedCacheClosure
, closure::ImageNum
* topImageNum
)
// scoped timer emits a kdebug trace event covering this closure build
1861 dyld3::ScopedTimer
timer(DBG_DYLD_TIMING_BUILD_CLOSURE
, 0, 0, 0);
1862 // set up stack based storage for all arrays
1863 BuilderLoadedImage loadImagesStorage
[512];
1864 Image::LinkedImage dependenciesStorage
[512*8];
1865 Closure::PatchEntry cachePatchStorage
[64];
1866 _loadedImages
.setInitialStorage(loadImagesStorage
, 512);
1867 _dependencies
.setInitialStorage(dependenciesStorage
, 512*8);
1868 _weakDefCacheOverrides
.setInitialStorage(cachePatchStorage
, 64);
// on scope exit, unload any file this builder mapped (entries with unmapWhenDone set)
1869 ArrayFinalizer
<BuilderLoadedImage
> scopedCleanup(_loadedImages
, ^(BuilderLoadedImage
& li
) { if (li
.unmapWhenDone
) {_fileSystem
.unloadFile(li
.loadedFileInfo
); li
.unmapWhenDone
=false;} });
1871 // fill in builder array from already loaded images
1872 bool cachedDylibsExpectedOnDisk
= _dyldCache
? _dyldCache
->header
.dylibsExpectedOnDisk
: true;
1873 uintptr_t callerImageIndex
= UINTPTR_MAX
;
1874 for (const LoadedImage
& ali
: alreadyLoadedList
) {
1875 const Image
* image
= ali
.image();
1876 const MachOAnalyzer
* ma
= (MachOAnalyzer
*)(ali
.loadedAddress());
1877 bool inDyldCache
= ma
->inDyldCache();
1878 BuilderLoadedImage entry
;
1879 ImageNum overrideImageNum
;
1880 entry
.loadedFileInfo
.path
= image
->path();
1881 entry
.loadedFileInfo
.fileContent
= ma
;
1882 entry
.loadedFileInfo
.sliceOffset
= 0;
1883 entry
.loadedFileInfo
.inode
= 0;
1884 entry
.loadedFileInfo
.mtime
= 0;
1885 entry
.imageNum
= image
->imageNum();
1886 entry
.dependents
= image
->dependentsArray();
1887 entry
.unmapWhenDone
= false;
1888 entry
.contentRebased
= inDyldCache
;
1889 entry
.hasInits
= false;
1890 entry
.markNeverUnload
= image
->neverUnload();
1891 entry
.rtldLocal
= ali
.hideFromFlatSearch();
1892 entry
.isBadImage
= false;
1893 entry
.overrideImageNum
= 0;
// an on-disk dylib overriding a dyld-cache image invalidates any pre-built shared-cache closure
1894 if ( !inDyldCache
&& image
->isOverrideOfDyldCacheImage(overrideImageNum
) ) {
1895 entry
.overrideImageNum
= overrideImageNum
;
1896 canUseSharedCacheClosure
= false;
1898 if ( !inDyldCache
|| cachedDylibsExpectedOnDisk
)
1899 image
->hasFileModTimeAndInode(entry
.loadedFileInfo
.inode
, entry
.loadedFileInfo
.mtime
);
1900 if ( entry
.imageNum
== callerImageNum
)
1901 callerImageIndex
= _loadedImages
.count();
1902 _loadedImages
.push_back(entry
);
1904 _alreadyInitedIndex
= (uint32_t)_loadedImages
.count();
1906 // find main executable (may be needed for @executable_path)
1907 _isLaunchClosure
= false;
1908 for (uint32_t i
=0; i
< alreadyLoadedList
.count(); ++i
) {
1909 if ( _loadedImages
[i
].loadAddress()->isMainExecutable() ) {
1910 _mainProgLoadIndex
= i
;
1915 // add top level dylib being dlopen()ed
1916 BuilderLoadedImage
* foundTopImage
;
1918 // @rpath has caller's LC_RPATH, then main executable's LC_RPATH
1919 BuilderLoadedImage
& callerImage
= (callerImageIndex
!= UINTPTR_MAX
) ? _loadedImages
[callerImageIndex
] : _loadedImages
[_mainProgLoadIndex
];
1920 LoadedImageChain chainCaller
= { nullptr, callerImage
};
1921 LoadedImageChain chainMain
= { &chainCaller
, _loadedImages
[_mainProgLoadIndex
] };
1922 if ( !findImage(path
, chainMain
, foundTopImage
, false, canUseSharedCacheClosure
) ) {
1923 // If we didn't find the image, but its a shared cache path, then try again with realpath.
1924 if ( (strncmp(path
, "/usr/lib/", 9) == 0) || (strncmp(path
, "/System/Library/", 16) == 0) ) {
1925 char resolvedPath
[PATH_MAX
];
1926 if ( _fileSystem
.getRealPath(path
, resolvedPath
) ) {
1927 if ( !findImage(resolvedPath
, chainMain
, foundTopImage
, false, canUseSharedCacheClosure
) ) {
1931 // We didn't find a new path from realpath
1935 // Not in /usr/lib/ or /System/Library/
1940 // exit early in RTLD_NOLOAD mode
1942 // if no new images added to _loadedImages, then requested path was already loaded
1943 if ( (uint32_t)_loadedImages
.count() == _alreadyInitedIndex
)
1944 *topImageNum
= foundTopImage
->imageNum
;
1950 // fast path if roots are not allowed and target is in dyld cache or is other
1951 if ( (_dyldCache
!= nullptr) && (_dyldCache
->header
.cacheType
== kDyldSharedCacheTypeProduction
) ) {
1952 if ( foundTopImage
->imageNum
< closure::kFirstLaunchClosureImageNum
) {
1953 *topImageNum
= foundTopImage
->imageNum
;
1958 // recursive load dependents
1959 // @rpath for stuff top dylib depends on uses LC_RPATH from caller, main exe, and dylib being dlopen()ed
1960 LoadedImageChain chainTopDylib
= { &chainMain
, *foundTopImage
};
1961 recursiveLoadDependents(chainTopDylib
);
1962 if ( _diag
.hasError() )
// NOTE(review): the guarded early-return statement (orig. line 1963) is
// missing from this extraction
1964 loadDanglingUpwardLinks();
1966 // only some images need to go into closure (ones from dyld cache do not)
1967 STACK_ALLOC_ARRAY(ImageWriter
, writers
, _loadedImages
.count());
1968 for (BuilderLoadedImage
& li
: _loadedImages
) {
1969 if ( li
.imageNum
>= _startImageNum
) {
1970 writers
.push_back(ImageWriter());
1971 buildImage(writers
.back(), li
);
1975 if ( _diag
.hasError() )
1978 // check if top image loaded is in shared cache along with everything it depends on
1979 *topImageNum
= foundTopImage
->imageNum
;
// writers empty => no new Images needed beyond what is already built
// NOTE(review): the body of this branch (orig. line 1981) is missing here
1980 if ( writers
.count() == 0 ) {
1982 } else if ( canUseSharedCacheClosure
&& ( foundTopImage
->imageNum
< closure::kFirstLaunchClosureImageNum
) ) {
1983 // We used a shared cache built closure, but now discovered roots. We need to try again
1985 return sRetryDlopenClosure
;
1988 // add initializer order into top level Image
1989 computeInitOrder(writers
[0], (uint32_t)alreadyLoadedList
.count());
1991 if ( _diag
.hasError() )
1994 // combine all Image objects into one ImageArray
1995 ImageArrayWriter
imageArrayWriter(_startImageNum
, (uint32_t)writers
.count());
1996 for (ImageWriter
& writer
: writers
) {
1997 imageArrayWriter
.appendImage(writer
.finalize());
1998 writer
.deallocate();
2000 const ImageArray
* imageArray
= imageArrayWriter
.finalize();
2002 // merge ImageArray object into LaunchClosure object
2003 DlopenClosureWriter
closureWriter(imageArray
);
2005 // add other closure attributes
2006 closureWriter
.setTopImageNum(foundTopImage
->imageNum
);
2008 // record any cache patching needed because of dylib overriding cache
2009 if ( _dyldCache
!= nullptr ) {
2010 for (const BuilderLoadedImage
& li
: _loadedImages
) {
2011 if ( (li
.overrideImageNum
!= 0) && (li
.imageNum
>= _startImageNum
) ) {
2012 const Image
* cacheImage
= _dyldImageArray
->imageForNum(li
.overrideImageNum
);
2013 STACK_ALLOC_ARRAY(Closure::PatchEntry
, patches
, cacheImage
->patchableExportCount());
// block used to resolve re-exported symbols through dependent dylibs
2014 MachOLoaded::DependentToMachOLoaded reexportFinder
= ^(const MachOLoaded
* mh
, uint32_t depIndex
) {
2015 return (const MachOLoaded
*)findDependent(mh
, depIndex
);
2017 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
2018 cacheImage
->forEachPatchableExport(^(uint32_t cacheOffsetOfImpl
, const char* symbolName
) {
2019 dyld3::MachOAnalyzer::FoundSymbol foundInfo
;
2020 Diagnostics patchDiag
;
2021 Closure::PatchEntry patch
;
2022 patch
.overriddenDylibInCache
= li
.overrideImageNum
;
2023 patch
.exportCacheOffset
= cacheOffsetOfImpl
;
2024 if ( li
.loadAddress()->findExportedSymbol(patchDiag
, symbolName
, foundInfo
, reexportFinder
) ) {
2025 const MachOAnalyzer
* impDylib
= (const MachOAnalyzer
*)foundInfo
.foundInDylib
;
2026 patch
.replacement
.image
.kind
= Image::ResolvedSymbolTarget::kindImage
;
2027 patch
.replacement
.image
.imageNum
= findLoadedImage(impDylib
).imageNum
;
2028 patch
.replacement
.image
.offset
= foundInfo
.value
;
// symbol missing in the overriding dylib: patch uses of it to NULL (absolute 0)
2031 patch
.replacement
.absolute
.kind
= Image::ResolvedSymbolTarget::kindAbsolute
;
2032 patch
.replacement
.absolute
.value
= 0;
2034 patches
.push_back(patch
);
2036 closureWriter
.addCachePatches(patches
);
2041 // Dlopen's should never keep track of missing paths as we don't cache these closures.
2042 assert(_mustBeMissingPaths
== nullptr);
2044 // make final DlopenClosure object
2045 const DlopenClosure
* result
= closureWriter
.finalize();
2046 imageArrayWriter
.deallocate();
// Convenience overload used by dyld_closure_util: maps the executable at
// 'mainPath', validates that it is a dynamic executable, sets it as the main
// executable for path overrides, delegates to
// makeLaunchClosure(LoadedFileInfo&, bool), then unmaps the file.
// Returns the built LaunchClosure (nullptr paths are behind the missing
// early-return lines noted below).
// NOTE(review): the extraction dropped the braces and the early-return
// statements of the error branches (orig. lines 2058, 2061-2062, 2065-2066);
// surviving lines are preserved byte-for-byte.
2051 // used by dyld_closure_util
2052 const LaunchClosure
* ClosureBuilder::makeLaunchClosure(const char* mainPath
, bool allowInsertFailures
)
2054 closure::LoadedFileInfo loadedFileInfo
= MachOAnalyzer::load(_diag
, _fileSystem
, mainPath
, _archName
, _platform
);
2055 const MachOAnalyzer
* mh
= (const MachOAnalyzer
*)loadedFileInfo
.fileContent
;
2056 loadedFileInfo
.path
= mainPath
;
2057 if (_diag
.hasError())
2059 if (mh
== nullptr) {
2060 _diag
.error("could not load file");
2063 if (!mh
->isDynamicExecutable()) {
2064 _diag
.error("file is not an executable");
// _pathOverrides is shared/const in this builder; cast away const to record
// the main executable for @executable_path expansion
2067 const_cast<PathOverrides
*>(&_pathOverrides
)->setMainExecutable(mh
, mainPath
);
2068 const LaunchClosure
* launchClosure
= makeLaunchClosure(loadedFileInfo
, allowInsertFailures
);
// unmap the main executable file before returning the closure
2069 loadedFileInfo
.unload(loadedFileInfo
);
2070 return launchClosure
;
// Used by the dyld shared cache builder: builds one closure Image per cached
// dylib ('dylibs'), wires up their inter-dylib dependencies, records
// initializer order and export-patch info, and returns them combined into a
// single ImageArray.  'aliases' provides alternate install paths for lookup.
// NOTE(review): the extraction dropped some structural lines (braces, the
// early return after the error check, and the final 'return imageArray');
// surviving lines are preserved byte-for-byte.
2074 // used by dyld shared cache builder
2075 const ImageArray
* ClosureBuilder::makeDyldCacheImageArray(bool customerCache
, const Array
<CachedDylibInfo
>& dylibs
, const Array
<CachedDylibAlias
>& aliases
)
2077 // because this is run in cache builder using dispatch_apply() there is minimal stack space
2078 // so set up storage for all arrays to be vm_allocated
2079 uintptr_t maxImageCount
= dylibs
.count() + 16;
2080 _loadedImages
.reserve(maxImageCount
);
2081 _dependencies
.reserve(maxImageCount
*16);
2083 _makingDyldCacheImages
= true;
2084 _makingCustomerCache
= customerCache
;
2085 _aliases
= &aliases
;
2087 // build _loadedImages[] with every dylib in cache
// cache dylibs get consecutive ImageNums starting at _startImageNum
2088 __block ImageNum imageNum
= _startImageNum
;
2089 for (const CachedDylibInfo
& aDylibInfo
: dylibs
) {
2090 BuilderLoadedImage entry
;
2091 entry
.loadedFileInfo
= aDylibInfo
.fileInfo
;
2092 entry
.imageNum
= imageNum
++;
2093 entry
.unmapWhenDone
= false;
2094 entry
.contentRebased
= false;
2095 entry
.hasInits
= false;
2096 entry
.markNeverUnload
= true;
2097 entry
.rtldLocal
= false;
2098 entry
.isBadImage
= false;
2099 entry
.overrideImageNum
= 0;
2100 _loadedImages
.push_back(entry
);
2103 // wire up dependencies between cached dylibs
2104 for (BuilderLoadedImage
& li
: _loadedImages
) {
2105 LoadedImageChain chainStart
= { nullptr, li
};
2106 recursiveLoadDependents(chainStart
);
2107 if ( _diag
.hasError() )
// NOTE(review): the guarded early-return statement (orig. line 2108) is
// missing from this extraction
// all dependents of cache dylibs must themselves be cache dylibs
2110 assert(_loadedImages
.count() == dylibs
.count());
2112 // create an ImageWriter for each cached dylib
2113 STACK_ALLOC_ARRAY(ImageWriter
, writers
, _loadedImages
.count());
2114 for (BuilderLoadedImage
& li
: _loadedImages
) {
2115 writers
.push_back(ImageWriter());
2116 buildImage(writers
.back(), li
);
2119 // add initializer order into each dylib
2120 for (const BuilderLoadedImage
& li
: _loadedImages
) {
2121 uint32_t index
= li
.imageNum
- _startImageNum
;
2122 computeInitOrder(writers
[index
], index
);
2125 // add exports patch info for each dylib
2126 for (const BuilderLoadedImage
& li
: _loadedImages
) {
2127 uint32_t index
= li
.imageNum
- _startImageNum
;
2128 addCachePatchInfo(writers
[index
], li
);
2131 // combine all Image objects into one ImageArray
2132 ImageArrayWriter
imageArrayWriter(_startImageNum
, (uint32_t)writers
.count());
2133 for (ImageWriter
& writer
: writers
) {
2134 imageArrayWriter
.appendImage(writer
.finalize());
2135 writer
.deallocate();
2137 const ImageArray
* imageArray
= imageArrayWriter
.finalize();
// NOTE(review): the trailing 'return imageArray;' and closing brace
// (orig. lines after 2137) are missing from this extraction
// Cache-builder only: builds an ImageArray for dylibs/bundles that are NOT in
// the dyld cache ('otherDylibs').  Unlike makeDyldCacheImageArray, individual
// images here may fail to build; failures are downgraded to warnings, the
// image is marked bad, and anything depending (transitively) on a bad image
// is invalidated rather than aborting the whole array.
// NOTE(review): the extraction dropped structural lines (braces, 'continue'
// statements in the invalidation loop, the loop driven by 'madeChange', the
// final return, and the matching #endif); surviving lines are preserved
// byte-for-byte.
2143 #if BUILDING_CACHE_BUILDER
2144 const ImageArray
* ClosureBuilder::makeOtherDylibsImageArray(const Array
<LoadedFileInfo
>& otherDylibs
, uint32_t cachedDylibsCount
)
2146 // because this is run in cache builder using dispatch_apply() there is minimal stack space
2147 // so set up storage for all arrays to be vm_allocated
2148 uintptr_t maxImageCount
= otherDylibs
.count() + cachedDylibsCount
+ 128;
2149 _loadedImages
.reserve(maxImageCount
);
2150 _dependencies
.reserve(maxImageCount
*16);
2152 // build _loadedImages[] with every dylib in cache, followed by others
2154 for (const LoadedFileInfo
& aDylibInfo
: otherDylibs
) {
2155 BuilderLoadedImage entry
;
2156 entry
.loadedFileInfo
= aDylibInfo
;
2157 entry
.imageNum
= _startImageNum
+ _nextIndex
++;
2158 entry
.unmapWhenDone
= false;
2159 entry
.contentRebased
= false;
2160 entry
.hasInits
= false;
2161 entry
.markNeverUnload
= false;
2162 entry
.rtldLocal
= false;
2163 entry
.isBadImage
= false;
2164 entry
.overrideImageNum
= 0;
2165 _loadedImages
.push_back(entry
);
2168 // wire up dependencies between cached dylibs
2169 // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
2170 // to check the count on each iteration.
2171 for (uint64_t index
= 0; index
!= _loadedImages
.count(); ++index
) {
2172 BuilderLoadedImage
& li
= _loadedImages
[index
];
2173 LoadedImageChain chainStart
= { nullptr, li
};
2174 recursiveLoadDependents(chainStart
);
// a failed dependent load is a warning, not an error: mark the image bad
// and keep building the rest of the array
2175 if ( _diag
.hasError() ) {
2176 _diag
.warning("while building dlopen closure for %s: %s", li
.loadedFileInfo
.path
, _diag
.errorMessage().c_str());
2177 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
2179 li
.isBadImage
= true; // mark bad
// Marks bad any image that (transitively) depends on a bad image.
// NOTE(review): the control lines around 'madeChange' are missing from this
// extraction; presumably it drives a repeat-until-stable loop — confirm
// against the complete file.
2183 auto invalidateBadImages
= [&]() {
2184 // Invalidate images with bad dependencies
2186 bool madeChange
= false;
2187 for (BuilderLoadedImage
& li
: _loadedImages
) {
2188 if (li
.isBadImage
) {
2189 // Already invalidated
2192 for (Image::LinkedImage depIndex
: li
.dependents
) {
2193 if ( depIndex
.imageNum() == kMissingWeakLinkedImage
)
2195 if ( depIndex
.imageNum() < dyld3::closure::kLastDyldCacheImageNum
)
2197 BuilderLoadedImage
& depImage
= findLoadedImage(depIndex
.imageNum());
2198 if (depImage
.isBadImage
) {
2199 _diag
.warning("while building dlopen closure for %s: dependent dylib had error", li
.loadedFileInfo
.path
);
2200 li
.isBadImage
= true; // mark bad
2210 invalidateBadImages();
2212 // create an ImageWriter for each cached dylib
2213 STACK_ALLOC_ARRAY(ImageWriter
, writers
, _loadedImages
.count());
2214 for (BuilderLoadedImage
& li
: _loadedImages
) {
// imageNum 0 is an empty/invalid slot: emit a placeholder invalid Image
2215 if ( li
.imageNum
== 0 ) {
2216 writers
.push_back(ImageWriter());
2217 writers
.back().setInvalid();
// dyld-cache images are not re-emitted into this array
2220 if ( li
.imageNum
< dyld3::closure::kLastDyldCacheImageNum
)
2222 writers
.push_back(ImageWriter());
2223 buildImage(writers
.back(), li
);
2224 if ( _diag
.hasError() ) {
2225 _diag
.warning("while building dlopen closure for %s: %s", li
.loadedFileInfo
.path
, _diag
.errorMessage().c_str());
2226 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
2228 li
.isBadImage
= true; // mark bad
2229 writers
.back().setInvalid();
2233 invalidateBadImages();
2235 // add initializer order into each dylib
2236 for (const BuilderLoadedImage
& li
: _loadedImages
) {
2237 if ( li
.imageNum
< dyld3::closure::kLastDyldCacheImageNum
)
2241 uint32_t index
= li
.imageNum
- _startImageNum
;
2242 computeInitOrder(writers
[index
], index
);
2245 // combine all Image objects into one ImageArray
2246 ImageArrayWriter
imageArrayWriter(_startImageNum
, (uint32_t)writers
.count());
2247 for (ImageWriter
& writer
: writers
) {
2248 imageArrayWriter
.appendImage(writer
.finalize());
2249 writer
.deallocate();
2251 const ImageArray
* imageArray
= imageArrayWriter
.finalize();
// NOTE(review): the trailing 'return imageArray;', closing brace, and the
// #endif matching BUILDING_CACHE_BUILDER are missing from this extraction
2258 bool ClosureBuilder::inLoadedImageArray(const Array
<LoadedImage
>& loadedList
, ImageNum imageNum
)
2260 for (const LoadedImage
& ali
: loadedList
) {
2261 if ( ali
.image()->representsImageNum(imageNum
) )
2267 void ClosureBuilder::buildLoadOrderRecurse(Array
<LoadedImage
>& loadedList
, const Array
<const ImageArray
*>& imagesArrays
, const Image
* image
)
2269 // breadth first load
2270 STACK_ALLOC_ARRAY(const Image
*, needToRecurse
, 256);
2271 image
->forEachDependentImage(^(uint32_t dependentIndex
, dyld3::closure::Image::LinkKind kind
, ImageNum depImageNum
, bool &stop
) {
2272 if ( !inLoadedImageArray(loadedList
, depImageNum
) ) {
2273 const Image
* depImage
= ImageArray::findImage(imagesArrays
, depImageNum
);
2274 loadedList
.push_back(LoadedImage::make(depImage
));
2275 needToRecurse
.push_back(depImage
);
2280 for (const Image
* img
: needToRecurse
) {
2281 buildLoadOrderRecurse(loadedList
, imagesArrays
, img
);
2285 void ClosureBuilder::buildLoadOrder(Array
<LoadedImage
>& loadedList
, const Array
<const ImageArray
*>& imagesArrays
, const Closure
* toAdd
)
2287 const dyld3::closure::Image
* topImage
= ImageArray::findImage(imagesArrays
, toAdd
->topImage());
2288 loadedList
.push_back(LoadedImage::make(topImage
));
2289 buildLoadOrderRecurse(loadedList
, imagesArrays
, topImage
);
2294 } // namespace closure
2295 } // namespace dyld3