]> git.saurik.com Git - apple/dyld.git/blob - dyld3/ClosureBuilder.cpp
dyld-640.2.tar.gz
[apple/dyld.git] / dyld3 / ClosureBuilder.cpp
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>      // assert() used by image-lookup helpers
#include <stdlib.h>      // abort() for NDEBUG-safe unreachable paths
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/types.h>
35
36 #include "mach-o/dyld_priv.h"
37
38 #include "ClosureWriter.h"
39 #include "ClosureBuilder.h"
40 #include "MachOAnalyzer.h"
41 #include "libdyldEntryVector.h"
42 #include "Tracing.h"
43
44 namespace dyld3 {
45 namespace closure {
46
// Sentinel pointer value (all bits set, never a valid address) returned by the
// dlopen-closure building code to distinguish "retry" from a real closure or
// nullptr. Presumably compared by identity only, never dereferenced — confirm at use sites.
const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);
// Construct a builder for creating launch/dlopen closures.
//   startImageNum   - first ImageNum to assign to newly discovered (non-cache) images
//   fileSystem      - abstraction used for all path/file lookups
//   dyldCache       - shared cache to resolve dylibs against (may be nullptr)
//   dyldCacheIsLive - true when the cache is mapped and rebased in this process
//   pathOverrides   - DYLD_* env var and related path override state
//   atPathHandling  - policy for expanding @loader_path/@executable_path/@rpath
//   errorInfo       - optional out-param filled on launch failure
//   handlers        - optional callbacks for cache-dylib binding (cache builder only)
ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
                               const PathOverrides& pathOverrides, AtPath atPathHandling, LaunchErrorInfo* errorInfo,
                               const char* archName, Platform platform,
                               const CacheDylibsBindingHandlers* handlers)
    : _fileSystem(fileSystem), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archName(archName), _platform(platform), _startImageNum(startImageNum),
      _handlers(handlers), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive)
{
    if ( dyldCache != nullptr ) {
        _dyldImageArray = dyldCache->cachedDylibsImageArray();
        // A cache with an other-image array but no pre-built program closures is
        // one we are currently populating — remember that we are building closures
        // that will live inside the cache itself.
        if ( (dyldCache->header.otherImageArrayAddr != 0) && (dyldCache->header.progClosuresSize == 0) )
            _makingClosuresInCache = true;
    }
}
62
63
64 ClosureBuilder::~ClosureBuilder() {
65 if ( _tempPaths != nullptr )
66 PathPool::deallocate(_tempPaths);
67 if ( _mustBeMissingPaths != nullptr )
68 PathPool::deallocate(_mustBeMissingPaths);
69 }
70
// Resolve 'loadPath' (requested by the image at the head of 'forImageChain') to a
// BuilderLoadedImage, loading it if necessary. For each path variant produced by the
// path overrides (DYLD_ env vars, @-path expansion, fallback paths) it checks, in order:
// already-loaded images, already-loaded images via symlink (inode/mtime), the dyld
// shared cache (including pre-built dlopen closures and symlinks into the cache), and
// finally the file system. On success sets 'foundImage' and returns true.
//   staticLinkage - load came from LC_LOAD_DYLIB (vs dlopen)
//   allowOther    - permit pre-built dlopen closures for OS images not in the cache dylib set
bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, bool staticLinkage, bool allowOther)
{
    __block bool result = false;

    _pathOverrides.forEachPathVariant(loadPath, ^(const char* possiblePath, bool isFallbackPath, bool& stop) {
        // Per-variant state describing how and where the image was found.
        bool                 unmapWhenDone    = false;
        bool                 contentRebased   = false;
        bool                 hasInits         = false;
        bool                 fileFound        = false;
        // Statically linked dylibs inherit never-unload from their parent.
        bool                 markNeverUnload  = staticLinkage ? forImageChain.image.markNeverUnload : false;
        ImageNum             overrideImageNum = 0;
        ImageNum             foundImageNum    = 0;
        const MachOAnalyzer* mh               = nullptr;
        const char*          filePath         = nullptr;
        LoadedFileInfo       loadedFileInfo;

        // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
        bool isRPath = (strncmp(possiblePath, "@rpath/", 7) == 0);

        // passing a leaf name to dlopen() allows rpath searching for it
        bool implictRPath = !staticLinkage && (loadPath[0] != '/') && (loadPath == possiblePath) && (_atPathHandling != AtPath::none);

        // expand @ paths
        const char* prePathVarExpansion = possiblePath;
        possiblePath = resolvePathVar(possiblePath, forImageChain, implictRPath);
        if ( prePathVarExpansion != possiblePath )
            _atPathUsed = true;

        // look at already loaded images
        // NOTE(review): leafName is nullptr when possiblePath has no '/'; it is only
        // dereferenced below when isRPath is true, in which case the original path
        // began "@rpath/" — presumably resolution always leaves a slash; confirm.
        const char* leafName = strrchr(possiblePath, '/');
        for (BuilderLoadedImage& li: _loadedImages) {
            if ( strcmp(li.path(), possiblePath) == 0 ) {
                foundImage = &li;
                result = true;
                stop = true;
                return;
            }
            else if ( isRPath ) {
                // Special case @rpath/ because name in li.fileInfo.path is full path.
                // Getting installName is expensive, so first see if an already loaded image
                // has same leaf name and if so see if its installName matches request @rpath
                if (const char* aLeaf = strrchr(li.path(), '/')) {
                    if ( strcmp(aLeaf, leafName) == 0 ) {
                        if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }
        }

        // look to see if image already loaded via a different symlink
        if ( _fileSystem.fileExists(possiblePath, &loadedFileInfo.inode, &loadedFileInfo.mtime) ) {
            fileFound = true;
            for (BuilderLoadedImage& li: _loadedImages) {
                if ( (li.loadedFileInfo.inode == loadedFileInfo.inode) && (li.loadedFileInfo.mtime == loadedFileInfo.mtime) ) {
                    foundImage = &li;
                    result = true;
                    stop = true;
                    return;
                }
            }
        }

        // look in dyld cache
        filePath = possiblePath;
        char realPath[MAXPATHLEN];
        if ( _dyldImageArray != nullptr && (_dyldCache->header.formatVersion == dyld3::closure::kFormatVersion) ) {
            uint32_t dyldCacheImageIndex;
            bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
            if ( !foundInCache && fileFound ) {
                // see if this is an OS dylib/bundle with a pre-built dlopen closure
                if ( allowOther ) {
                    if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
                        uint64_t expectedInode;
                        uint64_t expectedModTime;
                        if ( !otherImage->isInvalid() ) {
                            bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
                            // use pre-built Image if it does not have mtime/inode or it does and it has matches current file info
                            if ( !hasInodeInfo || ((expectedInode == loadedFileInfo.inode) && (expectedModTime == loadedFileInfo.mtime)) ) {
                                loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archName, _platform);
                                if ( _diag.noError() ) {
                                    mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                                    foundImageNum = otherImage->imageNum();
                                    unmapWhenDone = true;
                                    contentRebased = false;
                                    hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
                                }
                            }
                        }
                    }
                }
                // if not found in cache, may be a symlink to something in cache
                if ( mh == nullptr ) {
                    if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                        foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
                        if ( foundInCache ) {
                            filePath = realPath;
#if BUILDING_LIBDYLD
                            // handle case where OS dylib was updated after this process launched
                            // NOTE(review): the inner 'if ( foundInCache )' below is redundant —
                            // the enclosing branch already established it.
                            if ( foundInCache ) {
                                for (BuilderLoadedImage& li: _loadedImages) {
                                    if ( strcmp(li.path(), realPath) == 0 ) {
                                        foundImage = &li;
                                        result = true;
                                        stop = true;
                                        return;
                                    }
                                }
                            }
#endif
                        }
                    }
                }
            }

            // if using a cached dylib, look to see if there is an override
            if ( foundInCache ) {
                // cache image indexes are 0-based; ImageNums for cache dylibs are 1-based
                ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                bool useCache = true;
                markNeverUnload = true; // dylibs in cache, or dylibs that override cache should not be unloaded at runtime
                const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                if ( image->overridableDylib() ) {
                    if ( fileFound && (_platform == MachOFile::currentPlatform()) ) {
                        uint64_t expectedInode;
                        uint64_t expectedModTime;
                        if ( image->hasFileModTimeAndInode(expectedInode, expectedModTime) ) {
                            // macOS where dylibs remain on disk. only use cache if mtime and inode have not changed
                            useCache = ( (loadedFileInfo.inode == expectedInode) && (loadedFileInfo.mtime == expectedModTime) );
                        }
                        else if ( _makingClosuresInCache ) {
                            // during iOS cache build, don't look at files on disk, use ones in cache
                            useCache = true;
                        }
                        else {
                            // iOS internal build. Any disk on cache overrides cache
                            useCache = false;
                        }
                    }
                    if ( !useCache )
                        overrideImageNum = dyldCacheImageNum;
                }
                if ( useCache ) {
                    foundImageNum = dyldCacheImageNum;
                    mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                    unmapWhenDone = false;
                    // if we are building ImageArray in dyld cache, content is not rebased
                    contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
                    hasInits = image->hasInitializers() || image->mayHavePlusLoads();
                }
            }
        }

        // If we are building the cache, and don't find an image, then it might be weak so just return
        if (_makingDyldCacheImages) {
            addMustBeMissingPath(possiblePath);
            return;
        }

        // if not found yet, mmap file
        if ( mh == nullptr ) {
            loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archName, _platform);
            mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
            if ( mh == nullptr ) {
                // Don't add must be missing paths for dlopen as we don't cache dlopen closures
                if (_isLaunchClosure) {
                    addMustBeMissingPath(possiblePath);
                }
                return;
            }
            if ( staticLinkage ) {
                // LC_LOAD_DYLIB can only link with dylibs
                if ( !mh->isDylib() ) {
                    _diag.error("not a dylib");
                    return;
                }
            }
            else if ( mh->isMainExecutable() ) {
                // when dlopen()ing a main executable, it must be dynamic Position Independent Executable
                if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
                    _diag.error("not PIE");
                    return;
                }
            }
            // newly mmap()ed images get the next free ImageNum after _startImageNum
            foundImageNum = _startImageNum + _nextIndex++;
            unmapWhenDone = true;
        } else {
            loadedFileInfo.fileContent = mh;
        }

        // if path is not original path
        if ( filePath != loadPath ) {
            // possiblePath may be a temporary (stack) string, since we found file at that path, make it permanent
            filePath = strdup_temp(filePath);
            // check if this overrides what would have been found in cache
            if ( overrideImageNum == 0 ) {
                if ( _dyldImageArray != nullptr ) {
                    uint32_t dyldCacheImageIndex;
                    if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
                        ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                        if ( possibleOverrideNum != foundImageNum )
                            overrideImageNum = possibleOverrideNum;
                    }
                }
            }
        }

        if ( !markNeverUnload ) {
            // If the parent didn't force us to be never unload, other conditions still may
            if ( mh->hasThreadLocalVariables() ) {
                markNeverUnload = true;
            } else if ( mh->hasObjC() && mh->isDylib() ) {
                markNeverUnload = true;
            } else {
                // record if image has DOF sections
                __block bool hasDOFs = false;
                mh->forEachDOFSection(_diag, ^(uint32_t offset) {
                    hasDOFs = true;
                });
                if ( hasDOFs )
                    markNeverUnload = true;
            }
        }

        // Set the path again just in case it was strdup'ed.
        loadedFileInfo.path = filePath;

        // add new entry
        BuilderLoadedImage entry;
        entry.loadedFileInfo   = loadedFileInfo;
        entry.imageNum         = foundImageNum;
        entry.unmapWhenDone    = unmapWhenDone;
        entry.contentRebased   = contentRebased;
        entry.hasInits         = hasInits;
        entry.markNeverUnload  = markNeverUnload;
        entry.rtldLocal        = false;
        entry.isBadImage       = false;
        entry.overrideImageNum = overrideImageNum;
        _loadedImages.push_back(entry);
        foundImage = &_loadedImages.back();
        if ( isFallbackPath )
            _fallbackPathUsed = true;
        stop = true;
        result = true;
    }, _platform);

    return result;
}
322
323 bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
324 {
325 switch ( _atPathHandling ) {
326 case AtPath::none:
327 return false;
328 case AtPath::onlyInRPaths:
329 if ( !fromLCRPATH ) {
330 // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
331 if ( _isLaunchClosure )
332 return false;
333 }
334 break;
335 case AtPath::all:
336 break;
337 }
338 if ( strncmp(loadPath, "@loader_path/", 13) != 0 )
339 return false;
340
341 strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
342 char* lastSlash = strrchr(fixedPath, '/');
343 if ( lastSlash != nullptr ) {
344 strcpy(lastSlash+1, &loadPath[13]);
345 return true;
346 }
347 return false;
348 }
349
350 bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, char fixedPath[])
351 {
352 switch ( _atPathHandling ) {
353 case AtPath::none:
354 return false;
355 case AtPath::onlyInRPaths:
356 if ( !fromLCRPATH )
357 return false;
358 break;
359 case AtPath::all:
360 break;
361 }
362 if ( strncmp(loadPath, "@executable_path/", 17) != 0 )
363 return false;
364
365 if ( _atPathHandling != AtPath::all )
366 return false;
367
368 strlcpy(fixedPath, _loadedImages[_mainProgLoadIndex].path(), PATH_MAX);
369 char* lastSlash = strrchr(fixedPath, '/');
370 if ( lastSlash != nullptr ) {
371 strcpy(lastSlash+1, &loadPath[17]);
372 return true;
373 }
374 return false;
375 }
376
// Expand any @loader_path/@executable_path/@rpath prefix in 'loadPath'.
// Returns either the original pointer (nothing to expand, or expansion
// disallowed/failed) or a pointer into the temp path pool with the expanded
// path. 'implictRPath' means a bare leaf name passed to dlopen() should be
// treated as if it were "@rpath/<leaf>".
const char* ClosureBuilder::resolvePathVar(const char* loadPath, const LoadedImageChain& forImageChain, bool implictRPath)
{
    // don't expand @ path if disallowed
    if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') )
        return loadPath;

    // quick out if not @ path or not implicit rpath
    if ( !implictRPath && (loadPath[0] != '@') )
        return loadPath;

    // expand @loader_path
    BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
    if ( expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) )
        return strdup_temp(tempPath);

    // expand @executable_path
    if ( expandAtExecutablePath(loadPath, false, tempPath) )
        return strdup_temp(tempPath);

    // expand @rpath
    const char* rpathTail = nullptr;
    char implicitRpathBuffer[PATH_MAX];
    if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
        // note: rpathTail starts with '/'
        rpathTail = &loadPath[6];
    }
    else if ( implictRPath ) {
        // make rpathTail starts with '/'
        strlcpy(implicitRpathBuffer, "/", PATH_MAX);
        strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
        rpathTail = implicitRpathBuffer;
    }
    if ( rpathTail != nullptr ) {
        // rpath is expansion is technically a stack of rpath dirs built starting with main executable and pushing
        // LC_RPATHS from each dylib as they are recursively loaded. Our imageChain represents that stack.
        __block const char* result = nullptr;
        for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && (result == nullptr); link = link->previous) {
            link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
                // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.fileInfo.path);
                // each LC_RPATH dir may itself start with @loader_path/@executable_path
                if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, tempPath) ) {
                    strlcat(tempPath, rpathTail, PATH_MAX);
                }
                else {
                    strlcpy(tempPath, rPath, PATH_MAX);
                    strlcat(tempPath, rpathTail, PATH_MAX);
                }
                // first candidate that exists on disk wins
                if ( _fileSystem.fileExists(tempPath) ) {
                    stop = true;
                    result = strdup_temp(tempPath);
                }
                else {
                    // Don't add must be missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        addMustBeMissingPath(tempPath);
                    }
                }
            });
        }
        if ( result != nullptr )
            return result;
    }

    // fall through: return the unexpanded path unchanged
    return loadPath;
}
441
442 const char* ClosureBuilder::strdup_temp(const char* path)
443 {
444 if ( _tempPaths == nullptr )
445 _tempPaths = PathPool::allocate();
446 return _tempPaths->add(path);
447 }
448
449 void ClosureBuilder::addMustBeMissingPath(const char* path)
450 {
451 //fprintf(stderr, "must be missing: %s\n", path);
452 if ( _mustBeMissingPaths == nullptr )
453 _mustBeMissingPaths = PathPool::allocate();
454 _mustBeMissingPaths->add(path);
455 }
456
457 ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
458 {
459 for (BuilderLoadedImage& li : _loadedImages) {
460 if ( li.imageNum == imageNum ) {
461 return li;
462 }
463 }
464 for (BuilderLoadedImage& li : _loadedImages) {
465 if ( li.overrideImageNum == imageNum ) {
466 return li;
467 }
468 }
469 assert(0 && "LoadedImage not found");
470 }
471
472 ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
473 {
474 for (BuilderLoadedImage& li : _loadedImages) {
475 if ( li.loadAddress() == mh ) {
476 return li;
477 }
478 }
479 assert(0 && "LoadedImage not found");
480 }
481
482 const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
483 {
484 return findLoadedImage(imageNum).loadAddress();
485 }
486
487 const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
488 {
489 for (const BuilderLoadedImage& li : _loadedImages) {
490 if ( li.loadAddress() == mh ) {
491 if (li.isBadImage) {
492 // Bad image duting building group 1 closures, so the dependents array
493 // is potentially incomplete.
494 return nullptr;
495 }
496 ImageNum childNum = li.dependents[depIndex].imageNum();
497 return machOForImageNum(childNum);
498 }
499 }
500 return nullptr;
501 }
502
503 ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
504 {
505 for (const BuilderLoadedImage& li : _loadedImages) {
506 if ( li.loadAddress() == mh ) {
507 return li.imageNum;
508 }
509 }
510 assert(0 && "unknown mach-o");
511 return 0;
512 }
513
// Load (via findImage) every dependent dylib of the image at the head of
// 'forImageChain', record them in _dependencies, then recurse breadth-first
// into each dependent. Errors are reported through _diag and, for launch
// closures, _launchErrorInfo; on error the recursion unwinds immediately.
void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain)
{
    // if dependents is set, then we have already loaded this
    if ( forImageChain.image.dependents.begin() != nullptr )
        return;

    uintptr_t startDepIndex = _dependencies.count();
    // add dependents
    __block uint32_t depIndex = 0;
    forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
        Image::LinkKind kind = Image::LinkKind::regular;
        if ( isWeak )
            kind = Image::LinkKind::weak;
        else if ( isReExport )
            kind = Image::LinkKind::reExport;
        else if ( isUpward )
            kind = Image::LinkKind::upward;
        BuilderLoadedImage* foundImage;
        if ( findImage(loadPath, forImageChain, foundImage, true, false) ) {
            // verify this is compatable dylib version
            if ( foundImage->loadAddress()->filetype != MH_DYLIB ) {
                _diag.error("found '%s' which is not a dylib. Needed by '%s'", foundImage->path(), forImageChain.image.path());
            }
            else {
                const char* installName;
                uint32_t    foundCompatVers;
                uint32_t    foundCurrentVers;
                foundImage->loadAddress()->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
                if ( (foundCompatVers < compatVersion) && foundImage->loadAddress()->enforceCompatVersion() ) {
                    char foundStr[32];
                    char requiredStr[32];
                    MachOFile::packedVersionToString(foundCompatVers, foundStr);
                    MachOFile::packedVersionToString(compatVersion, requiredStr);
                    _diag.error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
                                foundImage->path(), foundStr, requiredStr, forImageChain.image.path());
                }
            }
            if ( _diag.noError() )
                _dependencies.push_back(Image::LinkedImage(kind, foundImage->imageNum));
        }
        else if ( isWeak ) {
            // missing weak-linked dylibs are recorded with a sentinel ImageNum
            _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
        }
        else {
            // dependent not found: build a detailed "tried: ..." message from the
            // paths that were probed and found missing
            BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
            extra[0] = '\0';
            const char* targetLeaf = strrchr(loadPath, '/');
            if ( targetLeaf == nullptr )
                targetLeaf = loadPath;
            if ( _mustBeMissingPaths != nullptr ) {
                strcpy(extra, ", tried: ");
                _mustBeMissingPaths->forEachPath(^(const char* aPath) {
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                });
            }
            if ( _diag.hasError() ) {
#if BUILDING_CACHE_BUILDER
                std::string errorMessageBuffer = _diag.errorMessage();
                const char* msg = errorMessageBuffer.c_str();
#else
                const char* msg = _diag.errorMessage();
#endif
                // copy the existing message before calling _diag.error(), which
                // would otherwise overwrite the buffer 'msg' points into
                char msgCopy[strlen(msg)+4];
                strcpy(msgCopy, msg);
                _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
            }
            else {
                _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
            }
            if ( _launchErrorInfo != nullptr ) {
                _launchErrorInfo->kind              = DYLD_EXIT_REASON_DYLIB_MISSING;
                _launchErrorInfo->clientOfDylibPath = forImageChain.image.path();
                _launchErrorInfo->targetDylibPath   = loadPath;
                _launchErrorInfo->symbol            = nullptr;
            }
        }
        ++depIndex;
        if ( _diag.hasError() )
            stop = true;
    });
    if ( _diag.hasError() )
        return;
    forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);

    // breadth first recurse
    for (Image::LinkedImage dep : forImageChain.image.dependents) {
        // don't recurse upwards
        if ( dep.kind() == Image::LinkKind::upward )
            continue;
        // don't recurse down missing weak links
        if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
            continue;
        BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
        LoadedImageChain chain = { &forImageChain, depLoadedImage };
        recursiveLoadDependents(chain);
        if ( _diag.hasError() )
            break;
    }
}
620
621 void ClosureBuilder::loadDanglingUpwardLinks()
622 {
623 bool danglingFixed;
624 do {
625 danglingFixed = false;
626 for (BuilderLoadedImage& li : _loadedImages) {
627 if ( li.dependents.begin() == nullptr ) {
628 // this image has not have dependents set (probably a dangling upward link or referenced by upward link)
629 LoadedImageChain chain = { nullptr, li };
630 recursiveLoadDependents(chain);
631 danglingFixed = true;
632 break;
633 }
634 }
635 } while (danglingFixed && _diag.noError());
636 }
637
638 bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
639 {
640 // only set on dylibs in the dyld shared cache
641 if ( !_makingDyldCacheImages )
642 return false;
643
644 // on macOS dylibs always override cache
645 if ( _platform == Platform::macOS )
646 return true;
647
648 // on embedded platforms with Internal cache, allow overrides
649 if ( !_makingCustomerCache )
650 return true;
651
652 // embedded platform customer caches, no overrides
653 return false; // FIXME, allow libdispatch.dylib to be overridden
654 }
655
// Serialize everything known about 'forImage' into the closure via 'writer':
// flags, file info, mapping/code-signature/FairPlay ranges, paths and aliases,
// UUID, dependents, segments, cache-override linkage, fixups, initializers and
// DOF sections. On any _diag error the image is marked invalid and we bail.
void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const MachOAnalyzer* macho = forImage.loadAddress();
    // set ImageNum
    writer.setImageNum(forImage.imageNum);

    // set flags
    writer.setHasWeakDefs(macho->hasWeakDefs());
    writer.setIsBundle(macho->isBundle());
    writer.setIsDylib(macho->isDylib());
    writer.setIs64(macho->is64());
    writer.setIsExecutable(macho->isMainExecutable());
    writer.setUses16KPages(macho->uses16KPages());
    writer.setOverridableDylib(overridableDylib(forImage));
    writer.setInDyldCache(macho->inDyldCache());
    if ( macho->hasObjC() ) {
        writer.setHasObjC(true);
        bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
        writer.setHasPlusLoads(hasPlusLoads);
        // +load methods run like initializers
        if ( hasPlusLoads )
            forImage.hasInits = true;
    }
    else {
        writer.setHasObjC(false);
        writer.setHasPlusLoads(false);
    }

    if ( forImage.markNeverUnload ) {
        writer.setNeverUnload(true);
    }

#if BUILDING_DYLD || BUILDING_LIBDYLD
    // shared cache not built by dyld or libdyld.dylib, so must be real file
    writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
#else
    if ( _platform == Platform::macOS ) {
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of final file system
        }
        else {
            // file is either not in cache or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    }
    else {
        // all other platforms, cache is built off-device, so inodes are not known
    }
#endif

    // add info on how to load image
    if ( !macho->inDyldCache() ) {
        writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
        // add code signature, if signed
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
            uint8_t cdHash[20];  // SHA-1 sized code-directory hash
            if ( macho->getCDHash(cdHash) )
                writer.setCDHash(cdHash);
        }
        // add FairPlay encryption range if encrypted
        uint32_t fairPlayFileOffset;
        uint32_t fairPlaySize;
        if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
            writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
        }
    }

    // set path
    writer.addPath(forImage.path());
    if ( _aliases != nullptr ) {
        // record alternate (alias) paths that resolve to this same dylib
        for (const CachedDylibAlias& alias : *_aliases) {
            if ( strcmp(alias.realPath, forImage.path()) == 0 )
                writer.addPath(alias.aliasPath);
        }
    }

    // set uuid, if has one
    uuid_t uuid;
    if ( macho->getUuid(uuid) )
        writer.setUUID(uuid);

    // set dependents
    writer.setDependents(forImage.dependents);

    // set segments
    addSegments(writer, macho);

    // record if this dylib overrides something in the cache
    if ( forImage.overrideImageNum != 0 ) {
        writer.setAsOverrideOf(forImage.overrideImageNum);
        const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
        writer.addPath(overridePath);
        // remember overrides of the two dylibs dyld itself must coordinate with
        if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
            _libDyldImageNum = forImage.imageNum;
        else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = forImage.imageNum;
    }


    // do fix up info for non-cached, and cached if building cache
    if ( !macho->inDyldCache() || _makingDyldCacheImages ) {
        if ( macho->hasChainedFixups() ) {
            addChainedFixupInfo(writer, forImage);
        }
        else {
            if ( _handlers != nullptr ) {
                // cache builder supplies callbacks instead of emitting opcodes
                reportRebasesAndBinds(writer, forImage);
            }
            else {
                addRebaseInfo(writer, macho);
                if ( _diag.noError() )
                    addBindInfo(writer, forImage);
            }
        }
    }
    if ( _diag.hasError() ) {
        writer.setInvalid();
        return;
    }

    // add initializers
    // two passes: first count them, then fill a correctly sized array
    bool contentRebased = forImage.contentRebased;
    __block unsigned initCount = 0;
    macho->forEachInitializer(_diag, contentRebased, ^(uint32_t offset) {
        ++initCount;
    }, _dyldCache);
    if ( initCount != 0 ) {
        BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
        __block unsigned index = 0;
        macho->forEachInitializer(_diag, contentRebased, ^(uint32_t offset) {
            initOffsets[index++] = offset;
        }, _dyldCache);
        writer.setInitOffsets(initOffsets, initCount);
        forImage.hasInits = true;
    }

    // record if image has DOF sections
    STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
    macho->forEachDOFSection(_diag, ^(uint32_t offset) {
        dofSectionOffsets.push_back(offset);
    });
    if ( !dofSectionOffsets.empty() ) {
        writer.setDofOffsets(dofSectionOffsets);
    }

}
804
// Record the segment layout of 'mh' into the closure. Cache dylibs store each
// segment as a (cache-offset, size, protections) triple. On-disk images store
// a run-length list of page counts, with synthetic "padding" entries inserted
// wherever file offsets or vm addresses are discontiguous.
void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint32_t segCount = mh->segmentCount();
    if ( mh->inDyldCache() ) {
        uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
        BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            // store vmAddr as a 32-bit offset from the cache base
            segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
        });
        writer.setCachedSegments(segs, segCount);
    }
    else {
        const uint32_t pageSize = (mh->uses16KPages() ? 0x4000 : 0x1000);
        __block uint32_t diskSegIndex     = 0;
        __block uint32_t totalPageCount   = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd    = 0;
        BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            // gap in the file between previous segment's content and this one
            if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
                Image::DiskSegment filePadding;
                filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
                filePadding.vmPageCount   = 0;
                filePadding.permissions   = 0;
                filePadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = filePadding;
            }
            // gap in the vm layout between previous segment and this one
            if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
                Image::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount   = (info.vmAddr - lastVmAddrEnd)/pageSize;
                vmPadding.permissions   = 0;
                vmPadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = vmPadding;
                totalPageCount += vmPadding.vmPageCount;
            }
            // the segment itself (sizes rounded up to whole pages)
            {
                Image::DiskSegment segInfo;
                segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
                segInfo.vmPageCount   = (info.vmSize+pageSize-1)/pageSize;
                segInfo.permissions   = info.protections & 7;  // rwx bits only
                segInfo.paddingNotSeg = 0;
                dsegs[diskSegIndex++] = segInfo;
                totalPageCount += segInfo.vmPageCount;
                if ( info.fileSize != 0 )
                    lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
                if ( info.vmSize != 0 )
                    lastVmAddrEnd = info.vmAddr + info.vmSize;
            }
        });
        writer.setDiskSegments(dsegs, diskSegIndex);
    }
}
858
// Scan the image's __interpose section(s) and record resolved interposing tuples in the
// closure, so dyld can apply interposing at launch without re-parsing the mach-o.
// Each tuple on disk is a pair of pointers: { newImplementation, stockImplementation }.
// The first (rebased) pointer is the replacement; the second (bound) pointer is the
// stock implementation being replaced.
void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
{
    const unsigned pointerSize = mh->pointerSize();
    mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool &stop) {
        const uint32_t entrySize = 2*pointerSize;
        const uint32_t tupleCount = (uint32_t)(sectVmSize/entrySize);
        BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
        // initialize both sides of every tuple to absolute/0 ("unset");
        // tuples still unset after the fixup walk are filtered out below
        for (uint32_t i=0; i < tupleCount; ++i) {
            resolvedTuples[i].stockImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].stockImplementation.absolute.value = 0;
            resolvedTuples[i].newImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].newImplementation.absolute.value = 0;
        }
        // walk every fixup in the image; only those landing inside this section matter
        image->forEachFixup(^(uint64_t imageOffsetToRebase, bool &rebaseStop) {
            // rebase handler: a rebase on the first pointer of a tuple is the replacement impl
            if ( imageOffsetToRebase < sectVmOffset )
                return;
            if ( imageOffsetToRebase > sectVmOffset+sectVmSize )
                return;
            uint64_t offsetIntoSection = imageOffsetToRebase - sectVmOffset;
            uint64_t rebaseIndex = offsetIntoSection/entrySize;
            if ( rebaseIndex*entrySize != offsetIntoSection )
                return;    // not aligned to the start of a tuple
            // pointer content holds the unslid address of the replacement function
            const void* content = (uint8_t*)mh + imageOffsetToRebase;
            uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
            resolvedTuples[rebaseIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
            resolvedTuples[rebaseIndex].newImplementation.image.imageNum = image->imageNum();
            resolvedTuples[rebaseIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
        }, ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool &bindStop) {
            // bind handler: a bind on the second pointer of a tuple is the stock impl
            if ( imageOffsetToBind < sectVmOffset )
                return;
            if ( imageOffsetToBind > sectVmOffset+sectVmSize )
                return;
            uint64_t offsetIntoSection = imageOffsetToBind - sectVmOffset;
            uint64_t bindIndex = offsetIntoSection/entrySize;
            if ( bindIndex*entrySize + pointerSize != offsetIntoSection )
                return;    // not aligned to the second pointer of a tuple
            resolvedTuples[bindIndex].stockImplementation = bindTarget;
        }, ^(uint64_t imageOffsetStart, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
            // chained-fixup handler: walk each fixup in the chain and apply the same
            // rebase-is-replacement / bind-is-stock classification per slot
            image->forEachChainedFixup((void*)mh, imageOffsetStart, ^(uint64_t* fixupLoc, MachOLoaded::ChainedFixupPointerOnDisk fixupInfo, bool& stopChain) {
                uint64_t imageOffsetToFixup = (uint64_t)fixupLoc - (uint64_t)mh;
                if ( fixupInfo.authRebase.auth ) {
#if SUPPORT_ARCH_arm64e
                    if ( fixupInfo.authBind.bind ) {
                        // authenticated bind -> stock implementation slot
                        closure::Image::ResolvedSymbolTarget bindTarget = targets[fixupInfo.authBind.ordinal];
                        if ( imageOffsetToFixup < sectVmOffset )
                            return;
                        if ( imageOffsetToFixup > sectVmOffset+sectVmSize )
                            return;
                        uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset;
                        uint64_t bindIndex = offsetIntoSection/entrySize;
                        if ( bindIndex*entrySize + pointerSize != offsetIntoSection )
                            return;
                        resolvedTuples[bindIndex].stockImplementation = bindTarget;
                    }
                    else {
                        // authenticated rebase -> replacement implementation slot
                        if ( imageOffsetToFixup < sectVmOffset )
                            return;
                        if ( imageOffsetToFixup > sectVmOffset+sectVmSize )
                            return;
                        uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset;
                        uint64_t rebaseIndex = offsetIntoSection/entrySize;
                        if ( rebaseIndex*entrySize != offsetIntoSection )
                            return;
                        uint64_t unslidTargetAddress = (uint64_t)mh->preferredLoadAddress() + fixupInfo.authRebase.target;
                        resolvedTuples[rebaseIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[rebaseIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[rebaseIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
                    }
#else
                    // auth bit set but this build has no arm64e support: treat as malformed
                    _diag.error("malformed chained pointer");
                    stop = true;
                    stopChain = true;
#endif
                }
                else {
                    if ( fixupInfo.plainRebase.bind ) {
                        // plain bind -> stock implementation slot
                        closure::Image::ResolvedSymbolTarget bindTarget = targets[fixupInfo.plainBind.ordinal];
                        if ( imageOffsetToFixup < sectVmOffset )
                            return;
                        if ( imageOffsetToFixup > sectVmOffset+sectVmSize )
                            return;
                        uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset;
                        uint64_t bindIndex = offsetIntoSection/entrySize;
                        if ( bindIndex*entrySize + pointerSize != offsetIntoSection )
                            return;
                        resolvedTuples[bindIndex].stockImplementation = bindTarget;
                    }
                    else {
                        // plain rebase -> replacement implementation slot
                        if ( imageOffsetToFixup < sectVmOffset )
                            return;
                        if ( imageOffsetToFixup > sectVmOffset+sectVmSize )
                            return;
                        uint64_t offsetIntoSection = imageOffsetToFixup - sectVmOffset;
                        uint64_t rebaseIndex = offsetIntoSection/entrySize;
                        if ( rebaseIndex*entrySize != offsetIntoSection )
                            return;
                        uint64_t unslidTargetAddress = fixupInfo.plainRebase.signExtendedTarget();
                        resolvedTuples[rebaseIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[rebaseIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[rebaseIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
                    }
                }
            });
        });

        // remove any tuples in which both sides are not set (or target is weak-import NULL)
        STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
              && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
                goodTuples.push_back(resolvedTuples[i]);
        }
        writer.addInterposingTuples(goodTuples);

        // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
        STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
        for (const InterposingTuple& aTuple : goodTuples) {
            if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
                uint32_t imageIndex;
                assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
                ImageNum imageInCache = imageIndex+1;   // cache image numbers are 1-based
                Closure::PatchEntry patch;
                patch.exportCacheOffset = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
                patch.overriddenDylibInCache = imageInCache;
                patch.replacement = aTuple.newImplementation;
                patches.push_back(patch);
            }
        }
        writer.addCachePatches(patches);
    });
}
991
// Compresses the image's rebase locations into an array of RebasePattern
// {repeatCount, contigCount, skipCount} entries and stores it in the closure.
// The encoder is a small state machine keyed off the delta from the previous
// rebase location; order of the incoming rebase callbacks matters.
void ClosureBuilder::addRebaseInfo(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint64_t ptrSize = mh->pointerSize();
    // widest single pattern we can emit: repeat 0xFFFFF times, skip 0xF pointers each
    Image::RebasePattern maxLeapPattern = { 0xFFFFF, 0, 0xF };
    const uint64_t maxLeapCount = maxLeapPattern.repeatCount * maxLeapPattern.skipCount;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
    // start one pointer "before" offset 0 so a rebase at offset 0 yields delta == ptrSize
    __block uint64_t lastLocation = -ptrSize;
    mh->forEachRebase(_diag, true, ^(uint64_t runtimeOffset, bool& stop) {
        const uint64_t delta   = runtimeOffset - lastLocation;
        const bool     aligned = ((delta % ptrSize) == 0);
        if ( delta == ptrSize ) {
            // this rebase location is contiguous to previous
            if ( rebaseEntries.back().contigCount < 255 ) {
                // just bump previous's contigCount
                rebaseEntries.back().contigCount++;
            }
            else {
                // previous contiguous run already has max 255, so start a new run
                rebaseEntries.push_back({ 1, 1, 0 });
            }
        }
        else if ( aligned && (delta <= (ptrSize*15)) ) {
            // this rebase is within skip distance of last rebase
            rebaseEntries.back().skipCount = (uint8_t)((delta-ptrSize)/ptrSize);
            int lastIndex = (int)(rebaseEntries.count() - 1);
            if ( lastIndex > 1 ) {
                if ( (rebaseEntries[lastIndex].contigCount == rebaseEntries[lastIndex-1].contigCount)
                  && (rebaseEntries[lastIndex].skipCount == rebaseEntries[lastIndex-1].skipCount) ) {
                    // this entry has same contig and skip as prev, so remove it and bump repeat count of previous
                    rebaseEntries.pop_back();
                    rebaseEntries.back().repeatCount += 1;
                }
            }
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        else {
            // gap is too large (or misaligned) to encode in the previous entry's skipCount
            uint64_t advanceCount = (delta-ptrSize);
            if ( (runtimeOffset < lastLocation) && (lastLocation != -ptrSize) ) {
                // rebase offsets went backwards: emit a {0,0,0} entry, which resets
                // the decoder's offset to zero, then advance from there
                rebaseEntries.push_back({ 0, 0, 0 });
                advanceCount = runtimeOffset;
            }
            // if next rebase is too far to reach with one pattern, use series
            while ( advanceCount > maxLeapCount ) {
                rebaseEntries.push_back(maxLeapPattern);
                advanceCount -= maxLeapCount;
            }
            // if next rebase is not reachable with skipCount==1 or skipCount==15, add intermediate
            while ( advanceCount > maxLeapPattern.repeatCount ) {
                uint64_t count = advanceCount / maxLeapPattern.skipCount;
                rebaseEntries.push_back({ (uint32_t)count, 0, maxLeapPattern.skipCount });
                advanceCount -= (count*maxLeapPattern.skipCount);
            }
            if ( advanceCount != 0 )
                rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
            // finally the rebase location itself
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        lastLocation = runtimeOffset;
    });
    writer.setRebaseInfo(rebaseEntries);

    // i386 programs also use text relocs to rebase stubs
    if ( mh->cputype == CPU_TYPE_I386 ) {
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
        __block uint64_t lastOffset = -4;
        mh->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
            if ( textRebases.freeCount() < 2 ) {
                _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
                stop = true;
            }
            // try to fold this location into the previous pattern before adding a new one
            bool mergedIntoPrevious = false;
            if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
                uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
                if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
                    // previous entry was a singleton: turn it into a run of 2
                    textRebases.back().repeatCount = 2;
                    textRebases.back().skipCount = skipAmount;
                    mergedIntoPrevious = true;
                }
                else if ( textRebases.back().skipCount == skipAmount ) {
                    // same stride as the previous run: extend it
                    textRebases.back().repeatCount += 1;
                    mergedIntoPrevious = true;
                }
            }
            if ( !mergedIntoPrevious ) {
                Image::TextFixupPattern pattern;
                pattern.target.raw = 0;
                pattern.startVmOffset = (uint32_t)runtimeOffset;
                pattern.repeatCount = 1;
                pattern.skipCount = 0;
                textRebases.push_back(pattern);
            }
            lastOffset = runtimeOffset;
        });
        writer.setTextRebaseInfo(textRebases);
    }
}
1088
1089
1090 void ClosureBuilder::forEachBind(BuilderLoadedImage& forImage, void (^handler)(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop),
1091 void (^strongHandler)(const char* strongSymbolName))
1092 {
1093 __block int lastLibOrdinal = 256;
1094 __block const char* lastSymbolName = nullptr;
1095 __block uint64_t lastAddend = 0;
1096 __block Image::ResolvedSymbolTarget target;
1097 __block ResolvedTargetInfo targetInfo;
1098 forImage.loadAddress()->forEachBind(_diag, ^(uint64_t runtimeOffset, int libOrdinal, const char* symbolName, bool weakImport, uint64_t addend, bool& stop) {
1099 if ( (symbolName == lastSymbolName) && (libOrdinal == lastLibOrdinal) && (addend == lastAddend) ) {
1100 // same symbol lookup as last location
1101 handler(runtimeOffset, target, targetInfo, stop);
1102 }
1103 else if ( findSymbol(forImage, libOrdinal, symbolName, weakImport, addend, target, targetInfo) ) {
1104 handler(runtimeOffset, target, targetInfo, stop);
1105 lastSymbolName = symbolName;
1106 lastLibOrdinal = libOrdinal;
1107 lastAddend = addend;
1108 }
1109 else {
1110 stop = true;
1111 }
1112 }, ^(const char* symbolName) {
1113 strongHandler(symbolName);
1114 });
1115 }
1116
// Resolves all binds of `forImage` and compresses them into BindPattern
// {target, startVmOffset, repeatCount, skipCount} entries stored in the closure.
// Consecutive binds to the same target at a regular stride are merged into runs.
void ClosureBuilder::addBindInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const uint32_t ptrSize = forImage.loadAddress()->pointerSize();
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
    // start one pointer "before" offset 0 so the first bind cannot falsely merge
    __block uint64_t lastOffset = -ptrSize;
    __block Image::ResolvedSymbolTarget lastTarget = { {0, 0} };
    forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
        if ( targetInfo.weakBindCoalese ) {
            // may be previous bind to this location
            // if so, update that rather than create new BindPattern
            for (Image::BindPattern& aBind : binds) {
                if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
                    aBind.target = target;
                    return;
                }
            }
        }
        bool mergedIntoPrevious = false;
        // try to extend the previous pattern when this bind has the same target
        // and lies after the previous location
        if ( !mergedIntoPrevious && (target == lastTarget) && (runtimeOffset > lastOffset) && !binds.empty() ) {
            uint64_t skipAmount = (runtimeOffset - lastOffset - ptrSize)/ptrSize;
            if ( skipAmount*ptrSize != (runtimeOffset - lastOffset - ptrSize) ) {
                // misaligned pointer means we cannot optimize
            }
            else {
                if ( (binds.back().repeatCount == 1) && (binds.back().skipCount == 0) && (skipAmount <= 255) ) {
                    // previous entry was a singleton: turn it into a run of 2
                    binds.back().repeatCount = 2;
                    binds.back().skipCount = skipAmount;
                    assert(binds.back().skipCount == skipAmount); // check overflow
                    mergedIntoPrevious = true;
                }
                else if ( (binds.back().skipCount == skipAmount) && (binds.back().repeatCount < 0xfff) ) {
                    // same stride as the existing run: extend it
                    uint32_t prevRepeatCount = binds.back().repeatCount;
                    binds.back().repeatCount += 1;
                    assert(binds.back().repeatCount > prevRepeatCount); // check overflow
                    mergedIntoPrevious = true;
                }
            }
        }
        if ( (target == lastTarget) && (runtimeOffset == lastOffset) && !binds.empty() ) {
            // duplicate bind for same location, ignore this one
            mergedIntoPrevious = true;
        }
        if ( !mergedIntoPrevious ) {
            Image::BindPattern pattern;
            pattern.target = target;
            pattern.startVmOffset = runtimeOffset;
            pattern.repeatCount = 1;
            pattern.skipCount = 0;
            assert(pattern.startVmOffset == runtimeOffset);   // check bitfield truncation
            binds.push_back(pattern);
        }
        lastTarget = target;
        lastOffset = runtimeOffset;
    }, ^(const char* strongSymbolName) {
        if ( !_makingDyldCacheImages ) {
            // something has a strong symbol definition that may override a weak impl in the dyld cache
            Image::ResolvedSymbolTarget strongOverride;
            ResolvedTargetInfo strongTargetInfo;
            if ( findSymbolInImage(forImage.loadAddress(), strongSymbolName, 0, false, strongOverride, strongTargetInfo) ) {
                // check every weak-def dylib in the dyld cache for another impl of this symbol
                for (const BuilderLoadedImage& li : _loadedImages) {
                    if ( li.loadAddress()->inDyldCache() && li.loadAddress()->hasWeakDefs() ) {
                        Image::ResolvedSymbolTarget implInCache;
                        ResolvedTargetInfo implInCacheInfo;
                        if ( findSymbolInImage(li.loadAddress(), strongSymbolName, 0, false, implInCache, implInCacheInfo) ) {
                            // found another instance in some dylib in dyld cache, will need to patch it
                            Closure::PatchEntry patch;
                            patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
                            patch.overriddenDylibInCache = li.imageNum;
                            patch.replacement = strongOverride;
                            _weakDefCacheOverrides.push_back(patch);
                        }
                    }
                }
            }
        }
    });
    writer.setBindInfo(binds);
}
1195
1196 void ClosureBuilder::reportRebasesAndBinds(ImageWriter& writer, BuilderLoadedImage& forImage)
1197 {
1198 // report all rebases
1199 forImage.loadAddress()->forEachRebase(_diag, true, ^(uint64_t runtimeOffset, bool& stop) {
1200 _handlers->rebase(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset);
1201 });
1202
1203 // report all binds
1204 forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
1205 _handlers->bind(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset, target, targetInfo);
1206 },
1207 ^(const char* strongSymbolName) {});
1208
1209 // i386 programs also use text relocs to rebase stubs
1210 if ( forImage.loadAddress()->cputype == CPU_TYPE_I386 ) {
1211 // FIX ME
1212 }
1213 }
1214
// These are mangled symbols for all the variants of operator new and delete
// which a main executable can define (non-weak) and override the
// weak-def implementation in the OS.
// Names carry the extra leading underscore used at the linker level
// (e.g. "__Znwm" corresponds to the Itanium-mangled _Znwm, operator new).
static const char* sTreatAsWeak[] = {
    // operator new / new[] (plain and std::nothrow variants)
    "__Znwm", "__ZnwmRKSt9nothrow_t",
    "__Znam", "__ZnamRKSt9nothrow_t",
    // operator delete / delete[] (plain, nothrow, and sized variants)
    "__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
    "__ZdaPv", "__ZdaPvRKSt9nothrow_t", "__ZdaPvm",
    // C++17 aligned operator new / new[]
    "__ZnwmSt11align_val_t", "__ZnwmSt11align_val_tRKSt9nothrow_t",
    "__ZnamSt11align_val_t", "__ZnamSt11align_val_tRKSt9nothrow_t",
    // C++17 aligned operator delete / delete[]
    "__ZdlPvSt11align_val_t", "__ZdlPvSt11align_val_tRKSt9nothrow_t", "__ZdlPvmSt11align_val_t",
    "__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
};
1228
1229
// For images using chained fixups: collects the chain-start offsets and resolves the
// fixup target symbols, then either hands them to _handlers (cache building) or stores
// them in the Image being written.
void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, const BuilderLoadedImage& forImage)
{
    // calculate max page starts (upper bound: one start per 4KB page of writable segments)
    __block uint32_t dataPageCount = 1;
    forImage.loadAddress()->forEachSegment(^(const dyld3::MachOFile::SegmentInfo& info, bool& stop) {
        if ( info.protections & VM_PROT_WRITE ) {
            dataPageCount += ((info.fileSize+4095) / 4096);
        }
    });

    // build array of starts
    STACK_ALLOC_ARRAY(uint64_t, starts, dataPageCount);
    forImage.loadAddress()->forEachChainedFixupStart(_diag, ^(uint64_t runtimeOffset, bool& stop) {
        starts.push_back(runtimeOffset);
    });

    // build array of targets, one entry per import ordinal, resolved via findSymbol()
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ResolvedTargetInfo, targetInfos, 1024);
    forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
        Image::ResolvedSymbolTarget target;
        ResolvedTargetInfo targetInfo;
        if ( !findSymbol(forImage, libOrdinal, symbolName, weakImport, addend, target, targetInfo) ) {
            const char* expectedInPath = forImage.loadAddress()->dependentDylibLoadPath(libOrdinal-1);
            _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, forImage.path());
            stop = true;
            return;
        }
        if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_DEF_COALESCE ) {
            // remember weak-coalesced symbol names (deduplicated) for later cache patching
            bool alreadyInArray = false;
            for (const char* sym : _weakDefsFromChainedBinds) {
                if ( strcmp(sym, symbolName) == 0 ) {
                    alreadyInArray = true;
                    break;
                }
            }
            if ( !alreadyInArray )
                _weakDefsFromChainedBinds.push_back(symbolName);
        }
        targets.push_back(target);
        targetInfos.push_back(targetInfo);
    });
    if ( _diag.hasError() )
        return;

    if ( _handlers != nullptr )
        _handlers->chainedBind(forImage.imageNum, forImage.loadAddress(), starts, targets, targetInfos);
    else
        writer.setChainedFixups(starts, targets); // store results in Image object

    // with chained fixups, main executable may define symbol that overrides weak-defs but has no fixup
    if ( _isLaunchClosure && forImage.loadAddress()->hasWeakDefs() && forImage.loadAddress()->isMainExecutable() ) {
        for (const char* weakSymbolName : sTreatAsWeak) {
            Diagnostics exportDiag;
            dyld3::MachOAnalyzer::FoundSymbol foundInfo;
            if ( forImage.loadAddress()->findExportedSymbol(exportDiag, weakSymbolName, foundInfo, nullptr) ) {
                _weakDefsFromChainedBinds.push_back(weakSymbolName);
            }
        }
    }
}
1292
1293
1294 bool ClosureBuilder::findSymbolInImage(const MachOAnalyzer* macho, const char* symbolName, uint64_t addend, bool followReExports,
1295 Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1296 {
1297 targetInfo.foundInDylib = nullptr;
1298 targetInfo.requestedSymbolName = symbolName;
1299 targetInfo.addend = addend;
1300 targetInfo.isWeakDef = false;
1301 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
1302 return (const MachOLoaded*)findDependent(mh, depIndex);
1303 };
1304 MachOAnalyzer::DependentToMachOLoaded finder = nullptr;
1305 if ( followReExports )
1306 finder = reexportFinder;
1307
1308 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1309 if ( macho->findExportedSymbol(_diag, symbolName, foundInfo, finder) ) {
1310 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
1311 targetInfo.foundInDylib = foundInfo.foundInDylib;
1312 targetInfo.foundSymbolName = foundInfo.foundSymbolName;
1313 if ( foundInfo.isWeakDef )
1314 targetInfo.isWeakDef = true;
1315 if ( foundInfo.kind == MachOAnalyzer::FoundSymbol::Kind::absolute ) {
1316 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1317 target.absolute.value = foundInfo.value + addend;
1318 }
1319 else if ( impDylib->inDyldCache() ) {
1320 target.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1321 target.sharedCache.offset = (uint8_t*)impDylib - (uint8_t*)_dyldCache + foundInfo.value + addend;
1322 }
1323 else {
1324 target.image.kind = Image::ResolvedSymbolTarget::kindImage;
1325 target.image.imageNum = findLoadedImage(impDylib).imageNum;
1326 target.image.offset = foundInfo.value + addend;
1327 }
1328 return true;
1329 }
1330 return false;
1331 }
1332
// Resolves one bind: dispatches on the library ordinal (flat lookup, weak-def
// coalescing, or a specific dependent) and fills in `target`/`targetInfo`.
// On failure records an error in _diag (and _launchErrorInfo) and returns false.
bool ClosureBuilder::findSymbol(const BuilderLoadedImage& fromImage, int libOrdinal, const char* symbolName, bool weakImport, uint64_t addend,
                                Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
{
    targetInfo.weakBindCoalese = false;    // (field name typo is part of the ABI-visible struct)
    targetInfo.weakBindSameImage = false;
    targetInfo.requestedSymbolName = symbolName;
    targetInfo.libOrdinal = libOrdinal;
    if ( libOrdinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP ) {
        // flat lookup: first image (in load order) that exports the symbol wins
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( !li.rtldLocal && findSymbolInImage(li.loadAddress(), symbolName, addend, true, target, targetInfo) )
                return true;
        }
        if ( weakImport ) {
            // missing weak-import binds to NULL
            target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            target.absolute.value = 0;
            return true;
        }
        _diag.error("symbol '%s' not found, expected in flat namespace by '%s'", symbolName, fromImage.path());
    }
    else if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_DEF_COALESCE ) {
        // to resolve weakDef coalescing, we need to search all images in order and use first definition
        // but, if first found is a weakDef, a later non-weak def overrides that
        bool foundWeakDefImpl   = false;
        bool foundStrongDefImpl = false;
        bool foundImpl          = false;
        Image::ResolvedSymbolTarget aTarget;
        ResolvedTargetInfo          aTargetInfo;
        STACK_ALLOC_ARRAY(const BuilderLoadedImage*, cachedDylibsUsingSymbol, 1024);
        for (const BuilderLoadedImage& li : _loadedImages) {
            // only search images with weak-defs that were not loaded with RTLD_LOCAL
            if ( li.loadAddress()->hasWeakDefs() && !li.rtldLocal ) {
                if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, aTarget, aTargetInfo) ) {
                    foundImpl = true;
                    // with non-chained images, weak-defs first have a rebase to their local impl, and a weak-bind which allows earlier impls to override
                    if ( !li.loadAddress()->hasChainedFixups() && (aTargetInfo.foundInDylib == fromImage.loadAddress()) )
                        targetInfo.weakBindSameImage = true;
                    if ( aTargetInfo.isWeakDef ) {
                        // found a weakDef impl, if this is first found, set target to this
                        if ( !foundWeakDefImpl && !foundStrongDefImpl ) {
                            target = aTarget;
                            targetInfo = aTargetInfo;
                        }
                        foundWeakDefImpl = true;
                    }
                    else {
                        // found a non-weak impl, use this (unless early strong found)
                        if ( !foundStrongDefImpl ) {
                            target = aTarget;
                            targetInfo = aTargetInfo;
                        }
                        foundStrongDefImpl = true;
                    }
                }
                // remember dyld-cache dylibs that use this symbol; they may need patching below
                if ( foundImpl && !_makingDyldCacheImages && li.loadAddress()->inDyldCache() )
                    cachedDylibsUsingSymbol.push_back(&li);
            }
        }
        // now that final target found, if any dylib in dyld cache uses that symbol name, redirect it to new target
        if ( !cachedDylibsUsingSymbol.empty() ) {
            for (const BuilderLoadedImage* li : cachedDylibsUsingSymbol) {
                Image::ResolvedSymbolTarget implInCache;
                ResolvedTargetInfo implInCacheInfo;
                if ( findSymbolInImage(li->loadAddress(), symbolName, addend, false, implInCache, implInCacheInfo) ) {
                    if ( implInCache != target ) {
                        // found another instance in some dylib in dyld cache, will need to patch it
                        Closure::PatchEntry patch;
                        patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
                        patch.overriddenDylibInCache = li->imageNum;
                        patch.replacement = target;
                        _weakDefCacheOverrides.push_back(patch);
                    }
                }
            }
        }
        targetInfo.weakBindCoalese = true;

        if ( foundImpl )
            return true;
        _diag.error("symbol '%s' not found, expected to be weak-def coalesced", symbolName);
    }
    else {
        // ordinary bind: look in the specified dependent (or self / main executable)
        const BuilderLoadedImage* targetLoadedImage = nullptr;
        if ( (libOrdinal > 0) && (libOrdinal <= (int)fromImage.dependents.count()) ) {
            ImageNum childNum = fromImage.dependents[libOrdinal - 1].imageNum();
            if ( childNum != kMissingWeakLinkedImage ) {
                targetLoadedImage = &findLoadedImage(childNum);
            }
        }
        else if ( libOrdinal == BIND_SPECIAL_DYLIB_SELF ) {
            targetLoadedImage = &fromImage;
        }
        else if ( libOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
            targetLoadedImage = &_loadedImages[_mainProgLoadIndex];
        }
        else {
            _diag.error("unknown special ordinal %d in %s", libOrdinal, fromImage.path());
            return false;
        }

        if ( targetLoadedImage != nullptr ) {
            if ( findSymbolInImage(targetLoadedImage->loadAddress(), symbolName, addend, true, target, targetInfo) )
                return true;
        }

        if ( weakImport ) {
            // missing weak-import binds to NULL
            target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            target.absolute.value = 0;
            return true;
        }
        const char* expectedInPath = targetLoadedImage ? targetLoadedImage->path() : "unknown";
        _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, fromImage.path());
        if ( _launchErrorInfo != nullptr ) {
            // record structured info so dyld can report a DYLD_EXIT_REASON_SYMBOL_MISSING crash
            _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
            _launchErrorInfo->clientOfDylibPath = fromImage.path();
            _launchErrorInfo->targetDylibPath = expectedInPath;
            _launchErrorInfo->symbol = symbolName;
        }
    }
    return false;
}
1453
1454
1455 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1456 {
1457 if ( initInfos[loadIndex].visited )
1458 return;
1459 initInfos[loadIndex].visited = true;
1460 initInfos[loadIndex].danglingUpward = false;
1461
1462 if (_loadedImages[loadIndex].isBadImage) {
1463 hasError = true;
1464 return;
1465 }
1466
1467 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1468 if ( dep.imageNum() == kMissingWeakLinkedImage )
1469 continue;
1470 ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1471 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1472 if ( dep.kind() == Image::LinkKind::upward ) {
1473 if ( !initInfos[depLoadIndex].visited )
1474 initInfos[depLoadIndex].danglingUpward = true;
1475 }
1476 else {
1477 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1478 if (hasError)
1479 return;
1480 }
1481 }
1482 initInfos[loadIndex].initOrder = initOrder++;
1483 }
1484
// Computes the order in which image initializers must run (bottom-up from the
// dependency graph rooted at loadIndex) and records it in the image writer.
void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
{
    // allocate array to track initializers (VLA sized by number of loaded images)
    InitInfo initInfos[_loadedImages.count()];
    bzero(initInfos, sizeof(initInfos));

    // recurse all images and build initializer list from bottom up
    uint32_t initOrder = 1;
    bool hasMissingDependent = false;
    depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
    if (hasMissingDependent) {
        imageWriter.setInvalid();
        return;
    }

    // any images not visited yet are dangling (only reachable via upward links);
    // force add them to end of init list
    for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
        if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
            depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
        }
    }

    if (hasMissingDependent) {
        imageWriter.setInvalid();
        return;
    }

    // build array of just images with initializer
    STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
    uint32_t index = 0;
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( initInfos[index].visited && li.hasInits ) {
            indexOfImagesWithInits.push_back(index);
        }
        ++index;
    }

    // bubble sort by initOrder (FIXME: O(n^2), fine while init lists stay small)
    if ( indexOfImagesWithInits.count() > 1 ) {
        for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
            for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
                if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
                    uint32_t temp = indexOfImagesWithInits[j];
                    indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
                    indexOfImagesWithInits[j+1] = temp;
                }
            }
        }
    }

    // copy ImageNum of each image with initializers into array
    ImageNum initNums[indexOfImagesWithInits.count()];
    for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
        initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
    }

    // add to closure info
    imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
}
1544
// When building dyld-cache dylib closures, copies the export-patch locations
// gathered by the binding handlers into the image writer.
void ClosureBuilder::addCachePatchInfo(ImageWriter& imageWriter, const BuilderLoadedImage& forImage)
{
    assert(_handlers != nullptr);   // only valid in cache-building mode
    _handlers->forEachExportsPatch(forImage.imageNum, ^(const CacheDylibsBindingHandlers::PatchInfo& info) {
        assert(info.usesCount != 0);
        imageWriter.addExportPatchInfo(info.exportCacheOffset, info.exportSymbolName, info.usesCount, info.usesArray);
    });
}
1553
// Records launch-wide closure info: libSystem/libdyld image numbers, the libdyld
// entry vector, the main executable and its entry point, the env vars that must
// match at launch, and the paths that must still be missing for the closure to
// remain valid.
void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
{
    // record which is libSystem
    assert(_libSystemImageNum != 0);
    closureWriter.setLibSystemImageNum(_libSystemImageNum);

    // record which is libdyld, and locate its entry vector so dyld3 can call into libdyld
    assert(_libDyldImageNum != 0);
    Image::ResolvedSymbolTarget entryLocation;
    ResolvedTargetInfo entryInfo;
    if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld318entryVectorForDyldE", 0, false, entryLocation, entryInfo) ) {
        const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
        switch ( entryLocation.image.kind ) {
            case Image::ResolvedSymbolTarget::kindSharedCache:
                libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)_dyldCache + entryLocation.sharedCache.offset);
                break;
            case Image::ResolvedSymbolTarget::kindImage:
                libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)findLoadedImage(entryLocation.image.imageNum).loadAddress() + entryLocation.image.offset);
                break;
        }
        // reject libdyld whose vector format doesn't match the closure format we emit
        if ( (libDyldEntry != nullptr) && (libDyldEntry->binaryFormatVersion == dyld3::closure::kFormatVersion) )
            closureWriter.setLibDyldEntry(entryLocation);
        else
            _diag.error("libdyld.dylib entry vector is incompatible");
    }
    else {
        _diag.error("libdyld.dylib is missing entry vector");
    }

    // record which is main executable
    ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
    closureWriter.setTopImageNum(mainProgImageNum);

    // add entry point: "start" (CRT) vs "main" determines which writer slot is used
    uint32_t entryOffset;
    bool usesCRT;
    if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
        Image::ResolvedSymbolTarget location;
        location.image.kind = Image::ResolvedSymbolTarget::kindImage;
        location.image.imageNum = mainProgImageNum;
        location.image.offset = entryOffset;
        if ( usesCRT )
            closureWriter.setStartEntry(location);
        else
            closureWriter.setMainEntry(location);
    }

    // add env vars that must match at launch time
    _pathOverrides.forEachEnvVar(^(const char* envVar) {
        closureWriter.addEnvVar(envVar);
    });

    // add list of files which must be missing (closure is invalid if any appears later)
    STACK_ALLOC_ARRAY(const char*, paths, 8192);
    if ( _mustBeMissingPaths != nullptr ) {
        _mustBeMissingPaths->forEachPath(^(const char* aPath) {
            paths.push_back(aPath);
        });
    }
    closureWriter.setMustBeMissingFiles(paths);
}
1615
1616
// used at launch by dyld when kernel has already mapped main executable
//
// Builds a LaunchClosure for an already-mapped main executable:
//   1. loads any DYLD_INSERT_LIBRARIES dylibs, then the main executable,
//      then (recursively) everything they depend on
//   2. emits one Image per on-disk image into an ImageArray
//   3. records shared-cache patch info, interposing, env vars, etc.
// Returns nullptr (with _diag set) on any failure.
const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
{
    dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
    const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
    // set up stack based storage for all arrays
    BuilderLoadedImage loadImagesStorage[512];
    Image::LinkedImage dependenciesStorage[512*8];
    InterposingTuple tuplesStorage[64];
    Closure::PatchEntry cachePatchStorage[64];
    const char* weakDefNameStorage[64];
    _loadedImages.setInitialStorage(loadImagesStorage, 512);
    _dependencies.setInitialStorage(dependenciesStorage, 512*8);
    _interposingTuples.setInitialStorage(tuplesStorage, 64);
    _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
    _weakDefsFromChainedBinds.setInitialStorage(weakDefNameStorage, 64);
    // when the builder is torn down, unmap every file this builder mapped itself
    ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });

    // validate the mapped file really is a main executable for this arch/platform
    const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archName, _platform);
    if ( mainExecutable == nullptr )
        return nullptr;
    if ( !mainExecutable->isDynamicExecutable() ) {
        _diag.error("not a main executable");
        return nullptr;
    }
    _isLaunchClosure = true;

    // add any DYLD_INSERT_LIBRARIES
    // Note: only path and imageNum are filled in here; the files are actually
    // mapped (and their dependents loaded) in the loop further below.
    _nextIndex = 0;
    _pathOverrides.forEachInsertedDylib(^(const char* dylibPath) {
        BuilderLoadedImage insertEntry;
        insertEntry.loadedFileInfo.path = strdup_temp(dylibPath);
        insertEntry.imageNum = _startImageNum + _nextIndex++;
        insertEntry.unmapWhenDone = true;
        insertEntry.contentRebased = false;
        insertEntry.hasInits = false;
        insertEntry.markNeverUnload = true;
        insertEntry.rtldLocal = false;
        insertEntry.isBadImage = false;
        insertEntry.overrideImageNum = 0;
        _loadedImages.push_back(insertEntry);
    });
    // main executable's slot follows all inserted dylibs
    _mainProgLoadIndex = (uint32_t)_loadedImages.count();

    // add main executable
    BuilderLoadedImage mainEntry;
    mainEntry.loadedFileInfo = fileInfo;
    mainEntry.imageNum = _startImageNum + _nextIndex++;
    mainEntry.unmapWhenDone = false;     // kernel mapped it; not ours to unmap
    mainEntry.contentRebased = false;
    mainEntry.hasInits = false;
    mainEntry.markNeverUnload = true;
    mainEntry.rtldLocal = false;
    mainEntry.isBadImage = false;
    mainEntry.overrideImageNum = 0;
    _loadedImages.push_back(mainEntry);

    // get mach_headers for all images needed to launch this main executable
    LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
    recursiveLoadDependents(chainStart);
    if ( _diag.hasError() )
        return nullptr;
    // now map each inserted dylib and load its dependents; on failure either
    // abort (default) or, if allowInsertFailures, drop its slot and renumber
    for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
        closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, _loadedImages[i].loadedFileInfo.path, _archName, _platform);
        const char* originalLoadPath = _loadedImages[i].loadedFileInfo.path;
        _loadedImages[i].loadedFileInfo = loadedFileInfo;
        if ( _loadedImages[i].loadAddress() != nullptr ) {
            LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
            recursiveLoadDependents(insertChainStart);
        }
        if ( _diag.hasError() || (_loadedImages[i].loadAddress() == nullptr) ) {
            if ( !allowInsertFailures ) {
                if ( _diag.noError() )
                    _diag.error("could not load inserted dylib %s", originalLoadPath);
                return nullptr;
            }
            _diag.clearError(); // FIXME add way to plumb back warning
            // remove slot for inserted image that could not loaded
            _loadedImages.remove(i);
            i -= 1;                  // re-examine the entry shifted into slot i
            _mainProgLoadIndex -= 1;
            _nextIndex -= 1;
            // renumber images in this closure
            // NOTE(review): the upper bound uses the already-decremented
            // _nextIndex; looks intentional (only closure-local images are
            // renumbered) — confirm against later dyld releases
            for (uint32_t j=i+1; j < _loadedImages.count(); ++j) {
                if ( (_loadedImages[j].imageNum >= _startImageNum) && (_loadedImages[j].imageNum <= _startImageNum+_nextIndex) )
                    _loadedImages[j].imageNum -= 1;
            }
        }
    }
    loadDanglingUpwardLinks();

    // only some images need to go into closure (ones from dyld cache do not)
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum >= _startImageNum ) {
            writers.push_back(ImageWriter());
            buildImage(writers.back(), li);
            if ( _diag.hasError() )
                return nullptr;
        }
        // remember libdyld and libSystem so dyld can wire up runtime entry points
        if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
            _libDyldImageNum = li.imageNum;
        else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = li.imageNum;
    }

    // add initializer order into top level Images (may be inserted dylibs before main executable)
    // writers[0.._mainProgLoadIndex] correspond 1:1 to the inserted dylibs plus
    // the main executable because those occupy the front of _loadedImages
    for (uint32_t i=0; i <= _mainProgLoadIndex; ++i)
        computeInitOrder(writers[i], i);

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count());
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    // merge ImageArray object into LaunchClosure object
    __block LaunchClosureWriter closureWriter(imageArray);

    // record shared cache info
    if ( _dyldCache != nullptr ) {
        // record cache UUID (closure is invalid if the cache changes)
        uuid_t cacheUUID;
        _dyldCache->getUUID(cacheUUID);
        closureWriter.setDyldCacheUUID(cacheUUID);

        // record any cache patching needed because of dylib overriding cache
        for (const BuilderLoadedImage& li : _loadedImages) {
            if ( li.overrideImageNum != 0 ) {
                const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
                STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, cacheImage->patchableExportCount());
                MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
                    return (const MachOLoaded*)findDependent(mh, depIndex);
                };
                //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
                // for every export the cache dylib publishes, redirect cache
                // users to the overriding dylib's implementation (or NULL if
                // the override dropped the symbol)
                cacheImage->forEachPatchableExport(^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
                    dyld3::MachOAnalyzer::FoundSymbol foundInfo;
                    Diagnostics patchDiag;
                    Closure::PatchEntry patch;
                    patch.overriddenDylibInCache = li.overrideImageNum;
                    patch.exportCacheOffset = cacheOffsetOfImpl;
                    if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, foundInfo, reexportFinder) ) {
                        const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
                        patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
                        patch.replacement.image.offset = foundInfo.value;
                    }
                    else {
                        // this means the symbol is missing in the cache override dylib, so set any uses to NULL
                        patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
                        patch.replacement.absolute.value = 0;
                    }
                    patches.push_back(patch);
                });
                closureWriter.addCachePatches(patches);
            }
        }

        // handle any extra weak-def coalescing needed by chained fixups
        // (dyld's load order determines which definition wins; the cache must
        // be patched when a non-cache image's definition should win)
        if ( !_weakDefsFromChainedBinds.empty() ) {
            for (const char* symbolName : _weakDefsFromChainedBinds) {
                Image::ResolvedSymbolTarget cacheOverrideTarget;
                bool haveCacheOverride = false;
                bool foundCachOverrideIsWeakDef = false;
                for (const BuilderLoadedImage& li : _loadedImages) {
                    if ( !li.loadAddress()->hasWeakDefs() )
                        continue;
                    Image::ResolvedSymbolTarget target;
                    ResolvedTargetInfo targetInfo;
                    if ( findSymbolInImage(li.loadAddress(), symbolName, 0, false, target, targetInfo) ) {
                        if ( li.loadAddress()->inDyldCache() ) {
                            if ( haveCacheOverride ) {
                                Closure::PatchEntry patch;
                                patch.exportCacheOffset = (uint32_t)target.sharedCache.offset;
                                patch.overriddenDylibInCache = li.imageNum;
                                patch.replacement = cacheOverrideTarget;
                                _weakDefCacheOverrides.push_back(patch);
                            }
                            else {
                                // found first in cached dylib, so no need to patch cache for this symbol
                                break;
                            }
                        }
                        else {
                            // found image that exports this symbol and is not in cache
                            if ( !haveCacheOverride || (foundCachOverrideIsWeakDef && !targetInfo.isWeakDef) ) {
                                // update cache to use this symbol if it if first found or it is first non-weak found
                                cacheOverrideTarget = target;
                                foundCachOverrideIsWeakDef = targetInfo.isWeakDef;
                                haveCacheOverride = true;
                            }
                        }
                    }
                }
            }
        }

        // record any cache patching needed because weak-def C++ symbols override dyld cache
        if ( !_weakDefCacheOverrides.empty() )
            closureWriter.addCachePatches(_weakDefCacheOverrides);

    }

#if __IPHONE_OS_VERSION_MIN_REQUIRED
    // if closure is built on-device for iOS, then record boot UUID
    // (closure is invalidated by reboot; see kern.bootsessionuuid)
    char bootSessionUUID[256] = { 0 };
    size_t bootSize = sizeof(bootSessionUUID);
    if ( sysctlbyname("kern.bootsessionuuid", bootSessionUUID, &bootSize, NULL, 0) == 0 )
        closureWriter.setBootUUID(bootSessionUUID);
#endif

    // record any interposing info
    imageArray->forEachImage(^(const Image* image, bool &stop) {
        if ( !image->inDyldCache() )
            addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
    });

    // modify fixups in contained Images by applying interposing tuples
    closureWriter.applyInterposing();

    // set flags
    closureWriter.setUsedAtPaths(_atPathUsed);
    closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
    closureWriter.setInitImageCount((uint32_t)_loadedImages.count());

    // add other closure attributes
    addClosureInfo(closureWriter);

    // make result
    const LaunchClosure* result = closureWriter.finalize();
    imageArrayWriter.deallocate();

    return result;
}
1853
1854 // used by libdyld for dlopen()
1855 const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
1856 closure::ImageNum callerImageNum, bool noLoad, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
1857 {
1858 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
1859 // set up stack based storage for all arrays
1860 BuilderLoadedImage loadImagesStorage[512];
1861 Image::LinkedImage dependenciesStorage[512*8];
1862 Closure::PatchEntry cachePatchStorage[64];
1863 _loadedImages.setInitialStorage(loadImagesStorage, 512);
1864 _dependencies.setInitialStorage(dependenciesStorage, 512*8);
1865 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
1866 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
1867
1868 // fill in builder array from already loaded images
1869 bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
1870 uintptr_t callerImageIndex = UINTPTR_MAX;
1871 for (const LoadedImage& ali : alreadyLoadedList) {
1872 const Image* image = ali.image();
1873 const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
1874 bool inDyldCache = ma->inDyldCache();
1875 BuilderLoadedImage entry;
1876 ImageNum overrideImageNum;
1877 entry.loadedFileInfo.path = image->path();
1878 entry.loadedFileInfo.fileContent = ma;
1879 entry.loadedFileInfo.sliceOffset = 0;
1880 entry.loadedFileInfo.inode = 0;
1881 entry.loadedFileInfo.mtime = 0;
1882 entry.imageNum = image->imageNum();
1883 entry.dependents = image->dependentsArray();
1884 entry.unmapWhenDone = false;
1885 entry.contentRebased = inDyldCache;
1886 entry.hasInits = false;
1887 entry.markNeverUnload = image->neverUnload();
1888 entry.rtldLocal = ali.hideFromFlatSearch();
1889 entry.isBadImage = false;
1890 entry.overrideImageNum = 0;
1891 if ( !inDyldCache && image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
1892 entry.overrideImageNum = overrideImageNum;
1893 canUseSharedCacheClosure = false;
1894 }
1895 if ( !inDyldCache || cachedDylibsExpectedOnDisk )
1896 image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
1897 if ( entry.imageNum == callerImageNum )
1898 callerImageIndex = _loadedImages.count();
1899 _loadedImages.push_back(entry);
1900 }
1901 _alreadyInitedIndex = (uint32_t)_loadedImages.count();
1902
1903 // find main executable (may be needed for @executable_path)
1904 _isLaunchClosure = false;
1905 for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
1906 if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
1907 _mainProgLoadIndex = i;
1908 break;
1909 }
1910 }
1911
1912 // add top level dylib being dlopen()ed
1913 BuilderLoadedImage* foundTopImage;
1914 _nextIndex = 0;
1915 // @rpath has caller's LC_PRATH, then main executable's LC_RPATH
1916 BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
1917 LoadedImageChain chainCaller = { nullptr, callerImage };
1918 LoadedImageChain chainMain = { &chainCaller, _loadedImages[_mainProgLoadIndex] };
1919 if ( !findImage(path, chainMain, foundTopImage, false, canUseSharedCacheClosure) ) {
1920 // If we didn't find the image, but its a shared cache path, then try again with realpath.
1921 if ( (strncmp(path, "/usr/lib/", 9) == 0) || (strncmp(path, "/System/Library/", 16) == 0) ) {
1922 char resolvedPath[PATH_MAX];
1923 if ( _fileSystem.getRealPath(path, resolvedPath) ) {
1924 if ( !findImage(resolvedPath, chainMain, foundTopImage, false, canUseSharedCacheClosure) ) {
1925 return nullptr;
1926 }
1927 } else {
1928 // We didn't find a new path from realpath
1929 return nullptr;
1930 }
1931 } else {
1932 // Not in /usr/lib/ or /System/Library/
1933 return nullptr;
1934 }
1935 }
1936
1937 // exit early in RTLD_NOLOAD mode
1938 if ( noLoad ) {
1939 // if no new images added to _loadedImages, then requested path was already loaded
1940 if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
1941 *topImageNum = foundTopImage->imageNum;
1942 else
1943 *topImageNum = 0;
1944 return nullptr;
1945 }
1946
1947 // fast path if roots are not allowed and target is in dyld cache or is other
1948 if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
1949 if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
1950 *topImageNum = foundTopImage->imageNum;
1951 return nullptr;
1952 }
1953 }
1954
1955 // recursive load dependents
1956 // @rpath for stuff top dylib depends on uses LC_RPATH from caller, main exe, and dylib being dlopen()ed
1957 LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
1958 recursiveLoadDependents(chainTopDylib);
1959 if ( _diag.hasError() )
1960 return nullptr;
1961 loadDanglingUpwardLinks();
1962
1963 // only some images need to go into closure (ones from dyld cache do not)
1964 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
1965 for (BuilderLoadedImage& li : _loadedImages) {
1966 if ( li.imageNum >= _startImageNum ) {
1967 writers.push_back(ImageWriter());
1968 buildImage(writers.back(), li);
1969 }
1970 }
1971
1972 // check if top image loaded is in shared cache along with everything it depends on
1973 *topImageNum = foundTopImage->imageNum;
1974 if ( writers.count() == 0 ) {
1975 return nullptr;
1976 } else if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
1977 // We used a shared cache built closure, but now discovered roots. We need to try again
1978 topImageNum = 0;
1979 return sRetryDlopenClosure;
1980 }
1981
1982 // add initializer order into top level Image
1983 computeInitOrder(writers[0], (uint32_t)alreadyLoadedList.count());
1984
1985 // combine all Image objects into one ImageArray
1986 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count());
1987 for (ImageWriter& writer : writers) {
1988 imageArrayWriter.appendImage(writer.finalize());
1989 writer.deallocate();
1990 }
1991 const ImageArray* imageArray = imageArrayWriter.finalize();
1992
1993 // merge ImageArray object into LaunchClosure object
1994 DlopenClosureWriter closureWriter(imageArray);
1995
1996 // add other closure attributes
1997 closureWriter.setTopImageNum(foundTopImage->imageNum);
1998
1999 // record any cache patching needed because of dylib overriding cache
2000 if ( _dyldCache != nullptr ) {
2001 for (const BuilderLoadedImage& li : _loadedImages) {
2002 if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
2003 const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
2004 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, cacheImage->patchableExportCount());
2005 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
2006 return (const MachOLoaded*)findDependent(mh, depIndex);
2007 };
2008 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
2009 cacheImage->forEachPatchableExport(^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
2010 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
2011 Diagnostics patchDiag;
2012 Closure::PatchEntry patch;
2013 patch.overriddenDylibInCache = li.overrideImageNum;
2014 patch.exportCacheOffset = cacheOffsetOfImpl;
2015 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, foundInfo, reexportFinder) ) {
2016 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
2017 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
2018 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
2019 patch.replacement.image.offset = foundInfo.value;
2020 }
2021 else {
2022 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
2023 patch.replacement.absolute.value = 0;
2024 }
2025 patches.push_back(patch);
2026 });
2027 closureWriter.addCachePatches(patches);
2028 }
2029 }
2030 }
2031
2032 // Dlopen's should never keep track of missing paths as we don't cache these closures.
2033 assert(_mustBeMissingPaths == nullptr);
2034
2035 // make final DlopenClosure object
2036 const DlopenClosure* result = closureWriter.finalize();
2037 imageArrayWriter.deallocate();
2038 return result;
2039 }
2040
2041
2042 // used by dyld_closure_util
2043 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
2044 {
2045 closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archName, _platform);
2046 const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
2047 loadedFileInfo.path = mainPath;
2048 if (_diag.hasError())
2049 return nullptr;
2050 if (mh == nullptr) {
2051 _diag.error("could not load file");
2052 return nullptr;
2053 }
2054 if (!mh->isDynamicExecutable()) {
2055 _diag.error("file is not an executable");
2056 return nullptr;
2057 }
2058 const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
2059 const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
2060 loadedFileInfo.unload(loadedFileInfo);
2061 return launchClosure;
2062 }
2063
2064
// used by dyld shared cache builder
//
// Builds the ImageArray describing every dylib baked into the shared cache.
// All dylibs are pre-registered in _loadedImages before dependents are wired
// up, so recursiveLoadDependents must not discover any new images (asserted
// below).  Returns the finalized ImageArray (caller owns/deallocates).
const ImageArray* ClosureBuilder::makeDyldCacheImageArray(bool customerCache, const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
{
    // because this is run in cache builder using dispatch_apply() there is minimal stack space
    // so set up storage for all arrays to be vm_allocated
    uintptr_t maxImageCount = dylibs.count() + 16;
    _loadedImages.reserve(maxImageCount);
    _dependencies.reserve(maxImageCount*16);

    _makingDyldCacheImages = true;
    _makingCustomerCache = customerCache;
    _aliases = &aliases;

    // build _loadedImages[] with every dylib in cache
    // imageNums are assigned densely starting at _startImageNum, in dylibs order
    __block ImageNum imageNum = _startImageNum;
    for (const CachedDylibInfo& aDylibInfo : dylibs) {
        BuilderLoadedImage entry;
        entry.loadedFileInfo = aDylibInfo.fileInfo;
        entry.imageNum = imageNum++;
        entry.unmapWhenDone = false;
        entry.contentRebased = false;
        entry.hasInits = false;
        entry.markNeverUnload = true;   // cached dylibs are never unloaded
        entry.rtldLocal = false;
        entry.isBadImage = false;
        entry.overrideImageNum = 0;
        _loadedImages.push_back(entry);
    }

    // wire up dependencies between cached dylibs
    for (BuilderLoadedImage& li : _loadedImages) {
        LoadedImageChain chainStart = { nullptr, li };
        recursiveLoadDependents(chainStart);
        if ( _diag.hasError() )
            break;  // first error aborts; _diag carries the message to the caller
    }
    // every dependency must already be a cached dylib; no images may be added
    assert(_loadedImages.count() == dylibs.count());

    // create an ImageWriter for each cached dylib
    // writers[] is parallel to _loadedImages (and thus to imageNum order)
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        writers.push_back(ImageWriter());
        buildImage(writers.back(), li);
    }

    // add initializer order into each dylib
    for (const BuilderLoadedImage& li : _loadedImages) {
        uint32_t index = li.imageNum - _startImageNum;  // dense numbering => direct index
        computeInitOrder(writers[index], index);
    }

    // add exports patch info for each dylib
    for (const BuilderLoadedImage& li : _loadedImages) {
        uint32_t index = li.imageNum - _startImageNum;
        addCachePatchInfo(writers[index], li);
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count());
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    return imageArray;
}
2132
2133
#if BUILDING_CACHE_BUILDER
// Builds an ImageArray for dylibs/bundles that are NOT in the shared cache
// ("others").  Images that fail to build are marked bad (not fatal): the
// failure is propagated to everything that depends on them and those entries
// are written as invalid.  Returns the finalized ImageArray.
const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
{
    // because this is run in cache builder using dispatch_apply() there is minimal stack space
    // so set up storage for all arrays to be vm_allocated
    uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
    _loadedImages.reserve(maxImageCount);
    _dependencies.reserve(maxImageCount*16);

    // build _loadedImages[] with every dylib in cache, followed by others
    _nextIndex = 0;
    for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
        BuilderLoadedImage entry;
        entry.loadedFileInfo = aDylibInfo;
        entry.imageNum = _startImageNum + _nextIndex++;
        entry.unmapWhenDone = false;
        entry.contentRebased = false;
        entry.hasInits = false;
        entry.markNeverUnload = false;
        entry.rtldLocal = false;
        entry.isBadImage = false;
        entry.overrideImageNum = 0;
        _loadedImages.push_back(entry);
    }

    // wire up dependencies between cached dylibs
    // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
    // to check the count on each iteration.
    for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
        BuilderLoadedImage& li = _loadedImages[index];
        LoadedImageChain chainStart = { nullptr, li };
        recursiveLoadDependents(chainStart);
        if ( _diag.hasError() ) {
            // a failed "other" only invalidates itself (and its dependents, below)
            _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            _diag.clearError();
            li.isBadImage = true;    // mark bad
        }
    }

    // Propagate badness to dependents until a fixed point is reached:
    // any image that (transitively) depends on a bad image is itself bad.
    auto invalidateBadImages = [&]() {
        // Invalidate images with bad dependencies
        while (true) {
            bool madeChange = false;
            for (BuilderLoadedImage& li : _loadedImages) {
                if (li.isBadImage) {
                    // Already invalidated
                    continue;
                }
                for (Image::LinkedImage depIndex : li.dependents) {
                    if ( depIndex.imageNum() == kMissingWeakLinkedImage )
                        continue;
                    // cached dylibs can never be bad; skip them
                    if ( depIndex.imageNum() < dyld3::closure::kLastDyldCacheImageNum )
                        continue;
                    BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
                    if (depImage.isBadImage) {
                        _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
                        li.isBadImage = true;    // mark bad
                        madeChange = true;
                    }
                }
            }
            if (!madeChange)
                break;
        }
    };

    invalidateBadImages();

    // create an ImageWriter for each cached dylib
    // Writers are emitted in _loadedImages order for every non-cache entry, so
    // writers[imageNum - _startImageNum] lines up with the "others" added above.
    STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == 0 ) {
            // NOTE(review): presumably an entry whose image could not be assigned
            // a number; it still gets a (invalid) writer slot — confirm upstream
            writers.push_back(ImageWriter());
            writers.back().setInvalid();
            continue;
        }
        if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
            continue;   // cached dylibs are not part of this array
        writers.push_back(ImageWriter());
        buildImage(writers.back(), li);
        if ( _diag.hasError() ) {
            _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
            _diag.clearError();
            li.isBadImage = true;    // mark bad
            writers.back().setInvalid();
        }
    }

    // buildImage() may have discovered new bad images; propagate again
    invalidateBadImages();

    // add initializer order into each dylib
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
            continue;
        if (li.isBadImage)
            continue;
        uint32_t index = li.imageNum - _startImageNum;
        computeInitOrder(writers[index], index);
    }

    // combine all Image objects into one ImageArray
    ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count());
    for (ImageWriter& writer : writers) {
        imageArrayWriter.appendImage(writer.finalize());
        writer.deallocate();
    }
    const ImageArray* imageArray = imageArrayWriter.finalize();

    return imageArray;
}
#endif
2247
2248
2249 bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
2250 {
2251 for (const LoadedImage& ali : loadedList) {
2252 if ( ali.image()->representsImageNum(imageNum) )
2253 return true;
2254 }
2255 return false;
2256 }
2257
2258 void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
2259 {
2260 // breadth first load
2261 STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
2262 image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
2263 if ( !inLoadedImageArray(loadedList, depImageNum) ) {
2264 const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
2265 loadedList.push_back(LoadedImage::make(depImage));
2266 needToRecurse.push_back(depImage);
2267 }
2268 });
2269
2270 // recurse load
2271 for (const Image* img : needToRecurse) {
2272 buildLoadOrderRecurse(loadedList, imagesArrays, img);
2273 }
2274 }
2275
2276 void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
2277 {
2278 const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImage());
2279 loadedList.push_back(LoadedImage::make(topImage));
2280 buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
2281 }
2282
2283
2284
2285 } // namespace closure
2286 } // namespace dyld3