/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <ext/__hash>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <sys/sysctl.h>

#include <mach-o/dyld_priv.h>

#include "ClosureWriter.h"
#include "ClosureBuilder.h"
#include "MachOAnalyzer.h"
#include "libdyldEntryVector.h"
#include "Tracing.h"

#define CLOSURE_SELOPT_WRITE
#include "objc-shared-cache.h"

namespace dyld3 {
namespace closure {

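// Sentinel value, distinct from both nullptr and any valid closure pointer,
// that lets callers distinguish "retry building this dlopen closure" from "failed".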
const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);

ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
                               const GradedArchs& archs, const PathOverrides& pathOverrides, AtPath atPathHandling, bool allowRelativePaths,
                               LaunchErrorInfo* errorInfo, Platform platform, const CacheDylibsBindingHandlers* handlers)
    : _fileSystem(fileSystem), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archs(archs), _platform(platform), _startImageNum(startImageNum),
      _handlers(handlers), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive), _allowRelativePaths(allowRelativePaths)
{
    if ( dyldCache != nullptr ) {
        _dyldImageArray = dyldCache->cachedDylibsImageArray();
        if ( (dyldCache->header.otherImageArrayAddr != 0) && (dyldCache->header.progClosuresSize == 0) )
            _makingClosuresInCache = true;
    }
}

ClosureBuilder::~ClosureBuilder() {
    if ( _tempPaths != nullptr )
        PathPool::deallocate(_tempPaths);
    if ( _mustBeMissingPaths != nullptr )
        PathPool::deallocate(_mustBeMissingPaths);
    if ( _objcDuplicateClassWarnings != nullptr )
        PathPool::deallocate(_objcDuplicateClassWarnings);
}

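// Searches for an image, in order: already-loaded images (by exact path, then by
// @rpath leaf name/installName), images already loaded via a different symlink
// (matched by inode/mtime), the dyld shared cache (including pre-built dlopen
// closures for OS dylibs), and finally mmap()ing the file from disk. Each path
// variant produced by the PathOverrides is tried in turn.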
bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, LinkageType linkageType,
                               uint32_t compatVersion, bool canUseSharedCacheClosure)
{
    // There shouldn't be an error here as the callers should stop trying to find more images if they get an error for an image
    _diag.assertNoError();

    __block bool result = false;

    // record if this is a non-overridable path
    bool pathIsInDyldCacheWhichCannotBeOverridden = false;
    bool dylibsExpectedOnDisk = true;
    if ( _dyldCache != nullptr ) {
        pathIsInDyldCacheWhichCannotBeOverridden = _dyldCache->hasNonOverridablePath(loadPath);
        dylibsExpectedOnDisk = _dyldCache->header.dylibsExpectedOnDisk;
    }

    _pathOverrides.forEachPathVariant(loadPath, pathIsInDyldCacheWhichCannotBeOverridden, ^(const char* possibleVariantPath, bool isFallbackPath, bool& stopPathVariant) {

        // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
        bool isRPath = (strncmp(possibleVariantPath, "@rpath/", 7) == 0);

        // passing a leaf name to dlopen() allows rpath searching for it
        // FIXME: Does this apply to DYLD_INSERT_LIBRARIES too?
        bool implicitRPath = (linkageType == LinkageType::kDynamic) && (loadPath[0] != '/') && (loadPath == possibleVariantPath) && (_atPathHandling != AtPath::none);

        // expand @ paths
        forEachResolvedPathVar(possibleVariantPath, forImageChain, implicitRPath, linkageType,
                               ^(const char* possiblePath, bool& stop) {
            if ( possibleVariantPath != possiblePath )
                _atPathUsed = true;

            // look at already loaded images
            const char* leafName = strrchr(possiblePath, '/');
            for (BuilderLoadedImage& li: _loadedImages) {
                if ( strcmp(li.path(), possiblePath) == 0 ) {
                    foundImage = &li;
                    result = true;
                    stop = true;
                    return;
                }
                else if ( isRPath ) {
                    // Special case @rpath/ because the name in li.fileInfo.path is a full path.
                    // Getting the installName is expensive, so first see if an already loaded image
                    // has the same leaf name, and if so see if its installName matches the requested @rpath
                    if (const char* aLeaf = strrchr(li.path(), '/')) {
                        if ( strcmp(aLeaf, leafName) == 0 ) {
                            if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
                                foundImage = &li;
                                result = true;
                                stop = true;
                                return;
                            }
                        }
                    }
                }
            }

            // look to see if image already loaded via a different symlink
            bool fileFound = false;
            uint64_t fileFoundINode = 0;
            uint64_t fileFoundMTime = 0;
            bool inodesMatchRuntime = false;
            // Note, we only do this check if we even expect to find this on-disk
            // We can also use the pathIsInDyldCacheWhichCannotBeOverridden result if we are still trying the same path
            // it was computed from
            if ( dylibsExpectedOnDisk || !pathIsInDyldCacheWhichCannotBeOverridden || (loadPath != possiblePath) ) {
                if ( _fileSystem.fileExists(possiblePath, &fileFoundINode, &fileFoundMTime, nullptr, &inodesMatchRuntime) ) {
                    fileFound = true;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( (li.loadedFileInfo.inode == fileFoundINode) && (li.loadedFileInfo.mtime == fileFoundMTime) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            bool unmapWhenDone = false;
            bool contentRebased = false;
            bool hasInits = false;
            bool markNeverUnload = false;
            bool mustBuildClosure = _dyldCacheInvalidFormatVersion;
            ImageNum overrideImageNum = 0;
            ImageNum foundImageNum = 0;
            const MachOAnalyzer* mh = nullptr;
            const char* filePath = nullptr;
            LoadedFileInfo loadedFileInfo;

            // look in dyld cache
            filePath = possiblePath;
            char realPath[MAXPATHLEN];
            if ( _dyldImageArray != nullptr ) {
                uint32_t dyldCacheImageIndex;
                bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
                if ( !foundInCache && fileFound ) {
                    // see if this is an OS dylib/bundle with a pre-built dlopen closure
                    // We can only use the pre-built closure if we are dynamic linkage (a dlopen) and
                    // there are no roots
                    if ( canUseSharedCacheClosure && (linkageType == LinkageType::kDynamic) ) {
                        if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( !otherImage->isInvalid() ) {
                                bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
                                // use pre-built Image if it does not have mtime/inode, or it does and it matches the current file info
                                if ( !hasInodeInfo || ((expectedInode == fileFoundINode) && (expectedModTime == fileFoundMTime)) ) {
                                    loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archs, _platform, realPath);
                                    if ( _diag.noError() ) {
                                        mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                                        foundImageNum = otherImage->imageNum();
                                        unmapWhenDone = true;
                                        contentRebased = false;
                                        hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
                                        // Use the realpath in the case where we loaded a symlink
                                        // The closure must have recorded an alias path
                                        if (realPath[0] != '\0')
                                            filePath = realPath;
                                    }
                                }
                            }
                        }
                    }
                    // if not found in cache, may be a symlink to something in cache
                    if ( mh == nullptr ) {
                        if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                            foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
                            if ( foundInCache ) {
                                filePath = realPath;
#if BUILDING_LIBDYLD
                                // handle case where OS dylib was updated after this process launched
                                if ( foundInCache ) {
                                    for (BuilderLoadedImage& li: _loadedImages) {
                                        if ( strcmp(li.path(), realPath) == 0 ) {
                                            foundImage = &li;
                                            result = true;
                                            stop = true;
                                            return;
                                        }
                                    }
                                }
#endif
                            }
                        }
                    }
                }

                // if using a cached dylib, look to see if there is an override
                if ( foundInCache ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    bool useCache = true;
                    markNeverUnload = true; // dylibs in cache, or dylibs that override cache, should not be unloaded at runtime
                    const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                    if ( image->overridableDylib() ) {
                        if ( fileFound ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( image->hasFileModTimeAndInode(expectedInode, expectedModTime) ) {
                                // macOS where dylibs remain on disk. only use cache if mtime and inode have not changed
                                useCache = ( (fileFoundINode == expectedInode) && (fileFoundMTime == expectedModTime) );
                            }
                            else if ( _makingClosuresInCache ) {
                                // during iOS cache build, don't look at files on disk, use ones in cache
                                useCache = true;
                            }
                            else {
                                // iOS internal build. Any dylib on disk overrides the cache
                                useCache = false;
                            }
                        }
                        if ( !useCache ) {
                            overrideImageNum = dyldCacheImageNum;
                            _foundDyldCacheRoots = true;
                        }
                    }
                    if ( useCache ) {
                        foundImageNum = dyldCacheImageNum;
                        mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                        unmapWhenDone = false;
                        // if we are building ImageArray in dyld cache, content is not rebased
                        contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
                        hasInits = image->hasInitializers() || image->mayHavePlusLoads();
                        // If the cache format is different from dyld/libdyld then we can't use this closure.
                        if ( (_dyldCache->header.formatVersion != dyld3::closure::kFormatVersion) || !canUseSharedCacheClosure ) {
                            mustBuildClosure = true;
                            _foundDyldCacheRoots = true;
                        }
                    }
                }
            }

            // If we are building the cache, and don't find an image, then it might be weak so just return
            if (_makingDyldCacheImages) {
                addMustBeMissingPath(possiblePath);
                return;
            }

            // if not found yet, mmap file
            if ( mh == nullptr ) {
                loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archs, _platform, realPath);
                mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                if ( mh == nullptr ) {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        // If we found the file then we want to skip it, as it's not a valid macho for this platform/arch.
                        // We can't record skipped file mtime/inode for caches built on a different machine than the one they run on.
                        // In that case, we expect the file to be mastered out, as otherwise we couldn't detect whether it has
                        // changed or not on the device side
                        if (fileFound && inodesMatchRuntime) {
                            addSkippedFile(possiblePath, fileFoundINode, fileFoundMTime);
                        } else {
                            addMustBeMissingPath(possiblePath);
                        }
                    }
                    return;
                }
                if ( linkageType != LinkageType::kDynamic ) {
                    // LC_LOAD_DYLIB can only link with dylibs, and DYLD_INSERT_LIBRARIES can only be dylibs
                    if ( !mh->isDylib() ) {
                        _diag.error("found '%s' which is not a dylib. Needed by '%s'", filePath, forImageChain.image.path());
                        return;
                    }
                    // verify this is a compatible dylib version
                    const char* installName;
                    uint32_t foundCompatVers;
                    uint32_t foundCurrentVers;
                    mh->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
                    if ( (foundCompatVers < compatVersion) && mh->enforceCompatVersion() ) {
                        char foundStr[32];
                        char requiredStr[32];
                        MachOFile::packedVersionToString(foundCompatVers, foundStr);
                        MachOFile::packedVersionToString(compatVersion, requiredStr);
                        _diag.error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
                                    filePath, foundStr, requiredStr, forImageChain.image.path());
                        return;
                    }
                }
                else if ( mh->isMainExecutable() ) {
                    // when dlopen()ing a main executable, it must be a dynamic Position Independent Executable
                    if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
                        _diag.error("not PIE");
                        return;
                    }
                }
                // Use the realpath in the case where we loaded a symlink
                // The closure must have recorded an alias path
                if (realPath[0] != '\0')
                    filePath = realPath;
                foundImageNum = _startImageNum + _nextIndex++;
                _foundNonCachedImage = true;
                mustBuildClosure = true;
                unmapWhenDone = true;
            } else {
                loadedFileInfo.fileContent = mh;
            }

            // if path is not the original path, or it's an inserted path (as forEachInColonList uses a stack temporary)
            if ( (filePath != loadPath) || (linkageType == LinkageType::kInserted) ) {
                // possiblePath may be a temporary (stack) string; since we found a file at that path, make it permanent
                filePath = strdup_temp(filePath);
                // check if this overrides what would have been found in cache
                // This is the case where we didn't find the image with the path in the shared cache, perhaps as it used library paths,
                // but the path we requested had pointed in to the cache
                // FIXME: What if load path is via an @rpath and we will override the cache?
                if ( overrideImageNum == 0 ) {
                    if ( _dyldImageArray != nullptr ) {
                        uint32_t dyldCacheImageIndex;
                        if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
                            ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                            if ( possibleOverrideNum != foundImageNum )
                                overrideImageNum = possibleOverrideNum;
                        }
                    }
                }
            }

            if ( !markNeverUnload ) {
                switch (linkageType) {
                    case LinkageType::kStatic:
                        // Static linkages can only be unloaded if the image loading us can be unloaded
                        markNeverUnload = forImageChain.image.markNeverUnload;
                        break;
                    case LinkageType::kDynamic:
                        markNeverUnload = false;
                        break;
                    case LinkageType::kInserted:
                        // Inserted libraries must never be unloaded
                        markNeverUnload = true;
                        break;
                };
            }

            if ( !markNeverUnload ) {
                // If the parent didn't force us to be never unload, other conditions still may
                if ( mh->hasThreadLocalVariables() ) {
                    markNeverUnload = true;
                } else if ( mh->hasObjC() && mh->isDylib() ) {
                    markNeverUnload = true;
                } else {
                    // record if image has DOF sections
                    __block bool hasDOFs = false;
                    mh->forEachDOFSection(_diag, ^(uint32_t offset) {
                        hasDOFs = true;
                    });
                    if ( hasDOFs )
                        markNeverUnload = true;
                }
            }

            // Set the path again just in case it was strdup'ed.
            loadedFileInfo.path = filePath;

            // add new entry
            BuilderLoadedImage entry;
            entry.loadedFileInfo = loadedFileInfo;
            entry.imageNum = foundImageNum;
            entry.unmapWhenDone = unmapWhenDone;
            entry.contentRebased = contentRebased;
            entry.hasInits = hasInits;
            entry.markNeverUnload = markNeverUnload;
            entry.rtldLocal = false;
            entry.isBadImage = false;
            entry.mustBuildClosure = mustBuildClosure;
            entry.hasMissingWeakImports = false;
            entry.overrideImageNum = overrideImageNum;
            _loadedImages.push_back(entry);
            foundImage = &_loadedImages.back();
            if ( isFallbackPath )
                _fallbackPathUsed = true;
            stop = true;
            result = true;
        });
        if (result)
            stopPathVariant = true;
    }, _platform);

    // If we found a file, but also had an error, then we must have logged a diagnostic for a file we couldn't use.
    // Clear that for now.
    // FIXME: Surface this to the user in case they wanted to see the error
    if (result && _diag.hasError())
        _diag.clearError();

    return result;
}

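// Expands "@loader_path/..." relative to the directory of the image that contains
// the load command. For example (illustrative paths only): if the loading image is
// "/Applications/Foo.app/Contents/MacOS/Foo", then "@loader_path/../Frameworks/Bar.dylib"
// becomes "/Applications/Foo.app/Contents/MacOS/../Frameworks/Bar.dylib".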
bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH ) {
                // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
                if ( _isLaunchClosure )
                    return false;
            }
            break;
        case AtPath::all:
            break;
    }
    if ( strncmp(loadPath, "@loader_path/", 13) == 0 ) {
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[13]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@loader_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@loader_path" without trailing slash
        strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}

bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH )
                return false;
            break;
        case AtPath::all:
            break;
    }

    if ( strncmp(loadPath, "@executable_path/", 17) == 0 ) {
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            strcpy(lastSlash+1, &loadPath[17]);
            return true;
        }
    }
    else if ( fromLCRPATH && (strcmp(loadPath, "@executable_path") == 0) ) {
        // <rdar://problem/52881387> in LC_RPATH allow "@executable_path" without trailing slash
        strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
        char* lastSlash = strrchr(fixedPath, '/');
        if ( lastSlash != nullptr ) {
            lastSlash[1] = '\0';
            return true;
        }
    }

    return false;
}

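// Resolves one load path into the candidate file system paths the handler should try,
// in order: @loader_path expansion, @executable_path expansion, then @rpath expansion
// against the LC_RPATH stack of the image chain (an implicit rpath is synthesized for
// bare leaf names passed to dlopen()), and finally the path as given.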
void ClosureBuilder::forEachResolvedPathVar(const char* loadPath, const LoadedImageChain& forImageChain,
                                            bool implicitRPath, LinkageType linkageType,
                                            void (^handler)(const char* possiblePath, bool& stop))
{
    // don't expand @loader_path or @executable_path if disallowed
    if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') && (loadPath[1] != 'r') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // quick out if not @ path or not implicit rpath
    if ( !implicitRPath && (loadPath[0] != '@') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // expand @loader_path
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
    if ( (linkageType != LinkageType::kInserted) && expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @executable_path
    // Note this is supported for DYLD_INSERT_LIBRARIES
    if ( expandAtExecutablePath(loadPath, false, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @rpath
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    const char* rpathTail = nullptr;
    char implicitRpathBuffer[PATH_MAX];
    if ( linkageType != LinkageType::kInserted ) {
        if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
            // note: rpathTail starts with '/'
            rpathTail = &loadPath[6];
        }
        else if ( implicitRPath ) {
            // make rpathTail start with '/'
            strlcpy(implicitRpathBuffer, "/", PATH_MAX);
            strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
            rpathTail = implicitRpathBuffer;
        }
    }
    if ( rpathTail != nullptr ) {
        // rpath expansion is technically a stack of rpath dirs built starting with the main executable and pushing
        // LC_RPATHs from each dylib as they are recursively loaded. Our imageChain represents that stack.
        __block bool done = false;
        for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && !done; link = link->previous) {
            link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
                // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.loadedFileInfo.path);
                if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, tempPath) ) {
                    // @loader_path allowed and expanded
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                else if ( rPath[0] == '/' ) {
                    // LC_RPATH is an absolute path, not blocked by AtPath::none
                    strlcpy(tempPath, rPath, PATH_MAX);
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                if (stop)
                    done = true;
#if 0
                if ( _fileSystem.fileExists(tempPath) ) {
                    stop = true;
                    result = strdup_temp(tempPath);
                }
                else {
                    // Don't add must-be-missing paths for dlopen as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        addMustBeMissingPath(tempPath);
                    }
                }
#endif
            });
        }
        if (done)
            return;
    }

    bool stop = false;
    handler(loadPath, stop);
}

const char* ClosureBuilder::strdup_temp(const char* path)
{
    if ( _tempPaths == nullptr )
        _tempPaths = PathPool::allocate();
    return _tempPaths->add(path);
}

void ClosureBuilder::addMustBeMissingPath(const char* path)
{
    //fprintf(stderr, "must be missing: %s\n", path);
    if ( _mustBeMissingPaths == nullptr )
        _mustBeMissingPaths = PathPool::allocate();
    _mustBeMissingPaths->add(path);
}

void ClosureBuilder::addSkippedFile(const char* path, uint64_t inode, uint64_t mtime)
{
    _skippedFiles.push_back({ strdup_temp(path), inode, mtime });
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
{
    return findLoadedImage(imageNum).loadAddress();
}

const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            if (li.isBadImage) {
                // Bad image during building group 1 closures, so the dependents array
                // is potentially incomplete.
                return nullptr;
            }
            ImageNum childNum = li.dependents[depIndex].imageNum();
            // This is typically something like a missing weak-dylib we are re-exporting a weak-import symbol from
            if (childNum == kMissingWeakLinkedImage)
                return nullptr;
            return machOForImageNum(childNum);
        }
    }
    return nullptr;
}

ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li.imageNum;
        }
    }
    assert(0 && "unknown mach-o");
    return 0;
}

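// Loads all dependents of one image, breadth first: each LC_LOAD_DYLIB (and its
// weak/re-export/upward variants) is resolved via findImage(), then the newly
// loaded dependents are recursed into. A missing weak dylib is recorded as
// kMissingWeakLinkedImage rather than treated as an error.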
void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain, bool canUseSharedCacheClosure)
{
    // if dependents is set, then we have already loaded this
    if ( forImageChain.image.dependents.begin() != nullptr )
        return;

    uintptr_t startDepIndex = _dependencies.count();
    // add dependents
    __block uint32_t depIndex = 0;
    forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
        Image::LinkKind kind = Image::LinkKind::regular;
        if ( isWeak )
            kind = Image::LinkKind::weak;
        else if ( isReExport )
            kind = Image::LinkKind::reExport;
        else if ( isUpward )
            kind = Image::LinkKind::upward;
        BuilderLoadedImage* foundImage;
        if ( findImage(loadPath, forImageChain, foundImage, LinkageType::kStatic, compatVersion, canUseSharedCacheClosure) ) {
            ImageNum foundImageNum = foundImage->imageNum;
            if ( _diag.noError() )
                _dependencies.push_back(Image::LinkedImage(kind, foundImageNum));
        }
        else if ( isWeak ) {
            _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
            // <rdar://problem/54387345> don't let an error loading a weak dylib cause everything to fail
            // _diag is checked after each dependent load, so if there is an error it was with loading the current dylib.
            // Since it is a weak load, it is ok to ignore and go on.
            _diag.clearError();
        }
        else {
            BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
            extra[0] = '\0';
            const char* targetLeaf = strrchr(loadPath, '/');
            if ( targetLeaf == nullptr )
                targetLeaf = loadPath;
            if ( _mustBeMissingPaths != nullptr ) {
                strcpy(extra, ", tried but didn't find: ");
                _mustBeMissingPaths->forEachPath(^(const char* aPath) {
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                });
            }
            if ( !_skippedFiles.empty() ) {
                strcpy(extra, ", tried but invalid: ");
                for (const SkippedFile& skippedFile : _skippedFiles) {
                    const char* aPath = skippedFile.path;
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                }
            }
            if ( _diag.hasError() ) {
#if BUILDING_CACHE_BUILDER
                std::string errorMessageBuffer = _diag.errorMessage();
                const char* msg = errorMessageBuffer.c_str();
#else
                const char* msg = _diag.errorMessage();
#endif
                char msgCopy[strlen(msg)+4];
                strcpy(msgCopy, msg);
                _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
            }
            else {
                _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
            }
            if ( _launchErrorInfo != nullptr ) {
                _launchErrorInfo->kind = DYLD_EXIT_REASON_DYLIB_MISSING;
                _launchErrorInfo->clientOfDylibPath = strdup_temp(forImageChain.image.path());
                _launchErrorInfo->targetDylibPath = strdup_temp(loadPath);
                _launchErrorInfo->symbol = nullptr;
            }
        }
        ++depIndex;
        if ( _diag.hasError() )
            stop = true;
    });
    if ( _diag.hasError() )
        return;
    forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);

    // breadth first recurse
    for (Image::LinkedImage dep : forImageChain.image.dependents) {
        // don't recurse upwards
        if ( dep.kind() == Image::LinkKind::upward )
            continue;
        // don't recurse down missing weak links
        if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
            continue;
        BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
        LoadedImageChain chain = { &forImageChain, depLoadedImage };
        recursiveLoadDependents(chain, canUseSharedCacheClosure);
        if ( _diag.hasError() )
            break;
    }
}

void ClosureBuilder::loadDanglingUpwardLinks(bool canUseSharedCacheClosure)
{
    bool danglingFixed;
    do {
        danglingFixed = false;
        for (BuilderLoadedImage& li : _loadedImages) {
            if ( li.dependents.begin() == nullptr ) {
                // this image does not have its dependents set yet (probably a dangling upward link, or referenced by an upward link)
                LoadedImageChain chain = { nullptr, li };
                recursiveLoadDependents(chain, canUseSharedCacheClosure);
                danglingFixed = true;
                break;
            }
        }
    } while (danglingFixed && _diag.noError());
}

bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
{
    // only set on dylibs in the dyld shared cache
    if ( !_makingDyldCacheImages )
        return false;

    // on macOS, dylibs on disk always override the cache
    if ( _platform == Platform::macOS )
        return true;

    // on embedded platforms with an Internal cache, allow overrides
    if ( !_makingCustomerCache )
        return true;

    // embedded platform customer caches: no overrides
    return false; // FIXME, allow libdispatch.dylib to be overridden
}

void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const MachOAnalyzer* macho = forImage.loadAddress();
    // set ImageNum
    writer.setImageNum(forImage.imageNum);

    // set flags
    writer.setHasWeakDefs(macho->hasWeakDefs());
    writer.setIsBundle(macho->isBundle());
    writer.setIsDylib(macho->isDylib());
    writer.setIs64(macho->is64());
    writer.setIsExecutable(macho->isMainExecutable());
    writer.setUses16KPages(macho->uses16KPages());
    writer.setOverridableDylib(overridableDylib(forImage));
    writer.setInDyldCache(macho->inDyldCache());
    if ( macho->hasObjC() ) {
        writer.setHasObjC(true);
        bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
        writer.setHasPlusLoads(hasPlusLoads);
        if ( hasPlusLoads )
            forImage.hasInits = true;
    }
    else {
        writer.setHasObjC(false);
        writer.setHasPlusLoads(false);
    }

    if ( forImage.markNeverUnload ) {
        writer.setNeverUnload(true);
    }

#if BUILDING_DYLD || BUILDING_LIBDYLD
    if ( _foundDyldCacheRoots ) {
        // If we had roots, then some images are potentially on-disk while others are
        // being rebuilt for a new initializer order, but do not exist on disk
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of the final file system
        }
        else {
            // file is either not in cache, or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    } else {
        // shared cache not built by dyld or libdyld.dylib, so must be a real file
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#else
    if ( _platform == Platform::macOS || MachOFile::isSimulatorPlatform(_platform) ) {
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of the final file system
        }
        else {
            // file is either not in cache, or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    }
    else {
        // all other platforms, cache is built off-device, so inodes are not known
    }
#endif

    // add info on how to load image
    if ( !macho->inDyldCache() ) {
        writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
        // add code signature, if signed
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
            macho->forEachCDHash(^(const uint8_t* cdHash) {
                writer.addCDHash(cdHash);
            });
        }
        // add FairPlay encryption range if encrypted
        uint32_t fairPlayFileOffset;
        uint32_t fairPlaySize;
        if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
            writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
        }
    }

    // set path
    writer.addPath(forImage.path());
    if ( _aliases != nullptr ) {
        for (const CachedDylibAlias& alias : *_aliases) {
            if ( strcmp(alias.realPath, forImage.path()) == 0 )
                writer.addPath(alias.aliasPath);
        }
    }

    // set uuid, if it has one
    uuid_t uuid;
    if ( macho->getUuid(uuid) )
        writer.setUUID(uuid);

    // set dependents
    writer.setDependents(forImage.dependents);

    // set segments
    addSegments(writer, macho);

    // record if this dylib overrides something in the cache
    if ( forImage.overrideImageNum != 0 ) {
        writer.setAsOverrideOf(forImage.overrideImageNum);
        const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
        writer.addPath(overridePath);
        if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
            _libDyldImageNum = forImage.imageNum;
        else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = forImage.imageNum;
    }

    // do fixup info for non-cached images, and for cached ones if building the cache
    if ( !macho->inDyldCache() || _makingDyldCacheImages ) {
        if ( macho->hasChainedFixups() ) {
            addChainedFixupInfo(writer, forImage);
        }
        else {
            if ( _handlers != nullptr ) {
                reportRebasesAndBinds(writer, forImage);
            }
            else {
                // Note we have to do binds before rebases so that we know if we have missing lazy binds
                addBindInfo(writer, forImage);
                if ( _diag.noError() )
                    addRebaseInfo(writer, macho);
            }
        }
    }
    if ( _diag.hasError() ) {
        writer.setInvalid();
        return;
    }

    // Don't build iOSMac for now. Just add an invalid placeholder
    if ( _makingDyldCacheImages && strncmp(forImage.path(), "/System/iOSSupport/", 19) == 0 ) {
        writer.setInvalid();
        return;
    }

    // add initializers
    bool contentRebased = forImage.contentRebased;
    __block unsigned initCount = 0;
    Diagnostics initializerDiag;
    macho->forEachInitializer(initializerDiag, contentRebased, ^(uint32_t offset) {
        ++initCount;
    }, _dyldCache);
    if ( initializerDiag.noError() ) {
        if ( initCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
            __block unsigned index = 0;
            macho->forEachInitializer(_diag, contentRebased, ^(uint32_t offset) {
                initOffsets[index++] = offset;
            }, _dyldCache);
            writer.setInitOffsets(initOffsets, initCount);
            forImage.hasInits = true;
        }
    }
    else {
        // mod_init_func section is malformed, might be self-modifying pointers
        macho->forEachInitializerPointerSection(_diag, ^(uint32_t sectionOffset, uint32_t sectionSize, const uint8_t* content, bool& stop) {
            writer.setInitSectRange(sectionOffset, sectionSize);
            forImage.hasInits = true;
        });
    }

    // add terminators (except for dylibs in the cache, because they are never unloaded)
    if ( !macho->inDyldCache() ) {
        __block unsigned termCount = 0;
        macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
            ++termCount;
        });
        if ( termCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, termOffsets, termCount);
            __block unsigned index = 0;
            macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
                termOffsets[index++] = offset;
            });
            writer.setTermOffsets(termOffsets, termCount);
        }
    }

    // record if image has DOF sections
    STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
    macho->forEachDOFSection(_diag, ^(uint32_t offset) {
        dofSectionOffsets.push_back(offset);
    });
    if ( !dofSectionOffsets.empty() ) {
        writer.setDofOffsets(dofSectionOffsets);
    }
}

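// For on-disk images, segments are encoded as page counts, with explicit padding
// entries (paddingNotSeg=1) for gaps in the file or vm layout. As an illustrative
// example with 4KB pages: a segment with fileSize 0x2100 and vmSize 0x3000 becomes
// { filePageCount=3, vmPageCount=3 } (sizes are rounded up to whole pages).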
void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint32_t segCount = mh->segmentCount();
    if ( mh->inDyldCache() ) {
        uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
        BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
        });
        writer.setCachedSegments(segs, segCount);
    }
    else {
        const uint32_t pageSize = (mh->uses16KPages() ? 0x4000 : 0x1000);
        __block uint32_t diskSegIndex = 0;
        __block uint32_t totalPageCount = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd = 0;
        BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
                Image::DiskSegment filePadding;
                filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
                filePadding.vmPageCount = 0;
                filePadding.permissions = 0;
                filePadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = filePadding;
            }
            if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
                Image::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount = (info.vmAddr - lastVmAddrEnd)/pageSize;
                vmPadding.permissions = 0;
                vmPadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = vmPadding;
                totalPageCount += vmPadding.vmPageCount;
            }
            {
                Image::DiskSegment segInfo;
                segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
                segInfo.vmPageCount = (info.vmSize+pageSize-1)/pageSize;
                segInfo.permissions = info.protections & 7;
                segInfo.paddingNotSeg = 0;
                if ( info.readOnlyData )
                    segInfo.permissions = Image::DiskSegment::kReadOnlyDataPermissions;
                dsegs[diskSegIndex++] = segInfo;
                totalPageCount += segInfo.vmPageCount;
                if ( info.fileSize != 0 )
                    lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
                if ( info.vmSize != 0 )
                    lastVmAddrEnd = info.vmAddr + info.vmSize;
            }
        });
        writer.setDiskSegments(dsegs, diskSegIndex);
    }
}

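// Helper for addInterposingTuples() below: decides whether a fixup lands on either
// pointer of an interposing tuple. Illustrative example: with 8-byte pointers,
// entrySize is 16, so fixups at section offsets 48 and 56 both report tupleIndex 3
// (the tuple's two pointer slots), while a fixup at offset 52 is not a tuple fixup.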
static bool isTupleFixup(uint64_t tupleSectVmStartOffset, uint64_t tupleSectVmEndOffset, uint64_t imageOffsetOfFixup, uint32_t entrySize, uint32_t& tupleIndex)
{
    if ( imageOffsetOfFixup < tupleSectVmStartOffset )
        return false;
    if ( imageOffsetOfFixup > tupleSectVmEndOffset )
        return false;
    uint64_t offsetIntoSection = imageOffsetOfFixup - tupleSectVmStartOffset;
    tupleIndex = (uint32_t)(offsetIntoSection/entrySize);
    return (tupleIndex*entrySize == offsetIntoSection) || ((tupleIndex*entrySize+entrySize/2) == offsetIntoSection);
}

void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
{
    const unsigned pointerSize = mh->pointerSize();
    const uint64_t baseAddress = mh->preferredLoadAddress();
    mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool& stop) {
        const uint32_t entrySize = 2*pointerSize;
        const uint32_t tupleCount = (uint32_t)(sectVmSize/entrySize);
        const uint64_t sectVmEndOffset = sectVmOffset + sectVmSize;
        BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            resolvedTuples[i].stockImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].stockImplementation.absolute.value = 0;
            resolvedTuples[i].newImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].newImplementation.absolute.value = 0;
        }
        // figure out what the new implementation (rebase) and stock implementation (bind) of each tuple point to
        image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& rebaseStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToRebase, entrySize, tupleIndex) ) {
                const void* content = (uint8_t*)mh + imageOffsetToRebase;
                uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
                resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                resolvedTuples[tupleIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
            }
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToBind, entrySize, tupleIndex) ) {
                resolvedTuples[tupleIndex].stockImplementation = bindTarget;
            }
        },
        ^(uint64_t imageOffsetToStartsInfo, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
            mh->withChainStarts(_diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* startsInfo) {
                mh->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                    uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)mh;
                    uint32_t tupleIndex;
                    if ( !isTupleFixup(sectVmOffset, sectVmEndOffset, fixupOffset, entrySize, tupleIndex) )
                        return;
                    uint32_t bindOrdinal;
                    uint64_t rebaseTargetOffset;
                    if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
                        if ( bindOrdinal < targets.count() ) {
                            resolvedTuples[tupleIndex].stockImplementation = targets[bindOrdinal];
                        }
                        else {
                            _diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
                            fixupsStop = true;
                        }
                    }
                    else if ( fixupLoc->isRebase(segInfo->pointer_format, baseAddress, rebaseTargetOffset) ) {
                        resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[tupleIndex].newImplementation.image.offset = rebaseTargetOffset;
                    }
                });
            });
        },
        ^(uint64_t imageOffsetToFixup) {
            // objc optimisation can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            // objc protocol optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool& fixupStop) {
            // objc selector optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc stable Swift optimisation fixups can't be interposed so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc method list optimisation fixups can't be interposed so nothing to do here.
        });

        // remove any tuples in which both sides are not set (or the target is a weak-import NULL)
        STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
              && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
                goodTuples.push_back(resolvedTuples[i]);
        }
        writer.addInterposingTuples(goodTuples);
        _interposingTuplesUsed = !goodTuples.empty();

        // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
        STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
        for (const InterposingTuple& aTuple : goodTuples) {
            if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
                uint32_t imageIndex;
                assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
                ImageNum imageInCache = imageIndex+1;
                Closure::PatchEntry patch;
                patch.exportCacheOffset = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
                patch.overriddenDylibInCache = imageInCache;
                patch.replacement = aTuple.newImplementation;
                patches.push_back(patch);
            }
        }
        writer.addCachePatches(patches);
    });
}

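// Rebase locations are run-length encoded as RebasePattern { repeatCount, contigCount,
// skipCount } entries. Illustrative example: three pointer-sized rebases 8 bytes apart
// (with ptrSize 8) collapse into a single entry with contigCount=3; a large gap to the
// next rebase is bridged by "advance only" entries with contigCount=0.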
void ClosureBuilder::addRebaseInfo(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint64_t ptrSize = mh->pointerSize();
    Image::RebasePattern maxLeapPattern = { 0xFFFFF, 0, 0xF };
    const uint64_t maxLeapCount = maxLeapPattern.repeatCount * maxLeapPattern.skipCount;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
    __block uint64_t lastLocation = -ptrSize;
    mh->forEachRebase(_diag, !_foundMissingLazyBinds, ^(uint64_t runtimeOffset, bool& stop) {
        const uint64_t delta = runtimeOffset - lastLocation;
        const bool aligned = ((delta % ptrSize) == 0);
        if ( delta == ptrSize ) {
            // this rebase location is contiguous to the previous one
            if ( rebaseEntries.back().contigCount < 255 ) {
                // just bump the previous entry's contigCount
                rebaseEntries.back().contigCount++;
            }
            else {
                // previous contiguous run already has the max of 255, so start a new run
                rebaseEntries.push_back({ 1, 1, 0 });
            }
        }
        else if ( aligned && (delta <= (ptrSize*15)) ) {
            // this rebase is within skip distance of the last rebase
            rebaseEntries.back().skipCount = (uint8_t)((delta-ptrSize)/ptrSize);
            int lastIndex = (int)(rebaseEntries.count() - 1);
            if ( lastIndex > 1 ) {
                if ( (rebaseEntries[lastIndex].contigCount == rebaseEntries[lastIndex-1].contigCount)
                  && (rebaseEntries[lastIndex].skipCount == rebaseEntries[lastIndex-1].skipCount) ) {
                    // this entry has the same contig and skip as the previous one, so remove it and bump the previous entry's repeat count
                    rebaseEntries.pop_back();
                    rebaseEntries.back().repeatCount += 1;
                }
            }
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        else {
            uint64_t advanceCount = (delta-ptrSize);
            if ( (runtimeOffset < lastLocation) && (lastLocation != -ptrSize) ) {
                // out-of-order rebase! handle this by resetting the rebase offset to zero
                rebaseEntries.push_back({ 0, 0, 0 });
                advanceCount = runtimeOffset;
            }
            // if next rebase is too far to reach with one pattern, use a series
            while ( advanceCount > maxLeapCount ) {
                rebaseEntries.push_back(maxLeapPattern);
                advanceCount -= maxLeapCount;
            }
            // if next rebase is not reachable with skipCount==1 or skipCount==15, add an intermediate
            while ( advanceCount > maxLeapPattern.repeatCount ) {
                uint64_t count = advanceCount / maxLeapPattern.skipCount;
                rebaseEntries.push_back({ (uint32_t)count, 0, maxLeapPattern.skipCount });
                advanceCount -= (count*maxLeapPattern.skipCount);
            }
            if ( advanceCount != 0 )
                rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        lastLocation = runtimeOffset;
    });
    writer.setRebaseInfo(rebaseEntries);

    // i386 programs also use text relocs to rebase stubs
    if ( mh->cputype == CPU_TYPE_I386 ) {
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
        __block uint64_t lastOffset = -4;
        mh->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
            if ( textRebases.freeCount() < 2 ) {
                _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
                stop = true;
            }
            bool mergedIntoPrevious = false;
            if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
                uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
                if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
                    textRebases.back().repeatCount = 2;
                    textRebases.back().skipCount = skipAmount;
                    mergedIntoPrevious = true;
                }
                else if ( textRebases.back().skipCount == skipAmount ) {
                    textRebases.back().repeatCount += 1;
                    mergedIntoPrevious = true;
                }
            }
            if ( !mergedIntoPrevious ) {
                Image::TextFixupPattern pattern;
                pattern.target.raw = 0;
                pattern.startVmOffset = (uint32_t)runtimeOffset;
                pattern.repeatCount = 1;
                pattern.skipCount = 0;
                textRebases.push_back(pattern);
            }
            lastOffset = runtimeOffset;
        });
        writer.setTextRebaseInfo(textRebases);
    }
}

void ClosureBuilder::forEachBind(BuilderLoadedImage& forImage, void (^handler)(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop),
                                 void (^strongHandler)(const char* strongSymbolName),
                                 void (^missingLazyBindHandler)())
{
    __block int lastLibOrdinal = 256;
    __block const char* lastSymbolName = nullptr;
    __block uint64_t lastAddend = 0;
    __block Image::ResolvedSymbolTarget target;
    __block ResolvedTargetInfo targetInfo;
    forImage.loadAddress()->forEachBind(_diag, ^(uint64_t runtimeOffset, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind, uint64_t addend, bool& stop) {
        if ( (symbolName == lastSymbolName) && (libOrdinal == lastLibOrdinal) && (addend == lastAddend) ) {
            // same symbol lookup as the last location
            handler(runtimeOffset, target, targetInfo, stop);
        }
        else if ( findSymbol(forImage, libOrdinal, symbolName, weakImport, lazyBind, addend, target, targetInfo) ) {
            if ( !targetInfo.skippableWeakDef ) {
                handler(runtimeOffset, target, targetInfo, stop);
                lastSymbolName = symbolName;
                lastLibOrdinal = libOrdinal;
                lastAddend = addend;
            }
        }
        else {
            stop = true;
        }
    }, ^(const char* symbolName) {
        strongHandler(symbolName);
    }, ^() {
        missingLazyBindHandler();
    });
}

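// Binds are run-length encoded like rebases: consecutive pointer-sized binds to the
// same target merge into one BindPattern. Illustrative example: the same target bound
// at offsets X, X+8, and X+16 (ptrSize 8) becomes one pattern with repeatCount=3, skipCount=0.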
void ClosureBuilder::addBindInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const uint32_t ptrSize = forImage.loadAddress()->pointerSize();
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
    __block uint64_t lastOffset = -ptrSize;
    __block Image::ResolvedSymbolTarget lastTarget = { {0, 0} };
    forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
        if ( targetInfo.weakBindCoalese ) {
            // there may be a previous bind to this location
            // if so, update that rather than create a new BindPattern
            for (Image::BindPattern& aBind : binds) {
                if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
                    aBind.target = target;
                    return;
                }
            }
        }
        bool mergedIntoPrevious = false;
        if ( !mergedIntoPrevious && (target == lastTarget) && (runtimeOffset > lastOffset) && !binds.empty() ) {
            uint64_t skipAmount = (runtimeOffset - lastOffset - ptrSize)/ptrSize;
            if ( skipAmount*ptrSize != (runtimeOffset - lastOffset - ptrSize) ) {
                // misaligned pointer means we cannot optimize
            }
            else {
                if ( (binds.back().repeatCount == 1) && (binds.back().skipCount == 0) && (skipAmount <= 255) ) {
                    binds.back().repeatCount = 2;
                    binds.back().skipCount = skipAmount;
                    assert(binds.back().skipCount == skipAmount); // check overflow
                    mergedIntoPrevious = true;
                }
                else if ( (binds.back().skipCount == skipAmount) && (binds.back().repeatCount < 0xfff) ) {
                    uint32_t prevRepeatCount = binds.back().repeatCount;
                    binds.back().repeatCount += 1;
                    assert(binds.back().repeatCount > prevRepeatCount); // check overflow
                    mergedIntoPrevious = true;
                }
            }
        }
        if ( (target == lastTarget) && (runtimeOffset == lastOffset) && !binds.empty() ) {
            // duplicate bind for the same location, ignore this one
            mergedIntoPrevious = true;
        }
        if ( !mergedIntoPrevious ) {
            Image::BindPattern pattern;
            pattern.target = target;
            pattern.startVmOffset = runtimeOffset;
            pattern.repeatCount = 1;
            pattern.skipCount = 0;
            assert(pattern.startVmOffset == runtimeOffset);
            binds.push_back(pattern);
        }
        lastTarget = target;
        lastOffset = runtimeOffset;
    }, ^(const char* strongSymbolName) {
        if ( !_makingDyldCacheImages ) {
            // something has a strong symbol definition that may override a weak impl in the dyld cache
            Image::ResolvedSymbolTarget strongOverride;
            ResolvedTargetInfo strongTargetInfo;
            if ( findSymbolInImage(forImage.loadAddress(), strongSymbolName, 0, false, false, strongOverride, strongTargetInfo) ) {
                for (const BuilderLoadedImage& li : _loadedImages) {
                    if ( li.loadAddress()->inDyldCache() && li.loadAddress()->hasWeakDefs() ) {
                        Image::ResolvedSymbolTarget implInCache;
                        ResolvedTargetInfo implInCacheInfo;
                        if ( findSymbolInImage(li.loadAddress(), strongSymbolName, 0, false, false, implInCache, implInCacheInfo) ) {
                            // found another instance in some dylib in the dyld cache, will need to patch it
                            Closure::PatchEntry patch;
                            patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
                            patch.overriddenDylibInCache = li.imageNum;
                            patch.replacement = strongOverride;
                            _weakDefCacheOverrides.push_back(patch);
                        }
                    }
                }
            }
        }
    }, ^() {
        _foundMissingLazyBinds = true;
    });

    // check for __dyld section in main executable to support licenseware
    if ( forImage.loadAddress()->filetype == MH_EXECUTE ) {
        forImage.loadAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
            if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
                // find dyld3::compatFuncLookup in libdyld.dylib
                assert(_libDyldImageNum != 0);
                Image::ResolvedSymbolTarget lookupFuncTarget;
                ResolvedTargetInfo lookupFuncInfo;
                if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld316compatFuncLookupEPKcPPv", 0, false, false, lookupFuncTarget, lookupFuncInfo) ) {
                    // add bind to set the second pointer in the __dyld section to be dyld3::compatFuncLookup
                    uint64_t runtimeOffset = sectInfo.sectAddr - forImage.loadAddress()->preferredLoadAddress() + forImage.loadAddress()->pointerSize();
                    Image::BindPattern compatFuncPattern;
                    compatFuncPattern.target = lookupFuncTarget;
                    compatFuncPattern.startVmOffset = runtimeOffset;
                    compatFuncPattern.repeatCount = 1;
                    compatFuncPattern.skipCount = 0;
                    assert(compatFuncPattern.startVmOffset == runtimeOffset);
                    binds.push_back(compatFuncPattern);
                }
                else {
                    _diag.error("libdyld.dylib is missing dyld3::compatFuncLookup");
                }
            }
        });
    }

    writer.setBindInfo(binds);
}

1410 void ClosureBuilder::reportRebasesAndBinds(ImageWriter& writer, BuilderLoadedImage& forImage)
1411 {
1412 // report all rebases
1413 forImage.loadAddress()->forEachRebase(_diag, true, ^(uint64_t runtimeOffset, bool& stop) {
1414 _handlers->rebase(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset);
1415 });
1416
1417 // report all binds
1418 forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
1419 _handlers->bind(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset, target, targetInfo);
1420 },
1421 ^(const char* strongSymbolName) {},
1422 ^() { });
1423
1424 // i386 programs also use text relocs to rebase stubs
1425 if ( forImage.loadAddress()->cputype == CPU_TYPE_I386 ) {
1426 // FIX ME
1427 }
1428 }
1429
1430 // These are mangled symbols for all the variants of operator new and delete
1431 // which a main executable can define (non-weak) and override the
1432 // weak-def implementation in the OS.
1433 static const char* const sTreatAsWeak[] = {
1434 "__Znwm", "__ZnwmRKSt9nothrow_t",
1435 "__Znam", "__ZnamRKSt9nothrow_t",
1436 "__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
1437 "__ZdaPv", "__ZdaPvRKSt9nothrow_t", "__ZdaPvm",
1438 "__ZnwmSt11align_val_t", "__ZnwmSt11align_val_tRKSt9nothrow_t",
1439 "__ZnamSt11align_val_t", "__ZnamSt11align_val_tRKSt9nothrow_t",
1440 "__ZdlPvSt11align_val_t", "__ZdlPvSt11align_val_tRKSt9nothrow_t", "__ZdlPvmSt11align_val_t",
1441 "__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
1442 };
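// For reference, a few of these demangle as follows (Itanium C++ ABI names, plus the
// extra leading underscore Mach-O adds to C-level symbols):
//   __Znwm                -> operator new(unsigned long)
//   __ZnwmRKSt9nothrow_t  -> operator new(unsigned long, std::nothrow_t const&)
//   __Znam                -> operator new[](unsigned long)
//   __ZdlPv               -> operator delete(void*)
//   __ZnwmSt11align_val_t -> operator new(unsigned long, std::align_val_t)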
1443
1444
1445 void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1446 {
1447 // build array of targets
1448 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
1449 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ResolvedTargetInfo, targetInfos, 1024);
1450 forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
1451 Image::ResolvedSymbolTarget target;
1452 ResolvedTargetInfo targetInfo;
1453 if ( !findSymbol(forImage, libOrdinal, symbolName, weakImport, false, addend, target, targetInfo) ) {
1454 stop = true;
1455 return;
1456 }
1457 if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1458 // add if not already in array
1459 bool alreadyInArray = false;
1460 for (const char* sym : _weakDefsFromChainedBinds) {
1461 if ( strcmp(sym, symbolName) == 0 ) {
1462 alreadyInArray = true;
1463 break;
1464 }
1465 }
1466 if ( !alreadyInArray )
1467 _weakDefsFromChainedBinds.push_back(symbolName);
1468 }
1469 targets.push_back(target);
1470 targetInfos.push_back(targetInfo);
1471 });
1472 if ( _diag.hasError() )
1473 return;
1474
1475 uint64_t chainStartsOffset = forImage.loadAddress()->chainStartsOffset();
1476 if ( _handlers != nullptr ) {
1477 forImage.loadAddress()->withChainStarts(_diag, chainStartsOffset, ^(const dyld_chained_starts_in_image* starts) {
1478 _handlers->chainedBind(forImage.imageNum, forImage.loadAddress(), starts, targets, targetInfos);
1479 });
1480 }
1481 else {
1482 writer.setChainedFixups(chainStartsOffset, targets);
1483 }
1484
1485 // with chained fixups, main executable may define symbol that overrides weak-defs but has no fixup
1486 if ( _isLaunchClosure && forImage.loadAddress()->hasWeakDefs() && forImage.loadAddress()->isMainExecutable() ) {
1487 for (const char* weakSymbolName : sTreatAsWeak) {
1488 Diagnostics exportDiag;
1489 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1490 if ( forImage.loadAddress()->findExportedSymbol(exportDiag, weakSymbolName, false, foundInfo, nullptr) ) {
1491 _weakDefsFromChainedBinds.push_back(weakSymbolName);
1492 }
1493 }
1494 }
1495 }
1496
1497
1498 bool ClosureBuilder::findSymbolInImage(const MachOAnalyzer* macho, const char* symbolName, uint64_t addend, bool followReExports,
1499 bool weakImport, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1500 {
1501 targetInfo.foundInDylib = nullptr;
1502 targetInfo.requestedSymbolName = symbolName;
1503 targetInfo.addend = addend;
1504 targetInfo.weakBindCoalese = false;
1505 targetInfo.weakBindSameImage = false;
1506 targetInfo.isWeakDef = false;
1507 targetInfo.skippableWeakDef = false;
1508 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
1509 return (const MachOLoaded*)findDependent(mh, depIndex);
1510 };
1511 MachOAnalyzer::DependentToMachOLoaded finder = nullptr;
1512 if ( followReExports )
1513 finder = reexportFinder;
1514
1515 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1516 if ( macho->findExportedSymbol(_diag, symbolName, weakImport, foundInfo, finder) ) {
1517 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
1518 targetInfo.foundInDylib = foundInfo.foundInDylib;
1519 targetInfo.foundSymbolName = foundInfo.foundSymbolName;
1520 if ( foundInfo.isWeakDef )
1521 targetInfo.isWeakDef = true;
1522 if ( foundInfo.kind == MachOAnalyzer::FoundSymbol::Kind::absolute ) {
1523 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1524 target.absolute.value = foundInfo.value + addend;
1525 }
1526 else if ( impDylib->inDyldCache() ) {
1527 uint64_t offsetValue = (uint8_t*)impDylib - (uint8_t*)_dyldCache + foundInfo.value + addend;
1528 target.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1529 target.sharedCache.offset = offsetValue;
1530 assert(target.sharedCache.offset == offsetValue);
1531 }
1532 else {
1533 uint64_t offsetValue = foundInfo.value + addend;
1534 target.image.kind = Image::ResolvedSymbolTarget::kindImage;
1535 target.image.imageNum = findLoadedImage(impDylib).imageNum;
1536 target.image.offset = offsetValue;
1537 assert(target.image.offset == offsetValue);
1538 }
1539 return true;
1540 }
1541 return false;
1542 }
1543
1544 bool ClosureBuilder::findSymbol(BuilderLoadedImage& fromImage, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind,
1545 uint64_t addend, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1546 {
1547 target.raw = 0;
1548 targetInfo.weakBindCoalese = false;
1549 targetInfo.weakBindSameImage = false;
1550 targetInfo.isWeakDef = false;
1551 targetInfo.skippableWeakDef = false;
1552 targetInfo.requestedSymbolName = symbolName;
1553 targetInfo.foundSymbolName = nullptr;
1554 targetInfo.libOrdinal = libOrdinal;
1555 if ( libOrdinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP ) {
1556 for (const BuilderLoadedImage& li : _loadedImages) {
1557 if ( !li.rtldLocal && findSymbolInImage(li.loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1558 return true;
1559 }
1560 if ( weakImport ) {
1561 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1562 target.absolute.value = 0;
1563 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1564 fromImage.hasMissingWeakImports = true;
1565 return true;
1566 }
1567 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1568 if ( lazyBind && _allowMissingLazies ) {
1569 if ( findMissingSymbolHandler(target, targetInfo) )
1570 return true;
1571 }
1572 _diag.error("symbol '%s' not found, expected in flat namespace by '%s'", symbolName, fromImage.path());
1573 }
1574 else if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1575 // to resolve weak-def coalescing, we need to search all images in order and use the first definition
1576 // but, if the first found is a weak def, a later non-weak def overrides it (see the sketch after this function)
1577 bool foundWeakDefImpl = false;
1578 bool foundStrongDefImpl = false;
1579 bool foundImpl = false;
1580
1581 if ( _makingDyldCacheImages ) {
1582 // _loadedImages is all dylibs in the dyld cache; it is not in load order, so we need an alternate weak-def binding algorithm
1583 // look first in /usr/lib/libc++, most will be here
1584 for (const BuilderLoadedImage& li : _loadedImages) {
1585 if ( li.loadAddress()->hasWeakDefs() && (strncmp(li.path(), "/usr/lib/libc++", 15) == 0) ) {
1586 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1587 foundImpl = true;
1588 break;
1589 }
1590 }
1591 }
1592 // if not found, try looking in the image itself; most custom weak-def symbols have a copy in the image itself
1593 if ( !foundImpl ) {
1594 if ( findSymbolInImage(fromImage.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1595 foundImpl = true;
1596 }
1597 }
1598 // if still not found, then this is the rare case of a simple use of a weak-def symbol
1599 if ( !foundImpl ) {
1600 // look in all direct dependents
1601 for (Image::LinkedImage child : fromImage.dependents) {
1602 if (child.imageNum() == kMissingWeakLinkedImage)
1603 continue;
1604 BuilderLoadedImage& childLi = findLoadedImage(child.imageNum());
1605 if ( childLi.loadAddress()->hasWeakDefs() && findSymbolInImage(childLi.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1606 foundImpl = true;
1607 break;
1608 }
1609 }
1610 }
1611 targetInfo.weakBindCoalese = true;
1612 }
1613 else {
1614 // walk images in load-order to find first that implements this symbol
1615 Image::ResolvedSymbolTarget aTarget;
1616 ResolvedTargetInfo aTargetInfo;
1617 STACK_ALLOC_ARRAY(const BuilderLoadedImage*, cachedDylibsUsingSymbol, 1024);
1618 for (const BuilderLoadedImage& li : _loadedImages) {
1619 // only search images with weak-defs that were not loaded with RTLD_LOCAL
1620 if ( li.loadAddress()->hasWeakDefs() && !li.rtldLocal ) {
1621 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, aTarget, aTargetInfo) ) {
1622 foundImpl = true;
1623 // with non-chained images, weak-defs first have a rebase to their local impl, and a weak-bind which allows earlier impls to override
1624 if ( !li.loadAddress()->hasChainedFixups() && (aTargetInfo.foundInDylib == fromImage.loadAddress()) )
1625 targetInfo.weakBindSameImage = true;
1626 if ( aTargetInfo.isWeakDef ) {
1627 // found a weakDef impl, if this is first found, set target to this
1628 if ( !foundWeakDefImpl && !foundStrongDefImpl ) {
1629 target = aTarget;
1630 targetInfo = aTargetInfo;
1631 }
1632 foundWeakDefImpl = true;
1633 }
1634 else {
1635 // found a non-weak impl, use this (unless early strong found)
1636 if ( !foundStrongDefImpl ) {
1637 target = aTarget;
1638 targetInfo = aTargetInfo;
1639 }
1640 foundStrongDefImpl = true;
1641 }
1642 }
1643 if ( foundImpl && li.loadAddress()->inDyldCache() )
1644 cachedDylibsUsingSymbol.push_back(&li);
1645 }
1646 }
1647
1648 // now that final target found, if any dylib in dyld cache uses that symbol name, redirect it to new target
1649 if ( !cachedDylibsUsingSymbol.empty() ) {
1650 for (const BuilderLoadedImage* li : cachedDylibsUsingSymbol) {
1651 Image::ResolvedSymbolTarget implInCache;
1652 ResolvedTargetInfo implInCacheInfo;
1653 if ( findSymbolInImage(li->loadAddress(), symbolName, addend, false, weakImport, implInCache, implInCacheInfo) ) {
1654 if ( implInCache != target ) {
1655 // found another instance in some dylib in dyld cache, will need to patch it
1656 Closure::PatchEntry patch;
1657 patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
1658 patch.overriddenDylibInCache = li->imageNum;
1659 patch.replacement = target;
1660 _weakDefCacheOverrides.push_back(patch);
1661 }
1662 }
1663 }
1664 }
1665 targetInfo.weakBindCoalese = true;
1666 }
1667
1668 if ( foundImpl )
1669 return true;
1670 if ( weakImport ) {
1671 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1672 target.absolute.value = 0;
1673 return true;
1674 }
1675 if ( ! fromImage.loadAddress()->hasChainedFixups() ) {
1676 // support old binaries where symbols have been stripped and have a weak_bind to themselves
1677 targetInfo.skippableWeakDef = true;
1678 return true;
1679 }
1680
1681 _diag.error("symbol '%s' not found, expected to be weak-def coalesced by '%s'", symbolName, fromImage.path());
1682 }
1683 else {
1684 const BuilderLoadedImage* targetLoadedImage = nullptr;
1685 if ( (libOrdinal > 0) && (libOrdinal <= (int)fromImage.dependents.count()) ) {
1686 ImageNum childNum = fromImage.dependents[libOrdinal - 1].imageNum();
1687 if ( childNum != kMissingWeakLinkedImage ) {
1688 targetLoadedImage = &findLoadedImage(childNum);
1689 }
1690 }
1691 else if ( libOrdinal == BIND_SPECIAL_DYLIB_SELF ) {
1692 targetLoadedImage = &fromImage;
1693 }
1694 else if ( libOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
1695 targetLoadedImage = &_loadedImages[_mainProgLoadIndex];
1696 }
1697 else {
1698 _diag.error("unknown special ordinal %d in %s", libOrdinal, fromImage.path());
1699 return false;
1700 }
1701
1702 if ( targetLoadedImage != nullptr ) {
1703 if ( findSymbolInImage(targetLoadedImage->loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1704 return true;
1705 }
1706
1707 if ( weakImport ) {
1708 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1709 target.absolute.value = 0;
1710 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1711 fromImage.hasMissingWeakImports = true;
1712 return true;
1713 }
1714
1715 // <rdar://problem/43315403> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld
1716 if ( lazyBind && _allowMissingLazies ) {
1717 if ( findMissingSymbolHandler(target, targetInfo) )
1718 return true;
1719 }
1720
1721 // symbol not found and not weak or lazy so error out
1722 const char* expectedInPath = targetLoadedImage ? targetLoadedImage->path() : "unknown";
1723 _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, fromImage.path());
1724 if ( _launchErrorInfo != nullptr ) {
1725 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
1726 _launchErrorInfo->clientOfDylibPath = strdup_temp(fromImage.path());
1727 _launchErrorInfo->targetDylibPath = strdup_temp(expectedInPath);
1728 _launchErrorInfo->symbol = symbolName;
1729 }
1730 }
1731 return false;
1732 }
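// A standalone sketch (illustrative only, plain types rather than dyld API) of the
// weak-def coalescing rule findSymbol() implements above: definitions are visited in
// load order, the first definition wins, except that the first strong (non-weak)
// definition overrides any earlier weak one.
//
//     struct Def { bool isWeak; };
//     static const Def* coalesce(const Def defs[], size_t count) {
//         const Def* chosen = nullptr;
//         for (size_t i = 0; i < count; ++i) {
//             if ( chosen == nullptr )
//                 chosen = &defs[i];                      // first definition of any kind
//             else if ( chosen->isWeak && !defs[i].isWeak )
//                 chosen = &defs[i];                      // first strong def overrides a weak one
//         }
//         return chosen;
//     }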
1733
1734
1735 bool ClosureBuilder::findMissingSymbolHandler(Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1736 {
1737 for (BuilderLoadedImage& li : _loadedImages) {
1738 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1739 if ( findSymbolInImage(li.loadAddress(), "__dyld_missing_symbol_abort", 0, false, false, target, targetInfo) ) {
1740 return true;
1741 }
1742 break;
1743 }
1744 }
1745 return false;
1746 }
1747
1748 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1749 {
1750 if ( initInfos[loadIndex].visited )
1751 return;
1752 initInfos[loadIndex].visited = true;
1753 initInfos[loadIndex].danglingUpward = false;
1754
1755 if (_loadedImages[loadIndex].isBadImage) {
1756 hasError = true;
1757 return;
1758 }
1759
1760 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1761 if ( dep.imageNum() == kMissingWeakLinkedImage )
1762 continue;
1763 ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1764 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1765 if ( dep.kind() == Image::LinkKind::upward ) {
1766 if ( !initInfos[depLoadIndex].visited )
1767 initInfos[depLoadIndex].danglingUpward = true;
1768 }
1769 else {
1770 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1771 if (hasError)
1772 return;
1773 }
1774 }
1775 initInfos[loadIndex].initOrder = initOrder++;
1776 }
1777
1778 void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
1779 {
1780 // allocate array to track initializers
1781 InitInfo initInfos[_loadedImages.count()];
1782 bzero(initInfos, sizeof(initInfos));
1783
1784 // recurse all images and build initializer list from bottom up
1785 uint32_t initOrder = 1;
1786 bool hasMissingDependent = false;
1787 depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
1788 if (hasMissingDependent) {
1789 imageWriter.setInvalid();
1790 return;
1791 }
1792
1793 // any images not visited yet are dangling; force-add them to the end of the init list
1794 for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
1795 if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
1796 depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
1797 }
1798 }
1799
1800 if (hasMissingDependent) {
1801 imageWriter.setInvalid();
1802 return;
1803 }
1804
1805 // build array of just images with initializer
1806 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
1807 uint32_t index = 0;
1808 for (const BuilderLoadedImage& li : _loadedImages) {
1809 if ( initInfos[index].visited && li.hasInits ) {
1810 indexOfImagesWithInits.push_back(index);
1811 }
1812 ++index;
1813 }
1814
1815 // bubble sort (FIXME)
1816 if ( indexOfImagesWithInits.count() > 1 ) {
1817 for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
1818 for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
1819 if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
1820 uint32_t temp = indexOfImagesWithInits[j];
1821 indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
1822 indexOfImagesWithInits[j+1] = temp;
1823 }
1824 }
1825 }
1826 }
1827
1828 // copy ImageNum of each image with initializers into array
1829 ImageNum initNums[indexOfImagesWithInits.count()];
1830 for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
1831 initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
1832 }
1833
1834 // add to closure info
1835 imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
1836 }
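// Worked example of the ordering computed above: for dependencies
// main -> A, main -> B, A -> C, B -> C, the depth-first recursion assigns
// initOrder C=1, A=2, B=3, main=4, so C's initializers run before A's and B's,
// and the main executable's initializers run last.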
1837
1838 void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
1839 {
1840 // record which is libSystem
1841 assert(_libSystemImageNum != 0);
1842 closureWriter.setLibSystemImageNum(_libSystemImageNum);
1843
1844 // record which is libdyld
1845 assert(_libDyldImageNum != 0);
1846 Image::ResolvedSymbolTarget entryLocation;
1847 ResolvedTargetInfo entryInfo;
1848 if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld318entryVectorForDyldE", 0, false, false, entryLocation, entryInfo) ) {
1849 const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
1850 switch ( entryLocation.image.kind ) {
1851 case Image::ResolvedSymbolTarget::kindSharedCache:
1852 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)_dyldCache + entryLocation.sharedCache.offset);
1853 break;
1854 case Image::ResolvedSymbolTarget::kindImage:
1855 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)findLoadedImage(entryLocation.image.imageNum).loadAddress() + entryLocation.image.offset);
1856 break;
1857 }
1858 if ( (libDyldEntry != nullptr) && ((libDyldEntry->binaryFormatVersion & LibDyldEntryVector::kBinaryFormatVersionMask) == dyld3::closure::kFormatVersion) )
1859 closureWriter.setLibDyldEntry(entryLocation);
1860 else
1861 _diag.error("libdyld.dylib entry vector is incompatible");
1862 }
1863 else {
1864 _diag.error("libdyld.dylib is missing entry vector");
1865 }
1866
1867 // record which is main executable
1868 ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
1869 closureWriter.setTopImageNum(mainProgImageNum);
1870
1871 // add entry
1872 uint32_t entryOffset;
1873 bool usesCRT;
1874 if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
1875 Image::ResolvedSymbolTarget location;
1876 location.image.kind = Image::ResolvedSymbolTarget::kindImage;
1877 location.image.imageNum = mainProgImageNum;
1878 location.image.offset = entryOffset;
1879 if ( usesCRT )
1880 closureWriter.setStartEntry(location);
1881 else
1882 closureWriter.setMainEntry(location);
1883 }
1884
1885 // add env vars that must match at launch time
1886 _pathOverrides.forEachEnvVar(^(const char* envVar) {
1887 closureWriter.addEnvVar(envVar);
1888 });
1889
1890 // add list of files which must be missing
1891 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const char*, paths, 8192);
1892 if ( _mustBeMissingPaths != nullptr ) {
1893 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
1894 paths.push_back(aPath);
1895 });
1896 }
1897 closureWriter.setMustBeMissingFiles(paths);
1898
1899 // add list of files which must be present with a specific inode/mtime
1900 if (!_skippedFiles.empty())
1901 closureWriter.setMustExistFiles(_skippedFiles);
1902 }
1903 void ClosureBuilder::invalidateInitializerRoots()
1904 {
1905 while (true) {
1906 bool madeChange = false;
1907 for (uintptr_t loadedImageIndex = _alreadyInitedIndex; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
1908 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
1909 if ( li.mustBuildClosure ) {
1910 // Already invalidated
1911 continue;
1912 }
1913 for (Image::LinkedImage depIndex : li.dependents) {
1914 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
1915 continue;
1916 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
1917 // If a dependent is bad, or a new image num, or an override, then we need this image to get a new closure
1918 if ( depImage.mustBuildClosure ) {
1919 li.mustBuildClosure = true; // mark bad
1920 madeChange = true;
1921 }
1922 }
1923 }
1924 if (!madeChange)
1925 break;
1926 // If we made a change, then we detected an existing image with a dependent which needed to be rebuilt.
1927 // This corresponds to a root of the shared cache, where the existing image is a shared cache image and the root is the depImage
1928 _foundDyldCacheRoots = true;
1929 }
1930 }
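// Worked example of the fixed-point loop above: if C is a root (mustBuildClosure) and
// the dependency chain is A -> B -> C, then B gets marked because its dependent C must
// be rebuilt, and A gets marked because B must be; in the worst case each pass propagates
// the invalidation one level up, and the loop exits once a full pass makes no change.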
1931
1932 size_t ClosureBuilder::HashCString::hash(const char* v) {
1933 // FIXME: Use hash<string_view> when it has the correct visibility markup
1934 return __gnu_cxx::hash<const char*>{}(v);
1935 }
1936
1937 bool ClosureBuilder::EqualCString::equal(const char* s1, const char* s2) {
1938 return strcmp(s1, s2) == 0;
1939 }
1940
1941
1942 struct HashUInt64 {
1943 static size_t hash(const uint64_t& v) {
1944 return std::hash<uint64_t>{}(v);
1945 }
1946 };
1947
1948 struct EqualUInt64 {
1949 static bool equal(uint64_t s1, uint64_t s2) {
1950 return s1 == s2;
1951 }
1952 };
1953
1954 void ClosureBuilder::writeClassOrProtocolHashTable(bool classes, Array<ObjCOptimizerImage>& objcImages) {
1955 __block MultiMap<const char*, dyld3::closure::Image::ObjCClassImageOffset, HashCString, EqualCString> seenClassesMap;
1956 __block Map<const char*, dyld3::closure::Image::ObjCClassNameImageOffset, HashCString, EqualCString> classNameMap;
1957 __block OverflowSafeArray<const char*> classNames;
1958
1959 // Note we walk the images backwards as we want them in load order to match the order they are registered with objc
1960 for (size_t imageIndex = 0, reverseIndex = (objcImages.count() - 1); imageIndex != objcImages.count(); ++imageIndex, --reverseIndex) {
1961 if (objcImages[reverseIndex].diag.hasError())
1962 continue;
1963 ObjCOptimizerImage& image = objcImages[reverseIndex];
1964 const OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = classes ? image.seenClasses : image.seenProtocols;
1965
1966 for (const ObjCOptimizerImage::SeenClass& seenClass : seenClasses) {
1967 closure::Image::ObjCClassNameImageOffset classNameTarget = seenClass.first;
1968 dyld3::closure::Image::ObjCClassImageOffset classDataTarget = seenClass.second;
1969 Image::ObjCClassImage classImage = _objcClassesHashTableImages[classNameTarget.classNameImageIndex];
1970
1971 const BuilderLoadedImage& li = findLoadedImage(classImage.imageNum);
1972 const dyld3::MachOAnalyzer* ma = li.loadAddress();
1973
1974 const char* className = ((const char*)ma) + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1975 //uint64_t nameVMAddr = ma->preferredLoadAddress() + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1976 //printf("%s: 0x%08llx = '%s'\n", li.path(), nameVMAddr, className);
1977 seenClassesMap.insert({ className, classDataTarget });
1978
1979 // Also track the name
1980 auto itAndInserted = classNameMap.insert({ className, dyld3::closure::Image::ObjCClassNameImageOffset() });
1981 if (itAndInserted.second) {
1982 // We inserted the class name so we need to add it to the strings for the closure hash table
1983 classNames.push_back(className);
1984
1985 // We already computed a class name target in a previous loop so use that one
1986 itAndInserted.first->second = seenClass.first;
1987
1988 // If we are processing protocols, and this is the first one we've seen, then track its ISA to be fixed up
1989 if ( !classes ) {
1990 uint64_t protocolVMOffset = classImage.offsetOfClasses + classDataTarget.classData.imageOffset;
1991 image.protocolISAFixups.push_back(protocolVMOffset);
1992 }
1993 }
1994 }
1995 }
1996
1997 __block uint32_t duplicateCount = 0;
1998 seenClassesMap.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values,
1999 uint64_t valuesCount) {
2000 if (valuesCount != 1)
2001 duplicateCount += valuesCount;
2002 });
2003
2004 // If we have closure class names, we need to make a hash table for them.
2005 OverflowSafeArray<uint8_t>& hashTable = classes ? _objcClassesHashTable : _objcProtocolsHashTable;
2006 if (!classNames.empty()) {
2007 objc_opt::perfect_hash phash;
2008 objc_opt::make_perfect(classNames, phash);
2009 size_t size = ObjCClassOpt::size(phash, duplicateCount);
2010 hashTable.resize(size);
2011 //printf("Class table size: %lld\n", size);
2012 ObjCClassOpt* classesHashTable = (ObjCClassOpt*)hashTable.begin();
2013 classesHashTable->write(phash, classNameMap.array(), seenClassesMap, duplicateCount);
2014 }
2015 }
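// Note: objc_opt::make_perfect builds a collision-free (perfect) hash over exactly the
// given key set, so lookups in the finished table need no probing. Keys that map to more
// than one value need extra space, which is why duplicateCount is computed before
// ObjCClassOpt::size() is asked how much room the table needs.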
2016
2017 bool ClosureBuilder::optimizeObjC(Array<ImageWriter>& writers) {
2018 if ( _dyldCache == nullptr )
2019 return false;
2020
2021 // If we have the read only data, make sure it has a valid selector table inside.
2022 const objc_opt::objc_clsopt_t* objcClassOpt = nullptr;
2023 const objc_opt::objc_selopt_t* objcSelOpt = nullptr;
2024 const objc_opt::objc_protocolopt2_t* objcProtocolOpt = nullptr;
2025 if (const objc_opt::objc_opt_t* optObjCHeader = _dyldCache->objcOpt()) {
2026 objcClassOpt = optObjCHeader->clsopt();
2027 objcSelOpt = optObjCHeader->selopt();
2028 objcProtocolOpt = optObjCHeader->protocolopt2();
2029 }
2030
2031 if ( !objcClassOpt || !objcSelOpt || !objcProtocolOpt )
2032 return false;
2033
2034 // We have 24 bits of index in SelectorReferenceFixup so we can't handle a
2035 // shared cache selector table larger than that
2036 if ( objcSelOpt->usedCount() >= (1 << 24) )
2037 return false;
2038
2039 // Make sure we have the pointers section with the pointer to the protocol class
2040 const void* objcOptPtrs = _dyldCache->objcOptPtrs();
2041 if ( objcOptPtrs == nullptr )
2042 return false;
2043
2044 uint32_t pointerSize = _loadedImages.begin()->loadAddress()->pointerSize();
2045 uint64_t classProtocolVMAddr = (pointerSize == 8) ? *(uint64_t*)objcOptPtrs : *(uint32_t*)objcOptPtrs;
2046
2047 Image::ResolvedSymbolTarget objcProtocolClassTarget;
2048 objcProtocolClassTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
2049 if ( _dyldCacheIsLive ) {
2050 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - (uint64_t)_dyldCache;
2051 } else {
2052 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - _dyldCache->unslidLoadAddress();
2053 }
2054
2055 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ObjCOptimizerImage, objcImages, 32);
2056 ArrayFinalizer<ObjCOptimizerImage> scopedCleanup(objcImages,
2057 ^(ObjCOptimizerImage& objcImage) {
2058 objcImage.~ObjCOptimizerImage();
2059 });
2060
2061 // Find all the images with valid objc info
2062 // Also add shared cache images to a map so that we can see them later for looking up classes
2063 Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer> sharedCacheImagesMap;
2064 for (size_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
2065 BuilderLoadedImage& li = _loadedImages[imageIndex];
2066
2067 // Skip shared cache images as even if they need a new closure, the objc runtime can still use
2068 // the optimized shared cache tables.
2069 if ( li.loadAddress()->inDyldCache() ) {
2070 sharedCacheImagesMap.insert({ li.loadAddress(), true });
2071 // Bump the writer index if we have a writer for this image
2072 if ( li.mustBuildClosure )
2073 ++writerIndex;
2074 continue;
2075 }
2076 // Images which don't need a closure can be skipped. They are from the shared cache
2077 if ( !li.mustBuildClosure )
2078 continue;
2079
2080 // If we have a root of libobjc, just give up for now
2081 if ( !strcmp(li.path(), "/usr/lib/libobjc.A.dylib"))
2082 return false;
2083
2084 ImageWriter& writer = writers[writerIndex];
2085 ++writerIndex;
2086
2087 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2088
2089 // Skip images with chained fixup formats other than arm64e legacy and 64-bit fixups until we can test them
2090 // FIXME: Handle chained fixups
2091 if ( ma->hasChainedFixups() ) {
2092 switch ( ma->chainedPointerFormat() ) {
2093 case DYLD_CHAINED_PTR_ARM64E:
2094 case DYLD_CHAINED_PTR_64:
2095 // We've tested the 64-bit chained fixups.
2096 break;
2097 case DYLD_CHAINED_PTR_64_OFFSET:
2098 case DYLD_CHAINED_PTR_ARM64E_OFFSET:
2099 case DYLD_CHAINED_PTR_ARM64E_USERLAND:
2100 // FIXME: Test 64-bit offset chained fixups then enable this.
2101 continue;
2102 case DYLD_CHAINED_PTR_32:
2103 case DYLD_CHAINED_PTR_32_CACHE:
2104 case DYLD_CHAINED_PTR_32_FIRMWARE:
2105 // FIXME: Test 32-bit chained fixups then enable this.
2106 continue;
2107 }
2108 }
2109
2110 const MachOAnalyzer::ObjCImageInfo* objcImageInfo = ma->objcImageInfo();
2111 if ( objcImageInfo == nullptr )
2112 continue;
2113
2114 // This image is good so record it for use later.
2115 objcImages.default_constuct_back();
2116 ObjCOptimizerImage& image = objcImages.back();
2117 image.loadedImage = &li;
2118 image.writer = &writer;
2119
2120 // Find FairPlay encryption range if encrypted
2121 uint32_t fairPlayFileOffset;
2122 uint32_t fairPlaySize;
2123 if ( ma->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
2124 image.fairplayFileOffsetStart = fairPlayFileOffset;
2125 image.fairplayFileOffsetEnd = fairPlayFileOffset + fairPlaySize; // end of the encrypted range
2126 }
2127
2128 // Set the offset to the objc image info
2129 image.objcImageInfoVMOffset = (uint64_t)objcImageInfo - (uint64_t)ma;
2130 }
2131
2132 OverflowSafeArray<const char*> closureSelectorStrings;
2133 Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString> closureSelectorMap;
2134 OverflowSafeArray<const char*> closureDuplicateSharedCacheClassNames;
2135 Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString> closureDuplicateSharedCacheClassMap;
2136 for (ObjCOptimizerImage& image : objcImages) {
2137 optimizeObjCClasses(objcClassOpt, sharedCacheImagesMap, closureDuplicateSharedCacheClassMap, image);
2138 if (image.diag.hasError())
2139 continue;
2140
2141 optimizeObjCProtocols(objcProtocolOpt, sharedCacheImagesMap, image);
2142 if (image.diag.hasError())
2143 continue;
2144
2145 optimizeObjCSelectors(objcSelOpt, closureSelectorMap, image);
2146 if (image.diag.hasError())
2147 continue;
2148
2149 // If this image is still valid, then add its intermediate results to the main tables
2150
2151 // Class results
2152 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2153 uint64_t nameVMOffset = nameAndDataVMOffset.first;
2154 uint64_t dataVMOffset = nameAndDataVMOffset.second;
2155 _objcClassesHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)nameVMOffset, (uint32_t)dataVMOffset });
2156 }
2157 image.classesNameAndDataVMOffsets.clear();
2158
2159 for (const auto& stringAndDuplicate : image.classSharedCacheDuplicates) {
2160 closureDuplicateSharedCacheClassMap[stringAndDuplicate.first] = stringAndDuplicate.second;
2161 closureDuplicateSharedCacheClassNames.push_back(stringAndDuplicate.first);
2162 }
2163
2164 // Selector results
2165 // Note we don't need to add the selector binds here. It's easier just to process them later from each image
2166 for (const auto& stringAndTarget : image.selectorMap) {
2167 closureSelectorMap[stringAndTarget.first] = stringAndTarget.second;
2168 closureSelectorStrings.push_back(stringAndTarget.first);
2169 }
2170 if (image.methodNameVMOffset)
2171 _objcSelectorsHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)*image.methodNameVMOffset });
2172 }
2173
2174 // If we successfully analyzed the classes and selectors, we can now emit their data
2175 // Set all the writers to have optimized objc
2176 for (ObjCOptimizerImage& image : objcImages) {
2177 if (image.diag.hasError())
2178 continue;
2179 image.writer->setHasPrecomputedObjC(true);
2180 }
2181
2182 // Write out the class table
2183 writeClassOrProtocolHashTable(true, objcImages);
2184
2185 // Write out the protocol table
2186 writeClassOrProtocolHashTable(false, objcImages);
2187
2188 // If we have closure duplicate classes, we need to make a hash table for them.
2189 closure::ObjCStringTable* duplicateClassesTable = nullptr;
2190 if (!closureDuplicateSharedCacheClassNames.empty()) {
2191 objc_opt::perfect_hash phash;
2192 objc_opt::make_perfect(closureDuplicateSharedCacheClassNames, phash);
2193 size_t size = ObjCStringTable::size(phash);
2194 _objcClassesDuplicatesHashTable.resize(size);
2195 //printf("Duplicate classes table size: %lld\n", size);
2196 duplicateClassesTable = (closure::ObjCClassDuplicatesOpt*)_objcClassesDuplicatesHashTable.begin();
2197 duplicateClassesTable->write(phash, closureDuplicateSharedCacheClassMap.array());
2198 }
2199
2200 // If we have closure selectors, we need to make a hash table for them.
2201 closure::ObjCStringTable* selectorStringTable = nullptr;
2202 if (!closureSelectorStrings.empty()) {
2203 objc_opt::perfect_hash phash;
2204 objc_opt::make_perfect(closureSelectorStrings, phash);
2205 size_t size = ObjCStringTable::size(phash);
2206 _objcSelectorsHashTable.resize(size);
2207 //printf("Selector table size: %lld\n", size);
2208 selectorStringTable = (closure::ObjCStringTable*)_objcSelectorsHashTable.begin();
2209 selectorStringTable->write(phash, closureSelectorMap.array());
2210 }
2211
2212 // Add fixups for the image info, protocol ISAs, and selector refs
2213 for (ObjCOptimizerImage& image : objcImages) {
2214 if (image.diag.hasError())
2215 continue;
2216
2217 // Protocol ISA references
2218 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ProtocolISAFixup, protocolFixups, 512);
2219 if ( !image.protocolISAFixups.empty() ) {
2220
2221 __block uint64_t lastOffset = -pointerSize;
2222 for (uint64_t runtimeOffset : image.protocolISAFixups) {
2223 bool mergedIntoPrevious = false;
2224 if ( (runtimeOffset > lastOffset) && !protocolFixups.empty() ) {
2225 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2226 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2227 // misaligned pointer means we cannot optimize
2228 }
2229 else {
2230 if ( (protocolFixups.back().repeatCount == 1) && (protocolFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2231 protocolFixups.back().repeatCount = 2;
2232 protocolFixups.back().skipCount = skipAmount;
2233 assert(protocolFixups.back().skipCount == skipAmount); // check overflow
2234 mergedIntoPrevious = true;
2235 }
2236 else if ( (protocolFixups.back().skipCount == skipAmount) && (protocolFixups.back().repeatCount < 0xfff) ) {
2237 uint32_t prevRepeatCount = protocolFixups.back().repeatCount;
2238 protocolFixups.back().repeatCount += 1;
2239 assert(protocolFixups.back().repeatCount > prevRepeatCount); // check overflow
2240 mergedIntoPrevious = true;
2241 }
2242 }
2243 }
2244 if ( !mergedIntoPrevious ) {
2245 Image::ProtocolISAFixup pattern;
2246 pattern.startVmOffset = runtimeOffset;
2247 pattern.repeatCount = 1;
2248 pattern.skipCount = 0;
2249 assert(pattern.startVmOffset == runtimeOffset);
2250 protocolFixups.push_back(pattern);
2251 }
2252 lastOffset = runtimeOffset;
2253 }
2254 }
2255
2256 // Selector references
2257 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::SelectorReferenceFixup, selRefFixups, 512);
2258 if ( !image.selectorFixups.empty() ) {
2259 uint64_t prevVMOffset = 0;
2260 const uint64_t maxChainOffset = (4 * ((1 << 7) - 1));
2261 for (const ObjCOptimizerImage::SelectorFixup& selectorFixup : image.selectorFixups) {
2262 assert( (selectorFixup.fixupVMOffset & 3) == 0 );
2263 if ( (selectorFixup.fixupVMOffset - prevVMOffset) <= maxChainOffset ) {
2264 // Add this to the previous chain
2265 selRefFixups.back().chainEntry.next = (uint32_t)(selectorFixup.fixupVMOffset - prevVMOffset) / 4;
2266 } else {
2267 // Need to start a new chain as the previous offset can't reach
2268 Image::SelectorReferenceFixup fixup;
2269 fixup.chainStartVMOffset = selectorFixup.fixupVMOffset;
2270 selRefFixups.push_back(fixup);
2271 }
2272
2273 if ( selectorFixup.isSharedCache ) {
2274 // If the entry is in the shared cache then we already have the index for it
2275 Image::SelectorReferenceFixup fixup;
2276 fixup.chainEntry.index = selectorFixup.sharedCache.selectorTableIndex;
2277 fixup.chainEntry.next = 0;
2278 fixup.chainEntry.inSharedCache = 1;
2279 selRefFixups.push_back(fixup);
2280 } else {
2281 // We had to record the string for the closure table entries as we don't know the
2282 // index until now
2283 uint32_t selectorTableIndex = selectorStringTable->getIndex(selectorFixup.image.selectorString);
2284 assert(selectorTableIndex != ObjCSelectorOpt::indexNotFound);
2285 Image::SelectorReferenceFixup fixup;
2286 fixup.chainEntry.index = selectorTableIndex;
2287 fixup.chainEntry.next = 0;
2288 fixup.chainEntry.inSharedCache = 0;
2289 selRefFixups.push_back(fixup);
2290 }
2291
2292 prevVMOffset = selectorFixup.fixupVMOffset;
2293 }
2294 }
2295
2296 // Stable Swift fixups
2297 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ClassStableSwiftFixup, stableSwiftFixups, 512);
2298 if ( !image.classStableSwiftFixups.empty() ) {
2299
2300 __block uint64_t lastOffset = -pointerSize;
2301 for (uint64_t runtimeOffset : image.classStableSwiftFixups) {
2302 bool mergedIntoPrevious = false;
2303 if ( (runtimeOffset > lastOffset) && !stableSwiftFixups.empty() ) {
2304 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2305 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2306 // misaligned pointer means we cannot optimize
2307 }
2308 else {
2309 if ( (stableSwiftFixups.back().repeatCount == 1) && (stableSwiftFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2310 stableSwiftFixups.back().repeatCount = 2;
2311 stableSwiftFixups.back().skipCount = skipAmount;
2312 assert(stableSwiftFixups.back().skipCount == skipAmount); // check overflow
2313 mergedIntoPrevious = true;
2314 }
2315 else if ( (stableSwiftFixups.back().skipCount == skipAmount) && (stableSwiftFixups.back().repeatCount < 0xfff) ) {
2316 uint32_t prevRepeatCount = stableSwiftFixups.back().repeatCount;
2317 stableSwiftFixups.back().repeatCount += 1;
2318 assert(stableSwiftFixups.back().repeatCount > prevRepeatCount); // check overflow
2319 mergedIntoPrevious = true;
2320 }
2321 }
2322 }
2323 if ( !mergedIntoPrevious ) {
2324 Image::ClassStableSwiftFixup pattern;
2325 pattern.startVmOffset = runtimeOffset;
2326 pattern.repeatCount = 1;
2327 pattern.skipCount = 0;
2328 assert(pattern.startVmOffset == runtimeOffset);
2329 stableSwiftFixups.push_back(pattern);
2330 }
2331 lastOffset = runtimeOffset;
2332 }
2333 }
2334
2335 // Method list fixups
2336 // TODO: Implement this
2337 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::MethodListFixup, methodListFixups, 512);
2338
2339 image.writer->setObjCFixupInfo(objcProtocolClassTarget, image.objcImageInfoVMOffset, protocolFixups,
2340 selRefFixups, stableSwiftFixups, methodListFixups);
2341 }
2342
2343 return true;
2344 }
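// Worked example of the repeat/skip run-length encoding built above (pointerSize = 8):
// protocol ISA fixups at offsets 0x1000, 0x1010, 0x1020 collapse into one pattern:
//   0x1000: nothing to merge with                -> { startVmOffset=0x1000, repeatCount=1, skipCount=0 }
//   0x1010: skipAmount = (0x1010-0x1000-8)/8 = 1 -> merged to { repeatCount=2, skipCount=1 }
//   0x1020: skipAmount = 1 == skipCount          -> merged to { repeatCount=3, skipCount=1 }
// Selector-reference fixups are encoded differently, as chains: each entry's 7-bit
// 'next' field holds (bytes to the next fixup)/4, so one link can span at most
// 4*127 = 508 bytes; a larger gap starts a new chain.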
2345
2346 void ClosureBuilder::optimizeObjCSelectors(const objc_opt::objc_selopt_t* objcSelOpt,
2347 const Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString>& closureSelectorMap,
2348 ObjCOptimizerImage& image) {
2349
2350 BuilderLoadedImage& li = *image.loadedImage;
2351
2352 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2353 uint32_t pointerSize = ma->pointerSize();
2354 const uint64_t loadAddress = ma->preferredLoadAddress();
2355
2356 // The legacy (objc1) codebase uses a bunch of sections we don't want to reason about. If we see them just give up.
2357 __block bool foundBadSection = false;
2358 ma->forEachSection(^(const MachOAnalyzer::SectionInfo &sectInfo, bool malformedSectionRange, bool &stop) {
2359 if ( strcmp(sectInfo.segInfo.segName, "__OBJC") != 0 )
2360 return;
2361 if (strcmp(sectInfo.sectName, "__module_info") == 0) {
2362 foundBadSection = true;
2363 stop = true;
2364 return;
2365 }
2366 if (strcmp(sectInfo.sectName, "__protocol") == 0) {
2367 foundBadSection = true;
2368 stop = true;
2369 return;
2370 }
2371 if (strcmp(sectInfo.sectName, "__message_refs") == 0) {
2372 foundBadSection = true;
2373 stop = true;
2374 return;
2375 }
2376 });
2377 if (foundBadSection) {
2378 image.diag.error("Old objc section");
2379 return;
2380 }
2381
2382 __block MachOAnalyzer::SectionCache selectorStringSectionCache(ma);
2383
2384 uint32_t sharedCacheSentinelIndex = objcSelOpt->getSentinelIndex();
2385
2386 auto visitReferenceToObjCSelector = ^void(uint64_t selectorStringVMAddr, uint64_t selectorReferenceVMAddr) {
2387
2388 uint64_t selectorUseImageOffset = selectorReferenceVMAddr - loadAddress;
2389 if ( (selectorUseImageOffset & 3) != 0 ) {
2390 image.diag.error("Unaligned selector reference fixup");
2391 return;
2392 }
2393
2394 // Image::SelectorReferenceFixup only has a 32-bit reach
2395 if ( selectorUseImageOffset >= (1ULL << 32) ) {
2396 image.diag.error("Selector reference fixup exceeds supported vm offset");
2397 return;
2398 }
2399
2400 // Get the section for the name
2401 const char* selectorString = nullptr;
2402 MachOAnalyzer::PrintableStringResult selectorStringResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2403 __block uint64_t selectorStringSectionStartVMAddr = 0;
2404 auto selectorStringSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2405
2406 // We only have 24-bits in ObjCImageOffset to index in to the strings
2407 if (sectInfo.sectSize >= Image::ObjCImageOffset::maximumOffset) {
2408 return false;
2409 }
2410
2411 // We use 32-bit offsets so make sure the section is no larger than that.
2412 uint64_t selectorStringVMOffset = sectInfo.sectAddr - loadAddress;
2413 if (selectorStringVMOffset >= (1ULL << 32)) {
2414 return false;
2415 }
2416
2417 selectorStringSectionStartVMAddr = sectInfo.sectAddr;
2418 return true;
2419 };
2420 selectorString = ma->getPrintableString(selectorStringVMAddr, selectorStringResult,
2421 &selectorStringSectionCache, selectorStringSectionHandler);
2422
2423 if ( selectorStringResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2424 image.diag.error("Invalid selector string for objc optimisation");
2425 return;
2426 }
2427
2428 uint32_t cacheSelectorIndex = objcSelOpt->getIndexForKey(selectorString);
2429 //printf("selector: %p -> %p %s\n", methodName, cacheSelector, selectorString);
2430
2431 if ( cacheSelectorIndex != sharedCacheSentinelIndex ) {
2432 // We got the selector from the cache so add a fixup to point there.
2433 ObjCOptimizerImage::SelectorFixup fixup;
2434 fixup.isSharedCache = true;
2435 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2436 fixup.sharedCache.selectorTableIndex = cacheSelectorIndex;
2437
2438 //printf("Overriding fixup at 0x%08llX to cache offset 0x%08llX\n", selectorUseImageOffset, (uint64_t)cacheSelector - (uint64_t)_dyldCache);
2439 image.selectorFixups.push_back(fixup);
2440 return;
2441 }
2442
2443 // See if this selector is already in the closure map from a previous image
2444 auto closureSelectorIt = closureSelectorMap.find(selectorString);
2445 if (closureSelectorIt != closureSelectorMap.end()) {
2446 // This selector was found in a previous image, so use it here.
2447 ObjCOptimizerImage::SelectorFixup fixup;
2448 fixup.isSharedCache = false;
2449 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2450 fixup.image.selectorString = selectorString;
2451
2452 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2453 image.selectorFixups.push_back(fixup);
2454 return;
2455 }
2456
2457 // See if this selector is already in the map for this image
2458 auto itAndInserted = image.selectorMap.insert({ selectorString, dyld3::closure::Image::ObjCImageOffset() });
2459 if (itAndInserted.second) {
2460 // We added the selector so it's pointing into our own image.
2461 // We don't need to add a fixup to our image, but we do need to
2462 // populate the data for other images later to point here.
2463 // First put our image in the list if it's not already there.
2464 uint64_t methodNameVMOffset = selectorStringSectionStartVMAddr - loadAddress;
2465 if (!image.methodNameVMOffset) {
2466 if ( _objcSelectorsHashTableImages.count() == Image::ObjCImageOffset::maximumImageIndex ) {
2467 image.diag.error("Out of space for selector hash images");
2468 return;
2469 }
2470 image.methodNameVMOffset = methodNameVMOffset;
2471 } else {
2472 // If we already set the offset to the start of the method names section, double check that
2473 // the section we are in right now is the same as that one. Otherwise we don't have the code
2474 // to handle both right now.
2475 if (*image.methodNameVMOffset != methodNameVMOffset) {
2476 image.diag.error("Cannot handle more than one selector strings section");
2477 return;
2478 }
2479 }
2480
2481 dyld3::closure::Image::ObjCImageOffset target;
2482 target.imageIndex = (uint32_t)_objcSelectorsHashTableImages.count();
2483 target.imageOffset = (uint32_t)(selectorStringVMAddr - selectorStringSectionStartVMAddr);
2484 itAndInserted.first->second = target;
2485 return;
2486 }
2487
2488 // This selector was found elsewhere in our image. If this reference already points to the same
2489 // selector string as we found before (and it should!) then we have nothing to do. Otherwise we
2490 // need to add a fixup here to make sure we point to our chosen definition.
2491 uint32_t imageOffset = (uint32_t)(selectorStringVMAddr - loadAddress);
2492 if ( imageOffset == (*image.methodNameVMOffset + itAndInserted.first->second.imageOffset) )
2493 return;
2494
2495 ObjCOptimizerImage::SelectorFixup fixup;
2496 fixup.isSharedCache = false;
2497 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2498 fixup.image.selectorString = selectorString;
2499
2500 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2501 image.selectorFixups.push_back(fixup);
2502 };
2503
2504 auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
2505 visitReferenceToObjCSelector(method.nameVMAddr, method.nameLocationVMAddr);
2506 };
2507
2508 auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
2509 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2510 const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
2511 ma->forEachObjCMethod(objcClass.baseMethodsVMAddr(pointerSize), li.contentRebased,
2512 visitMethod);
2513 };
2514
2515 auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
2516 const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
2517 ma->forEachObjCMethod(objcCategory.instanceMethodsVMAddr, li.contentRebased,
2518 visitMethod);
2519 ma->forEachObjCMethod(objcCategory.classMethodsVMAddr, li.contentRebased,
2520 visitMethod);
2521 };
2522 auto visitProtocol = ^(Diagnostics& diag, uint64_t protocolVMAddr,
2523 const dyld3::MachOAnalyzer::ObjCProtocol& objCProtocol) {
2524 ma->forEachObjCMethod(objCProtocol.instanceMethodsVMAddr, li.contentRebased,
2525 visitMethod);
2526 ma->forEachObjCMethod(objCProtocol.classMethodsVMAddr, li.contentRebased,
2527 visitMethod);
2528 ma->forEachObjCMethod(objCProtocol.optionalInstanceMethodsVMAddr, li.contentRebased,
2529 visitMethod);
2530 ma->forEachObjCMethod(objCProtocol.optionalClassMethodsVMAddr, li.contentRebased,
2531 visitMethod);
2532 };
2533
2534 // Walk the class list
2535 ma->forEachObjCClass(image.diag, li.contentRebased, visitClass);
2536 if (image.diag.hasError())
2537 return;
2538
2539 // Walk the category list
2540 ma->forEachObjCCategory(image.diag, li.contentRebased, visitCategory);
2541 if (image.diag.hasError())
2542 return;
2543
2544 // Walk the protocol list
2545 ma->forEachObjCProtocol(image.diag, li.contentRebased, visitProtocol);
2546 if (image.diag.hasError())
2547 return;
2548
2549 // Visit the selector refs
2550 ma->forEachObjCSelectorReference(image.diag, li.contentRebased, ^(uint64_t selRefVMAddr, uint64_t selRefTargetVMAddr) {
2551 visitReferenceToObjCSelector(selRefTargetVMAddr, selRefVMAddr);
2552 });
2553 if (image.diag.hasError())
2554 return;
2555
2556 // Visit the message refs
2557 // Note this isn't actually supported in libobjc any more. The logic it uses to decide whether to support it is:
2558 // #if (defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR))
2559 // So to keep it simple, let's only do this walk if we are x86_64
2560 if ( ma->isArch("x86_64") || ma->isArch("x86_64h") ) {
2561 if (ma->hasObjCMessageReferences()) {
2562 image.diag.error("Cannot handle message refs");
2563 return;
2564 }
2565 }
2566 }
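// Summary of the three possible outcomes for each selector reference visited above:
//   1. the name is in the shared cache table     -> emit a fixup indexing the cache selector table
//   2. the name was claimed by an earlier image  -> emit a fixup indexing the closure selector table
//   3. the name is first seen in this image      -> no fixup needed; this string becomes the
//      canonical entry that later references and images are redirected to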
2567
2568 static const dyld3::MachOAnalyzer* getMachHeaderFromObjCHeaderInfo(const void* opaqueHeaderInfo, uint32_t pointerSize) {
2569 if (pointerSize == 8) {
2570 typedef int64_t PtrTy;
2571 struct HeaderInfo {
2572 PtrTy mhdr_offset; // offset to mach_header_64
2573 PtrTy info_offset; // offset to objc_image_info *
2574 };
2575 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2576 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2577 } else {
2578 typedef int32_t PtrTy;
2579 struct HeaderInfo {
2580 PtrTy mhdr_offset; // offset to mach_header
2581 PtrTy info_offset; // offset to objc_image_info *
2582 };
2583 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2584 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2585 }
2586 }
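// Note the offsets above are self-relative: each is added to the address of the field
// that stores it, e.g. mh = (uint8_t*)&headerInfo->mhdr_offset + headerInfo->mhdr_offset.
// Self-relative offsets stay valid no matter where the shared cache slides.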
2587
2588 void ClosureBuilder::addDuplicateObjCClassWarning(const char* className,
2589 const char* duplicateDefinitionPath,
2590 const char* canonicalDefinitionPath)
2591 {
2592 if ( _objcDuplicateClassWarnings == nullptr )
2593 _objcDuplicateClassWarnings = PathPool::allocate();
2594 // Use a diagnostic to give us a buffer we can safely print to
2595 Diagnostics diag;
2596 diag.error("Class %s is implemented in both %s and %s. One of the two will be used. Which one is undefined.",
2597 className, canonicalDefinitionPath, duplicateDefinitionPath);
2598 #if BUILDING_CACHE_BUILDER
2599 _objcDuplicateClassWarnings->add(diag.errorMessage().c_str());
2600 #else
2601 _objcDuplicateClassWarnings->add(diag.errorMessage());
2602 #endif
2603 }
2604
2605 void ClosureBuilder::optimizeObjCClasses(const objc_opt::objc_clsopt_t* objcClassOpt,
2606 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2607 const Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString>& duplicateSharedCacheClasses,
2608 ObjCOptimizerImage& image) {
2609
2610 BuilderLoadedImage& li = *image.loadedImage;
2611 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = image.seenClasses;
2612
2613 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2614 const uint32_t pointerSize = ma->pointerSize();
2615 const uint64_t loadAddress = ma->preferredLoadAddress();
2616
2617 // Keep track of any missing weak imports so that we can tell if the superclasses are nil
2618 // This is necessary as the shared cache will be marked with 'no missing weak superclasses'
2619 // and so we need to continue to satisfy that constraint
2620 __block Map<uint64_t, bool, HashUInt64, EqualUInt64> missingWeakImportOffsets;
2621 if (li.hasMissingWeakImports) {
2622 if (ma->hasChainedFixups()) {
2623 const Image* closureImage = image.writer->currentImage();
2624
2625 const Array<Image::ResolvedSymbolTarget> targets = closureImage->chainedTargets();
2626 if ( !targets.empty() ) {
2627 ma->withChainStarts(_diag, closureImage->chainedStartsOffset(), ^(const dyld_chained_starts_in_image* startsInfo) {
2628 ma->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
2629 const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
2630 uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
2631 uint32_t bindOrdinal;
2632 if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
2633 if ( bindOrdinal < targets.count() ) {
2634 const Image::ResolvedSymbolTarget& target = targets[bindOrdinal];
2635 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2636 missingWeakImportOffsets[fixupOffset] = true;
2637 }
2638 else {
2639 image.diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
2640 fixupsStop = true;
2641 }
2642 }
2643 });
2644 });
2645 if (image.diag.hasError())
2646 return;
2647 }
2648 } else {
2649 forEachBind(li, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
2650 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2651 missingWeakImportOffsets[runtimeOffset] = true;
2652 }, ^(const char *strongSymbolName) {
2653 }, ^() { });
2654 }
2655 }
2656
2657 // Class names and data may be in different sections depending on swift vs objc so handle multiple sections
2658 __block MachOAnalyzer::SectionCache classNameSectionCache(ma);
2659 __block MachOAnalyzer::SectionCache classSectionCache(ma);
2660
2661 ma->forEachObjCClass(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t classVMAddr,
2662 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2663 const MachOAnalyzer::ObjCClassInfo &objcClass, bool isMetaClass) {
2664 if (isMetaClass) return;
2665
2666 // Make sure the superclass pointer is not nil
2667 uint64_t superclassRuntimeOffset = classSuperclassVMAddr - loadAddress;
2668 if (missingWeakImportOffsets.find(superclassRuntimeOffset) != missingWeakImportOffsets.end()) {
2669 diag.error("Missing weak superclass");
2670 return;
2671 }
2672
2673 // Does this class need to be fixed up for stable Swift ABI.
2674 // Note the order matches the objc runtime in that we always do this fix before checking for dupes,
2675 // but after excluding classes with missing weak superclasses.
2676 if (objcClass.isUnfixedBackwardDeployingStableSwift()) {
2677 // Class really is stable Swift, pretending to be pre-stable.
2678 // Fix its lie. This involves fixing the FAST bits on the class data value, so record that vmaddr
2679 image.classStableSwiftFixups.push_back(classDataVMAddr - loadAddress);
2680 }
2681
2682 // Get the section for the name
2683 const char* className = nullptr;
2684 MachOAnalyzer::PrintableStringResult classNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2685 __block uint64_t classNameSectionStartVMAddr = 0;
2686 auto classNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2687 // We only have 24-bits in ObjCClassNameImageOffset to index in to the strings
2688 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2689 return false;
2690 }
2691
2692 // We use 32-bit offsets so make sure the section is no larger than that.
2693 uint64_t classNameVMOffset = sectInfo.sectAddr - loadAddress;
2694 if (classNameVMOffset >= (1ULL << 32)) {
2695 return false;
2696 }
2697
2698 classNameSectionStartVMAddr = sectInfo.sectAddr;
2699 return true;
2700 };
2701 uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
2702 className = ma->getPrintableString(classNameVMAddr, classNameResult,
2703 &classNameSectionCache, classNameSectionHandler);
2704
2705 if ( classNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2706 diag.error("Invalid class name for objc optimisation");
2707 return;
2708 }
2709
2710 // If the class also exists in a shared cache image which is loaded, then objc
2711 // would have found that one, regardless of load order. So we can just skip this one.
2712 {
2713 void *cls;
2714 void *hi;
2715 uint32_t index;
2716 uint32_t count = objcClassOpt->getClassHeaderAndIndex(className, cls, hi, index);
2717 if (count == 1) {
2718 // exactly one matching class. Check if it's loaded
2719 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hi, pointerSize);
2720 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2721 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2722
2723 // We have a duplicate class, so check if we've already got it in our map.
2724 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2725 // We haven't seen this one yet
2726 Image::ObjCDuplicateClass duplicateClass;
2727 duplicateClass.sharedCacheClassOptIndex = index;
2728 duplicateClass.sharedCacheClassDuplicateIndex = 0;
2729 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2730 }
2731 }
2732 }
2733 else if (count > 1) {
2734 // more than one matching class - find one that is loaded
2735 void *clslist[count];
2736 void *hilist[count];
2737 objcClassOpt->getClassesAndHeaders(className, clslist, hilist);
2738 for (uint32_t i = 0; i < count; i++) {
2739 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize);
2740 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2741 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2742
2743 // We have a duplicate class, so check if we've already got it in our map.
2744 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2745 // We haven't seen this one yet
2746 Image::ObjCDuplicateClass duplicateClass;
2747 duplicateClass.sharedCacheClassOptIndex = index;
2748 duplicateClass.sharedCacheClassDuplicateIndex = i;
2749 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2750 }
2751
2752 break;
2753 }
2754 }
2755 }
2756 }
2757
2758 // Get the section for the class itself
2759 __block uint64_t classSectionStartVMAddr = 0;
2760 auto classSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2761 // We only have 23 bits in ObjCClassImageOffset to index into the classes
2762 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2763 return false;
2764 }
2765
2766 // We use 32-bit offsets so make sure the section is no larger than that.
2767 uint64_t classDatasVMOffset = sectInfo.sectAddr - loadAddress;
2768 if (classDatasVMOffset >= (1ULL << 32)) {
2769 return false;
2770 }
2771
2772 classSectionStartVMAddr = sectInfo.sectAddr;
2773 return true;
2774 };
2775 if (!classSectionCache.findSectionForVMAddr(classVMAddr, classSectionHandler)) {
2776 diag.error("Invalid class for objc optimisation");
2777 return;
2778 }
2779
2780 // Make sure we have an entry for our images offsets for later
2781 uint64_t classNameSectionVMOffset = classNameSectionStartVMAddr - loadAddress;
2782 uint64_t classSectionVMOffset = classSectionStartVMAddr - loadAddress;
2783 uint64_t hashTableVMOffsetsIndex = 0;
2784 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2785 if ( (nameAndDataVMOffset.first == classNameSectionVMOffset) && (nameAndDataVMOffset.second == classSectionVMOffset) )
2786 break;
2787 ++hashTableVMOffsetsIndex;
2788 }
2789
2790 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2791 // Didn't find an image entry with this offset. Add one if we have space
2792 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2793 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2794 // No more space. We need to give up
2795 diag.error("No more space for class hash table image");
2796 return;
2797 }
2798 image.classesNameAndDataVMOffsets.push_back({ classNameSectionVMOffset, classSectionVMOffset });
2799 }
2800
2801 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2802
2803 uint64_t classNameOffset = classNameVMAddr - classNameSectionStartVMAddr;
2804 uint64_t classDataOffset = classVMAddr - classSectionStartVMAddr;
2805
2806 closure::Image::ObjCClassNameImageOffset classNameTarget;
2807 classNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2808 classNameTarget.classNameImageOffset = (uint32_t)classNameOffset;
2809
2810 dyld3::closure::Image::ObjCClassImageOffset classDataTarget;
2811 classDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2812 classDataTarget.classData.imageOffset = (uint32_t)classDataOffset;
2813 classDataTarget.classData.isDuplicate = 0;
2814
2815 seenClasses.push_back({ classNameTarget, classDataTarget });
2816 });
2817 }
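// The two packed 32-bit targets pushed above use an assumed layout, inferred
// from the "24 bits"/"23 bits" comments in the section handlers and from the
// (1 << 8) duplicate-count assert in ObjCClassOpt::write() at the bottom of
// this file:
//   ObjCClassNameImageOffset: classNameImageIndex : 8, classNameImageOffset : 24
//   ObjCClassImageOffset:     imageIndex : 8, imageOffset : 23, isDuplicate : 1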
2818
2819 void ClosureBuilder::optimizeObjCProtocols(const objc_opt::objc_protocolopt2_t* objcProtocolOpt,
2820 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2821 ObjCOptimizerImage& image) {
2822
2823 BuilderLoadedImage& li = *image.loadedImage;
2824 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenProtocols = image.seenProtocols;
2825
2826 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2827 const uint32_t pointerSize = ma->pointerSize();
2828 const uint64_t loadAddress = ma->preferredLoadAddress();
2829
2830 // Protocol names and data may be in different sections depending on Swift vs ObjC, so handle multiple sections
2831 __block MachOAnalyzer::SectionCache protocolNameSectionCache(ma);
2832 __block MachOAnalyzer::SectionCache protocolSectionCache(ma);
2833
2834 ma->forEachObjCProtocol(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t protocolVMAddr,
2835 const dyld3::MachOAnalyzer::ObjCProtocol &objCProtocol) {
2836 if ( objCProtocol.requiresObjCReallocation ) {
2837 // We can't optimize this protocol as the runtime needs all fields to be present
2838 diag.error("Protocol is too small to be optimized");
2839 return;
2840 }
2841 if ( objCProtocol.isaVMAddr != 0 ) {
2842 // We can't optimize this protocol if it has an ISA as we want to override it
2843 diag.error("Protocol ISA cannot be non-zero");
2844 return;
2845 }
2846
2847 // Get the section for the name
2848 const char* protocolName = nullptr;
2849 MachOAnalyzer::PrintableStringResult protocolNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2850 __block uint64_t protocolNameSectionStartVMAddr = 0;
2851 auto protocolNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2852 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2853 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2854 return false;
2855 }
2856
2857 // We use 32-bit offsets so make sure the section is no larger than that.
2858 uint64_t protocolNameVMOffset = sectInfo.sectAddr - loadAddress;
2859 if (protocolNameVMOffset >= (1ULL << 32)) {
2860 return false;
2861 }
2862
2863 protocolNameSectionStartVMAddr = sectInfo.sectAddr;
2864 return true;
2865 };
2866 uint64_t protocolNameVMAddr = objCProtocol.nameVMAddr;
2867 protocolName = ma->getPrintableString(protocolNameVMAddr, protocolNameResult,
2868 &protocolNameSectionCache, protocolNameSectionHandler);
2869
2870 if ( protocolNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2871 diag.error("Invalid protocol name for objc optimisation");
2872 return;
2873 }
2874
2875 // If the protocol also exists in a shared cache image which is loaded, then objc
2876 // would have found that one, regardless of load order. So we can just skip this one.
2877 {
2878 void *cls;
2879 void *hi;
2880 uint32_t count = objcProtocolOpt->getClassAndHeader(protocolName, cls, hi);
2881 if (count == 1) {
2882 // exactly one matching protocol. Check if it's loaded
2883 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hi, pointerSize)) != sharedCacheImagesMap.end())
2884 return;
2885 }
2886 else if (count > 1) {
2887 // more than one matching protocol - find one that is loaded
2888 void *clslist[count];
2889 void *hilist[count];
2890 objcProtocolOpt->getClassesAndHeaders(protocolName, clslist, hilist);
2891 for (uint32_t i = 0; i < count; i++) {
2892 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize)) != sharedCacheImagesMap.end())
2893 return;
2894 }
2895 }
2896 }
2897
2898 // Get the section for the protocol itself
2899 __block uint64_t protocolSectionStartVMAddr = 0;
2900 auto protocolSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2901 // We only have 23 bits in ObjCClassImageOffset to index into the protocols
2902 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2903 return false;
2904 }
2905
2906 // We use 32-bit offsets so make sure the section is no larger than that.
2907 uint64_t protocolDatasVMOffset = sectInfo.sectAddr - loadAddress;
2908 if (protocolDatasVMOffset >= (1ULL << 32)) {
2909 return false;
2910 }
2911
2912 protocolSectionStartVMAddr = sectInfo.sectAddr;
2913 return true;
2914 };
2915 if (!protocolSectionCache.findSectionForVMAddr(protocolVMAddr, protocolSectionHandler)) {
2916 diag.error("Invalid protocol for objc optimisation");
2917 return;
2918 }
2919
2920 // Make sure we have an entry for our images offsets for later
2921 uint64_t protocolNameSectionVMOffset = protocolNameSectionStartVMAddr - loadAddress;
2922 uint64_t protocolSectionVMOffset = protocolSectionStartVMAddr - loadAddress;
2923 uint64_t hashTableVMOffsetsIndex = 0;
2924 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2925 if ( (nameAndDataVMOffset.first == protocolNameSectionVMOffset) && (nameAndDataVMOffset.second == protocolSectionVMOffset) )
2926 break;
2927 ++hashTableVMOffsetsIndex;
2928 }
2929
2930 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2931 // Didn't find an image entry with this offset. Add one if we have space
2932 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2933 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2934 // No more space. We need to give up
2935 diag.error("No more space for protocol hash table image");
2936 return;
2937 }
2938 image.classesNameAndDataVMOffsets.push_back({ protocolNameSectionVMOffset, protocolSectionVMOffset });
2939 }
2940
2941 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2942
2943 uint64_t protocolNameOffset = protocolNameVMAddr - protocolNameSectionStartVMAddr;
2944 uint64_t protocolDataOffset = protocolVMAddr - protocolSectionStartVMAddr;
2945
2946 closure::Image::ObjCClassNameImageOffset protocolNameTarget;
2947 protocolNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2948 protocolNameTarget.classNameImageOffset = (uint32_t)protocolNameOffset;
2949
2950 dyld3::closure::Image::ObjCClassImageOffset protocolDataTarget;
2951 protocolDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2952 protocolDataTarget.classData.imageOffset = (uint32_t)protocolDataOffset;
2953 protocolDataTarget.classData.isDuplicate = 0;
2954
2955 seenProtocols.push_back({ protocolNameTarget, protocolDataTarget });
2956 });
2957 }
2958
2959 // used at launch by dyld when kernel has already mapped main executable
2960 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
2961 {
2962 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
2963 const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
2964 // set up stack based storage for all arrays
2965 BuilderLoadedImage loadImagesStorage[512];
2966 Image::LinkedImage dependenciesStorage[512*8];
2967 InterposingTuple tuplesStorage[64];
2968 Closure::PatchEntry cachePatchStorage[64];
2969 const char* weakDefNameStorage[64];
2970 _loadedImages.setInitialStorage(loadImagesStorage, 512);
2971 _dependencies.setInitialStorage(dependenciesStorage, 512*8);
2972 _interposingTuples.setInitialStorage(tuplesStorage, 64);
2973 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
2974 _weakDefsFromChainedBinds.setInitialStorage(weakDefNameStorage, 64);
2975 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
2976
2977 const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archs, _platform);
2978 if ( mainExecutable == nullptr )
2979 return nullptr;
2980 if ( !mainExecutable->isDynamicExecutable() ) {
2981 _diag.error("not a main executable");
2982 return nullptr;
2983 }
2984 _isLaunchClosure = true;
2985 _allowMissingLazies = true;
2986
2987 _nextIndex = 0;
2988
2989 // add main executable
2990 __block BuilderLoadedImage mainEntry;
2991 mainEntry.loadedFileInfo = fileInfo;
2992 mainEntry.imageNum = 0; // We can't fill this in until we've processed any inserted dylibs
2993 mainEntry.unmapWhenDone = false;
2994 mainEntry.contentRebased = false;
2995 mainEntry.hasInits = false;
2996 mainEntry.markNeverUnload = true;
2997 mainEntry.rtldLocal = false;
2998 mainEntry.isBadImage = false;
2999 mainEntry.mustBuildClosure = true;
3000 mainEntry.hasMissingWeakImports = false;
3001 mainEntry.overrideImageNum = 0;
3002
3003 // Set the executable load path so that @executable_path can use it later
3004 _mainProgLoadPath = fileInfo.path;
3005
3006 // add any DYLD_INSERT_LIBRARIES
3007 _pathOverrides.forEachInsertedDylib(^(const char* dylibPath, bool &stop) {
3008 LoadedImageChain chainMain = { nullptr, mainEntry };
3009 BuilderLoadedImage* foundTopImage;
3010 if ( !findImage(dylibPath, chainMain, foundTopImage, LinkageType::kInserted, 0, true) ) {
3011 if ( !allowInsertFailures ) {
3012 if ( _diag.noError() )
3013 _diag.error("could not load inserted dylib %s", dylibPath);
3014 stop = true;
3015 return;
3016 }
3017 _diag.clearError(); // FIXME add way to plumb back warning
3018 }
3019 });
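// For reference, inserted dylibs come from the DYLD_INSERT_LIBRARIES
// environment variable, e.g. (hypothetical library path):
//   DYLD_INSERT_LIBRARIES=/usr/lib/libMyInterposer.dylib ./SomeApp
// They land in _loadedImages ahead of the main executable, which is why
// _mainProgLoadIndex is computed only after this loop.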
3020
3021 if ( _diag.hasError() )
3022 return nullptr;
3023
3024 _mainProgLoadIndex = (uint32_t)_loadedImages.count();
3025 mainEntry.imageNum = _startImageNum + _nextIndex++;
3026 _loadedImages.push_back(mainEntry);
3027
3028 // get mach_headers for all images needed to launch this main executable
3029 LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
3030 recursiveLoadDependents(chainStart);
3031 if ( _diag.hasError() )
3032 return nullptr;
3033 for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
3034 LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
3035 recursiveLoadDependents(insertChainStart);
3036 if ( _diag.hasError() )
3037 return nullptr;
3038 }
3039 loadDanglingUpwardLinks();
3040
3041 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3042 // initializer order. It's not enough to just do the top level image as we may dlopen while already in a dlopen
3043 invalidateInitializerRoots();
3044
3045 // now that everything loaded, set _libDyldImageNum and _libSystemImageNum
3046 for (BuilderLoadedImage& li : _loadedImages) {
3047 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
3048 _libDyldImageNum = li.imageNum;
3049 else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
3050 _libSystemImageNum = li.imageNum;
3051 }
3052
3053 // only some images need to go into closure (non-rooted ones from dyld cache do not)
3054 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3055 for (BuilderLoadedImage& li : _loadedImages) {
3056 if ( li.mustBuildClosure ) {
3057 writers.push_back(ImageWriter());
3058 buildImage(writers.back(), li);
3059 if ( _diag.hasError() )
3060 return nullptr;
3061 }
3062 }
3063
3064 bool optimizedObjC = optimizeObjC(writers);
3065
3066 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3067 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3068 BuilderLoadedImage& li = _loadedImages[imageIndex];
3069 if ( li.mustBuildClosure ) {
3070 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3071 writerIndex++;
3072 }
3073 }
3074
3075 // combine all Image objects into one ImageArray
3076 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3077 for (ImageWriter& writer : writers) {
3078 imageArrayWriter.appendImage(writer.finalize());
3079 writer.deallocate();
3080 }
3081 const ImageArray* imageArray = imageArrayWriter.finalize();
3082
3083 // merge ImageArray object into LaunchClosure object
3084 __block LaunchClosureWriter closureWriter(imageArray);
3085
3086 if (optimizedObjC) {
3087 if (!_objcSelectorsHashTable.empty())
3088 closureWriter.setObjCSelectorInfo(_objcSelectorsHashTable, _objcSelectorsHashTableImages);
3089
3090 if (!_objcClassesHashTableImages.empty()) {
3091 closureWriter.setObjCClassAndProtocolInfo(_objcClassesHashTable, _objcProtocolsHashTable,
3092 _objcClassesHashTableImages);
3093 }
3094
3095 if ( _objcDuplicateClassWarnings != nullptr ) {
3096 _objcDuplicateClassWarnings->forEachPath(^(const char* warning) {
3097 closureWriter.addWarning(Closure::Warning::duplicateObjCClass, warning);
3098 });
3099 }
3100
3101 if (!_objcClassesDuplicatesHashTable.empty())
3102 closureWriter.setObjCDuplicateClassesInfo(_objcClassesDuplicatesHashTable);
3103 }
3104
3105 // record shared cache info
3106 if ( _dyldCache != nullptr ) {
3107 // record cache UUID
3108 uuid_t cacheUUID;
3109 _dyldCache->getUUID(cacheUUID);
3110 closureWriter.setDyldCacheUUID(cacheUUID);
3111
3112 // record any cache patching needed because of dylib overriding cache
3113 for (const BuilderLoadedImage& li : _loadedImages) {
3114 if ( li.overrideImageNum != 0 ) {
3115 uint32_t imageIndex = li.overrideImageNum - (uint32_t)_dyldImageArray->startImageNum();
3116 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3117 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3118 return (const MachOLoaded*)findDependent(mh, depIndex);
3119 };
3120 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3121 _dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3122 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3123 Diagnostics patchDiag;
3124 Closure::PatchEntry patch;
3125 patch.overriddenDylibInCache = li.overrideImageNum;
3126 patch.exportCacheOffset = cacheOffsetOfImpl;
3127 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3128 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3129 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3130 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3131 patch.replacement.image.offset = foundInfo.value;
3132 }
3133 else {
3134 // this means the symbol is missing in the cache override dylib, so set any uses to NULL
3135 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3136 patch.replacement.absolute.value = 0;
3137 }
3138 patches.push_back(patch);
3139 });
3140 closureWriter.addCachePatches(patches);
3141 }
3142 }
3143
3144 // handle any extra weak-def coalescing needed by chained fixups
3145 if ( !_weakDefsFromChainedBinds.empty() ) {
3146 for (const char* symbolName : _weakDefsFromChainedBinds) {
3147 Image::ResolvedSymbolTarget cacheOverrideTarget;
3148 bool haveCacheOverride = false;
3149 bool foundCacheOverrideIsWeakDef = false;
3150 for (const BuilderLoadedImage& li : _loadedImages) {
3151 if ( !li.loadAddress()->hasWeakDefs() )
3152 continue;
3153 Image::ResolvedSymbolTarget target;
3154 ResolvedTargetInfo targetInfo;
3155 if ( findSymbolInImage(li.loadAddress(), symbolName, 0, false, false, target, targetInfo) ) {
3156 if ( li.loadAddress()->inDyldCache() ) {
3157 if ( haveCacheOverride ) {
3158 Closure::PatchEntry patch;
3159 patch.exportCacheOffset = (uint32_t)target.sharedCache.offset;
3160 patch.overriddenDylibInCache = li.imageNum;
3161 patch.replacement = cacheOverrideTarget;
3162 _weakDefCacheOverrides.push_back(patch);
3163 }
3164 else {
3165 // found first in cached dylib, so no need to patch cache for this symbol
3166 break;
3167 }
3168 }
3169 else {
3170 // found image that exports this symbol and is not in cache
3171 if ( !haveCacheOverride || (foundCacheOverrideIsWeakDef && !targetInfo.isWeakDef) ) {
3172 // update cache to use this symbol if it is the first one found, or the first non-weak one found
3173 cacheOverrideTarget = target;
3174 foundCacheOverrideIsWeakDef = targetInfo.isWeakDef;
3175 haveCacheOverride = true;
3176 }
3177 }
3178 }
3179 }
3180 }
3181 }
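// Net effect of the loop above, roughly: the first image in load order that
// exports the symbol wins. If that winner is in the dyld cache, nothing needs
// patching; otherwise every cached dylib that also exports the symbol gets a
// patch entry pointing at the on-disk definition (upgraded to a later
// non-weak definition if the first one found was weak).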
3182
3183 // record any cache patching needed because weak-def C++ symbols override dyld cache
3184 if ( !_weakDefCacheOverrides.empty() )
3185 closureWriter.addCachePatches(_weakDefCacheOverrides);
3186
3187 }
3188
3189 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3190 // if closure is built on-device for iOS, then record boot UUID
3191 char bootSessionUUID[256] = { 0 };
3192 size_t bootSize = sizeof(bootSessionUUID);
3193 if ( sysctlbyname("kern.bootsessionuuid", bootSessionUUID, &bootSize, NULL, 0) == 0 )
3194 closureWriter.setBootUUID(bootSessionUUID);
3195 #endif
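// Recording the boot session UUID (above, on iOS) ties this closure to the
// current boot; the assumption is that dyld compares the stored value against
// kern.bootsessionuuid at launch and rebuilds the closure after a reboot.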
3196
3197 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3198 uint32_t progVarsOffset;
3199 if ( mainExecutable->hasProgramVars(_diag, progVarsOffset) ) {
3200 // on macOS binaries may have a __dyld section that has ProgramVars to use
3201 closureWriter.setHasProgramVars(progVarsOffset);
3202 }
3203 if ( _diag.hasError() )
3204 return nullptr;
3205 #endif
3206
3207 // record any interposing info
3208 if ( !_interposingDisabled ) {
3209 imageArray->forEachImage(^(const Image* image, bool &stop) {
3210 if ( !image->inDyldCache() )
3211 addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
3212 });
3213 }
3214
3215 // modify fixups in contained Images by applying interposing tuples
3216 closureWriter.applyInterposing((const LaunchClosure*)closureWriter.currentTypedBytes());
3217
3218 // set flags
3219 closureWriter.setUsedInterposing(_interposingTuplesUsed);
3220 closureWriter.setUsedAtPaths(_atPathUsed);
3221 closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
3222 closureWriter.setHasInsertedLibraries(_mainProgLoadIndex > 0);
3223 closureWriter.setInitImageCount((uint32_t)_loadedImages.count());
3224
3225 // add other closure attributes
3226 addClosureInfo(closureWriter);
3227
3228 // make result
3229 const LaunchClosure* result = closureWriter.finalize();
3230 imageArrayWriter.deallocate();
3231
3232 timer.setData4(dyld3::DyldTimingBuildClosure::LaunchClosure_Built);
3233
3234 return result;
3235 }
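// A minimal caller-side sketch of building a launch closure. The setup of
// fileSystem, dyldCache, archs, pathOverrides and errorInfo is elided, and
// AtPath::all / Platform::macOS are assumed values, not requirements:
//   ClosureBuilder builder(kFirstLaunchClosureImageNum, fileSystem, dyldCache,
//                          true /*cache is live*/, archs, pathOverrides,
//                          ClosureBuilder::AtPath::all, true /*allow relative paths*/,
//                          &errorInfo, Platform::macOS, nullptr /*no binding handlers*/);
//   const LaunchClosure* closure = builder.makeLaunchClosure(fileInfo, false /*allowInsertFailures*/);
//   // nullptr here means the build failed; errorInfo is assumed to carry the reason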
3236
3237 // used by libdyld for dlopen()
3238 const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
3239 closure::ImageNum callerImageNum, bool noLoad, bool forceBindLazies, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
3240 {
3241 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
3242 // set up stack based storage for all arrays
3243 BuilderLoadedImage loadImagesStorage[300];
3244 Image::LinkedImage dependenciesStorage[128];
3245 Closure::PatchEntry cachePatchStorage[64];
3246 _loadedImages.setInitialStorage(loadImagesStorage, 300);
3247 _dependencies.setInitialStorage(dependenciesStorage, 128);
3248 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
3249 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
3250
3251 // fill in builder array from already loaded images
3252 bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
3253 uintptr_t callerImageIndex = UINTPTR_MAX;
3254 for (const LoadedImage& ali : alreadyLoadedList) {
3255 const Image* image = ali.image();
3256 const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
3257 bool inDyldCache = ma->inDyldCache();
3258 BuilderLoadedImage entry;
3259 ImageNum overrideImageNum;
3260 entry.loadedFileInfo.path = image->path();
3261 entry.loadedFileInfo.fileContent = ma;
3262 entry.loadedFileInfo.sliceOffset = 0;
3263 entry.loadedFileInfo.inode = 0;
3264 entry.loadedFileInfo.mtime = 0;
3265 entry.imageNum = image->imageNum();
3266 entry.dependents = image->dependentsArray();
3267 entry.unmapWhenDone = false;
3268 entry.contentRebased = inDyldCache;
3269 entry.hasInits = false;
3270 entry.markNeverUnload = image->neverUnload();
3271 entry.rtldLocal = ali.hideFromFlatSearch();
3272 entry.isBadImage = false;
3273 entry.mustBuildClosure = false;
3274 entry.hasMissingWeakImports = false;
3275 entry.overrideImageNum = 0;
3276 if ( !inDyldCache && image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
3277 entry.overrideImageNum = overrideImageNum;
3278 canUseSharedCacheClosure = false;
3279 }
3280 if ( !inDyldCache || cachedDylibsExpectedOnDisk )
3281 image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
3282 if ( entry.imageNum == callerImageNum )
3283 callerImageIndex = _loadedImages.count();
3284 _loadedImages.push_back(entry);
3285 }
3286 _alreadyInitedIndex = (uint32_t)_loadedImages.count();
3287
3288 // find main executable (may be needed for @executable_path)
3289 _isLaunchClosure = false;
3290 for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
3291 if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
3292 _mainProgLoadIndex = i;
3293 _mainProgLoadPath = _loadedImages[i].path();
3294 break;
3295 }
3296 }
3297
3298 // We can't use an existing dlopen closure if the main closure had interposing tuples
3299 if (canUseSharedCacheClosure) {
3300 if (mainClosure->hasInterposings())
3301 canUseSharedCacheClosure = false;
3302 }
3303
3304 // add top level dylib being dlopen()ed
3305 BuilderLoadedImage* foundTopImage;
3306 _nextIndex = 0;
3307 // @rpath search order is the caller's LC_RPATH, then the main executable's LC_RPATH
3308 BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
3309 LoadedImageChain chainCaller = { nullptr, callerImage };
3310 LoadedImageChain chainMain = { &chainCaller, _loadedImages[_mainProgLoadIndex] };
3311 if ( !findImage(path, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3312 // If we didn't find the image, it might be a symlink to something in the dyld cache that is not on disk
3313 if ( (_dyldCache != nullptr) && !_dyldCache->header.dylibsExpectedOnDisk ) {
3314 char resolvedPath[PATH_MAX];
3315 if ( _fileSystem.getRealPath(path, resolvedPath) ) {
3316 _diag.clearError();
3317 if ( !findImage(resolvedPath, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3318 return nullptr;
3319 }
3320 } else {
3321 // We didn't find a new path from realpath
3322 return nullptr;
3323 }
3324 } else {
3325 // cached dylibs are on disk, so don't call realpath() again; it would have been found by the first call to findImage()
3326 return nullptr;
3327 }
3328 }
3329
3330 // exit early in RTLD_NOLOAD mode
3331 if ( noLoad ) {
3332 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_NoLoad);
3333 // if no new images added to _loadedImages, then requested path was already loaded
3334 if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
3335 *topImageNum = foundTopImage->imageNum;
3336 else
3337 *topImageNum = 0;
3338 return nullptr;
3339 }
3340
3341 // fast path if roots are not allowed and target is in dyld cache or is one of the pre-built "other" images
3342 if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
3343 if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
3344 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3345 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3346 else
3347 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3348 *topImageNum = foundTopImage->imageNum;
3349 return nullptr;
3350 }
3351 }
3352
3353 // recursive load dependents
3354 // @rpath for images the top dylib depends on uses LC_RPATH from the caller, main exe, and the dylib being dlopen()ed
3355 LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
3356 recursiveLoadDependents(chainTopDylib, canUseSharedCacheClosure);
3357 if ( _diag.hasError() )
3358 return nullptr;
3359 loadDanglingUpwardLinks(canUseSharedCacheClosure);
3360 if ( _diag.hasError() )
3361 return nullptr;
3362
3363 // RTLD_NOW means fail the dlopen() if a symbol cannot be bound
3364 _allowMissingLazies = !forceBindLazies;
3365
3366 // only some images need to go into closure (ones from dyld cache do not, unless the cache format changed)
3367 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3368 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3369 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3370 // initializer order. It's not enough to just do the top level image as we may dlopen while already in a dlopen
3371 invalidateInitializerRoots();
3372
3373 for (uintptr_t loadedImageIndex = 0; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
3374 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
3375 if ( li.mustBuildClosure ) {
3376 writers.push_back(ImageWriter());
3377 buildImage(writers.back(), li);
3378 if ( _diag.hasError() )
3379 return nullptr;
3380 }
3381 }
3382
3383 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3384 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3385 BuilderLoadedImage& li = _loadedImages[imageIndex];
3386 if ( li.mustBuildClosure ) {
3387 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3388 writerIndex++;
3389 }
3390 }
3391 }
3392 if ( _diag.hasError() )
3393 return nullptr;
3394
3395 // check if top image loaded is in shared cache along with everything it depends on
3396 *topImageNum = foundTopImage->imageNum;
3397 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3398 if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
3399 // We used a shared cache built closure, but now discovered roots. We need to try again
3400 *topImageNum = 0;
3401 return sRetryDlopenClosure;
3402 }
3403 } else {
3404 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3405 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3406 else
3407 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3408 return nullptr;
3409 }
3410
3411 // combine all Image objects into one ImageArray
3412 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3413 for (ImageWriter& writer : writers) {
3414 imageArrayWriter.appendImage(writer.finalize());
3415 writer.deallocate();
3416 }
3417 const ImageArray* imageArray = imageArrayWriter.finalize();
3418
3419 // merge ImageArray object into LaunchClosure object
3420 DlopenClosureWriter closureWriter(imageArray);
3421
3422 // add other closure attributes
3423 closureWriter.setTopImageNum(foundTopImage->imageNum);
3424
3425 // record any cache patching needed because of dylib overriding cache
3426 if ( _dyldCache != nullptr ) {
3427 for (const BuilderLoadedImage& li : _loadedImages) {
3428 if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
3429 const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
3430 uint32_t imageIndex = cacheImage->imageNum() - (uint32_t)_dyldCache->cachedDylibsImageArray()->startImageNum();
3431 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3432 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3433 return (const MachOLoaded*)findDependent(mh, depIndex);
3434 };
3435 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3436 _dyldCache->forEachPatchableExport(imageIndex,
3437 ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3438 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3439 Diagnostics patchDiag;
3440 Closure::PatchEntry patch;
3441 patch.overriddenDylibInCache = li.overrideImageNum;
3442 patch.exportCacheOffset = cacheOffsetOfImpl;
3443 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3444 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3445 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3446 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3447 patch.replacement.image.offset = foundInfo.value;
3448 }
3449 else {
3450 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3451 patch.replacement.absolute.value = 0;
3452 }
3453 patches.push_back(patch);
3454 });
3455 closureWriter.addCachePatches(patches);
3456 }
3457 }
3458 }
3459
3460 // modify fixups in contained Images by applying interposing tuples
3461 closureWriter.applyInterposing(mainClosure);
3462
3463 // dlopen closures should never keep track of missing paths as we don't cache these closures.
3464 assert(_mustBeMissingPaths == nullptr);
3465
3466 // make final DlopenClosure object
3467 const DlopenClosure* result = closureWriter.finalize();
3468 imageArrayWriter.deallocate();
3469 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_Built);
3470 return result;
3471 }
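// A caller-side sketch of the retry protocol around sRetryDlopenClosure
// (the real call sites are in libdyld; builder setup is elided and a builder
// is assumed to be single-use, hence the second instance):
//   closure::ImageNum topImageNum = 0;
//   const DlopenClosure* c = builder.makeDlopenClosure(path, mainClosure, loadedList,
//                              callerImageNum, noLoad, forceBindLazies,
//                              true /*canUseSharedCacheClosure*/, &topImageNum);
//   if ( c == ClosureBuilder::sRetryDlopenClosure ) {
//       // roots were discovered mid-build; retry without shared cache closures
//       ClosureBuilder builder2(/* same arguments as before */);
//       c = builder2.makeDlopenClosure(path, mainClosure, loadedList, callerImageNum,
//                                      noLoad, forceBindLazies, false, &topImageNum);
//   }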
3472
3473
3474 // used by dyld_closure_util
3475 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
3476 {
3477 char realerPath[MAXPATHLEN];
3478 closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archs, _platform, realerPath);
3479 const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
3480 loadedFileInfo.path = mainPath;
3481 if (_diag.hasError())
3482 return nullptr;
3483 if (mh == nullptr) {
3484 _diag.error("could not load file");
3485 return nullptr;
3486 }
3487 if (!mh->isDynamicExecutable()) {
3488 _diag.error("file is not an executable");
3489 return nullptr;
3490 }
3491 const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
3492 const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
3493 loadedFileInfo.unload(loadedFileInfo);
3494 return launchClosure;
3495 }
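// dyld_closure_util is assumed to drive this overload along the lines of
// (flag and path are illustrative, not verified against the tool):
//   dyld_closure_util -create_closure /path/to/SomeApp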
3496
3497 void ClosureBuilder::setDyldCacheInvalidFormatVersion() {
3498 _dyldCacheInvalidFormatVersion = true;
3499 }
3500
3501
3502 // used by dyld shared cache builder
3503 const ImageArray* ClosureBuilder::makeDyldCacheImageArray(bool customerCache, const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
3504 {
3505 // because this is run in the cache builder using dispatch_apply(), there is minimal stack space,
3506 // so set up storage for all arrays to be vm_allocated
3507 uintptr_t maxImageCount = dylibs.count() + 16;
3508 _loadedImages.reserve(maxImageCount);
3509 _dependencies.reserve(maxImageCount*16);
3510
3511 _makingDyldCacheImages = true;
3512 _allowMissingLazies = false;
3513 _makingCustomerCache = customerCache;
3514 _aliases = &aliases;
3515
3516 // build _loadedImages[] with every dylib in cache
3517 __block ImageNum imageNum = _startImageNum;
3518 for (const CachedDylibInfo& aDylibInfo : dylibs) {
3519 BuilderLoadedImage entry;
3520 entry.loadedFileInfo = aDylibInfo.fileInfo;
3521 entry.imageNum = imageNum++;
3522 entry.unmapWhenDone = false;
3523 entry.contentRebased = false;
3524 entry.hasInits = false;
3525 entry.markNeverUnload = true;
3526 entry.rtldLocal = false;
3527 entry.isBadImage = false;
3528 entry.mustBuildClosure = false;
3529 entry.hasMissingWeakImports = false;
3530 entry.overrideImageNum = 0;
3531 _loadedImages.push_back(entry);
3532 }
3533
3534 // wire up dependencies between cached dylibs
3535 for (BuilderLoadedImage& li : _loadedImages) {
3536 LoadedImageChain chainStart = { nullptr, li };
3537 recursiveLoadDependents(chainStart);
3538 if ( _diag.hasError() )
3539 break;
3540 }
3541 assert(_loadedImages.count() == dylibs.count());
3542
3543 // create an ImageWriter for each cached dylib
3544 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3545 for (BuilderLoadedImage& li : _loadedImages) {
3546 writers.push_back(ImageWriter());
3547 buildImage(writers.back(), li);
3548 }
3549
3550 // add initializer order into each dylib
3551 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3552 for (const BuilderLoadedImage& li : _loadedImages) {
3553 uint32_t index = li.imageNum - _startImageNum;
3554 computeInitOrder(writers[index], index);
3555 }
3556
3557 // combine all Image objects into one ImageArray
3558 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3559 for (ImageWriter& writer : writers) {
3560 imageArrayWriter.appendImage(writer.finalize());
3561 writer.deallocate();
3562 }
3563 const ImageArray* imageArray = imageArrayWriter.finalize();
3564
3565 return imageArray;
3566 }
3567
3568
3569 #if BUILDING_CACHE_BUILDER
3570 const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
3571 {
3572 // because this is run in the cache builder using dispatch_apply(), there is minimal stack space,
3573 // so set up storage for all arrays to be vm_allocated
3574 uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
3575 _loadedImages.reserve(maxImageCount);
3576 _dependencies.reserve(maxImageCount*16);
3577
3578 // build _loadedImages[] with every other dylib; cached dylibs are appended later as dependencies are resolved
3579 _nextIndex = 0;
3580 for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
3581 BuilderLoadedImage entry;
3582 entry.loadedFileInfo = aDylibInfo;
3583 entry.imageNum = _startImageNum + _nextIndex++;
3584 entry.unmapWhenDone = false;
3585 entry.contentRebased = false;
3586 entry.hasInits = false;
3587 entry.markNeverUnload = false;
3588 entry.rtldLocal = false;
3589 entry.isBadImage = false;
3590 entry.mustBuildClosure = false;
3591 entry.hasMissingWeakImports = false;
3592 entry.overrideImageNum = 0;
3593 _loadedImages.push_back(entry);
3594 }
3595
3596 // wire up dependencies between the other dylibs
3597 // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
3598 // to check the count on each iteration.
3599 for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
3600 BuilderLoadedImage& li = _loadedImages[index];
3601 LoadedImageChain chainStart = { nullptr, li };
3602 recursiveLoadDependents(chainStart);
3603 if ( _diag.hasError() ) {
3604 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3605 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3606 _diag.clearError();
3607 li.isBadImage = true; // mark bad
3608 }
3609 }
3610
3611 auto invalidateBadImages = [&]() {
3612 // Invalidate images with bad dependencies
3613 while (true) {
3614 bool madeChange = false;
3615 for (BuilderLoadedImage& li : _loadedImages) {
3616 if (li.isBadImage) {
3617 // Already invalidated
3618 continue;
3619 }
3620 for (Image::LinkedImage depIndex : li.dependents) {
3621 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
3622 continue;
3623 if ( depIndex.imageNum() >= dyld3::closure::kLastDyldCacheImageNum ) {
3624 // dlopen closures can only depend on the shared cache. This is because if foo.dylib links bar.dylib
3625 // and bar.dylib is loaded into the launch closure, then the dlopen closure for foo.dylib wouldn't see
3626 // bar.dylib at the image num in the launch closure
3627 _diag.warning("while building dlopen closure for %s: dependent dylib is not from shared cache", li.loadedFileInfo.path);
3628 li.isBadImage = true; // mark bad
3629 madeChange = true;
3630 continue;
3631 }
3632 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
3633 if (depImage.isBadImage) {
3634 _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
3635 li.isBadImage = true; // mark bad
3636 madeChange = true;
3637 }
3638 }
3639 }
3640 if (!madeChange)
3641 break;
3642 }
3643 };
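// invalidateBadImages() runs to a fixed point: marking one image bad can make
// images that depend on it bad on the next pass, so it loops until a full
// pass makes no change.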
3644
3645 invalidateBadImages();
3646
3647 // create an ImageWriter for each other dylib
3648 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3649 for (BuilderLoadedImage& li : _loadedImages) {
3650 if ( li.isBadImage ) {
3651 writers.push_back(ImageWriter());
3652 writers.back().setInvalid();
3653 continue;
3654 }
3655 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3656 continue;
3657 writers.push_back(ImageWriter());
3658 buildImage(writers.back(), li);
3659 if ( _diag.hasError() ) {
3660 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3661 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3662 _diag.clearError();
3663 li.isBadImage = true; // mark bad
3664 writers.back().setInvalid();
3665 }
3666 }
3667
3668 invalidateBadImages();
3669
3670 // add initializer order into each dylib
3671 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3672 for (const BuilderLoadedImage& li : _loadedImages) {
3673 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3674 continue;
3675 if (li.isBadImage)
3676 continue;
3677 uint32_t index = li.imageNum - _startImageNum;
3678 computeInitOrder(writers[index], index);
3679 }
3680
3681 // combine all Image objects into one ImageArray
3682 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3683 for (ImageWriter& writer : writers) {
3684 imageArrayWriter.appendImage(writer.finalize());
3685 writer.deallocate();
3686 }
3687 const ImageArray* imageArray = imageArrayWriter.finalize();
3688
3689 return imageArray;
3690 }
3691 #endif
3692
3693
3694 bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
3695 {
3696 for (const LoadedImage& ali : loadedList) {
3697 if ( ali.image()->representsImageNum(imageNum) )
3698 return true;
3699 }
3700 return false;
3701 }
3702
3703 void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
3704 {
3705 // breadth first load
3706 STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
3707 image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
3708 if ( !inLoadedImageArray(loadedList, depImageNum) ) {
3709 const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
3710 loadedList.push_back(LoadedImage::make(depImage));
3711 needToRecurse.push_back(depImage);
3712 }
3713 });
3714
3715 // recurse load
3716 for (const Image* img : needToRecurse) {
3717 buildLoadOrderRecurse(loadedList, imagesArrays, img);
3718 }
3719 }
3720
3721 void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
3722 {
3723 const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImage());
3724 loadedList.push_back(LoadedImage::make(topImage));
3725 buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
3726 }
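// Usage sketch (assumed, mirroring how libdyld flattens closures into a load
// list; the capacity is arbitrary here):
//   STACK_ALLOC_ARRAY(LoadedImage, loadedList, 1024);
//   ClosureBuilder::buildLoadOrder(loadedList, imagesArrays, mainClosure);
//   // loadedList now holds the top image followed by dependents, breadth-first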
3727
3728
3729
3730 //////////////////////////// ObjCStringTable ////////////////////////////////////////
3731
3732 template<typename PerfectHashT, typename ImageOffsetT>
3733 void ObjCStringTable::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings)
3734 {
3735 ObjCSelectorOpt::StringTarget sentinel = (ObjCSelectorOpt::StringTarget)ImageOffsetT::sentinelValue;
3736 // Set header
3737 capacity = phash.capacity;
3738 occupied = phash.occupied;
3739 shift = phash.shift;
3740 mask = phash.mask;
3741 sentinelTarget = sentinel;
3742 roundedTabSize = std::max(phash.mask+1, 4U);
3743 salt = phash.salt;
3744
3745 // Set hash data
3746 for (uint32_t i = 0; i < 256; i++) {
3747 scramble[i] = phash.scramble[i];
3748 }
3749 for (uint32_t i = 0; i < phash.mask+1; i++) {
3750 tab[i] = phash.tab[i];
3751 }
3752
3753 dyld3::Array<StringTarget> targetsArray = targets();
3754 dyld3::Array<StringHashCheckByte> checkBytesArray = checkBytes();
3755
3756 // Set offsets to the sentinel
3757 for (uint32_t i = 0; i < phash.capacity; i++) {
3758 targetsArray[i] = sentinel;
3759 }
3760 // Set checkbytes to 0
3761 for (uint32_t i = 0; i < phash.capacity; i++) {
3762 checkBytesArray[i] = 0;
3763 }
3764
3765 // Set real string offsets and checkbytes
3766 for (const auto& s : strings) {
3767 assert(s.second.raw != sentinelTarget);
3768 uint32_t h = hash(s.first);
3769 targetsArray[h] = s.second.raw;
3770 checkBytesArray[h] = checkbyte(s.first);
3771 }
3772 }
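// Read-side sketch: getIndex() (used by ObjCClassOpt::write below) is assumed
// to invert this layout the same way the objc runtime's selector table does:
//   uint32_t h = hash(str);                   // shift/mask/salt/scramble/tab
//   if ( checkBytes()[h] != checkbyte(str) )  // cheap one-byte miss filter
//       return indexNotFound;
//   return (targets()[h] == sentinelTarget) ? indexNotFound : h;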
3773
3774 //////////////////////////// ObjCClassOpt ////////////////////////////////////////
3775
3776
3777 template<typename PerfectHashT, typename ImageOffsetT, typename ClassesMapT>
3778 void ObjCClassOpt::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings,
3779 const ClassesMapT& classes, uint32_t preCalculatedDuplicateCount)
3780 {
3781 ObjCStringTable::write(phash, strings);
3782
3783 __block dyld3::Array<ClassTarget> classOffsetsArray = classOffsets();
3784 __block dyld3::Array<ClassTarget> duplicateOffsetsArray = duplicateOffsets(preCalculatedDuplicateCount);
3785
3786 // Set class offsets to the sentinel value
3787 for (uint32_t i = 0; i < capacity; i++) {
3788 classOffsetsArray[i].raw = dyld3::closure::Image::ObjCImageOffset::sentinelValue;
3789 }
3790
3791 classes.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values, uint64_t valuesCount) {
3792 uint32_t keyIndex = getIndex(key);
3793 assert(keyIndex != indexNotFound);
3794 assert(classOffsetsArray[keyIndex].raw == dyld3::closure::Image::ObjCImageOffset::sentinelValue);
3795
3796 if (valuesCount == 1) {
3797 // Only one entry so write it into the class offsets directly
3798 Image::ObjCClassImageOffset classImageOffset = *(values[0]);
3799 assert(classImageOffset.classData.isDuplicate == 0);
3800 classOffsetsArray[keyIndex] = classImageOffset;
3801 return;
3802 }
3803
3804 // We have more than one value. We add a placeholder to the class offsets which records the start
3805 // of this class's contiguous run of entries in the duplicates array
3806 uint32_t dest = duplicateCount();
3807 duplicateCount() += valuesCount;
3808
3809 Image::ObjCClassImageOffset classImagePlaceholder;
3810 assert(valuesCount < (1 << 8));
3811 classImagePlaceholder.duplicateData.count = (uint32_t)valuesCount;
3812 classImagePlaceholder.duplicateData.index = dest;
3813 classImagePlaceholder.duplicateData.isDuplicate = 1;
3814 classOffsetsArray[keyIndex] = classImagePlaceholder;
3815
3816 for (uint64_t i = 0; i != valuesCount; ++i) {
3817 Image::ObjCClassImageOffset classImageOffset = *(values[i]);
3818 assert(classImageOffset.classData.isDuplicate == 0);
3819 duplicateOffsetsArray.push_back(classImageOffset);
3820 }
3821 });
3822 }
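// Read-side sketch of the duplicate encoding built above (the real reader is
// in the closure-reading code; names here are illustrative):
//   Image::ObjCClassImageOffset v = classOffsets()[getIndex(name)];
//   if ( v.duplicateData.isDuplicate ) {
//       // a contiguous run of 'count' entries in the duplicates array
//       for (uint32_t i = 0; i != v.duplicateData.count; ++i)
//           visit(duplicatesArray[v.duplicateData.index + i]);
//   } else {
//       visit(v);   // single definition, stored inline
//   }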
3823
3824 } // namespace closure
3825 } // namespace dyld3