dyld3/ClosureBuilder.cpp (from dyld-733.8)
1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23
24 #include <sys/types.h>
25 #include <sys/stat.h>
26 #include <sys/errno.h>
27 #include <sys/mman.h>
29 #include <sys/param.h>
30 #include <ext/__hash>
31 #include <fcntl.h>
32 #include <unistd.h>
33 #include <string.h>
34 #include <sys/types.h>
35 #include <sys/sysctl.h>
36
37 #include <mach-o/dyld_priv.h>
38
39 #include "ClosureWriter.h"
40 #include "ClosureBuilder.h"
41 #include "MachOAnalyzer.h"
42 #include "libdyldEntryVector.h"
43 #include "Tracing.h"
44
45 #define CLOSURE_SELOPT_WRITE
46 #include "objc-shared-cache.h"
47
48 namespace dyld3 {
49 namespace closure {
50
51
52 const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);
53
54 ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
55 const GradedArchs& archs, const PathOverrides& pathOverrides, AtPath atPathHandling, bool allowRelativePaths,
56 LaunchErrorInfo* errorInfo, Platform platform, const CacheDylibsBindingHandlers* handlers)
57 : _fileSystem(fileSystem), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archs(archs), _platform(platform), _startImageNum(startImageNum),
58 _handlers(handlers), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive), _allowRelativePaths(allowRelativePaths)
59 {
60 if ( dyldCache != nullptr ) {
61 _dyldImageArray = dyldCache->cachedDylibsImageArray();
62 if ( (dyldCache->header.otherImageArrayAddr != 0) && (dyldCache->header.progClosuresSize == 0) )
63 _makingClosuresInCache = true;
64 }
65 }
66
67
68 ClosureBuilder::~ClosureBuilder() {
69 if ( _tempPaths != nullptr )
70 PathPool::deallocate(_tempPaths);
71 if ( _mustBeMissingPaths != nullptr )
72 PathPool::deallocate(_mustBeMissingPaths);
73 if ( _objcDuplicateClassWarnings != nullptr )
74 PathPool::deallocate(_objcDuplicateClassWarnings);
75 }
76
77 bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, LinkageType linkageType,
78 uint32_t compatVersion, bool canUseSharedCacheClosure)
79 {
80 // There shouldn't be an error here as the callers should stop trying to find more images if they get an error for an image
81 _diag.assertNoError();
82
83 __block bool result = false;
84
85 // record if this is a non-overridable path
86 bool pathIsInDyldCacheWhichCannotBeOverridden = false;
87 bool dylibsExpectedOnDisk = true;
88 if ( _dyldCache != nullptr ) {
89 pathIsInDyldCacheWhichCannotBeOverridden = _dyldCache->hasNonOverridablePath(loadPath);
90 dylibsExpectedOnDisk = _dyldCache->header.dylibsExpectedOnDisk;
91 }
92
93 _pathOverrides.forEachPathVariant(loadPath, pathIsInDyldCacheWhichCannotBeOverridden, ^(const char* possibleVariantPath, bool isFallbackPath, bool& stopPathVariant) {
94
95 // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
96 bool isRPath = (strncmp(possibleVariantPath, "@rpath/", 7) == 0);
97
98 // passing a leaf name to dlopen() allows rpath searching for it
99 // FIXME: Does this apply to DYLD_INSERT_LIBRARIES too?
100 bool implictRPath = (linkageType == LinkageType::kDynamic) && (loadPath[0] != '/') && (loadPath == possibleVariantPath) && (_atPathHandling != AtPath::none);
101
102 // expand @ paths
103 forEachResolvedPathVar(possibleVariantPath, forImageChain, implictRPath, linkageType,
104 ^(const char* possiblePath, bool& stop) {
105 if ( possibleVariantPath != possiblePath )
106 _atPathUsed = true;
107
108 // look at already loaded images
109 const char* leafName = strrchr(possiblePath, '/');
110 for (BuilderLoadedImage& li: _loadedImages) {
111 if ( strcmp(li.path(), possiblePath) == 0 ) {
112 foundImage = &li;
113 result = true;
114 stop = true;
115 return;
116 }
117 else if ( isRPath ) {
118                             // Special case @rpath/ because the name in li.fileInfo.path is a full path.
119                             // Getting installName is expensive, so first see if an already loaded image
120                             // has the same leaf name, and if so see if its installName matches the requested @rpath.
121 if (const char* aLeaf = strrchr(li.path(), '/')) {
122 if ( strcmp(aLeaf, leafName) == 0 ) {
123 if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
124 foundImage = &li;
125 result = true;
126 stop = true;
127 return;
128 }
129 }
130 }
131 }
132 }
133
134 // look to see if image already loaded via a different symlink
135 bool fileFound = false;
136 uint64_t fileFoundINode = 0;
137 uint64_t fileFoundMTime = 0;
138 bool inodesMatchRuntime = false;
139 // Note, we only do this check if we even expect to find this on-disk
140 // We can also use the pathIsInDyldCacheWhichCannotBeOverridden result if we are still trying the same path
141 // it was computed from
142 if ( dylibsExpectedOnDisk || !pathIsInDyldCacheWhichCannotBeOverridden || (loadPath != possiblePath) ) {
143 if ( _fileSystem.fileExists(possiblePath, &fileFoundINode, &fileFoundMTime, nullptr, &inodesMatchRuntime) ) {
144 fileFound = true;
145 for (BuilderLoadedImage& li: _loadedImages) {
146 if ( (li.loadedFileInfo.inode == fileFoundINode) && (li.loadedFileInfo.mtime == fileFoundMTime) ) {
147 foundImage = &li;
148 result = true;
149 stop = true;
150 return;
151 }
152 }
153 }
154 }
155
156 bool unmapWhenDone = false;
157 bool contentRebased = false;
158 bool hasInits = false;
159 bool markNeverUnload = false;
160 bool mustBuildClosure = _dyldCacheInvalidFormatVersion;
161 ImageNum overrideImageNum = 0;
162 ImageNum foundImageNum = 0;
163 const MachOAnalyzer* mh = nullptr;
164 const char* filePath = nullptr;
165 LoadedFileInfo loadedFileInfo;
166
167 // look in dyld cache
168 filePath = possiblePath;
169 char realPath[MAXPATHLEN];
170 if ( _dyldImageArray != nullptr ) {
171 uint32_t dyldCacheImageIndex;
172 bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
173 if ( !foundInCache && fileFound ) {
174 // see if this is an OS dylib/bundle with a pre-built dlopen closure
175                     // We can only use the pre-built closure if this is dynamic linkage (a dlopen) and
176 // there are no roots
177 if ( canUseSharedCacheClosure && (linkageType == LinkageType::kDynamic) ) {
178 if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
179 uint64_t expectedInode;
180 uint64_t expectedModTime;
181 if ( !otherImage->isInvalid() ) {
182 bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
183                                 // use pre-built Image if it does not have mtime/inode, or if it does and they match the current file info
184 if ( !hasInodeInfo || ((expectedInode == fileFoundINode) && (expectedModTime == fileFoundMTime)) ) {
185 loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archs, _platform, realPath);
186 if ( _diag.noError() ) {
187 mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
188 foundImageNum = otherImage->imageNum();
189 unmapWhenDone = true;
190 contentRebased = false;
191 hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
192 // Use the realpath in the case where we loaded a symlink
193                                     // The closure must have recorded an alias path
194 if (realPath[0] != '\0')
195 filePath = realPath;
196 }
197 }
198 }
199 }
200 }
201 // if not found in cache, may be a symlink to something in cache
202 if ( mh == nullptr ) {
203 if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
204 foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
205 if ( foundInCache ) {
206 filePath = realPath;
207 #if BUILDING_LIBDYLD
208 // handle case where OS dylib was updated after this process launched
209 if ( foundInCache ) {
210 for (BuilderLoadedImage& li: _loadedImages) {
211 if ( strcmp(li.path(), realPath) == 0 ) {
212 foundImage = &li;
213 result = true;
214 stop = true;
215 return;
216 }
217 }
218 }
219 #endif
220 }
221 }
222 }
223 }
224
225 // if using a cached dylib, look to see if there is an override
226 if ( foundInCache ) {
227 ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
228 bool useCache = true;
229 markNeverUnload = true; // dylibs in cache, or dylibs that override cache should not be unloaded at runtime
230 const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
231 if ( image->overridableDylib() ) {
232 if ( fileFound ) {
233 uint64_t expectedInode;
234 uint64_t expectedModTime;
235 if ( image->hasFileModTimeAndInode(expectedInode, expectedModTime) ) {
236                         // macOS, where dylibs remain on disk. Only use the cache if mtime and inode have not changed.
237 useCache = ( (fileFoundINode == expectedInode) && (fileFoundMTime == expectedModTime) );
238 }
239 else if ( _makingClosuresInCache ) {
240 // during iOS cache build, don't look at files on disk, use ones in cache
241 useCache = true;
242 }
243 else {
244                         // iOS internal build. Any dylib on disk overrides the cache.
245 useCache = false;
246 }
247 }
248 if ( !useCache ) {
249 overrideImageNum = dyldCacheImageNum;
250 _foundDyldCacheRoots = true;
251 }
252 }
253 if ( useCache ) {
254 foundImageNum = dyldCacheImageNum;
255 mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
256 unmapWhenDone = false;
257 // if we are building ImageArray in dyld cache, content is not rebased
258 contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
259 hasInits = image->hasInitializers() || image->mayHavePlusLoads();
260 // If the cache format is different from dyld/libdyld then we can't use this closure.
261 if ( (_dyldCache->header.formatVersion != dyld3::closure::kFormatVersion) || !canUseSharedCacheClosure ) {
262 mustBuildClosure = true;
263 _foundDyldCacheRoots = true;
264 }
265 }
266 }
267 }
268
269 // If we are building the cache, and don't find an image, then it might be weak so just return
270 if (_makingDyldCacheImages) {
271 addMustBeMissingPath(possiblePath);
272 return;
273 }
274
275 // if not found yet, mmap file
276 if ( mh == nullptr ) {
277 loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archs, _platform, realPath);
278 mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
279 if ( mh == nullptr ) {
280 // Don't add must be missing paths for dlopen as we don't cache dlopen closures
281 if (_isLaunchClosure) {
282                 // If we found the file then we want to skip it, as it's not a valid mach-o for this platform/arch.
283                 // We can't record a skipped file's mtime/inode for caches built on a different machine than the one they run on.
284                 // In that case, we expect the file to be mastered out, as otherwise we couldn't detect whether it
285                 // changed or not on the device side.
286 if (fileFound && inodesMatchRuntime) {
287 addSkippedFile(possiblePath, fileFoundINode, fileFoundMTime);
288 } else {
289 addMustBeMissingPath(possiblePath);
290 }
291 }
292 return;
293 }
294 if ( linkageType != LinkageType::kDynamic ) {
295 // LC_LOAD_DYLIB can only link with dylibs, and DYLD_INSERT_LIBRARIES can only be dylibs
296 if ( !mh->isDylib() ) {
297 _diag.error("found '%s' which is not a dylib. Needed by '%s'", filePath, forImageChain.image.path());
298 return;
299 }
300             // verify this dylib's compatibility version is sufficient
301 const char* installName;
302 uint32_t foundCompatVers;
303 uint32_t foundCurrentVers;
304 mh->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
305 if ( (foundCompatVers < compatVersion) && mh->enforceCompatVersion() ) {
306 char foundStr[32];
307 char requiredStr[32];
308 MachOFile::packedVersionToString(foundCompatVers, foundStr);
309 MachOFile::packedVersionToString(compatVersion, requiredStr);
310 _diag.error("found '%s' which has compat version (%s) which is less than required (%s). Needed by '%s'",
311 filePath, foundStr, requiredStr, forImageChain.image.path());
312 return;
313 }
314 }
315 else if ( mh->isMainExecutable() ) {
316             // when dlopen()ing a main executable, it must be a dynamic, position-independent executable (PIE)
317 if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
318 _diag.error("not PIE");
319 return;
320 }
321 }
322 // Use the realpath in the case where we loaded a symlink
323         // The closure must have recorded an alias path
324 if (realPath[0] != '\0')
325 filePath = realPath;
326 foundImageNum = _startImageNum + _nextIndex++;
327 _foundNonCachedImage = true;
328 mustBuildClosure = true;
329 unmapWhenDone = true;
330 } else {
331 loadedFileInfo.fileContent = mh;
332 }
333
334         // if path is not the original path, or it's an inserted path (as forEachInColonList uses a stack temporary)
335 if ( (filePath != loadPath) || (linkageType == LinkageType::kInserted) ) {
336 // possiblePath may be a temporary (stack) string, since we found file at that path, make it permanent
337 filePath = strdup_temp(filePath);
338 // check if this overrides what would have been found in cache
339 // This is the case where we didn't find the image with the path in the shared cache, perhaps as it used library paths
340 // but the path we requested had pointed in to the cache
341 // FIXME: What if load path is via an @rpath and we will override the cache?
342 if ( overrideImageNum == 0 ) {
343 if ( _dyldImageArray != nullptr ) {
344 uint32_t dyldCacheImageIndex;
345 if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
346 ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
347 if ( possibleOverrideNum != foundImageNum )
348 overrideImageNum = possibleOverrideNum;
349 }
350 }
351 }
352 }
353
354 if ( !markNeverUnload ) {
355 switch (linkageType) {
356 case LinkageType::kStatic:
357 // Static linkages can only be unloaded if the image loading us can be unloaded
358 markNeverUnload = forImageChain.image.markNeverUnload;
359 break;
360 case LinkageType::kDynamic:
361 markNeverUnload = false;
362 break;
363 case LinkageType::kInserted:
364 // Inserted libraries must never be unloaded
365 markNeverUnload = true;
366 break;
367 };
368 }
369
370 if ( !markNeverUnload ) {
371 // If the parent didn't force us to be never unload, other conditions still may
372 if ( mh->hasThreadLocalVariables() ) {
373 markNeverUnload = true;
374 } else if ( mh->hasObjC() && mh->isDylib() ) {
375 markNeverUnload = true;
376 } else {
377 // record if image has DOF sections
378 __block bool hasDOFs = false;
379 mh->forEachDOFSection(_diag, ^(uint32_t offset) {
380 hasDOFs = true;
381 });
382 if ( hasDOFs )
383 markNeverUnload = true;
384 }
385 }
386
387 // Set the path again just in case it was strdup'ed.
388 loadedFileInfo.path = filePath;
389
390 // add new entry
391 BuilderLoadedImage entry;
392 entry.loadedFileInfo = loadedFileInfo;
393 entry.imageNum = foundImageNum;
394 entry.unmapWhenDone = unmapWhenDone;
395 entry.contentRebased = contentRebased;
396 entry.hasInits = hasInits;
397 entry.markNeverUnload = markNeverUnload;
398 entry.rtldLocal = false;
399 entry.isBadImage = false;
400 entry.mustBuildClosure = mustBuildClosure;
401 entry.hasMissingWeakImports = false;
402 entry.overrideImageNum = overrideImageNum;
403 _loadedImages.push_back(entry);
404 foundImage = &_loadedImages.back();
405 if ( isFallbackPath )
406 _fallbackPathUsed = true;
407 stop = true;
408 result = true;
409 });
410 if (result)
411 stopPathVariant = true;
412 }, _platform);
413
414 // If we found a file, but also had an error, then we must have logged a diagnostic for a file we couldn't use.
415 // Clear that for now.
416 // FIXME: Surface this to the user in case they wanted to see the error
417 if (result && _diag.hasError())
418 _diag.clearError();
419
420 return result;
421 }
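// Summary of the search order implemented above (derived from this function;
// a reading aid, not new behavior):
//   1. _pathOverrides supplies each path variant in turn (e.g. DYLD_LIBRARY_PATH
//      replacements before the raw load path, fallback paths after it).
//   2. Each variant gets @loader_path/@executable_path/@rpath expansion.
//   3. Each candidate is checked against already-loaded images: by exact path,
//      by @rpath leaf name plus installName, then by inode/mtime (symlinks).
//   4. The dyld shared cache is consulted, including symlink-into-cache and
//      root-override handling.
//   5. Only then is the file mmap()ed and validated from disk.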
422
423 bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
424 {
425 switch ( _atPathHandling ) {
426 case AtPath::none:
427 return false;
428 case AtPath::onlyInRPaths:
429 if ( !fromLCRPATH ) {
430 // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
431 if ( _isLaunchClosure )
432 return false;
433 }
434 break;
435 case AtPath::all:
436 break;
437 }
438 if ( strncmp(loadPath, "@loader_path/", 13) != 0 )
439 return false;
440
441 strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
442 char* lastSlash = strrchr(fixedPath, '/');
443 if ( lastSlash != nullptr ) {
444 strcpy(lastSlash+1, &loadPath[13]);
445 return true;
446 }
447 return false;
448 }
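// Illustrative expansion (paths hypothetical): if loadedImage is
//   /Applications/Foo.app/Contents/MacOS/Foo
// then a load path of "@loader_path/../Frameworks/Bar.dylib" becomes
//   /Applications/Foo.app/Contents/MacOS/../Frameworks/Bar.dylib
// i.e. the leaf name of the loading image is replaced by the @-path tail.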
449
450 bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, char fixedPath[])
451 {
452 switch ( _atPathHandling ) {
453 case AtPath::none:
454 return false;
455 case AtPath::onlyInRPaths:
456 if ( !fromLCRPATH )
457 return false;
458 break;
459 case AtPath::all:
460 break;
461 }
462 if ( strncmp(loadPath, "@executable_path/", 17) != 0 )
463 return false;
464
465 if ( _atPathHandling != AtPath::all )
466 return false;
467
468 strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
469 char* lastSlash = strrchr(fixedPath, '/');
470 if ( lastSlash != nullptr ) {
471 strcpy(lastSlash+1, &loadPath[17]);
472 return true;
473 }
474 return false;
475 }
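// Same idea as above, but anchored at the main executable (paths hypothetical):
// with _mainProgLoadPath == /usr/bin/tool, "@executable_path/plugins/p.dylib"
// expands to /usr/bin/plugins/p.dylib. Note the extra AtPath::all gate above.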
476
477 void ClosureBuilder::forEachResolvedPathVar(const char* loadPath, const LoadedImageChain& forImageChain,
478 bool implictRPath, LinkageType linkageType,
479 void (^handler)(const char* possiblePath, bool& stop))
480 {
481 // don't expand @loader_path or @executable_path if disallowed
482 if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') && (loadPath[1] != 'r') ) {
483 bool stop = false;
484 handler(loadPath, stop);
485 return;
486 }
487
488 // quick out if not @ path or not implicit rpath
489 if ( !implictRPath && (loadPath[0] != '@') ) {
490 bool stop = false;
491 handler(loadPath, stop);
492 return;
493 }
494
495 // expand @loader_path
496 // Note this isn't supported for DYLD_INSERT_LIBRARIES
497 BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
498 if ( (linkageType != LinkageType::kInserted) && expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) ) {
499 bool stop = false;
500 handler(tempPath, stop);
501 return;
502 }
503
504 // expand @executable_path
505 // Note this is supported for DYLD_INSERT_LIBRARIES
506 if ( expandAtExecutablePath(loadPath, false, tempPath) ) {
507 bool stop = false;
508 handler(tempPath, stop);
509 return;
510 }
511
512 // expand @rpath
513 // Note this isn't supported for DYLD_INSERT_LIBRARIES
514 const char* rpathTail = nullptr;
515 char implicitRpathBuffer[PATH_MAX];
516 if ( linkageType != LinkageType::kInserted ) {
517 if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
518 // note: rpathTail starts with '/'
519 rpathTail = &loadPath[6];
520 }
521 else if ( implictRPath ) {
522             // make rpathTail start with '/'
523 strlcpy(implicitRpathBuffer, "/", PATH_MAX);
524 strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
525 rpathTail = implicitRpathBuffer;
526 }
527 }
528 if ( rpathTail != nullptr ) {
529         // rpath expansion is technically a stack of rpath dirs, built starting with the main executable and pushing
530         // LC_RPATHs from each dylib as they are recursively loaded. Our imageChain represents that stack.
531 __block bool done = false;
532 for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && !done; link = link->previous) {
533 link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
534 // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.loadedFileInfo.path);
535 if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, tempPath) ) {
536                     // @loader_path allowed and expanded
537 strlcat(tempPath, rpathTail, PATH_MAX);
538 handler(tempPath, stop);
539 }
540 else if ( rPath[0] == '/' ) {
541 // LC_RPATH is an absolute path, not blocked by AtPath::none
542 strlcpy(tempPath, rPath, PATH_MAX);
543 strlcat(tempPath, rpathTail, PATH_MAX);
544 handler(tempPath, stop);
545 }
546 if (stop)
547 done = true;
548 #if 0
549 if ( _fileSystem.fileExists(tempPath) ) {
550 stop = true;
551 result = strdup_temp(tempPath);
552 }
553 else {
554 // Don't add must be missing paths for dlopen as we don't cache dlopen closures
555 if (_isLaunchClosure) {
556 addMustBeMissingPath(tempPath);
557 }
558 }
559 #endif
560 });
561 }
562 if (done)
563 return;
564 }
565
566 bool stop = false;
567 handler(loadPath, stop);
568 }
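// Worked example of the @rpath stack above (all paths hypothetical): resolving
// "@rpath/Bar.dylib" requested by Foo.dylib, which was loaded by the main
// executable, where:
//   Foo.dylib  has LC_RPATH  @loader_path/../Frameworks
//   main exe   has LC_RPATH  /usr/lib/swift
// the handler is invoked with, in order:
//   <dir of Foo.dylib>/../Frameworks/Bar.dylib
//   /usr/lib/swift/Bar.dylib
//   @rpath/Bar.dylib          (the final literal fallback above)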
569
570 const char* ClosureBuilder::strdup_temp(const char* path)
571 {
572 if ( _tempPaths == nullptr )
573 _tempPaths = PathPool::allocate();
574 return _tempPaths->add(path);
575 }
576
577 void ClosureBuilder::addMustBeMissingPath(const char* path)
578 {
579 //fprintf(stderr, "must be missing: %s\n", path);
580 if ( _mustBeMissingPaths == nullptr )
581 _mustBeMissingPaths = PathPool::allocate();
582 _mustBeMissingPaths->add(path);
583 }
584
585 void ClosureBuilder::addSkippedFile(const char* path, uint64_t inode, uint64_t mtime)
586 {
587 _skippedFiles.push_back({ strdup_temp(path), inode, mtime });
588 }
589
590 ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
591 {
592 for (BuilderLoadedImage& li : _loadedImages) {
593 if ( li.imageNum == imageNum ) {
594 return li;
595 }
596 }
597 for (BuilderLoadedImage& li : _loadedImages) {
598 if ( li.overrideImageNum == imageNum ) {
599 return li;
600 }
601 }
602 assert(0 && "LoadedImage not found");
603 }
604
605 ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
606 {
607 for (BuilderLoadedImage& li : _loadedImages) {
608 if ( li.loadAddress() == mh ) {
609 return li;
610 }
611 }
612 assert(0 && "LoadedImage not found");
613 }
614
615 const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
616 {
617 return findLoadedImage(imageNum).loadAddress();
618 }
619
620 const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
621 {
622 for (const BuilderLoadedImage& li : _loadedImages) {
623 if ( li.loadAddress() == mh ) {
624 if (li.isBadImage) {
625             // Bad image during building of group 1 closures, so the dependents array
626 // is potentially incomplete.
627 return nullptr;
628 }
629 ImageNum childNum = li.dependents[depIndex].imageNum();
630 // This is typically something like a missing weak-dylib we are re-exporting a weak-import symbol from
631 if (childNum == kMissingWeakLinkedImage)
632 return nullptr;
633 return machOForImageNum(childNum);
634 }
635 }
636 return nullptr;
637 }
638
639 ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
640 {
641 for (const BuilderLoadedImage& li : _loadedImages) {
642 if ( li.loadAddress() == mh ) {
643 return li.imageNum;
644 }
645 }
646 assert(0 && "unknown mach-o");
647 return 0;
648 }
649
650 void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain, bool canUseSharedCacheClosure)
651 {
652 // if dependents is set, then we have already loaded this
653 if ( forImageChain.image.dependents.begin() != nullptr )
654 return;
655
656 uintptr_t startDepIndex = _dependencies.count();
657 // add dependents
658 __block uint32_t depIndex = 0;
659 forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool &stop) {
660 Image::LinkKind kind = Image::LinkKind::regular;
661 if ( isWeak )
662 kind = Image::LinkKind::weak;
663 else if ( isReExport )
664 kind = Image::LinkKind::reExport;
665 else if ( isUpward )
666 kind = Image::LinkKind::upward;
667 BuilderLoadedImage* foundImage;
668 if ( findImage(loadPath, forImageChain, foundImage, LinkageType::kStatic, compatVersion, canUseSharedCacheClosure) ) {
669 ImageNum foundImageNum = foundImage->imageNum;
670 if ( _diag.noError() )
671 _dependencies.push_back(Image::LinkedImage(kind, foundImageNum));
672 }
673 else if ( isWeak ) {
674 _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
675 // <rdar://problem/54387345> don't let an error loading weak dylib cause everything to fail
676 // _diag is checked after each dependent load, so if there is an error it was with loading the current dylib.
677             // Since it is a weak load, it is ok to ignore it and go on.
678 _diag.clearError();
679 }
680 else {
681 BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
682 extra[0] = '\0';
683 const char* targetLeaf = strrchr(loadPath, '/');
684 if ( targetLeaf == nullptr )
685 targetLeaf = loadPath;
686 if ( _mustBeMissingPaths != nullptr ) {
687 strcpy(extra, ", tried but didn't find: ");
688 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
689 const char* aLeaf = strrchr(aPath, '/');
690 if ( aLeaf == nullptr )
691 aLeaf = aPath;
692 if ( strcmp(targetLeaf, aLeaf) == 0 ) {
693 strlcat(extra, "'", 4096);
694 strlcat(extra, aPath, 4096);
695 strlcat(extra, "' ", 4096);
696 }
697 });
698 }
699 if ( !_skippedFiles.empty() ) {
700 strcpy(extra, ", tried but invalid: ");
701 for (const SkippedFile& skippedFile : _skippedFiles) {
702 const char* aPath = skippedFile.path;
703 const char* aLeaf = strrchr(aPath, '/');
704 if ( aLeaf == nullptr )
705 aLeaf = aPath;
706 if ( strcmp(targetLeaf, aLeaf) == 0 ) {
707 strlcat(extra, "'", 4096);
708 strlcat(extra, aPath, 4096);
709 strlcat(extra, "' ", 4096);
710 }
711 }
712 }
713 if ( _diag.hasError() ) {
714 #if BUILDING_CACHE_BUILDER
715 std::string errorMessageBuffer = _diag.errorMessage();
716 const char* msg = errorMessageBuffer.c_str();
717 #else
718 const char* msg = _diag.errorMessage();
719 #endif
720 char msgCopy[strlen(msg)+4];
721 strcpy(msgCopy, msg);
722 _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
723 }
724 else {
725 _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
726 }
727 if ( _launchErrorInfo != nullptr ) {
728 _launchErrorInfo->kind = DYLD_EXIT_REASON_DYLIB_MISSING;
729 _launchErrorInfo->clientOfDylibPath = strdup_temp(forImageChain.image.path());
730 _launchErrorInfo->targetDylibPath = strdup_temp(loadPath);
731 _launchErrorInfo->symbol = nullptr;
732 }
733 }
734 ++depIndex;
735 if ( _diag.hasError() )
736 stop = true;
737 });
738 if ( _diag.hasError() )
739 return;
740 forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);
741
742 // breadth first recurse
743 for (Image::LinkedImage dep : forImageChain.image.dependents) {
744 // don't recurse upwards
745 if ( dep.kind() == Image::LinkKind::upward )
746 continue;
747 // don't recurse down missing weak links
748 if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
749 continue;
750 BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
751 LoadedImageChain chain = { &forImageChain, depLoadedImage };
752 recursiveLoadDependents(chain, canUseSharedCacheClosure);
753 if ( _diag.hasError() )
754 break;
755 }
756 }
757
758 void ClosureBuilder::loadDanglingUpwardLinks(bool canUseSharedCacheClosure)
759 {
760 bool danglingFixed;
761 do {
762 danglingFixed = false;
763 for (BuilderLoadedImage& li : _loadedImages) {
764 if ( li.dependents.begin() == nullptr ) {
765                 // this image has not had its dependents set (probably a dangling upward link, or it is referenced by an upward link)
766 LoadedImageChain chain = { nullptr, li };
767 recursiveLoadDependents(chain, canUseSharedCacheClosure);
768 danglingFixed = true;
769 break;
770 }
771 }
772 } while (danglingFixed && _diag.noError());
773 }
774
775 bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
776 {
777 // only set on dylibs in the dyld shared cache
778 if ( !_makingDyldCacheImages )
779 return false;
780
781 // on macOS dylibs always override cache
782 if ( _platform == Platform::macOS )
783 return true;
784
785 // on embedded platforms with Internal cache, allow overrides
786 if ( !_makingCustomerCache )
787 return true;
788
789 // embedded platform customer caches, no overrides
790 return false; // FIXME, allow libdispatch.dylib to be overridden
791 }
792
793 void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
794 {
795 const MachOAnalyzer* macho = forImage.loadAddress();
796 // set ImageNum
797 writer.setImageNum(forImage.imageNum);
798
799 // set flags
800 writer.setHasWeakDefs(macho->hasWeakDefs());
801 writer.setIsBundle(macho->isBundle());
802 writer.setIsDylib(macho->isDylib());
803 writer.setIs64(macho->is64());
804 writer.setIsExecutable(macho->isMainExecutable());
805 writer.setUses16KPages(macho->uses16KPages());
806 writer.setOverridableDylib(overridableDylib(forImage));
807 writer.setInDyldCache(macho->inDyldCache());
808 if ( macho->hasObjC() ) {
809 writer.setHasObjC(true);
810 bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
811 writer.setHasPlusLoads(hasPlusLoads);
812 if ( hasPlusLoads )
813 forImage.hasInits = true;
814 }
815 else {
816 writer.setHasObjC(false);
817 writer.setHasPlusLoads(false);
818 }
819
820 if ( forImage.markNeverUnload ) {
821 writer.setNeverUnload(true);
822 }
823
824 #if BUILDING_DYLD || BUILDING_LIBDYLD
825 if ( _foundDyldCacheRoots ) {
826 // If we had roots, then some images are potentially on-disk while others are
827 // being rebuilt for a new initializer order, but do not exist on disk
828 if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
829 // don't add file info for shared cache files mastered out of final file system
830 }
831 else {
832 // file is either not in cache or is in cache but not mastered out
833 writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
834 }
835 } else {
836 // shared cache not built by dyld or libdyld.dylib, so must be real file
837 writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
838 }
839 #else
840 if ( _platform == Platform::macOS || MachOFile::isSimulatorPlatform(_platform) ) {
841 if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
842 // don't add file info for shared cache files mastered out of final file system
843 }
844 else {
845 // file is either not in cache or is in cache but not mastered out
846 writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
847 }
848 }
849 else {
850 // all other platforms, cache is built off-device, so inodes are not known
851 }
852 #endif
853
854 // add info on how to load image
855 if ( !macho->inDyldCache() ) {
856 writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
857 // add code signature, if signed
858 uint32_t codeSigFileOffset;
859 uint32_t codeSigSize;
860 if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
861 writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
862 macho->forEachCDHash(^(const uint8_t *cdHash) {
863 writer.addCDHash(cdHash);
864 });
865 }
866 // add FairPlay encryption range if encrypted
867 uint32_t fairPlayFileOffset;
868 uint32_t fairPlaySize;
869 if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
870 writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
871 }
872 }
873
874 // set path
875 writer.addPath(forImage.path());
876 if ( _aliases != nullptr ) {
877 for (const CachedDylibAlias& alias : *_aliases) {
878 if ( strcmp(alias.realPath, forImage.path()) == 0 )
879 writer.addPath(alias.aliasPath);
880 }
881 }
882
883 // set uuid, if has one
884 uuid_t uuid;
885 if ( macho->getUuid(uuid) )
886 writer.setUUID(uuid);
887
888 // set dependents
889 writer.setDependents(forImage.dependents);
890
891 // set segments
892 addSegments(writer, macho);
893
894 // record if this dylib overrides something in the cache
895 if ( forImage.overrideImageNum != 0 ) {
896 writer.setAsOverrideOf(forImage.overrideImageNum);
897 const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
898 writer.addPath(overridePath);
899 if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
900 _libDyldImageNum = forImage.imageNum;
901 else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
902 _libSystemImageNum = forImage.imageNum;
903 }
904
905 // do fix up info for non-cached, and cached if building cache
906 if ( !macho->inDyldCache() || _makingDyldCacheImages ) {
907 if ( macho->hasChainedFixups() ) {
908 addChainedFixupInfo(writer, forImage);
909 }
910 else {
911 if ( _handlers != nullptr ) {
912 reportRebasesAndBinds(writer, forImage);
913 }
914 else {
915 // Note we have to do binds before rebases so that we know if we have missing lazy binds
916 addBindInfo(writer, forImage);
917 if ( _diag.noError() )
918 addRebaseInfo(writer, macho);
919 }
920 }
921 }
922 if ( _diag.hasError() ) {
923 writer.setInvalid();
924 return;
925 }
926
927 // Don't build iOSMac for now. Just add an invalid placeholder
928 if ( _makingDyldCacheImages && strncmp(forImage.path(), "/System/iOSSupport/", 19) == 0 ) {
929 writer.setInvalid();
930 return;
931 }
932
933 // add initializers
934 bool contentRebased = forImage.contentRebased;
935 __block unsigned initCount = 0;
936 Diagnostics initializerDiag;
937 macho->forEachInitializer(initializerDiag, contentRebased, ^(uint32_t offset) {
938 ++initCount;
939 }, _dyldCache);
940 if ( initializerDiag.noError() ) {
941 if ( initCount != 0 ) {
942 BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
943 __block unsigned index = 0;
944 macho->forEachInitializer(_diag, contentRebased, ^(uint32_t offset) {
945 initOffsets[index++] = offset;
946 }, _dyldCache);
947 writer.setInitOffsets(initOffsets, initCount);
948 forImage.hasInits = true;
949 }
950 }
951 else {
952         // mod_init_func section is malformed, might be self-modifying pointers
953 macho->forEachInitializerPointerSection(_diag, ^(uint32_t sectionOffset, uint32_t sectionSize, const uint8_t* content, bool& stop) {
954 writer.setInitSectRange(sectionOffset, sectionSize);
955 forImage.hasInits = true;
956 });
957 }
958
959
960 // add terminators (except for dylibs in the cache because they are never unloaded)
961 if ( !macho->inDyldCache() ) {
962 __block unsigned termCount = 0;
963 macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
964 ++termCount;
965 });
966 if ( termCount != 0 ) {
967 BLOCK_ACCCESSIBLE_ARRAY(uint32_t, termOffsets, termCount);
968 __block unsigned index = 0;
969 macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
970 termOffsets[index++] = offset;
971 });
972 writer.setTermOffsets(termOffsets, termCount);
973 }
974 }
975
976 // record if image has DOF sections
977 STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
978 macho->forEachDOFSection(_diag, ^(uint32_t offset) {
979 dofSectionOffsets.push_back(offset);
980 });
981 if ( !dofSectionOffsets.empty() ) {
982 writer.setDofOffsets(dofSectionOffsets);
983 }
984
985 }
986
987 void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
988 {
989 const uint32_t segCount = mh->segmentCount();
990 if ( mh->inDyldCache() ) {
991 uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
992 BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
993 mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
994 segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
995 });
996 writer.setCachedSegments(segs, segCount);
997 }
998 else {
999 const uint32_t pageSize = (mh->uses16KPages() ? 0x4000 : 0x1000);
1000 __block uint32_t diskSegIndex = 0;
1001 __block uint32_t totalPageCount = 0;
1002 __block uint32_t lastFileOffsetEnd = 0;
1003 __block uint64_t lastVmAddrEnd = 0;
1004 BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
1005 mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
1006 if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
1007 Image::DiskSegment filePadding;
1008 filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
1009 filePadding.vmPageCount = 0;
1010 filePadding.permissions = 0;
1011 filePadding.paddingNotSeg = 1;
1012 dsegs[diskSegIndex++] = filePadding;
1013 }
1014 if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
1015 Image::DiskSegment vmPadding;
1016 vmPadding.filePageCount = 0;
1017 vmPadding.vmPageCount = (info.vmAddr - lastVmAddrEnd)/pageSize;
1018 vmPadding.permissions = 0;
1019 vmPadding.paddingNotSeg = 1;
1020 dsegs[diskSegIndex++] = vmPadding;
1021 totalPageCount += vmPadding.vmPageCount;
1022 }
1023 {
1024 Image::DiskSegment segInfo;
1025 segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
1026 segInfo.vmPageCount = (info.vmSize+pageSize-1)/pageSize;
1027 segInfo.permissions = info.protections & 7;
1028 segInfo.paddingNotSeg = 0;
1029 if ( info.readOnlyData )
1030 segInfo.permissions = Image::DiskSegment::kReadOnlyDataPermissions;
1031 dsegs[diskSegIndex++] = segInfo;
1032 totalPageCount += segInfo.vmPageCount;
1033 if ( info.fileSize != 0 )
1034 lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
1035 if ( info.vmSize != 0 )
1036 lastVmAddrEnd = info.vmAddr + info.vmSize;
1037 }
1038 });
1039 writer.setDiskSegments(dsegs, diskSegIndex);
1040 }
1041 }
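// Worked example of the DiskSegment encoding above (values hypothetical, 4KB
// pages):
//   __TEXT  fileOffset=0       fileSize=0x3000  vmSize=0x3000  r-x
//   __DATA  fileOffset=0x3000  fileSize=0x1000  vmSize=0x2000  rw-
// encodes as two entries: { filePageCount=3, vmPageCount=3, permissions=5 }
// and { filePageCount=1, vmPageCount=2, permissions=3 }; any gap between a
// segment's fileOffset/vmAddr and the previous segment's end would get an
// extra entry with paddingNotSeg=1.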
1042
1043 static bool isTupleFixup(uint64_t tupleSectVmStartOffset, uint64_t tupleSectVmEndOffset, uint64_t imageOffsetOfFixup, uint32_t entrySize, uint32_t& tupleIndex)
1044 {
1045 if ( imageOffsetOfFixup < tupleSectVmStartOffset )
1046 return false;
1047 if ( imageOffsetOfFixup > tupleSectVmEndOffset )
1048 return false;
1049 uint64_t offsetIntoSection = imageOffsetOfFixup - tupleSectVmStartOffset;
1050 tupleIndex = (uint32_t)(offsetIntoSection/entrySize);
1051 return (tupleIndex*entrySize == offsetIntoSection) || ((tupleIndex*entrySize+entrySize/2) == offsetIntoSection);
1052 }
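// Example (values illustrative): with 8-byte pointers, entrySize is 16, so a
// fixup at section offset 0x20 is tuple index 2's first slot (the rebased new
// implementation) and one at 0x28 is tuple index 2's second slot (the bound
// stock implementation); offsets not landing on either slot return false.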
1053
1054 void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
1055 {
1056 const unsigned pointerSize = mh->pointerSize();
1057 const uint64_t baseAddress = mh->preferredLoadAddress();
1058 mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool &stop) {
1059 const uint32_t entrySize = 2*pointerSize;
1060 const uint32_t tupleCount = (uint32_t)(sectVmSize/entrySize);
1061 const uint64_t sectVmEndOffset = sectVmOffset + sectVmSize;
1062 BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
1063 for (uint32_t i=0; i < tupleCount; ++i) {
1064 resolvedTuples[i].stockImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1065 resolvedTuples[i].stockImplementation.absolute.value = 0;
1066 resolvedTuples[i].newImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1067 resolvedTuples[i].newImplementation.absolute.value = 0;
1068 }
1069         // figure out what the new implementation (rebase) and stock implementation (bind) of each tuple point to
1070 image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& rebaseStop) {
1071 uint32_t tupleIndex;
1072 if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToRebase, entrySize, tupleIndex) ) {
1073 const void* content = (uint8_t*)mh + imageOffsetToRebase;
1074 uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
1075 resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
1076 resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
1077 resolvedTuples[tupleIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
1078 }
1079 },
1080 ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool &bindStop) {
1081 uint32_t tupleIndex;
1082 if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToBind, entrySize, tupleIndex) ) {
1083 resolvedTuples[tupleIndex].stockImplementation = bindTarget;
1084 }
1085 },
1086 ^(uint64_t imageOffsetToStartsInfo, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
1087 mh->withChainStarts(_diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* startsInfo) {
1088 mh->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
1089 uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)mh;
1090 uint32_t tupleIndex;
1091 if ( !isTupleFixup(sectVmOffset, sectVmEndOffset, fixupOffset, entrySize, tupleIndex) )
1092 return;
1093 uint32_t bindOrdinal;
1094 uint64_t rebaseTargetOffset;
1095 if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
1096 if ( bindOrdinal < targets.count() ) {
1097 resolvedTuples[tupleIndex].stockImplementation = targets[bindOrdinal];
1098 }
1099 else {
1100 _diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
1101 fixupsStop = true;
1102 }
1103 }
1104 else if ( fixupLoc->isRebase(segInfo->pointer_format, baseAddress, rebaseTargetOffset) ) {
1105 resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
1106 resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
1107 resolvedTuples[tupleIndex].newImplementation.image.offset = rebaseTargetOffset;
1108 }
1109 });
1110 });
1111 },
1112 ^(uint64_t imageOffsetToFixup) {
1113 // objc optimisation can't be interposed so nothing to do here.
1114 },
1115 ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool &bindStop) {
1116 // objc protocol optimisation fixups can't be interposed so nothing to do here.
1117 },
1118 ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool &fixupStop) {
1119 // objc selector optimisation fixups can't be interposed so nothing to do here.
1120 },
1121 ^(uint64_t imageOffsetToFixup, bool &fixupStop) {
1122 // objc stable Swift optimisation fixups can't be interposed so nothing to do here.
1123 },
1124 ^(uint64_t imageOffsetToFixup, bool &fixupStop) {
1125 // objc method list optimisation fixups can't be interposed so nothing to do here.
1126 });
1127
1128 // remove any tuples in which both sides are not set (or target is weak-import NULL)
1129 STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
1130 for (uint32_t i=0; i < tupleCount; ++i) {
1131 if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
1132 && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
1133 goodTuples.push_back(resolvedTuples[i]);
1134 }
1135 writer.addInterposingTuples(goodTuples);
1136 _interposingTuplesUsed = !goodTuples.empty();
1137
1138 // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
1139 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
1140 for (const InterposingTuple& aTuple : goodTuples) {
1141 if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
1142 uint32_t imageIndex;
1143 assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
1144 ImageNum imageInCache = imageIndex+1;
1145 Closure::PatchEntry patch;
1146 patch.exportCacheOffset = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
1147 patch.overriddenDylibInCache = imageInCache;
1148 patch.replacement = aTuple.newImplementation;
1149 patches.push_back(patch);
1150 }
1151 }
1152 writer.addCachePatches(patches);
1153 });
1154 }
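// For reference: each tuple in a __DATA,__interpose section is the pair laid
// down by the DYLD_INTERPOSE(replacement, replacee) macro (see
// <mach-o/dyld-interposing.h>). The first pointer (a rebase) is the new
// implementation and the second (a bind) is the stock implementation being
// replaced, which is why the rebase and bind callbacks above fill in opposite
// sides of each tuple.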
1155
1156 void ClosureBuilder::addRebaseInfo(ImageWriter& writer, const MachOAnalyzer* mh)
1157 {
1158 const uint64_t ptrSize = mh->pointerSize();
1159 Image::RebasePattern maxLeapPattern = { 0xFFFFF, 0, 0xF };
1160 const uint64_t maxLeapCount = maxLeapPattern.repeatCount * maxLeapPattern.skipCount;
1161 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
1162 __block uint64_t lastLocation = -ptrSize;
1163 mh->forEachRebase(_diag, !_foundMissingLazyBinds, ^(uint64_t runtimeOffset, bool& stop) {
1164 const uint64_t delta = runtimeOffset - lastLocation;
1165 const bool aligned = ((delta % ptrSize) == 0);
1166 if ( delta == ptrSize ) {
1167 // this rebase location is contiguous to previous
1168 if ( rebaseEntries.back().contigCount < 255 ) {
1169 // just bump previous's contigCount
1170 rebaseEntries.back().contigCount++;
1171 }
1172 else {
1173 // previous contiguous run already has max 255, so start a new run
1174 rebaseEntries.push_back({ 1, 1, 0 });
1175 }
1176 }
1177 else if ( aligned && (delta <= (ptrSize*15)) ) {
1178 // this rebase is within skip distance of last rebase
1179 rebaseEntries.back().skipCount = (uint8_t)((delta-ptrSize)/ptrSize);
1180 int lastIndex = (int)(rebaseEntries.count() - 1);
1181 if ( lastIndex > 1 ) {
1182 if ( (rebaseEntries[lastIndex].contigCount == rebaseEntries[lastIndex-1].contigCount)
1183 && (rebaseEntries[lastIndex].skipCount == rebaseEntries[lastIndex-1].skipCount) ) {
1184                 // this entry has the same contig and skip as the previous, so remove it and bump the repeat count of the previous
1185 rebaseEntries.pop_back();
1186 rebaseEntries.back().repeatCount += 1;
1187 }
1188 }
1189 rebaseEntries.push_back({ 1, 1, 0 });
1190 }
1191 else {
1192 uint64_t advanceCount = (delta-ptrSize);
1193 if ( (runtimeOffset < lastLocation) && (lastLocation != -ptrSize) ) {
1194                 // out-of-order rebase! handle this by resetting the rebase offset to zero
1195 rebaseEntries.push_back({ 0, 0, 0 });
1196 advanceCount = runtimeOffset;
1197 }
1198 // if next rebase is too far to reach with one pattern, use series
1199 while ( advanceCount > maxLeapCount ) {
1200 rebaseEntries.push_back(maxLeapPattern);
1201 advanceCount -= maxLeapCount;
1202 }
1203 // if next rebase is not reachable with skipCount==1 or skipCount==15, add intermediate
1204 while ( advanceCount > maxLeapPattern.repeatCount ) {
1205 uint64_t count = advanceCount / maxLeapPattern.skipCount;
1206 rebaseEntries.push_back({ (uint32_t)count, 0, maxLeapPattern.skipCount });
1207 advanceCount -= (count*maxLeapPattern.skipCount);
1208 }
1209 if ( advanceCount != 0 )
1210 rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
1211 rebaseEntries.push_back({ 1, 1, 0 });
1212 }
1213 lastLocation = runtimeOffset;
1214 });
1215 writer.setRebaseInfo(rebaseEntries);
1216
1217 // i386 programs also use text relocs to rebase stubs
1218 if ( mh->cputype == CPU_TYPE_I386 ) {
1219 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
1220 __block uint64_t lastOffset = -4;
1221 mh->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
1222 if ( textRebases.freeCount() < 2 ) {
1223 _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
1224 stop = true;
1225 }
1226 bool mergedIntoPrevious = false;
1227 if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
1228 uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
1229 if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
1230 textRebases.back().repeatCount = 2;
1231 textRebases.back().skipCount = skipAmount;
1232 mergedIntoPrevious = true;
1233 }
1234 else if ( textRebases.back().skipCount == skipAmount ) {
1235 textRebases.back().repeatCount += 1;
1236 mergedIntoPrevious = true;
1237 }
1238 }
1239 if ( !mergedIntoPrevious ) {
1240 Image::TextFixupPattern pattern;
1241 pattern.target.raw = 0;
1242 pattern.startVmOffset = (uint32_t)runtimeOffset;
1243 pattern.repeatCount = 1;
1244 pattern.skipCount = 0;
1245 textRebases.push_back(pattern);
1246 }
1247 lastOffset = runtimeOffset;
1248 });
1249 writer.setTextRebaseInfo(textRebases);
1250 }
1251 }
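// Example of the RebasePattern encoding built above (offsets illustrative):
// with 8-byte pointers, rebases at runtime offsets 0x1000, 0x1008, 0x1010
// become cursor-advance entries (contigCount=0) followed by
// { repeatCount=1, contigCount=3, skipCount=0 }: the first rebase pushes
// { 1, 1, 0 } and the two contiguous ones just bump contigCount. A fourth
// rebase at 0x1030 would set skipCount=3 on that entry (three pointer slots
// skipped) and push a fresh { 1, 1, 0 }.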
1252
1253
1254 void ClosureBuilder::forEachBind(BuilderLoadedImage& forImage, void (^handler)(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop),
1255 void (^strongHandler)(const char* strongSymbolName),
1256 void (^missingLazyBindHandler)())
1257 {
1258 __block int lastLibOrdinal = 256;
1259 __block const char* lastSymbolName = nullptr;
1260 __block uint64_t lastAddend = 0;
1261 __block Image::ResolvedSymbolTarget target;
1262 __block ResolvedTargetInfo targetInfo;
1263 forImage.loadAddress()->forEachBind(_diag, ^(uint64_t runtimeOffset, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind, uint64_t addend, bool& stop) {
1264 if ( (symbolName == lastSymbolName) && (libOrdinal == lastLibOrdinal) && (addend == lastAddend) ) {
1265 // same symbol lookup as last location
1266 handler(runtimeOffset, target, targetInfo, stop);
1267 }
1268 else if ( findSymbol(forImage, libOrdinal, symbolName, weakImport, lazyBind, addend, target, targetInfo) ) {
1269 if ( !targetInfo.skippableWeakDef ) {
1270 handler(runtimeOffset, target, targetInfo, stop);
1271 lastSymbolName = symbolName;
1272 lastLibOrdinal = libOrdinal;
1273 lastAddend = addend;
1274 }
1275 }
1276 else {
1277 stop = true;
1278 }
1279 }, ^(const char* symbolName) {
1280 strongHandler(symbolName);
1281 }, ^() {
1282 missingLazyBindHandler();
1283 });
1284 }
1285
1286 void ClosureBuilder::addBindInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1287 {
1288 const uint32_t ptrSize = forImage.loadAddress()->pointerSize();
1289 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
1290 __block uint64_t lastOffset = -ptrSize;
1291 __block Image::ResolvedSymbolTarget lastTarget = { {0, 0} };
1292 forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
1293 if ( targetInfo.weakBindCoalese ) {
1294             // there may be a previous bind to this location;
1295             // if so, update that rather than create a new BindPattern
1296 for (Image::BindPattern& aBind : binds) {
1297 if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
1298 aBind.target = target;
1299 return;
1300 }
1301 }
1302 }
1303 bool mergedIntoPrevious = false;
1304 if ( !mergedIntoPrevious && (target == lastTarget) && (runtimeOffset > lastOffset) && !binds.empty() ) {
1305 uint64_t skipAmount = (runtimeOffset - lastOffset - ptrSize)/ptrSize;
1306 if ( skipAmount*ptrSize != (runtimeOffset - lastOffset - ptrSize) ) {
1307 // misaligned pointer means we cannot optimize
1308 }
1309 else {
1310 if ( (binds.back().repeatCount == 1) && (binds.back().skipCount == 0) && (skipAmount <= 255) ) {
1311 binds.back().repeatCount = 2;
1312 binds.back().skipCount = skipAmount;
1313 assert(binds.back().skipCount == skipAmount); // check overflow
1314 mergedIntoPrevious = true;
1315 }
1316 else if ( (binds.back().skipCount == skipAmount) && (binds.back().repeatCount < 0xfff) ) {
1317 uint32_t prevRepeatCount = binds.back().repeatCount;
1318 binds.back().repeatCount += 1;
1319 assert(binds.back().repeatCount > prevRepeatCount); // check overflow
1320 mergedIntoPrevious = true;
1321 }
1322 }
1323 }
1324 if ( (target == lastTarget) && (runtimeOffset == lastOffset) && !binds.empty() ) {
1325 // duplicate bind for same location, ignore this one
1326 mergedIntoPrevious = true;
1327 }
1328 if ( !mergedIntoPrevious ) {
1329 Image::BindPattern pattern;
1330 pattern.target = target;
1331 pattern.startVmOffset = runtimeOffset;
1332 pattern.repeatCount = 1;
1333 pattern.skipCount = 0;
1334 assert(pattern.startVmOffset == runtimeOffset);
1335 binds.push_back(pattern);
1336 }
1337 lastTarget = target;
1338 lastOffset = runtimeOffset;
1339 }, ^(const char* strongSymbolName) {
1340 if ( !_makingDyldCacheImages ) {
1341 // something has a strong symbol definition that may override a weak impl in the dyld cache
1342 Image::ResolvedSymbolTarget strongOverride;
1343 ResolvedTargetInfo strongTargetInfo;
1344 if ( findSymbolInImage(forImage.loadAddress(), strongSymbolName, 0, false, false, strongOverride, strongTargetInfo) ) {
1345 for (const BuilderLoadedImage& li : _loadedImages) {
1346 if ( li.loadAddress()->inDyldCache() && li.loadAddress()->hasWeakDefs() ) {
1347 Image::ResolvedSymbolTarget implInCache;
1348 ResolvedTargetInfo implInCacheInfo;
1349 if ( findSymbolInImage(li.loadAddress(), strongSymbolName, 0, false, false, implInCache, implInCacheInfo) ) {
1350 // found another instance in some dylib in dyld cache, will need to patch it
1351 Closure::PatchEntry patch;
1352 patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
1353 patch.overriddenDylibInCache = li.imageNum;
1354 patch.replacement = strongOverride;
1355 _weakDefCacheOverrides.push_back(patch);
1356 }
1357 }
1358 }
1359 }
1360 }
1361 }, ^() {
1362 _foundMissingLazyBinds = true;
1363 });
1364
1365 // check for __dyld section in main executable to support licenseware
1366 if ( forImage.loadAddress()->filetype == MH_EXECUTE ) {
1367 forImage.loadAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
1368 if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
1369 // find dyld3::compatFuncLookup in libdyld.dylib
1370 assert(_libDyldImageNum != 0);
1371 Image::ResolvedSymbolTarget lookupFuncTarget;
1372 ResolvedTargetInfo lookupFuncInfo;
1373 if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld316compatFuncLookupEPKcPPv", 0, false, false, lookupFuncTarget, lookupFuncInfo) ) {
1374 // add bind to set second pointer in __dyld section to be dyld3::compatFuncLookup
1375 uint64_t runtimeOffset = sectInfo.sectAddr - forImage.loadAddress()->preferredLoadAddress() + forImage.loadAddress()->pointerSize();
1376 Image::BindPattern compatFuncPattern;
1377 compatFuncPattern.target = lookupFuncTarget;
1378 compatFuncPattern.startVmOffset = runtimeOffset;
1379 compatFuncPattern.repeatCount = 1;
1380 compatFuncPattern.skipCount = 0;
1381 assert(compatFuncPattern.startVmOffset == runtimeOffset);
1382 binds.push_back(compatFuncPattern);
1383 }
1384 else {
1385                     _diag.error("libdyld.dylib is missing dyld3::compatFuncLookup");
1386 }
1387 }
1388 });
1389 }
1390
1391 writer.setBindInfo(binds);
1392 }
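// Example of the BindPattern merging above (offsets illustrative): three binds
// to the same target at offsets 0x1000, 0x1010, 0x1020 with 8-byte pointers
// give skipAmount = (0x1010 - 0x1000 - 8)/8 = 1, so the first merge turns
// { repeatCount=1, skipCount=0 } into { repeatCount=2, skipCount=1 } and the
// third bind just bumps repeatCount to 3: one pattern instead of three entries.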
1393
1394 void ClosureBuilder::reportRebasesAndBinds(ImageWriter& writer, BuilderLoadedImage& forImage)
1395 {
1396 // report all rebases
1397 forImage.loadAddress()->forEachRebase(_diag, true, ^(uint64_t runtimeOffset, bool& stop) {
1398 _handlers->rebase(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset);
1399 });
1400
1401 // report all binds
1402 forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
1403 _handlers->bind(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset, target, targetInfo);
1404 },
1405 ^(const char* strongSymbolName) {},
1406 ^() { });
1407
1408 // i386 programs also use text relocs to rebase stubs
1409 if ( forImage.loadAddress()->cputype == CPU_TYPE_I386 ) {
1410 // FIX ME
1411 }
1412 }
1413
1414 // These are mangled symbols for all the variants of operator new and delete
1415 // which a main executable can define (non-weak) to override the
1416 // weak-def implementation in the OS.
1417 static const char* const sTreatAsWeak[] = {
1418 "__Znwm", "__ZnwmRKSt9nothrow_t",
1419 "__Znam", "__ZnamRKSt9nothrow_t",
1420 "__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
1421 "__ZdaPv", "__ZdaPvRKSt9nothrow_t", "__ZdaPvm",
1422 "__ZnwmSt11align_val_t", "__ZnwmSt11align_val_tRKSt9nothrow_t",
1423 "__ZnamSt11align_val_t", "__ZnamSt11align_val_tRKSt9nothrow_t",
1424 "__ZdlPvSt11align_val_t", "__ZdlPvSt11align_val_tRKSt9nothrow_t", "__ZdlPvmSt11align_val_t",
1425 "__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
1426 };
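// For reference, these are Itanium C++ manglings with the extra leading
// underscore Mach-O adds to C-level symbols: "__Znwm" is operator new(size_t),
// "__Znam" operator new[](size_t), "__ZdlPv" operator delete(void*),
// "__ZdaPvm" operator delete[](void*, size_t); the St11align_val_t and
// RKSt9nothrow_t suffixes are the aligned and nothrow variants.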
1427
1428
1429 void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1430 {
1431 // build array of targets
1432 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
1433 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ResolvedTargetInfo, targetInfos, 1024);
1434 forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
1435 Image::ResolvedSymbolTarget target;
1436 ResolvedTargetInfo targetInfo;
1437 if ( !findSymbol(forImage, libOrdinal, symbolName, weakImport, false, addend, target, targetInfo) ) {
1438 const char* expectedInPath = forImage.loadAddress()->dependentDylibLoadPath(libOrdinal-1);
1439 _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, forImage.path());
1440 stop = true;
1441 return;
1442 }
1443 if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1444 // add if not already in array
1445 bool alreadyInArray = false;
1446 for (const char* sym : _weakDefsFromChainedBinds) {
1447 if ( strcmp(sym, symbolName) == 0 ) {
1448 alreadyInArray = true;
1449 break;
1450 }
1451 }
1452 if ( !alreadyInArray )
1453 _weakDefsFromChainedBinds.push_back(symbolName);
1454 }
1455 targets.push_back(target);
1456 targetInfos.push_back(targetInfo);
1457 });
1458 if ( _diag.hasError() )
1459 return;
1460
1461 uint64_t chainStartsOffset = forImage.loadAddress()->chainStartsOffset();
1462 if ( _handlers != nullptr ) {
1463 forImage.loadAddress()->withChainStarts(_diag, chainStartsOffset, ^(const dyld_chained_starts_in_image* starts) {
1464 _handlers->chainedBind(forImage.imageNum, forImage.loadAddress(), starts, targets, targetInfos);
1465 });
1466 }
1467 else {
1468 writer.setChainedFixups(chainStartsOffset, targets);
1469 }
1470
1471 // with chained fixups, the main executable may define a symbol that overrides a weak-def but has no fixup
1472 if ( _isLaunchClosure && forImage.loadAddress()->hasWeakDefs() && forImage.loadAddress()->isMainExecutable() ) {
1473 for (const char* weakSymbolName : sTreatAsWeak) {
1474 Diagnostics exportDiag;
1475 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1476 if ( forImage.loadAddress()->findExportedSymbol(exportDiag, weakSymbolName, false, foundInfo, nullptr) ) {
1477 _weakDefsFromChainedBinds.push_back(weakSymbolName);
1478 }
1479 }
1480 }
1481 }
1482
1483
1484 bool ClosureBuilder::findSymbolInImage(const MachOAnalyzer* macho, const char* symbolName, uint64_t addend, bool followReExports,
1485 bool weakImport, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1486 {
1487 targetInfo.foundInDylib = nullptr;
1488 targetInfo.requestedSymbolName = symbolName;
1489 targetInfo.addend = addend;
1490 targetInfo.weakBindCoalese = false;
1491 targetInfo.weakBindSameImage = false;
1492 targetInfo.isWeakDef = false;
1493 targetInfo.skippableWeakDef = false;
1494 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
1495 return (const MachOLoaded*)findDependent(mh, depIndex);
1496 };
1497 MachOAnalyzer::DependentToMachOLoaded finder = nullptr;
1498 if ( followReExports )
1499 finder = reexportFinder;
1500
1501 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1502 if ( macho->findExportedSymbol(_diag, symbolName, weakImport, foundInfo, finder) ) {
1503 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
1504 targetInfo.foundInDylib = foundInfo.foundInDylib;
1505 targetInfo.foundSymbolName = foundInfo.foundSymbolName;
1506 if ( foundInfo.isWeakDef )
1507 targetInfo.isWeakDef = true;
1508 if ( foundInfo.kind == MachOAnalyzer::FoundSymbol::Kind::absolute ) {
1509 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1510 target.absolute.value = foundInfo.value + addend;
1511 }
1512 else if ( impDylib->inDyldCache() ) {
1513 uint64_t offsetValue = (uint8_t*)impDylib - (uint8_t*)_dyldCache + foundInfo.value + addend;
1514 target.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1515 target.sharedCache.offset = offsetValue;
1516 assert(target.sharedCache.offset == offsetValue);
1517 }
1518 else {
1519 uint64_t offsetValue = foundInfo.value + addend;
1520 target.image.kind = Image::ResolvedSymbolTarget::kindImage;
1521 target.image.imageNum = findLoadedImage(impDylib).imageNum;
1522 target.image.offset = offsetValue;
1523 assert(target.image.offset == offsetValue);
1524 }
1525 return true;
1526 }
1527 return false;
1528 }
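// A resolved target is encoded one of three ways above: an absolute value, an offset
// from the shared cache base, or an (imageNum, offset-from-mach_header) pair. A sketch
// with hypothetical numbers -- a symbol at vm offset 0x1234 in a cached dylib whose
// mach_header sits 0x8000 bytes into the cache -- would be encoded as:
//
//     Image::ResolvedSymbolTarget t;
//     t.sharedCache.kind   = Image::ResolvedSymbolTarget::kindSharedCache;
//     t.sharedCache.offset = 0x8000 + 0x1234;
//
// The asserts above exist because kind/offset live in bitfields that could silently
// truncate large offsets.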
1529
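// For reference, the special libOrdinal values handled below are (from <mach-o/loader.h>):
//     BIND_SPECIAL_DYLIB_SELF             =  0
//     BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE  = -1
//     BIND_SPECIAL_DYLIB_FLAT_LOOKUP      = -2
//     BIND_SPECIAL_DYLIB_WEAK_LOOKUP      = -3
// Positive ordinals are 1-based indexes into the image's dependent dylibs.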
1530 bool ClosureBuilder::findSymbol(BuilderLoadedImage& fromImage, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind,
1531 uint64_t addend, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1532 {
1533 target.raw = 0;
1534 targetInfo.weakBindCoalese = false;
1535 targetInfo.weakBindSameImage = false;
1536 targetInfo.isWeakDef = false;
1537 targetInfo.skippableWeakDef = false;
1538 targetInfo.requestedSymbolName = symbolName;
1539 targetInfo.libOrdinal = libOrdinal;
1540 if ( libOrdinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP ) {
1541 for (const BuilderLoadedImage& li : _loadedImages) {
1542 if ( !li.rtldLocal && findSymbolInImage(li.loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1543 return true;
1544 }
1545 if ( weakImport ) {
1546 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1547 target.absolute.value = 0;
1548 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1549 fromImage.hasMissingWeakImports = true;
1550 return true;
1551 }
1552 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1553 if ( lazyBind && _allowMissingLazies ) {
1554 if ( findMissingSymbolHandler(target, targetInfo) )
1555 return true;
1556 }
1557 _diag.error("symbol '%s' not found, expected in flat namespace by '%s'", symbolName, fromImage.path());
1558 }
1559 else if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1560 // to resolve weak-def coalescing, we need to search all images in order and use the first definition
1561 // but, if the first found is a weakDef, a later non-weak def overrides it
1562 bool foundWeakDefImpl = false;
1563 bool foundStrongDefImpl = false;
1564 bool foundImpl = false;
1565
1566 if ( _makingDyldCacheImages ) {
1567 // _loadedImages is all dylibs in the dyld cache; it is not in load order, so we need an alternate weak-def binding algorithm
1568 // look first in /usr/lib/libc++, where most weak-defs will be found
1569 for (const BuilderLoadedImage& li : _loadedImages) {
1570 if ( li.loadAddress()->hasWeakDefs() && (strncmp(li.path(), "/usr/lib/libc++", 15) == 0) ) {
1571 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1572 foundImpl = true;
1573 break;
1574 }
1575 }
1576 }
1577 // if not found, try looking in the image itself; most custom weak-def symbols have a copy in the image itself
1578 if ( !foundImpl ) {
1579 if ( findSymbolInImage(fromImage.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1580 foundImpl = true;
1581 }
1582 }
1583 // if still not found, then this is the rare case of a simple use of a weak-def symbol
1584 if ( !foundImpl ) {
1585 // look in all direct dependents
1586 for (Image::LinkedImage child : fromImage.dependents) {
1587 if (child.imageNum() == kMissingWeakLinkedImage)
1588 continue;
1589 BuilderLoadedImage& childLi = findLoadedImage(child.imageNum());
1590 if ( childLi.loadAddress()->hasWeakDefs() && findSymbolInImage(childLi.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1591 foundImpl = true;
1592 break;
1593 }
1594 }
1595 }
1596 targetInfo.weakBindCoalese = true;
1597 }
1598 else {
1599 // walk images in load-order to find first that implements this symbol
1600 Image::ResolvedSymbolTarget aTarget;
1601 ResolvedTargetInfo aTargetInfo;
1602 STACK_ALLOC_ARRAY(const BuilderLoadedImage*, cachedDylibsUsingSymbol, 1024);
1603 for (const BuilderLoadedImage& li : _loadedImages) {
1604 // only search images with weak-defs that were not loaded with RTLD_LOCAL
1605 if ( li.loadAddress()->hasWeakDefs() && !li.rtldLocal ) {
1606 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, aTarget, aTargetInfo) ) {
1607 foundImpl = true;
1608 // with non-chained images, weak-defs first have a rebase to their local impl, and a weak-bind which allows earlier impls to override
1609 if ( !li.loadAddress()->hasChainedFixups() && (aTargetInfo.foundInDylib == fromImage.loadAddress()) )
1610 targetInfo.weakBindSameImage = true;
1611 if ( aTargetInfo.isWeakDef ) {
1612 // found a weakDef impl, if this is first found, set target to this
1613 if ( !foundWeakDefImpl && !foundStrongDefImpl ) {
1614 target = aTarget;
1615 targetInfo = aTargetInfo;
1616 }
1617 foundWeakDefImpl = true;
1618 }
1619 else {
1620 // found a non-weak impl, use this (unless early strong found)
1621 if ( !foundStrongDefImpl ) {
1622 target = aTarget;
1623 targetInfo = aTargetInfo;
1624 }
1625 foundStrongDefImpl = true;
1626 }
1627 }
1628 if ( foundImpl && li.loadAddress()->inDyldCache() )
1629 cachedDylibsUsingSymbol.push_back(&li);
1630 }
1631 }
1632
1633 // now that the final target is found, if any dylib in the dyld cache uses that symbol name, redirect it to the new target
1634 if ( !cachedDylibsUsingSymbol.empty() ) {
1635 for (const BuilderLoadedImage* li : cachedDylibsUsingSymbol) {
1636 Image::ResolvedSymbolTarget implInCache;
1637 ResolvedTargetInfo implInCacheInfo;
1638 if ( findSymbolInImage(li->loadAddress(), symbolName, addend, false, weakImport, implInCache, implInCacheInfo) ) {
1639 if ( implInCache != target ) {
1640 // found another instance in some dylib in dyld cache, will need to patch it
1641 Closure::PatchEntry patch;
1642 patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
1643 patch.overriddenDylibInCache = li->imageNum;
1644 patch.replacement = target;
1645 _weakDefCacheOverrides.push_back(patch);
1646 }
1647 }
1648 }
1649 }
1650 targetInfo.weakBindCoalese = true;
1651 }
1652
1653 if ( foundImpl )
1654 return true;
1655 if ( weakImport ) {
1656 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1657 target.absolute.value = 0;
1658 return true;
1659 }
1660 if ( ! fromImage.loadAddress()->hasChainedFixups() ) {
1661 // support old binaries where symbols have been stripped and have a weak_bind to themselves
1662 targetInfo.skippableWeakDef = true;
1663 return true;
1664 }
1665
1666 _diag.error("symbol '%s' not found, expected to be weak-def coalesced by '%s'", symbolName, fromImage.path());
1667 }
1668 else {
1669 const BuilderLoadedImage* targetLoadedImage = nullptr;
1670 if ( (libOrdinal > 0) && (libOrdinal <= (int)fromImage.dependents.count()) ) {
1671 ImageNum childNum = fromImage.dependents[libOrdinal - 1].imageNum();
1672 if ( childNum != kMissingWeakLinkedImage ) {
1673 targetLoadedImage = &findLoadedImage(childNum);
1674 }
1675 }
1676 else if ( libOrdinal == BIND_SPECIAL_DYLIB_SELF ) {
1677 targetLoadedImage = &fromImage;
1678 }
1679 else if ( libOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
1680 targetLoadedImage = &_loadedImages[_mainProgLoadIndex];
1681 }
1682 else {
1683 _diag.error("unknown special ordinal %d in %s", libOrdinal, fromImage.path());
1684 return false;
1685 }
1686
1687 if ( targetLoadedImage != nullptr ) {
1688 if ( findSymbolInImage(targetLoadedImage->loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1689 return true;
1690 }
1691
1692 if ( weakImport ) {
1693 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1694 target.absolute.value = 0;
1695 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1696 fromImage.hasMissingWeakImports = true;
1697 return true;
1698 }
1699
1700 // <rdar://problem/43315403> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld
1701 if ( lazyBind && _allowMissingLazies ) {
1702 if ( findMissingSymbolHandler(target, targetInfo) )
1703 return true;
1704 }
1705
1706 // symbol not found and not weak or lazy so error out
1707 const char* expectedInPath = targetLoadedImage ? targetLoadedImage->path() : "unknown";
1708 _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, fromImage.path());
1709 if ( _launchErrorInfo != nullptr ) {
1710 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
1711 _launchErrorInfo->clientOfDylibPath = strdup_temp(fromImage.path());
1712 _launchErrorInfo->targetDylibPath = strdup_temp(expectedInPath);
1713 _launchErrorInfo->symbol = symbolName;
1714 }
1715 }
1716 return false;
1717 }
1718
1719
1720 bool ClosureBuilder::findMissingSymbolHandler(Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1721 {
1722 for (BuilderLoadedImage& li : _loadedImages) {
1723 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1724 if ( findSymbolInImage(li.loadAddress(), "__dyld_missing_symbol_abort", 0, false, false, target, targetInfo) ) {
1725 return true;
1726 }
1727 break;
1728 }
1729 }
1730 return false;
1731 }
1732
1733 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1734 {
1735 if ( initInfos[loadIndex].visited )
1736 return;
1737 initInfos[loadIndex].visited = true;
1738 initInfos[loadIndex].danglingUpward = false;
1739
1740 if (_loadedImages[loadIndex].isBadImage) {
1741 hasError = true;
1742 return;
1743 }
1744
1745 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1746 if ( dep.imageNum() == kMissingWeakLinkedImage )
1747 continue;
1748 ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1749 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1750 if ( dep.kind() == Image::LinkKind::upward ) {
1751 if ( !initInfos[depLoadIndex].visited )
1752 initInfos[depLoadIndex].danglingUpward = true;
1753 }
1754 else {
1755 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1756 if (hasError)
1757 return;
1758 }
1759 }
1760 initInfos[loadIndex].initOrder = initOrder++;
1761 }
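// Net effect: initOrder is assigned post-order, so an image's dependents always get
// lower numbers. E.g., if main links libA and libA links libB, the traversal assigns
// libB=1, libA=2, main=3, and libB's initializers run first.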
1762
1763 void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
1764 {
1765 // allocate array to track initializers
1766 InitInfo initInfos[_loadedImages.count()];
1767 bzero(initInfos, sizeof(initInfos));
1768
1769 // recurse all images and build initializer list from bottom up
1770 uint32_t initOrder = 1;
1771 bool hasMissingDependent = false;
1772 depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
1773 if (hasMissingDependent) {
1774 imageWriter.setInvalid();
1775 return;
1776 }
1777
1778 // any images not visited yet are dangling upward links; force-add them to the end of the init list
1779 for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
1780 if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
1781 depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
1782 }
1783 }
1784
1785 if (hasMissingDependent) {
1786 imageWriter.setInvalid();
1787 return;
1788 }
1789
1790 // build array of just the images with initializers
1791 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
1792 uint32_t index = 0;
1793 for (const BuilderLoadedImage& li : _loadedImages) {
1794 if ( initInfos[index].visited && li.hasInits ) {
1795 indexOfImagesWithInits.push_back(index);
1796 }
1797 ++index;
1798 }
1799
1800 // bubble sort (FIXME)
1801 if ( indexOfImagesWithInits.count() > 1 ) {
1802 for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
1803 for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
1804 if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
1805 uint32_t temp = indexOfImagesWithInits[j];
1806 indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
1807 indexOfImagesWithInits[j+1] = temp;
1808 }
1809 }
1810 }
1811 }
1812
1813 // copy ImageNum of each image with initializers into array
1814 ImageNum initNums[indexOfImagesWithInits.count()];
1815 for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
1816 initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
1817 }
1818
1819 // add to closure info
1820 imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
1821 }
1822
1823 void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
1824 {
1825 // record which is libSystem
1826 assert(_libSystemImageNum != 0);
1827 closureWriter.setLibSystemImageNum(_libSystemImageNum);
1828
1829 // record which is libdyld
1830 assert(_libDyldImageNum != 0);
1831 Image::ResolvedSymbolTarget entryLocation;
1832 ResolvedTargetInfo entryInfo;
1833 if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld318entryVectorForDyldE", 0, false, false, entryLocation, entryInfo) ) {
1834 const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
1835 switch ( entryLocation.image.kind ) {
1836 case Image::ResolvedSymbolTarget::kindSharedCache:
1837 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)_dyldCache + entryLocation.sharedCache.offset);
1838 break;
1839 case Image::ResolvedSymbolTarget::kindImage:
1840 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)findLoadedImage(entryLocation.image.imageNum).loadAddress() + entryLocation.image.offset);
1841 break;
1842 }
1843 if ( (libDyldEntry != nullptr) && ((libDyldEntry->binaryFormatVersion & LibDyldEntryVector::kBinaryFormatVersionMask) == dyld3::closure::kFormatVersion) )
1844 closureWriter.setLibDyldEntry(entryLocation);
1845 else
1846 _diag.error("libdyld.dylib entry vector is incompatible");
1847 }
1848 else {
1849 _diag.error("libdyld.dylib is missing entry vector");
1850 }
1851
1852 // record which is main executable
1853 ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
1854 closureWriter.setTopImageNum(mainProgImageNum);
1855
1856 // add entry
1857 uint32_t entryOffset;
1858 bool usesCRT;
1859 if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
1860 Image::ResolvedSymbolTarget location;
1861 location.image.kind = Image::ResolvedSymbolTarget::kindImage;
1862 location.image.imageNum = mainProgImageNum;
1863 location.image.offset = entryOffset;
1864 if ( usesCRT )
1865 closureWriter.setStartEntry(location);
1866 else
1867 closureWriter.setMainEntry(location);
1868 }
1869
1870 // add env vars that must match at launch time
1871 _pathOverrides.forEachEnvVar(^(const char* envVar) {
1872 closureWriter.addEnvVar(envVar);
1873 });
1874
1875 // add list of files which must be missing
1876 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const char*, paths, 8192);
1877 if ( _mustBeMissingPaths != nullptr ) {
1878 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
1879 paths.push_back(aPath);
1880 });
1881 }
1882 closureWriter.setMustBeMissingFiles(paths);
1883
1884 // add list of files which must be present with a specific inode/mtime
1885 if (!_skippedFiles.empty())
1886 closureWriter.setMustExistFiles(_skippedFiles);
1887 }
1888 void ClosureBuilder::invalidateInitializerRoots()
1889 {
1890 while (true) {
1891 bool madeChange = false;
1892 for (uintptr_t loadedImageIndex = _alreadyInitedIndex; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
1893 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
1894 if ( li.mustBuildClosure ) {
1895 // Already invalidated
1896 continue;
1897 }
1898 for (Image::LinkedImage depIndex : li.dependents) {
1899 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
1900 continue;
1901 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
1902 // If a dependent is bad, or a new image num, or an override, then we need this image to get a new closure
1903 if ( depImage.mustBuildClosure ) {
1904 li.mustBuildClosure = true; // mark bad
1905 madeChange = true;
1906 }
1907 }
1908 }
1909 if (!madeChange)
1910 break;
1911 // If we made a change, then we detected an existing image with a dependent which needed to be rebuilt.
1912 // This corresponds to a shared cache root: the existing image is a shared cache image and the root is the depImage.
1913 _foundDyldCacheRoots = true;
1914 }
1915 }
1916
1917 size_t ClosureBuilder::HashCString::hash(const char* v) {
1918 // FIXME: Use hash<string_view> when it has the correct visibility markup
1919 return __gnu_cxx::hash<const char*>{}(v);
1920 }
1921
1922 bool ClosureBuilder::EqualCString::equal(const char* s1, const char* s2) {
1923 return strcmp(s1, s2) == 0;
1924 }
1925
1926
1927 struct HashUInt64 {
1928 static size_t hash(const uint64_t& v) {
1929 return std::hash<uint64_t>{}(v);
1930 }
1931 };
1932
1933 struct EqualUInt64 {
1934 static bool equal(uint64_t s1, uint64_t s2) {
1935 return s1 == s2;
1936 }
1937 };
1938
1939 void ClosureBuilder::writeClassOrProtocolHashTable(bool classes, Array<ObjCOptimizerImage>& objcImages) {
1940 __block MultiMap<const char*, dyld3::closure::Image::ObjCClassImageOffset, HashCString, EqualCString> seenClassesMap;
1941 __block Map<const char*, dyld3::closure::Image::ObjCClassNameImageOffset, HashCString, EqualCString> classNameMap;
1942 __block OverflowSafeArray<const char*> classNames;
1943
1944 // Note we walk the images backwards as we want them in load order to match the order they are registered with objc
1945 for (size_t imageIndex = 0, reverseIndex = (objcImages.count() - 1); imageIndex != objcImages.count(); ++imageIndex, --reverseIndex) {
1946 if (objcImages[reverseIndex].diag.hasError())
1947 continue;
1948 ObjCOptimizerImage& image = objcImages[reverseIndex];
1949 const OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = classes ? image.seenClasses : image.seenProtocols;
1950
1951 for (const ObjCOptimizerImage::SeenClass& seenClass : seenClasses) {
1952 closure::Image::ObjCClassNameImageOffset classNameTarget = seenClass.first;
1953 dyld3::closure::Image::ObjCClassImageOffset classDataTarget = seenClass.second;
1954 Image::ObjCClassImage classImage = _objcClassesHashTableImages[classNameTarget.classNameImageIndex];
1955
1956 const BuilderLoadedImage& li = findLoadedImage(classImage.imageNum);
1957 const dyld3::MachOAnalyzer* ma = li.loadAddress();
1958
1959 const char* className = ((const char*)ma) + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1960 //uint64_t nameVMAddr = ma->preferredLoadAddress() + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1961 //printf("%s: 0x%08llx = '%s'\n", li.path(), nameVMAddr, className);
1962 seenClassesMap.insert({ className, classDataTarget });
1963
1964 // Also track the name
1965 auto itAndInserted = classNameMap.insert({ className, dyld3::closure::Image::ObjCClassNameImageOffset() });
1966 if (itAndInserted.second) {
1967 // We inserted the class name so we need to add it to the strings for the closure hash table
1968 classNames.push_back(className);
1969
1970 // We already computed a class name target in a previous loop so use that one
1971 itAndInserted.first->second = seenClass.first;
1972
1973 // If we are processing protocols, and this is the first one we've seen, then track its ISA to be fixed up
1974 if ( !classes ) {
1975 uint64_t protocolVMOffset = classImage.offsetOfClasses + classDataTarget.classData.imageOffset;
1976 image.protocolISAFixups.push_back(protocolVMOffset);
1977 }
1978 }
1979 }
1980 }
1981
1982 __block uint32_t duplicateCount = 0;
1983 seenClassesMap.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values,
1984 uint64_t valuesCount) {
1985 if (valuesCount != 1)
1986 duplicateCount += valuesCount;
1987 });
1988
1989 // If we have closure class names, we need to make a hash table for them.
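// (objc_opt::make_perfect builds a collision-free hash function over exactly these
// name strings; ObjCClassOpt::size additionally reserves room for duplicateCount
// out-of-line entries, used when one class name maps to several implementations.)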
1990 OverflowSafeArray<uint8_t>& hashTable = classes ? _objcClassesHashTable : _objcProtocolsHashTable;
1991 if (!classNames.empty()) {
1992 objc_opt::perfect_hash phash;
1993 objc_opt::make_perfect(classNames, phash);
1994 size_t size = ObjCClassOpt::size(phash, duplicateCount);
1995 hashTable.resize(size);
1996 //printf("Class table size: %lld\n", size);
1997 ObjCClassOpt* classesHashTable = (ObjCClassOpt*)hashTable.begin();
1998 classesHashTable->write(phash, classNameMap.array(), seenClassesMap, duplicateCount);
1999 }
2000 }
2001
2002 bool ClosureBuilder::optimizeObjC(Array<ImageWriter>& writers) {
2003 if ( _dyldCache == nullptr )
2004 return false;
2005
2006 // If we have the read only data, make sure it has a valid selector table inside.
2007 const objc_opt::objc_clsopt_t* objcClassOpt = nullptr;
2008 const objc_opt::objc_selopt_t* objcSelOpt = nullptr;
2009 const objc_opt::objc_protocolopt2_t* objcProtocolOpt = nullptr;
2010 if (const objc_opt::objc_opt_t* optObjCHeader = _dyldCache->objcOpt()) {
2011 objcClassOpt = optObjCHeader->clsopt();
2012 objcSelOpt = optObjCHeader->selopt();
2013 objcProtocolOpt = optObjCHeader->protocolopt2();
2014 }
2015
2016 if ( !objcClassOpt || !objcSelOpt || !objcProtocolOpt )
2017 return false;
2018
2019 // We have 24 bits of index in SelectorReferenceFixup so we can't handle a
2020 // shared cache selector table larger than that
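// (1 << 24 = 16,777,216 selector slots; rather than risk truncating an index, we just
// skip building the optimized objc tables if the cache table is ever that large.)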
2021 if ( objcSelOpt->usedCount() >= (1 << 24) )
2022 return false;
2023
2024 // Make sure we have the pointers section with the pointer to the protocol class
2025 const void* objcOptPtrs = _dyldCache->objcOptPtrs();
2026 if ( objcOptPtrs == nullptr )
2027 return false;
2028
2029 uint32_t pointerSize = _loadedImages.begin()->loadAddress()->pointerSize();
2030 uint64_t classProtocolVMAddr = (pointerSize == 8) ? *(uint64_t*)objcOptPtrs : *(uint32_t*)objcOptPtrs;
2031
2032 Image::ResolvedSymbolTarget objcProtocolClassTarget;
2033 objcProtocolClassTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
2034 if ( _dyldCacheIsLive ) {
2035 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - (uint64_t)_dyldCache;
2036 } else {
2037 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - _dyldCache->unslidLoadAddress();
2038 }
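// The value read from objcOptPtrs is a vmAddr for the Protocol class. If the cache is
// live (mapped in this process), that vmAddr is slid, so subtracting the cache's actual
// load address yields a cache offset; offline (in the cache builder), values are
// unslid, so we subtract the cache's preferred unslid load address instead.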
2039
2040 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ObjCOptimizerImage, objcImages, 32);
2041 ArrayFinalizer<ObjCOptimizerImage> scopedCleanup(objcImages,
2042 ^(ObjCOptimizerImage& objcImage) {
2043 objcImage.~ObjCOptimizerImage();
2044 });
2045
2046 // Find all the images with valid objc info
2047 // Also add shared cache images to a map so that we can see them later for looking up classes
2048 Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer> sharedCacheImagesMap;
2049 for (size_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
2050 BuilderLoadedImage& li = _loadedImages[imageIndex];
2051
2052 // Skip shared cache images as even if they need a new closure, the objc runtime can still use
2053 // the optimized shared cache tables.
2054 if ( li.loadAddress()->inDyldCache() ) {
2055 sharedCacheImagesMap.insert({ li.loadAddress(), true });
2056 // Bump the writer index if we have a writer for this image
2057 if ( li.mustBuildClosure )
2058 ++writerIndex;
2059 continue;
2060 }
2061 // Images which don't need a new closure can be skipped; their existing image info (e.g., from the shared cache's image array) is still valid
2062 if ( !li.mustBuildClosure )
2063 continue;
2064
2065 // If we have a root of libobjc, just give up for now
2066 if ( !strcmp(li.path(), "/usr/lib/libobjc.A.dylib"))
2067 return false;
2068
2069 ImageWriter& writer = writers[writerIndex];
2070 ++writerIndex;
2071
2072 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2073
2074 // Skip images whose chained fixup formats we haven't tested yet (currently the 32-bit formats)
2075 // FIXME: Handle the remaining chained fixup formats
2076 if ( ma->hasChainedFixups() ) {
2077 switch ( ma->chainedPointerFormat() ) {
2078 case DYLD_CHAINED_PTR_ARM64E:
2079 case DYLD_CHAINED_PTR_64:
2080 // We've tested the 64-bit chained fixups.
2081 break;
2082 case DYLD_CHAINED_PTR_32:
2083 case DYLD_CHAINED_PTR_32_CACHE:
2084 case DYLD_CHAINED_PTR_32_FIRMWARE:
2085 // FIXME: Test 32-bit chained fixups then enable this.
2086 continue;
2087 }
2088 }
2089
2090 const MachOAnalyzer::ObjCImageInfo* objcImageInfo = ma->objcImageInfo();
2091 if ( objcImageInfo == nullptr )
2092 continue;
2093
2094 // This image is good so record it for use later.
2095 objcImages.default_constuct_back();
2096 ObjCOptimizerImage& image = objcImages.back();
2097 image.loadedImage = &li;
2098 image.writer = &writer;
2099
2100 // Find FairPlay encryption range if encrypted
2101 uint32_t fairPlayFileOffset;
2102 uint32_t fairPlaySize;
2103 if ( ma->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
2104 image.fairplayFileOffsetStart = fairPlayFileOffset;
2105 image.fairplayFileOffsetEnd = fairPlayFileOffset + fairPlaySize;
2106 }
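// (Recording the encrypted file range presumably lets later passes detect when content
// they need to read still lies inside the not-yet-decrypted region; hedged guess, as
// the consumers of fairplayFileOffsetStart/End live outside this file.)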
2107
2108 // Set the offset to the objc image info
2109 image.objcImageInfoVMOffset = (uint64_t)objcImageInfo - (uint64_t)ma;
2110 }
2111
2112 OverflowSafeArray<const char*> closureSelectorStrings;
2113 Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString> closureSelectorMap;
2114 OverflowSafeArray<const char*> closureDuplicateSharedCacheClassNames;
2115 Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString> closureDuplicateSharedCacheClassMap;
2116 for (ObjCOptimizerImage& image : objcImages) {
2117 optimizeObjCClasses(objcClassOpt, sharedCacheImagesMap, closureDuplicateSharedCacheClassMap, image);
2118 if (image.diag.hasError())
2119 continue;
2120
2121 optimizeObjCProtocols(objcProtocolOpt, sharedCacheImagesMap, image);
2122 if (image.diag.hasError())
2123 continue;
2124
2125 optimizeObjCSelectors(objcSelOpt, closureSelectorMap, image);
2126 if (image.diag.hasError())
2127 continue;
2128
2129 // If this image is still valid, then add its intermediate results to the main tables
2130
2131 // Class results
2132 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2133 uint64_t nameVMOffset = nameAndDataVMOffset.first;
2134 uint64_t dataVMOffset = nameAndDataVMOffset.second;
2135 _objcClassesHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)nameVMOffset, (uint32_t)dataVMOffset });
2136 }
2137 image.classesNameAndDataVMOffsets.clear();
2138
2139 for (const auto& stringAndDuplicate : image.classSharedCacheDuplicates) {
2140 closureDuplicateSharedCacheClassMap[stringAndDuplicate.first] = stringAndDuplicate.second;
2141 closureDuplicateSharedCacheClassNames.push_back(stringAndDuplicate.first);
2142 }
2143
2144 // Selector results
2145 // Note we don't need to add the selector binds here. It's easier to process them later from each image
2146 for (const auto& stringAndTarget : image.selectorMap) {
2147 closureSelectorMap[stringAndTarget.first] = stringAndTarget.second;
2148 closureSelectorStrings.push_back(stringAndTarget.first);
2149 }
2150 if (image.methodNameVMOffset)
2151 _objcSelectorsHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)*image.methodNameVMOffset });
2152 }
2153
2154 // If we successfully analyzed the classes and selectors, we can now emit their data
2155 // Set all the writers to have optimized objc
2156 for (ObjCOptimizerImage& image : objcImages) {
2157 if (image.diag.hasError())
2158 continue;
2159 image.writer->setHasPrecomputedObjC(true);
2160 }
2161
2162 // Write out the class table
2163 writeClassOrProtocolHashTable(true, objcImages);
2164
2165 // Write out the protocol table
2166 writeClassOrProtocolHashTable(false, objcImages);
2167
2168 // If we have closure duplicate classes, we need to make a hash table for them.
2169 closure::ObjCStringTable* duplicateClassesTable = nullptr;
2170 if (!closureDuplicateSharedCacheClassNames.empty()) {
2171 objc_opt::perfect_hash phash;
2172 objc_opt::make_perfect(closureDuplicateSharedCacheClassNames, phash);
2173 size_t size = ObjCStringTable::size(phash);
2174 _objcClassesDuplicatesHashTable.resize(size);
2175 //printf("Duplicate classes table size: %lld\n", size);
2176 duplicateClassesTable = (closure::ObjCClassDuplicatesOpt*)_objcClassesDuplicatesHashTable.begin();
2177 duplicateClassesTable->write(phash, closureDuplicateSharedCacheClassMap.array());
2178 }
2179
2180 // If we have closure selectors, we need to make a hash table for them.
2181 closure::ObjCStringTable* selectorStringTable = nullptr;
2182 if (!closureSelectorStrings.empty()) {
2183 objc_opt::perfect_hash phash;
2184 objc_opt::make_perfect(closureSelectorStrings, phash);
2185 size_t size = ObjCStringTable::size(phash);
2186 _objcSelectorsHashTable.resize(size);
2187 //printf("Selector table size: %lld\n", size);
2188 selectorStringTable = (closure::ObjCStringTable*)_objcSelectorsHashTable.begin();
2189 selectorStringTable->write(phash, closureSelectorMap.array());
2190 }
2191
2192 // Add fixups for the image info, protocol ISAs, and selector refs
2193 for (ObjCOptimizerImage& image : objcImages) {
2194 if (image.diag.hasError())
2195 continue;
2196
2197 // Protocol ISA references
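// The pattern entries built below are run-length encoded: startVmOffset names the
// first pointer to fix, repeatCount says how many pointers to fix in a row, and
// skipCount how many pointers to step over between repeats. lastOffset starts at
// -pointerSize (a huge unsigned value) so the first fixup can never merge into a
// previous entry and always starts a fresh pattern.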
2198 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ProtocolISAFixup, protocolFixups, 512);
2199 if ( !image.protocolISAFixups.empty() ) {
2200
2201 __block uint64_t lastOffset = -pointerSize;
2202 for (uint64_t runtimeOffset : image.protocolISAFixups) {
2203 bool mergedIntoPrevious = false;
2204 if ( (runtimeOffset > lastOffset) && !protocolFixups.empty() ) {
2205 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2206 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2207 // misaligned pointer means we cannot optimize
2208 }
2209 else {
2210 if ( (protocolFixups.back().repeatCount == 1) && (protocolFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2211 protocolFixups.back().repeatCount = 2;
2212 protocolFixups.back().skipCount = skipAmount;
2213 assert(protocolFixups.back().skipCount == skipAmount); // check overflow
2214 mergedIntoPrevious = true;
2215 }
2216 else if ( (protocolFixups.back().skipCount == skipAmount) && (protocolFixups.back().repeatCount < 0xfff) ) {
2217 uint32_t prevRepeatCount = protocolFixups.back().repeatCount;
2218 protocolFixups.back().repeatCount += 1;
2219 assert(protocolFixups.back().repeatCount > prevRepeatCount); // check overflow
2220 mergedIntoPrevious = true;
2221 }
2222 }
2223 }
2224 if ( !mergedIntoPrevious ) {
2225 Image::ProtocolISAFixup pattern;
2226 pattern.startVmOffset = runtimeOffset;
2227 pattern.repeatCount = 1;
2228 pattern.skipCount = 0;
2229 assert(pattern.startVmOffset == runtimeOffset);
2230 protocolFixups.push_back(pattern);
2231 }
2232 lastOffset = runtimeOffset;
2233 }
2234 }
2235
2236 // Selector references
2237 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::SelectorReferenceFixup, selRefFixups, 512);
2238 if ( !image.selectorFixups.empty() ) {
2239 uint64_t prevVMOffset = 0;
2240 const uint64_t maxChainOffset = (4 * ((1 << 7) - 1));
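// chainEntry.next appears to be a 7-bit count of 4-byte strides, so one link can reach
// at most 4 * ((1 << 7) - 1) = 508 bytes; a selector ref farther away starts a new chain.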
2241 for (const ObjCOptimizerImage::SelectorFixup& selectorFixup : image.selectorFixups) {
2242 assert( (selectorFixup.fixupVMOffset & 3) == 0 );
2243 if ( (selectorFixup.fixupVMOffset - prevVMOffset) <= maxChainOffset ) {
2244 // Add this to the previous chain
2245 selRefFixups.back().chainEntry.next = (uint32_t)(selectorFixup.fixupVMOffset - prevVMOffset) / 4;
2246 } else {
2247 // Need to start a new chain as the previous offset can't reach
2248 Image::SelectorReferenceFixup fixup;
2249 fixup.chainStartVMOffset = selectorFixup.fixupVMOffset;
2250 selRefFixups.push_back(fixup);
2251 }
2252
2253 if ( selectorFixup.isSharedCache ) {
2254 // If the entry is in the shared cache then we already have the index for it
2255 Image::SelectorReferenceFixup fixup;
2256 fixup.chainEntry.index = selectorFixup.sharedCache.selectorTableIndex;
2257 fixup.chainEntry.next = 0;
2258 fixup.chainEntry.inSharedCache = 1;
2259 selRefFixups.push_back(fixup);
2260 } else {
2261 // We had to record the string for the closure table entries as we don't know the
2262 // index until now
2263 uint32_t selectorTableIndex = selectorStringTable->getIndex(selectorFixup.image.selectorString);
2264 assert(selectorTableIndex != ObjCSelectorOpt::indexNotFound);
2265 Image::SelectorReferenceFixup fixup;
2266 fixup.chainEntry.index = selectorTableIndex;
2267 fixup.chainEntry.next = 0;
2268 fixup.chainEntry.inSharedCache = 0;
2269 selRefFixups.push_back(fixup);
2270 }
2271
2272 prevVMOffset = selectorFixup.fixupVMOffset;
2273 }
2274 }
2275
2276 // Stable Swift fixups
2277 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ClassStableSwiftFixup, stableSwiftFixups, 512);
2278 if ( !image.classStableSwiftFixups.empty() ) {
2279
2280 __block uint64_t lastOffset = -pointerSize;
2281 for (uint64_t runtimeOffset : image.classStableSwiftFixups) {
2282 bool mergedIntoPrevious = false;
2283 if ( (runtimeOffset > lastOffset) && !stableSwiftFixups.empty() ) {
2284 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2285 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2286 // misaligned pointer means we cannot optimize
2287 }
2288 else {
2289 if ( (stableSwiftFixups.back().repeatCount == 1) && (stableSwiftFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2290 stableSwiftFixups.back().repeatCount = 2;
2291 stableSwiftFixups.back().skipCount = skipAmount;
2292 assert(stableSwiftFixups.back().skipCount == skipAmount); // check overflow
2293 mergedIntoPrevious = true;
2294 }
2295 else if ( (stableSwiftFixups.back().skipCount == skipAmount) && (stableSwiftFixups.back().repeatCount < 0xfff) ) {
2296 uint32_t prevRepeatCount = stableSwiftFixups.back().repeatCount;
2297 stableSwiftFixups.back().repeatCount += 1;
2298 assert(stableSwiftFixups.back().repeatCount > prevRepeatCount); // check overflow
2299 mergedIntoPrevious = true;
2300 }
2301 }
2302 }
2303 if ( !mergedIntoPrevious ) {
2304 Image::ClassStableSwiftFixup pattern;
2305 pattern.startVmOffset = runtimeOffset;
2306 pattern.repeatCount = 1;
2307 pattern.skipCount = 0;
2308 assert(pattern.startVmOffset == runtimeOffset);
2309 stableSwiftFixups.push_back(pattern);
2310 }
2311 lastOffset = runtimeOffset;
2312 }
2313 }
2314
2315 // Method list fixups
2316 // TODO: Implement this
2317 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::MethodListFixup, methodListFixups, 512);
2318
2319 image.writer->setObjCFixupInfo(objcProtocolClassTarget, image.objcImageInfoVMOffset, protocolFixups,
2320 selRefFixups, stableSwiftFixups, methodListFixups);
2321 }
2322
2323 return true;
2324 }
2325
2326 void ClosureBuilder::optimizeObjCSelectors(const objc_opt::objc_selopt_t* objcSelOpt,
2327 const Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString>& closureSelectorMap,
2328 ObjCOptimizerImage& image) {
2329
2330 BuilderLoadedImage& li = *image.loadedImage;
2331
2332 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2333 uint32_t pointerSize = ma->pointerSize();
2334 const uint64_t loadAddress = ma->preferredLoadAddress();
2335
2336 // The legacy (objc1) codebase uses a bunch of sections we don't want to reason about. If we see them just give up.
2337 __block bool foundBadSection = false;
2338 ma->forEachSection(^(const MachOAnalyzer::SectionInfo &sectInfo, bool malformedSectionRange, bool &stop) {
2339 if ( strcmp(sectInfo.segInfo.segName, "__OBJC") != 0 )
2340 return;
2341 if (strcmp(sectInfo.sectName, "__module_info") == 0) {
2342 foundBadSection = true;
2343 stop = true;
2344 return;
2345 }
2346 if (strcmp(sectInfo.sectName, "__protocol") == 0) {
2347 foundBadSection = true;
2348 stop = true;
2349 return;
2350 }
2351 if (strcmp(sectInfo.sectName, "__message_refs") == 0) {
2352 foundBadSection = true;
2353 stop = true;
2354 return;
2355 }
2356 });
2357 if (foundBadSection) {
2358 image.diag.error("Old objc section");
2359 return;
2360 }
2361
2362 __block MachOAnalyzer::SectionCache selectorStringSectionCache(ma);
2363
2364 uint32_t sharedCacheSentinelIndex = objcSelOpt->getSentinelIndex();
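// getSentinelIndex() is the index the selector table returns for strings it does not
// contain; comparing getIndexForKey() results against it below distinguishes shared
// cache hits from selectors that must go into the closure's own table.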
2365
2366 auto visitReferenceToObjCSelector = ^void(uint64_t selectorStringVMAddr, uint64_t selectorReferenceVMAddr) {
2367
2368 uint64_t selectorUseImageOffset = selectorReferenceVMAddr - loadAddress;
2369 if ( (selectorUseImageOffset & 3) != 0 ) {
2370 image.diag.error("Unaligned selector reference fixup");
2371 return;
2372 }
2373
2374 // Image::SelectorReferenceFixup only has a 32-bit reach
2375 if ( selectorUseImageOffset >= (1ULL << 32) ) {
2376 image.diag.error("Selector reference fixup exceeds supported vm offset");
2377 return;
2378 }
2379
2380 // Get the section for the name
2381 const char* selectorString = nullptr;
2382 MachOAnalyzer::PrintableStringResult selectorStringResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2383 __block uint64_t selectorStringSectionStartVMAddr = 0;
2384 auto selectorStringSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2385
2386 // We only have 24 bits in Image::ObjCImageOffset to index into the strings
2387 if (sectInfo.sectSize >= Image::ObjCImageOffset::maximumOffset) {
2388 return false;
2389 }
2390
2391 // We use 32-bit offsets so make sure the section is no larger than that.
2392 uint64_t sectionVMOffset = sectInfo.sectAddr - loadAddress;
2393 if (sectionVMOffset >= (1ULL << 32)) {
2394 return false;
2395 }
2396
2397 selectorStringSectionStartVMAddr = sectInfo.sectAddr;
2398 return true;
2399 };
2400 selectorString = ma->getPrintableString(selectorStringVMAddr, selectorStringResult,
2401 &selectorStringSectionCache, selectorStringSectionHandler);
2402
2403 if ( selectorStringResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2404 image.diag.error("Invalid selector string for objc optimisation");
2405 return;
2406 }
2407
2408 uint32_t cacheSelectorIndex = objcSelOpt->getIndexForKey(selectorString);
2409 //printf("selector: %p -> %p %s\n", methodName, cacheSelector, selectorString);
2410
2411 if ( cacheSelectorIndex != sharedCacheSentinelIndex ) {
2412 // We got the selector from the cache so add a fixup to point there.
2413 ObjCOptimizerImage::SelectorFixup fixup;
2414 fixup.isSharedCache = true;
2415 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2416 fixup.sharedCache.selectorTableIndex = cacheSelectorIndex;
2417
2418 //printf("Overriding fixup at 0x%08llX to cache offset 0x%08llX\n", selectorUseImageOffset, (uint64_t)cacheSelector - (uint64_t)_dyldCache);
2419 image.selectorFixups.push_back(fixup);
2420 return;
2421 }
2422
2423 // See if this selector is already in the closure map from a previous image
2424 auto closureSelectorIt = closureSelectorMap.find(selectorString);
2425 if (closureSelectorIt != closureSelectorMap.end()) {
2426 // This selector was found in a previous image, so use it here.
2427 ObjCOptimizerImage::SelectorFixup fixup;
2428 fixup.isSharedCache = false;
2429 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2430 fixup.image.selectorString = selectorString;
2431
2432 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2433 image.selectorFixups.push_back(fixup);
2434 return;
2435 }
2436
2437 // See if this selector is already in the map for this image
2438 auto itAndInserted = image.selectorMap.insert({ selectorString, dyld3::closure::Image::ObjCImageOffset() });
2439 if (itAndInserted.second) {
2440 // We added the selector so it's pointing into our own image.
2441 // We don't need to add a fixup to our image, but we do need to
2442 // populate the data for other images later to point here.
2443 // First put our image in the list if it's not already there.
2444 uint64_t methodNameVMOffset = selectorStringSectionStartVMAddr - loadAddress;
2445 if (!image.methodNameVMOffset) {
2446 if ( _objcSelectorsHashTableImages.count() == Image::ObjCImageOffset::maximumImageIndex ) {
2447 image.diag.error("Out of space for selector hash images");
2448 return;
2449 }
2450 image.methodNameVMOffset = methodNameVMOffset;
2451 } else {
2452 // If we already set the offset to the start of the method names section, double check that
2453 // the section we are in right now is the same as that one. Otherwise we don't have the code
2454 // to handle both right now.
2455 if (*image.methodNameVMOffset != methodNameVMOffset) {
2456 image.diag.error("Cannot handle more than one selector strings section");
2457 return;
2458 }
2459 }
2460
2461 dyld3::closure::Image::ObjCImageOffset target;
2462 target.imageIndex = (uint32_t)_objcSelectorsHashTableImages.count();
2463 target.imageOffset = (uint32_t)(selectorStringVMAddr - selectorStringSectionStartVMAddr);
2464 itAndInserted.first->second = target;
2465 return;
2466 }
2467
2468 // This selector was found elsewhere in our image. If this reference already points to the same
2469 // selector string as we found before (and it should!) then we have nothing to do. Otherwise we
2470 // need to add a fixup here to make sure we point to our chosen definition.
2471 uint32_t imageOffset = (uint32_t)(selectorStringVMAddr - loadAddress);
2472 if ( imageOffset == (*image.methodNameVMOffset + itAndInserted.first->second.imageOffset) )
2473 return;
2474
2475 ObjCOptimizerImage::SelectorFixup fixup;
2476 fixup.isSharedCache = false;
2477 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2478 fixup.image.selectorString = selectorString;
2479
2480 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2481 image.selectorFixups.push_back(fixup);
2482 };
2483
2484 auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
2485 visitReferenceToObjCSelector(method.nameVMAddr, method.nameLocationVMAddr);
2486 };
2487
2488 auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
2489 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2490 const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
2491 ma->forEachObjCMethod(objcClass.baseMethodsVMAddr(pointerSize), li.contentRebased,
2492 visitMethod);
2493 };
2494
2495 auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
2496 const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
2497 ma->forEachObjCMethod(objcCategory.instanceMethodsVMAddr, li.contentRebased,
2498 visitMethod);
2499 ma->forEachObjCMethod(objcCategory.classMethodsVMAddr, li.contentRebased,
2500 visitMethod);
2501 };
2502 auto visitProtocol = ^(Diagnostics& diag, uint64_t protocolVMAddr,
2503 const dyld3::MachOAnalyzer::ObjCProtocol& objCProtocol) {
2504 ma->forEachObjCMethod(objCProtocol.instanceMethodsVMAddr, li.contentRebased,
2505 visitMethod);
2506 ma->forEachObjCMethod(objCProtocol.classMethodsVMAddr, li.contentRebased,
2507 visitMethod);
2508 ma->forEachObjCMethod(objCProtocol.optionalInstanceMethodsVMAddr, li.contentRebased,
2509 visitMethod);
2510 ma->forEachObjCMethod(objCProtocol.optionalClassMethodsVMAddr, li.contentRebased,
2511 visitMethod);
2512 };
2513
2514 // Walk the class list
2515 ma->forEachObjCClass(image.diag, li.contentRebased, visitClass);
2516 if (image.diag.hasError())
2517 return;
2518
2519 // Walk the category list
2520 ma->forEachObjCCategory(image.diag, li.contentRebased, visitCategory);
2521 if (image.diag.hasError())
2522 return;
2523
2524 // Walk the protocol list
2525 ma->forEachObjCProtocol(image.diag, li.contentRebased, visitProtocol);
2526 if (image.diag.hasError())
2527 return;
2528
2529 // Visit the selector refs
2530 ma->forEachObjCSelectorReference(image.diag, li.contentRebased, ^(uint64_t selRefVMAddr, uint64_t selRefTargetVMAddr) {
2531 visitReferenceToObjCSelector(selRefTargetVMAddr, selRefVMAddr);
2532 });
2533 if (image.diag.hasError())
2534 return;
2535
2536 // Visit the message refs
2537 // Note this isn't actually supported in libobjc any more. Its logic for deciding whether to support it is this condition:
2538 // #if (defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR))
2539 // So to keep it simple, let's only do this walk if we are x86_64
2540 if ( ma->isArch("x86_64") || ma->isArch("x86_64h") ) {
2541 if (ma->hasObjCMessageReferences()) {
2542 image.diag.error("Cannot handle message refs");
2543 return;
2544 }
2545 }
2546 }
2547
2548 static const dyld3::MachOAnalyzer* getMachHeaderFromObjCHeaderInfo(const void* opaqueHeaderInfo, uint32_t pointerSize) {
2549 if (pointerSize == 8) {
2550 typedef int64_t PtrTy;
2551 struct HeaderInfo {
2552 PtrTy mhdr_offset; // offset to mach_header_64
2553 PtrTy info_offset; // offset to objc_image_info *
2554 };
2555 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2556 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2557 } else {
2558 typedef int32_t PtrTy;
2559 struct HeaderInfo {
2560 PtrTy mhdr_offset; // offset to mach_header
2561 PtrTy info_offset; // offset to objc_image_info *
2562 };
2563 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2564 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2565 }
2566 }
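// Worked example (hypothetical addresses): with pointerSize == 8, if the header info
// struct sits at 0x10badf000 and its mhdr_offset field holds -0x2f000, the image's
// mach_header is at 0x10badf000 - 0x2f000 = 0x10bab0000. The offset is self-relative:
// it is added to the address of the mhdr_offset field itself, not to the struct base
// (here they coincide because mhdr_offset is the first field).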
2567
2568 void ClosureBuilder::addDuplicateObjCClassWarning(const char* className,
2569 const char* duplicateDefinitionPath,
2570 const char* canonicalDefinitionPath)
2571 {
2572 if ( _objcDuplicateClassWarnings == nullptr )
2573 _objcDuplicateClassWarnings = PathPool::allocate();
2574 // Use a diagnostic to give us a buffer we can safely print to
2575 Diagnostics diag;
2576 diag.error("Class %s is implemented in both %s and %s. One of the two will be used. Which one is undefined.",
2577 className, canonicalDefinitionPath, duplicateDefinitionPath);
2578 #if BUILDING_CACHE_BUILDER
2579 _objcDuplicateClassWarnings->add(diag.errorMessage().c_str());
2580 #else
2581 _objcDuplicateClassWarnings->add(diag.errorMessage());
2582 #endif
2583 }
2584
2585 void ClosureBuilder::optimizeObjCClasses(const objc_opt::objc_clsopt_t* objcClassOpt,
2586 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2587 const Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString>& duplicateSharedCacheClasses,
2588 ObjCOptimizerImage& image) {
2589
2590 BuilderLoadedImage& li = *image.loadedImage;
2591 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = image.seenClasses;
2592
2593 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2594 const uint32_t pointerSize = ma->pointerSize();
2595 const uint64_t loadAddress = ma->preferredLoadAddress();
2596
2597 // Keep track of any missing weak imports so that we can tell if the superclasses are nil
2598 // This is necessary as the shared cache will be marked with 'no missing weak superclasses'
2599 // and so we need to continue to satisfy that constraint
2600 __block Map<uint64_t, bool, HashUInt64, EqualUInt64> missingWeakImportOffsets;
2601 if (li.hasMissingWeakImports) {
2602 if (ma->hasChainedFixups()) {
2603 const Image* closureImage = image.writer->currentImage();
2604
2605 const Array<Image::ResolvedSymbolTarget> targets = closureImage->chainedTargets();
2606 if ( !targets.empty() ) {
2607 ma->withChainStarts(_diag, closureImage->chainedStartsOffset(), ^(const dyld_chained_starts_in_image* startsInfo) {
2608 ma->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
2609 const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
2610 uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
2611 uint32_t bindOrdinal;
2612 if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
2613 if ( bindOrdinal < targets.count() ) {
2614 const Image::ResolvedSymbolTarget& target = targets[bindOrdinal];
2615 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2616 missingWeakImportOffsets[fixupOffset] = true;
2617 }
2618 else {
2619 image.diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
2620 fixupsStop = true;
2621 }
2622 }
2623 });
2624 });
2625 if (image.diag.hasError())
2626 return;
2627 }
2628 } else {
2629 forEachBind(li, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
2630 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2631 missingWeakImportOffsets[runtimeOffset] = true;
2632 }, ^(const char *strongSymbolName) {
2633 }, ^() { });
2634 }
2635 }
2636
2637 // Class names and data may be in different sections depending on swift vs objc so handle multiple sections
2638 __block MachOAnalyzer::SectionCache classNameSectionCache(ma);
2639 __block MachOAnalyzer::SectionCache classSectionCache(ma);
2640
2641 ma->forEachObjCClass(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t classVMAddr,
2642 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2643 const MachOAnalyzer::ObjCClassInfo &objcClass, bool isMetaClass) {
2644 if (isMetaClass) return;
2645
2646 // Make sure the superclass pointer is not nil
2647 uint64_t superclassRuntimeOffset = classSuperclassVMAddr - loadAddress;
2648 if (missingWeakImportOffsets.find(superclassRuntimeOffset) != missingWeakImportOffsets.end()) {
2649 diag.error("Missing weak superclass");
2650 return;
2651 }
2652
2653 // Does this class need to be fixed up for stable Swift ABI.
2654 // Note the order matches the objc runtime in that we always do this fix before checking for dupes,
2655 // but after excluding classes with missing weak superclasses.
2656 if (objcClass.isUnfixedBackwardDeployingStableSwift()) {
2657 // Class really is stable Swift, pretending to be pre-stable.
2658 // Fix its lie. This involves fixing the FAST bits on the class data value, so record that vmaddr
2659 image.classStableSwiftFixups.push_back(classDataVMAddr - loadAddress);
2660 }
2661
2662 // Get the section for the name
2663 const char* className = nullptr;
2664 MachOAnalyzer::PrintableStringResult classNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2665 __block uint64_t classNameSectionStartVMAddr = 0;
2666 auto classNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2667 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2668 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2669 return false;
2670 }
2671
2672 // We use 32-bit offsets so make sure the section is no larger than that.
2673 uint64_t classNameVMOffset = sectInfo.sectAddr - loadAddress;
2674 if (classNameVMOffset >= (1ULL << 32)) {
2675 return false;
2676 }
2677
2678 classNameSectionStartVMAddr = sectInfo.sectAddr;
2679 return true;
2680 };
2681 uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
2682 className = ma->getPrintableString(classNameVMAddr, classNameResult,
2683 &classNameSectionCache, classNameSectionHandler);
2684
2685 if ( classNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2686 diag.error("Invalid class name for objc optimisation");
2687 return;
2688 }
2689
2690 // If the class also exists in a shared cache image which is loaded, then objc
2691 // would have found that one, regardless of load order. So we can just skip this one.
2692 {
2693 void *cls;
2694 void *hi;
2695 uint32_t index;
2696 uint32_t count = objcClassOpt->getClassHeaderAndIndex(className, cls, hi, index);
2697 if (count == 1) {
2698 // exactly one matching class. Check if it's loaded
2699 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hi, pointerSize);
2700 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2701 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2702
2703 // We have a duplicate class, so check if we've already got it in our map.
2704 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2705 // We haven't seen this one yet
2706 Image::ObjCDuplicateClass duplicateClass;
2707 duplicateClass.sharedCacheClassOptIndex = index;
2708 duplicateClass.sharedCacheClassDuplicateIndex = 0;
2709 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2710 }
2711 }
2712 }
2713 else if (count > 1) {
2714 // more than one matching class - find one that is loaded
2715 void *clslist[count];
2716 void *hilist[count];
2717 objcClassOpt->getClassesAndHeaders(className, clslist, hilist);
2718 for (uint32_t i = 0; i < count; i++) {
2719 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize);
2720 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2721 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2722
2723 // We have a duplicate class, so check if we've already got it in our map.
2724 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2725 // We haven't seen this one yet
2726 Image::ObjCDuplicateClass duplicateClass;
2727 duplicateClass.sharedCacheClassOptIndex = index;
2728 duplicateClass.sharedCacheClassDuplicateIndex = i;
2729 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2730 }
2731
2732 break;
2733 }
2734 }
2735 }
2736 }
2737
2738 // Get the section for the class itself
2739 __block uint64_t classSectionStartVMAddr = 0;
2740 auto classSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2741 // We only have 23 bits in ObjCClassImageOffset to index into the classes
2742 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2743 return false;
2744 }
2745
2746 // We use 32-bit offsets so make sure the section is no larger than that.
2747 uint64_t classDatasVMOffset = sectInfo.sectAddr - loadAddress;
2748 if (classDatasVMOffset >= (1ULL << 32)) {
2749 return false;
2750 }
2751
2752 classSectionStartVMAddr = sectInfo.sectAddr;
2753 return true;
2754 };
2755 if (!classSectionCache.findSectionForVMAddr(classVMAddr, classSectionHandler)) {
2756 diag.error("Invalid class for objc optimisation");
2757 return;
2758 }
2759
2760 // Make sure we have an entry for our image's offsets for later
2761 uint64_t classNameSectionVMOffset = classNameSectionStartVMAddr - loadAddress;
2762 uint64_t classSectionVMOffset = classSectionStartVMAddr - loadAddress;
2763 uint64_t hashTableVMOffsetsIndex = 0;
2764 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2765 if ( (nameAndDataVMOffset.first == classNameSectionVMOffset) && (nameAndDataVMOffset.second == classSectionVMOffset) )
2766 break;
2767 ++hashTableVMOffsetsIndex;
2768 }
2769
2770 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2771 // Didn't find an image entry with this offset. Add one if we have space
2772 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2773 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2774 // No more space. We need to give up
2775 diag.error("No more space for class hash table image");
2776 return;
2777 }
2778 image.classesNameAndDataVMOffsets.push_back({ classNameSectionVMOffset, classSectionVMOffset });
2779 }
2780
2781 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2782
2783 uint64_t classNameOffset = classNameVMAddr - classNameSectionStartVMAddr;
2784 uint64_t classDataOffset = classVMAddr - classSectionStartVMAddr;
2785
2786 closure::Image::ObjCClassNameImageOffset classNameTarget;
2787 classNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2788 classNameTarget.classNameImageOffset = (uint32_t)classNameOffset;
2789
2790 dyld3::closure::Image::ObjCClassImageOffset classDataTarget;
2791 classDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2792 classDataTarget.classData.imageOffset = (uint32_t)classDataOffset;
2793 classDataTarget.classData.isDuplicate = 0;
2794
2795 seenClasses.push_back({ classNameTarget, classDataTarget });
2796 });
2797 }
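// A worked example of the encoding above (illustrative numbers, not taken from a
// real binary): suppose the class-name section starts at VM offset 0x8000, the
// class-data section starts at VM offset 0x10000, and that pair lands at combined
// hash-table image index 2. A class whose name sits at VM offset 0x8040 and whose
// data sits at VM offset 0x10080 is then recorded as:
//     classNameTarget = { classNameImageIndex = 2, classNameImageOffset = 0x40 }
//     classDataTarget = { imageIndex = 2, imageOffset = 0x80, isDuplicate = 0 }
// Both sides are (image index, offset-within-section) pairs, which is why the
// section handlers reject sections too large for the 24-bit and 23-bit offsets.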
2798
2799 void ClosureBuilder::optimizeObjCProtocols(const objc_opt::objc_protocolopt2_t* objcProtocolOpt,
2800 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2801 ObjCOptimizerImage& image) {
2802
2803 BuilderLoadedImage& li = *image.loadedImage;
2804 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenProtocols = image.seenProtocols;
2805
2806 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2807 const uint32_t pointerSize = ma->pointerSize();
2808 const uint64_t loadAddress = ma->preferredLoadAddress();
2809
2810 // Protocol names and data may be in different sections depending on Swift vs. Objective-C, so handle multiple sections
2811 __block MachOAnalyzer::SectionCache protocolNameSectionCache(ma);
2812 __block MachOAnalyzer::SectionCache protocolSectionCache(ma);
2813
2814 ma->forEachObjCProtocol(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t protocolVMAddr,
2815 const dyld3::MachOAnalyzer::ObjCProtocol &objCProtocol) {
2816 if ( objCProtocol.requiresObjCReallocation ) {
2817 // We can't optimize this protocol as the runtime needs all fields to be present
2818 diag.error("Protocol is too small to be optimized");
2819 return;
2820 }
2821 if ( objCProtocol.isaVMAddr != 0 ) {
2822 // We can't optimize this protocol if it has an ISA as we want to override it
2823 diag.error("Protocol ISA cannot be non-zero");
2824 return;
2825 }
2826
2827 // Get the section for the name
2828 const char* protocolName = nullptr;
2829 MachOAnalyzer::PrintableStringResult protocolNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2830 __block uint64_t protocolNameSectionStartVMAddr = 0;
2831 auto protocolNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2832 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2833 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2834 return false;
2835 }
2836
2837 // We use 32-bit offsets so make sure the section is no larger than that.
2838 uint64_t protocolNameVMOffset = sectInfo.sectAddr - loadAddress;
2839 if (protocolNameVMOffset >= (1ULL << 32)) {
2840 return false;
2841 }
2842
2843 protocolNameSectionStartVMAddr = sectInfo.sectAddr;
2844 return true;
2845 };
2846 uint64_t protocolNameVMAddr = objCProtocol.nameVMAddr;
2847 protocolName = ma->getPrintableString(protocolNameVMAddr, protocolNameResult,
2848 &protocolNameSectionCache, protocolNameSectionHandler);
2849
2850 if ( protocolNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2851 diag.error("Invalid protocol name for objc optimisation");
2852 return;
2853 }
2854
2855 // If the protocol also exists in a shared cache image which is loaded, then objc
2856 // would have found that one, regardless of load order. So we can just skip this one.
2857 {
2858 void *cls;
2859 void *hi;
2860 uint32_t count = objcProtocolOpt->getClassAndHeader(protocolName, cls, hi);
2861 if (count == 1) {
2862 // exactly one matching protocol. Check if it's loaded
2863 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hi, pointerSize)) != sharedCacheImagesMap.end())
2864 return;
2865 }
2866 else if (count > 1) {
2867 // more than one matching protocol - find one that is loaded
2868 void *clslist[count];
2869 void *hilist[count];
2870 objcProtocolOpt->getClassesAndHeaders(protocolName, clslist, hilist);
2871 for (uint32_t i = 0; i < count; i++) {
2872 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize)) != sharedCacheImagesMap.end())
2873 return;
2874 }
2875 }
2876 }
2877
2878 // Get the section for the protocol itself
2879 __block uint64_t protocolSectionStartVMAddr = 0;
2880 auto protocolSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2881 // We only have 23 bits in ObjCClassImageOffset to index into the protocols
2882 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2883 return false;
2884 }
2885
2886 // We use 32-bit offsets so make sure the section is no larger than that.
2887 uint64_t protocolDatasVMOffset = sectInfo.sectAddr - loadAddress;
2888 if (protocolDatasVMOffset >= (1ULL << 32)) {
2889 return false;
2890 }
2891
2892 protocolSectionStartVMAddr = sectInfo.sectAddr;
2893 return true;
2894 };
2895 if (!protocolSectionCache.findSectionForVMAddr(protocolVMAddr, protocolSectionHandler)) {
2896 diag.error("Invalid protocol for objc optimisation");
2897 return;
2898 }
2899
2900 // Make sure we have an entry for our image's offsets for later
2901 uint64_t protocolNameSectionVMOffset = protocolNameSectionStartVMAddr - loadAddress;
2902 uint64_t protocolSectionVMOffset = protocolSectionStartVMAddr - loadAddress;
2903 uint64_t hashTableVMOffsetsIndex = 0;
2904 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2905 if ( (nameAndDataVMOffset.first == protocolNameSectionVMOffset) && (nameAndDataVMOffset.second == protocolSectionVMOffset) )
2906 break;
2907 ++hashTableVMOffsetsIndex;
2908 }
2909
2910 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2911 // Didn't find an image entry with this offset. Add one if we have space
2912 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2913 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2914 // No more space. We need to give up
2915 diag.error("No more space for protocol hash table image");
2916 return;
2917 }
2918 image.classesNameAndDataVMOffsets.push_back({ protocolNameSectionVMOffset, protocolSectionVMOffset });
2919 }
2920
2921 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2922
2923 uint64_t protocolNameOffset = protocolNameVMAddr - protocolNameSectionStartVMAddr;
2924 uint64_t protocolDataOffset = protocolVMAddr - protocolSectionStartVMAddr;
2925
2926 closure::Image::ObjCClassNameImageOffset protocolNameTarget;
2927 protocolNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2928 protocolNameTarget.classNameImageOffset = (uint32_t)protocolNameOffset;
2929
2930 dyld3::closure::Image::ObjCClassImageOffset protocolDataTarget;
2931 protocolDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2932 protocolDataTarget.classData.imageOffset = (uint32_t)protocolDataOffset;
2933 protocolDataTarget.classData.isDuplicate = 0;
2934
2935 seenProtocols.push_back({ protocolNameTarget, protocolDataTarget });
2936 });
2937 }
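// Note that protocols reuse the ObjCClassNameImageOffset / ObjCClassImageOffset
// encodings and the same classesNameAndDataVMOffsets table as classes, so a single
// image-index space serves both hash tables. Unlike classes, a protocol that is
// already present in a loaded shared cache image is skipped outright above rather
// than being recorded in a duplicates map.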
2938
2939 // used at launch by dyld when kernel has already mapped main executable
2940 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
2941 {
2942 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
2943 const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
2944 // set up stack based storage for all arrays
2945 BuilderLoadedImage loadImagesStorage[512];
2946 Image::LinkedImage dependenciesStorage[512*8];
2947 InterposingTuple tuplesStorage[64];
2948 Closure::PatchEntry cachePatchStorage[64];
2949 const char* weakDefNameStorage[64];
2950 _loadedImages.setInitialStorage(loadImagesStorage, 512);
2951 _dependencies.setInitialStorage(dependenciesStorage, 512*8);
2952 _interposingTuples.setInitialStorage(tuplesStorage, 64);
2953 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
2954 _weakDefsFromChainedBinds.setInitialStorage(weakDefNameStorage, 64);
2955 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
2956
2957 const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archs, _platform);
2958 if ( mainExecutable == nullptr )
2959 return nullptr;
2960 if ( !mainExecutable->isDynamicExecutable() ) {
2961 _diag.error("not a main executable");
2962 return nullptr;
2963 }
2964 _isLaunchClosure = true;
2965 _allowMissingLazies = true;
2966
2967 _nextIndex = 0;
2968
2969 // add main executable
2970 __block BuilderLoadedImage mainEntry;
2971 mainEntry.loadedFileInfo = fileInfo;
2972 mainEntry.imageNum = 0; // We can't fill this in until we've processed inserted dylibs
2973 mainEntry.unmapWhenDone = false;
2974 mainEntry.contentRebased = false;
2975 mainEntry.hasInits = false;
2976 mainEntry.markNeverUnload = true;
2977 mainEntry.rtldLocal = false;
2978 mainEntry.isBadImage = false;
2979 mainEntry.mustBuildClosure = true;
2980 mainEntry.hasMissingWeakImports = false;
2981 mainEntry.overrideImageNum = 0;
2982
2983 // Set the executable load path so that @executable_path can use it later
2984 _mainProgLoadPath = fileInfo.path;
2985
2986 // add any DYLD_INSERT_LIBRARIES
2987 _pathOverrides.forEachInsertedDylib(^(const char* dylibPath, bool &stop) {
2988 LoadedImageChain chainMain = { nullptr, mainEntry };
2989 BuilderLoadedImage* foundTopImage;
2990 if ( !findImage(dylibPath, chainMain, foundTopImage, LinkageType::kInserted, 0, true) ) {
2991 if ( !allowInsertFailures ) {
2992 if ( _diag.noError() )
2993 _diag.error("could not load inserted dylib %s", dylibPath);
2994 stop = true;
2995 return;
2996 }
2997 _diag.clearError(); // FIXME add way to plumb back warning
2998 }
2999 });
3000
3001 if ( _diag.hasError() )
3002 return nullptr;
3003
3004 _mainProgLoadIndex = (uint32_t)_loadedImages.count();
3005 mainEntry.imageNum = _startImageNum + _nextIndex++;
3006 _loadedImages.push_back(mainEntry);
3007
3008 // get mach_headers for all images needed to launch this main executable
3009 LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
3010 recursiveLoadDependents(chainStart);
3011 if ( _diag.hasError() )
3012 return nullptr;
3013 for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
3014 LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
3015 recursiveLoadDependents(insertChainStart);
3016 if ( _diag.hasError() )
3017 return nullptr;
3018 }
3019 loadDanglingUpwardLinks();
3020
3021 // If we have an on-disk image then we need all images that depend on the disk image to get a new
3022 // initializer order. It's not enough to just do the top-level image, as we may dlopen while in dlopen
3023 invalidateInitializerRoots();
3024
3025 // now that everything loaded, set _libDyldImageNum and _libSystemImageNum
3026 for (BuilderLoadedImage& li : _loadedImages) {
3027 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
3028 _libDyldImageNum = li.imageNum;
3029 else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
3030 _libSystemImageNum = li.imageNum;
3031 }
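// These two image numbers let the closure name libdyld.dylib (whose entry vector
// dyld calls into after launch) and libSystem.B.dylib (which must be initialized
// first) without any path lookup at runtime; they are written out later when
// addClosureInfo() records the closure attributes.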
3032
3033 // only some images need to go into closure (non-rooted ones from dyld cache do not)
3034 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3035 for (BuilderLoadedImage& li : _loadedImages) {
3036 if ( li.mustBuildClosure ) {
3037 writers.push_back(ImageWriter());
3038 buildImage(writers.back(), li);
3039 if ( _diag.hasError() )
3040 return nullptr;
3041 }
3042 }
3043
3044 bool optimizedObjC = optimizeObjC(writers);
3045
3046 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3047 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3048 BuilderLoadedImage& li = _loadedImages[imageIndex];
3049 if ( li.mustBuildClosure ) {
3050 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3051 writerIndex++;
3052 }
3053 }
3054
3055 // combine all Image objects into one ImageArray
3056 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3057 for (ImageWriter& writer : writers) {
3058 imageArrayWriter.appendImage(writer.finalize());
3059 writer.deallocate();
3060 }
3061 const ImageArray* imageArray = imageArrayWriter.finalize();
3062
3063 // merge ImageArray object into LaunchClosure object
3064 __block LaunchClosureWriter closureWriter(imageArray);
3065
3066 if (optimizedObjC) {
3067 if (!_objcSelectorsHashTable.empty())
3068 closureWriter.setObjCSelectorInfo(_objcSelectorsHashTable, _objcSelectorsHashTableImages);
3069
3070 if (!_objcClassesHashTableImages.empty()) {
3071 closureWriter.setObjCClassAndProtocolInfo(_objcClassesHashTable, _objcProtocolsHashTable,
3072 _objcClassesHashTableImages);
3073 }
3074
3075 if ( _objcDuplicateClassWarnings != nullptr ) {
3076 _objcDuplicateClassWarnings->forEachPath(^(const char* warning) {
3077 closureWriter.addWarning(Closure::Warning::duplicateObjCClass, warning);
3078 });
3079 }
3080
3081 if (!_objcClassesDuplicatesHashTable.empty())
3082 closureWriter.setObjCDuplicateClassesInfo(_objcClassesDuplicatesHashTable);
3083 }
3084
3085 // record shared cache info
3086 if ( _dyldCache != nullptr ) {
3087 // record cache UUID
3088 uuid_t cacheUUID;
3089 _dyldCache->getUUID(cacheUUID);
3090 closureWriter.setDyldCacheUUID(cacheUUID);
3091
3092 // record any cache patching needed because of dylib overriding cache
3093 for (const BuilderLoadedImage& li : _loadedImages) {
3094 if ( li.overrideImageNum != 0 ) {
3095 uint32_t imageIndex = li.overrideImageNum - (uint32_t)_dyldImageArray->startImageNum();
3096 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3097 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3098 return (const MachOLoaded*)findDependent(mh, depIndex);
3099 };
3100 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3101 _dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3102 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3103 Diagnostics patchDiag;
3104 Closure::PatchEntry patch;
3105 patch.overriddenDylibInCache = li.overrideImageNum;
3106 patch.exportCacheOffset = cacheOffsetOfImpl;
3107 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3108 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3109 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3110 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3111 patch.replacement.image.offset = foundInfo.value;
3112 }
3113 else {
3114 // this means the symbol is missing in the cache override dylib, so set any uses to NULL
3115 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3116 patch.replacement.absolute.value = 0;
3117 }
3118 patches.push_back(patch);
3119 });
3120 closureWriter.addCachePatches(patches);
3121 }
3122 }
3123
3124 // handle any extra weak-def coalescing needed by chained fixups
3125 if ( !_weakDefsFromChainedBinds.empty() ) {
3126 for (const char* symbolName : _weakDefsFromChainedBinds) {
3127 Image::ResolvedSymbolTarget cacheOverrideTarget;
3128 bool haveCacheOverride = false;
3129 bool foundCacheOverrideIsWeakDef = false;
3130 for (const BuilderLoadedImage& li : _loadedImages) {
3131 if ( !li.loadAddress()->hasWeakDefs() )
3132 continue;
3133 Image::ResolvedSymbolTarget target;
3134 ResolvedTargetInfo targetInfo;
3135 if ( findSymbolInImage(li.loadAddress(), symbolName, 0, false, false, target, targetInfo) ) {
3136 if ( li.loadAddress()->inDyldCache() ) {
3137 if ( haveCacheOverride ) {
3138 Closure::PatchEntry patch;
3139 patch.exportCacheOffset = (uint32_t)target.sharedCache.offset;
3140 patch.overriddenDylibInCache = li.imageNum;
3141 patch.replacement = cacheOverrideTarget;
3142 _weakDefCacheOverrides.push_back(patch);
3143 }
3144 else {
3145 // found first in cached dylib, so no need to patch cache for this symbol
3146 break;
3147 }
3148 }
3149 else {
3150 // found image that exports this symbol and is not in cache
3151 if ( !haveCacheOverride || (foundCacheOverrideIsWeakDef && !targetInfo.isWeakDef) ) {
3152 // update cache to use this symbol if it is the first found, or the first non-weak found
3153 cacheOverrideTarget = target;
3154 foundCacheOverrideIsWeakDef = targetInfo.isWeakDef;
3155 haveCacheOverride = true;
3156 }
3157 }
3158 }
3159 }
3160 }
3161 }
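// A sketch of the coalescing rule above, with hypothetical dylibs: if on-disk
// dylib A and cache dylib B both export the same weak symbol and A comes first
// in load order, A becomes the override and every cache dylib encountered later
// that also exports the symbol gets a PatchEntry redirecting its export to A.
// If a cache dylib is found first, the cache's own coalescing is already correct
// and the early 'break' records no patches for that symbol.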
3162
3163 // record any cache patching needed because weak-def C++ symbols override dyld cache
3164 if ( !_weakDefCacheOverrides.empty() )
3165 closureWriter.addCachePatches(_weakDefCacheOverrides);
3166
3167 }
3168
3169 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3170 // if closure is built on-device for iOS, then record boot UUID
3171 char bootSessionUUID[256] = { 0 };
3172 size_t bootSize = sizeof(bootSessionUUID);
3173 if ( sysctlbyname("kern.bootsessionuuid", bootSessionUUID, &bootSize, NULL, 0) == 0 )
3174 closureWriter.setBootUUID(bootSessionUUID);
3175 #endif
3176
3177 // record any interposing info
3178 if ( !_interposingDisabled ) {
3179 imageArray->forEachImage(^(const Image* image, bool &stop) {
3180 if ( !image->inDyldCache() )
3181 addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
3182 });
3183 }
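// Interposing tuples come from __DATA,__interpose sections in images outside the
// dyld cache; recording them here lets applyInterposing() below rewrite the
// already-serialized fixups, so interposed functions cost nothing extra at launch.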
3184
3185 // modify fixups in contained Images by applying interposing tuples
3186 closureWriter.applyInterposing((const LaunchClosure*)closureWriter.currentTypedBytes());
3187
3188 // set flags
3189 closureWriter.setUsedInterposing(_interposingTuplesUsed);
3190 closureWriter.setUsedAtPaths(_atPathUsed);
3191 closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
3192 closureWriter.setHasInsertedLibraries(_mainProgLoadIndex > 0);
3193 closureWriter.setInitImageCount((uint32_t)_loadedImages.count());
3194
3195 // add other closure attributes
3196 addClosureInfo(closureWriter);
3197
3198 // make result
3199 const LaunchClosure* result = closureWriter.finalize();
3200 imageArrayWriter.deallocate();
3201
3202 timer.setData4(dyld3::DyldTimingBuildClosure::LaunchClosure_Built);
3203
3204 return result;
3205 }
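// A minimal usage sketch (illustrative only; fileSys, dyldCache, archs,
// pathOverrides, errorInfo, and mainFileInfo are assumed to be set up by the
// caller, and the real call sites live in dyld and libdyld):
//
//     ClosureBuilder builder(kFirstLaunchClosureImageNum, fileSys, dyldCache,
//                            true /*cache is live*/, archs, pathOverrides,
//                            ClosureBuilder::AtPath::all, true /*allow relative paths*/,
//                            &errorInfo, Platform::macOS, nullptr /*no handlers*/);
//     const LaunchClosure* closure = builder.makeLaunchClosure(mainFileInfo,
//                                                              false /*allowInsertFailures*/);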
3206
3207 // used by libdyld for dlopen()
3208 const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
3209 closure::ImageNum callerImageNum, bool noLoad, bool forceBindLazies, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
3210 {
3211 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
3212 // set up stack based storage for all arrays
3213 BuilderLoadedImage loadImagesStorage[300];
3214 Image::LinkedImage dependenciesStorage[128];
3215 Closure::PatchEntry cachePatchStorage[64];
3216 _loadedImages.setInitialStorage(loadImagesStorage, 300);
3217 _dependencies.setInitialStorage(dependenciesStorage, 128);
3218 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
3219 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
3220
3221 // fill in builder array from already loaded images
3222 bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
3223 uintptr_t callerImageIndex = UINTPTR_MAX;
3224 for (const LoadedImage& ali : alreadyLoadedList) {
3225 const Image* image = ali.image();
3226 const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
3227 bool inDyldCache = ma->inDyldCache();
3228 BuilderLoadedImage entry;
3229 ImageNum overrideImageNum;
3230 entry.loadedFileInfo.path = image->path();
3231 entry.loadedFileInfo.fileContent = ma;
3232 entry.loadedFileInfo.sliceOffset = 0;
3233 entry.loadedFileInfo.inode = 0;
3234 entry.loadedFileInfo.mtime = 0;
3235 entry.imageNum = image->imageNum();
3236 entry.dependents = image->dependentsArray();
3237 entry.unmapWhenDone = false;
3238 entry.contentRebased = inDyldCache;
3239 entry.hasInits = false;
3240 entry.markNeverUnload = image->neverUnload();
3241 entry.rtldLocal = ali.hideFromFlatSearch();
3242 entry.isBadImage = false;
3243 entry.mustBuildClosure = false;
3244 entry.hasMissingWeakImports = false;
3245 entry.overrideImageNum = 0;
3246 if ( !inDyldCache && image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
3247 entry.overrideImageNum = overrideImageNum;
3248 canUseSharedCacheClosure = false;
3249 }
3250 if ( !inDyldCache || cachedDylibsExpectedOnDisk )
3251 image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
3252 if ( entry.imageNum == callerImageNum )
3253 callerImageIndex = _loadedImages.count();
3254 _loadedImages.push_back(entry);
3255 }
3256 _alreadyInitedIndex = (uint32_t)_loadedImages.count();
3257
3258 // find main executable (may be needed for @executable_path)
3259 _isLaunchClosure = false;
3260 for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
3261 if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
3262 _mainProgLoadIndex = i;
3263 _mainProgLoadPath = _loadedImages[i].path();
3264 break;
3265 }
3266 }
3267
3268 // We can't use an existing dlopen closure if the main closure had interposing tuples
3269 if (canUseSharedCacheClosure) {
3270 if (mainClosure->hasInterposings())
3271 canUseSharedCacheClosure = false;
3272 }
3273
3274 // add top level dylib being dlopen()ed
3275 BuilderLoadedImage* foundTopImage;
3276 _nextIndex = 0;
3277 // @rpath uses the caller's LC_RPATH, then the main executable's LC_RPATH
3278 BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
3279 LoadedImageChain chainCaller = { nullptr, callerImage };
3280 LoadedImageChain chainMain = { &chainCaller, _loadedImages[_mainProgLoadIndex] };
3281 if ( !findImage(path, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3282 // If we didn't find the image, it might be a symlink to something in the dyld cache that is not on disk
3283 if ( (_dyldCache != nullptr) && !_dyldCache->header.dylibsExpectedOnDisk ) {
3284 char resolvedPath[PATH_MAX];
3285 if ( _fileSystem.getRealPath(path, resolvedPath) ) {
3286 _diag.clearError();
3287 if ( !findImage(resolvedPath, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3288 return nullptr;
3289 }
3290 } else {
3291 // We didn't find a new path from realpath
3292 return nullptr;
3293 }
3294 } else {
3295 // cached dylibs are on disk, so don't call realpath() again; it would have been found by the first call to findImage()
3296 return nullptr;
3297 }
3298 }
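// For example, a dlopen() path that is a symlink to a dylib that now lives only
// inside the shared cache fails the first findImage() on the symlink path, but
// can succeed once getRealPath() has produced the canonical install name that
// the cache records.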
3299
3300 // exit early in RTLD_NOLOAD mode
3301 if ( noLoad ) {
3302 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_NoLoad);
3303 // if no new images added to _loadedImages, then requested path was already loaded
3304 if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
3305 *topImageNum = foundTopImage->imageNum;
3306 else
3307 *topImageNum = 0;
3308 return nullptr;
3309 }
3310
3311 // fast path if roots are not allowed and the target is a dyld cache dylib or another image prebuilt with the cache
3312 if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
3313 if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
3314 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3315 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3316 else
3317 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3318 *topImageNum = foundTopImage->imageNum;
3319 return nullptr;
3320 }
3321 }
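// ImageNum ranges encode provenance: values up to kLastDyldCacheImageNum are
// dylibs in the shared cache, values below kFirstLaunchClosureImageNum cover
// images prebuilt alongside the cache, and anything at or above
// kFirstLaunchClosureImageNum belongs to a launch or dlopen closure. On a
// production cache the first two ranges already have prebuilt closures, so no
// new DlopenClosure needs to be serialized for them.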
3322
3323 // recursive load dependents
3324 // @rpath for anything the top dylib depends on uses LC_RPATH from the caller, the main executable, and the dylib being dlopen()ed
3325 LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
3326 recursiveLoadDependents(chainTopDylib, canUseSharedCacheClosure);
3327 if ( _diag.hasError() )
3328 return nullptr;
3329 loadDanglingUpwardLinks(canUseSharedCacheClosure);
3330 if ( _diag.hasError() )
3331 return nullptr;
3332
3333 // RTLD_NOW means fail the dlopen() if a symbol cannot be bound
3334 _allowMissingLazies = !forceBindLazies;
3335
3336 // only some images need to go into closure (ones from dyld cache do not, unless the cache format changed)
3337 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3338 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3339 // If we have an on-disk image then we need all images that depend on the disk image to get a new
3340 // initializer order. It's not enough to just do the top-level image, as we may dlopen while in dlopen
3341 invalidateInitializerRoots();
3342
3343 for (uintptr_t loadedImageIndex = 0; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
3344 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
3345 if ( li.mustBuildClosure ) {
3346 writers.push_back(ImageWriter());
3347 buildImage(writers.back(), li);
3348 if ( _diag.hasError() )
3349 return nullptr;
3350 }
3351 }
3352
3353 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3354 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3355 BuilderLoadedImage& li = _loadedImages[imageIndex];
3356 if ( li.mustBuildClosure ) {
3357 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3358 writerIndex++;
3359 }
3360 }
3361 }
3362 if ( _diag.hasError() )
3363 return nullptr;
3364
3365 // check if top image loaded is in shared cache along with everything it depends on
3366 *topImageNum = foundTopImage->imageNum;
3367 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3368 if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
3369 // We used a shared cache built closure, but now discovered roots. We need to try again
3370 *topImageNum = 0;
3371 return sRetryDlopenClosure;
3372 }
3373 } else {
3374 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3375 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3376 else
3377 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3378 return nullptr;
3379 }
3380
3381 // combine all Image objects into one ImageArray
3382 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3383 for (ImageWriter& writer : writers) {
3384 imageArrayWriter.appendImage(writer.finalize());
3385 writer.deallocate();
3386 }
3387 const ImageArray* imageArray = imageArrayWriter.finalize();
3388
3389 // merge ImageArray object into LaunchClosure object
3390 DlopenClosureWriter closureWriter(imageArray);
3391
3392 // add other closure attributes
3393 closureWriter.setTopImageNum(foundTopImage->imageNum);
3394
3395 // record any cache patching needed because of dylib overriding cache
3396 if ( _dyldCache != nullptr ) {
3397 for (const BuilderLoadedImage& li : _loadedImages) {
3398 if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
3399 const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
3400 uint32_t imageIndex = cacheImage->imageNum() - (uint32_t)_dyldCache->cachedDylibsImageArray()->startImageNum();
3401 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3402 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3403 return (const MachOLoaded*)findDependent(mh, depIndex);
3404 };
3405 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3406 _dyldCache->forEachPatchableExport(imageIndex,
3407 ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3408 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3409 Diagnostics patchDiag;
3410 Closure::PatchEntry patch;
3411 patch.overriddenDylibInCache = li.overrideImageNum;
3412 patch.exportCacheOffset = cacheOffsetOfImpl;
3413 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3414 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3415 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3416 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3417 patch.replacement.image.offset = foundInfo.value;
3418 }
3419 else {
3420 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3421 patch.replacement.absolute.value = 0;
3422 }
3423 patches.push_back(patch);
3424 });
3425 closureWriter.addCachePatches(patches);
3426 }
3427 }
3428 }
3429
3430 // modify fixups in contained Images by applying interposing tuples
3431 closureWriter.applyInterposing(mainClosure);
3432
3433 // dlopen closures should never keep track of missing paths, as we don't cache these closures.
3434 assert(_mustBeMissingPaths == nullptr);
3435
3436 // make final DlopenClosure object
3437 const DlopenClosure* result = closureWriter.finalize();
3438 imageArrayWriter.deallocate();
3439 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_Built);
3440 return result;
3441 }
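// A sketch of the result protocol implied above (not verbatim libdyld code;
// builder, mainClosure, loadedList, and callerNum are assumed):
//
//     closure::ImageNum topNum;
//     const DlopenClosure* dc = builder.makeDlopenClosure(path, mainClosure, loadedList,
//                                                         callerNum, noLoad, rtldNow,
//                                                         true /*canUseSharedCacheClosure*/,
//                                                         &topNum);
//     if ( dc == ClosureBuilder::sRetryDlopenClosure ) {
//         // roots were discovered; rebuild with canUseSharedCacheClosure=false
//     } else if ( dc == nullptr ) {
//         // error (diag set), RTLD_NOLOAD result, or topNum alone identifies a
//         // prebuilt closure image
//     }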
3442
3443
3444 // used by dyld_closure_util
3445 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
3446 {
3447 char realerPath[MAXPATHLEN];
3448 closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archs, _platform, realerPath);
3449 const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
3450 loadedFileInfo.path = mainPath;
3451 if (_diag.hasError())
3452 return nullptr;
3453 if (mh == nullptr) {
3454 _diag.error("could not load file");
3455 return nullptr;
3456 }
3457 if (!mh->isDynamicExecutable()) {
3458 _diag.error("file is not an executable");
3459 return nullptr;
3460 }
3461 const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
3462 const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
3463 loadedFileInfo.unload(loadedFileInfo);
3464 return launchClosure;
3465 }
3466
3467 void ClosureBuilder::setDyldCacheInvalidFormatVersion() {
3468 _dyldCacheInvalidFormatVersion = true;
3469 }
3470
3471
3472 // used by dyld shared cache builder
3473 const ImageArray* ClosureBuilder::makeDyldCacheImageArray(bool customerCache, const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
3474 {
3475 // because this is run in the cache builder using dispatch_apply() there is minimal stack space
3476 // so set up storage for all arrays to be vm_allocated
3477 uintptr_t maxImageCount = dylibs.count() + 16;
3478 _loadedImages.reserve(maxImageCount);
3479 _dependencies.reserve(maxImageCount*16);
3480
3481 _makingDyldCacheImages = true;
3482 _allowMissingLazies = false;
3483 _makingCustomerCache = customerCache;
3484 _aliases = &aliases;
3485
3486 // build _loadedImages[] with every dylib in cache
3487 __block ImageNum imageNum = _startImageNum;
3488 for (const CachedDylibInfo& aDylibInfo : dylibs) {
3489 BuilderLoadedImage entry;
3490 entry.loadedFileInfo = aDylibInfo.fileInfo;
3491 entry.imageNum = imageNum++;
3492 entry.unmapWhenDone = false;
3493 entry.contentRebased = false;
3494 entry.hasInits = false;
3495 entry.markNeverUnload = true;
3496 entry.rtldLocal = false;
3497 entry.isBadImage = false;
3498 entry.mustBuildClosure = false;
3499 entry.hasMissingWeakImports = false;
3500 entry.overrideImageNum = 0;
3501 _loadedImages.push_back(entry);
3502 }
3503
3504 // wire up dependencies between cached dylibs
3505 for (BuilderLoadedImage& li : _loadedImages) {
3506 LoadedImageChain chainStart = { nullptr, li };
3507 recursiveLoadDependents(chainStart);
3508 if ( _diag.hasError() )
3509 break;
3510 }
3511 assert(_loadedImages.count() == dylibs.count());
3512
3513 // create an ImageWriter for each cached dylib
3514 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3515 for (BuilderLoadedImage& li : _loadedImages) {
3516 writers.push_back(ImageWriter());
3517 buildImage(writers.back(), li);
3518 }
3519
3520 // add initializer order into each dylib
3521 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3522 for (const BuilderLoadedImage& li : _loadedImages) {
3523 uint32_t index = li.imageNum - _startImageNum;
3524 computeInitOrder(writers[index], index);
3525 }
3526
3527 // combine all Image objects into one ImageArray
3528 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3529 for (ImageWriter& writer : writers) {
3530 imageArrayWriter.appendImage(writer.finalize());
3531 writer.deallocate();
3532 }
3533 const ImageArray* imageArray = imageArrayWriter.finalize();
3534
3535 return imageArray;
3536 }
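// Because image numbers are handed out sequentially from _startImageNum, the
// loop above can use (imageNum - _startImageNum) as a direct index into
// writers[]; the ImageArrayWriter preserves that same ordering.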
3537
3538
3539 #if BUILDING_CACHE_BUILDER
3540 const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
3541 {
3542 // because this is run in the cache builder using dispatch_apply() there is minimal stack space
3543 // so set up storage for all arrays to be vm_allocated
3544 uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
3545 _loadedImages.reserve(maxImageCount);
3546 _dependencies.reserve(maxImageCount*16);
3547
3548 // build _loadedImages[] with every dylib in cache, followed by others
3549 _nextIndex = 0;
3550 for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
3551 BuilderLoadedImage entry;
3552 entry.loadedFileInfo = aDylibInfo;
3553 entry.imageNum = _startImageNum + _nextIndex++;
3554 entry.unmapWhenDone = false;
3555 entry.contentRebased = false;
3556 entry.hasInits = false;
3557 entry.markNeverUnload = false;
3558 entry.rtldLocal = false;
3559 entry.isBadImage = false;
3560 entry.mustBuildClosure = false;
3561 entry.hasMissingWeakImports = false;
3562 entry.overrideImageNum = 0;
3563 _loadedImages.push_back(entry);
3564 }
3565
3566 // wire up dependencies for the other dylibs
3567 // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
3568 // to check the count on each iteration.
3569 for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
3570 BuilderLoadedImage& li = _loadedImages[index];
3571 LoadedImageChain chainStart = { nullptr, li };
3572 recursiveLoadDependents(chainStart);
3573 if ( _diag.hasError() ) {
3574 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3575 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3576 _diag.clearError();
3577 li.isBadImage = true; // mark bad
3578 }
3579 }
3580
3581 auto invalidateBadImages = [&]() {
3582 // Invalidate images with bad dependencies
3583 while (true) {
3584 bool madeChange = false;
3585 for (BuilderLoadedImage& li : _loadedImages) {
3586 if (li.isBadImage) {
3587 // Already invalidated
3588 continue;
3589 }
3590 for (Image::LinkedImage depIndex : li.dependents) {
3591 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
3592 continue;
3593 if ( depIndex.imageNum() >= dyld3::closure::kLastDyldCacheImageNum ) {
3594 // dlopen closures can only depend on the shared cache. This is because if foo.dylib links bar.dylib
3595 // and bar.dylib is loaded into the launch closure, then the dlopen closure for foo.dylib wouldn't see
3596 // bar.dylib at the image num in the launch closure
3597 _diag.warning("while building dlopen closure for %s: dependent dylib is not from shared cache", li.loadedFileInfo.path);
3598 li.isBadImage = true; // mark bad
3599 madeChange = true;
3600 continue;
3601 }
3602 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
3603 if (depImage.isBadImage) {
3604 _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
3605 li.isBadImage = true; // mark bad
3606 madeChange = true;
3607 }
3608 }
3609 }
3610 if (!madeChange)
3611 break;
3612 }
3613 };
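// invalidateBadImages() is a fixed-point iteration: marking one image bad can
// invalidate images that depend on it, so it keeps sweeping until a full pass
// makes no change. Worst case this is quadratic in the image count, which is
// acceptable since it only runs inside the cache builder.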
3614
3615 invalidateBadImages();
3616
3617 // create an ImageWriter for each non-cache dylib
3618 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3619 for (BuilderLoadedImage& li : _loadedImages) {
3620 if ( li.isBadImage ) {
3621 writers.push_back(ImageWriter());
3622 writers.back().setInvalid();
3623 continue;
3624 }
3625 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3626 continue;
3627 writers.push_back(ImageWriter());
3628 buildImage(writers.back(), li);
3629 if ( _diag.hasError() ) {
3630 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3631 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3632 _diag.clearError();
3633 li.isBadImage = true; // mark bad
3634 writers.back().setInvalid();
3635 }
3636 }
3637
3638 invalidateBadImages();
3639
3640 // add initializer order into each dylib
3641 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3642 for (const BuilderLoadedImage& li : _loadedImages) {
3643 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3644 continue;
3645 if (li.isBadImage)
3646 continue;
3647 uint32_t index = li.imageNum - _startImageNum;
3648 computeInitOrder(writers[index], index);
3649 }
3650
3651 // combine all Image objects into one ImageArray
3652 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3653 for (ImageWriter& writer : writers) {
3654 imageArrayWriter.appendImage(writer.finalize());
3655 writer.deallocate();
3656 }
3657 const ImageArray* imageArray = imageArrayWriter.finalize();
3658
3659 return imageArray;
3660 }
3661 #endif
3662
3663
3664 bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
3665 {
3666 for (const LoadedImage& ali : loadedList) {
3667 if ( ali.image()->representsImageNum(imageNum) )
3668 return true;
3669 }
3670 return false;
3671 }
3672
3673 void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
3674 {
3675 // breadth first load
3676 STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
3677 image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
3678 if ( !inLoadedImageArray(loadedList, depImageNum) ) {
3679 const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
3680 loadedList.push_back(LoadedImage::make(depImage));
3681 needToRecurse.push_back(depImage);
3682 }
3683 });
3684
3685 // recurse load
3686 for (const Image* img : needToRecurse) {
3687 buildLoadOrderRecurse(loadedList, imagesArrays, img);
3688 }
3689 }
3690
3691 void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
3692 {
3693 const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImage());
3694 loadedList.push_back(LoadedImage::make(topImage));
3695 buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
3696 }
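// A small worked example with hypothetical images: if the closure's top image A
// depends on B and C, and B depends on D, loadedList comes out as A, B, C, D.
// All of an image's direct dependents are appended before any grandchildren are
// visited, matching the breadth-first order dyld uses at runtime.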
3697
3698
3699
3700 //////////////////////////// ObjCStringTable ////////////////////////////////////////
3701
3702 template<typename PerfectHashT, typename ImageOffsetT>
3703 void ObjCStringTable::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings)
3704 {
3705 ObjCSelectorOpt::StringTarget sentinel = (ObjCSelectorOpt::StringTarget)ImageOffsetT::sentinelValue;
3706 // Set header
3707 capacity = phash.capacity;
3708 occupied = phash.occupied;
3709 shift = phash.shift;
3710 mask = phash.mask;
3711 sentinelTarget = sentinel;
3712 roundedTabSize = std::max(phash.mask+1, 4U);
3713 salt = phash.salt;
3714
3715 // Set hash data
3716 for (uint32_t i = 0; i < 256; i++) {
3717 scramble[i] = phash.scramble[i];
3718 }
3719 for (uint32_t i = 0; i < phash.mask+1; i++) {
3720 tab[i] = phash.tab[i];
3721 }
3722
3723 dyld3::Array<StringTarget> targetsArray = targets();
3724 dyld3::Array<StringHashCheckByte> checkBytesArray = checkBytes();
3725
3726 // Set offsets to the sentinel
3727 for (uint32_t i = 0; i < phash.capacity; i++) {
3728 targetsArray[i] = sentinel;
3729 }
3730 // Set checkbytes to 0
3731 for (uint32_t i = 0; i < phash.capacity; i++) {
3732 checkBytesArray[i] = 0;
3733 }
3734
3735 // Set real string offsets and checkbytes
3736 for (const auto& s : strings) {
3737 assert(s.second.raw != sentinelTarget);
3738 uint32_t h = hash(s.first);
3739 targetsArray[h] = s.second.raw;
3740 checkBytesArray[h] = checkbyte(s.first);
3741 }
3742 }
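// A sketch of the reader side this layout serves (inferred from the fields
// written above, not copied from the lookup code): a lookup hashes the key with
// the same scramble/tab/salt data, then uses the check byte to reject most
// non-matching strings without touching the string itself:
//
//     uint32_t h = hash(key);                          // same perfect-hash function
//     if ( checkBytesArray[h] != checkbyte(key) )
//         return sentinelTarget;                       // definitely not present
//     return targetsArray[h];                          // candidate ImageOffsetT value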
3743
3744 //////////////////////////// ObjCClassOpt ////////////////////////////////////////
3745
3746
3747 template<typename PerfectHashT, typename ImageOffsetT, typename ClassesMapT>
3748 void ObjCClassOpt::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings,
3749 const ClassesMapT& classes, uint32_t preCalculatedDuplicateCount)
3750 {
3751 ObjCStringTable::write(phash, strings);
3752
3753 __block dyld3::Array<ClassTarget> classOffsetsArray = classOffsets();
3754 __block dyld3::Array<ClassTarget> duplicateOffsetsArray = duplicateOffsets(preCalculatedDuplicateCount);
3755
3756 // Set class offsets to the sentinel
3757 for (uint32_t i = 0; i < capacity; i++) {
3758 classOffsetsArray[i].raw = dyld3::closure::Image::ObjCImageOffset::sentinelValue;
3759 }
3760
3761 classes.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values, uint64_t valuesCount) {
3762 uint32_t keyIndex = getIndex(key);
3763 assert(keyIndex != indexNotFound);
3764 assert(classOffsetsArray[keyIndex].raw == dyld3::closure::Image::ObjCImageOffset::sentinelValue);
3765
3766 if (valuesCount == 1) {
3767 // Only one entry so write it into the class offsets directly
3768 Image::ObjCClassImageOffset classImageOffset = *(values[0]);
3769 assert(classImageOffset.classData.isDuplicate == 0);
3770 classOffsetsArray[keyIndex] = classImageOffset;
3771 return;
3772 }
3773
3774 // We have more than one value. We add a placeholder to the class offsets which records the start
3775 // of this class's contiguous run of entries in the duplicates array
3776 uint32_t dest = duplicateCount();
3777 duplicateCount() += valuesCount;
3778
3779 Image::ObjCClassImageOffset classImagePlaceholder;
3780 assert(valuesCount < (1 << 8));
3781 classImagePlaceholder.duplicateData.count = (uint32_t)valuesCount;
3782 classImagePlaceholder.duplicateData.index = dest;
3783 classImagePlaceholder.duplicateData.isDuplicate = 1;
3784 classOffsetsArray[keyIndex] = classImagePlaceholder;
3785
3786 for (uint64_t i = 0; i != valuesCount; ++i) {
3787 Image::ObjCClassImageOffset classImageOffset = *(values[i]);
3788 assert(classImageOffset.classData.isDuplicate == 0);
3789 duplicateOffsetsArray.push_back(classImageOffset);
3790 }
3791 });
3792 }
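// A worked example of the duplicate encoding above (illustrative values): if a
// key has 3 implementations and the duplicates array currently holds 5 entries,
// its slot in classOffsetsArray becomes
//     { duplicateData = { count = 3, index = 5, isDuplicate = 1 } }
// and the 3 real ObjCClassImageOffset entries are appended at indices 5..7 of
// duplicateOffsetsArray. A reader that sees isDuplicate == 1 fans out to that
// contiguous run instead of a single class.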
3793
3794 } // namespace closure
3795 } // namespace dyld3