/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <ext/__hash>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

#include <mach-o/dyld_priv.h>

#include "ClosureWriter.h"
#include "ClosureBuilder.h"
#include "MachOAnalyzer.h"
#include "libdyldEntryVector.h"
#include "Tracing.h"

#define CLOSURE_SELOPT_WRITE
#include "objc-shared-cache.h"

namespace dyld3 {
namespace closure {


const DlopenClosure* ClosureBuilder::sRetryDlopenClosure = (const DlopenClosure*)(-1);

ClosureBuilder::ClosureBuilder(uint32_t startImageNum, const FileSystem& fileSystem, const DyldSharedCache* dyldCache, bool dyldCacheIsLive,
                               const GradedArchs& archs, const PathOverrides& pathOverrides, AtPath atPathHandling, bool allowRelativePaths,
                               LaunchErrorInfo* errorInfo, Platform platform, const CacheDylibsBindingHandlers* handlers)
    : _fileSystem(fileSystem), _dyldCache(dyldCache), _pathOverrides(pathOverrides), _archs(archs), _platform(platform), _startImageNum(startImageNum),
      _handlers(handlers), _atPathHandling(atPathHandling), _launchErrorInfo(errorInfo), _dyldCacheIsLive(dyldCacheIsLive), _allowRelativePaths(allowRelativePaths)
{
    if ( dyldCache != nullptr ) {
        _dyldImageArray = dyldCache->cachedDylibsImageArray();
        if ( (dyldCache->header.otherImageArrayAddr != 0) && (dyldCache->header.progClosuresSize == 0) )
            _makingClosuresInCache = true;
    }
}


ClosureBuilder::~ClosureBuilder() {
    if ( _tempPaths != nullptr )
        PathPool::deallocate(_tempPaths);
    if ( _mustBeMissingPaths != nullptr )
        PathPool::deallocate(_mustBeMissingPaths);
    if ( _objcDuplicateClassWarnings != nullptr )
        PathPool::deallocate(_objcDuplicateClassWarnings);
}

bool ClosureBuilder::findImage(const char* loadPath, const LoadedImageChain& forImageChain, BuilderLoadedImage*& foundImage, LinkageType linkageType,
                               uint32_t compatVersion, bool canUseSharedCacheClosure)
{
    // There shouldn't be an error here, as the callers should stop trying to find more images once they get an error for an image
    _diag.assertNoError();

    __block bool result = false;

    // record if this is a non-overridable path
    bool pathIsInDyldCacheWhichCannotBeOverridden = false;
    bool dylibsExpectedOnDisk = true;
    if ( _dyldCache != nullptr ) {
        pathIsInDyldCacheWhichCannotBeOverridden = _dyldCache->hasNonOverridablePath(loadPath);
        dylibsExpectedOnDisk = _dyldCache->header.dylibsExpectedOnDisk;
    }

    _pathOverrides.forEachPathVariant(loadPath, pathIsInDyldCacheWhichCannotBeOverridden, ^(const char* possibleVariantPath, bool isFallbackPath, bool& stopPathVariant) {

        // This check is within forEachPathVariant() to let DYLD_LIBRARY_PATH override LC_RPATH
        bool isRPath = (strncmp(possibleVariantPath, "@rpath/", 7) == 0);

        // passing a leaf name to dlopen() allows rpath searching for it
        // FIXME: Does this apply to DYLD_INSERT_LIBRARIES too?
        bool implicitRPath = (linkageType == LinkageType::kDynamic) && (loadPath[0] != '/') && (loadPath == possibleVariantPath) && (_atPathHandling != AtPath::none);

        // expand @ paths
        forEachResolvedPathVar(possibleVariantPath, forImageChain, implicitRPath, linkageType,
                               ^(const char* possiblePath, bool& stop) {
            if ( possibleVariantPath != possiblePath )
                _atPathUsed = true;

            // look at already loaded images
            const char* leafName = strrchr(possiblePath, '/');
            for (BuilderLoadedImage& li: _loadedImages) {
                if ( strcmp(li.path(), possiblePath) == 0 ) {
                    foundImage = &li;
                    result = true;
                    stop = true;
                    return;
                }
                else if ( isRPath ) {
                    // Special case @rpath/ because the name in li.fileInfo.path is a full path.
                    // Getting the installName is expensive, so first see if an already loaded image
                    // has the same leaf name, and if so see if its installName matches the requested @rpath
                    if (const char* aLeaf = strrchr(li.path(), '/')) {
                        if ( strcmp(aLeaf, leafName) == 0 ) {
                            if ( li.loadAddress()->isDylib() && (strcmp(loadPath, li.loadAddress()->installName()) == 0) ) {
                                foundImage = &li;
                                result = true;
                                stop = true;
                                return;
                            }
                        }
                    }
                }
            }

            // look to see if image already loaded via a different symlink
            bool fileFound = false;
            uint64_t fileFoundINode = 0;
            uint64_t fileFoundMTime = 0;
            bool inodesMatchRuntime = false;
            // Note, we only do this check if we even expect to find this on-disk
            // We can also use the pathIsInDyldCacheWhichCannotBeOverridden result if we are still trying the same path
            // it was computed from
            if ( dylibsExpectedOnDisk || !pathIsInDyldCacheWhichCannotBeOverridden || (loadPath != possiblePath) ) {
                if ( _fileSystem.fileExists(possiblePath, &fileFoundINode, &fileFoundMTime, nullptr, &inodesMatchRuntime) ) {
                    fileFound = true;
                    for (BuilderLoadedImage& li: _loadedImages) {
                        if ( (li.loadedFileInfo.inode == fileFoundINode) && (li.loadedFileInfo.mtime == fileFoundMTime) ) {
                            foundImage = &li;
                            result = true;
                            stop = true;
                            return;
                        }
                    }
                }
            }

            bool unmapWhenDone = false;
            bool contentRebased = false;
            bool hasInits = false;
            bool markNeverUnload = false;
            bool mustBuildClosure = _dyldCacheInvalidFormatVersion;
            ImageNum overrideImageNum = 0;
            ImageNum foundImageNum = 0;
            const MachOAnalyzer* mh = nullptr;
            const char* filePath = nullptr;
            LoadedFileInfo loadedFileInfo;

            // look in dyld cache
            filePath = possiblePath;
            char realPath[MAXPATHLEN];
            if ( _dyldImageArray != nullptr ) {
                uint32_t dyldCacheImageIndex;
                bool foundInCache = _dyldCache->hasImagePath(possiblePath, dyldCacheImageIndex);
                if ( !foundInCache && fileFound ) {
                    // see if this is an OS dylib/bundle with a pre-built dlopen closure
                    // We can only use the pre-built closure if we are dynamic linkage (a dlopen) and
                    // there are no roots
                    if ( canUseSharedCacheClosure && (linkageType == LinkageType::kDynamic) ) {
                        if (const dyld3::closure::Image* otherImage = _dyldCache->findDlopenOtherImage(possiblePath) ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( !otherImage->isInvalid() ) {
                                bool hasInodeInfo = otherImage->hasFileModTimeAndInode(expectedInode, expectedModTime);
                                // use the pre-built Image if it does not record mtime/inode, or it does and they match the current file info
                                if ( !hasInodeInfo || ((expectedInode == fileFoundINode) && (expectedModTime == fileFoundMTime)) ) {
                                    loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, possiblePath, _archs, _platform, realPath);
                                    if ( _diag.noError() ) {
                                        mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                                        foundImageNum = otherImage->imageNum();
                                        unmapWhenDone = true;
                                        contentRebased = false;
                                        hasInits = otherImage->hasInitializers() || otherImage->mayHavePlusLoads();
                                        // Use the realpath in the case where we loaded a symlink
                                        // The closure must have recorded an alias path
                                        if (realPath[0] != '\0')
                                            filePath = realPath;
                                    }
                                }
                            }
                        }
                    }
                    // if not found in cache, it may be a symlink to something in the cache
                    if ( mh == nullptr ) {
                        if ( _fileSystem.getRealPath(possiblePath, realPath) ) {
                            foundInCache = _dyldCache->hasImagePath(realPath, dyldCacheImageIndex);
                            if ( foundInCache ) {
                                filePath = realPath;
#if BUILDING_LIBDYLD
                                // handle case where an OS dylib was updated after this process launched
                                for (BuilderLoadedImage& li: _loadedImages) {
                                    if ( strcmp(li.path(), realPath) == 0 ) {
                                        foundImage = &li;
                                        result = true;
                                        stop = true;
                                        return;
                                    }
                                }
#endif
                            }
                        }
                    }
                }

                // if using a cached dylib, look to see if there is an override
                if ( foundInCache ) {
                    ImageNum dyldCacheImageNum = dyldCacheImageIndex + 1;
                    bool useCache = true;
                    markNeverUnload = true; // dylibs in the cache, or dylibs that override the cache, should not be unloaded at runtime
                    const Image* image = _dyldImageArray->imageForNum(dyldCacheImageNum);
                    if ( image->overridableDylib() ) {
                        if ( fileFound ) {
                            uint64_t expectedInode;
                            uint64_t expectedModTime;
                            if ( image->hasFileModTimeAndInode(expectedInode, expectedModTime) ) {
                                // macOS, where dylibs remain on disk: only use the cache if mtime and inode have not changed
                                useCache = ( (fileFoundINode == expectedInode) && (fileFoundMTime == expectedModTime) );
                            }
                            else if ( _makingClosuresInCache ) {
                                // during iOS cache build, don't look at files on disk, use ones in cache
                                useCache = true;
                            }
                            else {
                                // iOS internal build: any dylib on disk overrides the cache
                                useCache = false;
                            }
                        }
                        if ( !useCache ) {
                            overrideImageNum = dyldCacheImageNum;
                            _foundDyldCacheRoots = true;
                        }
                    }
                    if ( useCache ) {
                        foundImageNum = dyldCacheImageNum;
                        mh = (MachOAnalyzer*)_dyldCache->getIndexedImageEntry(foundImageNum-1, loadedFileInfo.mtime, loadedFileInfo.inode);
                        unmapWhenDone = false;
                        // if we are building an ImageArray in the dyld cache, content is not rebased
                        contentRebased = !_makingDyldCacheImages && _dyldCacheIsLive;
                        hasInits = image->hasInitializers() || image->mayHavePlusLoads();
                        // If the cache format is different from dyld/libdyld then we can't use this closure.
                        if ( (_dyldCache->header.formatVersion != dyld3::closure::kFormatVersion) || !canUseSharedCacheClosure ) {
                            mustBuildClosure = true;
                            _foundDyldCacheRoots = true;
                        }
                    }
                }
            }

            // If we are building the cache and don't find an image, then it might be weak, so just return
            if (_makingDyldCacheImages) {
                addMustBeMissingPath(possiblePath);
                return;
            }

            // if not found yet, mmap file
            if ( mh == nullptr ) {
                loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, filePath, _archs, _platform, realPath);
                mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
                if ( mh == nullptr ) {
                    // Don't add must-be-missing paths for dlopen, as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        // If we found the file, then we want to skip it, as it's not a valid mach-o for this platform/arch.
                        // We can't record skipped file mtime/inode for caches built on a different machine than the one they run on.
                        // In that case, we expect the file to be mastered out, as otherwise we couldn't detect whether it
                        // changed or not on the device side.
                        if (fileFound && inodesMatchRuntime) {
                            addSkippedFile(possiblePath, fileFoundINode, fileFoundMTime);
                        } else {
                            addMustBeMissingPath(possiblePath);
                        }
                    }
                    return;
                }
                if ( linkageType != LinkageType::kDynamic ) {
                    // LC_LOAD_DYLIB can only link with dylibs, and DYLD_INSERT_LIBRARIES can only insert dylibs
                    if ( !mh->isDylib() ) {
                        _diag.error("found '%s' which is not a dylib. Needed by '%s'", filePath, forImageChain.image.path());
                        return;
                    }
                    // verify this is a compatible dylib version
                    const char* installName;
                    uint32_t foundCompatVers;
                    uint32_t foundCurrentVers;
                    mh->getDylibInstallName(&installName, &foundCompatVers, &foundCurrentVers);
                    if ( (foundCompatVers < compatVersion) && mh->enforceCompatVersion() ) {
                        char foundStr[32];
                        char requiredStr[32];
                        MachOFile::packedVersionToString(foundCompatVers, foundStr);
                        MachOFile::packedVersionToString(compatVersion, requiredStr);
                        _diag.error("found '%s' which has compat version (%s), which is less than required (%s). Needed by '%s'",
                                    filePath, foundStr, requiredStr, forImageChain.image.path());
                        return;
                    }
                }
                else if ( mh->isMainExecutable() ) {
                    // when dlopen()ing a main executable, it must be a dynamic Position Independent Executable
                    if ( !mh->isPIE() || !mh->isDynamicExecutable() ) {
                        _diag.error("not PIE");
                        return;
                    }
                }
                // Use the realpath in the case where we loaded a symlink
                // The closure must have recorded an alias path
                if (realPath[0] != '\0')
                    filePath = realPath;
                foundImageNum = _startImageNum + _nextIndex++;
                _foundNonCachedImage = true;
                mustBuildClosure = true;
                unmapWhenDone = true;
            } else {
                loadedFileInfo.fileContent = mh;
            }

            // if path is not the original path, or it's an inserted path (as forEachInColonList uses a stack temporary)
            if ( (filePath != loadPath) || (linkageType == LinkageType::kInserted) ) {
                // possiblePath may be a temporary (stack) string, so since we found a file at that path, make it permanent
                filePath = strdup_temp(filePath);
                // check if this overrides what would have been found in the cache
                // This is the case where we didn't find the image with the path in the shared cache, perhaps as it used library paths,
                // but the path we requested had pointed into the cache
                // FIXME: What if load path is via an @rpath and we will override the cache?
                if ( overrideImageNum == 0 ) {
                    if ( _dyldImageArray != nullptr ) {
                        uint32_t dyldCacheImageIndex;
                        if ( _dyldCache->hasImagePath(loadPath, dyldCacheImageIndex) ) {
                            ImageNum possibleOverrideNum = dyldCacheImageIndex+1;
                            if ( possibleOverrideNum != foundImageNum )
                                overrideImageNum = possibleOverrideNum;
                        }
                    }
                }
            }

            if ( !markNeverUnload ) {
                switch (linkageType) {
                    case LinkageType::kStatic:
                        // Static linkages can only be unloaded if the image loading us can be unloaded
                        markNeverUnload = forImageChain.image.markNeverUnload;
                        break;
                    case LinkageType::kDynamic:
                        markNeverUnload = false;
                        break;
                    case LinkageType::kInserted:
                        // Inserted libraries must never be unloaded
                        markNeverUnload = true;
                        break;
                }
            }

            if ( !markNeverUnload ) {
                // If the parent didn't force us to be never-unload, other conditions still may
                if ( mh->hasThreadLocalVariables() ) {
                    markNeverUnload = true;
                } else if ( mh->hasObjC() && mh->isDylib() ) {
                    markNeverUnload = true;
                } else {
                    // record if image has DOF sections
                    __block bool hasDOFs = false;
                    mh->forEachDOFSection(_diag, ^(uint32_t offset) {
                        hasDOFs = true;
                    });
                    if ( hasDOFs )
                        markNeverUnload = true;
                }
            }

            // Set the path again just in case it was strdup'ed.
            loadedFileInfo.path = filePath;

            // add new entry
            BuilderLoadedImage entry;
            entry.loadedFileInfo = loadedFileInfo;
            entry.imageNum = foundImageNum;
            entry.unmapWhenDone = unmapWhenDone;
            entry.contentRebased = contentRebased;
            entry.hasInits = hasInits;
            entry.markNeverUnload = markNeverUnload;
            entry.rtldLocal = false;
            entry.isBadImage = false;
            entry.mustBuildClosure = mustBuildClosure;
            entry.hasMissingWeakImports = false;
            entry.overrideImageNum = overrideImageNum;
            _loadedImages.push_back(entry);
            foundImage = &_loadedImages.back();
            if ( isFallbackPath )
                _fallbackPathUsed = true;
            stop = true;
            result = true;
        });
        if (result)
            stopPathVariant = true;
    }, _platform);

    // If we found a file, but also had an error, then we must have logged a diagnostic for a file we couldn't use.
    // Clear that for now.
    // FIXME: Surface this to the user in case they wanted to see the error
    if (result && _diag.hasError())
        _diag.clearError();

    return result;
}

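// Illustrative sketch (not part of dyld; variable names hypothetical): how a
// caller such as recursiveLoadDependents() below drives findImage().
//
//     BuilderLoadedImage* found = nullptr;
//     LoadedImageChain    mainChain = { nullptr, mainLoadedImage };
//     if ( builder.findImage("@rpath/libFoo.dylib", mainChain, found,
//                            LinkageType::kDynamic, 0, true) ) {
//         // `found` now points at an entry in _loadedImages: either an image
//         // that was already loaded, a dylib in the dyld shared cache, or a
//         // newly mmap()ed file that findImage() just validated.
//     }
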
bool ClosureBuilder::expandAtLoaderPath(const char* loadPath, bool fromLCRPATH, const BuilderLoadedImage& loadedImage, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH ) {
                // <rdar://42360708> allow @loader_path in LC_LOAD_DYLIB during dlopen()
                if ( _isLaunchClosure )
                    return false;
            }
            break;
        case AtPath::all:
            break;
    }
    if ( strncmp(loadPath, "@loader_path/", 13) != 0 )
        return false;

    strlcpy(fixedPath, loadedImage.path(), PATH_MAX);
    char* lastSlash = strrchr(fixedPath, '/');
    if ( lastSlash != nullptr ) {
        strcpy(lastSlash+1, &loadPath[13]);
        return true;
    }
    return false;
}

bool ClosureBuilder::expandAtExecutablePath(const char* loadPath, bool fromLCRPATH, char fixedPath[])
{
    switch ( _atPathHandling ) {
        case AtPath::none:
            return false;
        case AtPath::onlyInRPaths:
            if ( !fromLCRPATH )
                return false;
            break;
        case AtPath::all:
            break;
    }
    if ( strncmp(loadPath, "@executable_path/", 17) != 0 )
        return false;

    strlcpy(fixedPath, _mainProgLoadPath, PATH_MAX);
    char* lastSlash = strrchr(fixedPath, '/');
    if ( lastSlash != nullptr ) {
        strcpy(lastSlash+1, &loadPath[17]);
        return true;
    }
    return false;
}

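// Worked example (hypothetical paths): if a binary at
// /Applications/Foo.app/Contents/MacOS/Foo has
//     LC_LOAD_DYLIB  @loader_path/../Frameworks/Bar.dylib
// then expandAtLoaderPath() copies the loading image's own path into
// fixedPath, truncates after the last '/', and appends the tail, yielding
//     /Applications/Foo.app/Contents/MacOS/../Frameworks/Bar.dylib
// expandAtExecutablePath() performs the same substitution, but starts from
// _mainProgLoadPath instead of the loading image's path.
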
void ClosureBuilder::forEachResolvedPathVar(const char* loadPath, const LoadedImageChain& forImageChain,
                                            bool implicitRPath, LinkageType linkageType,
                                            void (^handler)(const char* possiblePath, bool& stop))
{
    // don't expand @loader_path or @executable_path if disallowed
    if ( (_atPathHandling == AtPath::none) && (loadPath[0] == '@') && (loadPath[1] != 'r') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // quick out if not an @ path and not an implicit rpath
    if ( !implicitRPath && (loadPath[0] != '@') ) {
        bool stop = false;
        handler(loadPath, stop);
        return;
    }

    // expand @loader_path
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    BLOCK_ACCCESSIBLE_ARRAY(char, tempPath, PATH_MAX); // read as: char tempPath[PATH_MAX];
    if ( (linkageType != LinkageType::kInserted) && expandAtLoaderPath(loadPath, false, forImageChain.image, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @executable_path
    // Note this is supported for DYLD_INSERT_LIBRARIES
    if ( expandAtExecutablePath(loadPath, false, tempPath) ) {
        bool stop = false;
        handler(tempPath, stop);
        return;
    }

    // expand @rpath
    // Note this isn't supported for DYLD_INSERT_LIBRARIES
    const char* rpathTail = nullptr;
    char implicitRpathBuffer[PATH_MAX];
    if ( linkageType != LinkageType::kInserted ) {
        if ( strncmp(loadPath, "@rpath/", 7) == 0 ) {
            // note: rpathTail starts with '/'
            rpathTail = &loadPath[6];
        }
        else if ( implicitRPath ) {
            // make rpathTail start with '/'
            strlcpy(implicitRpathBuffer, "/", PATH_MAX);
            strlcat(implicitRpathBuffer, loadPath, PATH_MAX);
            rpathTail = implicitRpathBuffer;
        }
    }
    if ( rpathTail != nullptr ) {
        // rpath expansion is technically a stack of rpath dirs, built starting with the main executable and pushing
        // LC_RPATHs from each dylib as they are recursively loaded. Our imageChain represents that stack.
        __block bool done = false;
        for (const LoadedImageChain* link = &forImageChain; (link != nullptr) && !done; link = link->previous) {
            link->image.loadAddress()->forEachRPath(^(const char* rPath, bool& stop) {
                // fprintf(stderr, "LC_RPATH %s from %s\n", rPath, link->image.loadedFileInfo.path);
                if ( expandAtLoaderPath(rPath, true, link->image, tempPath) || expandAtExecutablePath(rPath, true, tempPath) ) {
                    // @loader_path allowed and expanded
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                else if ( rPath[0] == '/' ) {
                    // LC_RPATH is an absolute path, not blocked by AtPath::none
                    strlcpy(tempPath, rPath, PATH_MAX);
                    strlcat(tempPath, rpathTail, PATH_MAX);
                    handler(tempPath, stop);
                }
                if (stop)
                    done = true;
#if 0
                if ( _fileSystem.fileExists(tempPath) ) {
                    stop = true;
                    result = strdup_temp(tempPath);
                }
                else {
                    // Don't add must-be-missing paths for dlopen, as we don't cache dlopen closures
                    if (_isLaunchClosure) {
                        addMustBeMissingPath(tempPath);
                    }
                }
#endif
            });
        }
        if (done)
            return;
    }

    bool stop = false;
    handler(loadPath, stop);
}

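// Worked example of the rpath stack (hypothetical layout): if the main
// executable has LC_RPATH /usr/lib/swift and loads A.dylib, which has
// LC_RPATH @loader_path/Frameworks and in turn loads @rpath/B.dylib, the
// chain walk above tries, in order:
//     @loader_path/Frameworks expanded against A.dylib, plus "/B.dylib"
//     /usr/lib/swift/B.dylib
// i.e. LC_RPATHs are searched from the requesting image back toward the main
// executable, each combined with the tail that follows "@rpath".
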
const char* ClosureBuilder::strdup_temp(const char* path)
{
    if ( _tempPaths == nullptr )
        _tempPaths = PathPool::allocate();
    return _tempPaths->add(path);
}

void ClosureBuilder::addMustBeMissingPath(const char* path)
{
    //fprintf(stderr, "must be missing: %s\n", path);
    if ( _mustBeMissingPaths == nullptr )
        _mustBeMissingPaths = PathPool::allocate();
    _mustBeMissingPaths->add(path);
}

void ClosureBuilder::addSkippedFile(const char* path, uint64_t inode, uint64_t mtime)
{
    _skippedFiles.push_back({ strdup_temp(path), inode, mtime });
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(ImageNum imageNum)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.imageNum == imageNum ) {
            return li;
        }
    }
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.overrideImageNum == imageNum ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

ClosureBuilder::BuilderLoadedImage& ClosureBuilder::findLoadedImage(const MachOAnalyzer* mh)
{
    for (BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li;
        }
    }
    assert(0 && "LoadedImage not found");
}

const MachOAnalyzer* ClosureBuilder::machOForImageNum(ImageNum imageNum)
{
    return findLoadedImage(imageNum).loadAddress();
}

const MachOAnalyzer* ClosureBuilder::findDependent(const MachOLoaded* mh, uint32_t depIndex)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            if (li.isBadImage) {
                // Bad image during building group 1 closures, so the dependents array
                // is potentially incomplete.
                return nullptr;
            }
            ImageNum childNum = li.dependents[depIndex].imageNum();
            // This is typically something like a missing weak-dylib we are re-exporting a weak-import symbol from
            if (childNum == kMissingWeakLinkedImage)
                return nullptr;
            return machOForImageNum(childNum);
        }
    }
    return nullptr;
}

ImageNum ClosureBuilder::imageNumForMachO(const MachOAnalyzer* mh)
{
    for (const BuilderLoadedImage& li : _loadedImages) {
        if ( li.loadAddress() == mh ) {
            return li.imageNum;
        }
    }
    assert(0 && "unknown mach-o");
    return 0;
}

void ClosureBuilder::recursiveLoadDependents(LoadedImageChain& forImageChain, bool canUseSharedCacheClosure)
{
    // if dependents is set, then we have already loaded this
    if ( forImageChain.image.dependents.begin() != nullptr )
        return;

    uintptr_t startDepIndex = _dependencies.count();
    // add dependents
    __block uint32_t depIndex = 0;
    forImageChain.image.loadAddress()->forEachDependentDylib(^(const char* loadPath, bool isWeak, bool isReExport, bool isUpward, uint32_t compatVersion, uint32_t curVersion, bool& stop) {
        Image::LinkKind kind = Image::LinkKind::regular;
        if ( isWeak )
            kind = Image::LinkKind::weak;
        else if ( isReExport )
            kind = Image::LinkKind::reExport;
        else if ( isUpward )
            kind = Image::LinkKind::upward;
        BuilderLoadedImage* foundImage;
        if ( findImage(loadPath, forImageChain, foundImage, LinkageType::kStatic, compatVersion, canUseSharedCacheClosure) ) {
            ImageNum foundImageNum = foundImage->imageNum;
            if ( _diag.noError() )
                _dependencies.push_back(Image::LinkedImage(kind, foundImageNum));
        }
        else if ( isWeak ) {
            _dependencies.push_back(Image::LinkedImage(Image::LinkKind::weak, kMissingWeakLinkedImage));
        }
        else {
            BLOCK_ACCCESSIBLE_ARRAY(char, extra, 4096);
            extra[0] = '\0';
            const char* targetLeaf = strrchr(loadPath, '/');
            if ( targetLeaf == nullptr )
                targetLeaf = loadPath;
            if ( _mustBeMissingPaths != nullptr ) {
                strcpy(extra, ", tried but didn't find: ");
                _mustBeMissingPaths->forEachPath(^(const char* aPath) {
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                });
            }
            if ( !_skippedFiles.empty() ) {
                strlcat(extra, ", tried but invalid: ", 4096);
                for (const SkippedFile& skippedFile : _skippedFiles) {
                    const char* aPath = skippedFile.path;
                    const char* aLeaf = strrchr(aPath, '/');
                    if ( aLeaf == nullptr )
                        aLeaf = aPath;
                    if ( strcmp(targetLeaf, aLeaf) == 0 ) {
                        strlcat(extra, "'", 4096);
                        strlcat(extra, aPath, 4096);
                        strlcat(extra, "' ", 4096);
                    }
                }
            }
            if ( _diag.hasError() ) {
#if BUILDING_CACHE_BUILDER
                std::string errorMessageBuffer = _diag.errorMessage();
                const char* msg = errorMessageBuffer.c_str();
#else
                const char* msg = _diag.errorMessage();
#endif
                char msgCopy[strlen(msg)+4];
                strcpy(msgCopy, msg);
                _diag.error("dependent dylib '%s' not found for '%s'. %s", loadPath, forImageChain.image.path(), msgCopy);
            }
            else {
                _diag.error("dependent dylib '%s' not found for '%s'%s", loadPath, forImageChain.image.path(), extra);
            }
            if ( _launchErrorInfo != nullptr ) {
                _launchErrorInfo->kind = DYLD_EXIT_REASON_DYLIB_MISSING;
                _launchErrorInfo->clientOfDylibPath = strdup_temp(forImageChain.image.path());
                _launchErrorInfo->targetDylibPath = strdup_temp(loadPath);
                _launchErrorInfo->symbol = nullptr;
            }
        }
        ++depIndex;
        if ( _diag.hasError() )
            stop = true;
    });
    if ( _diag.hasError() )
        return;
    forImageChain.image.dependents = _dependencies.subArray(startDepIndex, depIndex);

    // breadth-first recurse
    for (Image::LinkedImage dep : forImageChain.image.dependents) {
        // don't recurse upwards
        if ( dep.kind() == Image::LinkKind::upward )
            continue;
        // don't recurse down missing weak links
        if ( (dep.kind() == Image::LinkKind::weak) && (dep.imageNum() == kMissingWeakLinkedImage) )
            continue;
        BuilderLoadedImage& depLoadedImage = findLoadedImage(dep.imageNum());
        LoadedImageChain chain = { &forImageChain, depLoadedImage };
        recursiveLoadDependents(chain, canUseSharedCacheClosure);
        if ( _diag.hasError() )
            break;
    }
}

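// Illustrative sketch (variable names hypothetical): a closure build seeds
// this recursion with the main image at the bottom of the chain, then sweeps
// up any images only reachable via upward links:
//
//     LoadedImageChain mainChain = { nullptr, mainLoadedImage };
//     recursiveLoadDependents(mainChain, canUseSharedCacheClosure);
//     loadDanglingUpwardLinks(canUseSharedCacheClosure);
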
void ClosureBuilder::loadDanglingUpwardLinks(bool canUseSharedCacheClosure)
{
    bool danglingFixed;
    do {
        danglingFixed = false;
        for (BuilderLoadedImage& li : _loadedImages) {
            if ( li.dependents.begin() == nullptr ) {
                // this image does not have its dependents set yet (probably a dangling upward link, or it is referenced by an upward link)
                LoadedImageChain chain = { nullptr, li };
                recursiveLoadDependents(chain, canUseSharedCacheClosure);
                danglingFixed = true;
                break;
            }
        }
    } while (danglingFixed && _diag.noError());
}

bool ClosureBuilder::overridableDylib(const BuilderLoadedImage& forImage)
{
    // only set on dylibs in the dyld shared cache
    if ( !_makingDyldCacheImages )
        return false;

    // on macOS dylibs always override cache
    if ( _platform == Platform::macOS )
        return true;

    // on embedded platforms with Internal cache, allow overrides
    if ( !_makingCustomerCache )
        return true;

    // embedded platform customer caches, no overrides
    return false; // FIXME, allow libdispatch.dylib to be overridden
}

void ClosureBuilder::buildImage(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const MachOAnalyzer* macho = forImage.loadAddress();
    // set ImageNum
    writer.setImageNum(forImage.imageNum);

    // set flags
    writer.setHasWeakDefs(macho->hasWeakDefs());
    writer.setIsBundle(macho->isBundle());
    writer.setIsDylib(macho->isDylib());
    writer.setIs64(macho->is64());
    writer.setIsExecutable(macho->isMainExecutable());
    writer.setUses16KPages(macho->uses16KPages());
    writer.setOverridableDylib(overridableDylib(forImage));
    writer.setInDyldCache(macho->inDyldCache());
    if ( macho->hasObjC() ) {
        writer.setHasObjC(true);
        bool hasPlusLoads = macho->hasPlusLoadMethod(_diag);
        writer.setHasPlusLoads(hasPlusLoads);
        if ( hasPlusLoads )
            forImage.hasInits = true;
    }
    else {
        writer.setHasObjC(false);
        writer.setHasPlusLoads(false);
    }

    if ( forImage.markNeverUnload ) {
        writer.setNeverUnload(true);
    }

#if BUILDING_DYLD || BUILDING_LIBDYLD
    if ( _foundDyldCacheRoots ) {
        // If we had roots, then some images are potentially on-disk while others are
        // being rebuilt for a new initializer order, but do not exist on disk
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of final file system
        }
        else {
            // file is either not in cache or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    } else {
        // shared cache not built by dyld or libdyld.dylib, so must be real file
        writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
    }
#else
    if ( _platform == Platform::macOS || MachOFile::isSimulatorPlatform(_platform) ) {
        if ( macho->inDyldCache() && !_dyldCache->header.dylibsExpectedOnDisk ) {
            // don't add file info for shared cache files mastered out of final file system
        }
        else {
            // file is either not in cache or is in cache but not mastered out
            writer.setFileInfo(forImage.loadedFileInfo.inode, forImage.loadedFileInfo.mtime);
        }
    }
    else {
        // all other platforms, cache is built off-device, so inodes are not known
    }
#endif

    // add info on how to load image
    if ( !macho->inDyldCache() ) {
        writer.setMappingInfo(forImage.loadedFileInfo.sliceOffset, macho->mappedSize());
        // add code signature, if signed
        uint32_t codeSigFileOffset;
        uint32_t codeSigSize;
        if ( macho->hasCodeSignature(codeSigFileOffset, codeSigSize) ) {
            writer.setCodeSignatureLocation(codeSigFileOffset, codeSigSize);
            macho->forEachCDHash(^(const uint8_t* cdHash) {
                writer.addCDHash(cdHash);
            });
        }
        // add FairPlay encryption range if encrypted
        uint32_t fairPlayFileOffset;
        uint32_t fairPlaySize;
        if ( macho->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
            writer.setFairPlayEncryptionRange(fairPlayFileOffset, fairPlaySize);
        }
    }

    // set path
    writer.addPath(forImage.path());
    if ( _aliases != nullptr ) {
        for (const CachedDylibAlias& alias : *_aliases) {
            if ( strcmp(alias.realPath, forImage.path()) == 0 )
                writer.addPath(alias.aliasPath);
        }
    }

    // set uuid, if it has one
    uuid_t uuid;
    if ( macho->getUuid(uuid) )
        writer.setUUID(uuid);

    // set dependents
    writer.setDependents(forImage.dependents);

    // set segments
    addSegments(writer, macho);

    // record if this dylib overrides something in the cache
    if ( forImage.overrideImageNum != 0 ) {
        writer.setAsOverrideOf(forImage.overrideImageNum);
        const char* overridePath = _dyldImageArray->imageForNum(forImage.overrideImageNum)->path();
        writer.addPath(overridePath);
        if ( strcmp(overridePath, "/usr/lib/system/libdyld.dylib") == 0 )
            _libDyldImageNum = forImage.imageNum;
        else if ( strcmp(overridePath, "/usr/lib/libSystem.B.dylib") == 0 )
            _libSystemImageNum = forImage.imageNum;
    }

    // do fix up info for non-cached, and cached if building cache
    if ( !macho->inDyldCache() || _makingDyldCacheImages ) {
        if ( macho->hasChainedFixups() ) {
            addChainedFixupInfo(writer, forImage);
        }
        else {
            if ( _handlers != nullptr ) {
                reportRebasesAndBinds(writer, forImage);
            }
            else {
                // Note we have to do binds before rebases so that we know if we have missing lazy binds
                addBindInfo(writer, forImage);
                if ( _diag.noError() )
                    addRebaseInfo(writer, macho);
            }
        }
    }
    if ( _diag.hasError() ) {
        writer.setInvalid();
        return;
    }

    // Don't build iOSMac for now. Just add an invalid placeholder
    if ( _makingDyldCacheImages && strncmp(forImage.path(), "/System/iOSSupport/", 19) == 0 ) {
        writer.setInvalid();
        return;
    }

    // add initializers
    bool contentRebased = forImage.contentRebased;
    __block unsigned initCount = 0;
    Diagnostics initializerDiag;
    macho->forEachInitializer(initializerDiag, contentRebased, ^(uint32_t offset) {
        ++initCount;
    }, _dyldCache);
    if ( initializerDiag.noError() ) {
        if ( initCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, initOffsets, initCount);
            __block unsigned index = 0;
            macho->forEachInitializer(_diag, contentRebased, ^(uint32_t offset) {
                initOffsets[index++] = offset;
            }, _dyldCache);
            writer.setInitOffsets(initOffsets, initCount);
            forImage.hasInits = true;
        }
    }
    else {
        // mod_init_func section is malformed, might be self-modifying pointers
        macho->forEachInitializerPointerSection(_diag, ^(uint32_t sectionOffset, uint32_t sectionSize, const uint8_t* content, bool& stop) {
            writer.setInitSectRange(sectionOffset, sectionSize);
            forImage.hasInits = true;
        });
    }

    // add terminators (except for dylibs in the cache because they are never unloaded)
    if ( !macho->inDyldCache() ) {
        __block unsigned termCount = 0;
        macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
            ++termCount;
        });
        if ( termCount != 0 ) {
            BLOCK_ACCCESSIBLE_ARRAY(uint32_t, termOffsets, termCount);
            __block unsigned index = 0;
            macho->forEachTerminator(_diag, contentRebased, ^(uint32_t offset) {
                termOffsets[index++] = offset;
            });
            writer.setTermOffsets(termOffsets, termCount);
        }
    }

    // record if image has DOF sections
    STACK_ALLOC_ARRAY(uint32_t, dofSectionOffsets, 256);
    macho->forEachDOFSection(_diag, ^(uint32_t offset) {
        dofSectionOffsets.push_back(offset);
    });
    if ( !dofSectionOffsets.empty() ) {
        writer.setDofOffsets(dofSectionOffsets);
    }
}

void ClosureBuilder::addSegments(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint32_t segCount = mh->segmentCount();
    if ( mh->inDyldCache() ) {
        uint64_t cacheUnslideBaseAddress = _dyldCache->unslidLoadAddress();
        BLOCK_ACCCESSIBLE_ARRAY(Image::DyldCacheSegment, segs, segCount);
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            segs[info.segIndex] = { (uint32_t)(info.vmAddr-cacheUnslideBaseAddress), (uint32_t)info.vmSize, info.protections };
        });
        writer.setCachedSegments(segs, segCount);
    }
    else {
        const uint32_t pageSize = (mh->uses16KPages() ? 0x4000 : 0x1000);
        __block uint32_t diskSegIndex = 0;
        __block uint32_t totalPageCount = 0;
        __block uint32_t lastFileOffsetEnd = 0;
        __block uint64_t lastVmAddrEnd = 0;
        BLOCK_ACCCESSIBLE_ARRAY(Image::DiskSegment, dsegs, segCount*3); // room for padding
        mh->forEachSegment(^(const MachOAnalyzer::SegmentInfo& info, bool& stop) {
            if ( (info.fileOffset != 0) && (info.fileOffset != lastFileOffsetEnd) ) {
                Image::DiskSegment filePadding;
                filePadding.filePageCount = (info.fileOffset - lastFileOffsetEnd)/pageSize;
                filePadding.vmPageCount = 0;
                filePadding.permissions = 0;
                filePadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = filePadding;
            }
            if ( (lastVmAddrEnd != 0) && (info.vmAddr != lastVmAddrEnd) ) {
                Image::DiskSegment vmPadding;
                vmPadding.filePageCount = 0;
                vmPadding.vmPageCount = (info.vmAddr - lastVmAddrEnd)/pageSize;
                vmPadding.permissions = 0;
                vmPadding.paddingNotSeg = 1;
                dsegs[diskSegIndex++] = vmPadding;
                totalPageCount += vmPadding.vmPageCount;
            }
            {
                Image::DiskSegment segInfo;
                segInfo.filePageCount = (info.fileSize+pageSize-1)/pageSize;
                segInfo.vmPageCount = (info.vmSize+pageSize-1)/pageSize;
                segInfo.permissions = info.protections & 7;
                segInfo.paddingNotSeg = 0;
                if ( info.readOnlyData )
                    segInfo.permissions = Image::DiskSegment::kReadOnlyDataPermissions;
                dsegs[diskSegIndex++] = segInfo;
                totalPageCount += segInfo.vmPageCount;
                if ( info.fileSize != 0 )
                    lastFileOffsetEnd = (uint32_t)(info.fileOffset + info.fileSize);
                if ( info.vmSize != 0 )
                    lastVmAddrEnd = info.vmAddr + info.vmSize;
            }
        });
        writer.setDiskSegments(dsegs, diskSegIndex);
    }
}

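// Worked example of the rounding above (hypothetical segment, 4KB pages):
// a __DATA segment with fileSize 0x4200 and vmSize 0x6000 yields
//     filePageCount = (0x4200 + 0xFFF) / 0x1000 = 5 pages
//     vmPageCount   = (0x6000 + 0xFFF) / 0x1000 = 6 pages
// and any gap between this segment's vmAddr and the previous segment's end
// is emitted first as a separate entry with paddingNotSeg set.
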
static bool isTupleFixup(uint64_t tupleSectVmStartOffset, uint64_t tupleSectVmEndOffset, uint64_t imageOffsetOfFixup, uint32_t entrySize, uint32_t& tupleIndex)
{
    if ( imageOffsetOfFixup < tupleSectVmStartOffset )
        return false;
    if ( imageOffsetOfFixup > tupleSectVmEndOffset )
        return false;
    uint64_t offsetIntoSection = imageOffsetOfFixup - tupleSectVmStartOffset;
    tupleIndex = (uint32_t)(offsetIntoSection/entrySize);
    return (tupleIndex*entrySize == offsetIntoSection) || ((tupleIndex*entrySize+entrySize/2) == offsetIntoSection);
}

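// Worked example (hypothetical values): with 8-byte pointers an interposing
// tuple is entrySize = 16 bytes. A fixup at section offset 0x30 gives
// tupleIndex = 3 and matches the first half of the tuple (the replacement);
// a fixup at 0x38 also gives tupleIndex = 3 but matches the second half
// (the stock implementation). A fixup at 0x34 matches neither half, so it
// is not a tuple fixup.
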
void ClosureBuilder::addInterposingTuples(LaunchClosureWriter& writer, const Image* image, const MachOAnalyzer* mh)
{
    const unsigned pointerSize = mh->pointerSize();
    const uint64_t baseAddress = mh->preferredLoadAddress();
    mh->forEachInterposingSection(_diag, ^(uint64_t sectVmOffset, uint64_t sectVmSize, bool& stop) {
        const uint32_t entrySize = 2*pointerSize;
        const uint32_t tupleCount = (uint32_t)(sectVmSize/entrySize);
        const uint64_t sectVmEndOffset = sectVmOffset + sectVmSize;
        BLOCK_ACCCESSIBLE_ARRAY(InterposingTuple, resolvedTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            resolvedTuples[i].stockImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].stockImplementation.absolute.value = 0;
            resolvedTuples[i].newImplementation.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
            resolvedTuples[i].newImplementation.absolute.value = 0;
        }
        // figure out what the replacement (rebase) and the stock implementation (bind) of each tuple point to
        image->forEachFixup(^(uint64_t imageOffsetToRebase, bool& rebaseStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToRebase, entrySize, tupleIndex) ) {
                const void* content = (uint8_t*)mh + imageOffsetToRebase;
                uint64_t unslidTargetAddress = mh->is64() ? *(uint64_t*)content : *(uint32_t*)content;
                resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                resolvedTuples[tupleIndex].newImplementation.image.offset = unslidTargetAddress - mh->preferredLoadAddress();
            }
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            uint32_t tupleIndex;
            if ( isTupleFixup(sectVmOffset, sectVmEndOffset, imageOffsetToBind, entrySize, tupleIndex) ) {
                resolvedTuples[tupleIndex].stockImplementation = bindTarget;
            }
        },
        ^(uint64_t imageOffsetToStartsInfo, const Array<Image::ResolvedSymbolTarget>& targets, bool& chainStop) {
            mh->withChainStarts(_diag, imageOffsetToStartsInfo, ^(const dyld_chained_starts_in_image* startsInfo) {
                mh->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc, const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
                    uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)mh;
                    uint32_t tupleIndex;
                    if ( !isTupleFixup(sectVmOffset, sectVmEndOffset, fixupOffset, entrySize, tupleIndex) )
                        return;
                    uint32_t bindOrdinal;
                    uint64_t rebaseTargetOffset;
                    if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
                        if ( bindOrdinal < targets.count() ) {
                            resolvedTuples[tupleIndex].stockImplementation = targets[bindOrdinal];
                        }
                        else {
                            _diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
                            fixupsStop = true;
                        }
                    }
                    else if ( fixupLoc->isRebase(segInfo->pointer_format, baseAddress, rebaseTargetOffset) ) {
                        resolvedTuples[tupleIndex].newImplementation.image.kind = Image::ResolvedSymbolTarget::kindImage;
                        resolvedTuples[tupleIndex].newImplementation.image.imageNum = image->imageNum();
                        resolvedTuples[tupleIndex].newImplementation.image.offset = rebaseTargetOffset;
                    }
                });
            });
        },
        ^(uint64_t imageOffsetToFixup) {
            // objc optimisation can't be interposed, so nothing to do here.
        },
        ^(uint64_t imageOffsetToBind, Image::ResolvedSymbolTarget bindTarget, bool& bindStop) {
            // objc protocol optimisation fixups can't be interposed, so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, uint32_t selectorIndex, bool inSharedCache, bool& fixupStop) {
            // objc selector optimisation fixups can't be interposed, so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc stable Swift optimisation fixups can't be interposed, so nothing to do here.
        },
        ^(uint64_t imageOffsetToFixup, bool& fixupStop) {
            // objc method list optimisation fixups can't be interposed, so nothing to do here.
        });

        // remove any tuples in which both sides are not set (or the target is a weak-import NULL)
        STACK_ALLOC_ARRAY(InterposingTuple, goodTuples, tupleCount);
        for (uint32_t i=0; i < tupleCount; ++i) {
            if ( (resolvedTuples[i].stockImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute)
              && (resolvedTuples[i].newImplementation.image.kind != Image::ResolvedSymbolTarget::kindAbsolute) )
                goodTuples.push_back(resolvedTuples[i]);
        }
        writer.addInterposingTuples(goodTuples);

        // if the target of the interposing is in the dyld shared cache, add a PatchEntry so the cache is fixed up at launch
        STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, goodTuples.count());
        for (const InterposingTuple& aTuple : goodTuples) {
            if ( aTuple.stockImplementation.sharedCache.kind == Image::ResolvedSymbolTarget::kindSharedCache ) {
                uint32_t imageIndex;
                assert(_dyldCache->addressInText((uint32_t)aTuple.stockImplementation.sharedCache.offset, &imageIndex));
                ImageNum imageInCache = imageIndex+1;
                Closure::PatchEntry patch;
                patch.exportCacheOffset = (uint32_t)aTuple.stockImplementation.sharedCache.offset;
                patch.overriddenDylibInCache = imageInCache;
                patch.replacement = aTuple.newImplementation;
                patches.push_back(patch);
            }
        }
        writer.addCachePatches(patches);
    });
}

void ClosureBuilder::addRebaseInfo(ImageWriter& writer, const MachOAnalyzer* mh)
{
    const uint64_t ptrSize = mh->pointerSize();
    Image::RebasePattern maxLeapPattern = { 0xFFFFF, 0, 0xF };
    const uint64_t maxLeapCount = maxLeapPattern.repeatCount * maxLeapPattern.skipCount;
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::RebasePattern, rebaseEntries, 1024);
    __block uint64_t lastLocation = -ptrSize;
    mh->forEachRebase(_diag, !_foundMissingLazyBinds, ^(uint64_t runtimeOffset, bool& stop) {
        const uint64_t delta = runtimeOffset - lastLocation;
        const bool aligned = ((delta % ptrSize) == 0);
        if ( delta == ptrSize ) {
            // this rebase location is contiguous to the previous one
            if ( rebaseEntries.back().contigCount < 255 ) {
                // just bump the previous entry's contigCount
                rebaseEntries.back().contigCount++;
            }
            else {
                // previous contiguous run already has the max of 255, so start a new run
                rebaseEntries.push_back({ 1, 1, 0 });
            }
        }
        else if ( aligned && (delta <= (ptrSize*15)) ) {
            // this rebase is within skip distance of the last rebase
            rebaseEntries.back().skipCount = (uint8_t)((delta-ptrSize)/ptrSize);
            int lastIndex = (int)(rebaseEntries.count() - 1);
            if ( lastIndex > 1 ) {
                if ( (rebaseEntries[lastIndex].contigCount == rebaseEntries[lastIndex-1].contigCount)
                  && (rebaseEntries[lastIndex].skipCount == rebaseEntries[lastIndex-1].skipCount) ) {
                    // this entry has the same contig and skip as the previous one, so remove it and bump the previous repeat count
                    rebaseEntries.pop_back();
                    rebaseEntries.back().repeatCount += 1;
                }
            }
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        else {
            uint64_t advanceCount = (delta-ptrSize);
            if ( (runtimeOffset < lastLocation) && (lastLocation != -ptrSize) ) {
                // out-of-order rebase! handle this by resetting the rebase offset to zero
                rebaseEntries.push_back({ 0, 0, 0 });
                advanceCount = runtimeOffset;
            }
            // if next rebase is too far to reach with one pattern, use a series
            while ( advanceCount > maxLeapCount ) {
                rebaseEntries.push_back(maxLeapPattern);
                advanceCount -= maxLeapCount;
            }
            // if next rebase is not reachable with skipCount==1 or skipCount==15, add an intermediate
            while ( advanceCount > maxLeapPattern.repeatCount ) {
                uint64_t count = advanceCount / maxLeapPattern.skipCount;
                rebaseEntries.push_back({ (uint32_t)count, 0, maxLeapPattern.skipCount });
                advanceCount -= (count*maxLeapPattern.skipCount);
            }
            if ( advanceCount != 0 )
                rebaseEntries.push_back({ (uint32_t)advanceCount, 0, 1 });
            rebaseEntries.push_back({ 1, 1, 0 });
        }
        lastLocation = runtimeOffset;
    });
    writer.setRebaseInfo(rebaseEntries);

    // i386 programs also use text relocs to rebase stubs
    if ( mh->cputype == CPU_TYPE_I386 ) {
        STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::TextFixupPattern, textRebases, 512);
        __block uint64_t lastOffset = -4;
        mh->forEachTextRebase(_diag, ^(uint64_t runtimeOffset, bool& stop) {
            if ( textRebases.freeCount() < 2 ) {
                _diag.error("too many text rebase locations (%ld) in %s", textRebases.maxCount(), writer.currentImage()->path());
                stop = true;
            }
            bool mergedIntoPrevious = false;
            if ( (runtimeOffset > lastOffset) && !textRebases.empty() ) {
                uint32_t skipAmount = (uint32_t)(runtimeOffset - lastOffset);
                if ( (textRebases.back().repeatCount == 1) && (textRebases.back().skipCount == 0) ) {
                    textRebases.back().repeatCount = 2;
                    textRebases.back().skipCount = skipAmount;
                    mergedIntoPrevious = true;
                }
                else if ( textRebases.back().skipCount == skipAmount ) {
                    textRebases.back().repeatCount += 1;
                    mergedIntoPrevious = true;
                }
            }
            if ( !mergedIntoPrevious ) {
                Image::TextFixupPattern pattern;
                pattern.target.raw = 0;
                pattern.startVmOffset = (uint32_t)runtimeOffset;
                pattern.repeatCount = 1;
                pattern.skipCount = 0;
                textRebases.push_back(pattern);
            }
            lastOffset = runtimeOffset;
        });
        writer.setTextRebaseInfo(textRebases);
    }
}

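// Worked example of the RebasePattern encoding above (hypothetical 64-bit
// image): rebases at offsets 0x1000, 0x1008, 0x1010 form one contiguous run,
// collapsing into a single entry with contigCount=3. If the next rebase is
// at 0x1030 (a delta of four pointers), that entry's skipCount records the
// gap and a fresh run is started, so long stretches of evenly spaced
// pointers need only a handful of entries instead of one record per fixup.
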
void ClosureBuilder::forEachBind(BuilderLoadedImage& forImage, void (^handler)(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop),
                                 void (^strongHandler)(const char* strongSymbolName),
                                 void (^missingLazyBindHandler)())
{
    __block int lastLibOrdinal = 256;
    __block const char* lastSymbolName = nullptr;
    __block uint64_t lastAddend = 0;
    __block Image::ResolvedSymbolTarget target;
    __block ResolvedTargetInfo targetInfo;
    forImage.loadAddress()->forEachBind(_diag, ^(uint64_t runtimeOffset, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind, uint64_t addend, bool& stop) {
        if ( (symbolName == lastSymbolName) && (libOrdinal == lastLibOrdinal) && (addend == lastAddend) ) {
            // same symbol lookup as the last location
            handler(runtimeOffset, target, targetInfo, stop);
        }
        else if ( findSymbol(forImage, libOrdinal, symbolName, weakImport, lazyBind, addend, target, targetInfo) ) {
            if ( !targetInfo.skippableWeakDef ) {
                handler(runtimeOffset, target, targetInfo, stop);
                lastSymbolName = symbolName;
                lastLibOrdinal = libOrdinal;
                lastAddend = addend;
            }
        }
        else {
            stop = true;
        }
    }, ^(const char* symbolName) {
        strongHandler(symbolName);
    }, ^() {
        missingLazyBindHandler();
    });
}

void ClosureBuilder::addBindInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    const uint32_t ptrSize = forImage.loadAddress()->pointerSize();
    STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::BindPattern, binds, 512);
    __block uint64_t lastOffset = -ptrSize;
    __block Image::ResolvedSymbolTarget lastTarget = { {0, 0} };
    forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
        if ( targetInfo.weakBindCoalese ) {
            // there may be a previous bind to this location;
            // if so, update that entry rather than create a new BindPattern
            for (Image::BindPattern& aBind : binds) {
                if ( (aBind.startVmOffset == runtimeOffset) && (aBind.repeatCount == 1) && (aBind.skipCount == 0) ) {
                    aBind.target = target;
                    return;
                }
            }
        }
        bool mergedIntoPrevious = false;
        if ( !mergedIntoPrevious && (target == lastTarget) && (runtimeOffset > lastOffset) && !binds.empty() ) {
            uint64_t skipAmount = (runtimeOffset - lastOffset - ptrSize)/ptrSize;
            if ( skipAmount*ptrSize != (runtimeOffset - lastOffset - ptrSize) ) {
                // a misaligned pointer means we cannot optimize
            }
            else {
                if ( (binds.back().repeatCount == 1) && (binds.back().skipCount == 0) && (skipAmount <= 255) ) {
                    binds.back().repeatCount = 2;
                    binds.back().skipCount = skipAmount;
                    assert(binds.back().skipCount == skipAmount); // check overflow
                    mergedIntoPrevious = true;
                }
                else if ( (binds.back().skipCount == skipAmount) && (binds.back().repeatCount < 0xfff) ) {
                    uint32_t prevRepeatCount = binds.back().repeatCount;
                    binds.back().repeatCount += 1;
                    assert(binds.back().repeatCount > prevRepeatCount); // check overflow
                    mergedIntoPrevious = true;
                }
            }
        }
        if ( (target == lastTarget) && (runtimeOffset == lastOffset) && !binds.empty() ) {
            // duplicate bind for the same location, ignore this one
            mergedIntoPrevious = true;
        }
        if ( !mergedIntoPrevious ) {
            Image::BindPattern pattern;
            pattern.target = target;
            pattern.startVmOffset = runtimeOffset;
            pattern.repeatCount = 1;
            pattern.skipCount = 0;
            assert(pattern.startVmOffset == runtimeOffset);
            binds.push_back(pattern);
        }
        lastTarget = target;
        lastOffset = runtimeOffset;
    }, ^(const char* strongSymbolName) {
        if ( !_makingDyldCacheImages ) {
            // something has a strong symbol definition that may override a weak impl in the dyld cache
            Image::ResolvedSymbolTarget strongOverride;
            ResolvedTargetInfo strongTargetInfo;
            if ( findSymbolInImage(forImage.loadAddress(), strongSymbolName, 0, false, false, strongOverride, strongTargetInfo) ) {
                for (const BuilderLoadedImage& li : _loadedImages) {
                    if ( li.loadAddress()->inDyldCache() && li.loadAddress()->hasWeakDefs() ) {
                        Image::ResolvedSymbolTarget implInCache;
                        ResolvedTargetInfo implInCacheInfo;
                        if ( findSymbolInImage(li.loadAddress(), strongSymbolName, 0, false, false, implInCache, implInCacheInfo) ) {
                            // found another instance in some dylib in the dyld cache, will need to patch it
                            Closure::PatchEntry patch;
                            patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
                            patch.overriddenDylibInCache = li.imageNum;
                            patch.replacement = strongOverride;
                            _weakDefCacheOverrides.push_back(patch);
                        }
                    }
                }
            }
        }
    }, ^() {
        _foundMissingLazyBinds = true;
    });

    // check for the __dyld section in the main executable, to support licenseware
    if ( forImage.loadAddress()->filetype == MH_EXECUTE ) {
        forImage.loadAddress()->forEachSection(^(const MachOAnalyzer::SectionInfo& sectInfo, bool malformedSectionRange, bool& stop) {
            if ( (strcmp(sectInfo.sectName, "__dyld") == 0) && (strcmp(sectInfo.segInfo.segName, "__DATA") == 0) ) {
                // find dyld3::compatFuncLookup in libdyld.dylib
                assert(_libDyldImageNum != 0);
                Image::ResolvedSymbolTarget lookupFuncTarget;
                ResolvedTargetInfo lookupFuncInfo;
                if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld316compatFuncLookupEPKcPPv", 0, false, false, lookupFuncTarget, lookupFuncInfo) ) {
                    // add a bind to set the second pointer in the __dyld section to be dyld3::compatFuncLookup
                    uint64_t runtimeOffset = sectInfo.sectAddr - forImage.loadAddress()->preferredLoadAddress() + forImage.loadAddress()->pointerSize();
                    Image::BindPattern compatFuncPattern;
                    compatFuncPattern.target = lookupFuncTarget;
                    compatFuncPattern.startVmOffset = runtimeOffset;
                    compatFuncPattern.repeatCount = 1;
                    compatFuncPattern.skipCount = 0;
                    assert(compatFuncPattern.startVmOffset == runtimeOffset);
                    binds.push_back(compatFuncPattern);
                }
                else {
                    _diag.error("libdyld.dylib is missing dyld3::compatFuncLookup");
                }
            }
        });
    }

    writer.setBindInfo(binds);
}

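// Worked example of the BindPattern merging above (hypothetical 64-bit
// image): three binds to the same target at offsets 0x2000, 0x2010 and
// 0x2020 (one pointer of padding between each) become a single entry with
// repeatCount=3 and skipCount=1: the first bind creates the pattern, the
// second converts it to repeatCount=2 with skipCount=1, and the third
// matches the existing skipCount, so only repeatCount is bumped.
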
void ClosureBuilder::reportRebasesAndBinds(ImageWriter& writer, BuilderLoadedImage& forImage)
{
    // report all rebases
    forImage.loadAddress()->forEachRebase(_diag, true, ^(uint64_t runtimeOffset, bool& stop) {
        _handlers->rebase(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset);
    });

    // report all binds
    forEachBind(forImage, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
        _handlers->bind(forImage.imageNum, forImage.loadAddress(), (uint32_t)runtimeOffset, target, targetInfo);
    },
    ^(const char* strongSymbolName) {},
    ^() { });

    // i386 programs also use text relocs to rebase stubs
    if ( forImage.loadAddress()->cputype == CPU_TYPE_I386 ) {
        // FIXME
    }
}

// These are mangled symbols for all the variants of operator new and delete
// which a main executable can define (non-weak) and thereby override the
// weak-def implementation in the OS.
static const char* const sTreatAsWeak[] = {
    "__Znwm", "__ZnwmRKSt9nothrow_t",
    "__Znam", "__ZnamRKSt9nothrow_t",
    "__ZdlPv", "__ZdlPvRKSt9nothrow_t", "__ZdlPvm",
    "__ZdaPv", "__ZdaPvRKSt9nothrow_t", "__ZdaPvm",
    "__ZnwmSt11align_val_t", "__ZnwmSt11align_val_tRKSt9nothrow_t",
    "__ZnamSt11align_val_t", "__ZnamSt11align_val_tRKSt9nothrow_t",
    "__ZdlPvSt11align_val_t", "__ZdlPvSt11align_val_tRKSt9nothrow_t", "__ZdlPvmSt11align_val_t",
    "__ZdaPvSt11align_val_t", "__ZdaPvSt11align_val_tRKSt9nothrow_t", "__ZdaPvmSt11align_val_t"
};

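// For reference, the first few demangle (Itanium C++ ABI) as:
//     __Znwm  -> operator new(unsigned long)
//     __Znam  -> operator new[](unsigned long)
//     __ZdlPv -> operator delete(void*)
//     __ZdaPv -> operator delete[](void*)
// (the extra leading underscore is the Mach-O symbol-name prefix)
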
1423
1424 void ClosureBuilder::addChainedFixupInfo(ImageWriter& writer, BuilderLoadedImage& forImage)
1425 {
1426 // build array of targets
1427 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ResolvedSymbolTarget, targets, 1024);
1428 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ResolvedTargetInfo, targetInfos, 1024);
1429 forImage.loadAddress()->forEachChainedFixupTarget(_diag, ^(int libOrdinal, const char* symbolName, uint64_t addend, bool weakImport, bool& stop) {
1430 Image::ResolvedSymbolTarget target;
1431 ResolvedTargetInfo targetInfo;
1432 if ( !findSymbol(forImage, libOrdinal, symbolName, weakImport, false, addend, target, targetInfo) ) {
1433 const char* expectedInPath = forImage.loadAddress()->dependentDylibLoadPath(libOrdinal-1);
1434 _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, forImage.path());
1435 stop = true;
1436 return;
1437 }
1438 if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1439 // add if not already in array
1440 bool alreadyInArray = false;
1441 for (const char* sym : _weakDefsFromChainedBinds) {
1442 if ( strcmp(sym, symbolName) == 0 ) {
1443 alreadyInArray = true;
1444 break;
1445 }
1446 }
1447 if ( !alreadyInArray )
1448 _weakDefsFromChainedBinds.push_back(symbolName);
1449 }
1450 targets.push_back(target);
1451 targetInfos.push_back(targetInfo);
1452 });
1453 if ( _diag.hasError() )
1454 return;
1455
1456 uint64_t chainStartsOffset = forImage.loadAddress()->chainStartsOffset();
1457 if ( _handlers != nullptr ) {
1458 forImage.loadAddress()->withChainStarts(_diag, chainStartsOffset, ^(const dyld_chained_starts_in_image* starts) {
1459 _handlers->chainedBind(forImage.imageNum, forImage.loadAddress(), starts, targets, targetInfos);
1460 });
1461 }
1462 else {
1463 writer.setChainedFixups(chainStartsOffset, targets);
1464 }
1465
1466 // with chained fixups, the main executable may define a symbol that overrides weak-defs but has no fixup
1467 if ( _isLaunchClosure && forImage.loadAddress()->hasWeakDefs() && forImage.loadAddress()->isMainExecutable() ) {
1468 for (const char* weakSymbolName : sTreatAsWeak) {
1469 Diagnostics exportDiag;
1470 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1471 if ( forImage.loadAddress()->findExportedSymbol(exportDiag, weakSymbolName, false, foundInfo, nullptr) ) {
1472 _weakDefsFromChainedBinds.push_back(weakSymbolName);
1473 }
1474 }
1475 }
1476 }
1477
1478
1479 bool ClosureBuilder::findSymbolInImage(const MachOAnalyzer* macho, const char* symbolName, uint64_t addend, bool followReExports,
1480 bool weakImport, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1481 {
1482 targetInfo.foundInDylib = nullptr;
1483 targetInfo.requestedSymbolName = symbolName;
1484 targetInfo.addend = addend;
1485 targetInfo.weakBindCoalese = false;
1486 targetInfo.weakBindSameImage = false;
1487 targetInfo.isWeakDef = false;
1488 targetInfo.skippableWeakDef = false;
1489 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
1490 return (const MachOLoaded*)findDependent(mh, depIndex);
1491 };
1492 MachOAnalyzer::DependentToMachOLoaded finder = nullptr;
1493 if ( followReExports )
1494 finder = reexportFinder;
1495
1496 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
1497 if ( macho->findExportedSymbol(_diag, symbolName, weakImport, foundInfo, finder) ) {
1498 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
1499 targetInfo.foundInDylib = foundInfo.foundInDylib;
1500 targetInfo.foundSymbolName = foundInfo.foundSymbolName;
1501 if ( foundInfo.isWeakDef )
1502 targetInfo.isWeakDef = true;
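// encode the resolved target in one of three forms: an absolute value, an
// offset from the start of the dyld cache, or an (imageNum, offset) pair
// for images that are loaded at runtime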
1503 if ( foundInfo.kind == MachOAnalyzer::FoundSymbol::Kind::absolute ) {
1504 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1505 target.absolute.value = foundInfo.value + addend;
1506 }
1507 else if ( impDylib->inDyldCache() ) {
1508 target.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
1509 target.sharedCache.offset = (uint8_t*)impDylib - (uint8_t*)_dyldCache + foundInfo.value + addend;
1510 }
1511 else {
1512 target.image.kind = Image::ResolvedSymbolTarget::kindImage;
1513 target.image.imageNum = findLoadedImage(impDylib).imageNum;
1514 target.image.offset = foundInfo.value + addend;
1515 }
1516 return true;
1517 }
1518 return false;
1519 }
1520
1521 bool ClosureBuilder::findSymbol(BuilderLoadedImage& fromImage, int libOrdinal, const char* symbolName, bool weakImport, bool lazyBind,
1522 uint64_t addend, Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1523 {
1524 target.raw = 0;
1525 targetInfo.weakBindCoalese = false;
1526 targetInfo.weakBindSameImage = false;
1527 targetInfo.isWeakDef = false;
1528 targetInfo.skippableWeakDef = false;
1529 targetInfo.requestedSymbolName = symbolName;
1530 targetInfo.libOrdinal = libOrdinal;
1531 if ( libOrdinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP ) {
1532 for (const BuilderLoadedImage& li : _loadedImages) {
1533 if ( !li.rtldLocal && findSymbolInImage(li.loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1534 return true;
1535 }
1536 if ( weakImport ) {
1537 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1538 target.absolute.value = 0;
1539 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1540 fromImage.hasMissingWeakImports = true;
1541 return true;
1542 }
1543 // <rdar://problem/44315944> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld in flat namespace
1544 if ( lazyBind && _allowMissingLazies ) {
1545 if ( findMissingSymbolHandler(target, targetInfo) )
1546 return true;
1547 }
1548 _diag.error("symbol '%s' not found, expected in flat namespace by '%s'", symbolName, fromImage.path());
1549 }
1550 else if ( libOrdinal == BIND_SPECIAL_DYLIB_WEAK_LOOKUP ) {
1551 // to resolve weakDef coalescing, we need to search all images in order and use the first definition
1552 // but, if the first found is a weakDef, a later non-weak def overrides it
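// e.g. if A loads before B and both weakly define _x, A's impl is used;
// but if B defines _x non-weak, B's impl wins even though B is later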
1553 bool foundWeakDefImpl = false;
1554 bool foundStrongDefImpl = false;
1555 bool foundImpl = false;
1556
1557 if ( _makingDyldCacheImages ) {
1558 // _loadedImages is all dylibs in the dyld cache; it is not in load order, so we need an alternate weak-def binding algorithm
1559 // look first in /usr/lib/libc++, most will be here
1560 for (const BuilderLoadedImage& li : _loadedImages) {
1561 if ( li.loadAddress()->hasWeakDefs() && (strncmp(li.path(), "/usr/lib/libc++", 15) == 0) ) {
1562 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1563 foundImpl = true;
1564 break;
1565 }
1566 }
1567 }
1568 // if not found, try looking in the image itself; most custom weak-def symbols have a copy in the image itself
1569 if ( !foundImpl ) {
1570 if ( findSymbolInImage(fromImage.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1571 foundImpl = true;
1572 }
1573 }
1574 // if still not found, then this is the rare case of a simple use of a weak-def symbol
1575 if ( !foundImpl ) {
1576 // look in all direct dependents
1577 for (Image::LinkedImage child : fromImage.dependents) {
1578 if (child.imageNum() == kMissingWeakLinkedImage)
1579 continue;
1580 BuilderLoadedImage& childLi = findLoadedImage(child.imageNum());
1581 if ( childLi.loadAddress()->hasWeakDefs() && findSymbolInImage(childLi.loadAddress(), symbolName, addend, false, weakImport, target, targetInfo) ) {
1582 foundImpl = true;
1583 break;
1584 }
1585 }
1586 }
1587 targetInfo.weakBindCoalese = true;
1588 }
1589 else {
1590 // walk images in load-order to find first that implements this symbol
1591 Image::ResolvedSymbolTarget aTarget;
1592 ResolvedTargetInfo aTargetInfo;
1593 STACK_ALLOC_ARRAY(const BuilderLoadedImage*, cachedDylibsUsingSymbol, 1024);
1594 for (const BuilderLoadedImage& li : _loadedImages) {
1595 // only search images with weak-defs that were not loaded with RTLD_LOCAL
1596 if ( li.loadAddress()->hasWeakDefs() && !li.rtldLocal ) {
1597 if ( findSymbolInImage(li.loadAddress(), symbolName, addend, false, weakImport, aTarget, aTargetInfo) ) {
1598 foundImpl = true;
1599 // with non-chained images, weak-defs first have a rebase to their local impl, and a weak-bind which allows earlier impls to override
1600 if ( !li.loadAddress()->hasChainedFixups() && (aTargetInfo.foundInDylib == fromImage.loadAddress()) )
1601 targetInfo.weakBindSameImage = true;
1602 if ( aTargetInfo.isWeakDef ) {
1603 // found a weakDef impl; if it is the first found, set target to it
1604 if ( !foundWeakDefImpl && !foundStrongDefImpl ) {
1605 target = aTarget;
1606 targetInfo = aTargetInfo;
1607 }
1608 foundWeakDefImpl = true;
1609 }
1610 else {
1611 // found a non-weak impl, use it (unless an earlier strong def was found)
1612 if ( !foundStrongDefImpl ) {
1613 target = aTarget;
1614 targetInfo = aTargetInfo;
1615 }
1616 foundStrongDefImpl = true;
1617 }
1618 }
1619 if ( foundImpl && li.loadAddress()->inDyldCache() )
1620 cachedDylibsUsingSymbol.push_back(&li);
1621 }
1622 }
1623
1624 // now that the final target is found, if any dylib in the dyld cache uses that symbol name, redirect it to the new target
1625 if ( !cachedDylibsUsingSymbol.empty() ) {
1626 for (const BuilderLoadedImage* li : cachedDylibsUsingSymbol) {
1627 Image::ResolvedSymbolTarget implInCache;
1628 ResolvedTargetInfo implInCacheInfo;
1629 if ( findSymbolInImage(li->loadAddress(), symbolName, addend, false, weakImport, implInCache, implInCacheInfo) ) {
1630 if ( implInCache != target ) {
1631 // found another instance in some dylib in dyld cache, will need to patch it
1632 Closure::PatchEntry patch;
1633 patch.exportCacheOffset = (uint32_t)implInCache.sharedCache.offset;
1634 patch.overriddenDylibInCache = li->imageNum;
1635 patch.replacement = target;
1636 _weakDefCacheOverrides.push_back(patch);
1637 }
1638 }
1639 }
1640 }
1641 targetInfo.weakBindCoalese = true;
1642 }
1643
1644 if ( foundImpl )
1645 return true;
1646 if ( weakImport ) {
1647 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1648 target.absolute.value = 0;
1649 return true;
1650 }
1651 if ( ! fromImage.loadAddress()->hasChainedFixups() ) {
1652 // support old binaries where symbols have been stripped and weak_bind to themselves
1653 targetInfo.skippableWeakDef = true;
1654 return true;
1655 }
1656
1657 _diag.error("symbol '%s' not found, expected to be weak-def coalesced by '%s'", symbolName, fromImage.path());
1658 }
1659 else {
1660 const BuilderLoadedImage* targetLoadedImage = nullptr;
1661 if ( (libOrdinal > 0) && (libOrdinal <= (int)fromImage.dependents.count()) ) {
1662 ImageNum childNum = fromImage.dependents[libOrdinal - 1].imageNum();
1663 if ( childNum != kMissingWeakLinkedImage ) {
1664 targetLoadedImage = &findLoadedImage(childNum);
1665 }
1666 }
1667 else if ( libOrdinal == BIND_SPECIAL_DYLIB_SELF ) {
1668 targetLoadedImage = &fromImage;
1669 }
1670 else if ( libOrdinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE ) {
1671 targetLoadedImage = &_loadedImages[_mainProgLoadIndex];
1672 }
1673 else {
1674 _diag.error("unknown special ordinal %d in %s", libOrdinal, fromImage.path());
1675 return false;
1676 }
1677
1678 if ( targetLoadedImage != nullptr ) {
1679 if ( findSymbolInImage(targetLoadedImage->loadAddress(), symbolName, addend, true, weakImport, target, targetInfo) )
1680 return true;
1681 }
1682
1683 if ( weakImport ) {
1684 target.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
1685 target.absolute.value = 0;
1686 // Record that we found a missing weak import so that the objc optimizer doesn't have to check
1687 fromImage.hasMissingWeakImports = true;
1688 return true;
1689 }
1690
1691 // <rdar://problem/43315403> closures should bind missing lazy-bind symbols to a missing symbol handler in libdyld
1692 if ( lazyBind && _allowMissingLazies ) {
1693 if ( findMissingSymbolHandler(target, targetInfo) )
1694 return true;
1695 }
1696
1697 // symbol not found and not weak or lazy so error out
1698 const char* expectedInPath = targetLoadedImage ? targetLoadedImage->path() : "unknown";
1699 _diag.error("symbol '%s' not found, expected in '%s', needed by '%s'", symbolName, expectedInPath, fromImage.path());
1700 if ( _launchErrorInfo != nullptr ) {
1701 _launchErrorInfo->kind = DYLD_EXIT_REASON_SYMBOL_MISSING;
1702 _launchErrorInfo->clientOfDylibPath = strdup_temp(fromImage.path());
1703 _launchErrorInfo->targetDylibPath = strdup_temp(expectedInPath);
1704 _launchErrorInfo->symbol = symbolName;
1705 }
1706 }
1707 return false;
1708 }
1709
1710
1711 bool ClosureBuilder::findMissingSymbolHandler(Image::ResolvedSymbolTarget& target, ResolvedTargetInfo& targetInfo)
1712 {
1713 for (BuilderLoadedImage& li : _loadedImages) {
1714 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) ) {
1715 if ( findSymbolInImage(li.loadAddress(), "__dyld_missing_symbol_abort", 0, false, false, target, targetInfo) ) {
1716 return true;
1717 }
1718 break;
1719 }
1720 }
1721 return false;
1722 }
1723
1724 void ClosureBuilder::depthFirstRecurseSetInitInfo(uint32_t loadIndex, InitInfo initInfos[], uint32_t& initOrder, bool& hasError)
1725 {
1726 if ( initInfos[loadIndex].visited )
1727 return;
1728 initInfos[loadIndex].visited = true;
1729 initInfos[loadIndex].danglingUpward = false;
1730
1731 if (_loadedImages[loadIndex].isBadImage) {
1732 hasError = true;
1733 return;
1734 }
1735
1736 for (const Image::LinkedImage& dep : _loadedImages[loadIndex].dependents) {
1737 if ( dep.imageNum() == kMissingWeakLinkedImage )
1738 continue;
1739 ClosureBuilder::BuilderLoadedImage& depLi = findLoadedImage(dep.imageNum());
1740 uint32_t depLoadIndex = (uint32_t)_loadedImages.index(depLi);
1741 if ( dep.kind() == Image::LinkKind::upward ) {
1742 if ( !initInfos[depLoadIndex].visited )
1743 initInfos[depLoadIndex].danglingUpward = true;
1744 }
1745 else {
1746 depthFirstRecurseSetInitInfo(depLoadIndex, initInfos, initOrder, hasError);
1747 if (hasError)
1748 return;
1749 }
1750 }
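// all non-upward dependents are numbered at this point, so this image gets
// the next init order; lower numbers have their initializers run first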
1751 initInfos[loadIndex].initOrder = initOrder++;
1752 }
1753
1754 void ClosureBuilder::computeInitOrder(ImageWriter& imageWriter, uint32_t loadIndex)
1755 {
1756 // allocate array to track initializers
1757 InitInfo initInfos[_loadedImages.count()];
1758 bzero(initInfos, sizeof(initInfos));
1759
1760 // recurse all images and build initializer list from bottom up
1761 uint32_t initOrder = 1;
1762 bool hasMissingDependent = false;
1763 depthFirstRecurseSetInitInfo(loadIndex, initInfos, initOrder, hasMissingDependent);
1764 if (hasMissingDependent) {
1765 imageWriter.setInvalid();
1766 return;
1767 }
1768
1769 // any images not visited yet are dangling; force-add them to the end of the init list
1770 for (uint32_t i=0; i < (uint32_t)_loadedImages.count(); ++i) {
1771 if ( !initInfos[i].visited && initInfos[i].danglingUpward ) {
1772 depthFirstRecurseSetInitInfo(i, initInfos, initOrder, hasMissingDependent);
1773 }
1774 }
1775
1776 if (hasMissingDependent) {
1777 imageWriter.setInvalid();
1778 return;
1779 }
1780
1781 // build array of just images with initializers
1782 STACK_ALLOC_ARRAY(uint32_t, indexOfImagesWithInits, _loadedImages.count());
1783 uint32_t index = 0;
1784 for (const BuilderLoadedImage& li : _loadedImages) {
1785 if ( initInfos[index].visited && li.hasInits ) {
1786 indexOfImagesWithInits.push_back(index);
1787 }
1788 ++index;
1789 }
1790
1791 // bubble sort (FIXME)
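// (the list of images with initializers is typically short, so quadratic sorting is tolerable here)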
1792 if ( indexOfImagesWithInits.count() > 1 ) {
1793 for (uint32_t i=0; i < indexOfImagesWithInits.count()-1; ++i) {
1794 for (uint32_t j=0; j < indexOfImagesWithInits.count()-i-1; ++j) {
1795 if ( initInfos[indexOfImagesWithInits[j]].initOrder > initInfos[indexOfImagesWithInits[j+1]].initOrder ) {
1796 uint32_t temp = indexOfImagesWithInits[j];
1797 indexOfImagesWithInits[j] = indexOfImagesWithInits[j+1];
1798 indexOfImagesWithInits[j+1] = temp;
1799 }
1800 }
1801 }
1802 }
1803
1804 // copy ImageNum of each image with initializers into array
1805 ImageNum initNums[indexOfImagesWithInits.count()];
1806 for (uint32_t i=0; i < indexOfImagesWithInits.count(); ++i) {
1807 initNums[i] = _loadedImages[indexOfImagesWithInits[i]].imageNum;
1808 }
1809
1810 // add to closure info
1811 imageWriter.setInitsOrder(initNums, (uint32_t)indexOfImagesWithInits.count());
1812 }
1813
1814 void ClosureBuilder::addClosureInfo(LaunchClosureWriter& closureWriter)
1815 {
1816 // record which is libSystem
1817 assert(_libSystemImageNum != 0);
1818 closureWriter.setLibSystemImageNum(_libSystemImageNum);
1819
1820 // record which is libdyld
1821 assert(_libDyldImageNum != 0);
1822 Image::ResolvedSymbolTarget entryLocation;
1823 ResolvedTargetInfo entryInfo;
1824 if ( findSymbolInImage(findLoadedImage(_libDyldImageNum).loadAddress(), "__ZN5dyld318entryVectorForDyldE", 0, false, false, entryLocation, entryInfo) ) {
1825 const dyld3::LibDyldEntryVector* libDyldEntry = nullptr;
1826 switch ( entryLocation.image.kind ) {
1827 case Image::ResolvedSymbolTarget::kindSharedCache:
1828 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)_dyldCache + entryLocation.sharedCache.offset);
1829 break;
1830 case Image::ResolvedSymbolTarget::kindImage:
1831 libDyldEntry = (dyld3::LibDyldEntryVector*)((uint8_t*)findLoadedImage(entryLocation.image.imageNum).loadAddress() + entryLocation.image.offset);
1832 break;
1833 }
1834 if ( (libDyldEntry != nullptr) && ((libDyldEntry->binaryFormatVersion & LibDyldEntryVector::kBinaryFormatVersionMask) == dyld3::closure::kFormatVersion) )
1835 closureWriter.setLibDyldEntry(entryLocation);
1836 else
1837 _diag.error("libdyld.dylib entry vector is incompatible");
1838 }
1839 else {
1840 _diag.error("libdyld.dylib is missing entry vector");
1841 }
1842
1843 // record which is main executable
1844 ImageNum mainProgImageNum = _loadedImages[_mainProgLoadIndex].imageNum;
1845 closureWriter.setTopImageNum(mainProgImageNum);
1846
1847 // add entry
1848 uint32_t entryOffset;
1849 bool usesCRT;
1850 if ( _loadedImages[_mainProgLoadIndex].loadAddress()->getEntry(entryOffset, usesCRT) ) {
1851 Image::ResolvedSymbolTarget location;
1852 location.image.kind = Image::ResolvedSymbolTarget::kindImage;
1853 location.image.imageNum = mainProgImageNum;
1854 location.image.offset = entryOffset;
1855 if ( usesCRT )
1856 closureWriter.setStartEntry(location);
1857 else
1858 closureWriter.setMainEntry(location);
1859 }
1860
1861 // add env vars that must match at launch time
1862 _pathOverrides.forEachEnvVar(^(const char* envVar) {
1863 closureWriter.addEnvVar(envVar);
1864 });
1865
1866 // add list of files which must be missing
1867 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(const char*, paths, 8192);
1868 if ( _mustBeMissingPaths != nullptr ) {
1869 _mustBeMissingPaths->forEachPath(^(const char* aPath) {
1870 paths.push_back(aPath);
1871 });
1872 }
1873 closureWriter.setMustBeMissingFiles(paths);
1874
1875 // add list of files which must be present with a specific inode/mtime
1876 if (!_skippedFiles.empty())
1877 closureWriter.setMustExistFiles(_skippedFiles);
1878 }
1879 void ClosureBuilder::invalidateInitializerRoots()
1880 {
1881 while (true) {
1882 bool madeChange = false;
1883 for (uintptr_t loadedImageIndex = _alreadyInitedIndex; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
1884 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
1885 if ( li.mustBuildClosure ) {
1886 // Already invalidated
1887 continue;
1888 }
1889 for (Image::LinkedImage depIndex : li.dependents) {
1890 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
1891 continue;
1892 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
1893 // If a dependent is bad, or a new image num, or an override, then we need this image to get a new closure
1894 if ( depImage.mustBuildClosure ) {
1895 li.mustBuildClosure = true; // mark bad
1896 madeChange = true;
1897 }
1898 }
1899 }
1900 if (!madeChange)
1901 break;
1902 // If we made a change, then we detected an existing image with a dependent which needed to be rebuilt.
1903 // This corresponds to a root of the shared cache: the existing image is a shared cache image and the root is the depImage
1904 _foundDyldCacheRoots = true;
1905 }
1906 }
1907
1908 size_t ClosureBuilder::HashCString::hash(const char* v) {
1909 // FIXME: Use hash<string_view> when it has the correct visibility markup
1910 return __gnu_cxx::hash<const char*>{}(v);
1911 }
1912
1913 bool ClosureBuilder::EqualCString::equal(const char* s1, const char* s2) {
1914 return strcmp(s1, s2) == 0;
1915 }
1916
1917
1918 struct HashUInt64 {
1919 static size_t hash(const uint64_t& v) {
1920 return std::hash<uint64_t>{}(v);
1921 }
1922 };
1923
1924 struct EqualUInt64 {
1925 static bool equal(uint64_t s1, uint64_t s2) {
1926 return s1 == s2;
1927 }
1928 };
1929
1930 void ClosureBuilder::writeClassOrProtocolHashTable(bool classes, Array<ObjCOptimizerImage>& objcImages) {
1931 __block MultiMap<const char*, dyld3::closure::Image::ObjCClassImageOffset, HashCString, EqualCString> seenClassesMap;
1932 __block Map<const char*, dyld3::closure::Image::ObjCClassNameImageOffset, HashCString, EqualCString> classNameMap;
1933 __block OverflowSafeArray<const char*> classNames;
1934
1935 // Note we walk the images backwards as we want them in load order to match the order they are registered with objc
1936 for (size_t imageIndex = 0, reverseIndex = (objcImages.count() - 1); imageIndex != objcImages.count(); ++imageIndex, --reverseIndex) {
1937 if (objcImages[reverseIndex].diag.hasError())
1938 continue;
1939 ObjCOptimizerImage& image = objcImages[reverseIndex];
1940 const OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = classes ? image.seenClasses : image.seenProtocols;
1941
1942 for (const ObjCOptimizerImage::SeenClass& seenClass : seenClasses) {
1943 closure::Image::ObjCClassNameImageOffset classNameTarget = seenClass.first;
1944 dyld3::closure::Image::ObjCClassImageOffset classDataTarget = seenClass.second;
1945 Image::ObjCClassImage classImage = _objcClassesHashTableImages[classNameTarget.classNameImageIndex];
1946
1947 const BuilderLoadedImage& li = findLoadedImage(classImage.imageNum);
1948 const dyld3::MachOAnalyzer* ma = li.loadAddress();
1949
1950 const char* className = ((const char*)ma) + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1951 //uint64_t nameVMAddr = ma->preferredLoadAddress() + classImage.offsetOfClassNames + classNameTarget.classNameImageOffset;
1952 //printf("%s: 0x%08llx = '%s'\n", li.path(), nameVMAddr, className);
1953 seenClassesMap.insert({ className, classDataTarget });
1954
1955 // Also track the name
1956 auto itAndInserted = classNameMap.insert({ className, dyld3::closure::Image::ObjCClassNameImageOffset() });
1957 if (itAndInserted.second) {
1958 // We inserted the class name so we need to add it to the strings for the closure hash table
1959 classNames.push_back(className);
1960
1961 // We already computed a class name target in a previous loop so use that one
1962 itAndInserted.first->second = seenClass.first;
1963
1964 // If we are processing protocols, and this is the first one we've seen, then track its ISA to be fixed up
1965 if ( !classes ) {
1966 uint64_t protocolVMOffset = classImage.offsetOfClasses + classDataTarget.classData.imageOffset;
1967 image.protocolISAFixups.push_back(protocolVMOffset);
1968 }
1969 }
1970 }
1971 }
1972
1973 __block uint32_t duplicateCount = 0;
1974 seenClassesMap.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values,
1975 uint64_t valuesCount) {
1976 if (valuesCount != 1)
1977 duplicateCount += valuesCount;
1978 });
1979
1980 // If we have closure class names, we need to make a hash table for them.
1981 OverflowSafeArray<uint8_t>& hashTable = classes ? _objcClassesHashTable : _objcProtocolsHashTable;
1982 if (!classNames.empty()) {
1983 objc_opt::perfect_hash phash;
1984 objc_opt::make_perfect(classNames, phash);
1985 size_t size = ObjCClassOpt::size(phash, duplicateCount);
1986 hashTable.resize(size);
1987 //printf("Class table size: %lld\n", size);
1988 ObjCClassOpt* classesHashTable = (ObjCClassOpt*)hashTable.begin();
1989 classesHashTable->write(phash, classNameMap.array(), seenClassesMap, duplicateCount);
1990 }
1991 }
1992
1993 bool ClosureBuilder::optimizeObjC(Array<ImageWriter>& writers) {
1994 if ( _dyldCache == nullptr )
1995 return false;
1996
1997 // If we have the read only data, make sure it has a valid selector table inside.
1998 const objc_opt::objc_clsopt_t* objcClassOpt = nullptr;
1999 const objc_opt::objc_selopt_t* objcSelOpt = nullptr;
2000 const objc_opt::objc_protocolopt2_t* objcProtocolOpt = nullptr;
2001 if (const objc_opt::objc_opt_t* optObjCHeader = _dyldCache->objcOpt()) {
2002 objcClassOpt = optObjCHeader->clsopt();
2003 objcSelOpt = optObjCHeader->selopt();
2004 objcProtocolOpt = optObjCHeader->protocolopt2();
2005 }
2006
2007 if ( !objcClassOpt || !objcSelOpt || !objcProtocolOpt )
2008 return false;
2009
2010 // We have 24 bits of index in SelectorReferenceFixup so we can't handle a
2011 // shared cache selector table larger than that
2012 if ( objcSelOpt->usedCount() >= (1 << 24) )
2013 return false;
2014
2015 // Make sure we have the pointers section with the pointer to the protocol class
2016 const void* objcOptPtrs = _dyldCache->objcOptPtrs();
2017 if ( objcOptPtrs == nullptr )
2018 return false;
2019
2020 uint32_t pointerSize = _loadedImages.begin()->loadAddress()->pointerSize();
2021 uint64_t classProtocolVMAddr = (pointerSize == 8) ? *(uint64_t*)objcOptPtrs : *(uint32_t*)objcOptPtrs;
2022
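// turn the Protocol class address into a shared-cache-relative offset; if the
// cache is live, the value read above is an actual pointer, so subtract the
// cache's load address, otherwise subtract its preferred unslid base address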
2023 Image::ResolvedSymbolTarget objcProtocolClassTarget;
2024 objcProtocolClassTarget.sharedCache.kind = Image::ResolvedSymbolTarget::kindSharedCache;
2025 if ( _dyldCacheIsLive ) {
2026 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - (uint64_t)_dyldCache;
2027 } else {
2028 objcProtocolClassTarget.sharedCache.offset = classProtocolVMAddr - _dyldCache->unslidLoadAddress();
2029 }
2030
2031 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(ObjCOptimizerImage, objcImages, 32);
2032 ArrayFinalizer<ObjCOptimizerImage> scopedCleanup(objcImages,
2033 ^(ObjCOptimizerImage& objcImage) {
2034 objcImage.~ObjCOptimizerImage();
2035 });
2036
2037 // Find all the images with valid objc info
2038 // Also add shared cache images to a map so that we can see them later for looking up classes
2039 Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer> sharedCacheImagesMap;
2040 for (size_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
2041 BuilderLoadedImage& li = _loadedImages[imageIndex];
2042
2043 // Skip shared cache images as even if they need a new closure, the objc runtime can still use
2044 // the optimized shared cache tables.
2045 if ( li.loadAddress()->inDyldCache() ) {
2046 sharedCacheImagesMap.insert({ li.loadAddress(), true });
2047 // Bump the writer index if we have a writer for this image
2048 if ( li.mustBuildClosure )
2049 ++writerIndex;
2050 continue;
2051 }
2052 // Images which don't need a closure can be skipped. They are from the shared cache
2053 if ( !li.mustBuildClosure )
2054 continue;
2055
2056 // If we have a root of libobjc, just give up for now
2057 if ( !strcmp(li.path(), "/usr/lib/libobjc.A.dylib"))
2058 return false;
2059
2060 ImageWriter& writer = writers[writerIndex];
2061 ++writerIndex;
2062
2063 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2064
2065 // Skip images whose chained fixup format we haven't tested yet; only arm64e and 64-bit chains are handled below
2066 // FIXME: Handle the remaining (32-bit) chained fixup formats
2067 if ( ma->hasChainedFixups() ) {
2068 switch ( ma->chainedPointerFormat() ) {
2069 case DYLD_CHAINED_PTR_ARM64E:
2070 case DYLD_CHAINED_PTR_64:
2071 // We've tested the 64-bit chained fixups.
2072 break;
2073 case DYLD_CHAINED_PTR_32:
2074 case DYLD_CHAINED_PTR_32_CACHE:
2075 case DYLD_CHAINED_PTR_32_FIRMWARE:
2076 // FIXME: Test 32-bit chained fixups then enable this.
2077 continue;
2078 }
2079 }
2080
2081 const MachOAnalyzer::ObjCImageInfo* objcImageInfo = ma->objcImageInfo();
2082 if ( objcImageInfo == nullptr )
2083 continue;
2084
2085 // This image is good so record it for use later.
2086 objcImages.default_constuct_back();
2087 ObjCOptimizerImage& image = objcImages.back();
2088 image.loadedImage = &li;
2089 image.writer = &writer;
2090
2091 // Find FairPlay encryption range if encrypted
2092 uint32_t fairPlayFileOffset;
2093 uint32_t fairPlaySize;
2094 if ( ma->isFairPlayEncrypted(fairPlayFileOffset, fairPlaySize) ) {
2095 image.fairplayFileOffsetStart = fairPlayFileOffset;
2096 image.fairplayFileOffsetEnd = fairPlayFileOffset + fairPlaySize;
2097 }
2098
2099 // Set the offset to the objc image info
2100 image.objcImageInfoVMOffset = (uint64_t)objcImageInfo - (uint64_t)ma;
2101 }
2102
2103 OverflowSafeArray<const char*> closureSelectorStrings;
2104 Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString> closureSelectorMap;
2105 OverflowSafeArray<const char*> closureDuplicateSharedCacheClassNames;
2106 Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString> closureDuplicateSharedCacheClassMap;
2107 for (ObjCOptimizerImage& image : objcImages) {
2108 optimizeObjCClasses(objcClassOpt, sharedCacheImagesMap, closureDuplicateSharedCacheClassMap, image);
2109 if (image.diag.hasError())
2110 continue;
2111
2112 optimizeObjCProtocols(objcProtocolOpt, sharedCacheImagesMap, image);
2113 if (image.diag.hasError())
2114 continue;
2115
2116 optimizeObjCSelectors(objcSelOpt, closureSelectorMap, image);
2117 if (image.diag.hasError())
2118 continue;
2119
2120 // If this image is still valid, then add its intermediate results to the main tables
2121
2122 // Class results
2123 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2124 uint64_t nameVMOffset = nameAndDataVMOffset.first;
2125 uint64_t dataVMOffset = nameAndDataVMOffset.second;
2126 _objcClassesHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)nameVMOffset, (uint32_t)dataVMOffset });
2127 }
2128 image.classesNameAndDataVMOffsets.clear();
2129
2130 for (const auto& stringAndDuplicate : image.classSharedCacheDuplicates) {
2131 closureDuplicateSharedCacheClassMap[stringAndDuplicate.first] = stringAndDuplicate.second;
2132 closureDuplicateSharedCacheClassNames.push_back(stringAndDuplicate.first);
2133 }
2134
2135 // Selector results
2136 // Note we don't need to add the selector binds here. It's easier just to process them later from each image
2137 for (const auto& stringAndTarget : image.selectorMap) {
2138 closureSelectorMap[stringAndTarget.first] = stringAndTarget.second;
2139 closureSelectorStrings.push_back(stringAndTarget.first);
2140 }
2141 if (image.methodNameVMOffset)
2142 _objcSelectorsHashTableImages.push_back({ image.loadedImage->imageNum, (uint32_t)*image.methodNameVMOffset });
2143 }
2144
2145 // If we successfully analyzed the classes and selectors, we can now emit their data
2146 // Set all the writers to have optimized objc
2147 for (ObjCOptimizerImage& image : objcImages) {
2148 if (image.diag.hasError())
2149 continue;
2150 image.writer->setHasPrecomputedObjC(true);
2151 }
2152
2153 // Write out the class table
2154 writeClassOrProtocolHashTable(true, objcImages);
2155
2156 // Write out the protocol table
2157 writeClassOrProtocolHashTable(false, objcImages);
2158
2159 // If we have closure duplicate classes, we need to make a hash table for them.
2160 closure::ObjCStringTable* duplicateClassesTable = nullptr;
2161 if (!closureDuplicateSharedCacheClassNames.empty()) {
2162 objc_opt::perfect_hash phash;
2163 objc_opt::make_perfect(closureDuplicateSharedCacheClassNames, phash);
2164 size_t size = ObjCStringTable::size(phash);
2165 _objcClassesDuplicatesHashTable.resize(size);
2166 //printf("Duplicate classes table size: %lld\n", size);
2167 duplicateClassesTable = (closure::ObjCClassDuplicatesOpt*)_objcClassesDuplicatesHashTable.begin();
2168 duplicateClassesTable->write(phash, closureDuplicateSharedCacheClassMap.array());
2169 }
2170
2171 // If we have closure selectors, we need to make a hash table for them.
2172 closure::ObjCStringTable* selectorStringTable = nullptr;
2173 if (!closureSelectorStrings.empty()) {
2174 objc_opt::perfect_hash phash;
2175 objc_opt::make_perfect(closureSelectorStrings, phash);
2176 size_t size = ObjCStringTable::size(phash);
2177 _objcSelectorsHashTable.resize(size);
2178 //printf("Selector table size: %lld\n", size);
2179 selectorStringTable = (closure::ObjCStringTable*)_objcSelectorsHashTable.begin();
2180 selectorStringTable->write(phash, closureSelectorMap.array());
2181 }
2182
2183 // Add fixups for the image info, protocol ISAs, and selector refs
2184 for (ObjCOptimizerImage& image : objcImages) {
2185 if (image.diag.hasError())
2186 continue;
2187
2188 // Protocol ISA references
2189 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ProtocolISAFixup, protocolFixups, 512);
2190 if ( !image.protocolISAFixups.empty() ) {
2191
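// run-length encode consecutive fixup offsets: each pattern fixes repeatCount
// pointers with skipCount pointers skipped between repeats; e.g. with 8-byte
// pointers, offsets {0x1000, 0x1008, 0x1010} collapse into a single pattern
// {startVmOffset=0x1000, repeatCount=3, skipCount=0}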
2192 __block uint64_t lastOffset = -pointerSize;
2193 for (uint64_t runtimeOffset : image.protocolISAFixups) {
2194 bool mergedIntoPrevious = false;
2195 if ( (runtimeOffset > lastOffset) && !protocolFixups.empty() ) {
2196 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2197 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2198 // misaligned pointer means we cannot optimize
2199 }
2200 else {
2201 if ( (protocolFixups.back().repeatCount == 1) && (protocolFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2202 protocolFixups.back().repeatCount = 2;
2203 protocolFixups.back().skipCount = skipAmount;
2204 assert(protocolFixups.back().skipCount == skipAmount); // check overflow
2205 mergedIntoPrevious = true;
2206 }
2207 else if ( (protocolFixups.back().skipCount == skipAmount) && (protocolFixups.back().repeatCount < 0xfff) ) {
2208 uint32_t prevRepeatCount = protocolFixups.back().repeatCount;
2209 protocolFixups.back().repeatCount += 1;
2210 assert(protocolFixups.back().repeatCount > prevRepeatCount); // check overflow
2211 mergedIntoPrevious = true;
2212 }
2213 }
2214 }
2215 if ( !mergedIntoPrevious ) {
2216 Image::ProtocolISAFixup pattern;
2217 pattern.startVmOffset = runtimeOffset;
2218 pattern.repeatCount = 1;
2219 pattern.skipCount = 0;
2220 assert(pattern.startVmOffset == runtimeOffset);
2221 protocolFixups.push_back(pattern);
2222 }
2223 lastOffset = runtimeOffset;
2224 }
2225 }
2226
2227 // Selector references
2228 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::SelectorReferenceFixup, selRefFixups, 512);
2229 if ( !image.selectorFixups.empty() ) {
2230 uint64_t prevVMOffset = 0;
2231 const uint64_t maxChainOffset = (4 * ((1 << 7) - 1));
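// chainEntry.next is 7 bits wide and counts 4-byte units, so one chain entry
// can reach at most 127*4 = 508 bytes ahead; anything further starts a new chain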
2232 for (const ObjCOptimizerImage::SelectorFixup& selectorFixup : image.selectorFixups) {
2233 assert( (selectorFixup.fixupVMOffset & 3) == 0 );
2234 if ( (selectorFixup.fixupVMOffset - prevVMOffset) <= maxChainOffset ) {
2235 // Add this to the previous chain
2236 selRefFixups.back().chainEntry.next = (uint32_t)(selectorFixup.fixupVMOffset - prevVMOffset) / 4;
2237 } else {
2238 // Need to start a new chain as the previous offset can't reach
2239 Image::SelectorReferenceFixup fixup;
2240 fixup.chainStartVMOffset = selectorFixup.fixupVMOffset;
2241 selRefFixups.push_back(fixup);
2242 }
2243
2244 if ( selectorFixup.isSharedCache ) {
2245 // If the entry is in the shared cache then we already have the index for it
2246 Image::SelectorReferenceFixup fixup;
2247 fixup.chainEntry.index = selectorFixup.sharedCache.selectorTableIndex;
2248 fixup.chainEntry.next = 0;
2249 fixup.chainEntry.inSharedCache = 1;
2250 selRefFixups.push_back(fixup);
2251 } else {
2252 // We had to record the string for the closure table entries as we don't know the
2253 // index until now
2254 uint32_t selectorTableIndex = selectorStringTable->getIndex(selectorFixup.image.selectorString);
2255 assert(selectorTableIndex != ObjCSelectorOpt::indexNotFound);
2256 Image::SelectorReferenceFixup fixup;
2257 fixup.chainEntry.index = selectorTableIndex;
2258 fixup.chainEntry.next = 0;
2259 fixup.chainEntry.inSharedCache = 0;
2260 selRefFixups.push_back(fixup);
2261 }
2262
2263 prevVMOffset = selectorFixup.fixupVMOffset;
2264 }
2265 }
2266
2267 // Stable Swift fixups
2268 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::ClassStableSwiftFixup, stableSwiftFixups, 512);
2269 if ( !image.classStableSwiftFixups.empty() ) {
2270
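// same run-length merging as the protocol ISA fixups above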
2271 __block uint64_t lastOffset = -pointerSize;
2272 for (uint64_t runtimeOffset : image.classStableSwiftFixups) {
2273 bool mergedIntoPrevious = false;
2274 if ( (runtimeOffset > lastOffset) && !stableSwiftFixups.empty() ) {
2275 uint64_t skipAmount = (runtimeOffset - lastOffset - pointerSize)/pointerSize;
2276 if ( skipAmount*pointerSize != (runtimeOffset - lastOffset - pointerSize) ) {
2277 // misaligned pointer means we cannot optimize
2278 }
2279 else {
2280 if ( (stableSwiftFixups.back().repeatCount == 1) && (stableSwiftFixups.back().skipCount == 0) && (skipAmount <= 255) ) {
2281 stableSwiftFixups.back().repeatCount = 2;
2282 stableSwiftFixups.back().skipCount = skipAmount;
2283 assert(stableSwiftFixups.back().skipCount == skipAmount); // check overflow
2284 mergedIntoPrevious = true;
2285 }
2286 else if ( (stableSwiftFixups.back().skipCount == skipAmount) && (stableSwiftFixups.back().repeatCount < 0xfff) ) {
2287 uint32_t prevRepeatCount = stableSwiftFixups.back().repeatCount;
2288 stableSwiftFixups.back().repeatCount += 1;
2289 assert(stableSwiftFixups.back().repeatCount > prevRepeatCount); // check overflow
2290 mergedIntoPrevious = true;
2291 }
2292 }
2293 }
2294 if ( !mergedIntoPrevious ) {
2295 Image::ClassStableSwiftFixup pattern;
2296 pattern.startVmOffset = runtimeOffset;
2297 pattern.repeatCount = 1;
2298 pattern.skipCount = 0;
2299 assert(pattern.startVmOffset == runtimeOffset);
2300 stableSwiftFixups.push_back(pattern);
2301 }
2302 lastOffset = runtimeOffset;
2303 }
2304 }
2305
2306 // Method list fixups
2307 // TODO: Implement this
2308 STACK_ALLOC_OVERFLOW_SAFE_ARRAY(Image::MethodListFixup, methodListFixups, 512);
2309
2310 image.writer->setObjCFixupInfo(objcProtocolClassTarget, image.objcImageInfoVMOffset, protocolFixups,
2311 selRefFixups, stableSwiftFixups, methodListFixups);
2312 }
2313
2314 return true;
2315 }
2316
2317 void ClosureBuilder::optimizeObjCSelectors(const objc_opt::objc_selopt_t* objcSelOpt,
2318 const Map<const char*, dyld3::closure::Image::ObjCImageOffset, HashCString, EqualCString>& closureSelectorMap,
2319 ObjCOptimizerImage& image) {
2320
2321 BuilderLoadedImage& li = *image.loadedImage;
2322
2323 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2324 uint32_t pointerSize = ma->pointerSize();
2325 const uint64_t loadAddress = ma->preferredLoadAddress();
2326
2327 // The legacy (objc1) codebase uses a bunch of sections we don't want to reason about. If we see them just give up.
2328 __block bool foundBadSection = false;
2329 ma->forEachSection(^(const MachOAnalyzer::SectionInfo &sectInfo, bool malformedSectionRange, bool &stop) {
2330 if ( strcmp(sectInfo.segInfo.segName, "__OBJC") != 0 )
2331 return;
2332 if (strcmp(sectInfo.sectName, "__module_info") == 0) {
2333 foundBadSection = true;
2334 stop = true;
2335 return;
2336 }
2337 if (strcmp(sectInfo.sectName, "__protocol") == 0) {
2338 foundBadSection = true;
2339 stop = true;
2340 return;
2341 }
2342 if (strcmp(sectInfo.sectName, "__message_refs") == 0) {
2343 foundBadSection = true;
2344 stop = true;
2345 return;
2346 }
2347 });
2348 if (foundBadSection) {
2349 image.diag.error("Old objc section");
2350 return;
2351 }
2352
2353 __block MachOAnalyzer::SectionCache selectorStringSectionCache(ma);
2354
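// getIndexForKey() returns the sentinel index for selectors that are not in
// the shared cache table, so compare against it to detect lookup misses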
2355 uint32_t sharedCacheSentinelIndex = objcSelOpt->getSentinelIndex();
2356
2357 auto visitReferenceToObjCSelector = ^void(uint64_t selectorStringVMAddr, uint64_t selectorReferenceVMAddr) {
2358
2359 uint64_t selectorUseImageOffset = selectorReferenceVMAddr - loadAddress;
2360 if ( (selectorUseImageOffset & 3) != 0 ) {
2361 image.diag.error("Unaligned selector reference fixup");
2362 return;
2363 }
2364
2365 // Image::SelectorReferenceFixup only has a 32-bit reach
2366 if ( selectorUseImageOffset >= (1ULL << 32) ) {
2367 image.diag.error("Selector reference fixup exceeds supported vm offset");
2368 return;
2369 }
2370
2371 // Get the section for the name
2372 const char* selectorString = nullptr;
2373 MachOAnalyzer::PrintableStringResult selectorStringResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2374 __block uint64_t selectorStringSectionStartVMAddr = 0;
2375 auto selectorStringSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2376
2377 // We only have 24 bits in ObjCImageOffset to index into the strings
2378 if (sectInfo.sectSize >= Image::ObjCImageOffset::maximumOffset) {
2379 return false;
2380 }
2381
2382 // We use 32-bit offsets so make sure the section is no larger than that.
2383 uint64_t selectorStringsVMOffset = sectInfo.sectAddr - loadAddress;
2384 if (selectorStringsVMOffset >= (1ULL << 32)) {
2385 return false;
2386 }
2387
2388 selectorStringSectionStartVMAddr = sectInfo.sectAddr;
2389 return true;
2390 };
2391 selectorString = ma->getPrintableString(selectorStringVMAddr, selectorStringResult,
2392 &selectorStringSectionCache, selectorStringSectionHandler);
2393
2394 if ( selectorStringResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2395 image.diag.error("Invalid selector string for objc optimisation");
2396 return;
2397 }
2398
2399 uint32_t cacheSelectorIndex = objcSelOpt->getIndexForKey(selectorString);
2400 //printf("selector: %p -> %p %s\n", methodName, cacheSelector, selectorString);
2401
2402 if ( cacheSelectorIndex != sharedCacheSentinelIndex ) {
2403 // We got the selector from the cache so add a fixup to point there.
2404 ObjCOptimizerImage::SelectorFixup fixup;
2405 fixup.isSharedCache = true;
2406 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2407 fixup.sharedCache.selectorTableIndex = cacheSelectorIndex;
2408
2409 //printf("Overriding fixup at 0x%08llX to cache offset 0x%08llX\n", selectorUseImageOffset, (uint64_t)cacheSelector - (uint64_t)_dyldCache);
2410 image.selectorFixups.push_back(fixup);
2411 return;
2412 }
2413
2414 // See if this selector is already in the closure map from a previous image
2415 auto closureSelectorIt = closureSelectorMap.find(selectorString);
2416 if (closureSelectorIt != closureSelectorMap.end()) {
2417 // This selector was found in a previous image, so use it here.
2418 ObjCOptimizerImage::SelectorFixup fixup;
2419 fixup.isSharedCache = false;
2420 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2421 fixup.image.selectorString = selectorString;
2422
2423 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2424 image.selectorFixups.push_back(fixup);
2425 return;
2426 }
2427
2428 // See if this selector is already in the map for this image
2429 auto itAndInserted = image.selectorMap.insert({ selectorString, dyld3::closure::Image::ObjCImageOffset() });
2430 if (itAndInserted.second) {
2431 // We added the selector so it's pointing into our own image.
2432 // We don't need to add a fixup to our image, but we do need to
2433 // populate the data for other images later to point here.
2434 // First put our image in the list if it's not already there.
2435 uint64_t methodNameVMOffset = selectorStringSectionStartVMAddr - loadAddress;
2436 if (!image.methodNameVMOffset) {
2437 if ( _objcSelectorsHashTableImages.count() == Image::ObjCImageOffset::maximumImageIndex ) {
2438 image.diag.error("Out of space for selector hash images");
2439 return;
2440 }
2441 image.methodNameVMOffset = methodNameVMOffset;
2442 } else {
2443 // If we already set the offset to the start of the method names section, double check that
2444 // the section we are in right now is the same as that one. Otherwise we don't have the code
2445 // to handle both right now.
2446 if (*image.methodNameVMOffset != methodNameVMOffset) {
2447 image.diag.error("Cannot handle more than one selector strings section");
2448 return;
2449 }
2450 }
2451
2452 dyld3::closure::Image::ObjCImageOffset target;
2453 target.imageIndex = (uint32_t)_objcSelectorsHashTableImages.count();
2454 target.imageOffset = (uint32_t)(selectorStringVMAddr - selectorStringSectionStartVMAddr);
2455 itAndInserted.first->second = target;
2456 return;
2457 }
2458
2459 // This selector was found elsewhere in our image. If this reference already points to the same
2460 // selector string as we found before (and it should!) then we have nothing to do. Otherwise we
2461 // need to add a fixup here to make sure we point to our chosen definition.
2462 uint32_t imageOffset = (uint32_t)(selectorStringVMAddr - loadAddress);
2463 if ( imageOffset == (*image.methodNameVMOffset + itAndInserted.first->second.imageOffset) )
2464 return;
2465
2466 ObjCOptimizerImage::SelectorFixup fixup;
2467 fixup.isSharedCache = false;
2468 fixup.fixupVMOffset = (uint32_t)selectorUseImageOffset;
2469 fixup.image.selectorString = selectorString;
2470
2471 //printf("Overriding fixup at 0x%08llX to '%s' offset 0x%08llX\n", selectorUseImageOffset, findLoadedImage(target.image.imageNum).path(), target.image.offset);
2472 image.selectorFixups.push_back(fixup);
2473 };
2474
2475 auto visitMethod = ^(uint64_t methodVMAddr, const dyld3::MachOAnalyzer::ObjCMethod& method) {
2476 visitReferenceToObjCSelector(method.nameVMAddr, method.nameLocationVMAddr);
2477 };
2478
2479 auto visitClass = ^(Diagnostics& diag, uint64_t classVMAddr,
2480 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2481 const dyld3::MachOAnalyzer::ObjCClassInfo& objcClass, bool isMetaClass) {
2482 ma->forEachObjCMethod(objcClass.baseMethodsVMAddr(pointerSize), li.contentRebased,
2483 visitMethod);
2484 };
2485
2486 auto visitCategory = ^(Diagnostics& diag, uint64_t categoryVMAddr,
2487 const dyld3::MachOAnalyzer::ObjCCategory& objcCategory) {
2488 ma->forEachObjCMethod(objcCategory.instanceMethodsVMAddr, li.contentRebased,
2489 visitMethod);
2490 ma->forEachObjCMethod(objcCategory.classMethodsVMAddr, li.contentRebased,
2491 visitMethod);
2492 };
2493 auto visitProtocol = ^(Diagnostics& diag, uint64_t protocolVMAddr,
2494 const dyld3::MachOAnalyzer::ObjCProtocol& objCProtocol) {
2495 ma->forEachObjCMethod(objCProtocol.instanceMethodsVMAddr, li.contentRebased,
2496 visitMethod);
2497 ma->forEachObjCMethod(objCProtocol.classMethodsVMAddr, li.contentRebased,
2498 visitMethod);
2499 ma->forEachObjCMethod(objCProtocol.optionalInstanceMethodsVMAddr, li.contentRebased,
2500 visitMethod);
2501 ma->forEachObjCMethod(objCProtocol.optionalClassMethodsVMAddr, li.contentRebased,
2502 visitMethod);
2503 };
2504
2505 // Walk the class list
2506 ma->forEachObjCClass(image.diag, li.contentRebased, visitClass);
2507 if (image.diag.hasError())
2508 return;
2509
2510 // Walk the category list
2511 ma->forEachObjCCategory(image.diag, li.contentRebased, visitCategory);
2512 if (image.diag.hasError())
2513 return;
2514
2515 // Walk the protocol list
2516 ma->forEachObjCProtocol(image.diag, li.contentRebased, visitProtocol);
2517 if (image.diag.hasError())
2518 return;
2519
2520 // Visit the selector refs
2521 ma->forEachObjCSelectorReference(image.diag, li.contentRebased, ^(uint64_t selRefVMAddr, uint64_t selRefTargetVMAddr) {
2522 visitReferenceToObjCSelector(selRefTargetVMAddr, selRefVMAddr);
2523 });
2524 if (image.diag.hasError())
2525 return;
2526
2527 // Visit the message refs
2528 // Note this isn't actually supported in libobjc any more. Its check for whether to support it is:
2529 // #if (defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR))
2530 // So to keep it simple, let's only do this walk if we are x86_64
2531 if ( ma->isArch("x86_64") || ma->isArch("x86_64h") ) {
2532 if (ma->hasObjCMessageReferences()) {
2533 image.diag.error("Cannot handle message refs");
2534 return;
2535 }
2536 }
2537 }
2538
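// objc's header_info structure stores self-relative offsets: mhdr_offset is the
// distance from the mhdr_offset field itself to the image's mach header, so the
// header is recovered by adding the offset to the field's own address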
2539 static const dyld3::MachOAnalyzer* getMachHeaderFromObjCHeaderInfo(const void* opaqueHeaderInfo, uint32_t pointerSize) {
2540 if (pointerSize == 8) {
2541 typedef int64_t PtrTy;
2542 struct HeaderInfo {
2543 PtrTy mhdr_offset; // offset to mach_header_64
2544 PtrTy info_offset; // offset to objc_image_info *
2545 };
2546 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2547 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2548 } else {
2549 typedef int32_t PtrTy;
2550 struct HeaderInfo {
2551 PtrTy mhdr_offset; // offset to mach_header
2552 PtrTy info_offset; // offset to objc_image_info *
2553 };
2554 const HeaderInfo* headerInfo = (const HeaderInfo*)opaqueHeaderInfo;
2555 return (const dyld3::MachOAnalyzer*)(((const uint8_t*)&headerInfo->mhdr_offset) + headerInfo->mhdr_offset);
2556 }
2557 }
2558
2559 void ClosureBuilder::addDuplicateObjCClassWarning(const char* className,
2560 const char* duplicateDefinitionPath,
2561 const char* canonicalDefinitionPath)
2562 {
2563 if ( _objcDuplicateClassWarnings == nullptr )
2564 _objcDuplicateClassWarnings = PathPool::allocate();
2565 // Use a diagnostic to give us a buffer we can safely print to
2566 Diagnostics diag;
2567 diag.error("Class %s is implemented in both %s and %s. One of the two will be used. Which one is undefined.",
2568 className, canonicalDefinitionPath, duplicateDefinitionPath);
2569 #if BUILDING_CACHE_BUILDER
2570 _objcDuplicateClassWarnings->add(diag.errorMessage().c_str());
2571 #else
2572 _objcDuplicateClassWarnings->add(diag.errorMessage());
2573 #endif
2574 }
2575
2576 void ClosureBuilder::optimizeObjCClasses(const objc_opt::objc_clsopt_t* objcClassOpt,
2577 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2578 const Map<const char*, dyld3::closure::Image::ObjCDuplicateClass, HashCString, EqualCString>& duplicateSharedCacheClasses,
2579 ObjCOptimizerImage& image) {
2580
2581 BuilderLoadedImage& li = *image.loadedImage;
2582 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenClasses = image.seenClasses;
2583
2584 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2585 const uint32_t pointerSize = ma->pointerSize();
2586 const uint64_t loadAddress = ma->preferredLoadAddress();
2587
2588 // Keep track of any missing weak imports so that we can tell if the superclasses are nil
2589 // This is necessary as the shared cache will be marked with 'no missing weak superclasses'
2590 // and so we need to continue to satisfy that constraint
2591 __block Map<uint64_t, bool, HashUInt64, EqualUInt64> missingWeakImportOffsets;
2592 if (li.hasMissingWeakImports) {
2593 if (ma->hasChainedFixups()) {
2594 const Image* closureImage = image.writer->currentImage();
2595
2596 const Array<Image::ResolvedSymbolTarget> targets = closureImage->chainedTargets();
2597 if ( !targets.empty() ) {
2598 ma->withChainStarts(_diag, closureImage->chainedStartsOffset(), ^(const dyld_chained_starts_in_image* startsInfo) {
2599 ma->forEachFixupInAllChains(_diag, startsInfo, false, ^(MachOLoaded::ChainedFixupPointerOnDisk* fixupLoc,
2600 const dyld_chained_starts_in_segment* segInfo, bool& fixupsStop) {
2601 uint64_t fixupOffset = (uint8_t*)fixupLoc - (uint8_t*)ma;
2602 uint32_t bindOrdinal;
2603 if ( fixupLoc->isBind(segInfo->pointer_format, bindOrdinal) ) {
2604 if ( bindOrdinal < targets.count() ) {
2605 const Image::ResolvedSymbolTarget& target = targets[bindOrdinal];
2606 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2607 missingWeakImportOffsets[fixupOffset] = true;
2608 }
2609 else {
2610 image.diag.error("out of range bind ordinal %d (max %lu)", bindOrdinal, targets.count());
2611 fixupsStop = true;
2612 }
2613 }
2614 });
2615 });
2616 if (image.diag.hasError())
2617 return;
2618 }
2619 } else {
2620 forEachBind(li, ^(uint64_t runtimeOffset, Image::ResolvedSymbolTarget target, const ResolvedTargetInfo& targetInfo, bool& stop) {
2621 if ( (target.absolute.kind == Image::ResolvedSymbolTarget::kindAbsolute) && (target.absolute.value == 0) )
2622 missingWeakImportOffsets[runtimeOffset] = true;
2623 }, ^(const char *strongSymbolName) {
2624 }, ^() { });
2625 }
2626 }
2627
2628 // Class names and data may be in different sections depending on Swift vs. objc, so handle multiple sections
2629 __block MachOAnalyzer::SectionCache classNameSectionCache(ma);
2630 __block MachOAnalyzer::SectionCache classSectionCache(ma);
2631
2632 ma->forEachObjCClass(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t classVMAddr,
2633 uint64_t classSuperclassVMAddr, uint64_t classDataVMAddr,
2634 const MachOAnalyzer::ObjCClassInfo &objcClass, bool isMetaClass) {
2635 if (isMetaClass) return;
2636
2637 // Make sure the superclass pointer is not nil
2638 uint64_t superclassRuntimeOffset = classSuperclassVMAddr - loadAddress;
2639 if (missingWeakImportOffsets.find(superclassRuntimeOffset) != missingWeakImportOffsets.end()) {
2640 diag.error("Missing weak superclass");
2641 return;
2642 }
2643
2644 // Does this class need to be fixed up for the stable Swift ABI?
2645 // Note the order matches the objc runtime in that we always do this fix before checking for dupes,
2646 // but after excluding classes with missing weak superclasses.
2647 if (objcClass.isUnfixedBackwardDeployingStableSwift()) {
2648 // Class really is stable Swift, pretending to be pre-stable.
2649 // Fix its lie. This involves fixing the FAST bits on the class data value, so record that vmaddr
2650 image.classStableSwiftFixups.push_back(classDataVMAddr - loadAddress);
2651 }
2652
2653 // Get the section for the name
2654 const char* className = nullptr;
2655 MachOAnalyzer::PrintableStringResult classNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2656 __block uint64_t classNameSectionStartVMAddr = 0;
2657 auto classNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2658 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2659 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2660 return false;
2661 }
2662
2663 // We use 32-bit offsets so make sure the section is no larger than that.
2664 uint64_t classNameVMOffset = sectInfo.sectAddr - loadAddress;
2665 if (classNameVMOffset >= (1ULL << 32)) {
2666 return false;
2667 }
2668
2669 classNameSectionStartVMAddr = sectInfo.sectAddr;
2670 return true;
2671 };
2672 uint64_t classNameVMAddr = objcClass.nameVMAddr(pointerSize);
2673 className = ma->getPrintableString(classNameVMAddr, classNameResult,
2674 &classNameSectionCache, classNameSectionHandler);
2675
2676 if ( classNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2677 diag.error("Invalid class name for objc optimisation");
2678 return;
2679 }
2680
2681 // If the class also exists in a shared cache image which is loaded, then objc
2682 // would have found that one, regardless of load order. So we can just skip this one.
2683 {
2684 void *cls;
2685 void *hi;
2686 uint32_t index;
2687 uint32_t count = objcClassOpt->getClassHeaderAndIndex(className, cls, hi, index);
2688 if (count == 1) {
2689 // exactly one matching class. Check if it's loaded
2690 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hi, pointerSize);
2691 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2692 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2693
2694 // We have a duplicate class, so check if we've already got it in our map.
2695 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2696 // We haven't seen this one yet
2697 Image::ObjCDuplicateClass duplicateClass;
2698 duplicateClass.sharedCacheClassOptIndex = index;
2699 duplicateClass.sharedCacheClassDuplicateIndex = 0;
2700 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2701 }
2702 }
2703 }
2704 else if (count > 1) {
2705 // more than one matching class - find one that is loaded
2706 void *clslist[count];
2707 void *hilist[count];
2708 objcClassOpt->getClassesAndHeaders(className, clslist, hilist);
2709 for (uint32_t i = 0; i < count; i++) {
2710 const dyld3::MachOAnalyzer* sharedCacheMA = getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize);
2711 if (sharedCacheImagesMap.find(sharedCacheMA) != sharedCacheImagesMap.end()) {
2712 addDuplicateObjCClassWarning(className, li.path(), sharedCacheMA->installName());
2713
2714 // We have a duplicate class, so check if we've already got it in our map.
2715 if ( duplicateSharedCacheClasses.find(className) == duplicateSharedCacheClasses.end() ) {
2716 // We haven't seen this one yet
2717 Image::ObjCDuplicateClass duplicateClass;
2718 duplicateClass.sharedCacheClassOptIndex = index;
2719 duplicateClass.sharedCacheClassDuplicateIndex = i;
2720 image.classSharedCacheDuplicates.insert({ className, duplicateClass });
2721 }
2722
2723 break;
2724 }
2725 }
2726 }
2727 }
2728
2729 // Get the section for the class itself
2730 __block uint64_t classSectionStartVMAddr = 0;
2731 auto classSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2732 // We only have 23 bits in ObjCClassImageOffset to index into the classes
2733 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2734 return false;
2735 }
2736
2737 // We use 32-bit offsets so make sure the section is no larger than that.
2738 uint64_t classDatasVMOffset = sectInfo.sectAddr - loadAddress;
2739 if (classDatasVMOffset >= (1ULL << 32)) {
2740 return false;
2741 }
2742
2743 classSectionStartVMAddr = sectInfo.sectAddr;
2744 return true;
2745 };
2746 if (!classSectionCache.findSectionForVMAddr(classVMAddr, classSectionHandler)) {
2747 diag.error("Invalid class for objc optimisation");
2748 return;
2749 }
2750
2751 // Make sure we have an entry for our images offsets for later
2752 uint64_t classNameSectionVMOffset = classNameSectionStartVMAddr - loadAddress;
2753 uint64_t classSectionVMOffset = classSectionStartVMAddr - loadAddress;
2754 uint64_t hashTableVMOffsetsIndex = 0;
2755 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2756 if ( (nameAndDataVMOffset.first == classNameSectionVMOffset) && (nameAndDataVMOffset.second == classSectionVMOffset) )
2757 break;
2758 ++hashTableVMOffsetsIndex;
2759 }
2760
2761 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2762 // Didn't find an image entry with this offset. Add one if we have space
2763 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2764 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2765 // No more space. We need to give up
2766 diag.error("No more space for class hash table image");
2767 return;
2768 }
2769 image.classesNameAndDataVMOffsets.push_back({ classNameSectionVMOffset, classSectionVMOffset });
2770 }
2771
2772 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2773
2774 uint64_t classNameOffset = classNameVMAddr - classNameSectionStartVMAddr;
2775 uint64_t classDataOffset = classVMAddr - classSectionStartVMAddr;
2776
2777 closure::Image::ObjCClassNameImageOffset classNameTarget;
2778 classNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2779 classNameTarget.classNameImageOffset = (uint32_t)classNameOffset;
2780
2781 dyld3::closure::Image::ObjCClassImageOffset classDataTarget;
2782 classDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2783 classDataTarget.classData.imageOffset = (uint32_t)classDataOffset;
2784 classDataTarget.classData.isDuplicate = 0;
2785
2786 seenClasses.push_back({ classNameTarget, classDataTarget });
2787 });
2788 }
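// Editorial worked example (not part of the original source): suppose a class's
// data section starts at VM offset 0x8000 from the image load address, and the
// class object itself sits 0x40 bytes into that section. The loop above then
// records, for that class:
//
//     classDataTarget.classData.imageIndex  == hashTableVMOffsetsIndex;  // which (name, data) section pair
//     classDataTarget.classData.imageOffset == 0x40;                     // classVMAddr - classSectionStartVMAddr
//     classDataTarget.classData.isDuplicate == 0;
//
// The section handlers reject any section larger than
// Image::ObjCClassImageOffset::maximumOffset (the offset field is only 23 bits)
// or starting more than 4GB past the load address, which is what makes the
// uint32_t casts above safe.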
2789
2790 void ClosureBuilder::optimizeObjCProtocols(const objc_opt::objc_protocolopt2_t* objcProtocolOpt,
2791 const Map<const dyld3::MachOAnalyzer*, bool, HashPointer, EqualPointer>& sharedCacheImagesMap,
2792 ObjCOptimizerImage& image) {
2793
2794 BuilderLoadedImage& li = *image.loadedImage;
2795 OverflowSafeArray<ObjCOptimizerImage::SeenClass>& seenProtocols = image.seenProtocols;
2796
2797 const dyld3::MachOAnalyzer* ma = li.loadAddress();
2798 const uint32_t pointerSize = ma->pointerSize();
2799 const uint64_t loadAddress = ma->preferredLoadAddress();
2800
2801 // Protocol names and data may be in different sections depending on swift vs objc so handle multiple sections
2802 __block MachOAnalyzer::SectionCache protocolNameSectionCache(ma);
2803 __block MachOAnalyzer::SectionCache protocolSectionCache(ma);
2804
2805 ma->forEachObjCProtocol(image.diag, li.contentRebased, ^(Diagnostics &diag, uint64_t protocolVMAddr,
2806 const dyld3::MachOAnalyzer::ObjCProtocol &objCProtocol) {
2807 if ( objCProtocol.requiresObjCReallocation ) {
2808 // We can't optimize this protocol as the runtime needs all fields to be present
2809 diag.error("Protocol is too small to be optimized");
2810 return;
2811 }
2812 if ( objCProtocol.isaVMAddr != 0 ) {
2813 // We can't optimize this protocol if it has an ISA as we want to override it
2814 diag.error("Protocol ISA cannot be non-zero");
2815 return;
2816 }
2817
2818 // Get the section for the name
2819 const char* protocolName = nullptr;
2820 MachOAnalyzer::PrintableStringResult protocolNameResult = MachOAnalyzer::PrintableStringResult::UnknownSection;
2821 __block uint64_t protocolNameSectionStartVMAddr = 0;
2822 auto protocolNameSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2823 // We only have 24 bits in ObjCClassNameImageOffset to index into the strings
2824 if (sectInfo.sectSize >= Image::ObjCClassNameImageOffset::maximumOffset) {
2825 return false;
2826 }
2827
2828 // We use 32-bit offsets so make sure the section is no larger than that.
2829 uint64_t protocolNameVMOffset = sectInfo.sectAddr - loadAddress;
2830 if (protocolNameVMOffset >= (1ULL << 32)) {
2831 return false;
2832 }
2833
2834 protocolNameSectionStartVMAddr = sectInfo.sectAddr;
2835 return true;
2836 };
2837 uint64_t protocolNameVMAddr = objCProtocol.nameVMAddr;
2838 protocolName = ma->getPrintableString(protocolNameVMAddr, protocolNameResult,
2839 &protocolNameSectionCache, protocolNameSectionHandler);
2840
2841 if ( protocolNameResult != MachOAnalyzer::PrintableStringResult::CanPrint ) {
2842 diag.error("Invalid protocol name for objc optimisation");
2843 return;
2844 }
2845
2846 // If the protocol also exists in a shared cache image which is loaded, then objc
2847 // would have found that one, regardless of load order. So we can just skip this one.
2848 {
2849 void *cls;
2850 void *hi;
2851 uint32_t count = objcProtocolOpt->getClassAndHeader(protocolName, cls, hi);
2852 if (count == 1) {
2853 // exactly one matching protocol. Check if it's loaded
2854 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hi, pointerSize)) != sharedCacheImagesMap.end())
2855 return;
2856 }
2857 else if (count > 1) {
2858 // more than one matching protocol - find one that is loaded
2859 void *clslist[count];
2860 void *hilist[count];
2861 objcProtocolOpt->getClassesAndHeaders(protocolName, clslist, hilist);
2862 for (uint32_t i = 0; i < count; i++) {
2863 if (sharedCacheImagesMap.find(getMachHeaderFromObjCHeaderInfo(hilist[i], pointerSize)) != sharedCacheImagesMap.end())
2864 return;
2865 }
2866 }
2867 }
2868
2869 // Get the section for the protocol itself
2870 __block uint64_t protocolSectionStartVMAddr = 0;
2871 auto protocolSectionHandler = ^bool(const MachOAnalyzer::SectionInfo& sectInfo) {
2872 // We only have 23 bits in ObjCClassImageOffset to index into the protocols
2873 if (sectInfo.sectSize > Image::ObjCClassImageOffset::maximumOffset) {
2874 return false;
2875 }
2876
2877 // We use 32-bit offsets so make sure the section is no larger than that.
2878 uint64_t protocolDatasVMOffset = sectInfo.sectAddr - loadAddress;
2879 if (protocolDatasVMOffset >= (1ULL << 32)) {
2880 return false;
2881 }
2882
2883 protocolSectionStartVMAddr = sectInfo.sectAddr;
2884 return true;
2885 };
2886 if (!protocolSectionCache.findSectionForVMAddr(protocolVMAddr, protocolSectionHandler)) {
2887 diag.error("Invalid protocol for objc optimisation");
2888 return;
2889 }
2890
2891 // Make sure we have an entry for our images offsets for later
2892 uint64_t protocolNameSectionVMOffset = protocolNameSectionStartVMAddr - loadAddress;
2893 uint64_t protocolSectionVMOffset = protocolSectionStartVMAddr - loadAddress;
2894 uint64_t hashTableVMOffsetsIndex = 0;
2895 for (auto nameAndDataVMOffset : image.classesNameAndDataVMOffsets) {
2896 if ( (nameAndDataVMOffset.first == protocolNameSectionVMOffset) && (nameAndDataVMOffset.second == protocolSectionVMOffset) )
2897 break;
2898 ++hashTableVMOffsetsIndex;
2899 }
2900
2901 if (hashTableVMOffsetsIndex == image.classesNameAndDataVMOffsets.count()) {
2902 // Didn't find an image entry with this offset. Add one if we have space
2903 uint64_t totalHashTableImages = image.classesNameAndDataVMOffsets.count() + _objcClassesHashTableImages.count();
2904 if ( totalHashTableImages == Image::ObjCClassNameImageOffset::maximumImageIndex ) {
2905 // No more space. We need to give up
2906 diag.error("No more space for protocol hash table image");
2907 return;
2908 }
2909 image.classesNameAndDataVMOffsets.push_back({ protocolNameSectionVMOffset, protocolSectionVMOffset });
2910 }
2911
2912 hashTableVMOffsetsIndex += _objcClassesHashTableImages.count();
2913
2914 uint64_t protocolNameOffset = protocolNameVMAddr - protocolNameSectionStartVMAddr;
2915 uint64_t protocolDataOffset = protocolVMAddr - protocolSectionStartVMAddr;
2916
2917 closure::Image::ObjCClassNameImageOffset protocolNameTarget;
2918 protocolNameTarget.classNameImageIndex = (uint32_t)hashTableVMOffsetsIndex;
2919 protocolNameTarget.classNameImageOffset = (uint32_t)protocolNameOffset;
2920
2921 dyld3::closure::Image::ObjCClassImageOffset protocolDataTarget;
2922 protocolDataTarget.classData.imageIndex = (uint32_t)hashTableVMOffsetsIndex;
2923 protocolDataTarget.classData.imageOffset = (uint32_t)protocolDataOffset;
2924 protocolDataTarget.classData.isDuplicate = 0;
2925
2926 seenProtocols.push_back({ protocolNameTarget, protocolDataTarget });
2927 });
2928 }
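// Editorial note (not original source): the two early-outs at the top of the
// protocol callback are the whole eligibility test:
//
//     objCProtocol.requiresObjCReallocation   ->  protocol is too small; the runtime
//                                                 would have to reallocate it anyway
//     objCProtocol.isaVMAddr != 0             ->  the runtime wants to install its own
//                                                 ISA, so a prebuilt entry would conflict
//
// A protocol that survives is encoded exactly like a class entry (the same
// ObjCClassNameImageOffset/ObjCClassImageOffset pair), which is why seenProtocols
// above reuses ObjCOptimizerImage::SeenClass.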
2929
2930 // used at launch by dyld when the kernel has already mapped the main executable
2931 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const LoadedFileInfo& fileInfo, bool allowInsertFailures)
2932 {
2933 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
2934 const mach_header* mainMH = (const mach_header*)fileInfo.fileContent;
2935 // set up stack based storage for all arrays
2936 BuilderLoadedImage loadImagesStorage[512];
2937 Image::LinkedImage dependenciesStorage[512*8];
2938 InterposingTuple tuplesStorage[64];
2939 Closure::PatchEntry cachePatchStorage[64];
2940 const char* weakDefNameStorage[64];
2941 _loadedImages.setInitialStorage(loadImagesStorage, 512);
2942 _dependencies.setInitialStorage(dependenciesStorage, 512*8);
2943 _interposingTuples.setInitialStorage(tuplesStorage, 64);
2944 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
2945 _weakDefsFromChainedBinds.setInitialStorage(weakDefNameStorage, 64);
2946 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
2947
2948 const MachOAnalyzer* mainExecutable = MachOAnalyzer::validMainExecutable(_diag, mainMH, fileInfo.path, fileInfo.sliceLen, _archs, _platform);
2949 if ( mainExecutable == nullptr )
2950 return nullptr;
2951 if ( !mainExecutable->isDynamicExecutable() ) {
2952 _diag.error("not a main executable");
2953 return nullptr;
2954 }
2955 _isLaunchClosure = true;
2956 _allowMissingLazies = true;
2957
2958 _nextIndex = 0;
2959
2960 // add main executable
2961 __block BuilderLoadedImage mainEntry;
2962 mainEntry.loadedFileInfo = fileInfo;
2963 mainEntry.imageNum = 0; // We can't fill this in until we've processed any inserted dylibs
2964 mainEntry.unmapWhenDone = false;
2965 mainEntry.contentRebased = false;
2966 mainEntry.hasInits = false;
2967 mainEntry.markNeverUnload = true;
2968 mainEntry.rtldLocal = false;
2969 mainEntry.isBadImage = false;
2970 mainEntry.mustBuildClosure = true;
2971 mainEntry.hasMissingWeakImports = false;
2972 mainEntry.overrideImageNum = 0;
2973
2974 // Set the executable load path so that @executable_path can use it later
2975 _mainProgLoadPath = fileInfo.path;
2976
2977 // add any DYLD_INSERT_LIBRARIES
2978 _pathOverrides.forEachInsertedDylib(^(const char* dylibPath, bool &stop) {
2979 LoadedImageChain chainMain = { nullptr, mainEntry };
2980 BuilderLoadedImage* foundTopImage;
2981 if ( !findImage(dylibPath, chainMain, foundTopImage, LinkageType::kInserted, 0, true) ) {
2982 if ( !allowInsertFailures ) {
2983 if ( _diag.noError() )
2984 _diag.error("could not load inserted dylib %s", dylibPath);
2985 stop = true;
2986 return;
2987 }
2988 _diag.clearError(); // FIXME add way to plumb back warning
2989 }
2990 });
2991
2992 if ( _diag.hasError() )
2993 return nullptr;
2994
2995 _mainProgLoadIndex = (uint32_t)_loadedImages.count();
2996 mainEntry.imageNum = _startImageNum + _nextIndex++;
2997 _loadedImages.push_back(mainEntry);
2998
2999 // get mach_headers for all images needed to launch this main executable
3000 LoadedImageChain chainStart = { nullptr, _loadedImages[_mainProgLoadIndex] };
3001 recursiveLoadDependents(chainStart);
3002 if ( _diag.hasError() )
3003 return nullptr;
3004 for (uint32_t i=0; i < _mainProgLoadIndex; ++i) {
3005 LoadedImageChain insertChainStart = { nullptr, _loadedImages[i] };
3006 recursiveLoadDependents(insertChainStart);
3007 if ( _diag.hasError() )
3008 return nullptr;
3009 }
3010 loadDanglingUpwardLinks();
3011
3012 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3013 // initializer order. It's not enough to just do the top-level image, as we may dlopen while already in a dlopen
3014 invalidateInitializerRoots();
3015
3016 // now that everything loaded, set _libDyldImageNum and _libSystemImageNum
3017 for (BuilderLoadedImage& li : _loadedImages) {
3018 if ( li.loadAddress()->isDylib() && (strcmp(li.loadAddress()->installName(), "/usr/lib/system/libdyld.dylib") == 0) )
3019 _libDyldImageNum = li.imageNum;
3020 else if ( strcmp(li.path(), "/usr/lib/libSystem.B.dylib") == 0 )
3021 _libSystemImageNum = li.imageNum;
3022 }
3023
3024 // only some images need to go into closure (non-rooted ones from dyld cache do not)
3025 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3026 for (BuilderLoadedImage& li : _loadedImages) {
3027 if ( li.mustBuildClosure ) {
3028 writers.push_back(ImageWriter());
3029 buildImage(writers.back(), li);
3030 if ( _diag.hasError() )
3031 return nullptr;
3032 }
3033 }
3034
3035 bool optimizedObjC = optimizeObjC(writers);
3036
3037 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3038 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3039 BuilderLoadedImage& li = _loadedImages[imageIndex];
3040 if ( li.mustBuildClosure ) {
3041 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3042 writerIndex++;
3043 }
3044 }
3045
3046 // combine all Image objects into one ImageArray
3047 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3048 for (ImageWriter& writer : writers) {
3049 imageArrayWriter.appendImage(writer.finalize());
3050 writer.deallocate();
3051 }
3052 const ImageArray* imageArray = imageArrayWriter.finalize();
3053
3054 // merge ImageArray object into LaunchClosure object
3055 __block LaunchClosureWriter closureWriter(imageArray);
3056
3057 if (optimizedObjC) {
3058 if (!_objcSelectorsHashTable.empty())
3059 closureWriter.setObjCSelectorInfo(_objcSelectorsHashTable, _objcSelectorsHashTableImages);
3060
3061 if (!_objcClassesHashTableImages.empty()) {
3062 closureWriter.setObjCClassAndProtocolInfo(_objcClassesHashTable, _objcProtocolsHashTable,
3063 _objcClassesHashTableImages);
3064 }
3065
3066 if ( _objcDuplicateClassWarnings != nullptr ) {
3067 _objcDuplicateClassWarnings->forEachPath(^(const char* warning) {
3068 closureWriter.addWarning(Closure::Warning::duplicateObjCClass, warning);
3069 });
3070 }
3071
3072 if (!_objcClassesDuplicatesHashTable.empty())
3073 closureWriter.setObjCDuplicateClassesInfo(_objcClassesDuplicatesHashTable);
3074 }
3075
3076 // record shared cache info
3077 if ( _dyldCache != nullptr ) {
3078 // record cache UUID
3079 uuid_t cacheUUID;
3080 _dyldCache->getUUID(cacheUUID);
3081 closureWriter.setDyldCacheUUID(cacheUUID);
3082
3083 // record any cache patching needed because of dylib overriding cache
3084 for (const BuilderLoadedImage& li : _loadedImages) {
3085 if ( li.overrideImageNum != 0 ) {
3086 uint32_t imageIndex = li.overrideImageNum - (uint32_t)_dyldImageArray->startImageNum();
3087 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3088 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3089 return (const MachOLoaded*)findDependent(mh, depIndex);
3090 };
3091 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3092 _dyldCache->forEachPatchableExport(imageIndex, ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3093 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3094 Diagnostics patchDiag;
3095 Closure::PatchEntry patch;
3096 patch.overriddenDylibInCache = li.overrideImageNum;
3097 patch.exportCacheOffset = cacheOffsetOfImpl;
3098 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3099 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3100 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3101 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3102 patch.replacement.image.offset = foundInfo.value;
3103 }
3104 else {
3105 // this means the symbol is missing in the cache override dylib, so set any uses to NULL
3106 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3107 patch.replacement.absolute.value = 0;
3108 }
3109 patches.push_back(patch);
3110 });
3111 closureWriter.addCachePatches(patches);
3112 }
3113 }
3114
3115 // handle any extra weak-def coalescing needed by chained fixups
3116 if ( !_weakDefsFromChainedBinds.empty() ) {
3117 for (const char* symbolName : _weakDefsFromChainedBinds) {
3118 Image::ResolvedSymbolTarget cacheOverrideTarget;
3119 bool haveCacheOverride = false;
3120 bool foundCacheOverrideIsWeakDef = false;
3121 for (const BuilderLoadedImage& li : _loadedImages) {
3122 if ( !li.loadAddress()->hasWeakDefs() )
3123 continue;
3124 Image::ResolvedSymbolTarget target;
3125 ResolvedTargetInfo targetInfo;
3126 if ( findSymbolInImage(li.loadAddress(), symbolName, 0, false, false, target, targetInfo) ) {
3127 if ( li.loadAddress()->inDyldCache() ) {
3128 if ( haveCacheOverride ) {
3129 Closure::PatchEntry patch;
3130 patch.exportCacheOffset = (uint32_t)target.sharedCache.offset;
3131 patch.overriddenDylibInCache = li.imageNum;
3132 patch.replacement = cacheOverrideTarget;
3133 _weakDefCacheOverrides.push_back(patch);
3134 }
3135 else {
3136 // found first in cached dylib, so no need to patch cache for this symbol
3137 break;
3138 }
3139 }
3140 else {
3141 // found image that exports this symbol and is not in cache
3142 if ( !haveCacheOverride || (foundCacheOverrideIsWeakDef && !targetInfo.isWeakDef) ) {
3143 // update cache to use this symbol if it is the first one found, or the first non-weak one found
3144 cacheOverrideTarget = target;
3145 foundCacheOverrideIsWeakDef = targetInfo.isWeakDef;
3146 haveCacheOverride = true;
3147 }
3148 }
3149 }
3150 }
3151 }
3152 }
3153
3154 // record any cache patching needed because weak-def C++ symbols override dyld cache
3155 if ( !_weakDefCacheOverrides.empty() )
3156 closureWriter.addCachePatches(_weakDefCacheOverrides);
3157
3158 }
3159
3160 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3161 // if closure is built on-device for iOS, then record boot UUID
3162 char bootSessionUUID[256] = { 0 };
3163 size_t bootSize = sizeof(bootSessionUUID);
3164 if ( sysctlbyname("kern.bootsessionuuid", bootSessionUUID, &bootSize, NULL, 0) == 0 )
3165 closureWriter.setBootUUID(bootSessionUUID);
3166 #endif
3167
3168 // record any interposing info
3169 imageArray->forEachImage(^(const Image* image, bool &stop) {
3170 if ( !image->inDyldCache() )
3171 addInterposingTuples(closureWriter, image, findLoadedImage(image->imageNum()).loadAddress());
3172 });
3173
3174 // modify fixups in contained Images by applying interposing tuples
3175 closureWriter.applyInterposing((const LaunchClosure*)closureWriter.currentTypedBytes());
3176
3177 // set flags
3178 closureWriter.setUsedAtPaths(_atPathUsed);
3179 closureWriter.setUsedFallbackPaths(_fallbackPathUsed);
3180 closureWriter.setHasInsertedLibraries(_mainProgLoadIndex > 0);
3181 closureWriter.setInitImageCount((uint32_t)_loadedImages.count());
3182
3183 // add other closure attributes
3184 addClosureInfo(closureWriter);
3185
3186 // make result
3187 const LaunchClosure* result = closureWriter.finalize();
3188 imageArrayWriter.deallocate();
3189
3190 timer.setData4(dyld3::DyldTimingBuildClosure::LaunchClosure_Built);
3191
3192 return result;
3193 }
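// Editorial usage sketch, assuming a fully constructed ClosureBuilder 'builder'
// and a LoadedFileInfo 'fileInfo' describing the already-mapped main executable
// (both names are hypothetical):
//
//     if ( const LaunchClosure* closure = builder.makeLaunchClosure(fileInfo, /*allowInsertFailures=*/false) ) {
//         // launch using the closure
//     } else {
//         // the builder's diagnostics carry the reason no closure could be built
//     }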
3194
3195 // used by libdyld for dlopen()
3196 const DlopenClosure* ClosureBuilder::makeDlopenClosure(const char* path, const LaunchClosure* mainClosure, const Array<LoadedImage>& alreadyLoadedList,
3197 closure::ImageNum callerImageNum, bool noLoad, bool forceBindLazies, bool canUseSharedCacheClosure, closure::ImageNum* topImageNum)
3198 {
3199 dyld3::ScopedTimer timer(DBG_DYLD_TIMING_BUILD_CLOSURE, 0, 0, 0);
3200 // set up stack based storage for all arrays
3201 BuilderLoadedImage loadImagesStorage[300];
3202 Image::LinkedImage dependenciesStorage[128];
3203 Closure::PatchEntry cachePatchStorage[64];
3204 _loadedImages.setInitialStorage(loadImagesStorage, 300);
3205 _dependencies.setInitialStorage(dependenciesStorage, 128);
3206 _weakDefCacheOverrides.setInitialStorage(cachePatchStorage, 64);
3207 ArrayFinalizer<BuilderLoadedImage> scopedCleanup(_loadedImages, ^(BuilderLoadedImage& li) { if (li.unmapWhenDone) {_fileSystem.unloadFile(li.loadedFileInfo); li.unmapWhenDone=false;} });
3208
3209 // fill in builder array from already loaded images
3210 bool cachedDylibsExpectedOnDisk = _dyldCache ? _dyldCache->header.dylibsExpectedOnDisk : true;
3211 uintptr_t callerImageIndex = UINTPTR_MAX;
3212 for (const LoadedImage& ali : alreadyLoadedList) {
3213 const Image* image = ali.image();
3214 const MachOAnalyzer* ma = (MachOAnalyzer*)(ali.loadedAddress());
3215 bool inDyldCache = ma->inDyldCache();
3216 BuilderLoadedImage entry;
3217 ImageNum overrideImageNum;
3218 entry.loadedFileInfo.path = image->path();
3219 entry.loadedFileInfo.fileContent = ma;
3220 entry.loadedFileInfo.sliceOffset = 0;
3221 entry.loadedFileInfo.inode = 0;
3222 entry.loadedFileInfo.mtime = 0;
3223 entry.imageNum = image->imageNum();
3224 entry.dependents = image->dependentsArray();
3225 entry.unmapWhenDone = false;
3226 entry.contentRebased = inDyldCache;
3227 entry.hasInits = false;
3228 entry.markNeverUnload = image->neverUnload();
3229 entry.rtldLocal = ali.hideFromFlatSearch();
3230 entry.isBadImage = false;
3231 entry.mustBuildClosure = false;
3232 entry.hasMissingWeakImports = false;
3233 entry.overrideImageNum = 0;
3234 if ( !inDyldCache && image->isOverrideOfDyldCacheImage(overrideImageNum) ) {
3235 entry.overrideImageNum = overrideImageNum;
3236 canUseSharedCacheClosure = false;
3237 }
3238 if ( !inDyldCache || cachedDylibsExpectedOnDisk )
3239 image->hasFileModTimeAndInode(entry.loadedFileInfo.inode, entry.loadedFileInfo.mtime);
3240 if ( entry.imageNum == callerImageNum )
3241 callerImageIndex = _loadedImages.count();
3242 _loadedImages.push_back(entry);
3243 }
3244 _alreadyInitedIndex = (uint32_t)_loadedImages.count();
3245
3246 // find main executable (may be needed for @executable_path)
3247 _isLaunchClosure = false;
3248 for (uint32_t i=0; i < alreadyLoadedList.count(); ++i) {
3249 if ( _loadedImages[i].loadAddress()->isMainExecutable() ) {
3250 _mainProgLoadIndex = i;
3251 _mainProgLoadPath = _loadedImages[i].path();
3252 break;
3253 }
3254 }
3255
3256 // We can't use an existing dlopen closure if the main closure had interposing tuples
3257 if (canUseSharedCacheClosure) {
3258 if (mainClosure->hasInterposings())
3259 canUseSharedCacheClosure = false;
3260 }
3261
3262 // add top level dylib being dlopen()ed
3263 BuilderLoadedImage* foundTopImage;
3264 _nextIndex = 0;
3265 // @rpath uses caller's LC_RPATH, then main executable's LC_RPATH
3266 BuilderLoadedImage& callerImage = (callerImageIndex != UINTPTR_MAX) ? _loadedImages[callerImageIndex] : _loadedImages[_mainProgLoadIndex];
3267 LoadedImageChain chainCaller = { nullptr, callerImage };
3268 LoadedImageChain chainMain = { &chainCaller, _loadedImages[_mainProgLoadIndex] };
3269 if ( !findImage(path, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3270 // If we didn't find the image, it might be a symlink to something in the dyld cache that is not on disk
3271 if ( (_dyldCache != nullptr) && !_dyldCache->header.dylibsExpectedOnDisk ) {
3272 char resolvedPath[PATH_MAX];
3273 if ( _fileSystem.getRealPath(path, resolvedPath) ) {
3274 _diag.clearError();
3275 if ( !findImage(resolvedPath, chainMain, foundTopImage, LinkageType::kDynamic, 0, canUseSharedCacheClosure) ) {
3276 return nullptr;
3277 }
3278 } else {
3279 // We didn't find a new path from realpath
3280 return nullptr;
3281 }
3282 } else {
3283 // cached dylibs are on disk, so don't call realpath() again; it would have been found on the first call to findImage()
3284 return nullptr;
3285 }
3286 }
3287
3288 // exit early in RTLD_NOLOAD mode
3289 if ( noLoad ) {
3290 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_NoLoad);
3291 // if no new images added to _loadedImages, then requested path was already loaded
3292 if ( (uint32_t)_loadedImages.count() == _alreadyInitedIndex )
3293 *topImageNum = foundTopImage->imageNum;
3294 else
3295 *topImageNum = 0;
3296 return nullptr;
3297 }
3298
3299 // fast path if roots are not allowed and the target is in the dyld cache or in the cache's prebuilt 'other dylibs' image array
3300 if ( (_dyldCache != nullptr) && (_dyldCache->header.cacheType == kDyldSharedCacheTypeProduction) ) {
3301 if ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) {
3302 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3303 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3304 else
3305 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3306 *topImageNum = foundTopImage->imageNum;
3307 return nullptr;
3308 }
3309 }
3310
3311 // recursive load dependents
3312 // @rpath for stuff top dylib depends on uses LC_RPATH from caller, main exe, and dylib being dlopen()ed
3313 LoadedImageChain chainTopDylib = { &chainMain, *foundTopImage };
3314 recursiveLoadDependents(chainTopDylib, canUseSharedCacheClosure);
3315 if ( _diag.hasError() )
3316 return nullptr;
3317 loadDanglingUpwardLinks(canUseSharedCacheClosure);
3318 if ( _diag.hasError() )
3319 return nullptr;
3320
3321 // RTLD_NOW means fail the dlopen() if a symbol cannot be bound
3322 _allowMissingLazies = !forceBindLazies;
3323
3324 // only some images need to go into closure (ones from dyld cache do not, unless the cache format changed)
3325 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3326 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3327 // If we have an on-disk image then we need all images which are dependent on the disk image to get a new
3328 // initializer order. It's not enough to just do the top-level image, as we may dlopen while already in a dlopen
3329 invalidateInitializerRoots();
3330
3331 for (uintptr_t loadedImageIndex = 0; loadedImageIndex != _loadedImages.count(); ++loadedImageIndex) {
3332 BuilderLoadedImage& li = _loadedImages[loadedImageIndex];
3333 if ( li.mustBuildClosure ) {
3334 writers.push_back(ImageWriter());
3335 buildImage(writers.back(), li);
3336 if ( _diag.hasError() )
3337 return nullptr;
3338 }
3339 }
3340
3341 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3342 for (uintptr_t imageIndex = 0, writerIndex = 0; imageIndex != _loadedImages.count(); ++imageIndex) {
3343 BuilderLoadedImage& li = _loadedImages[imageIndex];
3344 if ( li.mustBuildClosure ) {
3345 computeInitOrder(writers[writerIndex], (uint32_t)imageIndex);
3346 writerIndex++;
3347 }
3348 }
3349 }
3350 if ( _diag.hasError() )
3351 return nullptr;
3352
3353 // check if top image loaded is in shared cache along with everything it depends on
3354 *topImageNum = foundTopImage->imageNum;
3355 if ( _foundNonCachedImage || _foundDyldCacheRoots ) {
3356 if ( canUseSharedCacheClosure && ( foundTopImage->imageNum < closure::kFirstLaunchClosureImageNum ) ) {
3357 // We used a shared-cache-built closure, but have now discovered roots. We need to try again
3358 *topImageNum = 0;
3359 return sRetryDlopenClosure;
3360 }
3361 } else {
3362 if (foundTopImage->imageNum < closure::kLastDyldCacheImageNum)
3363 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheDylib);
3364 else
3365 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_UsedSharedCacheOther);
3366 return nullptr;
3367 }
3368
3369 // combine all Image objects into one ImageArray
3370 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3371 for (ImageWriter& writer : writers) {
3372 imageArrayWriter.appendImage(writer.finalize());
3373 writer.deallocate();
3374 }
3375 const ImageArray* imageArray = imageArrayWriter.finalize();
3376
3377 // merge ImageArray object into LaunchClosure object
3378 DlopenClosureWriter closureWriter(imageArray);
3379
3380 // add other closure attributes
3381 closureWriter.setTopImageNum(foundTopImage->imageNum);
3382
3383 // record any cache patching needed because of dylib overriding cache
3384 if ( _dyldCache != nullptr ) {
3385 for (const BuilderLoadedImage& li : _loadedImages) {
3386 if ( (li.overrideImageNum != 0) && (li.imageNum >= _startImageNum) ) {
3387 const Image* cacheImage = _dyldImageArray->imageForNum(li.overrideImageNum);
3388 uint32_t imageIndex = cacheImage->imageNum() - (uint32_t)_dyldCache->cachedDylibsImageArray()->startImageNum();
3389 STACK_ALLOC_ARRAY(Closure::PatchEntry, patches, _dyldCache->patchableExportCount(imageIndex));
3390 MachOLoaded::DependentToMachOLoaded reexportFinder = ^(const MachOLoaded* mh, uint32_t depIndex) {
3391 return (const MachOLoaded*)findDependent(mh, depIndex);
3392 };
3393 //fprintf(stderr, "'%s' overrides '%s'\n", li.loadedFileInfo.path, cacheImage->path());
3394 _dyldCache->forEachPatchableExport(imageIndex,
3395 ^(uint32_t cacheOffsetOfImpl, const char* symbolName) {
3396 dyld3::MachOAnalyzer::FoundSymbol foundInfo;
3397 Diagnostics patchDiag;
3398 Closure::PatchEntry patch;
3399 patch.overriddenDylibInCache = li.overrideImageNum;
3400 patch.exportCacheOffset = cacheOffsetOfImpl;
3401 if ( li.loadAddress()->findExportedSymbol(patchDiag, symbolName, false, foundInfo, reexportFinder) ) {
3402 const MachOAnalyzer* impDylib = (const MachOAnalyzer*)foundInfo.foundInDylib;
3403 patch.replacement.image.kind = Image::ResolvedSymbolTarget::kindImage;
3404 patch.replacement.image.imageNum = findLoadedImage(impDylib).imageNum;
3405 patch.replacement.image.offset = foundInfo.value;
3406 }
3407 else {
3408 patch.replacement.absolute.kind = Image::ResolvedSymbolTarget::kindAbsolute;
3409 patch.replacement.absolute.value = 0;
3410 }
3411 patches.push_back(patch);
3412 });
3413 closureWriter.addCachePatches(patches);
3414 }
3415 }
3416 }
3417
3418 // modify fixups in contained Images by applying interposing tuples
3419 closureWriter.applyInterposing(mainClosure);
3420
3421 // dlopen() closures should never keep track of missing paths, as we don't cache these closures.
3422 assert(_mustBeMissingPaths == nullptr);
3423
3424 // make final DlopenClosure object
3425 const DlopenClosure* result = closureWriter.finalize();
3426 imageArrayWriter.deallocate();
3427 timer.setData4(dyld3::DyldTimingBuildClosure::DlopenClosure_Built);
3428 return result;
3429 }
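// Editorial sketch of the caller-side retry protocol around sRetryDlopenClosure
// (the sentinel defined at the top of this file); local names are hypothetical:
//
//     closure::ImageNum topImageNum = 0;
//     const DlopenClosure* dc = builder.makeDlopenClosure(path, mainClosure, loadedList,
//                                       callerImageNum, noLoad, forceBindLazies,
//                                       /*canUseSharedCacheClosure=*/true, &topImageNum);
//     if ( dc == ClosureBuilder::sRetryDlopenClosure ) {
//         // a prebuilt shared-cache closure was chosen but roots were then discovered;
//         // build again with a fresh builder, refusing the shared-cache closure this time
//         dc = retryBuilder.makeDlopenClosure(path, mainClosure, loadedList,
//                                       callerImageNum, noLoad, forceBindLazies,
//                                       /*canUseSharedCacheClosure=*/false, &topImageNum);
//     }
//
// A nullptr result with a non-zero topImageNum means no new closure was needed:
// the requested image (and everything it depends on) was already covered.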
3430
3431
3432 // used by dyld_closure_util
3433 const LaunchClosure* ClosureBuilder::makeLaunchClosure(const char* mainPath, bool allowInsertFailures)
3434 {
3435 char realerPath[MAXPATHLEN];
3436 closure::LoadedFileInfo loadedFileInfo = MachOAnalyzer::load(_diag, _fileSystem, mainPath, _archs, _platform, realerPath);
3437 const MachOAnalyzer* mh = (const MachOAnalyzer*)loadedFileInfo.fileContent;
3438 loadedFileInfo.path = mainPath;
3439 if (_diag.hasError())
3440 return nullptr;
3441 if (mh == nullptr) {
3442 _diag.error("could not load file");
3443 return nullptr;
3444 }
3445 if (!mh->isDynamicExecutable()) {
3446 _diag.error("file is not an executable");
3447 return nullptr;
3448 }
3449 const_cast<PathOverrides*>(&_pathOverrides)->setMainExecutable(mh, mainPath);
3450 const LaunchClosure* launchClosure = makeLaunchClosure(loadedFileInfo, allowInsertFailures);
3451 loadedFileInfo.unload(loadedFileInfo);
3452 return launchClosure;
3453 }
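// Editorial note: this path-based overload is the entry point dyld_closure_util
// drives. It maps the file, verifies it is a dynamic main executable, registers
// it with the path overrides, delegates to the LoadedFileInfo overload above,
// and unmaps the file before returning, e.g. (hypothetical call):
//
//     const LaunchClosure* closure = builder.makeLaunchClosure("/bin/ls", /*allowInsertFailures=*/false);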
3454
3455 void ClosureBuilder::setDyldCacheInvalidFormatVersion() {
3456 _dyldCacheInvalidFormatVersion = true;
3457 }
3458
3459
3460 // used by dyld shared cache builder
3461 const ImageArray* ClosureBuilder::makeDyldCacheImageArray(bool customerCache, const Array<CachedDylibInfo>& dylibs, const Array<CachedDylibAlias>& aliases)
3462 {
3463 // because this is run in cache builder using dispatch_apply() there is minimal stack space
3464 // so set up storage for all arrays to be vm_allocated
3465 uintptr_t maxImageCount = dylibs.count() + 16;
3466 _loadedImages.reserve(maxImageCount);
3467 _dependencies.reserve(maxImageCount*16);
3468
3469 _makingDyldCacheImages = true;
3470 _allowMissingLazies = false;
3471 _makingCustomerCache = customerCache;
3472 _aliases = &aliases;
3473
3474 // build _loadedImages[] with every dylib in cache
3475 __block ImageNum imageNum = _startImageNum;
3476 for (const CachedDylibInfo& aDylibInfo : dylibs) {
3477 BuilderLoadedImage entry;
3478 entry.loadedFileInfo = aDylibInfo.fileInfo;
3479 entry.imageNum = imageNum++;
3480 entry.unmapWhenDone = false;
3481 entry.contentRebased = false;
3482 entry.hasInits = false;
3483 entry.markNeverUnload = true;
3484 entry.rtldLocal = false;
3485 entry.isBadImage = false;
3486 entry.mustBuildClosure = false;
3487 entry.hasMissingWeakImports = false;
3488 entry.overrideImageNum = 0;
3489 _loadedImages.push_back(entry);
3490 }
3491
3492 // wire up dependencies between cached dylibs
3493 for (BuilderLoadedImage& li : _loadedImages) {
3494 LoadedImageChain chainStart = { nullptr, li };
3495 recursiveLoadDependents(chainStart);
3496 if ( _diag.hasError() )
3497 break;
3498 }
3499 assert(_loadedImages.count() == dylibs.count());
3500
3501 // create an ImageWriter for each cached dylib
3502 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3503 for (BuilderLoadedImage& li : _loadedImages) {
3504 writers.push_back(ImageWriter());
3505 buildImage(writers.back(), li);
3506 }
3507
3508 // add initializer order into each dylib
3509 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3510 for (const BuilderLoadedImage& li : _loadedImages) {
3511 uint32_t index = li.imageNum - _startImageNum;
3512 computeInitOrder(writers[index], index);
3513 }
3514
3515 // combine all Image objects into one ImageArray
3516 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3517 for (ImageWriter& writer : writers) {
3518 imageArrayWriter.appendImage(writer.finalize());
3519 writer.deallocate();
3520 }
3521 const ImageArray* imageArray = imageArrayWriter.finalize();
3522
3523 return imageArray;
3524 }
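// Editorial note: because image numbers are handed out densely from _startImageNum
// in dylib order, the init-order loop above can index writers[] directly as
// (li.imageNum - _startImageNum). For example, with _startImageNum == 1 the third
// cached dylib has imageNum 3 and its writer is writers[2].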
3525
3526
3527 #if BUILDING_CACHE_BUILDER
3528 const ImageArray* ClosureBuilder::makeOtherDylibsImageArray(const Array<LoadedFileInfo>& otherDylibs, uint32_t cachedDylibsCount)
3529 {
3530 // because this is run in cache builder using dispatch_apply() there is minimal stack space
3531 // so set up storage for all arrays to be vm_allocated
3532 uintptr_t maxImageCount = otherDylibs.count() + cachedDylibsCount + 128;
3533 _loadedImages.reserve(maxImageCount);
3534 _dependencies.reserve(maxImageCount*16);
3535
3536 // build _loadedImages[] with every dylib in cache, followed by others
3537 _nextIndex = 0;
3538 for (const LoadedFileInfo& aDylibInfo : otherDylibs) {
3539 BuilderLoadedImage entry;
3540 entry.loadedFileInfo = aDylibInfo;
3541 entry.imageNum = _startImageNum + _nextIndex++;
3542 entry.unmapWhenDone = false;
3543 entry.contentRebased = false;
3544 entry.hasInits = false;
3545 entry.markNeverUnload = false;
3546 entry.rtldLocal = false;
3547 entry.isBadImage = false;
3548 entry.mustBuildClosure = false;
3549 entry.hasMissingWeakImports = false;
3550 entry.overrideImageNum = 0;
3551 _loadedImages.push_back(entry);
3552 }
3553
3554 // wire up dependencies between the other dylibs
3555 // Note, _loadedImages can grow when we call recursiveLoadDependents so we need
3556 // to check the count on each iteration.
3557 for (uint64_t index = 0; index != _loadedImages.count(); ++index) {
3558 BuilderLoadedImage& li = _loadedImages[index];
3559 LoadedImageChain chainStart = { nullptr, li };
3560 recursiveLoadDependents(chainStart);
3561 if ( _diag.hasError() ) {
3562 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3563 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3564 _diag.clearError();
3565 li.isBadImage = true; // mark bad
3566 }
3567 }
3568
3569 auto invalidateBadImages = [&]() {
3570 // Invalidate images with bad dependencies
3571 while (true) {
3572 bool madeChange = false;
3573 for (BuilderLoadedImage& li : _loadedImages) {
3574 if (li.isBadImage) {
3575 // Already invalidated
3576 continue;
3577 }
3578 for (Image::LinkedImage depIndex : li.dependents) {
3579 if ( depIndex.imageNum() == kMissingWeakLinkedImage )
3580 continue;
3581 if ( depIndex.imageNum() >= dyld3::closure::kLastDyldCacheImageNum ) {
3582 // dlopen closures can only depend on the shared cache. This is because if foo.dylib links bar.dylib
3583 // and bar.dylib is loaded into the launch closure, then the dlopen closure for foo.dylib wouldn't see
3584 // bar.dylib at the image number it has in the launch closure
3585 _diag.warning("while building dlopen closure for %s: dependent dylib is not from shared cache", li.loadedFileInfo.path);
3586 li.isBadImage = true; // mark bad
3587 madeChange = true;
3588 continue;
3589 }
3590 BuilderLoadedImage& depImage = findLoadedImage(depIndex.imageNum());
3591 if (depImage.isBadImage) {
3592 _diag.warning("while building dlopen closure for %s: dependent dylib had error", li.loadedFileInfo.path);
3593 li.isBadImage = true; // mark bad
3594 madeChange = true;
3595 }
3596 }
3597 }
3598 if (!madeChange)
3599 break;
3600 }
3601 };
3602
3603 invalidateBadImages();
3604
3605 // create an ImageWriter for each non-cached dylib (bad images get an invalid writer; cached dylibs are skipped)
3606 STACK_ALLOC_ARRAY(ImageWriter, writers, _loadedImages.count());
3607 for (BuilderLoadedImage& li : _loadedImages) {
3608 if ( li.isBadImage ) {
3609 writers.push_back(ImageWriter());
3610 writers.back().setInvalid();
3611 continue;
3612 }
3613 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3614 continue;
3615 writers.push_back(ImageWriter());
3616 buildImage(writers.back(), li);
3617 if ( _diag.hasError() ) {
3618 _diag.warning("while building dlopen closure for %s: %s", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3619 //fprintf(stderr, "while building dlopen closure for %s: %s\n", li.loadedFileInfo.path, _diag.errorMessage().c_str());
3620 _diag.clearError();
3621 li.isBadImage = true; // mark bad
3622 writers.back().setInvalid();
3623 }
3624 }
3625
3626 invalidateBadImages();
3627
3628 // add initializer order into each dylib
3629 // Note we have to compute the init order after buildImage as buildImage may set hasInits to true
3630 for (const BuilderLoadedImage& li : _loadedImages) {
3631 if ( li.imageNum < dyld3::closure::kLastDyldCacheImageNum )
3632 continue;
3633 if (li.isBadImage)
3634 continue;
3635 uint32_t index = li.imageNum - _startImageNum;
3636 computeInitOrder(writers[index], index);
3637 }
3638
3639 // combine all Image objects into one ImageArray
3640 ImageArrayWriter imageArrayWriter(_startImageNum, (uint32_t)writers.count(), _foundDyldCacheRoots);
3641 for (ImageWriter& writer : writers) {
3642 imageArrayWriter.appendImage(writer.finalize());
3643 writer.deallocate();
3644 }
3645 const ImageArray* imageArray = imageArrayWriter.finalize();
3646
3647 return imageArray;
3648 }
3649 #endif
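// Editorial sketch (a hypothetical, self-contained model, not dyld code) of the
// fixed-point propagation that invalidateBadImages() performs above:
//
//     #include <vector>
//     struct Node { bool bad; std::vector<int> deps; };
//     static void propagate(std::vector<Node>& nodes) {
//         bool changed = true;
//         while ( changed ) {                            // iterate to a fixed point
//             changed = false;
//             for (Node& n : nodes) {
//                 if ( n.bad ) continue;
//                 for (int d : n.deps) {
//                     if ( nodes[d].bad ) { n.bad = true; changed = true; break; }
//                 }
//             }
//         }
//     }
//
// Re-scanning until nothing changes is what lets badness propagate through
// arbitrarily long dependency chains, exactly like the while(true)/madeChange
// loop above.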
3650
3651
3652 bool ClosureBuilder::inLoadedImageArray(const Array<LoadedImage>& loadedList, ImageNum imageNum)
3653 {
3654 for (const LoadedImage& ali : loadedList) {
3655 if ( ali.image()->representsImageNum(imageNum) )
3656 return true;
3657 }
3658 return false;
3659 }
3660
3661 void ClosureBuilder::buildLoadOrderRecurse(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Image* image)
3662 {
3663 // breadth first load
3664 STACK_ALLOC_ARRAY(const Image*, needToRecurse, 256);
3665 image->forEachDependentImage(^(uint32_t dependentIndex, dyld3::closure::Image::LinkKind kind, ImageNum depImageNum, bool &stop) {
3666 if ( !inLoadedImageArray(loadedList, depImageNum) ) {
3667 const Image* depImage = ImageArray::findImage(imagesArrays, depImageNum);
3668 loadedList.push_back(LoadedImage::make(depImage));
3669 needToRecurse.push_back(depImage);
3670 }
3671 });
3672
3673 // recurse load
3674 for (const Image* img : needToRecurse) {
3675 buildLoadOrderRecurse(loadedList, imagesArrays, img);
3676 }
3677 }
3678
3679 void ClosureBuilder::buildLoadOrder(Array<LoadedImage>& loadedList, const Array<const ImageArray*>& imagesArrays, const Closure* toAdd)
3680 {
3681 const dyld3::closure::Image* topImage = ImageArray::findImage(imagesArrays, toAdd->topImage());
3682 loadedList.push_back(LoadedImage::make(topImage));
3683 buildLoadOrderRecurse(loadedList, imagesArrays, topImage);
3684 }
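// Editorial example: for a closure whose top image A depends on B and C, and
// where B depends on D, buildLoadOrder() yields A, B, C, D. All of an image's
// direct dependents are appended to loadedList before any of them is recursed
// into, so the order is breadth-first rather than the depth-first A, B, D, C.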
3685
3686
3687
3688 //////////////////////////// ObjCStringTable ////////////////////////////////////////
3689
3690 template<typename PerfectHashT, typename ImageOffsetT>
3691 void ObjCStringTable::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings)
3692 {
3693 ObjCSelectorOpt::StringTarget sentinel = (ObjCSelectorOpt::StringTarget)ImageOffsetT::sentinelValue;
3694 // Set header
3695 capacity = phash.capacity;
3696 occupied = phash.occupied;
3697 shift = phash.shift;
3698 mask = phash.mask;
3699 sentinelTarget = sentinel;
3700 roundedTabSize = std::max(phash.mask+1, 4U);
3701 salt = phash.salt;
3702
3703 // Set hash data
3704 for (uint32_t i = 0; i < 256; i++) {
3705 scramble[i] = phash.scramble[i];
3706 }
3707 for (uint32_t i = 0; i < phash.mask+1; i++) {
3708 tab[i] = phash.tab[i];
3709 }
3710
3711 dyld3::Array<StringTarget> targetsArray = targets();
3712 dyld3::Array<StringHashCheckByte> checkBytesArray = checkBytes();
3713
3714 // Set offsets to the sentinel
3715 for (uint32_t i = 0; i < phash.capacity; i++) {
3716 targetsArray[i] = sentinel;
3717 }
3718 // Set checkbytes to 0
3719 for (uint32_t i = 0; i < phash.capacity; i++) {
3720 checkBytesArray[i] = 0;
3721 }
3722
3723 // Set real string offsets and checkbytes
3724 for (const auto& s : strings) {
3725 assert(s.second.raw != sentinelTarget);
3726 uint32_t h = hash(s.first);
3727 targetsArray[h] = s.second.raw;
3728 checkBytesArray[h] = checkbyte(s.first);
3729 }
3730 }
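// Editorial sketch of the reader side of this table layout. hash(), checkbyte(),
// targets(), checkBytes() and sentinelTarget are the members used by write()
// above; the lookup helper itself is hypothetical:
//
//     StringTarget lookup(const char* s) {
//         uint32_t i = hash(s);                    // perfect hash over the written key set
//         if ( checkBytes()[i] != checkbyte(s) )   // cheap reject for strings never inserted
//             return sentinelTarget;
//         return targets()[i];                     // still the sentinel if the slot was never filled
//     }
//
// Because the hash is perfect for the inserted strings, no probe chains are
// needed; the check byte exists only to filter out foreign strings that happen
// to hash into range.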
3731
3732 //////////////////////////// ObjCClassOpt ////////////////////////////////////////
3733
3734
3735 template<typename PerfectHashT, typename ImageOffsetT, typename ClassesMapT>
3736 void ObjCClassOpt::write(const PerfectHashT& phash, const Array<std::pair<const char*, ImageOffsetT>>& strings,
3737 const ClassesMapT& classes, uint32_t preCalculatedDuplicateCount)
3738 {
3739 ObjCStringTable::write(phash, strings);
3740
3741 __block dyld3::Array<ClassTarget> classOffsetsArray = classOffsets();
3742 __block dyld3::Array<ClassTarget> duplicateOffsetsArray = duplicateOffsets(preCalculatedDuplicateCount);
3743
3744 // Initialize all class offsets to the sentinel value
3745 for (uint32_t i = 0; i < capacity; i++) {
3746 classOffsetsArray[i].raw = dyld3::closure::Image::ObjCImageOffset::sentinelValue;
3747 }
3748
3749 classes.forEachEntry(^(const char *const &key, const Image::ObjCClassImageOffset **values, uint64_t valuesCount) {
3750 uint32_t keyIndex = getIndex(key);
3751 assert(keyIndex != indexNotFound);
3752 assert(classOffsetsArray[keyIndex].raw == dyld3::closure::Image::ObjCImageOffset::sentinelValue);
3753
3754 if (valuesCount == 1) {
3755 // Only one entry so write it in to the class offsets directly
3756 Image::ObjCClassImageOffset classImageOffset = *(values[0]);
3757 assert(classImageOffset.classData.isDuplicate == 0);
3758 classOffsetsArray[keyIndex] = classImageOffset;
3759 return;
3760 }
3761
3762 // We have more than one value. We add a placeholder to the class offsets which records where
3763 // this class's run of duplicates begins in the duplicates array
3764 uint32_t dest = duplicateCount();
3765 duplicateCount() += valuesCount;
3766
3767 Image::ObjCClassImageOffset classImagePlaceholder;
3768 assert(valuesCount < (1 << 8));
3769 classImagePlaceholder.duplicateData.count = (uint32_t)valuesCount;
3770 classImagePlaceholder.duplicateData.index = dest;
3771 classImagePlaceholder.duplicateData.isDuplicate = 1;
3772 classOffsetsArray[keyIndex] = classImagePlaceholder;
3773
3774 for (uint64_t i = 0; i != valuesCount; ++i) {
3775 Image::ObjCClassImageOffset classImageOffset = *(values[i]);
3776 assert(classImageOffset.classData.isDuplicate == 0);
3777 duplicateOffsetsArray.push_back(classImageOffset);
3778 }
3779 });
3780 }
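// Editorial sketch: decoding a slot that write() produced above (the helper is
// hypothetical; 'visit' stands for whatever the caller does with each class):
//
//     static void forEachClassInSlot(Image::ObjCClassImageOffset slot,
//                                    const dyld3::Array<ClassTarget>& dups,
//                                    void (^visit)(Image::ObjCClassImageOffset)) {
//         if ( slot.classData.isDuplicate == 0 ) {   // single class, written directly
//             visit(slot);
//             return;
//         }
//         // placeholder: 'count' classes stored contiguously starting at 'index'
//         for (uint32_t i = 0; i != slot.duplicateData.count; ++i)
//             visit(dups[slot.duplicateData.index + i]);
//     }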
3781
3782 } // namespace closure
3783 } // namespace dyld3